from .base import AbstractStatistics
from ..compat import pickle
from ..price_parser import PriceParser
import datetime
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
class SimpleStatistics(AbstractStatistics):
"""
Simple Statistics fornisce un semplice esempio di statistiche
che possono essere raccolte tramite il trading.
Le statistiche incluse sono Sharpe Ratio, Drawdown, Max Drawdown,
Max Drawdown Duration.
TODO prevedere Alpha/Beta, comparazione della strategia con il benchmark.
TODO verificare la speed -- prevede l'esecusione per ogni tick o comunque per trade sotto il minuto
TODO prevedere slippage, fill rate, ecc..
TODO costi di commissione?
TODO prevedere qualche tipo di parametro trading-frequency parameter nel setup.
Per il calcolo dello Sharpe bisogna conoscere se si riferisce a timeframe
giornaliero, orario, un minuto, ecc.
"""
def __init__(self, config, portfolio_handler):
"""
Requires a portfolio handler.
"""
self.config = config
self.drawdowns = [0]
self.equity = []
self.equity_returns = [0.0]
# Initialize the timeseries. The correct timestamp is not yet available.
self.timeseries = ["0000-00-00 00:00:00"]
# Initialize so that the first calculation step is correct.
current_equity = PriceParser.display(portfolio_handler.portfolio.equity)
self.hwm = [current_equity]
self.equity.append(current_equity)
def update(self, timestamp, portfolio_handler):
"""
Update all the statistics that need to be tracked over time.
"""
if timestamp != self.timeseries[-1]:
# Retrieve the Portfolio equity value.
current_equity = PriceParser.display(portfolio_handler.portfolio.equity)
self.equity.append(current_equity)
self.timeseries.append(timestamp)
# Compute the percentage return between the current and previous equity values.
pct = ((self.equity[-1] - self.equity[-2]) / self.equity[-2]) * 100
self.equity_returns.append(round(pct, 4))
# Compute the drawdown.
self.hwm.append(max(self.hwm[-1], self.equity[-1]))
self.drawdowns.append(self.hwm[-1] - self.equity[-1])
def get_results(self):
"""
Return a dictionary with all the important results and statistics.
"""
# Adjust the timeseries only in the local scope. We initialized it with a
# 0-date, but it should show a realistic start date instead.
timeseries = list(self.timeseries)
timeseries[0] = pd.to_datetime(timeseries[1]) - pd.Timedelta(days=1)
statistics = {}
statistics["sharpe"] = self.calculate_sharpe()
statistics["drawdowns"] = | pd.Series(self.drawdowns, index=timeseries) | pandas.Series |
import datetime
import random
import sys
import time
import unittest
import matplotlib as mpl
#from pandas import Series
import pandas as pd
import numpy as np
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
'''
All the code in this file is experimental scribble code.
'''
#from QUANTAXIS.QAFetch import (QATdx );
from QUANTAXIS.QAUtil import QADate, QADate_trade
# learning exercise: lru_cache
class MyCache:
'''
A simple LRU-style cache with a fixed maximum size.
'''
def __init__(self):
'''constructor'''
self.cache = {}
self.max_cache_size = 10
def __contains__(self, key):
'''
Return True or False depending on whether the key exists in the cache.
:param key:
:return:
'''
return key in self.cache
def update(self, key, value):
'''
Update the cache dictionary, optionally removing the oldest entry when full.
:param key:
:param value:
:return:
'''
if key not in self.cache and len(self.cache) >= self.max_cache_size:
self.remove_oldest()
self.cache[key] = {
'date_accessed': datetime.datetime.now(), 'value': value}
def remove_oldest(self):
"""
ๅ ้คๅ
ทๅคๆๆฉ่ฎฟ้ฎๆฅๆ็่พๅ
ฅๆฐๆฎ
"""
oldest_entry = None
for key in self.cache:
if oldest_entry == None:
oldest_entry = key
print('assign oldest_entry key', oldest_entry)
elif self.cache[key]['date_accessed'] < self.cache[oldest_entry]['date_accessed']:
oldest_entry = key
print('found older key', oldest_entry)
self.cache.pop(oldest_entry)
@property
def size(self):
"""
่ฟๅ็ผๅญๅฎน้ๅคงๅฐ
"""
return len(self.cache)
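# --- Illustrative aside, not part of the original source ---
# MyCache above is a hand-rolled LRU-style cache (see the "learning exercise:
# lru_cache" note); the standard library provides the same idea as a decorator:
from functools import lru_cache

@lru_cache(maxsize=10)
def cached_square(x):
    """Results for repeated arguments are served from the LRU cache."""
    return x * x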
class Test_LRU(unittest.TestCase):
def test_LRU(self):
# test the cache
keys = ['test', 'red', 'fox', 'fence', 'junk',
'other7', 'alpha8', 'bravo9', 'cal10', 'devo11', 'ele12',
'other1', 'alpha2', 'bravo3', 'cal4', 'devo5', 'ele6',
]
s = 'abcdefghijklmnop'
cache = MyCache()
for i, key in enumerate(keys):
if key in cache:
continue
else:
value = ''.join([random.choice(s) for i in range(20)])
cache.update(key, value)
print("#%s iterations, #%s cached entries" % (i + 1, cache.size))
pass
def fab(self, max):
n, a, b = 0, 0, 1
while n < max:
print(b)
a, b = b, a + b
n = n + 1
def test_Generator(self):
self.fab(10)
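# --- Illustrative aside, not part of the original source ---
# fab() above prints the Fibonacci numbers; a true generator version would
# yield them instead, for example:
def fab_gen(self, max):
    n, a, b = 0, 0, 1
    while n < max:
        yield b
        a, b = b, a + b
        n = n + 1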
def test_None(self):
pass
class Test_DataFrame_0(unittest.TestCase):
def test_make_Series(self):
pass
def test_make_dataframe(self):
'''
possible data inputs to dataframe constructor
2d ndarray
dict of array, list, or tuples
numpy structured / record array
dict of series
dict of dict
list of dict or series
list of list or tuples
another DataFrame
Numpy masked
:return:
'''
# demo the dict of dict
self.pop = {'Nevada': {2001:2.4, 2002:2.9}, 'Ohio': {2000:1.5, 2001:1.7, 2002:6.6}}
frame3 = pd.DataFrame(self.pop)
print(frame3)
frame3 = pd.DataFrame(self.pop, index=[2000, 2001, 2002])
print(frame3)
frame3 = pd.DataFrame(self.pop, index = [2000,2001, 2002, 2003])
print(frame3)
pass
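# --- Illustrative aside, not part of the original source ---
# Another input form from the docstring above, a list of dicts:
#   pd.DataFrame([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}])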
def test_assign_serial_to_data_frame(self):
# when you assign list or arrays to columns, the value's length must match the length of the DataFrame.
self.data = {
'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'],
'year': [2000, 2001, 2002, 2001, 2002],
'pop': [1.5, 1.7, 3.6, 2.4, 2.9]
}
frame2 = pd.DataFrame(self.data,
columns=['year', 'state', 'pop', 'debt'],
index = ['one', 'two', 'three', 'four','five'])
val = pd.Series([-1.2,-1.5,-1.7,None], index=['two','four','five1','ss'])
frame2['debt'] = val
print(frame2)
frame2['eastern'] = frame2.state == 'Ohio'
print(frame2)
del frame2['eastern']
print(frame2)
pass
def test_index_object(self):
# pandas's Index objects are responsible for holding the axis labels and other metadata (like the axis name or names)
# index
# Int64Index
# MultiIndex
# DatetimeIndex
# PeriodIndex
obj = pd.Series(range(3), index = ['a','b','c'])
index = obj.index
print(index)
print(index[:1])
obj2 = pd.Series(range(6,9,1), index = ['d','e','f'])
#index append index
print(obj2)
newIndex = obj.index.append( obj2.index )
print(newIndex)
print(obj)
pass
def test_essential_functionality(self):
obj = pd.Series([4.5, 7.22, -5.3, 3.88], index=['d','e','a','c'])
print(obj)
obj = obj.reindex(['a','b','c','d','e'])
print(obj)
obj3 = pd.Series(['blue','purple','yellow'], index = [0,2,4])
print(obj3)
obj4 = obj3.reindex( range(6), method='ffill' )
print(obj4)
frame2 = pd.DataFrame(np.arange(9).reshape((3,3)), index = ['a','c','d'], columns=['Ohio','Texas','California'])
print(frame2)
states = ['Texas', 'Utah', 'California']
frame2.reindex(index=['a', 'b', 'c', 'd'], columns=states)  # .ix is deprecated; reindex is the modern equivalent
print(frame2)
pass
def test_dropping_entire(self):
obj = pd.Series(np.arange(5.), index=['a','b','c','d','e'])
# new_obj = obj.drop('c')
# print(obj)
# print(new_obj)
#
# new_obj2 = obj.drop(['d','c'])
# print(new_obj2)
#
# new_obj3 = pd.Series(np.arange(5.), index=['a','b','c','d','es'])
# print(new_obj3)
# print(new_obj3['b'])
data = pd.DataFrame(np.arange(16).reshape((4,4)), index=['Ohio','Colorado','Utha','New York'], columns=['one','two','three','four'])
# d2=data.drop(['Colorado','Ohio'])
# print(d2)
#
# d3=data.drop(['two','three'],axis=1)
# print(d3)
#
# d4 = data.drop(['Ohio'], axis=0)
# print(d4)
#
# pp = data.ix['Colorado', ['two', 'three']]
# print(pp)
ppp = data['two']
print(ppp)
kkk = data[0:2]
print(kkk)
jjj = data.loc['Colorado']  # .ix is deprecated; use .loc for label-based indexing
print(jjj)
jjjj = data.iloc[[0,1,2]]
print(jjjj)
cccc = data.loc[['Utha']]
print(cccc)
def test_Arithmetic_data_Alignment(self):
s1 = pd.Series([7.3, -2.5, 3.4, 1.5], index=['a', 'c', 'd', 'e'])
s2 = pd.Series([-2.1, 3.6, -1.5, 4, 3.1], index=['a', 'c', 'e', 'f', 'g'])
import pandas as pd
from dateutil.parser import parse
from datetime import timedelta
import requests
def download_file(url, filename):
"""
Helper method handling downloading large files from `url` to `filename`. Returns a pointer to `filename`.
"""
chunkSize = 1024 ** 2
r = requests.get(url, stream=True)
with open(filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=chunkSize):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
return filename
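# Example usage (the URL is hypothetical, for illustration only):
#   download_file("https://example.com/time_series.csv", "time_series.csv")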
def open_global_ts(path):
"""
Opens a time series and reshapes it into an easy-to-use format.
Parameters
----------
path : str or pathlib's Path
Path to the csv file
Returns
-------
ts: Pandas Dataframe
"""
ts = pd.read_csv(path)
ini, end = parse(ts.columns[4]), parse(ts.columns[-1])
delta = end - ini
dates = [ini + timedelta(days=i) for i in range(delta.days + 1)]
ts_cols = [date.strftime('%-m/%-d/%y') for date in dates]
ts = ts[ts_cols + ['Country']].set_index('Country').T
ts.index = pd.to_datetime(ts.index)
ts.sort_index(inplace=True)
ts.columns.name = ''
return ts
def ts_since_two_per_country(df):
"""
Takes the global time series by country and extracts a time series
for each country from the point where the value exceeds 2.
Parameters
----------
df : Pandas Dataframe
Dataframe containing one time series per country.
Returns
-------
countries: list of Dataframes per country
"""
df = df.loc[:,~df.columns.duplicated()]
countries = [df[df[country] > 2][[country]] for country in df.columns[:]]
return countries
def compute_tasa_incidencia(df, pais):
"""
Computes the incidence rate per 100,000 inhabitants for the given country.
TI = \frac{\text{Cumulative Confirmed Total} \cdot 10^5}{\text{Population}}
Parameters
----------
df : Pandas Dataframe
Time series of confirmed cases.
pais : str
Name of the country; it must be a column.
Returns
-------
tasa_incidencia: float
"""
return df[pais].values[-1] * 1e5 / info_countries[info_countries['Country'] == pais]['Population (2020)'].values
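# Worked example (hypothetical numbers): with 5,000 cumulative confirmed cases
# and a population of 1,000,000, the incidence rate is 5000 * 1e5 / 1e6 = 500
# per 100,000 inhabitants.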
def compute_tasa_contagio(pais, inicio, fin, return_params=False):
"""
Computes the contagion rate per 100,000 inhabitants for a given period.
\text{Contagion Rate} = \frac{\text{number of active cases} \cdot 10^5}{\text{population at risk of infection during the chosen period}}
Parameters
----------
pais : str
Name of the country; it must be a column
inicio : datetime
Start of the period
fin : datetime
End of the period
return_params : boolean, optional
Also return the parameters involved
Returns
-------
tasa_contagio: float
"""
inicio = pd.to_datetime(inicio)
fin = pd.to_datetime(fin)
import os
import sys
import math
from neuralprophet.df_utils import join_dataframes
import numpy as np
import pandas as pd
import torch
from collections import OrderedDict
from neuralprophet import hdays as hdays_part2
import holidays as pyholidays
import warnings
import logging
log = logging.getLogger("NP.utils")
def reg_func_abs(weights):
"""Regularization of weights to induce sparcity
Args:
weights (torch tensor): Model weights to be regularized towards zero
Returns:
regularization loss, scalar
"""
return torch.mean(torch.abs(weights)).squeeze()
def reg_func_trend(weights, threshold=None):
"""Regularization of weights to induce sparcity
Args:
weights (torch tensor): Model weights to be regularized towards zero
threshold (float): value below which not to regularize weights
Returns:
regularization loss, scalar
"""
abs_weights = torch.abs(weights)
if threshold is not None and not math.isclose(threshold, 0):
abs_weights = torch.clamp(abs_weights - threshold, min=0.0)
reg = torch.sum(abs_weights).squeeze()
return reg
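# Worked example (illustrative): for weights [0.05, -0.2, 0.5] with threshold=0.1,
# the absolute weights [0.05, 0.2, 0.5] are clamped to [0.0, 0.1, 0.4], so the
# returned regularization term is 0.5.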
def reg_func_season(weights):
return reg_func_abs(weights)
def reg_func_events(events_config, country_holidays_config, model):
"""
Regularization of events coefficients to induce sparsity
Args:
events_config (OrderedDict): Configurations (upper, lower windows, regularization) for user specified events
country_holidays_config (OrderedDict): Configurations (holiday_names, upper, lower windows, regularization)
for country specific holidays
model (TimeNet): The TimeNet model object
Returns:
regularization loss, scalar
"""
reg_events_loss = 0.0
if events_config is not None:
for event, configs in events_config.items():
reg_lambda = configs.reg_lambda
if reg_lambda is not None:
weights = model.get_event_weights(event)
for offset in weights.keys():
reg_events_loss += reg_lambda * reg_func_abs(weights[offset])
if country_holidays_config is not None:
reg_lambda = country_holidays_config.reg_lambda
if reg_lambda is not None:
for holiday in country_holidays_config.holiday_names:
weights = model.get_event_weights(holiday)
for offset in weights.keys():
reg_events_loss += reg_lambda * reg_func_abs(weights[offset])
return reg_events_loss
def reg_func_regressors(regressors_config, model):
"""
Regularization of regressors coefficients to induce sparsity
Args:
regressors_config (OrderedDict): Configurations for user specified regressors
model (TimeNet): The TimeNet model object
Returns:
regularization loss, scalar
"""
reg_regressor_loss = 0.0
for regressor, configs in regressors_config.items():
reg_lambda = configs.reg_lambda
if reg_lambda is not None:
weight = model.get_reg_weights(regressor)
reg_regressor_loss += reg_lambda * reg_func_abs(weight)
return reg_regressor_loss
def symmetric_total_percentage_error(values, estimates):
"""Compute STPE
Args:
values (np.array):
estimates (np.array):
Returns:
scalar (float)
"""
sum_abs_diff = np.sum(np.abs(estimates - values))
sum_abs = np.sum(np.abs(estimates) + np.abs(values))
return 100 * sum_abs_diff / (10e-9 + sum_abs)
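# Worked example (illustrative): values=[1, 2], estimates=[1, 3] gives
# sum_abs_diff = 1 and sum_abs = 7, so the STPE is 100 * 1 / 7 ~= 14.29.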
def season_config_to_model_dims(season_config):
"""Convert the NeuralProphet seasonal model configuration to input dims for TimeNet model.
Args:
season_config (AllSeasonConfig): NeuralProphet seasonal model configuration
Returns:
seasonal_dims (dict(int)): input dims for TimeNet model
"""
if season_config is None or len(season_config.periods) < 1:
return None
seasonal_dims = OrderedDict({})
for name, period in season_config.periods.items():
resolution = period.resolution
if season_config.computation == "fourier":
resolution = 2 * resolution
seasonal_dims[name] = resolution
return seasonal_dims
def get_holidays_from_country(country, df=None):
"""
Return all possible holiday names of given country
Args:
country (string): country name to retrieve country specific holidays
df (Dataframe or list of dataframes): Dataframe or list of dataframes from which datestamps will be
retrieved from
Returns:
A set of all possible holiday names of given country
"""
if df is None:
dates = None
else:
if isinstance(df, list):
df, _ = join_dataframes(df)
dates = df["ds"].copy(deep=True)
if dates is None:
years = np.arange(1995, 2045)
else:
years = list({x.year for x in dates})
# manually defined holidays
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
holiday_names = getattr(hdays_part2, country)(years=years).values()
except AttributeError:
try:
holiday_names = getattr(pyholidays, country)(years=years).values()
except AttributeError:
raise AttributeError("Holidays in {} are not currently supported!".format(country))
return set(holiday_names)
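# Example usage (assuming the underlying holiday packages support "US"):
#   get_holidays_from_country("US")  # -> a set of names such as "Independence Day"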
def events_config_to_model_dims(events_config, country_holidays_config):
"""
Convert the NeuralProphet user specified events configurations along with country specific
holidays to input dims for TimeNet model.
Args:
events_config (OrderedDict): Configurations (upper, lower windows, regularization) for user specified events
country_holidays_config (configure.Holidays): Configurations (holiday_names, upper, lower windows, regularization)
for country specific holidays
Returns:
events_dims (OrderedDict): A dictionary with keys corresponding to individual holidays
containing configs with properties such as the mode, list of event delims of the event corresponding to the offsets,
and the indices in the input dataframe corresponding to each event.
"""
if events_config is None and country_holidays_config is None:
return None
additive_events_dims = pd.DataFrame(columns=["event", "event_delim"])
multiplicative_events_dims = pd.DataFrame(columns=["event", "event_delim"])
if events_config is not None:
for event, configs in events_config.items():
mode = configs.mode
for offset in range(configs.lower_window, configs.upper_window + 1):
event_delim = create_event_names_for_offsets(event, offset)
if mode == "additive":
additive_events_dims = additive_events_dims.append(
{"event": event, "event_delim": event_delim}, ignore_index=True
)
else:
multiplicative_events_dims = multiplicative_events_dims.append(
{"event": event, "event_delim": event_delim}, ignore_index=True
)
if country_holidays_config is not None:
lower_window = country_holidays_config.lower_window
upper_window = country_holidays_config.upper_window
mode = country_holidays_config.mode
for country_holiday in country_holidays_config.holiday_names:
for offset in range(lower_window, upper_window + 1):
holiday_delim = create_event_names_for_offsets(country_holiday, offset)
if mode == "additive":
additive_events_dims = additive_events_dims.append(
{"event": country_holiday, "event_delim": holiday_delim}, ignore_index=True
)
else:
multiplicative_events_dims = multiplicative_events_dims.append(
{"event": country_holiday, "event_delim": holiday_delim}, ignore_index=True
)
# sort based on event_delim
event_dims = pd.DataFrame()
#!/usr/bin/env python
# coding: utf-8
# # Polifact_Analysis
#
# ### @Author : <NAME>
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
import plotly.express as px
from scipy import signal
import warnings
warnings.filterwarnings("ignore")
# to make the shell more interactive
from IPython.display import display
from IPython.display import Image
# setting up the chart size and background
plt.rcParams['figure.figsize'] = (16, 8)
plt.style.use('fivethirtyeight')
# In[2]:
pwd
# In[3]:
path ='E:\\DataScience\\MachineLearning\\Polotifact_Data'
# In[4]:
import os
from glob import glob
os.listdir(path)
# In[5]:
df = pd.read_csv(path+"\\politifact.csv")
df.head(5)
# In[6]:
pd.set_option('display.max_colwidth', 200)
df.head(10)
# In[7]:
df.shape
# In[8]:
df.info()
# In[9]:
df.isnull().sum()
# In[10]:
df.nunique(axis=0)
# In[11]:
df['source'].unique()
# In[12]:
df['veracity'].unique()
# In[13]:
df['source'].value_counts()
# In[14]:
df['veracity'].value_counts()
# In[15]:
# Veracity means : conformity with truth or fact
# Full Flop, Half Flip and No Flip do not give any clear meaning in the sense of veracity.
# Half-True contains 50% True and 50% False.
df_new = df[~df.isin(['Half-True','Full Flop','No Flip','Half Flip']).any(axis=1)]
# In[16]:
df_new.head(5)
# In[17]:
df_new['veracity'].value_counts()
# In[18]:
#df_new = df_new.replace({'False': 0 ,'Mostly False' : 0 , 'Pants on Fire!' : 0, 'Mostly True' : 1 , 'True' : 1 })
# In[20]:
df_new.head()
# In[21]:
df_new.shape
# In[22]:
Source_top_10 = df_new['source'].value_counts().nlargest(10)
Source_top_10
# In[23]:
df_10 = df_new[df_new.isin(['<NAME>','<NAME>','Bloggers','<NAME>','<NAME>','<NAME>',
'<NAME>','<NAME>','<NAME>','<NAME>']).any(axis=1)]
df_10.head()
# In[24]:
df_10['source'].value_counts()
sns.histplot(data=df_10,x='source',kde=True, hue=df_10['veracity'])
# In[25]:
fig = px.pie(df_10 , values =Source_top_10 ,
names =['<NAME>','<NAME>','Bloggers','<NAME>','<NAME>','<NAME>',
'<NAME>','<NAME>','<NAME>','<NAME>'],
title = 'Top 10 Sources From where Statement is taken :',
labels={'source'})
fig.show()
# In[26]:
# 0 - False statement , 1- True statement .....
fig = px.sunburst(df_10, names=None, values=None, parents=None, path=['source','veracity'],
color='veracity', color_continuous_scale=None, range_color=None, color_continuous_midpoint=None,
color_discrete_sequence=None,
labels={'source','veracity'}, title= "Detailed_Analysis Chart : 0 - False Statement & 1 - True Statement")
fig.show()
# In[27]:
remove = "/web/20180705082623"
len(remove)
# In[28]:
df_new['link'] =df_new['link'].apply(lambda x: x[len(remove):])
# In[29]:
df_new.head()
# In[30]:
# extracting the date from the link column
# r() = first group to extract; you can use multiple ()
# ?P<column_name> = column name for convenience
# \d = digit
# {n} = number of digits to include
# . = wildcard
# + = greedy search
# ? = but not too greedy
df_new["date"] = df_new.link.str.extract(r'(\d{4}/.+?\d{2})')
# In[31]:
df_new.head()
# In[32]:
# change the date column to a datetime column for convenience
df_new.date = pd.to_datetime(df_new.date, infer_datetime_format=True)
import builtins
from io import StringIO
from itertools import product
from string import ascii_lowercase
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna)
import pandas.core.nanops as nanops
from pandas.util import testing as tm
@pytest.mark.parametrize("agg_func", ['any', 'all'])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("vals", [
['foo', 'bar', 'baz'], ['foo', '', ''], ['', '', ''],
[1, 2, 3], [1, 0, 0], [0, 0, 0],
[1., 2., 3.], [1., 0., 0.], [0., 0., 0.],
[True, True, True], [True, False, False], [False, False, False],
[np.nan, np.nan, np.nan]
])
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({'key': ['a'] * 3 + ['b'] * 3, 'val': vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == 'any':
exp = False
exp_df = DataFrame([exp] * 2, columns=['val'], index=Index(
['a', 'b'], name='key'))
result = getattr(df.groupby('key'), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({'nn': [11, 11, 22, 22],
'ii': [1, 2, 3, 4],
'ss': 4 * ['mama']})
result = aa.groupby('nn').max()
assert 'ss' in result
result = aa.groupby('nn').max(numeric_only=False)
assert 'ss' in result
result = aa.groupby('nn').min()
assert 'ss' in result
result = aa.groupby('nn').min(numeric_only=False)
assert 'ss' in result
def test_intercept_builtin_sum():
s = Series([1., 2., np.nan, 3.])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize('keys', [
"jim", # Single key
["jim", "joe"] # Multi-key
])
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)),
columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = ("invalid frame shape: {} "
"(expected ({}, 3))".format(result.shape, ngroups))
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)))
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(),
getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{'group': [1, 1, 2],
'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'category_string': pd.Series(list('abc')).astype('category'),
'category_int': [7, 8, 9],
'datetime': pd.date_range('20130101', periods=3),
'datetimetz': pd.date_range('20130101',
periods=3,
tz='US/Eastern'),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
columns=['group', 'int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
expected_columns_numeric = Index(['int', 'float', 'category_int'])
# mean / median
expected = pd.DataFrame(
{'category_int': [7.5, 9],
'float': [4.5, 6.],
'timedelta': [pd.Timedelta('1.5s'),
pd.Timedelta('3s')],
'int': [1.5, 3],
'datetime': [pd.Timestamp('2013-01-01 12:00:00'),
pd.Timestamp('2013-01-03 00:00:00')],
'datetimetz': [
pd.Timestamp('2013-01-01 12:00:00', tz='US/Eastern'),
pd.Timestamp('2013-01-03 00:00:00', tz='US/Eastern')]},
index=Index([1, 2], name='group'),
columns=['int', 'float', 'category_int',
'datetime', 'datetimetz', 'timedelta'])
for attr in ['mean', 'median']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(['int', 'float', 'string',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['min', 'max']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['first', 'last']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_int', 'timedelta'])
for attr in ['sum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 4 15:34:25 2020
@author: diego
"""
import pandas as pd
import os
import sqlite3
from pandas_datareader import DataReader
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from fuzzywuzzy import process
import update_db
pd.set_option('display.width', 400)
pd.set_option('display.max_columns', 10)
conn = sqlite3.connect(os.path.join('data', 'fundos.db'))
db = conn.cursor()
update_db.update_pipeline()
#%% functions
def get_fund_id():
"""
Use this function when you want to find the fund_id using the fund name.
Returns
-------
fund_id: string
The CNPJ of the fund, that is the brazilian tax id and used in this
script as fund_id.
"""
funds = pd.read_sql("SELECT DISTINCT denom_social FROM inf_cadastral", conn)
funds['denom_social_query'] = funds['denom_social'].str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode('utf-8')
funds_list = funds['denom_social_query'].to_list()
x = 0
while x == 0:
name = input("Mutual Fund name: ")
result = process.extract(name.upper(), funds_list, limit=5)
for i in range(1,6):
print(str(i)+' '+result[i-1][0])
fund = -1
while fund not in range(0,6):
query = input("Type fund number or 0 to query again: ")
try:
if int(query) in range(0,6):
fund = int(query)
if fund != 0:
x = 1
except:
print("Type a number from 1 to 5 to choose, or 0 to try again.")
fund = result[fund-1][0]
idx = funds[funds['denom_social_query'] == fund].index[0]
fund = funds.loc[idx]['denom_social']
fund_id = pd.read_sql(f"SELECT cnpj FROM inf_cadastral WHERE denom_social = '{fund}'", conn)
return fund_id.values[0][0]
def get_returns(fund_id, start='all', end='all'):
"""
Returns a pandas dataframe with log returns and net asset value (nav) that
starts at 1.
Parameters
----------
fund_id : string
Three options here: fund CNPJ, 'ibov', 'cdi'.
start : string, optional
Date formatted as '2019-12-31' or 'all'. The default is 'all'.
end : string, optional
Date formatted as '2019-12-31' or 'all'. The default is 'all'.
Returns
-------
log_returns: pandas dataframe
Log returns and net asset value that starts at 1.
"""
if start == 'all':
start = pd.to_datetime('1990-01-01')
else:
start = pd.to_datetime(start)
if end == 'all':
end = pd.to_datetime('2100-01-01')
else:
end = pd.to_datetime(end)
if fund_id == 'ibov':
returns = DataReader('^BVSP', 'yahoo', start=start+pd.DateOffset(-7), end=end )[['Adj Close']]
returns['d_factor'] = returns['Adj Close'].pct_change().fillna(0) + 1
elif fund_id == 'cdi':
returns = pd.read_sql(f"SELECT date, d_factor FROM cdi WHERE date >= '{start}' AND date <= '{end}' ORDER BY date", conn, index_col='date')
else:
returns = pd.read_sql(f"SELECT date, quota FROM quotas WHERE cnpj = '{fund_id}' AND date >= '{start+pd.DateOffset(-7)}' AND date <= '{end}' ORDER BY date", conn, index_col='date')
returns['d_factor'] = (returns['quota'].pct_change().fillna(0)) + 1
returns = returns[['d_factor']]
returns['log_return'] = np.log(returns['d_factor'])
returns.index = pd.to_datetime(returns.index)
returns = returns[returns.index >= start]
returns['nav'] = np.exp(returns['log_return'].cumsum())
return returns[['log_return', 'nav']]
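# Example usage (the CNPJ below is a made-up placeholder):
#   cdi = get_returns('cdi', start='2019-01-01', end='2019-12-31')
#   fund = get_returns('00.000.000/0001-00', start='2019-01-01')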
def fund_performance(fund_id, start='all', end='all', benchmark='cdi', plot=True):
"""
Creates two dataframes, one with the accumulated returns and the second
with the performance table of the fund.
Parameters
----------
fund_id : string
The CNPJ of the fund.
start : string, optional
Date formatted as '2019-12-31' or 'all'. The default is 'all'.
end : string, optional
Date formatted as '2019-12-31' or 'all'. The default is 'all'.
benchmark : string, optional
Benchmark used in the plot. Can be 'ibov' or 'cdi'. The default is 'cdi'.
plot : boolean, optional
Plot or not the results. The default is True.
Returns
-------
accumulated returns : pandas dataframe
Accumulated % returns.
performance_table : pandas dataframe
Performance table of the fund.
"""
name = pd.read_sql(f"SELECT denom_social FROM inf_cadastral WHERE cnpj = '{fund_id}'", conn)
name = name.values[0][0]
name = name.split()[0]
returns = get_returns(fund_id = fund_id, start=start, end=end)
returns = returns[['log_return']]
returns = returns.rename(columns={'log_return': name})
returns.index = pd.to_datetime(returns.index)
ytd = (np.exp(returns.groupby(returns.index.year).sum())-1)*100
performance_table = returns.iloc[:,0].groupby([returns.index.year, returns.index.strftime('%m')], sort=False).sum().unstack()
performance_table = (np.exp(performance_table)-1)*100
cols = performance_table.columns.tolist()
performance_table = performance_table[sorted(cols)]
performance_table = pd.concat([performance_table,ytd], axis=1)
performance_table = performance_table.rename(columns={performance_table.columns[-1]: 'ytd'})
if plot:
acc_returns = get_returns(fund_id=benchmark, start=start, end=end)
acc_returns = acc_returns[['log_return']]
acc_returns = acc_returns.rename({'log_return': benchmark.upper()}, axis=1)
acc_returns = acc_returns.merge(returns, how='left', left_index=True, right_index=True)
acc_returns = acc_returns.dropna()
acc_returns = (np.exp(acc_returns.cumsum())-1)*100
fig, ax = plt.subplots(figsize=(16,9))
sns.heatmap(ax=ax,
data=performance_table,
center=0, vmin=-.01, vmax=.01,
linewidths=.5,
cbar=False,
annot=True, annot_kws={'fontsize': 10, 'fontweight': 'bold'}, fmt='.1f',
cmap=['#c92d1e', '#38c93b'])
plt.title('Performance Table - % return', fontsize=25)
ax.set_xlabel('Month', fontsize=15)
ax.set_ylabel('Year', fontsize=15)
plt.show()
fig, ax = plt.subplots(figsize=(16,9))
ax.plot(acc_returns.iloc[:,0], label=acc_returns.columns[0], color='black', linestyle='--')
ax.plot(acc_returns.iloc[:,1], label=acc_returns.columns[1])
ax.fill_between(acc_returns.index, acc_returns.iloc[:,0], acc_returns.iloc[:,1], alpha=.25, where=(acc_returns.iloc[:,1]>acc_returns.iloc[:,0]), color='Green', label='Above Benchmark')
ax.fill_between(acc_returns.index, acc_returns.iloc[:,0], acc_returns.iloc[:,1], alpha=.25, where=(acc_returns.iloc[:,1]<=acc_returns.iloc[:,0]), color='Red', label='Below Benchmark')
ax.grid()
ax.legend(loc='upper left')
ax.set_xlabel('Date')
ax.set_ylabel('Return (%)')
plt.show()
returns = (np.exp(returns.cumsum())-1)*100
returns = returns.rename(columns={returns.columns[-1]: 'acc_return'})
return returns[['acc_return']], performance_table
def compare(fund_ids, start='all', end='all', benchmark='cdi', best_start_date=False, plot=True):
"""
Compare the returns, volatility and sharpe ratio of the funds.
Parameters
----------
fund_ids : list of stings
list with the CNPJs of the funds to compare.
start : string, optional
Date formatted as '2019-12-31' or 'all'. The default is 'all'.
end : string, optional
Date formatted as '2019-12-31' or 'all'. The default is 'all'.
benchmark : TYPE, optional
Benchmark used in the plot. Can be 'ibov' or 'cdi'. The default is 'cdi'.
best_start_date : boolean, optional
Forces that the start date is set to a date when all funds have quotas available. The default is False.
plot : boolean, optional
Plot or not the results. The default is True.
Returns
-------
acc_returns : pandas dataframe
Accumulated returns of the funds.
details : pandas dataframe
Volatility, Sharpe ratio, Annualized return and Total return.
"""
acc_returns = get_returns(fund_id=benchmark, start=start, end=end)
acc_returns = acc_returns[['log_return']]
acc_returns = acc_returns.rename({'log_return': benchmark.upper()}, axis=1)
for fund in fund_ids:
name = pd.read_sql(f"SELECT denom_social FROM inf_cadastral WHERE cnpj = '{fund}'", conn)
name = name.values[0][0]
name = name.split()[0]
returns = get_returns(fund_id = fund, start=start, end=end)
returns = returns[['log_return']]
returns = returns.rename(columns={'log_return': name})
returns.index = pd.to_datetime(returns.index)
# To add a new cell, type '#%%'
# To add a new markdown cell, type '#%% [markdown]'
# %%
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import dm6103 as dm
#%% [markdown]
# The dataset is obtained from
# https://gssdataexplorer.norc.org
# for you here. But if you are interested, you can try to get it yourself.
# create an account
# create a project
# select these eight variables:
# ballot, id, year, hrs1 (hours worked last week), marital,
# childs, income, happy,
# (use the search function to find them if needed.)
# add the variables to cart
# extract data
# name your extract
# add all the 8 variables to the extract
# Choose output option, select only years 2000 - 2018
# file format Excel Workbook (data + metadata)
# create extract
# It will take some time to process.
# When it is ready, click on the download button.
# you will get a .tar file
# if your system cannot unzip it, google it. (Windows can use 7zip utility. Mac should have it (tar function) built-in.)
# Open in excel (or other comparable software), then save it as csv
# So now you have Happy table to work with
#
# When we import using pandas, we need to do pre-processing like what we did in class
# So clean up the columns. You can use some of the functions we defined in class, like the total family income, and number of children.
# Other ones like worked hour last week, etc, you'll need a new function.
# Happy: change it to numeric codes (ordinal variable)
# Ballot: just call it a, b, or c
# Marital status, it's up to you whether you want to rename the values.
#
#
# After the preprocessing, make these plots
# Box plot for hours worked last week, for the different marital statuses. (So x is marital status, and y is hours worked; see the sketch at the end of this excerpt.)
# Violin plot for income vs happiness,
# Use happiness as numeric, make scatterplot with jittering in both x and y between happiness and number of children. Choose what variable you want for hue/color.
# If you have somewhat of a belief that happiness is caused/determined/affected by number of children, or the other
# way around (having babies/children are caused/determined/affected by happiness), then put the dependent
# variable in y, and briefly explain your choice.
#
# Answer:\
# Happiness was placed as the dependent variable on the y-axis when comparing happiness and number of children.
# This was done because, if happiness is expected to be caused by the number of children, that makes happiness dependent on the number of children an individual has.
# If the number of children does determine happiness some kind of pattern or trend is expected to be observed in the data when plotted on a scatterplot.
#
# Additionally, another scatterplot was created to present the data with the axes flipped.
# However, neither graph depicts a particularly strong linear trend nor does either graph indicate causality.
dfhappy = dm.api_dsLand('Happy')
#%%
# checking the values of each column to create functions specific to processing the current data set
# dfhappy.column_name.value_counts(dropna=False)
# clean up ballet column name to ballot
dfhappy.rename({'ballet': 'ballot'}, axis=1, inplace=True)
# %%
def clean_df_any(row, col):
curr_row = row[col].strip()
try:
curr_row = int(curr_row)
return curr_row
except ValueError:
pass
if curr_row == "Eight or m":
return min(8 + np.random.chisquare(2), 12)
if curr_row == "Not applicable":
return np.nan
if curr_row == "Dk na":
return np.nan
if curr_row == "No answer":
return np.nan
return curr_row
def clean_df_income(row):
curr_row = row['income'].strip()
if curr_row == "Don't know":
return np.nan
if curr_row == "No answer":
return np.nan
if curr_row == "Refused":
return np.nan
if curr_row == "Lt $1000":
return np.random.uniform(0, 999)
if curr_row == "$1000 to 2999":
return np.random.uniform(1000, 2999)
if curr_row == "$3000 to 3999":
return np.random.uniform(3000, 3999)
if curr_row == "$4000 to 4999":
return np.random.uniform(4000, 4999)
if curr_row == "$5000 to 5999":
return np.random.uniform(5000, 5999)
if curr_row == "$6000 to 6999":
return np.random.uniform(6000, 6999)
if curr_row == "$7000 to 7999":
return np.random.uniform(7000, 7999)
if curr_row == "$8000 to 9999":
return np.random.uniform(8000, 9999)
if curr_row == "$10000 - 14999":
return np.random.uniform(10000, 14999)
if curr_row == "$15000 - 19999":
return np.random.uniform(15000, 19999)
if curr_row == "$20000 - 24999":
return np.random.uniform(20000, 24999)
if curr_row == "$25000 or more":
return 25000 + 10000 * np.random.chisquare(2)
return np.nan
def clean_df_happy(row):
curr_row = row['happy'].strip()
if curr_row == "Don't know":
return np.nan
if curr_row == "No answer":
return np.nan
if curr_row == "Not applicable":
return np.nan
if curr_row == "Not too happy":
return 0
if curr_row == "Pretty happy":
return 1
if curr_row == "Very happy":
return 2
return np.nan
def clean_df_ballot(row):
curr_row = row['ballot'].strip()[-1:]
return curr_row
print("\nReady to continue.")
#%%
# preprocess data
#hrs1 column should be numeric not object type
dfhappy['hrs1'] = dfhappy.apply(clean_df_any, col='hrs1', axis=1)
dfhappy['hrs1'] = pd.to_numeric(dfhappy['hrs1'], errors='coerce')
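#%%
# --- Illustrative sketch, not part of the original notebook ---
# One of the plots requested above: hours worked last week by marital status
# (assumption: the raw 'marital' column is usable without further cleaning).
sns.boxplot(data=dfhappy, x='marital', y='hrs1')
plt.title('Hours worked last week by marital status')
plt.show()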
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import open as io_open
from builtins import str
from future import standard_library
standard_library.install_aliases()
__all__ = [
'generate_xref_descriptions',
'generate_files_for_reinsurance',
'ReinsuranceLayer',
'write_ri_input_files'
]
import json
import logging
import math
import os
import shutil
import subprocess32 as subprocess
from collections import namedtuple
from itertools import product
import anytree
import numbers
import pandas as pd
from ..utils.exceptions import OasisException
from ..utils.log import oasis_log
from . import oed
from six import string_types
# Metadata about an inuring layer
InuringLayer = namedtuple(
"InuringLayer",
"inuring_priority reins_numbers is_valid validation_messages")
def _get_location_tiv(location, coverage_type_id):
switcher = {
oed.BUILDING_COVERAGE_TYPE_ID: location.get('BuildingTIV', 0),
oed.OTHER_BUILDING_COVERAGE_TYPE_ID: location.get('OtherTIV', 0),
oed.CONTENTS_COVERAGE_TYPE_ID: location.get('ContentsTIV', 0),
oed.TIME_COVERAGE_TYPE_ID: location.get('BITIV', 0)
}
return switcher.get(coverage_type_id, 0)
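# Example (illustrative): for a location row with BuildingTIV=100000 and
# coverage_type_id equal to oed.BUILDING_COVERAGE_TYPE_ID, this returns 100000;
# unknown coverage types fall back to 0.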
def generate_xref_descriptions(accounts_fp, locations_fp):
accounts = pd.read_csv(accounts_fp)
locations = pd.read_csv(locations_fp)
coverage_id = 0
item_id = 0
group_id = 0
policy_agg_id = 0
profile_id = 0
site_agg_id = 0
accounts_and_locations = pd.merge(accounts, locations, left_on='AccNumber', right_on='AccNumber')
for acc_and_loc, coverage_type, peril in product((acc for _, acc in accounts_and_locations.iterrows()), oed.COVERAGE_TYPES, oed.PERILS):
tiv = _get_location_tiv(acc_and_loc, coverage_type)
if tiv > 0:
policy_agg_id += 1
profile_id += 1
group_id += 1
site_agg_id += 1
profile_id += 1
coverage_id += 1
item_id += 1
yield oed.XrefDescription(
xref_id = item_id,
account_number = acc_and_loc.get('AccNumber'),
location_number = acc_and_loc.get('LocNumber'),
location_group = acc_and_loc.get('LocGroup'),
cedant_name = acc_and_loc.get('CedantName'),
producer_name = acc_and_loc.get('ProducerName'),
lob = acc_and_loc.get('LOB'),
country_code = acc_and_loc.get('CountryCode'),
reins_tag = acc_and_loc.get('ReinsTag'),
coverage_type_id = coverage_type,
peril_id = peril,
policy_number = acc_and_loc.get('PolNumber'),
portfolio_number = acc_and_loc.get('PortNumber'),
tiv = tiv
)
@oasis_log
def generate_files_for_reinsurance(
items,
coverages,
fm_xrefs,
xref_descriptions,
ri_info_df,
ri_scope_df,
direct_oasis_files_dir,
gulsummaryxref=pd.DataFrame(),
fmsummaryxref=pd.DataFrame()):
"""
Generate files for reinsurance.
"""
inuring_metadata = {}
previous_inuring_priority = None
previous_risk_level = None
reinsurance_index = 1
for inuring_priority in range(1, ri_info_df['InuringPriority'].max() + 1):
# Filter the reinsNumbers by inuring_priority
reins_numbers = ri_info_df[ri_info_df['InuringPriority'] == inuring_priority].ReinsNumber.tolist()
risk_level_set = set(ri_scope_df[ri_scope_df['ReinsNumber'].isin(reins_numbers)].RiskLevel)
for risk_level in oed.REINS_RISK_LEVELS:
if risk_level not in risk_level_set:
continue
written_to_dir = _generate_files_for_reinsurance_risk_level(
inuring_priority,
items,
coverages,
fm_xrefs,
xref_descriptions,
gulsummaryxref,
fmsummaryxref,
ri_info_df,
ri_scope_df,
previous_inuring_priority,
previous_risk_level,
risk_level,
reinsurance_index,
direct_oasis_files_dir)
inuring_metadata[reinsurance_index] = {
'inuring_priority': inuring_priority,
'risk_level': risk_level,
'directory': written_to_dir,
}
previous_inuring_priority = inuring_priority
previous_risk_level = risk_level
reinsurance_index = reinsurance_index + 1
return inuring_metadata
def _generate_files_for_reinsurance_risk_level(
inuring_priority,
items,
coverages,
fm_xrefs,
xref_descriptions,
gulsummaryxref,
fmsummaryxref,
ri_info_df,
ri_scope_df,
previous_inuring_priority,
previous_risk_level,
risk_level,
reinsurance_index,
direct_oasis_files_dir):
"""
Generate files for a reinsurance risk level.
"""
reins_numbers_1 = ri_info_df[
ri_info_df['InuringPriority'] == inuring_priority].ReinsNumber
if reins_numbers_1.empty:
return None
reins_numbers_2 = ri_scope_df[
ri_scope_df.isin({"ReinsNumber": reins_numbers_1.tolist()}).ReinsNumber
& (ri_scope_df.RiskLevel == risk_level)].ReinsNumber
if reins_numbers_2.empty:
return None
ri_info_inuring_priority_df = ri_info_df[ri_info_df.isin(
{"ReinsNumber": reins_numbers_2.tolist()}).ReinsNumber]
output_name = "ri_{}_{}".format(inuring_priority, risk_level)
reinsurance_layer = ReinsuranceLayer(
name=output_name,
ri_info=ri_info_inuring_priority_df,
ri_scope=ri_scope_df,
items=items,
coverages=coverages,
fm_xrefs=fm_xrefs,
xref_descriptions=xref_descriptions,
gulsummaryxref=gulsummaryxref,
fmsummaryxref=fmsummaryxref,
risk_level=risk_level
)
reinsurance_layer.generate_oasis_structures()
output_dir = os.path.join(direct_oasis_files_dir, "RI_{}".format(reinsurance_index))
reinsurance_layer.write_oasis_files(output_dir)
return output_dir
@oasis_log
def write_ri_input_files(
exposure_fp,
accounts_fp,
items_fp,
coverages_fp,
gulsummaryxref_fp,
fm_xref_fp,
fmsummaryxref_fp,
ri_info_fp,
ri_scope_fp,
target_dir
):
xref_descriptions = pd.DataFrame(generate_xref_descriptions(accounts_fp, exposure_fp))
return generate_files_for_reinsurance(
pd.read_csv(items_fp),
pd.read_csv(coverages_fp),
pd.read_csv(fm_xref_fp),
xref_descriptions,
pd.read_csv(ri_info_fp),
pd.read_csv(ri_scope_fp),
target_dir,
gulsummaryxref=pd.read_csv(gulsummaryxref_fp),
fmsummaryxref=pd.read_csv(fmsummaryxref_fp)
)
class ReinsuranceLayer(object):
"""
Generates ktools inputs and runs financial module for a reinsurance structure.
"""
def __init__(self,
name, ri_info, ri_scope, items, coverages, fm_xrefs,
xref_descriptions, risk_level, fmsummaryxref=pd.DataFrame(), gulsummaryxref=pd.DataFrame(), logger=None):
self.logger = logger or logging.getLogger()
self.name = name
self.coverages = coverages
self.items = items
self.fm_xrefs = fm_xrefs
self.xref_descriptions = xref_descriptions
self.fmsummaryxref = fmsummaryxref
self.gulsummaryxref = gulsummaryxref
self.item_ids = list()
self.item_tivs = list()
self.fmprogrammes = pd.DataFrame()
self.fmprofiles = pd.DataFrame()
self.fm_policytcs = pd.DataFrame()
self.risk_level = risk_level
self.ri_info = ri_info
self.ri_scope = ri_scope
self.add_profiles_args = namedtuple(
"AddProfilesArgs",
"program_node, ri_info_row, scope_rows, overlay_loop, layer_id, "
"node_layer_profile_map, fmprofiles_list, nolossprofile_id, passthroughprofile_id")
def _add_node(self, description, parent, level_id, agg_id,
portfolio_number=oed.NOT_SET_ID, account_number=oed.NOT_SET_ID,
policy_number=oed.NOT_SET_ID, location_number=oed.NOT_SET_ID,
location_group=oed.NOT_SET_ID):
node = anytree.Node(
description,
parent=parent,
level_id=level_id,
agg_id=agg_id,
portfolio_number=portfolio_number,
account_number=account_number,
policy_number=policy_number,
location_group=location_group,
location_number=location_number)
return node
def _add_program_node(self, level_id):
return self._add_node(
"Treaty",
parent=None,
level_id=level_id,
agg_id=1)
def _add_item_node(self, xref_id, parent):
return self._add_node(
"Item_id:{}".format(xref_id),
parent=parent,
level_id=1,
agg_id=xref_id)
def _add_location_node(
self, level_id, agg_id, xref_description, parent):
return self._add_node(
"Portfolio_number:{} Account_number:{} Policy_number:{} Location_number:{}".format(
xref_description.portfolio_number,
xref_description.account_number,
xref_description.policy_number,
xref_description.location_number),
parent=parent,
level_id=level_id,
agg_id=agg_id,
portfolio_number=xref_description.portfolio_number,
account_number=xref_description.account_number,
policy_number=xref_description.policy_number,
location_group=xref_description.location_group,
location_number=xref_description.location_number)
def _add_location_group_node(
self, level_id, agg_id, xref_description, parent):
return self._add_node(
"Location_group:{}".format(xref_description.location_group),
parent=parent,
level_id=level_id,
agg_id=agg_id,
location_group=xref_description.location_group)
def _add_policy_node(
self, level_id, agg_id, xref_description, parent):
return self._add_node(
"Portfolio number:{} Account_number:{} Policy_number:{}".format(
xref_description.portfolio_number, xref_description.account_number, xref_description.policy_number),
parent=parent,
level_id=level_id,
agg_id=agg_id,
portfolio_number=xref_description.portfolio_number,
account_number=xref_description.account_number,
policy_number=xref_description.policy_number)
def _add_account_node(
self, agg_id, level_id, xref_description, parent):
return self._add_node(
"Portfolio number:{} Account_number:{}".format(
xref_description.portfolio_number, xref_description.account_number),
parent=parent,
level_id=level_id,
agg_id=agg_id,
portfolio_number=xref_description.portfolio_number,
account_number=xref_description.account_number)
def _add_portfolio_node(
self, agg_id, level_id, xref_description, parent):
return self._add_node(
"Portfolio number:{}".format(xref_description.portfolio_number),
parent=parent,
level_id=level_id,
agg_id=agg_id,
portfolio_number=xref_description.portfolio_number)
def _is_valid_id(self, id_to_check):
is_valid = self._is_defined(id_to_check) and \
((isinstance(id_to_check, string_types) and id_to_check != "")
or
(isinstance(id_to_check, numbers.Number) and id_to_check > 0))
return is_valid
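# Worked examples (illustrative): _is_valid_id("") and _is_valid_id(0) return
# False, _is_valid_id(3) and _is_valid_id("A1") return True, and a NaN value
# returns False because it fails the _is_defined check.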
def _match_portfolio(self, node, scope_row, exact=False):
if self._is_valid_id(scope_row.PortNumber):
return node.portfolio_number == scope_row.PortNumber
else:
return True
def _match_account(self, node, scope_row, exact=False):
match = False
if exact:
match = self._match_portfolio(node, scope_row) and node.account_number == scope_row.AccNumber
else:
if (self._is_valid_id(scope_row.PortNumber) and self._is_valid_id(scope_row.AccNumber)):
match = self._match_portfolio(node, scope_row) and node.account_number == scope_row.AccNumber
else:
match = self._match_portfolio(node, scope_row)
return match
def _match_policy(self, node, scope_row, exact=False):
match = False
if exact:
match = self._match_account(node, scope_row) and node.policy_number == scope_row.PolNumber
else:
if (self._is_valid_id(scope_row.PolNumber) and self._is_valid_id(scope_row.AccNumber) and self._is_valid_id(scope_row.PortNumber)):
match = self._match_account(node, scope_row) and node.policy_number == scope_row.PolNumber
else:
match = self._match_account(node, scope_row)
return match
def _match_location(self, node, scope_row, exact=False):
match = False
if self._is_valid_id(scope_row.PolNumber):
if exact:
match = self._match_policy(node, scope_row) and node.location_number == scope_row.LocNumber
else:
if self._is_valid_id(scope_row.LocNumber) and self._is_valid_id(scope_row.AccNumber) and self._is_valid_id(scope_row.PortNumber):
match = self._match_policy(node, scope_row) and node.location_number == scope_row.LocNumber
else:
match = self._match_policy(node, scope_row)
else:
if exact:
match = self._match_account(node, scope_row) and node.location_number == scope_row.LocNumber
else:
if self._is_valid_id(scope_row.LocNumber) and self._is_valid_id(scope_row.AccNumber) and self._is_valid_id(scope_row.PortNumber):
match = self._match_account(node, scope_row) and node.location_number == scope_row.LocNumber
else:
match = self._match_account(node, scope_row)
return match
def _match_location_group(self, node, scope_row, exact=False):
match = False
if self._is_valid_id(scope_row.LocGroup):
match = node.location_group == scope_row.LocGroup
return match
def _is_valid_filter(self, value):
return (value is not None and value != "" and value == value)
def _match_row(self, node, scope_row):
match = True
if match and self._is_valid_filter(scope_row.PortNumber):
match = node.portfolio_number == scope_row.PortNumber
if match and self._is_valid_filter(scope_row.AccNumber):
match = node.account_number == scope_row.AccNumber
if match and self._is_valid_filter(scope_row.PolNumber):
match = node.policy_number == scope_row.PolNumber
if match and self._is_valid_filter(scope_row.LocGroup):
match = node.location_group == scope_row.LocGroup
if match and self._is_valid_filter(scope_row.LocNumber):
match = node.location_number == scope_row.LocNumber
# if match and self._is_valid_filter(scope_row.CedantName):
# if match and self._is_valid_filter(scope_row.ProducerName):
# if match and self._is_valid_filter(scope_row.LOB):
# if match and self._is_valid_filter(scope_row.CountryCode):
# if match and self._is_valid_filter(scope_row.ReinsTag):
return match
def _scope_filter(self, nodes_list, scope_row, exact=False):
"""
Return subset of `nodes_list` based on values of a row in `ri_scope.csv`
"""
filtered_nodes_list = list(filter(
lambda n: self._match_row(n, scope_row),
nodes_list))
return filtered_nodes_list
def _risk_level_filter(self, nodes_list, scope_row, exact=False):
"""
Return subset of `nodes_list` based on values of a row in `ri_scope.csv`
"""
if (scope_row.RiskLevel == oed.REINS_RISK_LEVEL_PORTFOLIO):
return list(filter(
lambda n: self._match_portfolio(n, scope_row, exact),
nodes_list))
elif (scope_row.RiskLevel == oed.REINS_RISK_LEVEL_ACCOUNT):
return list(filter(
lambda n: self._match_account(n, scope_row, exact),
nodes_list))
elif scope_row.RiskLevel == oed.REINS_RISK_LEVEL_POLICY:
nodes_list = list(filter(
lambda n: self._match_policy(n, scope_row, exact),
nodes_list))
elif scope_row.RiskLevel == oed.REINS_RISK_LEVEL_LOCATION:
nodes_list = list(filter(
lambda n: self._match_location(n, scope_row, exact),
nodes_list))
elif scope_row.RiskLevel == oed.REINS_RISK_LEVEL_LOCATION_GROUP:
nodes_list = list(filter(
lambda n: self._match_location_group(n, scope_row, exact),
nodes_list))
else:
raise OasisException("Unknown risk level: {}".format(scope_row.RiskLevel))
return nodes_list
def _is_defined(self, num_to_check):
# If the value = NaN it will return False
return num_to_check == num_to_check
def _check_scope_row(self, scope_row):
# For some treaty types the scope filter must match exactly
okay = True
if (scope_row.RiskLevel == oed.REINS_RISK_LEVEL_ACCOUNT):
okay = \
self._is_valid_id(scope_row.AccNumber) and \
not self._is_valid_id(scope_row.PolNumber) and \
not self._is_valid_id(scope_row.LocNumber)
elif scope_row.RiskLevel == oed.REINS_RISK_LEVEL_POLICY:
okay = \
self._is_valid_id(scope_row.AccNumber) and \
self._is_valid_id(scope_row.PolNumber) and \
not self._is_valid_id(scope_row.LocNumber)
elif scope_row.RiskLevel == oed.REINS_RISK_LEVEL_LOCATION:
okay = \
self._is_valid_id(scope_row.AccNumber) and \
self._is_valid_id(scope_row.LocNumber)
elif scope_row.RiskLevel == oed.REINS_RISK_LEVEL_LOCATION_GROUP:
okay = \
self._is_valid_id(scope_row.LocGroup)
return okay
LOCATION_RISK_LEVEL = 2
def _get_tree(self):
current_location_number = 0
current_policy_number = 0
current_account_number = 0
current_portfolio_number = 0
current_location_group = 0
current_location_node = None
current_node = None
if self.risk_level == oed.REINS_RISK_LEVEL_LOCATION:
risk_level_id = self.LOCATION_RISK_LEVEL
else:
risk_level_id = self.LOCATION_RISK_LEVEL + 1
program_node_level_id = risk_level_id + 1
program_node = self._add_program_node(program_node_level_id)
if self.risk_level == oed.REINS_RISK_LEVEL_LOCATION_GROUP:
xref_descriptions = self.xref_descriptions.sort_values(
by=["location_group", "portfolio_number", "account_number", "policy_number", "location_number"])
else:
xref_descriptions = self.xref_descriptions.sort_values(
by=["portfolio_number", "account_number", "policy_number", "location_number"])
agg_id = 0
loc_agg_id = 0
for row in xref_descriptions.itertuples():
if self.risk_level == oed.REINS_RISK_LEVEL_PORTFOLIO:
if current_portfolio_number != row.portfolio_number:
agg_id = agg_id + 1
current_node = self._add_portfolio_node(
agg_id, risk_level_id, row, program_node)
elif self.risk_level == oed.REINS_RISK_LEVEL_ACCOUNT:
if \
current_portfolio_number != row.portfolio_number or \
current_account_number != row.account_number:
agg_id = agg_id + 1
current_node = self._add_account_node(
agg_id, risk_level_id, row, program_node)
elif self.risk_level == oed.REINS_RISK_LEVEL_POLICY:
if \
current_portfolio_number != row.portfolio_number or \
current_account_number != row.account_number or \
current_policy_number != row.policy_number:
agg_id = agg_id + 1
current_node = self._add_policy_node(
risk_level_id, agg_id, row, program_node)
elif self.risk_level == oed.REINS_RISK_LEVEL_LOCATION_GROUP:
if current_location_group != row.location_group:
agg_id = agg_id + 1
current_node = self._add_location_group_node(
risk_level_id, agg_id, row, program_node)
if \
current_portfolio_number != row.portfolio_number or \
current_account_number != row.account_number or \
current_policy_number != row.policy_number or \
current_location_number != row.location_number:
loc_agg_id = loc_agg_id + 1
level_id = 2
if self.risk_level == oed.REINS_RISK_LEVEL_LOCATION:
current_location_node = self._add_location_node(
level_id, loc_agg_id, row, program_node)
else:
current_location_node = self._add_location_node(
level_id, loc_agg_id, row, current_node)
current_portfolio_number = row.portfolio_number
current_account_number = row.account_number
current_policy_number = row.policy_number
current_location_number = row.location_number
current_location_group = row.location_group
self._add_item_node(row.xref_id, current_location_node)
return program_node
def _get_risk_level_id(self):
if self.risk_level == oed.REINS_RISK_LEVEL_LOCATION:
risk_level_id = 2
else:
risk_level_id = 3
return risk_level_id
def _get_filter_level_id(self):
risk_level_id = 2
return risk_level_id
def _get_next_profile_id(self, add_profiles_args):
profile_id = max(
x.profile_id for x in add_profiles_args.fmprofiles_list)
return profile_id + 1
def _add_fac_profiles(self, add_profiles_args):
self.logger.debug("Adding FAC profiles:")
profile_id = self._get_next_profile_id(add_profiles_args)
add_profiles_args.fmprofiles_list.append(oed.get_reinsurance_profile(
profile_id,
attachment=add_profiles_args.ri_info_row.RiskAttachment,
limit=add_profiles_args.ri_info_row.RiskLimit,
ceded=add_profiles_args.ri_info_row.CededPercent,
placement=add_profiles_args.ri_info_row.PlacedPercent
))
nodes_risk_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_risk_level_id())
if self.risk_level != oed.REINS_RISK_LEVEL_LOCATION:
nodes_filter_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_filter_level_id())
for node in nodes_filter_level_all:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = add_profiles_args.passthroughprofile_id
for _, ri_scope_row in add_profiles_args.scope_rows.iterrows():
            # Note that the FAC profile scope must match the filter exactly.
if not self._check_scope_row(ri_scope_row):
raise OasisException("Invalid scope row: {}".format(ri_scope_row))
nodes = self._risk_level_filter(nodes_risk_level_all, ri_scope_row, exact=True)
for node in nodes:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = profile_id
def _add_per_risk_profiles(self, add_profiles_args):
self.logger.debug("Adding PR profiles:")
profile_id = self._get_next_profile_id(add_profiles_args)
nodes_risk_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_risk_level_id())
if self.risk_level != oed.REINS_RISK_LEVEL_LOCATION:
nodes_filter_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_filter_level_id())
add_profiles_args.fmprofiles_list.append(oed.get_reinsurance_profile(
profile_id,
attachment=add_profiles_args.ri_info_row.RiskAttachment,
limit=add_profiles_args.ri_info_row.RiskLimit,
ceded=add_profiles_args.ri_info_row.CededPercent,
))
for _, ri_scope_row in add_profiles_args.scope_rows.iterrows():
if self.risk_level != oed.REINS_RISK_LEVEL_LOCATION:
selected_nodes = self._scope_filter(nodes_filter_level_all, ri_scope_row, exact=False)
for node in selected_nodes:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = add_profiles_args.passthroughprofile_id
selected_nodes = self._risk_level_filter(nodes_risk_level_all, ri_scope_row, exact=False)
for node in selected_nodes:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = profile_id
# add OccLimit / Placed Percent
profile_id = profile_id + 1
add_profiles_args.fmprofiles_list.append(
oed.get_occlim_profile(
profile_id,
limit=add_profiles_args.ri_info_row.OccLimit,
placement=add_profiles_args.ri_info_row.PlacedPercent,
))
add_profiles_args.node_layer_profile_map[
(add_profiles_args.program_node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = profile_id
def _add_surplus_share_profiles(self, add_profiles_args):
self.logger.debug("Adding SS profiles:")
profile_id = self._get_next_profile_id(add_profiles_args)
nodes_risk_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_risk_level_id())
if self.risk_level != oed.REINS_RISK_LEVEL_LOCATION:
nodes_filter_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_filter_level_id())
for node in nodes_filter_level_all:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = add_profiles_args.passthroughprofile_id
for _, ri_scope_row in add_profiles_args.scope_rows.iterrows():
            # Note that the surplus share profile scope must match the filter exactly.
if not self._check_scope_row(ri_scope_row):
raise OasisException("Invalid scope row: {}".format(ri_scope_row))
add_profiles_args.fmprofiles_list.append(oed.get_reinsurance_profile(
profile_id,
attachment=add_profiles_args.ri_info_row.RiskAttachment,
limit=add_profiles_args.ri_info_row.RiskLimit,
ceded=ri_scope_row.CededPercent,
))
selected_nodes = self._risk_level_filter(nodes_risk_level_all, ri_scope_row, exact=True)
for node in selected_nodes:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = profile_id
profile_id = profile_id + 1
# add OccLimit / Placed Percent
add_profiles_args.fmprofiles_list.append(
oed.get_occlim_profile(
profile_id,
limit=add_profiles_args.ri_info_row.OccLimit,
placement=add_profiles_args.ri_info_row.PlacedPercent,
))
add_profiles_args.node_layer_profile_map[
(add_profiles_args.program_node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = profile_id
def _add_quota_share_profiles(self, add_profiles_args):
self.logger.debug("Adding QS profiles:")
profile_id = self._get_next_profile_id(add_profiles_args)
nodes_risk_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_risk_level_id())
if self.risk_level != oed.REINS_RISK_LEVEL_LOCATION:
nodes_filter_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_filter_level_id())
add_profiles_args.fmprofiles_list.append(
oed.get_reinsurance_profile(
profile_id,
limit=add_profiles_args.ri_info_row.RiskLimit,
ceded=add_profiles_args.ri_info_row.CededPercent,
))
for _, ri_scope_row in add_profiles_args.scope_rows.iterrows():
# Filter
if self.risk_level != oed.REINS_RISK_LEVEL_LOCATION:
selected_nodes = self._scope_filter(nodes_filter_level_all, ri_scope_row, exact=False)
for node in selected_nodes:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = add_profiles_args.passthroughprofile_id
selected_nodes = self._risk_level_filter(nodes_risk_level_all, ri_scope_row, exact=False)
for node in selected_nodes:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = profile_id
# add OccLimit / Placed Percent
profile_id = profile_id + 1
add_profiles_args.fmprofiles_list.append(
oed.get_occlim_profile(
profile_id,
limit=add_profiles_args.ri_info_row.OccLimit,
placement=add_profiles_args.ri_info_row.PlacedPercent,
))
add_profiles_args.node_layer_profile_map[
(add_profiles_args.program_node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = profile_id
def _add_cat_xl_profiles(self, add_profiles_args):
self.logger.debug("Adding CAT XL profiles")
profile_id = self._get_next_profile_id(add_profiles_args)
nodes_risk_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_risk_level_id())
if self.risk_level != oed.REINS_RISK_LEVEL_LOCATION:
nodes_filter_level_all = anytree.search.findall(
add_profiles_args.program_node, filter_=lambda node: node.level_id == self._get_filter_level_id())
for _, ri_scope_row in add_profiles_args.scope_rows.iterrows():
# Filter
if self.risk_level != oed.REINS_RISK_LEVEL_LOCATION:
selected_nodes = self._scope_filter(nodes_filter_level_all, ri_scope_row, exact=False)
for node in selected_nodes:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = add_profiles_args.passthroughprofile_id
selected_nodes = self._risk_level_filter(nodes_risk_level_all, ri_scope_row, exact=False)
for node in selected_nodes:
add_profiles_args.node_layer_profile_map[(
node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = add_profiles_args.passthroughprofile_id
# Add OccLimit / Placed Percent
add_profiles_args.fmprofiles_list.append(
oed.get_reinsurance_profile(
profile_id,
attachment=add_profiles_args.ri_info_row.OccAttachment,
ceded=add_profiles_args.ri_info_row.CededPercent,
limit=add_profiles_args.ri_info_row.OccLimit,
placement=add_profiles_args.ri_info_row.PlacedPercent,
))
add_profiles_args.node_layer_profile_map[
(add_profiles_args.program_node.name, add_profiles_args.layer_id, add_profiles_args.overlay_loop)] = profile_id
def _log_reinsurance_structure(self, add_profiles_args):
if self.logger:
self.logger.debug('policytc_map: "{}"'.format(self.name))
policytc_map = dict()
for k in add_profiles_args.node_layer_profile_map.keys():
profile_id = add_profiles_args.node_layer_profile_map[k]
policytc_map["(Name=%s, layer_id=%s, overlay_loop=%s)" % k] = profile_id
self.logger.debug(json.dumps(policytc_map, indent=4))
self.logger.debug('fm_policytcs: "{}"'.format(self.name))
self.logger.debug(self.fm_policytcs)
self.logger.debug('fm_profile: "{}"'.format(self.name))
self.logger.debug(self.fmprofiles)
self.logger.debug('fm_programme: "{}"'.format(self.name))
self.logger.debug(self.fmprogrammes)
def _log_tree(self, program_node):
if self.logger:
self.logger.debug('program_node tree: "{}"'.format(self.name))
self.logger.debug(anytree.RenderTree(program_node))
def generate_oasis_structures(self):
'''
Create the Oasis structures - FM Programmes, FM Profiles and FM Policy TCs -
that represent the reinsurance structure.
        The algorithm to create the structure has three steps:
        Step 1 - Build a tree representation of the insurance program, depending on the reinsurance risk level.
        Step 2 - Overlay the reinsurance structure. Each reinsurance contract is a separate layer.
Step 3 - Iterate over the tree and write out the Oasis structure.
'''
fmprogrammes_list = list()
fmprofiles_list = list()
fm_policytcs_list = list()
profile_id = 1
nolossprofile_id = profile_id
fmprofiles_list.append(
oed.get_no_loss_profile(nolossprofile_id))
profile_id = profile_id + 1
passthroughprofile_id = profile_id
fmprofiles_list.append(
oed.get_pass_through_profile(passthroughprofile_id))
node_layer_profile_map = {}
self.logger.debug(fmprofiles_list)
#
        # Step 1 - Build a tree representation of the insurance program, depending on the reinsurance risk level.
#
program_node = self._get_tree()
self._log_tree(program_node)
#
        # Step 2 - Overlay the reinsurance structure. Each reinsurance contract is a separate layer.
#
layer_id = 1 # Current layer ID
overlay_loop = 0 # Overlays multiple rules in same layer
prev_reins_number = 1
for _, ri_info_row in self.ri_info.iterrows():
overlay_loop += 1
scope_rows = self.ri_scope[
(self.ri_scope.ReinsNumber == ri_info_row.ReinsNumber)
& (self.ri_scope.RiskLevel == self.risk_level)]
# If FAC, don't increment the layer number
# Else, only increment inline with the reins_number
if ri_info_row.ReinsType in ['FAC']:
pass
elif prev_reins_number < ri_info_row.ReinsNumber:
layer_id += 1
prev_reins_number = ri_info_row.ReinsNumber
if self.logger:
| pd.set_option('display.width', 1000) | pandas.set_option |
import pandas as pd
import numpy as np
import warnings; warnings.simplefilter('ignore')
from ast import literal_eval
df1 = pd.read_csv("merged_cleaned.csv")
df2 = | pd.read_csv("ratings_combined.csv") | pandas.read_csv |
import pandas as pd
portfolio1 = pd.DataFrame({'Asset': ['FX', 'FX', 'IR'],
                           'Instrument': ['Option', 'Swap', 'Option'],
                           'Number': [1, 2, 3]})
portfolio2 = pd.DataFrame({'Asset': ['FX', 'FX', 'FX', 'IR'],
                           'Instrument': ['Option', 'Option', 'Swap', 'Swap'],
                           'Number': [4, 5, 6, 7]})
print(portfolio1)
print(portfolio2)
print(pd.merge(portfolio1, portfolio2, on='Asset'))
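# Added sketch to contrast with the inner join above: merging on both keys leaves the
# two IR rows unmatched (IR/Option only in portfolio1, IR/Swap only in portfolio2);
# how='outer' keeps them and indicator=True labels which frame each row came from.
print(pd.merge(portfolio1, portfolio2, on=['Asset', 'Instrument'], how='outer',
               suffixes=('_p1', '_p2'), indicator=True))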
import pandas as pd
import numpy as np
from load_data import load_csv
import constants as cst
import sys
file_path = "./data/bitstampUSD_1-min_data_2012-01-01_to_2018-06-27.csv"
def select_data(dataframe, start=None, stop=None):
"""
    :param dataframe: pandas DataFrame
    :param start: str, minimum date to keep, format "YYYY-MM-DD"; defaults to the minimum date (no lower bound)
    :param stop: str, maximum date to keep, format "YYYY-MM-DD"
    :return: dataframe restricted to the selected date range
    :raise: exception on a null dataframe if start and stop leave an empty selection
"""
if start is not None:
dataframe = dataframe[dataframe[cst.DATE] >= pd.to_datetime(start, format="%Y-%m-%d")]
if stop is not None:
dataframe = dataframe[dataframe[cst.DATE] <= | pd.to_datetime(stop, format="%Y-%m-%d") | pandas.to_datetime |
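    # Assumed completion - the original snippet stops here. The docstring promises a
    # return value and an error on an empty selection, so a minimal version of both
    # is sketched in (the exact exception type is an assumption).
    if dataframe.empty:
        raise ValueError("start/stop range leaves an empty dataframe")
    return dataframe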
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with | ensure_clean_store(setup_path) | pandas.tests.io.pytables.common.ensure_clean_store |
"""Represent SQL tokens as Pandas operations.
"""
from sqlalchemy.sql import operators
from sqlalchemy import sql
from sqlalchemy import util
from sqlalchemy import types as sqltypes
import functools
import pandas as pd
import numpy as np
import collections
from . import dbapi
from sqlalchemy.sql.functions import GenericFunction
from sqlalchemy.ext.compiler import compiles
def aggregate_fn(package=None):
"""Mark a Python function as a SQL aggregate function.
The function should typically receive a Pandas Series object
as an argument and return a scalar result.
E.g.::
from calchipan import aggregate_fn
@aggregate_fn()
def stddev(values):
return values.std()
The object is converted into a SQLAlchemy GenericFunction
object, which can be used directly::
stmt = select([stddev(table.c.value)])
or via the SQLAlchemy ``func`` namespace::
from sqlalchemy import func
stmt = select([func.stddev(table.c.value)])
Functions can be placed in ``func`` under particular
"package" names using the ``package`` argument::
@aggregate_fn(package='numpy')
def stddev(values):
return values.std()
Usage via ``func`` is then::
from sqlalchemy import func
stmt = select([func.numpy.stddev(table.c.value)])
An aggregate function that is called with multiple expressions
will be passed a single argument that is a list of Series
objects.
"""
def mark_aggregate(fn):
kwargs = {'name': fn.__name__}
if package:
kwargs['package'] = package
custom_func = type("%sFunc" % fn.__name__, (GenericFunction,), kwargs)
@compiles(custom_func, 'pandas')
def _compile_fn(expr, compiler, **kw):
return FunctionResolver(fn,
compiler.process(expr.clauses, **kw), True)
return custom_func
return mark_aggregate
def non_aggregate_fn(package=None):
"""Mark a Python function as a SQL non-aggregate function.
The function should receive zero or more scalar
Python objects as arguments and return a scalar result.
E.g.::
from calchipan import non_aggregate_fn
@non_aggregate_fn()
def add_numbers(value1, value2):
return value1 + value2
Usage and behavior is identical to that of :func:`.aggregate_fn`,
except that the function is not treated as an aggregate. Function
expressions are also expanded out to individual positional arguments,
whereas an aggregate always receives a single structure as an argument.
"""
def mark_non_aggregate(fn):
kwargs = {'name': fn.__name__}
if package:
kwargs['package'] = package
custom_func = type("%sFunc" % fn.__name__, (GenericFunction,), kwargs)
@compiles(custom_func, 'pandas')
def _compile_fn(expr, compiler, **kw):
return FunctionResolver(fn,
compiler.process(expr.clauses, **kw), False)
return custom_func
return mark_non_aggregate
ResolverContext = collections.namedtuple("ResolverContext",
["cursor", "namespace", "params"])
class Resolver(object):
def __call__(self, cursor, namespace, params):
"""Resolve this expression.
Resolvers are callables; this is called by the DBAPI."""
return self.resolve(ResolverContext(cursor, namespace, params))
def resolve(self, ctx):
"""Resolve this expression given a ResolverContext.
Front end for resolution, linked to top-level __call__()."""
raise NotImplementedError()
class NullResolver(Resolver):
def resolve(self, ctx):
pass
class ColumnElementResolver(Resolver):
"""Top level class for SQL expressions."""
def resolve_expression(self, ctx, product):
"""Resolve as a column expression.
Return value here is typically a Series or a scalar
value.
"""
raise NotImplementedError()
class FromResolver(Resolver):
"""Top level class for 'from' objects, things you can select rows from."""
def resolve_dataframe(self, ctx, names=True):
"""Resolve as a dataframe.
Return value here is a DataFrame object.
"""
raise NotImplementedError()
class FunctionResolver(ColumnElementResolver):
def __init__(self, fn, expr, aggregate):
self.fn = fn
self.expr = expr
self.aggregate = aggregate
def resolve_expression(self, ctx, product):
if self.aggregate:
q = self.fn(self.expr.resolve_expression(
ctx, product))
q = pd.Series([q], name="aggregate")
else:
q = self.fn(*self.expr.resolve_expression(
ctx, product))
return q
class ConstantResolver(ColumnElementResolver):
def __init__(self, value):
self.value = value
def resolve_expression(self, ctx, product):
return self.value
class LiteralResolver(ColumnElementResolver):
def __init__(self, value):
self.value = value
self.name = str(id(self))
def resolve_expression(self, ctx, product):
return self.value
@property
def df_index(self):
return self.name
class ColumnResolver(ColumnElementResolver):
def __init__(self, name, tablename):
self.name = name
self.tablename = tablename
def resolve_expression(self, ctx, product):
if product is None:
df = TableResolver(self.tablename).resolve_dataframe(ctx)
else:
df = product.resolve_dataframe(ctx)
return df[self.df_index]
@property
def df_index(self):
return "#T_%s_#C_%s" % (self.tablename, self.name)
class UnaryResolver(ColumnElementResolver):
def __init__(self, expression, operator, modifier):
self.operator = operator
self.modifier = modifier
self.expression = expression
def resolve_expression(self, ctx, product):
return self.expression.resolve_expression(
ctx, product)
@property
def df_index(self):
return self.expression.df_index
class LabelResolver(Resolver):
def __init__(self, expression, name):
self.expression = expression
self.name = name
def resolve_expression(self, ctx, product):
return self.expression.resolve_expression(ctx, product)
@property
def df_index(self):
return self.name
class BinaryResolver(ColumnElementResolver):
def __init__(self, left, right, operator):
self.left = left
self.right = right
self.operator = operator
def resolve_expression(self, ctx, product):
return self.operator(
self.left.resolve_expression(ctx, product),
self.right.resolve_expression(ctx, product),
)
class ClauseListResolver(ColumnElementResolver):
def __init__(self, expressions, operator):
self.expressions = expressions
self.operator = operator
def resolve_expression(self, ctx, product):
exprs = [expr.resolve_expression(ctx, product)
for expr in self.expressions]
if self.operator is operators.comma_op:
if len(exprs) == 1:
return exprs[0]
else:
return exprs
else:
return functools.reduce(self.operator, exprs)
class BindParamResolver(ColumnElementResolver):
def __init__(self, name):
self.name = name
def resolve_expression(self, ctx, product):
return ctx.params[self.name]
class DerivedResolver(FromResolver):
def __init__(self, dataframe):
self.dataframe = dataframe
def resolve_dataframe(self, ctx, names=True):
return self.dataframe
class TableResolver(FromResolver):
def __init__(self, tablename, autoincrement_col=None):
self.tablename = tablename
self.autoincrement_col = autoincrement_col
def resolve_dataframe(self, ctx, names=True):
df = ctx.namespace[self.tablename]
if names:
# performance tests show that the rename() here is
# not terribly expensive as long as copy=False. Adding the
# index as a column is much more expensive, however,
# though is not as common of a use case.
# the renamed dataframe can be cached, though this means
# that all mutation operations need to clear the cache also.
# a quicker route to having the index accessible is to
# add an explicit copy of the index to the DataFrame outside
# of the SQL dialect - that way it won't be copied here
# each time.
renamed_df = df.rename(
columns=dict(
(k, "#T_%s_#C_%s" % (self.tablename, k))
for k in df.keys()
), copy=False
)
if self.autoincrement_col and self.autoincrement_col not in df:
renamed_df["#T_%s_#C_%s" %
(self.tablename, self.autoincrement_col)] = df.index
return renamed_df
elif self.autoincrement_col and self.autoincrement_col not in df:
renamed_df = df.copy()
renamed_df[self.autoincrement_col] = df.index
return renamed_df
else:
return df
class AliasResolver(FromResolver):
def __init__(self, table, aliasname):
self.table = table
self.aliasname = aliasname
def resolve_dataframe(self, ctx, names=True):
df = self.table.resolve_dataframe(ctx, names=False)
if names:
df = df.rename(
columns=dict(
(k, "#T_%s_#C_%s" % (self.aliasname, k))
for k in df.keys()
), copy=False
)
return df
class JoinResolver(FromResolver):
def __init__(self, left, right, onclause, isouter):
self.left = left
self.right = right
self.onclause = onclause
self.isouter = isouter
def resolve_dataframe(self, ctx, names=True):
df1 = left = self.left.resolve_dataframe(ctx)
df2 = self.right.resolve_dataframe(ctx)
if self.isouter:
left['_cp_left_index'] = left.index
straight_binaries, remainder = self._produce_join_expressions(df1, df2)
df1 = self._merge_straight_binaries(ctx, df1, df2, straight_binaries)
df1 = self._merge_remainder(ctx, left, df1, df2,
straight_binaries, remainder)
return df1.where(pd.notnull(df1), None)
def _produce_join_expressions(self, df1, df2):
straight_binaries = []
remainder = []
if isinstance(self.onclause, ClauseListResolver) and \
self.onclause.operator is operators.and_:
comparisons = self.onclause.expressions
else:
comparisons = [self.onclause]
# extract comparisons like this:
# col1 == col2 AND col3 == col4 AND ...
# use pd.merge() for those
for comp in comparisons:
if isinstance(comp, BinaryResolver) and \
comp.operator is operators.eq and \
hasattr(comp.left, "df_index") and \
hasattr(comp.right, "df_index"):
if comp.left.df_index in df1 and \
comp.right.df_index in df2:
straight_binaries.append(
(comp.left.df_index, comp.right.df_index)
)
continue
elif comp.right.df_index in df1 and \
comp.left.df_index in df2:
straight_binaries.append(
(comp.right.df_index, comp.left.df_index)
)
continue
remainder.append(comp)
return straight_binaries, remainder
def _merge_straight_binaries(self, ctx, df1, df2, straight_binaries):
if straight_binaries:
# use merge() for straight binaries.
left_on, right_on = zip(*straight_binaries)
df1 = df1.merge(df2, left_on=left_on, right_on=right_on,
how='left' if self.isouter else 'inner')
return df1
def _merge_remainder(self, ctx, left, df1, df2,
straight_binaries, remainder):
# for joins that aren't straight "col == col",
# we use the ON criterion directly.
# if we don't already have a dataframe with the full
# left + right cols, we use a cartesian product first.
# ideally, we'd limit the cartesian on only those columns we
# need.
if remainder:
if len(remainder) > 1:
remainder = ClauseListResolver(remainder, operators.and_)
else:
remainder = remainder[0]
# TODO: performance optimization: do the cartesian product
# here on a subset of the two dataframes, that only includes
# those columns we need in the expression. Then reindex
# back out to the original dataframes.
if not straight_binaries:
df1 = _cartesian_dataframe(ctx, df1, df2)
expr = remainder.resolve_expression(ctx, DerivedResolver(df1))
joined = df1[expr]
if self.isouter:
# for outer join, grab remaining rows from "left"
remaining_left_ids = set(df1['_cp_left_index']).\
difference(joined['_cp_left_index'])
remaining = left.ix[remaining_left_ids]
df1 = pd.concat([joined, remaining]).reset_index()
else:
df1 = joined
return df1
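# Added illustration of the cartesian-product step referenced in the comments above.
# The real helper `_cartesian_dataframe(ctx, df1, df2)` lives elsewhere in the module
# and is not shown here; this standalone sketch only demonstrates the usual trick
# (merge on a temporary constant key) that such a helper typically relies on.
def _cartesian_example(df1, df2):
    a, b = df1.copy(), df2.copy()
    a["_cross_key"] = 1
    b["_cross_key"] = 1
    # every row of `a` pairs with every row of `b`; overlapping column names get
    # pandas' default _x/_y suffixes
    return a.merge(b, on="_cross_key").drop("_cross_key", axis=1)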
class _ExprCol(ColumnElementResolver):
def __init__(self, expr, name):
self.expr = expr
self.name = name
def resolve_expression(self, ctx, product):
return self.expr.resolve_expression(ctx, product)
@property
def df_index(self):
return self.name
class BaseSelectResolver(FromResolver):
group_by = None
order_by = None
having = None
limit = None
offset = None
@util.memoized_property
def columns(self):
return []
def _evaluate(self, ctx, correlate=None):
raise NotImplementedError()
def resolve(self, ctx, correlate=None):
product = self._evaluate(ctx, correlate)
if self.group_by is not None:
df = product.resolve_dataframe(ctx)
gp = self.group_by.resolve_expression(ctx, product)
groups = [DerivedResolver(gdf[1]) for gdf in df.groupby(gp)]
else:
groups = [product]
frame_columns = list(self.columns)
if self.having is not None:
if self.group_by is None:
raise dbapi.Error("HAVING must also have GROUP BY")
frame_columns.append(_ExprCol(self.having, '_having'))
if self.order_by is not None:
for idx, ob_expr in enumerate(self.order_by.expressions):
frame_columns.append(_ExprCol(ob_expr, '_order_by_%d' % idx))
def process_aggregates(gprod):
"""detect aggregate functions in column clauses and
flatten results if present
"""
cols = [
_coerce_to_series(
ctx,
c.resolve_expression(ctx, gprod)
).reset_index(drop=True)
for c in frame_columns]
for c in cols:
if c.name == 'aggregate':
break
else:
return cols
return [
list(c)[0]
if c.name != 'aggregate'
else c
for c in cols
]
nu = _unique_name()
names = [nu(c.name) for c in self.columns]
group_results = [
pd.DataFrame.from_items(
[
(
c.df_index,
expr
)
for c, expr
in zip(frame_columns, process_aggregates(gprod))
]
)
for gprod in groups
]
non_empty = [g for g in group_results if len(g)]
if not non_empty:
# empty result
return pd.DataFrame(columns=names)
else:
results = | pd.concat(non_empty) | pandas.concat |
from tensorflow.python.ops.functional_ops import While
import tensorflow as tf
import numpy as np
import pandas as pd
import waktu as wk
import time
from datetime import datetime
from datetime import date
import schedule
import pyrebase
import json
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
from tensorflow import keras
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.python.eager.context import num_gpus
from os import read, stat_result
from re import T, X
cred = credentials.Certificate("serviceAccountKey.json")
firebase_admin.initialize_app(cred)
dbStore = firestore.client()
def cekHari():
    # Map the English day name to an index 0 (Monday) .. 6 (Sunday), same mapping as
    # the original if/elif chain.
    days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
            'Friday', 'Saturday', 'Sunday']
    return days.index(date.today().strftime("%A"))
config = {
"apiKey": "<KEY>",
"authDomain": "cloudta2021-fa4af.firebaseapp.com",
"databaseURL": "https://cloudta2021-fa4af-default-rtdb.firebaseio.com",
"storageBucket": "cloudta2021-fa4af.appspot.com"
}
DataHead = "Dataset Hasil Pengujian"
firebase = pyrebase.initialize_app(config)
db = firebase.database()
timeNow = datetime.now()
jam = timeNow.hour
menit = timeNow.minute
timestamp = timeNow.strftime("%H:%M")
day = date.today().strftime("%A")
idrelay = [0, 1, 2, 3]
hari = cekHari()
waktu = wk.cekWaktu(jam, menit)
# waktu = 120
# hari = 0
data = pd.read_csv('FixDataBind.csv')
data = | pd.DataFrame(data, columns=['waktu', 'hari', 'idrelay', 'status']) | pandas.DataFrame |
from mlapp.managers import DataManager, pipeline
from mlapp.utils.exceptions.base_exceptions import DataManagerException
import pandas as pd
import numpy as np
class FlowRegressionDataManager(DataManager):
@pipeline
def load_train_data(self,*args):
print(args)
return
@pipeline
def clean_train_data(self, data):
return data
@pipeline
def transform_train_data(self, data):
return data
@pipeline
def load_forecast_data(self, *args):
try:
models_outputs = args[0]
except Exception as e:
raise DataManagerException("Error: No data input from flow")
# read data for forecasting
features = {}
data = pd.DataFrame()
df_name = self.data_settings.get('flow_return_data')
for model_output in models_outputs:
index = self.data_settings.get('data_index')
res = model_output.get(df_name, pd.DataFrame(columns=index))
if len(res) > 0:
df = res[0]
# df.set_index(self.data_settings.get('data_index'))
data = pd.concat([data, df])
else:
continue
for feature_name in self.data_settings.get('flow_return_features'):
features[feature_name] = []
for model_output in models_outputs:
feature_data = model_output.get(feature_name, pd.DataFrame(columns=index))
if len(feature_data)>0:
if isinstance(feature_data, pd.DataFrame):
if not isinstance(features[feature_name], pd.DataFrame):
features[feature_name]=pd.DataFrame()
features[feature_name] = | pd.concat([feature_data, features[feature_name]]) | pandas.concat |
from __future__ import division
import pytest
import numpy as np
from pandas import (Interval, IntervalIndex, Index, isna,
interval_range, Timestamp, Timedelta,
compat)
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self):
return IntervalIndex.from_breaks(np.arange(10))
def test_constructors(self):
expected = self.index
actual = IntervalIndex.from_breaks(np.arange(3), closed='right')
assert expected.equals(actual)
alternate = IntervalIndex.from_breaks(np.arange(3), closed='left')
assert not expected.equals(alternate)
actual = IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex.from_arrays(np.arange(2), np.arange(2) + 1,
closed='right')
assert expected.equals(actual)
actual = Index([Interval(0, 1), Interval(1, 2)])
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
actual = Index(expected)
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
def test_constructors_other(self):
# all-nan
result = IntervalIndex.from_intervals([np.nan])
expected = np.array([np.nan], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
# empty
result = IntervalIndex.from_intervals([])
expected = np.array([], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
def test_constructors_errors(self):
# scalar
with pytest.raises(TypeError):
IntervalIndex(5)
# not an interval
with pytest.raises(TypeError):
IntervalIndex([0, 1])
with pytest.raises(TypeError):
IntervalIndex.from_intervals([0, 1])
# invalid closed
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed
with pytest.raises(ValueError):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 10], [3, 5])
with pytest.raises(ValueError):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# no point in nesting periods in an IntervalIndex
with pytest.raises(ValueError):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
def test_constructors_datetimelike(self):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx)
expected = IntervalIndex.from_breaks(idx.values)
| tm.assert_index_equal(result, expected) | pandas.util.testing.assert_index_equal |
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for mother status is invalid.',
affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
valid_values = ['0','1']
# prepare to merge
oc3.reset_index(inplace=True)
header.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end)
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left')
# Raise error if provided <MOTHER> is not a valid value.
value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values))
# If not provided
female = (merged['SEX']=='1')
eps_in_year = (merged['EPS_COUNT']>0)
none_provided = (merged['ACTIV'].isna()& merged['ACCOM'].isna()& merged['IN_TOUCH'].isna())
# If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided
mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided)))
# That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error.
error_locs_eps = merged.loc[mask, 'index_eps']
error_locs_header = merged.loc[mask, 'index_er']
error_locs_oc3 = merged.loc[mask, 'index']
return {'Header':error_locs_header.dropna().unique().tolist(),
'OC3':error_locs_oc3.dropna().unique().tolist()}
return error, _validate
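# Added usage sketch: each validate_* factory in this module returns an
# (ErrorDefinition, checker) pair; the checker takes the dict of loaded tables and
# returns {table: [failing row indices]}. The tiny frames below are illustrative only
# and are expected to flag row 0 in Header and OC3.
_demo_error, _demo_check = validate_165()
_demo_dfs = {
    'Header': pd.DataFrame({'CHILD': ['1'], 'SEX': ['1'], 'MOTHER': [None]}),
    'Episodes': pd.DataFrame({'CHILD': ['1'], 'DECOM': ['01/06/2020']}),
    'OC3': pd.DataFrame({'CHILD': ['1'], 'IN_TOUCH': [None],
                         'ACTIV': [None], 'ACCOM': [None]}),
    'metadata': {'collection_start': '01/04/2020', 'collection_end': '31/03/2021'},
}
print(_demo_check(_demo_dfs))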
def validate_1014():
error = ErrorDefinition(
code='1014',
description='UASC information is not required for care leavers',
affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
uasc = dfs['UASC']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
# prepare to merge
oc3.reset_index(inplace=True)
uasc.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
date_check = (
((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end))
| ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end))
| ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna())
)
episodes['EPS'] = date_check
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
# inner merge to take only episodes of children which are also found on the uasc table
merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD',
how='left')
# adding suffixes with the secondary merge here does not go so well yet.
some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna())
mask = (merged['EPS_COUNT'] == 0) & some_provided
error_locs_uasc = merged.loc[mask, 'index_sc']
error_locs_oc3 = merged.loc[mask, 'index']
return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()}
return error, _validate
# !# not sure what this rule is actually supposed to be getting at - description is confusing
def validate_197B():
error = ErrorDefinition(
code='197B',
description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
(
(oc2['DOB'] + pd.DateOffset(years=4) == start) # ???
| (oc2['DOB'] + pd.DateOffset(years=17) == start)
)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
& oc2['SDQ_REASON'].isna()
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
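# Added note on the equality checks marked '# ???' above: DOB + pd.DateOffset(years=4)
# == collection_start only matches a child whose 4th (or 17th) birthday falls exactly
# on the collection start date. For example, a DOB of 01/04/2016 plus four years gives
# Timestamp('2020-04-01'), which equals a collection_start of 01/04/2020, while a DOB
# of 02/04/2016 is not picked up; a range check (<= / >=) as in validate_157 below is
# the broader form.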
def validate_157():
error = ErrorDefinition(
code='157',
description="Child is aged 4 years or over at the beginning of the year or 16 years or under at the end of the "
"year and Strengths and Difficulties Questionnaire (SDQ) 1 has been recorded as the reason for no "
"Strengths and Difficulties Questionnaire (SDQ) score.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
oc2['CONTINUOUSLY_LOOKED_AFTER']
& (oc2['DOB'] + pd.DateOffset(years=4) <= start)
& (oc2['DOB'] + pd.DateOffset(years=16) >= endo)
& oc2['SDQ_SCORE'].isna()
& (oc2['SDQ_REASON'] == 'SDQ1')
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_357():
error = ErrorDefinition(
code='357',
description='If this is the first episode ever for this child, reason for new episode must be S. '
'Check whether there is an episode immediately preceding this one, which has been left out. '
'If not the reason for new episode code must be amended to S.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
eps = dfs['Episodes']
eps['DECOM'] = pd.to_datetime(eps['DECOM'], format='%d/%m/%Y', errors='coerce')
eps = eps.loc[eps['DECOM'].notnull()]
first_eps = eps.loc[eps.groupby('CHILD')['DECOM'].idxmin()]
errs = first_eps[first_eps['RNE'] != 'S'].index.to_list()
return {'Episodes': errs}
return error, _validate
def validate_117():
error = ErrorDefinition(
code='117',
description='Date of decision that a child should/should no longer be placed for adoption is beyond the current collection year or after the child ceased to be looked after.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_PLACED', 'DEC', 'REC', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placed_adoption = dfs['PlacedAdoption']
collection_end = dfs['metadata']['collection_end']
# datetime
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# Drop nans and continuing episodes
episodes = episodes.dropna(subset=['DECOM'])
episodes = episodes[episodes['REC'] != 'X1']
episodes = episodes.loc[episodes.groupby('CHILD')['DECOM'].idxmax()]
# prepare to merge
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
p4a_cols = ['DATE_PLACED', 'DATE_PLACED_CEASED']
# latest episodes
merged = episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
mask = (
(merged['DATE_PLACED'] > collection_end)
| (merged['DATE_PLACED'] > merged['DEC'])
| (merged['DATE_PLACED_CEASED'] > collection_end)
| (merged['DATE_PLACED_CEASED'] > merged['DEC'])
)
# If provided <DATE_PLACED> and/or <DATE_PLACED_CEASED> must not be > <COLLECTION_END_DATE> or <DEC> of latest episode where <REC> not = 'X1'
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_118():
error = ErrorDefinition(
code='118',
description='Date of decision that a child should no longer be placed for adoption is before the current collection year or before the date the child started to be looked after.',
affected_fields=['DECOM', 'DECOM', 'LS']
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
code_list = ['V3', 'V4']
# datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
filter_by_ls = episodes[~(episodes['LS'].isin(code_list))]
earliest_episode_idxs = filter_by_ls.groupby('CHILD')['DECOM'].idxmin()
earliest_episodes = episodes[episodes.index.isin(earliest_episode_idxs)]
# prepare to merge
placed_adoption.reset_index(inplace=True)
earliest_episodes.reset_index(inplace=True)
# merge
merged = earliest_episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
# drop rows where DATE_PLACED_CEASED is not provided
merged = merged.dropna(subset=['DATE_PLACED_CEASED'])
# If provided <DATE_PLACED_CEASED> must not be prior to <COLLECTION_START_DATE> or <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
mask = (merged['DATE_PLACED_CEASED'] < merged['DECOM']) | (merged['DATE_PLACED_CEASED'] < collection_start)
# error locations
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_352():
error = ErrorDefinition(
code='352',
description='Child who started to be looked after was aged 18 or over.',
affected_fields=['DECOM', 'RNE'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
care_start = episodes_merged['RNE'].str.upper().astype(str).isin(['S'])
started_over_18 = episodes_merged['DOB18'] <= episodes_merged['DECOM']
error_mask = care_start & started_over_18
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_209():
error = ErrorDefinition(
code='209',
description='Child looked after is of school age and should not have an unknown Unique Pupil Number (UPN) code of UN1.',
affected_fields=['UPN', 'DOB']
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
collection_start = dfs['metadata']['collection_start']
# convert to datetime
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
yr = collection_start.year - 1
reference_date = pd.to_datetime('31/08/' + str(yr), format='%d/%m/%Y', errors='coerce')
# If <DOB> >= 4 years prior to 31/08/YYYY then <UPN> should not be 'UN1' Note: YYYY in this instance refers to the year prior to the collection start (for collection year 2019-2020, it would be looking at the 31/08/2018).
mask = (reference_date >= (header['DOB'] + pd.offsets.DateOffset(years=4))) & (header['UPN'] == 'UN1')
# error locations
error_locs_header = header.index[mask]
return {'Header': error_locs_header.tolist()}
return error, _validate
def validate_198():
error = ErrorDefinition(
code='198',
description="Child has not been looked after continuously for at least 12 months at 31 March but a reason "
"for no Strengths and Difficulties (SDQ) score has been completed. ",
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_REASON'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_185():
error = ErrorDefinition(
code='185',
description="Child has not been looked after continuously for at least 12 months at " +
"31 March but a Strengths and Difficulties (SDQ) score has been completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_186():
error = ErrorDefinition(
code='186',
description="Children aged 4 or over at the start of the year and children aged under 17 at the " +
"end of the year and who have been looked after for at least 12 months continuously " +
"should have a Strengths and Difficulties (SDQ) score completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_start_str = dfs['metadata']['collection_start']
collection_end_str = dfs['metadata']['collection_end']
collection_start = pd.to_datetime(collection_start_str, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2 = add_CLA_column(dfs, 'OC2')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
oc2['17th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=17)
error_mask = (
(oc2['4th_bday'] <= collection_start)
& (oc2['17th_bday'] > collection_end)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_187():
error = ErrorDefinition(
code='187',
description="Child cannot be looked after continuously for 12 months at " +
"31 March (OC2) and have any of adoption or care leavers returns completed.",
affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR', # OC3
'IN_TOUCH', 'ACTIV', 'ACCOM'], # AD1
)
def _validate(dfs):
if (
'OC3' not in dfs
or 'AD1' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
ad1, oc3 = add_CLA_column(dfs, ['AD1', 'OC3'])
# OC3
should_be_blank = ['IN_TOUCH', 'ACTIV', 'ACCOM']
oc3_mask = oc3['CONTINUOUSLY_LOOKED_AFTER'] & oc3[should_be_blank].notna().any(axis=1)
oc3_error_locs = oc3[oc3_mask].index.to_list()
# AD1
should_be_blank = ['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR']
ad1_mask = ad1['CONTINUOUSLY_LOOKED_AFTER'] & ad1[should_be_blank].notna().any(axis=1)
ad1_error_locs = ad1[ad1_mask].index.to_list()
return {'AD1': ad1_error_locs,
'OC3': oc3_error_locs}
return error, _validate
def validate_188():
error = ErrorDefinition(
code='188',
description="Child is aged under 4 years at the end of the year, "
"but a Strengths and Difficulties (SDQ) score or a reason "
"for no SDQ score has been completed. ",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_end_str = dfs['metadata']['collection_end']
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
error_mask = (
(oc2['4th_bday'] > collection_end)
& oc2[['SDQ_SCORE', 'SDQ_REASON']].notna().any(axis=1)
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_190():
error = ErrorDefinition(
code='190',
description="Child has not been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been completed.",
affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
, # AD1
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_blank = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
mask = ~oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_blank].notna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_191():
error = ErrorDefinition(
code='191',
description="Child has been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been left blank.",
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'], # OC2
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_present = ['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE']
mask = oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_present].isna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_607():
error = ErrorDefinition(
code='607',
description='Child ceased to be looked after in the year, but mother field has not been completed.',
affected_fields=['DEC', 'REC', 'MOTHER', 'LS', 'SEX']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
code_list = ['V3', 'V4']
# convert to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# CEASED_TO_BE_LOOKED_AFTER = DEC is not null and REC is filled but not equal to X1
CEASED_TO_BE_LOOKED_AFTER = merged['DEC'].notna() & ((merged['REC'] != 'X1') & merged['REC'].notna())
# and <LS> not = 'V3' or 'V4'
check_LS = ~(merged['LS'].isin(code_list))
# and <DEC> is in <CURRENT_COLLECTION_YEAR
check_DEC = (collection_start <= merged['DEC']) & (merged['DEC'] <= collection_end)
# Where <CEASED_TO_BE_LOOKED_AFTER> = 'Y', and <LS> not = 'V3' or 'V4' and <DEC> is in <CURRENT_COLLECTION_YEAR> and <SEX> = '2' then <MOTHER> should be provided.
mask = CEASED_TO_BE_LOOKED_AFTER & check_LS & check_DEC & (merged['SEX'] == '2') & (merged['MOTHER'].isna())
header_error_locs = merged.loc[mask, 'index_er']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_210():
error = ErrorDefinition(
code='210',
description='Children looked after for more than a week at 31 March should not have an unknown Unique Pupil Number (UPN) code of UN4.',
affected_fields=['UPN', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_end = dfs['metadata']['collection_end']
# convert to datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
yr = collection_end.year
reference_date = pd.to_datetime('24/03/' + str(yr), format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
# the logical way is to merge left on UPN but that will be a one to many merge and may not go as well as a many to one merge that we've been doing.
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# If <UPN> = 'UN4' then no episode <DECOM> must be >= 24/03/YYYY. Note: YYYY refers to the current collection year.
mask = (merged['UPN'] == 'UN4') & (merged['DECOM'] >= reference_date)
# error locations
error_locs_header = merged.loc[mask, 'index_er']
error_locs_eps = merged.loc[mask, 'index_eps']
return {'Episodes': error_locs_eps.tolist(), 'Header': error_locs_header.unique().tolist()}
return error, _validate
def validate_1010():
error = ErrorDefinition(
code='1010',
description='This child has no episodes loaded for current year even though there was an open episode of '
+ 'care at the end of the previous year, and care leaver data has been entered.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
oc3 = dfs['OC3']
# convert DECOM to datetime, drop missing/invalid values, then sort by CHILD and DECOM
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last = episodes_last.dropna(subset=['DECOM']).sort_values(['CHILD', 'DECOM'], ascending=True)
# Keep only the final episode for each child (ie where the following row has a different CHILD value)
episodes_last = episodes_last[
episodes_last['CHILD'].shift(-1) != episodes_last['CHILD']
]
# Keep only the final episodes that were still open
episodes_last = episodes_last[episodes_last['DEC'].isna()]
# The remaining children ought to have episode data in the current year if they are in OC3
has_current_episodes = oc3['CHILD'].isin(episodes['CHILD'])
has_open_episode_last = oc3['CHILD'].isin(episodes_last['CHILD'])
error_mask = ~has_current_episodes & has_open_episode_last
validation_error_locations = oc3.index[error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_525():
error = ErrorDefinition(
code='525',
description='A child for whom the decision to be placed for adoption has been reversed cannot be adopted during the year.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR',
'LS_ADOPTR']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs or 'AD1' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
ad1 = dfs['AD1']
# prepare to merge
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = placed_adoption.merge(ad1, on='CHILD', how='left', suffixes=['_placed', '_ad1'])
# If <DATE_PLACED_CEASED> not Null, then <DATE_INT>; <DATE_MATCH>; <FOSTER_CARE>; <NB_ADOPTR>; <SEX_ADOPTR>; and <LS_ADOPTR> should not be provided
mask = merged['DATE_PLACED_CEASED'].notna() & (
merged['DATE_INT'].notna() | merged['DATE_MATCH'].notna() | merged['FOSTER_CARE'].notna() |
merged['NB_ADOPTR'].notna() | merged['SEX_ADOPTR'].notna() | merged['LS_ADOPTR'].notna())
# error locations
pa_error_locs = merged.loc[mask, 'index_placed']
ad_error_locs = merged.loc[mask, 'index_ad1']
# return result
return {'PlacedAdoption': pa_error_locs.tolist(), 'AD1': ad_error_locs.tolist()}
return error, _validate
def validate_335():
error = ErrorDefinition(
code='335',
description='The current foster value (0) suggests that child is not adopted by current foster carer, but last placement is A2, A3, or A5. Or the current foster value (1) suggests that child is adopted by current foster carer, but last placement is A1, A4 or A6.',
affected_fields=['PLACE', 'FOSTER_CARE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'AD1' not in dfs:
return {}
else:
episodes = dfs['Episodes']
ad1 = dfs['AD1']
# prepare to merge
episodes.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = episodes.merge(ad1, on='CHILD', how='left', suffixes=['_eps', '_ad1'])
# Where <PL> = 'A2', 'A3' or 'A5' and <REC> = 'E1', 'E11' or 'E12' then <FOSTER_CARE> should not be '0'; where <PL> = 'A1', 'A4' or 'A6' and <REC> = 'E1', 'E11' or 'E12' then <FOSTER_CARE> should not be '1'.
mask = (
merged['REC'].isin(['E1', 'E11', 'E12']) & (
(merged['PLACE'].isin(['A2', 'A3', 'A5']) & (merged['FOSTER_CARE'].astype(str) == '0'))
| (merged['PLACE'].isin(['A1', 'A4', 'A6']) & (merged['FOSTER_CARE'].astype(str) == '1'))
)
)
eps_error_locs = merged.loc[mask, 'index_eps']
ad1_error_locs = merged.loc[mask, 'index_ad1']
# use .unique since join is many to one
return {'Episodes': eps_error_locs.tolist(), 'AD1': ad1_error_locs.unique().tolist()}
return error, _validate
def validate_215():
error = ErrorDefinition(
code='215',
description='Child has care leaver information but one or more data items relating to children looked after for 12 months have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK',
'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
)
def _validate(dfs):
if 'OC3' not in dfs or 'OC2' not in dfs:
return {}
else:
oc3 = dfs['OC3']
oc2 = dfs['OC2']
# prepare to merge
oc3.reset_index(inplace=True)
oc2.reset_index(inplace=True)
merged = oc3.merge(oc2, on='CHILD', how='left', suffixes=['_3', '_2'])
# If any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided then <CONVICTED>; <HEALTH_CHECK>; <IMMUNISATIONS>; <TEETH_CHECK>; <HEALTH_ASSESSMENT>; <SUBSTANCE MISUSE>; <INTERVENTION_RECEIVED>; <INTERVENTION_OFFERED>; should not be provided
mask = (merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna()) & (
merged['CONVICTED'].notna() | merged['HEALTH_CHECK'].notna() | merged['IMMUNISATIONS'].notna() |
merged['TEETH_CHECK'].notna() | merged['HEALTH_ASSESSMENT'].notna() | merged[
'SUBSTANCE_MISUSE'].notna() | merged['INTERVENTION_RECEIVED'].notna() | merged[
'INTERVENTION_OFFERED'].notna())
# error locations
oc3_error_locs = merged.loc[mask, 'index_3']
oc2_error_locs = merged.loc[mask, 'index_2']
return {'OC3': oc3_error_locs.tolist(), 'OC2': oc2_error_locs.tolist()}
return error, _validate
def validate_399():
error = ErrorDefinition(
code='399',
description='Mother field, review field or participation field are completed but '
+ 'child is looked after under legal status V3 or V4.',
affected_fields=['MOTHER', 'LS', 'REVIEW', 'REVIEW_CODE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs or 'Reviews' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
reviews = dfs['Reviews']
code_list = ['V3', 'V4']
# prepare to merge
episodes['index_eps'] = episodes.index
header['index_hdr'] = header.index
reviews['index_revs'] = reviews.index
# merge
merged = (episodes.merge(header, on='CHILD', how='left')
.merge(reviews, on='CHILD', how='left'))
# If <LS> = 'V3' or 'V4' then <MOTHER>, <REVIEW> and <REVIEW_CODE> should not be provided
mask = merged['LS'].isin(code_list) & (
merged['MOTHER'].notna() | merged['REVIEW'].notna() | merged['REVIEW_CODE'].notna())
# Error locations
eps_errors = merged.loc[mask, 'index_eps']
header_errors = merged.loc[mask, 'index_hdr'].unique()
revs_errors = merged.loc[mask, 'index_revs'].unique()
return {'Episodes': eps_errors.tolist(),
'Header': header_errors.tolist(),
'Reviews': revs_errors.tolist()}
return error, _validate
def validate_189():
error = ErrorDefinition(
code='189',
description='Child is aged 17 years or over at the beginning of the year, but a Strengths and Difficulties '
+ '(SDQ) score or a reason for no Strengths and Difficulties (SDQ) score has been completed.',
affected_fields=['DOB', 'SDQ_SCORE', 'SDQ_REASON']
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
collection_start = dfs['metadata']['collection_start']
# datetime format allows appropriate comparison between dates
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# If <DOB> >17 years prior to <COLLECTION_START_DATE> then <SDQ_SCORE> and <SDQ_REASON> should not be provided
mask = ((oc2['DOB'] + pd.offsets.DateOffset(years=17)) <= collection_start) & (
oc2['SDQ_REASON'].notna() | oc2['SDQ_SCORE'].notna())
# That is, raise error if collection_start >= DOB + 17 years
oc_error_locs = oc2.index[mask]
return {'OC2': oc_error_locs.tolist()}
return error, _validate
def validate_226():
error = ErrorDefinition(
code='226',
description='Reason for placement change is not required.',
affected_fields=['REASON_PLACE_CHANGE', 'PLACE']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
code_list = ['T0', 'T1', 'T2', 'T3', 'T4']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# create column to see previous REASON_PLACE_CHANGE
episodes = episodes.sort_values(['CHILD', 'DECOM'])
episodes['PREVIOUS_REASON'] = episodes.groupby('CHILD')['REASON_PLACE_CHANGE'].shift(1)
# If <PL> = 'T0'; 'T1'; 'T2'; 'T3' or 'T4' then <REASON_PLACE_CHANGE> should be null in current episode and current episode - 1
mask = episodes['PLACE'].isin(code_list) & (
episodes['REASON_PLACE_CHANGE'].notna() | episodes['PREVIOUS_REASON'].notna())
# error locations
error_locs = episodes.index[mask]
return {'Episodes': error_locs.tolist()}
return error, _validate
def validate_358():
error = ErrorDefinition(
code='358',
description='Child with this legal status should not be under 10.',
affected_fields=['DECOM', 'DOB', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['J1', 'J2', 'J3']
# convert dates to datetime format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# Where <LS> = 'J1', 'J2' or 'J3' then <DOB> should be <= 10 years prior to <DECOM>
mask = merged['LS'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=10) < merged['DECOM'])
# That is, raise error if DECOM > DOB + 10years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_407():
error = ErrorDefinition(
code='407',
description='Reason episode ceased is Special Guardianship Order, but child has reached age 18.',
affected_fields=['DEC', 'DOB', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['E45', 'E46', 'E47', 'E48']
# convert dates to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# If <REC> = 'E45' or 'E46' or 'E47' or 'E48' then <DOB> must be < 18 years prior to <DEC>
mask = merged['REC'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=18) < merged['DEC'])
# That is, raise error if DEC > DOB + 18 years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_1007():
error = ErrorDefinition(
code='1007',
description='Care leaver information is not required for 17- or 18-year olds who are still looked after.',
affected_fields=['DEC', 'REC', 'DOB', 'IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_end = dfs['metadata']['collection_end']
# convert dates to datetime format
oc3['DOB'] = pd.to_datetime(oc3['DOB'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
oc3.reset_index(inplace=True)
merged = episodes.merge(oc3, on='CHILD', how='left', suffixes=['_eps', '_oc3'])
# If <DOB> is at least 17 but under 19 years prior to <COLLECTION_END_DATE> and current episode <DEC> and/or <REC> not provided then <IN_TOUCH>, <ACTIV> and <ACCOM> should not be provided
check_age = (merged['DOB'] + pd.offsets.DateOffset(years=17) <= collection_end) & (
merged['DOB'] + pd.offsets.DateOffset(years=19) > collection_end)
# That is, check that 17<=age<19
check_dec_rec = merged['REC'].isna() | merged['DEC'].isna()
# if either DEC or REC are absent
mask = check_age & check_dec_rec & (
merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna())
# Then raise an error if either IN_TOUCH, ACTIV, or ACCOM have been provided too
# error locations
oc3_error_locs = merged.loc[mask, 'index_oc3']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'OC3': oc3_error_locs.unique().tolist()}
return error, _validate
def validate_442():
error = ErrorDefinition(
code='442',
description='Unique Pupil Number (UPN) field is not completed.',
affected_fields=['UPN', 'LS']
)
def _validate(dfs):
if ('Episodes' not in dfs) or ('Header' not in dfs):
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
code_list = ['V3', 'V4']
# merge left on episodes to get all children for which episodes have been recorded even if they do not exist on the header.
merged = episodes.merge(header, on=['CHILD'], how='left', suffixes=['_eps', '_er'])
# Where any episode present, with an <LS> not = 'V3' or 'V4' then <UPN> must be provided
mask = (~merged['LS'].isin(code_list)) & merged['UPN'].isna()
episode_error_locs = merged.loc[mask, 'index_eps']
header_error_locs = merged.loc[mask, 'index_er']
return {'Episodes': episode_error_locs.tolist(),
# Select unique values since many episodes are joined to one header
# and multiple errors will be raised for the same index.
'Header': header_error_locs.dropna().unique().tolist()}
return error, _validate
def validate_344():
error = ErrorDefinition(
code='344',
description='The record shows the young person has died or returned home to live with parent(s) or someone with parental responsibility for a continuous period of 6 months or more, but activity and/or accommodation on leaving care have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'DIED' or 'RHOM' then <ACTIV> and <ACCOM> should not be provided
mask = ((oc3['IN_TOUCH'] == 'DIED') | (oc3['IN_TOUCH'] == 'RHOM')) & (
oc3['ACTIV'].notna() | oc3['ACCOM'].notna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_345():
error = ErrorDefinition(
code='345',
description='The data collection record shows the local authority is in touch with this young person, but activity and/or accommodation data items are zero.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'Yes' then <ACTIV> and <ACCOM> must be provided
mask = (oc3['IN_TOUCH'] == 'YES') & (oc3['ACTIV'].isna() | oc3['ACCOM'].isna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_384():
error = ErrorDefinition(
code='384',
description='A child receiving respite care cannot be in a long-term foster placement ',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# Where <LS> = 'V3' or 'V4' then <PL> must not be 'U1' or 'U4'
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
(episodes['PLACE'] == 'U1') | (episodes['PLACE'] == 'U4'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_390():
error = ErrorDefinition(
code='390',
description='Reason episode ceased is adopted but child has not been previously placed for adoption.',
affected_fields=['PLACE', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# If <REC> = 'E11' or 'E12' then <PL> must be one of 'A3', 'A4', 'A5' or 'A6'
mask = ((episodes['REC'] == 'E11') | (episodes['REC'] == 'E12')) & ~(
(episodes['PLACE'] == 'A3') | (episodes['PLACE'] == 'A4') | (episodes['PLACE'] == 'A5') | (
episodes['PLACE'] == 'A6'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_378():
error = ErrorDefinition(
code='378',
description='A child who is placed with parent(s) cannot be looked after under a single period of accommodation under Section 20 of the Children Act 1989.',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# & binds more tightly than ==, so the brackets are necessary here
mask = (episodes['PLACE'] == 'P1') & (episodes['LS'] == 'V2')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_398():
error = ErrorDefinition(
code='398',
description='Distance field completed but child looked after under legal status V3 or V4.',
affected_fields=['LS', 'HOME_POST', 'PL_POST']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
episodes['HOME_POST'].notna() | episodes['PL_POST'].notna())
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_451():
error = ErrorDefinition(
code='451',
description='Child is still freed for adoption, but freeing orders could not be applied for since 30 December 2005.',
affected_fields=['DEC', 'REC', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = episodes['DEC'].isna() & episodes['REC'].isna() & (episodes['LS'] == 'D1')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_519():
error = ErrorDefinition(
code='519',
description='Data entered on the legal status of adopters shows civil partnership couple, but data entered on genders of adopters does not show it as a couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = (ad1['LS_ADOPTR'] == 'L2') & (
(ad1['SEX_ADOPTR'] != 'MM') & (ad1['SEX_ADOPTR'] != 'FF') & (ad1['SEX_ADOPTR'] != 'MF'))
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_520():
error = ErrorDefinition(
code='520',
description='Data entry on the legal status of adopters shows different gender married couple but data entry on genders of adopters shows it as a same gender couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
# check condition
mask = (ad1['LS_ADOPTR'] == 'L11') & (ad1['SEX_ADOPTR'] != 'MF')
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_522():
error = ErrorDefinition(
code='522',
description='Date of decision that the child should be placed for adoption must be on or before the date that a child should no longer be placed for adoption.',
affected_fields=['DATE_PLACED', 'DATE_PLACED_CEASED']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
# Convert to datetimes
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# Boolean mask
mask = placed_adoption['DATE_PLACED_CEASED'] > placed_adoption['DATE_PLACED']
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_563():
error = ErrorDefinition(
code='563',
description='The child should no longer be placed for adoption but the date of the decision that the child should be placed for adoption is blank',
affected_fields=['DATE_PLACED', 'REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
mask = placed_adoption['REASON_PLACED_CEASED'].notna() & placed_adoption['DATE_PLACED_CEASED'].notna() & \
placed_adoption['DATE_PLACED'].isna()
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_544():
error = ErrorDefinition(
code='544',
description="Any child who has conviction information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['CONVICTED', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
convict = oc2['CONVICTED'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = convict & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_634():
error = ErrorDefinition(
code='634',
description='There are entries for previous permanence options, but child has not started to be looked after from 1 April 2016 onwards.',
affected_fields=['LA_PERM', 'PREV_PERM', 'DATE_PERM', 'DECOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PrevPerm' not in dfs:
return {}
else:
episodes = dfs['Episodes']
prevperm = dfs['PrevPerm']
collection_start = dfs['metadata']['collection_start']
# convert date field to appropriate format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# the latest DECOM per child is the episode most likely to satisfy the condition
episodes['LAST_DECOM'] = episodes.groupby('CHILD')['DECOM'].transform('max')
# prepare to merge
episodes.reset_index(inplace=True)
prevperm.reset_index(inplace=True)
merged = prevperm.merge(episodes, on='CHILD', how='left', suffixes=['_prev', '_eps'])
# If <PREV_PERM> or <LA_PERM> or <DATE_PERM> provided, then at least 1 episode must have a <DECOM> later than 01/04/2016
mask = (merged['PREV_PERM'].notna() | merged['DATE_PERM'].notna() | merged['LA_PERM'].notna()) & (
merged['LAST_DECOM'] < collection_start)
eps_error_locs = merged.loc[mask, 'index_eps']
prevperm_error_locs = merged.loc[mask, 'index_prev']
# return {'PrevPerm':prevperm_error_locs}
return {'Episodes': eps_error_locs.unique().tolist(), 'PrevPerm': prevperm_error_locs.unique().tolist()}
return error, _validate
def validate_158():
error = ErrorDefinition(
code='158',
description='If a child has been recorded as receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be left blank.',
affected_fields=['INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
error_mask = oc2['INTERVENTION_RECEIVED'].astype(str).eq('1') & oc2['INTERVENTION_OFFERED'].notna()
error_locations = oc2.index[error_mask]
return {'OC2': error_locations.tolist()}
return error, _validate
def validate_133():
error = ErrorDefinition(
code='133',
description="Data entry for accommodation after leaving care is invalid. If reporting on a child's accommodation after leaving care the data entry must be valid.",
affected_fields=['ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
valid_codes = ['B1', 'B2', 'C1', 'C2', 'D1', 'D2', 'E1', 'E2', 'G1', 'G2', 'H1', 'H2', 'K1', 'K2', 'R1',
'R2', 'S2', 'T1', 'T2', 'U1', 'U2', 'V1', 'V2', 'W1', 'W2', 'X2', 'Y1', 'Y2', 'Z1', 'Z2',
'0']
error_mask = ~oc3['ACCOM'].isna() & ~oc3['ACCOM'].isin(valid_codes)
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.tolist()}
return error, _validate
def validate_565():
error = ErrorDefinition(
code='565',
description='The date that the child started to be missing or away from placement without authorisation has been completed but whether the child was missing or away from placement without authorisation has not been completed.',
affected_fields=['MISSING', 'MIS_START']
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
mask = missing['MIS_START'].notna() & missing['MISSING'].isna()
error_locations = missing.index[mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_433():
error = ErrorDefinition(
code='433',
description='The reason for new episode suggests that this is a continuation episode, but the episode does not start on the same day as the last episode finished.',
affected_fields=['RNE', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['original_index'] = episodes.index
episodes.sort_values(['CHILD', 'DECOM', 'DEC'], inplace=True)
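# after sorting, shift(1) lines each episode up against the one immediately before it, so PREVIOUS_DEC/PREVIOUS_CHILD describe the preceding row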
episodes[['PREVIOUS_DEC', 'PREVIOUS_CHILD']] = episodes[['DEC', 'CHILD']].shift(1)
rne_is_ongoing = episodes['RNE'].str.upper().astype(str).isin(['P', 'L', 'T', 'U', 'B'])
date_mismatch = episodes['PREVIOUS_DEC'] != episodes['DECOM']
missing_date = episodes['PREVIOUS_DEC'].isna() | episodes['DECOM'].isna()
same_child = episodes['PREVIOUS_CHILD'] == episodes['CHILD']
error_mask = rne_is_ongoing & (date_mismatch | missing_date) & same_child
error_locations = episodes['original_index'].loc[error_mask].sort_values()
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_437():
error = ErrorDefinition(
code='437',
description='Reason episode ceased is child has died or is aged 18 or over but there are further episodes.',
affected_fields=['REC'],
)
# !# potential false negatives, as this only operates on the current year's data
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes.sort_values(['CHILD', 'DECOM'], inplace=True)
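# shift(-1) lines each episode up against the one that follows it, so NEXT_CHILD shows whether the same child has a later episode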
episodes[['NEXT_DECOM', 'NEXT_CHILD']] = episodes[['DECOM', 'CHILD']].shift(-1)
# drop rows with missing DECOM as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
ceased_e2_e15 = episodes['REC'].str.upper().astype(str).isin(['E2', 'E15'])
has_later_episode = episodes['CHILD'] == episodes['NEXT_CHILD']
error_mask = ceased_e2_e15 & has_later_episode
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_547():
error = ErrorDefinition(
code='547',
description="Any child who has health promotion information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
healthck = oc2['HEALTH_CHECK'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = healthck & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_635():
error = ErrorDefinition(
code='635',
description='There are entries for date of order and local authority code where previous permanence option was arranged but previous permanence code is Z1',
affected_fields=['LA_PERM', 'DATE_PERM', 'PREV_PERM']
)
def _validate(dfs):
if 'PrevPerm' not in dfs:
return {}
else:
prev_perm = dfs['PrevPerm']
# raise an error if either LA_PERM or DATE_PERM is present, yet PREV_PERM is absent.
mask = ((prev_perm['LA_PERM'].notna() | prev_perm['DATE_PERM'].notna()) & prev_perm['PREV_PERM'].isna())
error_locations = prev_perm.index[mask]
return {'PrevPerm': error_locations.to_list()}
return error, _validate
def validate_550():
error = ErrorDefinition(
code='550',
description='A placement provider code of PR0 can only be associated with placement P1.',
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = (episodes['PLACE'] != 'P1') & episodes['PLACE_PROVIDER'].eq('PR0')
validation_error_locations = episodes.index[mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_217():
error = ErrorDefinition(
code='217',
description='Children who are placed for adoption with current foster carers (placement types A3 or A5) must have a reason for new episode of S, T or U.',
affected_fields=['PLACE', 'DECOM', 'RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
min_decom_allowed = pd.to_datetime('01/04/2015', format='%d/%m/%Y', errors='coerce')
reason_new_ep = ['S', 'T', 'U']
place_codes = ['A3', 'A5']
mask = (episodes['PLACE'].isin(place_codes) & (episodes['DECOM'] >= min_decom_allowed)) & ~episodes[
'RNE'].isin(reason_new_ep)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_518():
error = ErrorDefinition(
code='518',
description='If reporting legal status of adopters is L4 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L4') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_517():
error = ErrorDefinition(
code='517',
description='If reporting legal status of adopters is L3 then the genders of adopters should be coded as MF. MF = the adopting couple are male and female.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L3') & ~AD1['SEX_ADOPTR'].isin(['MF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_558():
error = ErrorDefinition(
code='558',
description='If a child has been adopted, then the decision to place them for adoption has not been disrupted and the date of the decision that a child should no longer be placed for adoption should be left blank. If the REC code is either E11 or E12 then the DATE_PLACED_CEASED date should not be provided.',
affected_fields=['DATE_PLACED_CEASED', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
rec_codes = ['E11', 'E12']
placeEpisodes = episodes[episodes['REC'].isin(rec_codes)]
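# the merge keeps the 'index' column created by reset_index and restores it as the index, so flagged rows map back to their original Episodes row labels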
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED_CEASED'].notna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_453():
error = ErrorDefinition(
code='453',
description='Contradiction between placement distance in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_DISTANCE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['PL_DISTANCE'] = pd.to_numeric(episodes['PL_DISTANCE'], errors='coerce')
episodes_last['PL_DISTANCE'] = pd.to_numeric(episodes_last['PL_DISTANCE'], errors='coerce')
# drop rows with missing DECOM before finding idxmin/max, as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
episodes_last = episodes_last.dropna(subset=['DECOM'])
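# idxmin/idxmax pick out, per child, the first episode of the current year and the last episode of the previous year so the two can be compared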
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_dist = abs(episodes_merged['PL_DISTANCE'] - episodes_merged['PL_DISTANCE_last']) >= 0.2
error_mask = in_both_years & same_rne & last_year_open & different_pl_dist
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_516():
error = ErrorDefinition(
code='516',
description='The episode data submitted for this child does not show that he/she was with their former foster carer(s) during the year. If the code in the reason episode ceased is E45 or E46 the child must have a placement code of U1 to U6.',
affected_fields=['REC', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
place_codes = ['U1', 'U2', 'U3', 'U4', 'U5', 'U6']
rec_codes = ['E45', 'E46']
error_mask = episodes['REC'].isin(rec_codes) & ~episodes['PLACE'].isin(place_codes)
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_511():
error = ErrorDefinition(
code='511',
description='If reporting that the number of person(s) adopting the looked after child is two adopters then the code should only be MM, FF or MF. MM = the adopting couple are both males; FF = the adopting couple are both females; MF = The adopting couple are male and female.',
affected_fields=['NB_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
mask = AD1['NB_ADOPTR'].astype(str).eq('2') & AD1['SEX_ADOPTR'].isin(['M1', 'F1'])
validation_error_mask = mask
validation_error_locations = AD1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_524():
error = ErrorDefinition(
code='524',
description='If reporting legal status of adopters is L12 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L12') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_441():
error = ErrorDefinition(
code='441',
description='Participation method indicates child was 4 years old or over at the time of the review, but the date of birth and review date indicates the child was under 4 years old.',
affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
reviews = dfs['Reviews']
reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce')
reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce')
reviews = reviews.dropna(subset=['REVIEW', 'DOB'])
mask = reviews['REVIEW_CODE'].isin(['PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7']) & (
reviews['REVIEW'] < reviews['DOB'] + pd.offsets.DateOffset(years=4))
validation_error_mask = mask
validation_error_locations = reviews.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_184():
error = ErrorDefinition(
code='184',
description='Date of decision that a child should be placed for adoption is before the child was born.',
affected_fields=['DATE_PLACED',  # PlacedAdoption
'DOB'], # Header
)
def _validate(dfs):
if 'Header' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
child_record = dfs['Header']
placed_for_adoption = dfs['PlacedAdoption']
all_data = (placed_for_adoption
.reset_index()
.merge(child_record, how='left', on='CHILD', suffixes=[None, '_P4A']))
all_data['DATE_PLACED'] = pd.to_datetime(all_data['DATE_PLACED'], format='%d/%m/%Y', errors='coerce')
all_data['DOB'] = pd.to_datetime(all_data['DOB'], format='%d/%m/%Y', errors='coerce')
mask = (all_data['DATE_PLACED'] >= all_data['DOB']) | all_data['DATE_PLACED'].isna()
validation_error = ~mask
validation_error_locations = all_data[validation_error]['index'].unique()
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_612():
error = ErrorDefinition(
code='612',
description="Date of birth field has been completed but mother field indicates child is not a mother.",
affected_fields=['SEX', 'MOTHER', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
error_mask = (
((header['MOTHER'].astype(str) == '0') | header['MOTHER'].isna())
& (header['SEX'].astype(str) == '2')
& header['MC_DOB'].notna()
)
validation_error_locations = header.index[error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_552():
"""
This error checks that the first adoption episode is after the last decision !
If there are multiple of either there may be unexpected results !
"""
error = ErrorDefinition(
code="552",
description="Date of Decision to place a child for adoption should be on or prior to the date that the child was placed for adoption.",
# Field that defines date of decision to place a child for adoption is DATE_PLACED and the start of adoption is defined by DECOM with 'A' placement types.
affected_fields=['DATE_PLACED', 'DECOM'],
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
# get the required datasets
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
# keep index values so that they stay the same when needed later on for error locations
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
adoption_eps = episodes[episodes['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])].copy()
# find most recent adoption decision
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# remove rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption['DATE_PLACED'].notna()]
placed_adoption_inds = placed_adoption.groupby('CHILD')['DATE_PLACED'].idxmax(skipna=True)
last_decision = placed_adoption.loc[placed_adoption_inds]
# first time child started adoption
adoption_eps["DECOM"] = pd.to_datetime(adoption_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
adoption_eps = adoption_eps[adoption_eps['DECOM'].notna()]
adoption_eps_inds = adoption_eps.groupby('CHILD')['DECOM'].idxmin(skipna=True)
# full information of first adoption
first_adoption = adoption_eps.loc[adoption_eps_inds]
# date of decision and date of start of adoption (DECOM) have to be put in one table
merged = first_adoption.merge(last_decision, on=['CHILD'], how='left', suffixes=['_EP', '_PA'])
# check to see if date of decision to place is less than or equal to date placed.
decided_after_placed = merged["DECOM"] < merged["DATE_PLACED"]
# find the corresponding location of error values per file.
episode_error_locs = merged.loc[decided_after_placed, 'index_EP']
placedadoption_error_locs = merged.loc[decided_after_placed, 'index_PA']
return {"PlacedAdoption": placedadoption_error_locs.to_list(), "Episodes": episode_error_locs.to_list()}
return error, _validate
def validate_551():
error = ErrorDefinition(
code='551',
description='Child has been placed for adoption but there is no date of the decision that the child should be placed for adoption.',
affected_fields=['DATE_PLACED', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
place_codes = ['A3', 'A4', 'A5', 'A6']
placeEpisodes = episodes[episodes['PLACE'].isin(place_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED'].isna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_557():
error = ErrorDefinition(
code='557',
description="Child for whom the decision was made that they should be placed for adoption has left care " +
"but was not adopted and information on the decision that they should no longer be placed for " +
"adoption items has not been completed.",
affected_fields=['DATE_PLACED_CEASED', 'REASON_PLACED_CEASED', # PlacedAdoption
'PLACE', 'LS', 'REC'], # Episodes
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'PlacedAdoption' not in dfs:
return {}
else:
eps = dfs['Episodes']
placed = dfs['PlacedAdoption']
eps = eps.reset_index()
placed = placed.reset_index()
child_placed = eps['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])
order_granted = eps['LS'].isin(['D1', 'E1'])
not_adopted = ~eps['REC'].isin(['E11', 'E12']) & eps['REC'].notna()
placed['ceased_incomplete'] = (
placed['DATE_PLACED_CEASED'].isna() | placed['REASON_PLACED_CEASED'].isna()
)
eps = eps[(child_placed | order_granted) & not_adopted]
eps = eps.merge(placed, on='CHILD', how='left', suffixes=['_EP', '_PA'], indicator=True)
eps = eps[(eps['_merge'] == 'left_only') | eps['ceased_incomplete']]
EP_errors = eps['index_EP']
PA_errors = eps['index_PA'].dropna()
return {
'Episodes': EP_errors.to_list(),
'PlacedAdoption': PA_errors.to_list(),
}
return error, _validate
def validate_207():
error = ErrorDefinition(
code='207',
description='Mother status for the current year disagrees with the mother status already recorded for this child.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
mother_is_different = header_merged['MOTHER'].astype(str) != header_merged['MOTHER_last'].astype(str)
mother_was_true = header_merged['MOTHER_last'].astype(str) == '1'
error_mask = in_both_years & mother_is_different & mother_was_true
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_523():
error = ErrorDefinition(
code='523',
description="Date of decision that the child should be placed for adoption should be the same date as the decision that adoption is in the best interest (date should be placed).",
affected_fields=['DATE_PLACED', 'DATE_INT'],
)
def _validate(dfs):
if ("AD1" not in dfs) or ("PlacedAdoption" not in dfs):
return {}
else:
placed_adoption = dfs["PlacedAdoption"]
ad1 = dfs["AD1"]
# keep initial index values to be reused for locating errors later on.
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
# convert to datetime to enable comparison
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format="%d/%m/%Y",
errors='coerce')
ad1["DATE_INT"] = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce')
# drop rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption["DATE_PLACED"].notna()]
ad1 = ad1[ad1["DATE_INT"].notna()]
# bring corresponding values together from both dataframes
merged_df = placed_adoption.merge(ad1, on=['CHILD'], how='inner', suffixes=["_AD", "_PA"])
# find error values
different_dates = merged_df['DATE_INT'] != merged_df['DATE_PLACED']
# map error locations to corresponding indices
pa_error_locations = merged_df.loc[different_dates, 'index_PA']
ad1_error_locations = merged_df.loc[different_dates, 'index_AD']
return {"PlacedAdoption": pa_error_locations.to_list(), "AD1": ad1_error_locations.to_list()}
return error, _validate
def validate_3001():
error = ErrorDefinition(
code='3001',
description='Where care leavers information is being returned for a young person around their 17th birthday, the accommodation cannot be with their former foster carer(s).',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
oc3 = dfs['OC3']
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
header['DOB17'] = header['DOB'] + pd.DateOffset(years=17)
oc3_merged = oc3.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
accom_foster = oc3_merged['ACCOM'].str.upper().astype(str).isin(['Z1', 'Z2'])
age_17_in_year = (oc3_merged['DOB17'] <= collection_end) & (oc3_merged['DOB17'] >= collection_start)
error_mask = accom_foster & age_17_in_year
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_389():
error = ErrorDefinition(
code='389',
description='Reason episode ceased is that child transferred to care of adult social care services, but child is aged under 16.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB16'] = header['DOB'] + pd.DateOffset(years=16)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_asc = episodes_merged['REC'].str.upper().astype(str).isin(['E7'])
ceased_over_16 = episodes_merged['DOB16'] <= episodes_merged['DEC']
error_mask = ceased_asc & ~ceased_over_16
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_387():
error = ErrorDefinition(
code='387',
description='Reason episode ceased is child moved into independent living arrangement, but the child is aged under 14.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB14'] = header['DOB'] + pd.DateOffset(years=14)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_indep = episodes_merged['REC'].str.upper().astype(str).isin(['E5', 'E6'])
ceased_over_14 = episodes_merged['DOB14'] <= episodes_merged['DEC']
dec_present = episodes_merged['DEC'].notna()
error_mask = ceased_indep & ~ceased_over_14 & dec_present
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_452():
error = ErrorDefinition(
code='452',
description='Contradiction between local authority of placement code in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
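# per child, keep only the first episode of the current year (idxmin on DECOM) and the last episode of the previous year (idxmax on DECOM)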
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_la = episodes_merged['PL_LA'].astype(str) != episodes_merged['PL_LA_last'].astype(str)
error_mask = in_both_years & same_rne & last_year_open & different_pl_la
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_386():
error = ErrorDefinition(
code='386',
description='Reason episode ceased is adopted but child has reached age 18.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = (
episodes
.reset_index()
.merge(header, how='left', on=['CHILD'], suffixes=('', '_header'), indicator=True)
.set_index('index')
.dropna(subset=['DOB18', 'DEC'])
)
ceased_adopted = episodes_merged['REC'].str.upper().astype(str).isin(['E11', 'E12'])
ceased_under_18 = episodes_merged['DOB18'] > episodes_merged['DEC']
error_mask = ceased_adopted & ~ceased_under_18
error_locations = episodes_merged.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_363():
error = ErrorDefinition(
code='363',
description='Child assessment order (CAO) lasted longer than 7 days allowed in the Children Act 1989.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
L2_eps = episodes[episodes['LS'] == 'L3'].copy()
L2_eps['original_index'] = L2_eps.index
L2_eps = L2_eps[L2_eps['DECOM'].notna()]
L2_eps.loc[L2_eps['DEC'].isna(), 'DEC'] = collection_end_str
L2_eps['DECOM'] = pd.to_datetime(L2_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
L2_eps = L2_eps.dropna(subset=['DECOM'])
L2_eps['DEC'] = pd.to_datetime(L2_eps['DEC'], format='%d/%m/%Y', errors='coerce')
L2_eps = L2_eps.sort_values(['CHILD', 'DECOM'])
L2_eps['index'] = pd.RangeIndex(0, len(L2_eps))
L2_eps['index+1'] = L2_eps['index'] + 1
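# self-merge on the shifted row number so each row gains the previous row's DEC and CHILD values as *_prev columns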
L2_eps = L2_eps.merge(L2_eps, left_on='index', right_on='index+1',
how='left', suffixes=[None, '_prev'])
L2_eps = L2_eps[['original_index', 'DECOM', 'DEC', 'DEC_prev', 'CHILD', 'CHILD_prev', 'LS']]
L2_eps['new_period'] = (
(L2_eps['DECOM'] > L2_eps['DEC_prev'])
| (L2_eps['CHILD'] != L2_eps['CHILD_prev'])
)
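# episodes that run on from the previous one (same child, no gap) share a period_id; summing durations per period gives the length of each continuous CAO period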
L2_eps['duration'] = (L2_eps['DEC'] - L2_eps['DECOM']).dt.days
L2_eps['period_id'] = L2_eps['new_period'].astype(int).cumsum()
L2_eps['period_duration'] = L2_eps.groupby('period_id')['duration'].transform(sum)
error_mask = L2_eps['period_duration'] > 7
return {'Episodes': L2_eps.loc[error_mask, 'original_index'].to_list()}
return error, _validate
def validate_364():
error = ErrorDefinition(
code='364',
description='Sections 41-46 of the Police and Criminal Evidence Act 1984 (PACE) severely limit ' +
'the time a child can be detained in custody in Local Authority (LA) accommodation.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
J2_eps = episodes[episodes['LS'] == 'J2'].copy()
J2_eps['original_index'] = J2_eps.index
J2_eps['DECOM'] = pd.to_datetime(J2_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
J2_eps = J2_eps[J2_eps['DECOM'].notna()]
J2_eps.loc[J2_eps['DEC'].isna(), 'DEC'] = collection_end_str
J2_eps['DEC'] = pd.to_datetime(J2_eps['DEC'], format='%d/%m/%Y', errors='coerce')
        J2_eps = J2_eps.sort_values(['CHILD', 'DECOM'])  # the index/index_prev merge below relies on this ordering
J2_eps['index'] = pd.RangeIndex(0, len(J2_eps))
J2_eps['index_prev'] = J2_eps['index'] + 1
J2_eps = J2_eps.merge(J2_eps, left_on='index', right_on='index_prev',
how='left', suffixes=[None, '_prev'])
J2_eps = J2_eps[['original_index', 'DECOM', 'DEC', 'DEC_prev', 'CHILD', 'CHILD_prev', 'LS']]
J2_eps['new_period'] = (
(J2_eps['DECOM'] > J2_eps['DEC_prev'])
| (J2_eps['CHILD'] != J2_eps['CHILD_prev'])
)
J2_eps['duration'] = (J2_eps['DEC'] - J2_eps['DECOM']).dt.days
J2_eps['period_id'] = J2_eps['new_period'].astype(int).cumsum()
J2_eps['period_duration'] = J2_eps.groupby('period_id')['duration'].transform(sum)
error_mask = J2_eps['period_duration'] > 21
return {'Episodes': J2_eps.loc[error_mask, 'original_index'].to_list()}
return error, _validate
def validate_365():
error = ErrorDefinition(
code='365',
        description='Any individual short-term respite placement must not exceed 17 days.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
episodes.loc[episodes['DEC'].isna(), 'DEC'] = collection_end_str
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
over_17_days = episodes['DEC'] > episodes['DECOM'] + pd.DateOffset(days=17)
error_mask = (episodes['LS'] == 'V3') & over_17_days
return {'Episodes': episodes.index[error_mask].to_list()}
return error, _validate
def validate_367():
error = ErrorDefinition(
code='367',
description='The maximum amount of respite care allowable is 75 days in any 12-month period.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
        V3_eps = episodes[episodes['LS'] == 'V3'].copy()
V3_eps = V3_eps.dropna(subset=['DECOM']) # missing DECOM should get fixed before looking for this error
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
V3_eps['DECOM_dt'] = pd.to_datetime(V3_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
V3_eps['DEC_dt'] = pd.to_datetime(V3_eps['DEC'], format='%d/%m/%Y', errors='coerce')
# truncate episode start/end dates to collection start/end respectively
V3_eps.loc[V3_eps['DEC'].isna() | (V3_eps['DEC_dt'] > collection_end), 'DEC_dt'] = collection_end
V3_eps.loc[V3_eps['DECOM_dt'] < collection_start, 'DECOM_dt'] = collection_start
V3_eps['duration'] = (V3_eps['DEC_dt'] - V3_eps['DECOM_dt']).dt.days
V3_eps = V3_eps[V3_eps['duration'] > 0]
V3_eps['year_total_duration'] = V3_eps.groupby('CHILD')['duration'].transform(sum)
error_mask = V3_eps['year_total_duration'] > 75
return {'Episodes': V3_eps.index[error_mask].to_list()}
return error, _validate
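# A hedged aside on validate_367 (dates below are invented): truncating episode dates to
# the collection year before summing durations, done above with .loc assignments, could
# equivalently be written with Series.clip. This is only an alternative formulation, not
# the rule's implementation.
def _example_clip_to_collection_year():
    import pandas as pd
    collection_start = pd.to_datetime('01/04/2020', format='%d/%m/%Y')
    collection_end = pd.to_datetime('31/03/2021', format='%d/%m/%Y')
    decom = pd.to_datetime(pd.Series(['01/01/2020', '01/06/2020']), format='%d/%m/%Y')
    dec = pd.to_datetime(pd.Series(['01/05/2020', None]), format='%d/%m/%Y')
    decom = decom.clip(lower=collection_start)  # starts before the year are pulled forward
    dec = dec.fillna(collection_end).clip(upper=collection_end)  # open or late ends are pulled back
    return (dec - decom).dt.days  # -> 30 and 303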
def validate_440():
error = ErrorDefinition(
code='440',
description='Participation method indicates child was under 4 years old at the time of the review, but date of birth and review date indicates the child was 4 years old or over.',
affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
reviews = dfs['Reviews']
reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce')
reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce')
mask = reviews['REVIEW_CODE'].eq('PN0') & (
reviews['REVIEW'] > reviews['DOB'] + pd.offsets.DateOffset(years=4))
validation_error_mask = mask
validation_error_locations = reviews.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_445():
error = ErrorDefinition(
code='445',
description='D1 is not a valid code for episodes starting after December 2005.',
affected_fields=['LS', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
max_decom_allowed = pd.to_datetime('31/12/2005', format='%d/%m/%Y', errors='coerce')
mask = episodes['LS'].eq('D1') & (episodes['DECOM'] > max_decom_allowed)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_446():
error = ErrorDefinition(
code='446',
description='E1 is not a valid code for episodes starting before December 2005.',
affected_fields=['LS', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
min_decom_allowed = pd.to_datetime('01/12/2005', format='%d/%m/%Y', errors='coerce')
mask = episodes['LS'].eq('E1') & (episodes['DECOM'] < min_decom_allowed)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_208():
error = ErrorDefinition(
code='208',
description='Unique Pupil Number (UPN) for the current year disagrees with the Unique Pupil Number (UPN) already recorded for this child.',
affected_fields=['UPN'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
upn_is_different = header_merged['UPN'].str.upper().astype(str) != header_merged[
'UPN_last'].str.upper().astype(str)
upn_not_recorded = header_merged['UPN'].str.upper().astype(str).isin(['UN2', 'UN3', 'UN4', 'UN5', 'UN6']) & \
header_merged['UPN_last'].str.upper().astype(str).isin(['UN1'])
error_mask = in_both_years & upn_is_different & ~upn_not_recorded
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_204():
error = ErrorDefinition(
code='204',
description='Ethnic origin code disagrees with the ethnic origin already recorded for this child.',
affected_fields=['ETHNIC'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
ethnic_is_different = header_merged['ETHNIC'].astype(str).str.upper() != header_merged[
'ETHNIC_last'].astype(str).str.upper()
error_mask = in_both_years & ethnic_is_different
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_203():
error = ErrorDefinition(
code='203',
description='Date of birth disagrees with the date of birth already recorded for this child.',
affected_fields=['DOB'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
header_last['DOB'] = pd.to_datetime(header_last['DOB'], format='%d/%m/%Y', errors='coerce')
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
dob_is_different = header_merged['DOB'].astype(str) != header_merged['DOB_last'].astype(str)
error_mask = in_both_years & dob_is_different
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_530():
error = ErrorDefinition(
code='530',
description="A placement provider code of PR4 cannot be associated with placement P1.",
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = episodes['PLACE'].eq('P1') & episodes['PLACE_PROVIDER'].eq('PR4')
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_571():
error = ErrorDefinition(
code='571',
description='The date that the child ceased to be missing or away from placement without authorisation is before the start or after the end of the collection year.',
affected_fields=['MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
missing['fMIS_END'] = pd.to_datetime(missing['MIS_END'], format='%d/%m/%Y', errors='coerce')
end_date_before_year = missing['fMIS_END'] < collection_start
end_date_after_year = missing['fMIS_END'] > collection_end
error_mask = end_date_before_year | end_date_after_year
error_locations = missing.index[error_mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_1005():
error = ErrorDefinition(
code='1005',
description='The end date of the missing episode or episode that the child was away from placement without authorisation is not a valid date.',
affected_fields=['MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
missing['fMIS_END'] = pd.to_datetime(missing['MIS_END'], format='%d/%m/%Y', errors='coerce')
missing_end_date = missing['MIS_END'].isna()
invalid_end_date = missing['fMIS_END'].isna()
error_mask = ~missing_end_date & invalid_end_date
error_locations = missing.index[error_mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_1004():
error = ErrorDefinition(
code='1004',
description='The start date of the missing episode or episode that the child was away from placement without authorisation is not a valid date.',
affected_fields=['MIS_START'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
missing['fMIS_START'] = pd.to_datetime(missing['MIS_START'], format='%d/%m/%Y', errors='coerce')
missing_start_date = missing['MIS_START'].isna()
invalid_start_date = missing['fMIS_START'].isna()
error_mask = missing_start_date | invalid_start_date
error_locations = missing.index[error_mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_202():
error = ErrorDefinition(
code='202',
description='The gender code conflicts with the gender already recorded for this child.',
affected_fields=['SEX'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
sex_is_different = header_merged['SEX'].astype(str) != header_merged['SEX_last'].astype(str)
error_mask = in_both_years & sex_is_different
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_621():
error = ErrorDefinition(
code='621',
description="Motherโs field has been completed but date of birth shows that the mother is younger than her child.",
affected_fields=['DOB', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
header['MC_DOB'] = pd.to_datetime(header['MC_DOB'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
mask = (header['MC_DOB'] > header['DOB']) | header['MC_DOB'].isna()
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_556():
error = ErrorDefinition(
code='556',
description='Date of decision that the child should be placed for adoption should be on or prior to the date that the freeing order was granted.',
affected_fields=['DATE_PLACED', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placedAdoptions['DATE_PLACED'] = pd.to_datetime(placedAdoptions['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
episodes = episodes.reset_index()
D1Episodes = episodes[episodes['LS'] == 'D1']
            merged = D1Episodes.reset_index().merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED'] > merged['DECOM']]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_393():
error = ErrorDefinition(
code='393',
description='Child is looked after but mother field is not completed.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header_female = header[header['SEX'].astype(str) == '2']
applicable_episodes = episodes[~episodes['LS'].str.upper().isin(['V3', 'V4'])]
error_mask = header_female['CHILD'].isin(applicable_episodes['CHILD']) & header_female['MOTHER'].isna()
error_locations = header_female.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_NoE():
error = ErrorDefinition(
code='NoE',
description='This child has no episodes loaded for previous year even though child started to be looked after before this current year.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last = dfs['Episodes_last']
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
episodes_before_year = episodes[episodes['DECOM'] < collection_start]
episodes_merged = episodes_before_year.reset_index().merge(episodes_last, how='left', on=['CHILD'],
indicator=True).set_index('index')
episodes_not_matched = episodes_merged[episodes_merged['_merge'] == 'left_only']
error_mask = episodes.index.isin(episodes_not_matched.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_356():
error = ErrorDefinition(
code='356',
description='The date the episode ceased is before the date the same episode started.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
error_mask = episodes['DEC'].notna() & (episodes['DEC'] < episodes['DECOM'])
return {'Episodes': episodes.index[error_mask].to_list()}
return error, _validate
def validate_611():
error = ErrorDefinition(
code='611',
description="Date of birth field is blank, but child is a mother.",
affected_fields=['MOTHER', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
validation_error_mask = header['MOTHER'].astype(str).isin(['1']) & header['MC_DOB'].isna()
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_1009():
error = ErrorDefinition(
code='1009',
description='Reason for placement change is not a valid code.',
affected_fields=['REASON_PLACE_CHANGE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'CARPL',
'CLOSE',
'ALLEG',
'STAND',
'APPRR',
'CREQB',
'CREQO',
'CHILD',
'LAREQ',
'PLACE',
'CUSTOD',
'OTHER'
]
mask = episodes['REASON_PLACE_CHANGE'].isin(code_list) | episodes['REASON_PLACE_CHANGE'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
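# Tiny illustrative sketch (invented field and codes) of the shared "valid code or blank"
# pattern used by validate_1009 and the other code-list rules below: a value passes when
# it is either in the published code list or missing, and anything else is flagged.
def _example_code_list_check():
    import pandas as pd
    frame = pd.DataFrame({'FIELD': ['A1', 'B2', None, 'ZZ']})
    code_list = ['A1', 'B2']
    mask = frame['FIELD'].isin(code_list) | frame['FIELD'].isna()
    return frame.index[~mask].tolist()  # only the 'ZZ' row (index 3) is flagged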
def validate_1006():
error = ErrorDefinition(
code='1006',
description='Missing type invalid.',
affected_fields=['MISSING'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
missing_from_care = dfs['Missing']
code_list = ['M', 'A']
mask = missing_from_care['MISSING'].isin(code_list) | missing_from_care['MISSING'].isna()
validation_error_mask = ~mask
validation_error_locations = missing_from_care.index[validation_error_mask]
return {'Missing': validation_error_locations.tolist()}
return error, _validate
def validate_631():
error = ErrorDefinition(
code='631',
description='Previous permanence option not a valid value.',
affected_fields=['PREV_PERM'],
)
def _validate(dfs):
if 'PrevPerm' not in dfs:
return {}
previous_permanence = dfs['PrevPerm']
code_list = ['P1', 'P2', 'P3', 'P4', 'Z1']
mask = previous_permanence['PREV_PERM'].isin(code_list) | previous_permanence['PREV_PERM'].isna()
validation_error_mask = ~mask
validation_error_locations = previous_permanence.index[validation_error_mask]
return {'PrevPerm': validation_error_locations.tolist()}
return error, _validate
def validate_196():
error = ErrorDefinition(
code='196',
description='Strengths and Difficulties (SDQ) reason is not a valid code.',
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
code_list = ['SDQ1', 'SDQ2', 'SDQ3', 'SDQ4', 'SDQ5']
mask = oc2['SDQ_REASON'].isin(code_list) | oc2['SDQ_REASON'].isna()
validation_error_mask = ~mask
validation_error_locations = oc2.index[validation_error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_177():
error = ErrorDefinition(
code='177',
description='The legal status of adopter(s) code is not a valid code.',
affected_fields=['LS_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['L0', 'L11', 'L12', 'L2', 'L3', 'L4']
mask = adoptions['LS_ADOPTR'].isin(code_list) | adoptions['LS_ADOPTR'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_176():
error = ErrorDefinition(
code='176',
description='The gender of adopter(s) at the date of adoption code is not a valid code.',
affected_fields=['SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['M1', 'F1', 'MM', 'FF', 'MF']
mask = adoptions['SEX_ADOPTR'].isin(code_list) | adoptions['SEX_ADOPTR'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_175():
error = ErrorDefinition(
code='175',
description='The number of adopter(s) code is not a valid code.',
affected_fields=['NB_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['1', '2']
mask = adoptions['NB_ADOPTR'].astype(str).isin(code_list) | adoptions['NB_ADOPTR'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_132():
error = ErrorDefinition(
code='132',
description='Data entry for activity after leaving care is invalid.',
affected_fields=['ACTIV'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
care_leavers = dfs['OC3']
code_list = [
'F1',
'P1',
'F2',
'P2',
'F4',
'P4',
'F5',
'P5',
'G4',
'G5',
'G6',
'0'
]
mask = care_leavers['ACTIV'].astype(str).isin(code_list) | care_leavers['ACTIV'].isna()
validation_error_mask = ~mask
validation_error_locations = care_leavers.index[validation_error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_131():
error = ErrorDefinition(
code='131',
description='Data entry for being in touch after leaving care is invalid.',
affected_fields=['IN_TOUCH'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
care_leavers = dfs['OC3']
code_list = [
'YES',
'NO',
'DIED',
'REFU',
'NREQ',
'RHOM'
]
mask = care_leavers['IN_TOUCH'].isin(code_list) | care_leavers['IN_TOUCH'].isna()
validation_error_mask = ~mask
validation_error_locations = care_leavers.index[validation_error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_120():
error = ErrorDefinition(
code='120',
description='The reason for the reversal of the decision that the child should be placed for adoption code is not valid.',
affected_fields=['REASON_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
placed_adoptions = dfs['PlacedAdoption']
code_list = ['RD1', 'RD2', 'RD3', 'RD4']
mask = placed_adoptions['REASON_PLACED_CEASED'].isin(code_list) | placed_adoptions[
'REASON_PLACED_CEASED'].isna()
validation_error_mask = ~mask
validation_error_locations = placed_adoptions.index[validation_error_mask]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_114():
error = ErrorDefinition(
code='114',
description='Data entry to record the status of former carer(s) of an adopted child is invalid.',
affected_fields=['FOSTER_CARE'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['0', '1']
mask = adoptions['FOSTER_CARE'].astype(str).isin(code_list) | adoptions['FOSTER_CARE'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_178():
error = ErrorDefinition(
code='178',
description='Placement provider code is not a valid code.',
affected_fields=['PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list_placement_provider = ['PR0', 'PR1', 'PR2', 'PR3', 'PR4', 'PR5']
code_list_placement_with_no_provider = ['T0', 'T1', 'T2', 'T3', 'T4', 'Z1']
place_provider_needed_and_correct = episodes['PLACE_PROVIDER'].isin(code_list_placement_provider) & ~episodes[
'PLACE'].isin(code_list_placement_with_no_provider)
place_provider_not_provided = episodes['PLACE_PROVIDER'].isna()
place_provider_not_needed = episodes['PLACE_PROVIDER'].isna() & episodes['PLACE'].isin(
code_list_placement_with_no_provider)
mask = place_provider_needed_and_correct | place_provider_not_provided | place_provider_not_needed
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_103():
error = ErrorDefinition(
code='103',
description='The ethnicity code is either not valid or has not been entered.',
affected_fields=['ETHNIC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
header = dfs['Header']
code_list = [
'WBRI',
'WIRI',
'WOTH',
'WIRT',
'WROM',
'MWBC',
'MWBA',
'MWAS',
'MOTH',
'AIND',
'APKN',
'ABAN',
'AOTH',
'BCRB',
'BAFR',
'BOTH',
'CHNE',
'OOTH',
'REFU',
'NOBT'
]
mask = header['ETHNIC'].isin(code_list)
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_143():
error = ErrorDefinition(
code='143',
description='The reason for new episode code is not a valid code.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = ['S', 'P', 'L', 'T', 'U', 'B']
mask = episodes['RNE'].isin(code_list) | episodes['RNE'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_144():
error = ErrorDefinition(
code='144',
description='The legal status code is not a valid code.',
affected_fields=['LS'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'C1',
'C2',
'D1',
'E1',
'V2',
'V3',
'V4',
'J1',
'J2',
'J3',
'L1',
'L2',
'L3'
]
mask = episodes['LS'].isin(code_list) | episodes['LS'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_145():
error = ErrorDefinition(
code='145',
description='Category of need code is not a valid code.',
affected_fields=['CIN'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'N1',
'N2',
'N3',
'N4',
'N5',
'N6',
'N7',
'N8',
]
mask = episodes['CIN'].isin(code_list) | episodes['CIN'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_146():
error = ErrorDefinition(
code='146',
description='Placement type code is not a valid code.',
affected_fields=['PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'A3',
'A4',
'A5',
'A6',
'H5',
'K1',
'K2',
'P1',
'P2',
'P3',
'R1',
'R2',
'R3',
'R5',
'S1',
'T0',
'T1',
'T2',
'T3',
'T4',
'U1',
'U2',
'U3',
'U4',
'U5',
'U6',
'Z1'
]
mask = episodes['PLACE'].isin(code_list) | episodes['PLACE'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_149():
error = ErrorDefinition(
code='149',
        description='Reason episode ceased code is not valid.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'E11',
'E12',
'E2',
'E3',
'E4A',
'E4B',
'E13',
'E41',
'E45',
'E46',
'E47',
'E48',
'E5',
'E6',
'E7',
'E8',
'E9',
'E14',
'E15',
'E16',
'E17',
'X1'
]
mask = episodes['REC'].isin(code_list) | episodes['REC'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_167():
error = ErrorDefinition(
code='167',
description='Data entry for participation is invalid or blank.',
affected_fields=['REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
review = dfs['Reviews']
code_list = ['PN0', 'PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7']
        mask = (review['REVIEW'].notna() & review['REVIEW_CODE'].isin(code_list)) | (
                review['REVIEW'].isna() & review['REVIEW_CODE'].isna())
validation_error_mask = ~mask
validation_error_locations = review.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_101():
error = ErrorDefinition(
code='101',
description='Gender code is not valid.',
affected_fields=['SEX'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
header = dfs['Header']
code_list = ['1', '2']
mask = header['SEX'].astype(str).isin(code_list)
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_141():
error = ErrorDefinition(
code='141',
description='Date episode began is not a valid date.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce').notna()
na_location = episodes['DECOM'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_147():
error = ErrorDefinition(
code='147',
description='Date episode ceased is not a valid date.',
affected_fields=['DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce').notna()
na_location = episodes['DEC'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_171():
error = ErrorDefinition(
code='171',
description="Date of birth of mother's child is not a valid date.",
affected_fields=['MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
mask = pd.to_datetime(header['MC_DOB'], format='%d/%m/%Y', errors='coerce').notna()
na_location = header['MC_DOB'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_102():
error = ErrorDefinition(
code='102',
description='Date of birth is not a valid date.',
affected_fields=['DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
mask = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce').notna()
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_112():
error = ErrorDefinition(
code='112',
description='Date should be placed for adoption is not a valid date.',
affected_fields=['DATE_INT'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce').notna()
na_location = ad1['DATE_INT'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = ad1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_115():
error = ErrorDefinition(
code='115',
description="Date of Local Authority's (LA) decision that a child should be placed for adoption is not a valid date.",
affected_fields=['DATE_PLACED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
adopt = dfs['PlacedAdoption']
mask = pd.to_datetime(adopt['DATE_PLACED'], format='%d/%m/%Y', errors='coerce').notna()
na_location = adopt['DATE_PLACED'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = adopt.index[validation_error_mask]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_116():
error = ErrorDefinition(
code='116',
description="Date of Local Authority's (LA) decision that a child should no longer be placed for adoption is not a valid date.",
affected_fields=['DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
adopt = dfs['PlacedAdoption']
mask = pd.to_datetime(adopt['DATE_PLACED_CEASED'], format='%d/%m/%Y', errors='coerce').notna()
na_location = adopt['DATE_PLACED_CEASED'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = adopt.index[validation_error_mask]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_392c():
error = ErrorDefinition(
code='392c',
description='Postcode(s) provided are invalid.',
affected_fields=['HOME_POST', 'PL_POST'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
home_provided = episodes['HOME_POST'].notna()
home_details = merge_postcodes(episodes, "HOME_POST")
home_valid = home_details['pcd'].notna()
pl_provided = episodes['PL_POST'].notna()
pl_details = merge_postcodes(episodes, "PL_POST")
pl_valid = pl_details['pcd'].notna()
error_mask = (home_provided & ~home_valid) | (pl_provided & ~pl_valid)
return {'Episodes': episodes.index[error_mask].tolist()}
return error, _validate
def validate_213():
error = ErrorDefinition(
code='213',
description='Placement provider information not required.',
affected_fields=['PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = df['PLACE'].isin(['T0', 'T1', 'T2', 'T3', 'T4', 'Z1']) & df['PLACE_PROVIDER'].notna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_168():
error = ErrorDefinition(
code='168',
description='Unique Pupil Number (UPN) is not valid. If unknown, default codes should be UN1, UN2, UN3, UN4 or UN5.',
affected_fields=['UPN'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
df = dfs['Header']
mask = df['UPN'].str.match(r'(^((?![IOS])[A-Z]){1}(\d{12}|\d{11}[A-Z]{1})$)|^(UN[1-5])$', na=False)
mask = ~mask
return {'Header': df.index[mask].tolist()}
return error, _validate
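# A hedged illustration of what the UPN pattern in validate_168 accepts; the example
# strings are invented. The regular expression allows one leading letter other than
# I, O or S followed by twelve digits (or eleven digits and a final letter), or one
# of the UN1-UN5 "unknown" codes.
def _example_upn_pattern():
    import pandas as pd
    upns = pd.Series(['A123456789012', 'A12345678901B', 'UN3', 'I123456789012', 'UN7'])
    pattern = r'(^((?![IOS])[A-Z]){1}(\d{12}|\d{11}[A-Z]{1})$)|^(UN[1-5])$'
    return upns.str.match(pattern, na=False)  # True, True, True, False, False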
def validate_388():
error = ErrorDefinition(
code='388',
description='Reason episode ceased is coded new episode begins, but there is no continuation episode.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
df['DECOM'] = pd.to_datetime(df['DECOM'], format='%d/%m/%Y', errors='coerce')
df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce')
df['DECOM'] = df['DECOM'].fillna('01/01/1901') # Watch for potential future issues
df = df.sort_values(['CHILD', 'DECOM'])
df['DECOM_NEXT_EPISODE'] = df.groupby(['CHILD'])['DECOM'].shift(-1)
# The max DECOM for each child is also the one with no next episode
# And we also add the skipna option
# grouped_decom_by_child = df.groupby(['CHILD'])['DECOM'].idxmax(skipna=True)
no_next = df.DECOM_NEXT_EPISODE.isna() & df.CHILD.notna()
# Dataframe with the maximum DECOM removed
max_decom_removed = df[~no_next]
# Dataframe with the maximum DECOM only
max_decom_only = df[no_next]
# Case 1: If reason episode ceased is coded X1 there must be a subsequent episode
# starting on the same day.
case1 = max_decom_removed[(max_decom_removed['REC'] == 'X1') &
(max_decom_removed['DEC'].notna()) &
(max_decom_removed['DECOM_NEXT_EPISODE'].notna()) &
(max_decom_removed['DEC'] != max_decom_removed['DECOM_NEXT_EPISODE'])]
# Case 2: If an episode ends but the child continues to be looked after, a new
# episode should start on the same day.The reason episode ceased code of
# the episode which ends must be X1.
case2 = max_decom_removed[(max_decom_removed['REC'] != 'X1') &
(max_decom_removed['REC'].notna()) &
(max_decom_removed['DEC'].notna()) &
(max_decom_removed['DECOM_NEXT_EPISODE'].notna()) &
(max_decom_removed['DEC'] == max_decom_removed['DECOM_NEXT_EPISODE'])]
# Case 3: If a child ceases to be looked after reason episode ceased code X1 must
# not be used.
case3 = max_decom_only[(max_decom_only['DEC'].notna()) &
(max_decom_only['REC'] == 'X1')]
mask_case1 = case1.index.tolist()
mask_case2 = case2.index.tolist()
mask_case3 = case3.index.tolist()
mask = mask_case1 + mask_case2 + mask_case3
mask.sort()
return {'Episodes': mask}
return error, _validate
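# Minimal sketch (invented rows, not part of validate_388): groupby(...).shift(-1) lines
# each episode up with the next episode for the same child, which is how the rule above
# decides whether an episode ending in 'X1' is followed by a continuation episode.
def _example_next_episode_lookup():
    import pandas as pd
    df = pd.DataFrame({
        'CHILD': ['A', 'A', 'B'],
        'DECOM': pd.to_datetime(['01/01/2020', '01/03/2020', '01/02/2020'], format='%d/%m/%Y'),
    })
    df = df.sort_values(['CHILD', 'DECOM'])
    df['DECOM_NEXT_EPISODE'] = df.groupby('CHILD')['DECOM'].shift(-1)
    # The last episode of each child has no next episode, so the shifted value is NaT.
    return df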
def validate_113():
error = ErrorDefinition(
code='113',
description='Date matching child and adopter(s) is not a valid date.',
affected_fields=['DATE_MATCH'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = pd.to_datetime(ad1['DATE_MATCH'], format='%d/%m/%Y', errors='coerce').notna()
na_location = ad1['DATE_MATCH'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = ad1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_134():
error = ErrorDefinition(
code='134',
description='Data on adoption should not be entered for the OC3 cohort.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR',
'SEX_ADOPTR', 'LS_ADOPTR'],
)
def _validate(dfs):
if 'OC3' not in dfs or 'AD1' not in dfs:
return {}
else:
oc3 = dfs['OC3']
ad1 = dfs['AD1']
ad1['ad1_index'] = ad1.index
all_data = ad1.merge(oc3, how='left', on='CHILD')
na_oc3_data = (
all_data['IN_TOUCH'].isna() &
all_data['ACTIV'].isna() &
all_data['ACCOM'].isna()
)
na_ad1_data = (
all_data['DATE_INT'].isna() &
all_data['DATE_MATCH'].isna() &
all_data['FOSTER_CARE'].isna() &
all_data['NB_ADOPTR'].isna() &
all_data['SEX_ADOPTR'].isna() &
all_data['LS_ADOPTR'].isna()
)
validation_error = ~na_oc3_data & ~na_ad1_data
validation_error_locations = all_data.loc[validation_error, 'ad1_index'].unique()
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_119():
error = ErrorDefinition(
code='119',
description='If the decision is made that a child should no longer be placed for adoption, then the date of this decision and the reason why this decision was made must be completed.',
affected_fields=['REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
adopt = dfs['PlacedAdoption']
na_placed_ceased = adopt['DATE_PLACED_CEASED'].isna()
na_reason_ceased = adopt['REASON_PLACED_CEASED'].isna()
validation_error = (na_placed_ceased & ~na_reason_ceased) | (~na_placed_ceased & na_reason_ceased)
validation_error_locations = adopt.index[validation_error]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_159():
error = ErrorDefinition(
code='159',
description='If a child has been recorded as not receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be completed as well.',
affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
mask1 = oc2['SUBSTANCE_MISUSE'].astype(str) == '1'
mask2 = oc2['INTERVENTION_RECEIVED'].astype(str) == '0'
mask3 = oc2['INTERVENTION_OFFERED'].isna()
validation_error = mask1 & mask2 & mask3
validation_error_locations = oc2.index[validation_error]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_142():
error = ErrorDefinition(
code='142',
description='A new episode has started, but the previous episode has not ended.',
affected_fields=['DEC', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
df['DECOM'] = pd.to_datetime(df['DECOM'], format='%d/%m/%Y', errors='coerce')
df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce')
df['DECOM'] = df['DECOM'].fillna('01/01/1901') # Watch for potential future issues
df['DECOM'] = df['DECOM'].replace('01/01/1901', pd.NA)
last_episodes = df.sort_values('DECOM').reset_index().groupby(['CHILD'])['index'].last()
ended_episodes_df = df.loc[~df.index.isin(last_episodes)]
ended_episodes_df = ended_episodes_df[(ended_episodes_df['DEC'].isna() | ended_episodes_df['REC'].isna()) &
ended_episodes_df['CHILD'].notna() & ended_episodes_df[
'DECOM'].notna()]
mask = ended_episodes_df.index.tolist()
return {'Episodes': mask}
return error, _validate
def validate_148():
error = ErrorDefinition(
code='148',
description='Date episode ceased and reason episode ceased must both be coded, or both left blank.',
affected_fields=['DEC', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce')
mask = ((df['DEC'].isna()) & (df['REC'].notna())) | ((df['DEC'].notna()) & (df['REC'].isna()))
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_151():
error = ErrorDefinition(
code='151',
        description="All data items relating to a child's adoption must be coded or left blank.",
        affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
na_date_int = ad1['DATE_INT'].isna()
na_date_match = ad1['DATE_MATCH'].isna()
na_foster_care = ad1['FOSTER_CARE'].isna()
na_nb_adoptr = ad1['NB_ADOPTR'].isna()
na_sex_adoptr = ad1['SEX_ADOPTR'].isna()
na_lsadoptr = ad1['LS_ADOPTR'].isna()
ad1_not_null = (
~na_date_int & ~na_date_match & ~na_foster_care & ~na_nb_adoptr & ~na_sex_adoptr & ~na_lsadoptr)
validation_error = (
~na_date_int | ~na_date_match | ~na_foster_care | ~na_nb_adoptr | ~na_sex_adoptr | ~na_lsadoptr) & ~ad1_not_null
validation_error_locations = ad1.index[validation_error]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
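# Hedged sketch (column names invented) of the "all coded or all blank" check used by
# validate_151 and validate_153: a row is only an error when the group of related fields
# is partially completed.
def _example_all_or_nothing_fields():
    import pandas as pd
    frame = pd.DataFrame({'F1': ['x', None, None], 'F2': ['y', 'y', None]})
    all_filled = frame[['F1', 'F2']].notna().all(axis=1)
    all_blank = frame[['F1', 'F2']].isna().all(axis=1)
    partially_filled = ~all_filled & ~all_blank
    return frame.index[partially_filled].tolist()  # only the middle row is flagged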
def validate_182():
error = ErrorDefinition(
code='182',
description='Data entries on immunisations, teeth checks, health assessments and substance misuse problem identified should be completed or all OC2 fields should be left blank.',
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'CONVICTED',
'HEALTH_CHECK', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
mask1 = (
oc2['IMMUNISATIONS'].isna() |
oc2['TEETH_CHECK'].isna() |
oc2['HEALTH_ASSESSMENT'].isna() |
oc2['SUBSTANCE_MISUSE'].isna()
)
mask2 = (
oc2['CONVICTED'].isna() &
oc2['HEALTH_CHECK'].isna() &
oc2['INTERVENTION_RECEIVED'].isna() &
oc2['INTERVENTION_OFFERED'].isna()
)
validation_error = mask1 & ~mask2
validation_error_locations = oc2.index[validation_error]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_214():
error = ErrorDefinition(
code='214',
description='Placement location information not required.',
affected_fields=['PL_POST', 'URN'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = df['LS'].isin(['V3', 'V4']) & ((df['PL_POST'].notna()) | (df['URN'].notna()))
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_222():
error = ErrorDefinition(
code='222',
description='Ofsted Unique reference number (URN) should not be recorded for this placement type.',
affected_fields=['URN'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
place_code_list = ['H5', 'P1', 'P2', 'P3', 'R1', 'R2', 'R5', 'T0', 'T1', 'T2', 'T3', 'T4', 'Z1']
mask = (df['PLACE'].isin(place_code_list)) & (df['URN'].notna()) & (df['URN'] != 'XXXXXX')
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_366():
error = ErrorDefinition(
code='366',
description='A child cannot change placement during the course of an individual short-term respite break.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = (df['LS'] == 'V3') & (df['RNE'] != 'S')
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_628():
error = ErrorDefinition(
code='628',
description='Motherhood details are not required for care leavers who have not been looked after during the year.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs or 'OC3' not in dfs:
return {}
else:
hea = dfs['Header']
epi = dfs['Episodes']
oc3 = dfs['OC3']
hea = hea.reset_index()
oc3_no_nulls = oc3[oc3[['IN_TOUCH', 'ACTIV', 'ACCOM']].notna().any(axis=1)]
hea_merge_epi = hea.merge(epi, how='left', on='CHILD', indicator=True)
hea_not_in_epi = hea_merge_epi[hea_merge_epi['_merge'] == 'left_only']
cohort_to_check = hea_not_in_epi.merge(oc3_no_nulls, how='inner', on='CHILD')
error_cohort = cohort_to_check[cohort_to_check['MOTHER'].notna()]
error_list = list(set(error_cohort['index'].to_list()))
error_list.sort()
return {'Header': error_list}
return error, _validate
def validate_164():
error = ErrorDefinition(
code='164',
description='Distance is not valid. Please check a valid postcode has been entered.',
affected_fields=['PL_DISTANCE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
distance = pd.to_numeric(df['PL_DISTANCE'], errors='coerce')
# Use a bit of tolerance in these bounds
distance_valid = distance.gt(-0.2) & distance.lt(1001.0)
mask = ~is_short_term & ~distance_valid
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_169():
error = ErrorDefinition(
code='169',
description='Local Authority (LA) of placement is not valid or is missing. Please check a valid postcode has been entered.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
# Because PL_LA is derived, it will always be valid if present
mask = ~is_short_term & df['PL_LA'].isna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_179():
error = ErrorDefinition(
code='179',
description='Placement location code is not a valid code.',
affected_fields=['PL_LOCATION'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
# Because PL_LOCATION is derived, it will always be valid if present
mask = ~is_short_term & df['PL_LOCATION'].isna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_1015():
error = ErrorDefinition(
code='1015',
description='Placement provider is own provision but child not placed in own LA.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
local_authority = dfs['metadata']['localAuthority']
placement_fostering_or_adoption = df['PLACE'].isin([
'A3', 'A4', 'A5', 'A6', 'U1', 'U2', 'U3', 'U4', 'U5', 'U6',
])
own_provision = df['PLACE_PROVIDER'].eq('PR1')
is_short_term = df['LS'].isin(['V3', 'V4'])
is_pl_la = df['PL_LA'].eq(local_authority)
checked_episodes = ~placement_fostering_or_adoption & ~is_short_term & own_provision
checked_episodes = checked_episodes & df['LS'].notna() & df['PLACE'].notna()
mask = checked_episodes & ~is_pl_la
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_411():
error = ErrorDefinition(
code='411',
description='Placement location code disagrees with LA of placement.',
affected_fields=['PL_LOCATION'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
local_authority = dfs['metadata']['localAuthority']
mask = df['PL_LOCATION'].eq('IN') & df['PL_LA'].ne(local_authority)
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_420():
error = ErrorDefinition(
code='420',
description='LA of placement completed but child is looked after under legal status V3 or V4.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
mask = is_short_term & df['PL_LA'].notna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_355():
error = ErrorDefinition(
code='355',
        description='Episode appears to have lasted for less than 24 hours.',
affected_fields=['DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
            mask = df['DEC'].notna() & (df['DECOM'].astype(str) == df['DEC'].astype(str))  # open episodes (blank DEC) cannot last under 24 hours
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_586():
error = ErrorDefinition(
code='586',
        description="Dates of missing periods are before child's date of birth.",
affected_fields=['MIS_START'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
df = dfs['Missing']
df['DOB'] = pd.to_datetime(df['DOB'], format='%d/%m/%Y', errors='coerce')
df['MIS_START'] = pd.to_datetime(df['MIS_START'], format='%d/%m/%Y', errors='coerce')
error_mask = df['MIS_START'].notna() & (df['MIS_START'] <= df['DOB'])
return {'Missing': df.index[error_mask].to_list()}
return error, _validate
def validate_630():
error = ErrorDefinition(
code='630',
description='Information on previous permanence option should be returned.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'PrevPerm' not in dfs or 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
pre = dfs['PrevPerm']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
epi = epi.reset_index()
# Form the episode dataframe which has an 'RNE' of 'S' in this financial year
epi_has_rne_of_S_in_year = epi[(epi['RNE'] == 'S') & (epi['DECOM'] >= collection_start)]
# Merge to see
# 1) which CHILD ids are missing from the PrevPerm file
# 2) which CHILD are in the prevPerm file, but don't have the LA_PERM/DATE_PERM field completed where they should be
# 3) which CHILD are in the PrevPerm file, but don't have the PREV_PERM field completed.
merged_epi_preperm = epi_has_rne_of_S_in_year.merge(pre, on='CHILD', how='left', indicator=True)
error_not_in_preperm = merged_epi_preperm['_merge'] == 'left_only'
error_wrong_values_in_preperm = (merged_epi_preperm['PREV_PERM'] != 'Z1') & (
merged_epi_preperm[['LA_PERM', 'DATE_PERM']].isna().any(axis=1))
error_null_prev_perm = (merged_epi_preperm['_merge'] == 'both') & (merged_epi_preperm['PREV_PERM'].isna())
error_mask = error_not_in_preperm | error_wrong_values_in_preperm | error_null_prev_perm
error_list = merged_epi_preperm[error_mask]['index'].to_list()
error_list = list(set(error_list))
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_501():
error = ErrorDefinition(
code='501',
description='A new episode has started before the end date of the previous episode.',
affected_fields=['DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi = epi.reset_index()
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi['DEC'] = pd.to_datetime(epi['DEC'], format='%d/%m/%Y', errors='coerce')
epi = epi.sort_values(['CHILD', 'DECOM'])
epi_lead = epi.shift(1)
epi_lead = epi_lead.reset_index()
m_epi = epi.merge(epi_lead, left_on='index', right_on='level_0', suffixes=('', '_prev'))
error_cohort = m_epi[(m_epi['CHILD'] == m_epi['CHILD_prev']) & (m_epi['DECOM'] < m_epi['DEC_prev'])]
error_list = error_cohort['index'].to_list()
error_list.sort()
return {'Episodes': error_list}
return error, _validate
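# Illustrative sketch only (made-up episodes): validate_501 can be read as "after sorting
# by child and start date, an episode errors when it starts before the previous episode of
# the same child has ended". This sketch uses a groupby/shift formulation rather than the
# index merge used above, purely to show the idea.
def _example_overlapping_episodes():
    import pandas as pd
    epi = pd.DataFrame({
        'CHILD': ['A', 'A', 'B'],
        'DECOM': pd.to_datetime(['01/01/2020', '15/01/2020', '01/02/2020'], format='%d/%m/%Y'),
        'DEC': pd.to_datetime(['20/01/2020', '01/03/2020', None], format='%d/%m/%Y'),
    })
    epi = epi.sort_values(['CHILD', 'DECOM'])
    previous = epi.groupby('CHILD').shift(1)
    overlaps = epi['DECOM'] < previous['DEC']  # NaT comparisons are False, so first episodes never flag
    return epi.index[overlaps].tolist()  # -> [1]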
def validate_502():
error = ErrorDefinition(
code='502',
        description="Last year's record ended with an open episode. The date on which that episode started does not match the start date of the first episode on this year's record.",
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi_last = dfs['Episodes_last']
epi = epi.reset_index()
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi_last['DECOM'] = pd.to_datetime(epi_last['DECOM'], format='%d/%m/%Y', errors='coerce')
epi_last_no_dec = epi_last[epi_last['DEC'].isna()]
epi_min_decoms_index = epi[['CHILD', 'DECOM']].groupby(['CHILD'])['DECOM'].idxmin()
epi_min_decom_df = epi.loc[epi_min_decoms_index, :]
merged_episodes = epi_min_decom_df.merge(epi_last_no_dec, on='CHILD', how='inner')
error_cohort = merged_episodes[merged_episodes['DECOM_x'] != merged_episodes['DECOM_y']]
error_list = error_cohort['index'].to_list()
error_list = list(set(error_list))
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_153():
error = ErrorDefinition(
code='153',
description="All data items relating to a child's activity or accommodation after leaving care must be coded or left blank.",
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
oc3 = dfs['OC3']
oc3_not_na = (
oc3['IN_TOUCH'].notna() &
oc3['ACTIV'].notna() &
oc3['ACCOM'].notna()
)
oc3_all_na = (
oc3['IN_TOUCH'].isna() &
oc3['ACTIV'].isna() &
oc3['ACCOM'].isna()
)
validation_error = ~oc3_not_na & ~oc3_all_na
validation_error_locations = oc3.index[validation_error]
return {'OC3': validation_error_locations.to_list()}
return error, _validate
def validate_166():
error = ErrorDefinition(
code='166',
description="Date of review is invalid or blank.",
affected_fields=['REVIEW'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
review = dfs['Reviews']
error_mask = pd.to_datetime(review['REVIEW'], format='%d/%m/%Y', errors='coerce').isna()
validation_error_locations = review.index[error_mask]
return {'Reviews': validation_error_locations.to_list()}
return error, _validate
def validate_174():
error = ErrorDefinition(
code='174',
description="Mother's child date of birth is recorded but gender shows that the child is a male.",
affected_fields=['SEX', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
child_is_male = header['SEX'].astype(str) == '1'
mc_dob_recorded = header['MC_DOB'].notna()
error_mask = child_is_male & mc_dob_recorded
validation_error_locations = header.index[error_mask]
return {'Header': validation_error_locations.to_list()}
return error, _validate
def validate_180():
error = ErrorDefinition(
code='180',
description="Data entry for the strengths and difficulties questionnaire (SDQ) score is invalid.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
oc2['SDQ_SCORE'] = pd.to_numeric(oc2['SDQ_SCORE'], errors='coerce')
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['SDQ_SCORE'].isin(range(41))
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_181():
error = ErrorDefinition(
code='181',
description="Data items relating to children looked after continuously for 12 months should be completed with a 0 or 1.",
affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
code_list = ['0', '1']
fields_of_interest = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
            error_mask = (
                oc2[fields_of_interest].notna()
                & ~oc2[fields_of_interest].astype(str).isin(code_list)
            ).any(axis=1)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_192():
error = ErrorDefinition(
code='192',
description="Child has been identified as having a substance misuse problem but the additional item on whether an intervention was received has been left blank.",
affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
misuse = oc2['SUBSTANCE_MISUSE'].astype(str) == '1'
intervention_blank = oc2['INTERVENTION_RECEIVED'].isna()
error_mask = misuse & intervention_blank
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_193():
error = ErrorDefinition(
code='193',
description="Child not identified as having a substance misuse problem but at least one of the two additional items on whether an intervention were offered and received have been completed.",
affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
no_substance_misuse = oc2['SUBSTANCE_MISUSE'].isna() | (oc2['SUBSTANCE_MISUSE'].astype(str) == '0')
intervention_not_blank = oc2['INTERVENTION_RECEIVED'].notna() | oc2['INTERVENTION_OFFERED'].notna()
error_mask = no_substance_misuse & intervention_not_blank
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_197a():
error = ErrorDefinition(
code='197a',
description="Reason for no Strengths and Difficulties (SDQ) score is not required if Strengths and Difficulties Questionnaire score is filled in.",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
sdq_filled_in = oc2['SDQ_SCORE'].notna()
reason_filled_in = oc2['SDQ_REASON'].notna()
error_mask = sdq_filled_in & reason_filled_in
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_567():
error = ErrorDefinition(
code='567',
description='The date that the missing episode or episode that the child was away from placement without authorisation ended is before the date that it started.',
affected_fields=['MIS_START', 'MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
mis = dfs['Missing']
mis['MIS_START'] = pd.to_datetime(mis['MIS_START'], format='%d/%m/%Y', errors='coerce')
mis['MIS_END'] = pd.to_datetime(mis['MIS_END'], format='%d/%m/%Y', errors='coerce')
mis_error = mis[mis['MIS_START'] > mis['MIS_END']]
return {'Missing': mis_error.index.to_list()}
return error, _validate
def validate_304():
error = ErrorDefinition(
code='304',
description='Date unaccompanied asylum-seeking child (UASC) status ceased must be on or before the 18th birthday of a child.',
affected_fields=['DUC'],
)
def _validate(dfs):
if 'UASC' not in dfs:
return {}
else:
uasc = dfs['UASC']
uasc['DOB'] = pd.to_datetime(uasc['DOB'], format='%d/%m/%Y', errors='coerce')
uasc['DUC'] = pd.to_datetime(uasc['DUC'], format='%d/%m/%Y', errors='coerce')
mask = uasc['DUC'].notna() & (uasc['DUC'] > uasc['DOB'] + pd.offsets.DateOffset(years=18))
return {'UASC': uasc.index[mask].to_list()}
return error, _validate
def validate_333():
error = ErrorDefinition(
code='333',
description='Date should be placed for adoption must be on or prior to the date of matching child with adopter(s).',
affected_fields=['DATE_INT'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
adt = dfs['AD1']
adt['DATE_MATCH'] = pd.to_datetime(adt['DATE_MATCH'], format='%d/%m/%Y', errors='coerce')
adt['DATE_INT'] = pd.to_datetime(adt['DATE_INT'], format='%d/%m/%Y', errors='coerce')
# If <DATE_MATCH> provided, then <DATE_INT> must also be provided and be <= <DATE_MATCH>
mask1 = adt['DATE_MATCH'].notna() & adt['DATE_INT'].isna()
mask2 = adt['DATE_MATCH'].notna() & adt['DATE_INT'].notna() & (adt['DATE_INT'] > adt['DATE_MATCH'])
mask = mask1 | mask2
return {'AD1': adt.index[mask].to_list()}
return error, _validate
def validate_1011():
error = ErrorDefinition(
code='1011',
description='This child is recorded as having his/her care transferred to another local authority for the final episode and therefore should not have the care leaver information completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs or 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
oc3 = dfs['OC3']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
# If final <REC> = 'E3' then <IN_TOUCH>; <ACTIV> and <ACCOM> should not be provided
epi.sort_values(['CHILD', 'DECOM'], inplace=True)
grouped_decom_by_child = epi.groupby(['CHILD'])['DECOM'].idxmax(skipna=True)
max_decom_only = epi.loc[epi.index.isin(grouped_decom_by_child), :]
E3_is_last = max_decom_only[max_decom_only['REC'] == 'E3']
oc3.reset_index(inplace=True)
cohort_to_check = oc3.merge(E3_is_last, on='CHILD', how='inner')
error_mask = cohort_to_check[['IN_TOUCH', 'ACTIV', 'ACCOM']].notna().any(axis=1)
error_list = cohort_to_check['index'][error_mask].to_list()
error_list = list(set(error_list))
error_list.sort()
return {'OC3': error_list}
return error, _validate
def validate_574():
error = ErrorDefinition(
code='574',
description='A new missing/away from placement without authorisation period cannot start when the previous missing/away from placement without authorisation period is still open. Missing/away from placement without authorisation periods should also not overlap.',
affected_fields=['MIS_START', 'MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
mis = dfs['Missing']
mis['MIS_START'] = pd.to_datetime(mis['MIS_START'], format='%d/%m/%Y', errors='coerce')
mis['MIS_END'] = | pd.to_datetime(mis['MIS_END'], format='%d/%m/%Y', errors='coerce') | pandas.to_datetime |
import kabuki
import hddm
import numpy as np
import pandas as pd
from numpy.random import rand
from scipy.stats import uniform, norm
from copy import copy
def gen_single_params_set(include=()):
"""Returns a dict of DDM parameters with random values for a singel conditin
the function is used by gen_rand_params.
:Optional:
include : tuple
Which optional parameters include. Can be
any combination of:
* 'z' (bias, default=0.5)
* 'sv' (inter-trial drift variability)
* 'sz' (inter-trial bias variability)
* 'st' (inter-trial non-decision time variability)
Special arguments are:
* 'all': include all of the above
* 'all_inter': include all of the above except 'z'
"""
params = {}
if include == 'all':
include = ['z', 'sv', 'sz', 'st']
elif include == 'all_inter':
include = ['sv', 'sz', 'st']
params['sv'] = 2.5 * rand() if 'sv' in include else 0
params['sz'] = rand() * 0.4 if 'sz' in include else 0
params['st'] = rand() * 0.35 if 'st' in include else 0
params['z'] = .4 + rand() * 0.2 if 'z' in include else 0.5
# Simple parameters
params['v'] = (rand() - .5) * 8
params['t'] = 0.2 + rand() * 0.3
params['a'] = 0.5 + rand() * 1.5
if 'pi' in include or 'gamma' in include:
params['pi'] = max(rand() * 0.1, 0.01)
#params['gamma'] = rand()
assert hddm.utils.check_params_valid(**params)
return params
def gen_rand_params(include=(), cond_dict=None, seed=None):
"""Returns a dict of DDM parameters with random values.
:Optional:
include : tuple
Which optional parameters include. Can be
any combination of:
* 'z' (bias, default=0.5)
* 'sv' (inter-trial drift variability)
* 'sz' (inter-trial bias variability)
* 'st' (inter-trial non-decision time variability)
Special arguments are:
* 'all': include all of the above
* 'all_inter': include all of the above except 'z'
cond_dict : dictionary
cond_dict is used when multiple conditions are desired.
the dictionary has the form of {param1: [value_1, ... , value_n], param2: [value_1, ... , value_n]}
and the function will output n sets of parameters. each set with values from the
appropriate place in the dictionary
for instance if cond_dict={'v': [0, 0.5, 1]} then 3 parameters set will be created.
the first with v=0 the second with v=0.5 and the third with v=1.
seed: float
random seed
Output:
if conditions is None:
params: dictionary
a dictionary holding the parameters values
else:
cond_params: a dictionary holding the parameters for each one of the conditions,
that has the form {'c1': params1, 'c2': params2, ...}
it can be used directly as an argument in gen_rand_data.
merged_params:
a dictionary of parameters that can be used to validate the optimization
and learning algorithms.
"""
# set seed
if seed is not None:
np.random.seed(seed)
# if there is only a single condition then we can use gen_single_params_set
if cond_dict is None:
return gen_single_params_set(include=include)
# generate original parameter set
org_params = gen_single_params_set(include)
# create a merged set
merged_params = org_params.copy()
for name in cond_dict.keys():
del merged_params[name]
cond_params = {}
n_conds = len(list(cond_dict.values())[0])
for i in range(n_conds):
# create a set of parameters for condition i
# put them in i_params, and in cond_params[c#i]
i_params = org_params.copy()
for name in cond_dict.keys():
i_params[name] = cond_dict[name][i]
cond_params['c%d' % i] = i_params
# update merged_params
merged_params['%s(c%d)' % (name, i)] = cond_dict[name][i]
return cond_params, merged_params
####################################################################
# Functions to generate RT distributions with specified parameters #
####################################################################
def gen_rts(size=1000, range_=(-6, 6), dt=1e-3,
intra_sv=1., structured=True, subj_idx=None,
method='cdf', **params):
"""
A private function used by gen_rand_data
Returns a DataFrame of randomly simulated RTs from the DDM.
:Arguments:
params : dict
Parameter names and values to use for simulation.
:Optional:
size : int
Number of RTs to simulate.
range_ : tuple
Minimum (negative) and maximum (positve) RTs.
dt : float
Number of steps/sec.
intra_sv : float
Intra-trial variability.
structured : bool
Return a structured array with fields 'RT'
and 'response'.
subj_idx : int
If set, append column 'subj_idx' with value subj_idx.
method : str
Which method to use to simulate the RTs:
* 'cdf': fast, uses the inverse of cumulative density function to sample, dt can be 1e-2.
* 'drift': slow, simulates each complete drift process, dt should be 1e-4.
"""
if 'v_switch' in params and method != 'drift':
print("Warning: Only drift method supports changes in drift-rate. v_switch will be ignored.")
# Set optional default values if they are not provided
for var_param in ('sv', 'sz', 'st'):
if var_param not in params:
params[var_param] = 0
if 'z' not in params:
params['z'] = .5
if 'sv' not in params:
params['sv'] = 0
if 'sz' not in params:
params['sz'] = 0
# check sample
if isinstance(size, tuple): # this line is because pymc stochastic use tuple for sample size
if size == ():
size = 1
else:
size = size[0]
if method == 'cdf_py':
rts = _gen_rts_from_cdf(params, size, range_, dt)
elif method == 'drift':
rts = _gen_rts_from_simulated_drift(params, size, dt, intra_sv)[0]
elif method == 'cdf':
rts = hddm.wfpt.gen_rts_from_cdf(params['v'], params['sv'], params['a'], params['z'],
params['sz'], params[
't'], params['st'],
size, range_[0], range_[1], dt)
else:
raise TypeError("Sampling method %s not found." % method)
if not structured:
return rts
else:
data = pd.DataFrame(rts, columns=['rt'])
data['response'] = 1.
data['response'][data['rt'] < 0] = 0.
data['rt'] = np.abs(data['rt'])
return data
def _gen_rts_from_simulated_drift(params, samples=1000, dt=1e-4, intra_sv=1.):
"""Returns simulated RTs from simulating the whole drift-process.
:Arguments:
params : dict
Parameter names and values.
:Optional:
samlpes : int
How many samples to generate.
dt : float
How many steps/sec.
intra_sv : float
Intra-trial variability.
:SeeAlso:
gen_rts
"""
from numpy.random import rand
if samples is None:
samples = 1
nn = 1000
a = params['a']
v = params['v']
if 'v_switch' in params:
switch = True
t_switch = params['t_switch'] / dt
# Hack so that we will always step into a switch
nn = int(round(t_switch))
else:
switch = False
# create delay
if 'st' in params:
start_delay = (uniform.rvs(loc=params['t'], scale=params['st'], size=samples)
- params['st'] / 2.)
else:
start_delay = np.ones(samples) * params['t']
# create starting_points
if 'sz' in params:
starting_points = (uniform.rvs(loc=params['z'], scale=params['sz'], size=samples)
- params['sz'] / 2.) * a
else:
starting_points = np.ones(samples) * params['z'] * a
rts = np.empty(samples)
step_size = np.sqrt(dt) * intra_sv
drifts = []
for i_sample in range(samples):
drift = np.array([])
crossed = False
iter = 0
y_0 = starting_points[i_sample]
# drifting...
if 'sv' in params and params['sv'] != 0:
drift_rate = norm.rvs(v, params['sv'])
else:
drift_rate = v
if 'v_switch' in params:
if 'V_switch' in params and params['V_switch'] != 0:
drift_rate_switch = norm.rvs(
params['v_switch'], params['V_switch'])
else:
drift_rate_switch = params['v_switch']
prob_up = 0.5 * (1 + np.sqrt(dt) / intra_sv * drift_rate)
while (not crossed):
# Generate nn steps
iter += 1
if iter == 2 and switch:
prob_up = 0.5 * (1 + np.sqrt(dt) /
intra_sv * drift_rate_switch)
position = ((rand(nn) < prob_up) * 2 - 1) * step_size
position[0] += y_0
position = np.cumsum(position)
# Find boundary crossings
cross_idx = np.where((position < 0) | (position > a))[0]
drift = np.concatenate((drift, position))
if cross_idx.shape[0] > 0:
crossed = True
else:
# If not crossed, set last position as starting point
# for next nn steps to continue drift
y_0 = position[-1]
# find the boundary interception
y2 = position[cross_idx[0]]
if cross_idx[0] != 0:
y1 = position[cross_idx[0] - 1]
else:
y1 = y_0
m = (y2 - y1) / dt # slope
# y = m*x + b
b = y2 - m * ((iter - 1) * nn + cross_idx[0]) * dt # intercept
if y2 < 0:
rt = ((0 - b) / m)
else:
rt = ((a - b) / m)
rts[i_sample] = (rt + start_delay[i_sample]) * np.sign(y2)
delay = start_delay[i_sample] / dt
drifts.append(np.concatenate(
(np.ones(int(delay)) * starting_points[i_sample], drift[:int(abs(rt) / dt)])))
return rts, drifts
def pdf_with_params(rt, params):
"""Helper function that calls full_pdf and gets the parameters
from the dict params.
"""
v = params['v']
V = params['sv']
z = params['z']
Z = params['sz']
t = params['t']
T = params['st']
a = params['a']
return hddm.wfpt.full_pdf(rt, v=v, V=V, a=a, z=z, Z=Z, t=t,
T=T, err=1e-4, n_st=2, n_sz=2, use_adaptive=1, simps_err=1e-3)
def _gen_rts_from_cdf(params, samples=1000):
"""Returns simulated RTs sampled from the inverse of the CDF.
:Arguments:
params : dict
Parameter names and values.
:Optional:
samlpes : int
How many samples to generate.
:SeeAlso:
gen_rts
"""
v = params['v']
V = params['sv']
z = params['z']
Z = params['sz']
t = params['t']
T = params['st']
a = params['a']
return hddm.likelihoods.wfpt.ppf(np.random.rand(samples), args=(v, V, a, z, Z, t, T))
def gen_rand_data(params=None, n_fast_outliers=0, n_slow_outliers=0, **kwargs):
"""Generate simulated RTs with random parameters.
:Optional:
params : dict <default=generate randomly>
Either dictionary mapping param names to values.
Or dictionary mapping condition name to parameter
dictionary (see example below).
If not supplied, takes random values.
n_fast_outliers : int <default=0>
How many fast outliers to add (outlier_RT < ter)
n_slow_outliers : int <default=0>
How many late outliers to add.
The rest of the arguments are forwarded to kabuki.generate.gen_rand_data
:Returns:
data array with RTs
parameter values
:Example:
# Generate random data set
>>> data, params = hddm.generate.gen_rand_data({'v':0, 'a':2, 't':.3},
size=100, subjs=5)
# Generate 2 conditions
>>> data, params = hddm.generate.gen_rand_data({'cond1': {'v':0, 'a':2, 't':.3},
'cond2': {'v':1, 'a':2, 't':.3}})
:Notes:
Wrapper function for kabuki.generate.gen_rand_data. See
the help doc of that function for more options.
"""
if params is None:
params = gen_rand_params()
from numpy import inf
# set valid param ranges
bounds = {'a': (0, inf),
'z': (0, 1),
't': (0, inf),
'st': (0, inf),
'sv': (0, inf),
'sz': (0, 1)
}
if 'share_noise' not in kwargs:
kwargs['share_noise'] = set(['a', 'v', 't', 'st', 'sz', 'sv', 'z'])
# Create RT data
data, subj_params = kabuki.generate.gen_rand_data(gen_rts, params,
check_valid_func=hddm.utils.check_params_valid,
bounds=bounds, **kwargs)
# add outliers
seed = kwargs.get('seed', None)
data = add_outliers(data, n_fast=n_fast_outliers,
n_slow=n_slow_outliers, seed=seed)
return data, subj_params
def gen_rand_rlddm_data(a, t, scaler, alpha, size=1, p_upper=1, p_lower=0, z=0.5, q_init=0.5, pos_alpha=float('nan'), subjs=1, split_by=0, mu_upper=1, mu_lower=0, sd_upper=0.1, sd_lower=0.1, binary_outcome=True, uncertainty=False):
all_data = []
tg = t
ag = a
alphag = alpha
pos_alphag = pos_alpha
scalerg = scaler
for s in range(0, subjs):
t = np.maximum(0.05, np.random.normal(
loc=tg, scale=0.05, size=1)) if subjs > 1 else tg
a = np.maximum(0.05, np.random.normal(
loc=ag, scale=0.15, size=1)) if subjs > 1 else ag
alpha = np.minimum(np.minimum(np.maximum(0.001, np.random.normal(loc=alphag, scale=0.05, size=1)), alphag+alphag),1) if subjs > 1 else alphag
scaler = np.random.normal(
loc=scalerg, scale=0.25, size=1) if subjs > 1 else scalerg
if np.isnan(pos_alpha):
pos_alfa = alpha
else:
pos_alfa = np.maximum(0.001,np.random.normal(loc=pos_alphag, scale=0.05, size=1)) if subjs > 1 else pos_alphag
n = size
q_up = np.tile([q_init], n)
q_low = np.tile([q_init], n)
response = np.tile([0.5], n)
feedback = np.tile([0.5], n)
rt = np.tile([0], n)
if binary_outcome:
rew_up = np.random.binomial(1, p_upper, n).astype(float)
rew_low = np.random.binomial(1, p_lower, n).astype(float)
else:
rew_up = np.random.normal(mu_upper, sd_upper, n)
rew_low = np.random.normal(mu_lower, sd_lower, n)
sim_drift = np.tile([0], n)
subj_idx = np.tile([s], n)
d = {'q_up': q_up, 'q_low': q_low, 'sim_drift': sim_drift, 'rew_up': rew_up, 'rew_low': rew_low,
'response': response, 'rt': rt, 'feedback': feedback, 'subj_idx': subj_idx, 'split_by': split_by, 'trial': 1}
df = pd.DataFrame(data=d)
df = df[['q_up', 'q_low', 'sim_drift', 'rew_up', 'rew_low',
'response', 'rt', 'feedback', 'subj_idx', 'split_by', 'trial']]
data, params = hddm.generate.gen_rand_data(
{'a': a, 't': t, 'v': df.loc[0, 'sim_drift'], 'z': z}, subjs=1, size=1)
df.loc[0, 'response'] = data.response[0]
df.loc[0, 'rt'] = data.rt[0]
if (data.response[0] == 1.0):
df.loc[0, 'feedback'] = df.loc[0, 'rew_up']
if (df.loc[0, 'feedback'] > df.loc[0, 'q_up']):
alfa = pos_alfa
else:
alfa = alpha
else:
df.loc[0, 'feedback'] = df.loc[0, 'rew_low']
if (df.loc[0, 'feedback'] > df.loc[0, 'q_low']):
alfa = pos_alfa
else:
alfa = alpha
for i in range(1, n):
df.loc[i, 'trial'] = i + 1
df.loc[i, 'q_up'] = (df.loc[i - 1, 'q_up'] * (1 - df.loc[i - 1, 'response'])) + ((df.loc[i - 1, 'response'])
* (df.loc[i - 1, 'q_up'] + (alfa * (df.loc[i - 1, 'rew_up'] - df.loc[i - 1, 'q_up']))))
df.loc[i, 'q_low'] = (df.loc[i - 1, 'q_low'] * (df.loc[i - 1, 'response'])) + ((1 - df.loc[i - 1, 'response'])
* (df.loc[i - 1, 'q_low'] + (alfa * (df.loc[i - 1, 'rew_low'] - df.loc[i - 1, 'q_low']))))
df.loc[i, 'sim_drift'] = (
df.loc[i, 'q_up'] - df.loc[i, 'q_low']) * (scaler)
data, params = hddm.generate.gen_rand_data(
{'a': a, 't': t, 'v': df.loc[i, 'sim_drift'] , 'z': z}, subjs=1, size=1)
df.loc[i, 'response'] = data.response[0]
df.loc[i, 'rt'] = data.rt[0]
if (data.response[0] == 1.0):
df.loc[i, 'feedback'] = df.loc[i, 'rew_up']
if (df.loc[i, 'feedback'] > df.loc[i, 'q_up']):
alfa = pos_alfa
else:
alfa = alpha
else:
df.loc[i, 'feedback'] = df.loc[i, 'rew_low']
if (df.loc[i, 'feedback'] > df.loc[i, 'q_low']):
alfa = pos_alfa
else:
alfa = alpha
all_data.append(df)
all_data = pd.concat(all_data, axis=0)
all_data = all_data[['q_up', 'q_low', 'sim_drift', 'response',
'rt', 'feedback', 'subj_idx', 'split_by', 'trial']]
return all_data
def gen_rand_rl_data(scaler, alpha, size=1, p_upper=1, p_lower=0, z=0.5, q_init=0.5, pos_alpha=float('nan'), subjs=1, split_by=0, mu_upper=1, mu_lower=0, sd_upper=0.1, sd_lower=0.1, binary_outcome=True):
all_data = []
alphag = alpha
pos_alphag = pos_alpha
scalerg = scaler
for s in range(0, subjs):
alpha = np.minimum(np.minimum(np.maximum(0.001, np.random.normal(loc=alphag, scale=0.05, size=1)), alphag+alphag),1) if subjs > 1 else alphag
scaler = np.random.normal(
loc=scalerg, scale=0.25, size=1) if subjs > 1 else scalerg
if np.isnan(pos_alpha):
pos_alfa = alpha
else:
pos_alfa = np.maximum(0.001, np.random.normal(loc=pos_alphag, scale=0.05, size=1)) if subjs > 1 else pos_alphag
n = size
q_up = np.tile([q_init], n) # initialize q
q_low = np.tile([q_init], n) # initialize q
response = np.tile([0.5], n)
feedback = np.tile([0.5], n)
rt = np.tile([0], n)
if binary_outcome:
rew_up = np.random.binomial(1, p_upper, n).astype(float)
rew_low = np.random.binomial(1, p_lower, n).astype(float)
else:
rew_up = np.random.normal(mu_upper, sd_upper, n)
rew_low = np.random.normal(mu_lower, sd_lower, n)
sim_drift = np.tile([0], n)
p = np.tile([0.5], n)
subj_idx = np.tile([s], n)
d = {'q_up': q_up, 'q_low': q_low, 'p': p, 'sim_drift': sim_drift, 'rew_up': rew_up, 'rew_low': rew_low,
'response': response, 'feedback': feedback, 'subj_idx': subj_idx, 'split_by': split_by, 'trial': 1}
df = | pd.DataFrame(data=d) | pandas.DataFrame |
################################################################################################
# NOTE: I started this code to get better matching results than matching by address,
# but I never finished and thus this code hasn't actually been used yet.
################################################################################################
import pandas as pd
import numpy as np
import re
valid_street_types = [
'ST', 'AVE', 'CT', 'CIR', 'BLVD', 'WAY', 'DR', 'TER', 'HWY', 'HL',
'PL', 'LN', 'RD', 'PARK', 'ALY', 'PLZ', 'ROW', 'WALK', 'SQ', 'SW']
def clean_eviction_data(ev):
if 'index' not in ev.columns:
ev.reset_index(inplace=True)
ev = ev[(~pd.isnull(ev['address'])) & (ev['address'] != 'UNKNOWN')
& (ev['address'].str.contains('^[0-9]'))]
ev.address = ev.address.str.upper()
ev.loc[ev['address'].str.contains('APT\.'), 'apt'] = ev.loc[
ev['address'].str.contains('APT\.'), 'address'].str.split('\s#?APT\.?').str[1]
ev.loc[ev['address'].str.contains('APT\.'), 'address'] = ev.loc[
ev['address'].str.contains('APT\.'), 'address'].str.split('\s#?APT\.?').str[0]
ev.loc[ev['address'].str.contains('#'), 'apt'] = ev.loc[
ev['address'].str.contains('#'), 'address'].str.split('\s#').str[1]
ev.loc[ev['address'].str.contains('#'), 'address'] = ev.loc[
ev['address'].str.contains('#'), 'address'].str.split('\s#').str[0]
ev['address'] = ev['address'].str.replace(
'|'.join(['STRETT', 'STRRET', 'STREET31', 'SREET', 'DTREET', 'STREEET']), 'ST')
ev['address'] = ev['address'].str.replace(
'|'.join(['STRET', 'STRRE', 'STREE$']), 'ST')
ev['address'] = ev['address'].str.replace(
'|'.join(['AVENEU', 'AVENE', 'AVENE', 'AVNEUE', 'AAVE']), 'AVE')
ev['address'] = ev['address'].str.replace('BOUELVARD', 'BLVD')
ev['address'] = ev['address'].str.replace('VAN VAN', 'VAN')
ev['address'] = ev['address'].str.replace('ST ST', 'ST')
ev['address'] = ev['address'].str.replace('AVE AVE', 'AVE')
ev['address'] = ev['address'].str.replace('MERCED MERCED', 'MERCED')
ev['address'] = ev['address'].str.replace('POSTREET', 'POST ST')
ev['address'] = ev['address'].str.replace('21STREET', '21ST ST')
ev['address'] = ev['address'].str.replace('JOOSTREET', 'JOOST AVE')
ev['address'] = ev['address'].str.replace('LOCUSTREET', 'LOCUS ST')
ev['address'] = ev['address'].str.replace('BUSTREET', 'BUSH ST')
ev['address'] = ev['address'].str.replace('1STREET', '1ST ST')
ev['address'] = ev['address'].str.replace('AMHERSTREET', 'AMHERST ST')
ev['address'] = ev['address'].str.replace('TURKSTREET', 'TURK ST')
ev['address'] = ev['address'].str.replace('HARRISOIN', 'HARRISON')
ev['address'] = ev['address'].str.replace('BOARDWAY', 'BROADWAY')
ev['address'] = ev['address'].str.replace("ๆช่ๆถ่งๆช่ฟฆ", "")
ev['address'] = ev['address'].str.replace("ๆถๅฎ", "")
ev['address'] = ev['address'].str.replace("'", "")
ev.loc[ev['address'] == "20 FRANKLIN STREET", 'address'] = "1580-1598 MARKET ST"
ev.loc[ev['address'] == "57 TAYLOR STREET", 'address'] = "101-105 TURK ST"
ev.loc[ev['address'] == "455 EDDY STREET", 'address'] = "350 TURK ST"
ev.loc[ev['address'] == "790 VALLEJO STREET", 'address'] = "1500-1506 POWELL ST"
ev.loc[ev['address'] == "2 EMERY LANE", 'address'] = "734-752 VALLEJO ST"
ev.loc[ev['address'] == "1091 BUSH STREET", 'address'] = "850 LEAVENWORTH ST"
ev.loc[ev['address'] == "795 20TH AVENUE", 'address'] = "4400 FULTON ST"
ev.loc[ev['address'] == "440 DAVIS COURT", 'address'] = "100 WASHINGTON ST"
ev.loc[ev['address'] == "405 12TH AVENUE", 'address'] = "4801 GEARY BLVD"
ev.loc[ev['address'] == "4 BECKETT STREET", 'address'] = "670 JACKSON ST"
ev.loc[ev['address'] == "874 SACRAMENTO STREET", 'address'] = "800 STOCKTON ST"
ev.loc[ev['address'] == "265 NORTH POINT STREET", 'address'] = "2310-2390 POWELL ST"
ev.loc[ev['address'] == "20 12TH STREET", 'address'] = "1613 MARKET ST"
ev.loc[ev['address'] == "609 ASHBURY STREET", 'address'] = "1501-1509 HAIGHT ST"
ev.loc[ev['address'] == "22 VANDEWATER STREET", 'address'] = "333 BAY ST"
ev.loc[ev['address'] == "160 BAY STREET", 'address'] = "2210-2290 STOCKTON ST"
ev.loc[ev['address'] == "505 26TH AVENUE", 'address'] = "6201-6209 GEARY BLVD"
ev.loc[ev['address'] == "3410 22ND STREET", 'address'] = "994-998 GUERRERO ST"
ev.loc[ev['address'] == "1312 UTAH STREET", 'address'] = "2601-2611 24TH ST"
ev.loc[ev['address'] == "1290 HAYES STREET", 'address'] = "600-604 DIVISADERO ST"
ev.loc[ev['address'] == "130 COSO AVE", 'address'] = "1 LUNDY'S LN"
ev.loc[ev['address'] == '3444 16TH STREET', 'address'] = "3440 16TH ST"
ev.loc[ev['address'] == '603 NATOMA STREET', 'address'] = "170 7TH ST"
ev.loc[ev['address'].str.contains('[0-9]02ND'), 'address'] = ev.loc[
ev['address'].str.contains('[0-9]02ND'), 'address'].str.replace('02ND', ' 2ND')
ev.loc[ev['address'].str.contains('\s[0-9A-Z]$'), 'address'] = ev.loc[
ev['address'].str.contains('\s[0-9A-Z]$'), 'address'].str.split(' ').str[:-1].str.join(' ')
ev.loc[ev['address'].str.contains('BROADWAY'), 'street_type'] = 'ST'
# ev.loc[ev['address'].str.contains('CESAR CHAVEZ'), 'street_type'] = 'BLVD'
ev.loc[ev['address'].str.contains('RUSSIA'), 'street_type'] = 'AVE'
ev.loc[ev['petition'] == 'M101171', 'address'] = '531 GONZALEZ DRIVE'
ev.loc[ev['petition'] == 'M111009', 'address'] = '55 CHUMASERO DRIVE'
ev.loc[ev['petition'] == 'M112072', 'address'] = '125 CAMBON DRIVE'
ev.loc[ev['petition'] == 'M131872', 'address'] = '1921 ELLIS STREET'
ev.loc[ev['petition'] == 'M140347', 'address'] = '326 LONDON STREET'
ev.loc[ev['petition'] == 'E980001', 'address'] = '1551A 20TH AVE'
ev.loc[ev['petition'] == 'E991754', 'address'] = '1271 FILBERT ST'
ev.loc[ev['petition'] == 'M2K0279', 'address'] = '2364 FULTON ST'
ev.loc[ev['petition'] == 'S000521', 'address'] = '431 SOMERSET ST'
ev.loc[ev['petition'] == 'S000417', 'address'] = '1201 GUERRERO ST'
# parkmerced
ev.loc[ev['address'].str.contains(
'GONZALEZ|FONT|SERRANO|CHUMASERO|ARBALLO|GARCES|CAMBON|VIDAL|GRIJALVA|TAPIA|BUCARELI|RIVAS|CRESPI|CARDENAS|HIGUERA'),
'address'] = '3711 19TH AVE'
ev = ev[ev['address'] != 'NO ADDRESS PROVIDED']
# clean street types
ev['street_type'] = ev['address'].str.split(' ').str[-1]
st_typ_dict = {'STREET': 'ST', 'AVENUE': 'AVE', 'DRIVE': 'DR', 'BOULEVARD': 'BLVD', 'COURT': 'CT',
'TERRACE': 'TER', 'PLACE': 'PL', 'HIGHWAY': 'HWY', 'LANE': 'LN', 'ROAD': 'RD', 'ALLEY': 'ALY',
'CIRCLE': 'CIR', 'SQUARE': 'SQ', 'PLAZA': 'PLZ', 'HILLS': 'HL', 'HILL': 'HL'
}
ev = ev.replace({'street_type': st_typ_dict})
ev.loc[~ev['street_type'].isin(valid_street_types), 'street_type'] = None
# clean street numbers
ev['street_num'] = ev['address'].str.split(' ').str[0]
ev['house_1'] = ''
ev['house_2'] = ev['street_num']
ev.loc[ev['street_num'].str.contains('-'), 'house_1'] = ev.loc[
ev['street_num'].str.contains('-'), 'street_num'].str.split('-').str[0]
ev.loc[ev['street_num'].str.contains('-'), 'house_2'] = ev.loc[
ev['street_num'].str.contains('-'), 'street_num'].str.split('-').str[1]
ev['house_1'] = ev['house_1'].str.replace('\D', '')
ev['house_2'] = ev['house_2'].str.replace('\D', '')
# clean street names
ev['street_name'] = None
ev.loc[~ | pd.isnull(ev['street_type']) | pandas.isnull |
import pandas as pd
from time import sleep
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.select import Select
url = 'https://www.agmarknet.gov.in/SearchCmmMkt.aspx?Tx_Commodity=78&Tx_State=MH&Tx_District=14&Tx_Market=172&DateFrom=01-Jan-2021&DateTo=27-Sep-2021&Fr_Date=01-Jan-2021&To_Date=27-Sep-2021&Tx_Trend=0&Tx_CommodityHead=Tomato&Tx_StateHead=Maharashtra&Tx_DistrictHead=Pune&Tx_MarketHead=Pune'
d = webdriver.Chrome(ChromeDriverManager().install())
d.get(url)
commodity = Select(d.find_element_by_id('ddlCommodity')).options
commodity = [(i.get_attribute('value'), i.text) for i in commodity[1:]]
states = Select(d.find_element_by_id('ddlState')).options
states = [(i.get_attribute('value'), i.text) for i in states[1:]]
print(states)
print(commodity)
for i, j in states:
if j=='Maharashtra':
c, d = i, j
break
final_df = pd.DataFrame()
for a, b in commodity:
temp_df = pd.DataFrame()
url = 'https://www.agmarknet.gov.in/SearchCmmMkt.aspx?Tx_Commodity='+a+'&Tx_State='+c+'&Tx_District=0&Tx_Market=0&DateFrom=01-Jan-2021&DateTo=10-Aug-2021&Fr_Date=01-Jan-2021&To_Date=10-Aug-2021&Tx_Trend=0&Tx_CommodityHead='+b+'&Tx_StateHead='+d+'&Tx_DistrictHead=--Select--&Tx_MarketHead=--Select--'
# print(url)
dr = webdriver.Chrome(ChromeDriverManager().install())
dr.get(url)
while True:
try:
table = dr.find_element_by_id('cphBody_GridPriceData').get_attribute('outerHTML')
data = pd.read_html(str(table))[0]
data.dropna(axis=0, inplace=True)
if len(data)>1:
data.set_index('Sl no.', inplace=True)
temp_df = pd.concat([temp_df, data], axis=0)
next = dr.find_element_by_css_selector("input[alt='>']")
next.click()
sleep(6)
except:
print('Done')
if len(temp_df)>0:
new_cols = [(b, x) for x in temp_df.columns]
temp_df.columns = pd.MultiIndex.from_tuples(new_cols)
if len(final_df) > len(temp_df):
final_df = pd.concat([final_df, temp_df], axis=1)
else:
final_df = | pd.concat([temp_df, final_df], axis=1) | pandas.concat |
import ast
import pandas as pd
from pandas.api.types import CategoricalDtype
import os
import numpy as np
import tensorflow as tf
from PIL import Image
import shutil
from tqdm import tqdm
from subprocess import Popen, PIPE, STDOUT
root_path = os.path.join(os.path.dirname(__file__), os.path.pardir)
def load(filepath):
tracks = | pd.read_csv(filepath, index_col=0, header=[0, 1]) | pandas.read_csv |
"""
Function related to the loading and processing of CPC instruments from TSI
version: 0.0
date: 2016-09-09
"""
import os
import sys
import pandas as pd
import glob
import pickle
import numpy as np
import re
import matplotlib.pyplot as plt
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import atmoscripts
def Load_to_HDF(input_path= None,
input_filelist = None,
output_path = None,
output_h5_filename = 'CPC_sec',
InputTZ = 0,
OutputTZ = 0,
resample_time = False,
output_file_frequency = 'all',
force_reload_from_source = False
):
"""
Load performed after data has been exported to CSV file with just raw concentrations and times.
"""
if output_path is None:
os.chdir(input_path)
else:
os.chdir(output_path)
if (output_h5_filename is None) or (output_h5_filename == ''):
output_h5_filename = 'CPC'
output_h5_filename = output_h5_filename + '_raw'
if force_reload_from_source:
remove_previous_output('h5',force_reload_from_source, input_filelist)
if input_filelist is None:
os.chdir(input_path)
filelist = glob.glob('*.csv')
# Check if previous data has been loaded, if so, don't load it again
if os.path.isfile('files_loaded.txt'):
with open('files_loaded.txt', 'rb') as f:
files_already_loaded = pickle.load(f)
# Get only the new files to be loaded:
filelist=list(set(filelist).difference(set(files_already_loaded)))
# Read where the import has gotten up to previously:
if os.path.isfile('partial_files_loaded.txt'):
with open('partial_files_loaded.txt','r') as f:
last_loaded = f.readlines()
last_loaded = [x.strip() for x in last_loaded]
last_loaded_file = last_loaded[0]
if type(last_loaded_file) is str:
filelist.append(last_loaded_file)
else:
filelist = input_filelist
filelist.sort()
#Iterate through to load the raw files
for file in filelist:
# Read cpc csv file
read_cpc_csv(file, output_h5_filename, output_file_frequency,
InputTZ, OutputTZ)
# Clean up
if os.path.isfile('partial_files_loaded.txt'):
os.remove('partial_files_loaded.txt')
#Save the files that have already been loaded to file for next update
with open('files_loaded.txt', 'wb') as f:
try:
files_already_loaded
except NameError:
filelist = filelist
else:
filelist = filelist + files_already_loaded
pickle.dump(filelist, f)
if resample_time:
timebase_resampler(input_path, output_h5_filename,
variable = output_h5_filename,
time_int=['5S'],
output_path = output_path)
return output_h5_filename
def Load_to_NonHDF(input_path= None,
input_filelist = None,
output_path = None,
output_h5_filename = 'CPC_sec',
InputTZ = 0,
OutputTZ = 0,
resample_time = False,
output_file_frequency = 'all',
force_reload_from_source = False,
output_file_format = 'csv',
gui_mode = True,
gui_mainloop = None
):
'''
Do all the processing in HDF, then save the final product as netcdf or csv
'''
assert output_file_format in ['csv','netcdf','nc'],'Choose either netcdf \
or csv file format!'
# Load the data quickly via hdf
base_fname = Load_to_HDF(
input_path= input_path,
input_filelist = input_filelist,
output_path = output_path,
output_h5_filename = output_h5_filename,
InputTZ = InputTZ,
OutputTZ = OutputTZ,
resample_time = resample_time,
output_file_frequency = output_file_frequency,
force_reload_from_source = force_reload_from_source
)
os.chdir(output_path)
# Get the list of recently created hdf files
filelist = glob.glob('*'+base_fname+'*.h5')
# Load each file then save it as the requested file formatr
for f in filelist:
d = pd.read_hdf(f,key = 'cn')
if output_file_format.lower() == 'csv':
fname = f.split('.')[0]+'.csv'
d.to_csv(fname)
else:
fname = f.split('.')[0]+'.nc'
atmoscripts.df_to_netcdf(
d,
nc_filename = fname,
global_title = None,
global_description = None,
author = None,
global_institution = None,
global_comment = None,
gui_mode=gui_mode,
gui_mainloop = gui_mainloop
)
os.chdir(output_path)
if os.path.isfile('netcdf_global_attributes.temp'):
os.remove('netcdf_global_attributes.temp')
return
def load_cn(data_path=None, filetype=None, fname=None):
'''
Loads data from concatenated data file.
'''
if fname is None:
assert data_path is not None, 'specify data path!'
assert filetype is not None, 'specify filetype!'
os.chdir(data_path)
# Get most recently updated file:
filelist = glob.glob('*.'+filetype)
fname = min(filelist, key=os.path.getctime)
else:
filetype = fname.split('.')[-1]
if filetype in ['hdf','h5']:
data = pd.read_hdf(fname, key='cn')
elif filetype in ['netcdf','nc']:
data = atmoscripts.read_netcdf(fname, data_path)
elif filetype == 'csv':
data = pd.read_csv(fname,skipinitialspace = True,
index_col=0,
parse_dates=True,
infer_datetime_format=True)
return data
def remove_previous_output(filetype, reload_from_source, input_flist):
'''
Checks if previous files have been created. If not, then return true and
create the new files. If so, and you've been asked to reload_from_source,
return true. Otherwise, return false and don't reload the files.
'''
input_filelist = [f.split('/')[-1] for f in input_flist]
filelist = glob.glob('*'+filetype)
if len(filelist) > 0 and reload_from_source:
# Delete files and return
for file in filelist:
if file not in input_filelist:
os.remove(file)
if os.path.isfile('files_loaded.txt'):
os.remove('files_loaded.txt')
return
def save_to_hdf(data, output_h5_filename, output_file_frequency):
import datetime
''' Determine the destination file for each datapoint in the dataframe'''
year_str = [str(i.isocalendar()[0]) for i in data.index]
mnth_str = ['0' + str(i.month) \
if i.month <10 \
else str(i.month) \
for i in data.index]
if output_file_frequency.lower() == 'monthly':
# print('Saving to monthly HDF files')
# Identify the destination file of each data point
data['destination_file'] = [output_h5_filename+'_'+x+y for x,y in zip(year_str,mnth_str)]
# Get the unique filenames
output_filelist = set(data['destination_file'])
elif output_file_frequency.lower() == 'weekly':
# print('Saving to weekly HDF files')
wk_str = ['0'+str(i.isocalendar()[1]) \
if i.isocalendar()[1] < 10 \
else str(i.isocalendar()[1]) \
for i in data.index]
# Identify the destination file of each data point
data['destination_file'] = [output_h5_filename+'_'+x+'_wk'+y for x,y in zip(year_str,wk_str)]
# Get the unique filenames
output_filelist = set(data['destination_file'])
elif output_file_frequency.lower() == 'all':
# Continue as normal
# print('Saving all data to a single HDF file')
data['destination_file'] = output_h5_filename
output_filelist = set(data['destination_file'])
else:
# if output_file_frequency.lower() == 'daily':
# print('Saving to daily HDF files')
# else:
if output_file_frequency.lower() != 'daily':
print("Cannot determine what frequency you want the output file, defaulting to saving to daily files.")
day_str = [datetime.datetime.strftime(i, format = '%d') for i in data.index]
# Identify the destination file of each data point
data['destination_file'] = [output_h5_filename+'_'+x+y+z for x,y,z in zip(year_str,mnth_str,day_str)]
# Get the unique filenames
output_filelist = set(data['destination_file'])
''' Save the appropriate data to each destination file '''
for file in list(output_filelist):
# Select data
data_temp = data[data['destination_file']==file]
# Delete extraneous columns
del data_temp['destination_file']
# Check if file already exists, if so, append, otherwise, create a new file
if os.path.isfile(file+'.h5'):
data_saved = pd.read_hdf(file+'.h5', key = 'cn')
data_temp = data_saved.append(data_temp)
# Drop any duplicates which may be there, based only on the Timestamp
data_temp = data_temp.reset_index().drop_duplicates(subset='Timestamp', keep='last')
data_temp = data_temp.set_index('Timestamp')
#os.remove(file+'.h5')
# Save to file
data_temp.to_hdf(file+'.h5', key = 'cn', mode='a')
# Remove additional columns that were added to the dataframe in the processing
del data['destination_file']
return file+'.h5'
def check_cpc_file_format(filename):
'''
Reformat the CPC output that is produced by AIM auto-export.
This involves removing header information which is reprinted after each
sample
'''
# read the file
with open(filename) as f:
# Read the file
content = f.readlines()
content = [x.strip() for x in content]
# keep the first header so the read_cpc_csv function still works
first_header = True
# Initialise new list
content_reformatted = []
header_list = []
# Iterate through each line, each for validity
for line in content:
if first_header and (line.split(',')[0] in ['Sample File','Model','','Sample #']):
append_to_list(content_reformatted,line)
continue
elif line.split(',')[0] in ['Sample #']:
append_to_list(header_list,line)
else:
try:
int(line.split(',')[0])
append_to_list(content_reformatted,line)
first_header=False
except:
continue
if 'Sample File' not in content[0]:
return None
if len(content) == len(content_reformatted):
return filename
else:
# Make sure the header reflects the longest sample length in the file
content_reformatted[3] = max(header_list,key=len)
# Write new file
filename_reformatted = filename.split('.')[0]+'_reformatted.'+filename.split('.')[1]
if os.path.isfile(filename_reformatted):
os.remove(filename_reformatted)
with open(filename_reformatted,'wt') as fnew:
fnew.write('\n'.join(line for line in content_reformatted))
print('Input CPC file reformatted')
# Return new filename
return filename_reformatted
def append_to_list(lst, line):
return lst.append(line)
def read_cpc_csv(read_filename, output_filename_base, output_file_frequency, InputTZ=0, OutputTZ=0):
'''
Reads CPC data exports from AIM 10 and higher as row based, with
ONLY concentration data output
'''
import numpy as np
if output_file_frequency == 'all':
print('Saving all data to a single HDF file')
else:
print('Saving to ' + output_file_frequency + ' HDF file')
# Check format of file:
read_filename = check_cpc_file_format(read_filename)
if read_filename is None:
return
# Read each row of data, taking into account that each row can change length and parsing format (weird...)
df = pd.read_csv(read_filename, skiprows = range(0,3), engine='python', skipinitialspace=True, iterator = True, chunksize = 1000)
# Read the number of samples in the file
with open(read_filename) as f:
lastline = f.readlines()[-1]
numsamples = lastline.split(",")[0]
# Read where the import has gotten up to previously:
first_load = True
if os.path.isfile('partial_files_loaded.txt'):
with open('partial_files_loaded.txt','r') as f:
last_loaded = f.readlines()
last_loaded = [x.strip() for x in last_loaded]
last_loaded_file = last_loaded[0]
last_loaded_sample = int(last_loaded[1])
first_load = False
for chunk in df:
# Extract initial timestamp for each sample (i.e. each row)
try:
chunk['sample_timestamp'] = pd.to_datetime(chunk['Start Date']+' '+chunk['Start Time'], format = '%m/%d/%y %H:%M:%S')
chunk = chunk.reset_index()
del chunk['index']
except KeyError:
# The csv file that you've read isn't actually a TSI CPC file
return
data = pd.DataFrame(columns = {'Timestamp', 'Concentration'})
for rowidx in range(0,len(chunk)):
if not first_load:
if (chunk['Sample #'][rowidx] <= last_loaded_sample) and (read_filename == last_loaded_file):
continue
# Create timestamp and extract concentration for each sample in chunk
timestamp = [chunk['sample_timestamp'][rowidx]+pd.Timedelta(seconds=x) for x in range(0,chunk['Sample Length'][rowidx])]
conc = chunk.loc[rowidx][12:(12+chunk['Sample Length'][rowidx])]
print('Formatting sample ' + str(chunk['Sample #'].loc[rowidx])
+ ' of ' + numsamples + ' from file ' + read_filename)
# Format data as dataframe
data_temp = pd.DataFrame({'Timestamp': timestamp, 'Concentration': conc.values})
# Append new data to current data
data = | pd.concat([data,data_temp]) | pandas.concat |
# !pip install git+https://github.com/huggingface/transformers
# !pip install textacy
import sys
from custom_svo_extractor import find_svo
experiment_data = 'squad'
experiment_model = 'Bert'
extractor = 'spacy'
if len(sys.argv) > 2:
    experiment_data = str(sys.argv[1])
    experiment_model = str(sys.argv[2])
    # optional third CLI argument selects the relation extractor; anything else falls back to 'custom'
    extractor = str(sys.argv[3]) if len(sys.argv) > 3 and str(sys.argv[3]) in ('spacy', 'textacy') else 'custom'
import urllib.request
import zipfile
from transformers import pipeline
import json
import pandas as pd
import matplotlib.pyplot as plt
import networkx as nx
import spacy
import textacy
def preprocess_masks(df, experiment_data, experiment_model, unmasker):
good_preds_ids = []
predicted_sentences = []
ground_truth_sentences = []
valid_examples = len(df)
for i in range(len(df)):
        # strip stray byte-level BPE / encoding artifacts (e.g. the 'Ġ' space marker) from the text
        mask = df['masked_sentences'][i].replace('Ã', '').replace('Ġ', '')
if mask.count('[MASK]') > 1 or mask.count('[MASK]') == 0:
valid_examples -= 1
continue
if 're' in experiment_data:
            gold_label = df['obj'][i].replace('Ã', '').replace('Ġ', '')
else:
            gold_label = df['obj_label'][i].replace('Ã', '').replace('Ġ', '')
if 'Roberta' in experiment_model:
mask = mask.replace('[MASK]', '<mask>')
preds = pd.DataFrame(unmasker(mask))['token_str']
if any(preds.str.contains(gold_label)):
good_preds_ids.append(i)
        top_pred = preds[0].replace('Ã', '').replace('Ġ', '')
if 'Roberta' in experiment_model:
full_sentence = mask.replace('<mask>', top_pred)
ground_truth_sentence = mask.replace('<mask>', gold_label)
else:
full_sentence = mask.replace('[MASK]', top_pred)
ground_truth_sentence = mask.replace('[MASK]', gold_label)
predicted_sentences.append(full_sentence)
ground_truth_sentences.append(ground_truth_sentence)
return good_preds_ids, predicted_sentences, ground_truth_sentences, valid_examples
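# Illustrative driver for the preprocessing step above -- an assumed usage sketch, not
# code from the original experiment. It relies on generate_df / retrive_models defined
# later in this file and assumes the LAMA data has already been unpacked by retrieve_data().
def _demo_preprocess(data_type='squad', model_name='Bert'):
    df = generate_df(data_type)
    _, _, unmasker = retrive_models(model_name)
    ids, preds, truths, n_valid = preprocess_masks(df, data_type, model_name, unmasker)
    print('%d of %d valid masks had the gold label among the top predictions' % (len(ids), n_valid))
    return preds, truths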
def generate_df(data_type='squad'):
if data_type == 'squad':
file = 'data/data/Squad/test.jsonl'
elif data_type == 're-date-birth':
file = 'data/data/Google_RE/date_of_birth_test.jsonl'
elif data_type == 're-place-birth':
file = 'data/data/Google_RE/place_of_birth_test.jsonl'
elif data_type == 're-place-death':
file = 'data/data/Google_RE/place_of_death_test.jsonl'
else:
raise NameError("Data file ", data_type, "not available.")
with open(file, 'r') as json_file:
json_list = list(json_file)
df = pd.DataFrame()
for json_str in json_list:
result = json.loads(json_str)
if 're' in data_type:
dfItem = pd.DataFrame.from_dict({'masked_sentences': result['masked_sentences'], 'obj': str(result['obj'][:4])})
else:
dfItem = pd.DataFrame.from_records(result)
df = df.append(dfItem, ignore_index=True)
return df
def textacy_extract_relations(text):
nlp = spacy.load("en_core_web_sm")
doc = nlp(text)
return textacy.extract.subject_verb_object_triples(doc)
def spacy_extract_relations(text):
nlp = spacy.load("en_core_web_sm")
doc = nlp(text)
triples = []
for ent in doc.ents:
preps = [prep for prep in ent.root.head.children if prep.dep_ == "prep"]
for prep in preps:
for child in prep.children:
triples.append((ent.text, "{} {}".format(ent.root.head, prep), child.text))
return triples
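# Small usage sketch for the two extractors above (the sentence is an arbitrary example):
# spacy_extract_relations returns (entity, "head prep", child) triples built from entity
# heads, while textacy_extract_relations yields textacy's subject-verb-object triples.
def _demo_extractors(sentence="Dante was born in Florence."):
    print(list(textacy_extract_relations(sentence)))
    print(spacy_extract_relations(sentence))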
def retrieve_data():
url = "https://dl.fbaipublicfiles.com/LAMA/data.zip"
extract_dir = "data"
zip_path, _ = urllib.request.urlretrieve(url)
with zipfile.ZipFile(zip_path, "r") as f:
f.extractall(extract_dir)
def retrive_models(experiment_model):
if experiment_model == 'DistilBert':
from transformers import DistilBertTokenizer, DistilBertModel
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
model = DistilBertModel.from_pretrained("distilbert-base-uncased")
unmasker = pipeline('fill-mask', model='distilbert-base-uncased')
elif experiment_model == 'Bert':
from transformers import BertTokenizer, BertModel
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained("bert-base-uncased")
unmasker = pipeline('fill-mask', model='bert-base-uncased')
elif experiment_model == 'Roberta':
from transformers import RobertaTokenizer, RobertaModel
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
model = RobertaModel.from_pretrained("roberta-base")
unmasker = pipeline('fill-mask', model='roberta-base')
else:
raise NameError("Model has not been implemented yet.")
return tokenizer, model, unmasker
def generate_kg(predicted_sentences, extractor='spacy'):
nlp = spacy.load("en_core_web_sm")
label_dict = {}
row_list = []
if extractor == 'spacy':
extract_relations = spacy_extract_relations
elif extractor == 'textacy':
extract_relations = textacy_extract_relations
elif extractor == 'custom':
extract_relations = find_svo
else:
raise ValueError("Please provide a valid extraction option")
for text in predicted_sentences:
relations = extract_relations(text)
for _source, _relation, _target in relations:
row_list.append({'source': str(_source), 'target':str(_target), 'edge': str(_relation)})
label_dict[(str(_source), str(_target))] = str(_relation)
return pd.DataFrame(row_list), label_dict
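# Sketch of how generate_kg is expected to be used downstream of preprocess_masks;
# the two sentences stand in for model-completed masked sentences.
def _demo_generate_kg():
    sentences = ["Dante was born in Florence.", "Newton was born in England."]
    kg_df, labels = generate_kg(sentences, extractor='spacy')
    print(kg_df)      # edge list with source / target / edge columns
    print(labels)     # {(source, target): relation}, used for edge labels in plot_kg
    return kg_df, labels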
def plot_kg(df, label_dict, node_color='skyblue', font_color='red', save_name='img.jpg'):
G=nx.from_pandas_edgelist(df, "source", "target",
edge_attr=True, create_using=nx.MultiDiGraph())
plt.figure(figsize=(12,12))
pos = nx.spring_layout(G)
nx.draw(G, with_labels=True, node_color=node_color, edge_cmap=plt.cm.Blues, pos = pos)
nx.draw_networkx_edge_labels(G,pos,edge_labels=label_dict,font_color=font_color)
plt.savefig(save_name)
def generate_merge_df(labels):
n_df = | pd.DataFrame(labels, columns=['source', 'edge']) | pandas.DataFrame |
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
data = pd.read_csv(path)
data['Rating'].hist()
data = data[data['Rating']<=5]
data['Rating'].hist()
#Code ends here
# --------------
# code starts here
total_null = data.isnull().sum()
percent_null = (total_null/data.isnull().count())*100
missing_data = pd.concat([total_null,percent_null],keys=['Total','Percent'],axis=1)
#print(missing_data)
data = data.dropna()
total_null_1 = data.isnull().sum()
percent_null_1 = (total_null_1/data.isnull().count())*100
missing_data_1 = | pd.concat([total_null_1,percent_null_1],keys=['Total','Percent'],axis=1) | pandas.concat |
# coding: utf-8
# CS FutureMobility Tool
# See full license in LICENSE.txt.
import numpy as np
import pandas as pd
#import openmatrix as omx
from IPython.display import display
from openpyxl import load_workbook,Workbook
from time import strftime
import os.path
import mode_choice.model_defs as md
import mode_choice.matrix_utils as mtx
import config
''' Utilities to summarize the outputs of Mode Choice '''
def display_mode_share(mc_obj):
'''
    This displays a mode share summary by market segment (with / without vehicle, peak / off-peak) in the IPython notebook.
:param mc_obj: mode choice module object as defined in the IPython notebook
'''
# display mode share tables
avg_trips_by_mode = pd.DataFrame(None)
for purpose in ['HBW','HBO', 'NHB', 'HBSc1', 'HBSc2', 'HBSc3']:
        tbl = mc_obj.table_container.get_table(purpose)
        trips_by_mode = pd.DataFrame({pv: {mode: tbl[pv][mode].sum() for mode in tbl[pv]}
                                      for pv in ['0_PK', '1_PK', '0_OP', '1_OP']}).T
        avg_trips_by_mode = avg_trips_by_mode.add(trips_by_mode, fill_value=0)
avg_mode_share = avg_trips_by_mode.divide(avg_trips_by_mode.sum(1),axis = 0)
display(avg_mode_share.style.format("{:.2%}"))
def write_boston_neighbortown_mode_share_to_excel(mc_obj):
'''
Writes mode share summary by purpose and market segment to an Excel workbook.
    Applies only to trips to/from Boston and its neighboring towns.
    The output workbook is written to the output path defined in config.py.
    :param mc_obj: mode choice module object as defined in the IPython notebook
'''
out_excel_fn = mc_obj.config.out_path + "mode_share_bosNB_{0}.xlsx".format(strftime("%Y%m%d"))
# check if file exists.
if os.path.isfile(out_excel_fn):
book = load_workbook(out_excel_fn)
else:
book = Workbook()
book.save(out_excel_fn)
writer = pd.ExcelWriter(out_excel_fn,engine = 'openpyxl')
writer.book = book
for purp in md.purposes:
mode_share = pd.DataFrame(columns = md.peak_veh)
trip_table = mc_obj.table_container.get_table(purp)
for pv in md.peak_veh:
for mode in trip_table[pv].keys():
#study area zones might not start at zone 0 and could have discontinous TAZ IDs
trip_table_o = mtx.OD_slice(trip_table[pv][mode], O_slice = md.taz['BOSTON'], D_slice = md.taz['BOS_AND_NEI'])
trip_table_d = mtx.OD_slice(trip_table[pv][mode], O_slice = md.taz['BOS_AND_NEI'], D_slice = md.taz['BOSTON'])
trip_table_b = mtx.OD_slice(trip_table[pv][mode], O_slice = md.taz['BOSTON'], D_slice = md.taz['BOSTON'])
trip_table_bos = trip_table_o + trip_table_d - trip_table_b
mode_share.loc[mode,pv] = trip_table_bos.sum()
mode_share['Total'] = mode_share.sum(1)
mode_share['Share'] = mode_share['Total'] / mode_share['Total'].sum()
if purp in book.sheetnames: # if sheetname exists, delete
book.remove(book[purp])
writer.save()
mode_share.to_excel(writer, sheet_name = purp)
writer.save()
def write_study_area_mode_share_to_excel(mc_obj, out_excel_fn = None):
'''
Writes mode share summary by purpose and market segment to an Excel workbook.
Applies only to trips to/from study area
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_excel_fn: output Excel filename, by default in the output path defined in config.py
'''
if out_excel_fn is None:
out_excel_fn = mc_obj.config.out_path + "mode_share_study_area_{0}.xlsx".format(strftime("%Y%m%d"))
# check if file exists.
if os.path.isfile(out_excel_fn):
book = load_workbook(out_excel_fn)
else:
book = Workbook()
book.save(out_excel_fn)
writer = pd.ExcelWriter(out_excel_fn,engine = 'openpyxl')
writer.book = book
for purp in md.purposes:
mode_share = pd.DataFrame(columns = md.peak_veh)
trip_table = mc_obj.table_container.get_table(purp)
for pv in md.peak_veh:
for mode in trip_table[pv].keys():
trip_table_o = mtx.OD_slice(trip_table[pv][mode], O_slice = md.study_area)
trip_table_d = mtx.OD_slice(trip_table[pv][mode], D_slice = md.study_area)
trip_table_ii = mtx.OD_slice(trip_table[pv][mode], O_slice = md.study_area, D_slice = md.study_area)
trip_table_sa = trip_table_o + trip_table_d - trip_table_ii
mode_share.loc[mode,pv] = trip_table_sa.sum()
mode_share['Total'] = mode_share.sum(1)
mode_share['Share'] = mode_share['Total'] / mode_share['Total'].sum()
if purp in book.sheetnames: # if sheetname exists, delete
book.remove(book[purp])
writer.save()
mode_share.to_excel(writer, sheet_name = purp)
writer.save()
def write_mode_share_to_excel(mc_obj,purpose, out_excel_fn = None):
'''
Writes mode share summary by purpose and market segment to an Excel workbook.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param purpose: can be a single purpose or 'all', in which case the Excel workbook has six sheets, one for each purpose.
:param out_excel_fn: output Excel filename, by default in the output path defined in config.py
'''
if out_excel_fn is None:
out_excel_fn = mc_obj.config.out_path + "MC_mode_share_{0}_{1}.xlsx".format(purpose, strftime("%Y%m%d"))
if purpose == 'all':
# check if file exists.
if os.path.isfile(out_excel_fn):
book = load_workbook(out_excel_fn)
else:
book = Workbook()
book.save(out_excel_fn)
writer = pd.ExcelWriter(out_excel_fn,engine = 'openpyxl')
writer.book = book
for purp in md.purposes:
trip_table = mc_obj.table_container.get_table(purp)
mode_share = pd.DataFrame(columns = md.peak_veh)
for pv in md.peak_veh:
for mode in trip_table[pv].keys():
mode_share.loc[mode,pv] = trip_table[pv][mode].sum()
mode_share['Total'] = mode_share.sum(1)
mode_share['Share'] = mode_share['Total'] / mode_share['Total'].sum()
if purp in book.sheetnames: # if sheetname exists, delete
book.remove(book[purp])
writer.save()
mode_share.to_excel(writer, sheet_name = purp)
writer.save()
elif purpose in md.purposes:
# check if file exists.
if os.path.isfile(out_excel_fn):
book = load_workbook(out_excel_fn)
else:
book = Workbook()
book.save(out_excel_fn)
writer = pd.ExcelWriter(out_excel_fn,engine = 'openpyxl')
writer.book = book
mode_share = pd.DataFrame(columns = md.peak_veh)
for pv in md.peak_veh:
for mode in mc_obj.trips_by_mode[pv].keys():
mode_share.loc[mode,pv] = mc_obj.trips_by_mode[pv][mode].sum()
mode_share['Total'] = mode_share.sum(1)
mode_share['Share'] = mode_share['Total'] / mode_share['Total'].sum()
if purpose in book.sheetnames: # if sheetname exists, delete
book.remove(book[purpose])
writer.save()
mode_share.to_excel(writer, sheet_name = purpose)
writer.save()
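# Convenience wrapper (illustrative sketch, assumed rather than part of the original tool):
# writes the three Excel mode share summaries for a completed mode choice run in one call.
def write_all_mode_share_summaries(mc_obj):
    write_mode_share_to_excel(mc_obj, 'all')
    write_boston_neighbortown_mode_share_to_excel(mc_obj)
    write_study_area_mode_share_to_excel(mc_obj)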
def __mt_prod_attr_nhood(mc_obj, trip_table, skim): # miles traveled. For VMT and PMT, by neighborhood
# sum prodct of trip_table - skims
mt_total = trip_table * skim['Length (Skim)']
# calculate marginals
prod = pd.DataFrame(np.sum(mt_total,axis = 1)/2, columns = ['Production'])
attr = pd.DataFrame(np.sum(mt_total,axis = 0) / 2, columns = ['Attraction'])
towns = mc_obj.taz.sort_values(md.taz_ID_field).iloc[0:md.max_zone]
mt_taz = pd.concat([towns[[md.taz_ID_field,'BOSTON_NB']],prod,attr],axis = 1,join = 'inner')
mt_taz.index.names=['Boston Neighborhood']
return mt_taz.groupby(['BOSTON_NB']).sum()[['Production','Attraction']].reset_index()
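# Toy illustration of the miles-traveled split used above: each OD pair's miles
# (trips * skim distance) are credited half to the production (row) end and half to the
# attraction (column) end, so production + attraction sums back to total miles traveled.
def _demo_mt_split():
    trips = np.array([[0., 10.], [20., 0.]])   # 2-zone trip table
    dist = np.array([[0., 3.], [3., 0.]])      # distance skim (miles)
    mt = trips * dist                          # miles traveled per OD pair
    prod = mt.sum(axis=1) / 2
    attr = mt.sum(axis=0) / 2
    assert prod.sum() + attr.sum() == mt.sum()
    return prod, attr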
def __trip_prod_attr_nhood(mc_obj, trip_table):
mt_total = trip_table
# calculate marginals
prod = pd.DataFrame(np.sum(mt_total,axis = 1), columns = ['Production'])
attr = pd.DataFrame(np.sum(mt_total,axis = 0), columns = ['Attraction'])
towns = mc_obj.taz.sort_values(md.taz_ID_field).iloc[0:md.max_zone]
mt_taz = pd.concat([towns[[md.taz_ID_field,'BOSTON_NB']],prod,attr],axis = 1,join = 'inner')
mt_taz.index.names=['Boston Neighborhood']
return mt_taz.groupby(['BOSTON_NB']).sum()[['Production','Attraction']].reset_index()
def sm_vmt_by_neighborhood(mc_obj, out_fn = None, by = None, sm_mode = 'SM_RA'):
'''
Summarizes VMT production and attraction by the 26 Boston neighborhoods for Shared Mobility Modes.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary; if None specified, only aggregate production and attraction will be provided.
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + sm_mode + f'_vmt_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + sm_mode + f'_vmt_by_neighborhood_by_{by}.csv'
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
    if by not in (None, 'peak', 'veh_own', 'purpose'):
print('Only supports VMT by neighborhood, peak / vehicle ownership, purpose.')
return
else:
vmt_master_table = pd.DataFrame(columns = ['Production','Attraction','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
auto_trip_table = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][sm_mode] / md.AO_dict[sm_mode]
vmt_table = __mt_prod_attr_nhood(mc_obj,auto_trip_table,skim_dict[peak])
vmt_table['peak'] = peak
vmt_table['veh_own'] = veh_own
vmt_table['purpose'] = purpose
vmt_master_table = vmt_master_table.append(vmt_table, sort = True)
if by == None:
vmt_summary = vmt_master_table.groupby('BOSTON_NB').sum()
elif by == 'peak':
vmt_summary = pd.concat([
vmt_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak] for peak in ['PK','OP']], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
vmt_summary = pd.concat([
vmt_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own] for veh_own in ['0','1']], axis = 1, keys = ['No car', 'With car']
)
elif by == 'purpose':
vmt_summary = pd.concat([
vmt_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose] for purpose in vmt_master_table.purpose.unique()],axis = 1, keys= vmt_master_table.purpose.unique())
vmt_summary.to_csv(out_fn)
def vmt_by_neighborhood(mc_obj, out_fn = None, by = None):
'''
Summarizes VMT production and attraction by the 26 Boston neighborhoods.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary; if None specified, only aggregate production and attraction will be provided.
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + f'vmt_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + f'vmt_by_neighborhood_by_{by}.csv'
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
    if by is not None and by not in ['peak','veh_own','purpose']:
print('Only supports VMT by neighborhood, peak / vehicle ownership, purpose.')
return
else:
vmt_master_table = pd.DataFrame(columns = ['Production','Attraction','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
auto_trip_table = sum([
mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode] / md.AO_dict[mode]
for mode in ['DA','SR2','SR3+','SM_RA','SM_SH'] if mode in drive_modes])
vmt_table = __mt_prod_attr_nhood(mc_obj,auto_trip_table,skim_dict[peak])
vmt_table['peak'] = peak
vmt_table['veh_own'] = veh_own
vmt_table['purpose'] = purpose
vmt_master_table = vmt_master_table.append(vmt_table, sort = True)
if by == None:
vmt_summary = vmt_master_table.groupby('BOSTON_NB').sum()
elif by == 'peak':
vmt_summary = pd.concat([
vmt_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak] for peak in ['PK','OP']], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
vmt_summary = pd.concat([
vmt_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own] for veh_own in ['0','1']], axis = 1, keys = ['No car', 'With car']
)
elif by == 'purpose':
vmt_summary = pd.concat([
vmt_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose] for purpose in vmt_master_table.purpose.unique()],axis = 1, keys= vmt_master_table.purpose.unique())
vmt_summary.to_csv(out_fn)
def pmt_by_neighborhood(mc_obj, out_fn = None, by = None):
'''
Summarizes PMT production and attraction by the 26 Boston neighborhoods.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary; if None specified, only aggregate production and attraction will be provided.
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + f'pmt_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + f'pmt_by_neighborhood_by_{by}.csv'
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
    if by is not None and by not in ['peak','veh_own','purpose']:
print('Only supports PMT by neighborhood, peak / vehicle ownership, purpose.')
return
else:
pmt_master_table = pd.DataFrame(columns = ['Production','Attraction','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
person_trip_table = sum([mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode] for mode in md.modes if mode in drive_modes])
pmt_table = __mt_prod_attr_nhood(mc_obj,person_trip_table,skim_dict[peak])
pmt_table['peak'] = peak
pmt_table['veh_own'] = veh_own
pmt_table['purpose'] = purpose
pmt_master_table = pmt_master_table.append(pmt_table, sort = True)
if by == None:
pmt_summary = pmt_master_table.groupby('BOSTON_NB').sum()
elif by == 'peak':
pmt_summary = pd.concat([
pmt_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak] for peak in ['PK','OP']], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
pmt_summary = pd.concat([
pmt_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own] for veh_own in ['0','1']], axis = 1, keys = ['No car', 'With car']
)
elif by == 'purpose':
pmt_summary = pd.concat([
pmt_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose] for purpose in pmt_master_table.purpose.unique()],axis = 1, keys= pmt_master_table.purpose.unique())
pmt_summary.to_csv(out_fn)
def act_pmt_by_neighborhood(mc_obj, out_fn = None, by = None):
'''
Summarizes PMT production and attraction by the 26 Boston neighborhoods for active modes.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary; if None specified, only aggregate production and attraction will be provided.
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + f'act_pmt_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + f'act_pmt_by_neighborhood_by_{by}.csv'
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
    if by is not None and by not in ['peak','veh_own','purpose']:
print('Only supports PMT by neighborhood, peak / vehicle ownership, purpose.')
return
else:
pmt_master_table = pd.DataFrame(columns = ['Production','Attraction','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
person_trip_table = sum([mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode] for mode in ['Walk','Bike'] if mode in drive_modes])
pmt_table = __mt_prod_attr_nhood(mc_obj,person_trip_table,skim_dict[peak])
pmt_table['peak'] = peak
pmt_table['veh_own'] = veh_own
pmt_table['purpose'] = purpose
pmt_master_table = pmt_master_table.append(pmt_table, sort = True)
if by == None:
pmt_summary = pmt_master_table.groupby('BOSTON_NB').sum()
elif by == 'peak':
pmt_summary = pd.concat([
pmt_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak] for peak in ['PK','OP']], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
pmt_summary = pd.concat([
pmt_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own] for veh_own in ['0','1']], axis = 1, keys = ['No car', 'With car']
)
elif by == 'purpose':
pmt_summary = pd.concat([
pmt_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose] for purpose in pmt_master_table.purpose.unique()],axis = 1, keys= pmt_master_table.purpose.unique())
pmt_summary.to_csv(out_fn)
def sm_trips_by_neighborhood(mc_obj, out_fn = None, by = None, sm_mode = 'SM_RA'):
'''
    Summarizes trip production and attraction by the 26 Boston neighborhoods for Shared Mobility Modes.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary; if None specified, only aggregate production and attraction will be provided.
:param sm_mode: Smart Mobility Mode name
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + sm_mode + f'_trips_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + sm_mode + f'_trips_by_neighborhood_by_{by}.csv'
    if by is not None and by not in ['peak','veh_own','purpose']:
print('Only supports Trips by neighborhood, peak / vehicle ownership, purpose.')
return
else:
trp_master_table = pd.DataFrame(columns = ['Production','Attraction','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
person_trip_table = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][sm_mode]
trp_table = __trip_prod_attr_nhood(mc_obj,person_trip_table)
trp_table['peak'] = peak
trp_table['veh_own'] = veh_own
trp_table['purpose'] = purpose
trp_master_table = trp_master_table.append(trp_table, sort = True)
if by == None:
trp_summary = trp_master_table.groupby('BOSTON_NB').sum()
elif by == 'peak':
trp_summary = pd.concat([
trp_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak] for peak in ['PK','OP']], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
trp_summary = pd.concat([
trp_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own] for veh_own in ['0','1']], axis = 1, keys = ['No car', 'With car']
)
elif by == 'purpose':
trp_summary = pd.concat([
trp_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose] for purpose in trp_master_table.purpose.unique()],axis = 1, keys= trp_master_table.purpose.unique())
trp_summary.to_csv(out_fn)
def trips_by_neighborhood(mc_obj, out_fn = None, by = None):
'''
    Summarizes trip production and attraction by the 26 Boston neighborhoods.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary; if None specified, only aggregate production and attraction will be provided.
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + f'trips_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + f'trips_by_neighborhood_by_{by}.csv'
    if by is not None and by not in ['peak','veh_own','purpose']:
print('Only supports Trips by neighborhood, peak / vehicle ownership, purpose.')
return
else:
trp_master_table = pd.DataFrame(columns = ['Production','Attraction','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
person_trip_table = sum([mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode] for mode in md.modes if mode in drive_modes])
trp_table = __trip_prod_attr_nhood(mc_obj,person_trip_table)
trp_table['peak'] = peak
trp_table['veh_own'] = veh_own
trp_table['purpose'] = purpose
trp_master_table = trp_master_table.append(trp_table, sort = True)
if by == None:
trp_summary = trp_master_table.groupby('BOSTON_NB').sum()
elif by == 'peak':
trp_summary = pd.concat([
trp_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak] for peak in ['PK','OP']], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
trp_summary = pd.concat([
trp_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own] for veh_own in ['0','1']], axis = 1, keys = ['No car', 'With car']
)
elif by == 'purpose':
trp_summary = pd.concat([
trp_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose] for purpose in trp_master_table.purpose.unique()],axis = 1, keys= trp_master_table.purpose.unique())
trp_summary.to_csv(out_fn)
def mode_share_by_neighborhood(mc_obj, out_fn = None, by = None):
'''
    Summarizes mode share as the average of trips to/from the 26 Boston neighborhoods, in four categories - drive, non-motorized, transit and smart mobility.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + f'mode_share_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + f'mode_share_by_neighborhood_by_{by}.csv'
    if by is not None and by not in ['peak','veh_own','purpose']:
print('Only supports mode share by neighborhood, peak / vehicle ownership, purpose.')
return
else:
share_master_table = pd.DataFrame(columns = ['drive','non-motorized','transit','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
share_table = pd.DataFrame(index = range(0,md.max_zone),columns = ['drive','non-motorized','transit','smart mobility']).fillna(0)
for mode in mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}']:
trip_table = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode]
category = md.mode_categories[mode]
share_table[category] += (trip_table.sum(axis = 1)+trip_table.sum(axis = 0))/2
towns = mc_obj.taz.sort_values(md.taz_ID_field).iloc[0:md.max_zone]
trips = pd.concat([towns[[md.taz_ID_field,'BOSTON_NB']],share_table],axis = 1,join = 'inner').groupby(['BOSTON_NB']).sum().drop([md.taz_ID_field],axis = 1)
trips['peak'] = peak
trips['veh_own'] = veh_own
trips['purpose'] = purpose
share_master_table = share_master_table.append(trips.reset_index(), sort = True)
if by == None:
trip_summary = share_master_table.groupby('BOSTON_NB').sum()
share_summary = trip_summary.divide(trip_summary.sum(axis = 1),axis = 0)
elif by == 'peak':
share_summary = pd.concat([
share_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak].divide(
share_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak].sum(axis=1),axis = 0)
for peak in ['PK','OP']
], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
share_summary = pd.concat([
share_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own].divide(
share_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own].sum(axis=1),axis = 0)
for veh_own in ['0','1']
], axis = 1, keys = ['No car', 'With car'])
elif by == 'purpose':
share_summary = pd.concat([
share_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose].divide(
share_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose].sum(axis=1),axis = 0)
for purpose in share_master_table.purpose.unique()
],axis = 1, keys= share_master_table.purpose.unique())
share_summary.to_csv(out_fn)
# Seaport method
def mode_share_by_subarea(mc_obj, out_fn = None, by = None):
'''
    Summarizes mode share as the average of trips to/from the 7 Seaport sub-areas, in four categories - drive, non-motorized, transit and smart mobility.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + f'mode_share_by_subarea.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + f'mode_share_by_subarea_by_{by}.csv'
    if by is not None and by not in ['peak','veh_own','purpose']:
print('Only supports mode share by subarea, peak / vehicle ownership, purpose.')
return
else:
share_master_table = pd.DataFrame(columns = ['drive','non-motorized','transit','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
share_table = pd.DataFrame(index = range(0,md.max_zone),columns = ['drive','non-motorized','transit','smart mobility']).fillna(0)
for mode in mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}']:
trip_table = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode]
category = md.mode_categories[mode]
share_table[category] += (trip_table.sum(axis = 1)+trip_table.sum(axis = 0))/2
towns = mc_obj.taz.sort_values(md.taz_ID_field).iloc[0:md.max_zone]
towns['REPORT_AREA'] = towns['REPORT_AREA'][towns['REPORT_AREA'].isin(['South Station', 'Seaport Blvd', 'Design Center',
'Southeast Seaport', 'BCEC', 'Fort Point', 'Broadway'])]
trips = pd.concat([towns[[md.taz_ID_field,'REPORT_AREA']],share_table],axis = 1,join = 'inner').groupby(['REPORT_AREA']).sum().drop([md.taz_ID_field],axis = 1)
trips['peak'] = peak
trips['veh_own'] = veh_own
trips['purpose'] = purpose
share_master_table = share_master_table.append(trips.reset_index(), sort = True)
if by == None:
trip_summary = share_master_table.groupby('REPORT_AREA').sum()
share_summary = trip_summary.divide(trip_summary.sum(axis = 1),axis = 0)
elif by == 'peak':
share_summary = pd.concat([
share_master_table.groupby(['peak','REPORT_AREA']).sum().loc[peak].divide(
share_master_table.groupby(['peak','REPORT_AREA']).sum().loc[peak].sum(axis=1),axis = 0)
for peak in ['PK','OP']
], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
share_summary = pd.concat([
share_master_table.groupby(['veh_own','REPORT_AREA']).sum().loc[veh_own].divide(
share_master_table.groupby(['veh_own','REPORT_AREA']).sum().loc[veh_own].sum(axis=1),axis = 0)
for veh_own in ['0','1']
], axis = 1, keys = ['No car', 'With car'])
elif by == 'purpose':
share_summary = pd.concat([
share_master_table.groupby(['purpose','REPORT_AREA']).sum().loc[purpose].divide(
share_master_table.groupby(['purpose','REPORT_AREA']).sum().loc[purpose].sum(axis=1),axis = 0)
for purpose in share_master_table.purpose.unique()
],axis = 1, keys= share_master_table.purpose.unique())
share_summary.to_csv(out_fn)
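# --- Added usage sketch (not part of the original module) ---------------------
# The summary helpers above all follow the same call pattern; `mc_obj` is the
# mode choice module object built in the project's IPython notebook (see the
# docstrings), and `by` may be None, 'peak', 'veh_own' or 'purpose'.
#
#   vmt_by_neighborhood(mc_obj, by='purpose')       # VMT split by trip purpose
#   pmt_by_neighborhood(mc_obj)                     # aggregate PMT
#   trips_by_neighborhood(mc_obj, by='veh_own')     # trips split by vehicle ownership
#   mode_share_by_neighborhood(mc_obj, by='peak')   # mode share by peak / off-peak
#   mode_share_by_subarea(mc_obj)                   # Seaport sub-area mode share
#   sm_vmt_by_neighborhood(mc_obj, sm_mode='SM_RA') # shared-mobility VMT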
def __sm_compute_summary_by_subregion(mc_obj,metric = 'VMT',subregion = 'neighboring', sm_mode='SM_RA'):
''' Computing function used by write_summary_by_subregion(), does not produce outputs'''
if metric.lower() not in ('vmt','pmt','mode share','trip', 'pmt_act'):
print('Only supports trip, VMT, PMT and mode share calculations.')
return
if subregion.lower() not in ('boston','neighboring','i93','i495','region'):
print('Only supports within boston, "neighboring" for towns neighboring Boston, I93, I495 or Region.')
return
subregion_dict = {'boston':'BOSTON','neighboring':'BOS_AND_NEI','i93':'in_i95i93','i495':'in_i495'}
if metric.lower() == 'vmt':
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
vmt_table = np.zeros((md.max_zone,md.max_zone))
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
trip_table = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][sm_mode] / md.AO_dict[sm_mode]
vmt_table += trip_table * skim_dict[peak]['Length (Skim)']
if subregion.lower() in subregion_dict:
field = subregion_dict[subregion.lower()]
boston_o_auto_vmt = mtx.OD_slice(vmt_table,O_slice = md.taz['BOSTON'], D_slice = md.taz[field]== True)
boston_d_auto_vmt = mtx.OD_slice(vmt_table,md.taz[field]== True,D_slice = md.taz['BOSTON'])
#boston_o_auto_vmt = vmt_table[md.taz['BOSTON'],:][:, md.taz[field]== True]
#boston_d_auto_vmt = vmt_table[md.taz[field]== True,:][:,md.taz['BOSTON']]
town_definition = md.taz[md.taz[field]== True]
elif subregion.lower() == 'region':
boston_o_auto_vmt = mtx.OD_slice(vmt_table,O_slice = md.taz['BOSTON'])
boston_d_auto_vmt = mtx.OD_slice(vmt_table,D_slice = md.taz['BOSTON'])
#boston_o_auto_vmt = vmt_table[md.taz['BOSTON'],:]
#boston_d_auto_vmt = vmt_table[:][:,md.taz['BOSTON']]
town_definition = md.taz
zone_vmt_daily_o = pd.DataFrame(np.sum(boston_o_auto_vmt,axis=1)/2 ,columns=["VMT"])
zone_vmt_daily_d = pd.DataFrame(np.sum(boston_d_auto_vmt,axis=0)/2 ,columns=["VMT"])
town_vmt_o=pd.concat([town_definition,zone_vmt_daily_o],axis=1,join='inner')
town_vmt_d=pd.concat([town_definition,zone_vmt_daily_d],axis=1,join='inner')
vmt_sum_o = town_vmt_o[town_vmt_o['TOWN']=='BOSTON,MA'].groupby(['TOWN']).sum()['VMT']
vmt_sum_d = town_vmt_d[town_vmt_d['TOWN']=='BOSTON,MA'].groupby(['TOWN']).sum()['VMT']
subregion_vmt = (vmt_sum_o + vmt_sum_d).values[0]
return subregion_vmt
elif metric.lower() == 'trip':
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
tripsum_table = np.zeros((md.max_zone,md.max_zone))
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
trip_table = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][sm_mode]
tripsum_table += trip_table
if subregion.lower() in subregion_dict:
field = subregion_dict[subregion.lower()]
boston_o_trip = mtx.OD_slice(tripsum_table, O_slice = md.taz['BOSTON'],D_slice = md.taz[field]== True)
boston_d_trip = mtx.OD_slice(tripsum_table, O_slice = md.taz[field]== True, D_slice = md.taz['BOSTON'])
#boston_o_trip = tripsum_table[md.taz['BOSTON'],:][:, md.taz[field]== True]
#boston_d_trip = tripsum_table[md.taz[field]== True,:][:,md.taz['BOSTON']]
town_definition = md.taz[md.taz[field]== True]
elif subregion.lower() == 'region':
boston_o_trip = mtx.OD_slice(tripsum_table, O_slice = md.taz['BOSTON'])
boston_d_trip = mtx.OD_slice(tripsum_table, D_slice = md.taz['BOSTON'])
#boston_o_trip = tripsum_table[md.taz['BOSTON'],:]
#boston_d_trip = tripsum_table[:][:,md.taz['BOSTON']]
town_definition = md.taz
zone_daily_o = pd.DataFrame(np.sum(boston_o_trip,axis=1) ,columns=["trips"])
zone_daily_d = pd.DataFrame(np.sum(boston_d_trip,axis=0) ,columns=["trips"])
town_o=pd.concat([town_definition,zone_daily_o],axis=1,join='inner')
town_d=pd.concat([town_definition,zone_daily_d],axis=1,join='inner')
sum_o = town_o[town_o['TOWN']=='BOSTON,MA'].groupby(['TOWN']).sum()['trips']
sum_d = town_d[town_d['TOWN']=='BOSTON,MA'].groupby(['TOWN']).sum()['trips']
subregion_trip = (sum_o + sum_d).values[0]
return subregion_trip
def __compute_metric_by_zone(mc_obj,metric = 'VMT'):
''' Computing function used by write_summary_by_subregion(), does not produce outputs'''
if metric.lower() == 'vmt':
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
vmt_table = np.zeros((md.max_zone,md.max_zone))
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
trip_table = sum([mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode] / md.AO_dict[mode] for mode in md.auto_modes if mode in drive_modes])
vmt_table += trip_table * skim_dict[peak]['Length (Skim)']
boston_o_auto_vmt = mtx.OD_slice(vmt_table, O_slice = md.taz['BOSTON'])
boston_d_auto_vmt = mtx.OD_slice(vmt_table,D_slice = md.taz['BOSTON'])
#boston_o_auto_vmt = vmt_table[md.taz['BOSTON'],:]
#boston_d_auto_vmt = vmt_table[:][:,md.taz['BOSTON']]
town_definition = md.taz
zone_vmt_daily_o = pd.DataFrame(np.sum(boston_o_auto_vmt,axis=0)/2 ,columns=["VMT"])
zone_vmt_daily_d = pd.DataFrame(np.sum(boston_d_auto_vmt,axis=1)/2 ,columns=["VMT"])
town_vmt_o=pd.concat([town_definition,zone_vmt_daily_o],axis=1,join='inner')
town_vmt_d=pd.concat([town_definition,zone_vmt_daily_d],axis=1,join='inner')
town_vmt = town_vmt_o.groupby(['TOWN']).sum()['VMT'] + town_vmt_d.groupby(['TOWN']).sum()['VMT']
return town_vmt
elif metric.lower() == 'pmt':
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
pmt_table = np.zeros((md.max_zone,md.max_zone))
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
trip_table = sum([mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode]
for mode in md.modes if mode in drive_modes])
pmt_table += trip_table * skim_dict[peak]['Length (Skim)']
boston_o_auto_pmt = mtx.OD_slice(pmt_table, O_slice = md.taz['BOSTON'])
boston_d_auto_pmt = mtx.OD_slice(pmt_table, D_slice = md.taz['BOSTON'])
#boston_o_auto_pmt = pmt_table[md.taz['BOSTON'],:]
#boston_d_auto_pmt = pmt_table[:][:,md.taz['BOSTON']]
town_definition = md.taz
zone_pmt_daily_o = pd.DataFrame(np.sum(boston_o_auto_pmt,axis=0)/2 ,columns=["VMT"])
zone_pmt_daily_d = pd.DataFrame(np.sum(boston_d_auto_pmt,axis=1)/2 ,columns=["VMT"])
town_pmt_o=pd.concat([town_definition,zone_pmt_daily_o],axis=1,join='inner')
town_pmt_d=pd.concat([town_definition,zone_pmt_daily_d],axis=1,join='inner')
town_pmt = town_pmt_o.groupby(['TOWN']).sum()['VMT'] + town_pmt_d.groupby(['TOWN']).sum()['VMT']
return town_pmt
elif metric.lower() == 'pmt_act':
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
pmt_table = np.zeros((md.max_zone,md.max_zone))
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
trip_table = sum([mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode]
for mode in ['Walk','Bike'] if mode in drive_modes])
pmt_table += trip_table * skim_dict[peak]['Length (Skim)']
boston_o_auto_pmt = mtx.OD_slice(pmt_table, O_slice = md.taz['BOSTON'])
boston_d_auto_pmt = mtx.OD_slice(pmt_table, D_slice = md.taz['BOSTON'])
#boston_o_auto_pmt = pmt_table[taz['BOSTON'],:]
#boston_d_auto_pmt = pmt_table[:][:,taz['BOSTON']]
town_definition = md.taz
zone_pmt_daily_o = pd.DataFrame(np.sum(boston_o_auto_pmt,axis=0)/2 ,columns=["VMT"])
zone_pmt_daily_d = pd.DataFrame(np.sum(boston_d_auto_pmt,axis=1)/2 ,columns=["VMT"])
town_pmt_o=pd.concat([town_definition,zone_pmt_daily_o],axis=1,join='inner')
town_pmt_d=pd.concat([town_definition,zone_pmt_daily_d],axis=1,join='inner')
town_pmt = town_pmt_o.groupby(['TOWN']).sum()['VMT'] + town_pmt_d.groupby(['TOWN']).sum()['VMT']
return town_pmt
def __compute_summary_by_subregion(mc_obj,metric = 'VMT',subregion = 'neighboring'):
''' Computing function used by write_summary_by_subregion(), does not produce outputs'''
if metric.lower() not in ('vmt','pmt','mode share','trip', 'pmt_act'):
print('Only supports trip, VMT, PMT and mode share calculations.')
return
if subregion.lower() not in ('boston','neighboring','i93','i495','region'):
print('Only supports within boston, "neighboring" for towns neighboring Boston, I93, I495 or Region.')
return
subregion_dict = {'boston':'BOSTON','neighboring':'BOS_AND_NEI','i93':'in_i95i93','i495':'in_i495'}
if metric.lower() == 'vmt':
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
vmt_table = np.zeros((md.max_zone,md.max_zone))
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
trip_table = sum([mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode] / md.AO_dict[mode] for mode in md.auto_modes if mode in modes])
vmt_table += trip_table * skim_dict[peak]['Length (Skim)']
if subregion.lower() in subregion_dict:
field = subregion_dict[subregion.lower()]
#boston_o_auto_vmt = vmt_table[md.taz['BOSTON'],:][:, md.taz[field]== True]
#boston_d_auto_vmt = vmt_table[md.taz[field]== True,:][:,md.taz['BOSTON']]
boston_o_auto_vmt = mtx.OD_slice(vmt_table, O_slice = md.taz['BOSTON'], D_slice = md.taz[field]== True)
boston_d_auto_vmt = mtx.OD_slice(vmt_table, O_slice = md.taz[field]== True, D_slice = md.taz['BOSTON'])
town_definition = md.taz[md.taz[field]== True]
elif subregion.lower() == 'region':
# boston_o_auto_vmt = vmt_table[md.taz['BOSTON'],:]
# boston_d_auto_vmt = vmt_table[:][:,md.taz['BOSTON']]
boston_o_auto_vmt = mtx.OD_slice(vmt_table, O_slice = md.taz['BOSTON'])
boston_d_auto_vmt = mtx.OD_slice(vmt_table, D_slice = md.taz['BOSTON'])
town_definition = md.taz
zone_vmt_daily_o = pd.DataFrame(np.sum(boston_o_auto_vmt,axis=1)/2 ,columns=["VMT"])
zone_vmt_daily_d = pd.DataFrame(np.sum(boston_d_auto_vmt,axis=0)/2 ,columns=["VMT"])
town_vmt_o=pd.concat([town_definition,zone_vmt_daily_o],axis=1,join='inner')
town_vmt_d=pd.concat([town_definition,zone_vmt_daily_d],axis=1,join='inner')
vmt_sum_o = town_vmt_o[town_vmt_o['TOWN']=='BOSTON,MA'].groupby(['TOWN']).sum()['VMT']
vmt_sum_d = town_vmt_d[town_vmt_d['TOWN']=='BOSTON,MA'].groupby(['TOWN']).sum()['VMT']
subregion_vmt = (vmt_sum_o + vmt_sum_d).values[0]
return subregion_vmt
elif metric.lower() == 'pmt':
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
pmt_table = np.zeros((md.max_zone,md.max_zone))
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
trip_table = sum([mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode]
for mode in md.modes if mode in drive_modes])
pmt_table += trip_table * skim_dict[peak]['Length (Skim)']
if subregion.lower() in subregion_dict:
field = subregion_dict[subregion.lower()]
boston_o_auto_pmt = mtx.OD_slice(pmt_table, O_slice = md.taz['BOSTON'],D_slice = md.taz[field]== True)
boston_d_auto_pmt = mtx.OD_slice(pmt_table ,O_slice = md.taz[field]== True, D_slice = md.taz['BOSTON'])
#boston_o_auto_pmt = pmt_table[md.taz['BOSTON'],:][:, md.taz[field]== True]
#boston_d_auto_pmt = pmt_table[md.taz[field]== True,:][:,md.taz['BOSTON']]
town_definition = md.taz[md.taz[field]== True]
elif subregion.lower() == 'region':
boston_o_auto_pmt = mtx.OD_slice(pmt_table, O_slice = md.taz['BOSTON'])
boston_d_auto_pmt = mtx.OD_slice(pmt_table, D_slice = md.taz['BOSTON'])
#boston_o_auto_pmt = pmt_table[md.taz['BOSTON'],:]
#boston_d_auto_pmt = pmt_table[:][:,md.taz['BOSTON']]
town_definition = md.taz
zone_pmt_daily_o = pd.DataFrame(np.sum(boston_o_auto_pmt,axis=1)/2 ,columns=["PMT"])
zone_pmt_daily_d = pd.DataFrame(np.sum(boston_d_auto_pmt,axis=0)/2 ,columns=["PMT"])
town_pmt_o=pd.concat([town_definition,zone_pmt_daily_o],axis=1,join='inner')
town_pmt_d=pd.concat([town_definition,zone_pmt_daily_d],axis=1,join='inner')
pmt_sum_o = town_pmt_o[town_pmt_o['TOWN']=='BOSTON,MA'].groupby(['TOWN']).sum()['PMT']
pmt_sum_d = town_pmt_d[town_pmt_d['TOWN']=='BOSTON,MA'].groupby(['TOWN']).sum()['PMT']
boston_portion_pmt = (pmt_sum_o + pmt_sum_d).values[0]
return boston_portion_pmt
elif metric.lower() == 'pmt_act':
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
pmt_table = np.zeros((md.max_zone,md.max_zone))
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
trip_table = sum([mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode]
for mode in ['Walk','Bike'] if mode in drive_modes])
pmt_table += trip_table * skim_dict[peak]['Length (Skim)']
if subregion.lower() in subregion_dict:
field = subregion_dict[subregion.lower()]
boston_o_auto_pmt = mtx.OD_slice(pmt_table, O_slice = md.taz['BOSTON'],D_slice = md.taz[field]== True)
boston_d_auto_pmt = mtx.OD_slice(pmt_table, O_slice = md.taz[field]== True, D_slice = md.taz['BOSTON'])
#boston_o_auto_pmt = pmt_table[md.taz['BOSTON'],:][:, md.taz[field]== True]
#boston_d_auto_pmt = pmt_table[md.taz[field]== True,:][:,md.taz['BOSTON']]
town_definition = md.taz[md.taz[field]== True]
elif subregion.lower() == 'region':
boston_o_auto_pmt = mtx.OD_slice(pmt_table,O_slice = md.taz['BOSTON'])
boston_d_auto_pmt = mtx.OD_slice(pmt_table,D_slice = md.taz['BOSTON'])
#boston_o_auto_pmt = pmt_table[md.taz['BOSTON'],:]
#boston_d_auto_pmt = pmt_table[:][:,md.taz['BOSTON']]
town_definition = md.taz
zone_pmt_daily_o = pd.DataFrame(np.sum(boston_o_auto_pmt,axis=1)/2 ,columns=["PMT"])
zone_pmt_daily_d = pd.DataFrame(np.sum(boston_d_auto_pmt,axis=0)/2 ,columns=["PMT"])
            town_pmt_o=pd.concat([town_definition,zone_pmt_daily_o],axis=1,join='inner')
            town_pmt_d=pd.concat([town_definition,zone_pmt_daily_d],axis=1,join='inner')
            pmt_sum_o = town_pmt_o[town_pmt_o['TOWN']=='BOSTON,MA'].groupby(['TOWN']).sum()['PMT']
            pmt_sum_d = town_pmt_d[town_pmt_d['TOWN']=='BOSTON,MA'].groupby(['TOWN']).sum()['PMT']
            boston_portion_pmt_act = (pmt_sum_o + pmt_sum_d).values[0]
            return boston_portion_pmt_act
import pandas as pd
import numpy as np
# import nltk
# import re
# from nltk.corpus import stopwords
# from nltk.tokenize import word_tokenize
import math
class NaiveBayesModel:
def WordGivenNoPI(self, tempNegDocVector, uniqueWords):
data = np.zeros([1, len(uniqueWords)])
wordGivenNoPI = pd.DataFrame(data, columns=uniqueWords)
columnSum = tempNegDocVector.sum(axis=1, skipna=True)
numWordsInNoPI = columnSum.sum()
for word in uniqueWords:
nk_wordinNoPI = tempNegDocVector[word].sum()
wordGivenNoPI.at[0, word] = (nk_wordinNoPI + 1) / (numWordsInNoPI + len(uniqueWords))
return (wordGivenNoPI, numWordsInNoPI)
def TrainModel(self, Train_Vector, uniqueWords):
yesCount = Train_Vector["PurchaseIntention"] == "yes"
tempPosDocVector = Train_Vector[yesCount]
totalPI = tempPosDocVector["PurchaseIntention"].count()
print("total PI ", totalPI)
noCount = Train_Vector["PurchaseIntention"] == "no"
tempNegDocVector = Train_Vector[noCount]
# print(tempNegDocVector["PurchaseIntention"])
totalNonPI = tempNegDocVector["PurchaseIntention"].count()
print("total non PI ", totalNonPI)
# print(totalPI+totalNonPI)
# totalNonPI = docVector["PurchaseIntention"].count() - totalPI
total = totalPI + totalNonPI
Prob_PI = totalPI / total
Prob_NoPI = totalNonPI / total
data = np.zeros([1, len(uniqueWords)])
wordGivenPI = pd.DataFrame(data, columns=uniqueWords)
columnSum = tempPosDocVector.sum(axis=1, skipna=True)
numWordsInPI = columnSum.sum()
for word in uniqueWords:
nk_wordinPI = tempPosDocVector[word].sum()
wordGivenPI.at[0, word] = (nk_wordinPI + 1) / (numWordsInPI + len(uniqueWords))
df_wordGivenNoPI, numWordsInNoPI = self.WordGivenNoPI(tempNegDocVector, uniqueWords)
return wordGivenPI, df_wordGivenNoPI, Prob_PI, Prob_NoPI, numWordsInPI, numWordsInNoPI
def predict(self,Prob_PI, Prob_NoPI, uniqueWords, df_WordGivenPI, df_WordGivenNoPi, numWordsInPI, numWordsInNoPI,
df_test, clean):
        predict_df = pd.DataFrame()
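    # --- Added usage sketch (hypothetical; not in the original file) ----------
    # TrainModel expects a document-term count DataFrame with a
    # 'PurchaseIntention' label column ('yes'/'no') plus the vocabulary list;
    # `train_vector` and `unique_words` below are assumed names:
    #
    #   nb = NaiveBayesModel()
    #   wPI, wNoPI, pPI, pNoPI, nPI, nNoPI = nb.TrainModel(train_vector,
    #                                                      unique_words)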
"""
The :mod:`codeless.fs` module includes methods to
select best features/most relevant fetures.
"""
# For univariate Selection
from sklearn.feature_selection import SelectKBest as _skb# SelectKBest selects the k best features
from sklearn.feature_selection import chi2 as _chi#This is used for applying the statistical analysis eg.Hypothesis testing,
# null hypothesis, alternate hypothesis etc.
# For Feature Importance
from sklearn.ensemble import ExtraTreesClassifier as _etc
# For Information Gain
from sklearn.feature_selection import mutual_info_classif as _mic
import pandas as _pd
import numpy as _np
def UVSelection(X, y, k=10):
'''
SELECT K-BEST
    Select features according to the k highest scores. The greater the score,
    the more important the feature is.
Parameters
-------------
X: Series/DataFrame
Dataframe of all dependent feature.
y: Series/DataFrame
Independent Column
k: Integer, optional
based on Score, top k columns with highest Scores are selected.
Returns
------------
DataFrame: rectangular dataset
a dataframe with two columns (ie. Feature, Score) is returned.
'Feature' containing names of all the column names & 'Score'
containing scores of each column arrange in deacreasing order
of score.
'''
# It'll take top k best feature
ordered_rank_features = _skb(score_func=_chi,k=len(X.columns))
ordered_feature=ordered_rank_features.fit(X,y)
# feature_rank = pd.DataFrame(ordered_feature.scores_)
feature_rank = _pd.DataFrame(ordered_feature.scores_, index=X.columns, columns=['Score'])
return feature_rank.nlargest(k, 'Score')
def FeatureImportance(X, y):
'''
FEATURE IMPORTANCE
    This technique gives you a score for each feature of the data;
    the higher the score, the more relevant the feature is.
Parameters
-------------
X: Series/DataFrame
Dataframe of all dependent feature.
y: Series/DataFrame
Independent Column
Returns
------------
DataFrame: rectangular dataset
a dataframe with a columns (ie. Score) is returned.
'Score' containing scores of each column arrange in deacreasing order
of score. Index column contains name of columns.
To get top n features from the dataframe write
list(ranked_features.nlargest(n, 'Score')['Feature'])
'''
model = _etc()
model.fit(X, y)
ranked_features = _pd.DataFrame(model.feature_importances_, index=X.columns, columns=['Score'])
return ranked_features.nlargest(len(X.columns), 'Score')
def Corr(df, threshold=0.9, strategy=None):
'''
CORRELATION
    Find columns whose pairwise correlation exceeds the threshold.
Parameters
-------------
df: rectangular dataset
2D dataset that can be coerced into an ndarray. If a Pandas DataFrame
is provided, the index/column information will be used to label the
columns and rows.
threshold: float, optional
        Column pairs with correlation greater than this threshold are
        taken into consideration.
strategy: string, optional
Only two value is accepted ie. None & 'drop'
        If None, the set of all columns having correlation greater than the
        threshold is returned and the dataset is left unchanged.
        If 'drop', all columns having correlation greater than the threshold
        are dropped from the dataset passed.
Returns
------------
df: rectangular dataset
df is modified dataset after all the steps are done.
col_corr: set
set of all the columns with correlation more than threshold.
'''
col_corr = set() # Set of all the names of correlated columns
corr_matrix = df.corr()
for i in range(len(corr_matrix.columns)):
for j in range(i):
            if abs(corr_matrix.iloc[i, j]) > threshold: # we are interested in the absolute coefficient value
colname = corr_matrix.columns[i] #getting the name of the column
col_corr.add(colname)
if strategy=='drop':
df = df.drop(col_corr, axis=1)
return df, col_corr
def InfoGain(X, y):
'''
INFORMATION GAIN
Estimate mutual information for a discrete target variable.
Mutual information (MI) [1]_ between two random variables is a non-negative
value, which measures the dependency between the variables. It is equal
to zero if and only if two random variables are independent, and higher
values mean higher dependency.
The function relies on nonparametric methods based on entropy estimation
from k-nearest neighbors distances as described in [2]_ and [3]_. Both
methods are based on the idea originally proposed in [4]_.
Parameters
-------------
X: Series/DataFrame
Dataframe of all dependent feature.
y: Series/DataFrame
Independent Column
Returns
------------
DataFrame: rectangular dataset
a dataframe with a columns (ie. Score) is returned.
'Score' containing scores of each column arrange in deacreasing order
of score. Index column contains name of columns.
To get top n features from the dataframe write
list(ranked_features.nlargest(n, 'Score')['Feature'])
'''
mutual_info = _mic(X, y)
# mutual_info
    mutual_data = _pd.DataFrame(mutual_info, index=X.columns, columns=['Score'])
    return mutual_data.nlargest(len(X.columns), 'Score')
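# --- Added usage sketch (not part of the original module) ---------------------
# Assumes a non-negative numeric feature frame X (required by chi2) and a
# class label vector y:
#
#   import pandas as pd
#   X = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8],
#                     'b': [0, 1, 0, 1, 0, 1, 0, 1],
#                     'c': [5, 1, 5, 1, 5, 1, 5, 1]})
#   y = pd.Series([0, 1, 0, 1, 0, 1, 0, 1])
#   UVSelection(X, y, k=2)                        # chi2-based SelectKBest scores
#   FeatureImportance(X, y)                       # ExtraTrees importances
#   Corr(X, threshold=0.9, strategy='drop')       # drop highly correlated columns
#   InfoGain(X, y)                                # mutual information scores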
"""File for saving and loading. Assumes an experiment_folder path in which can data can be freely written and modified. For specific processes it also requires
a process_id. Nothing will be stored above the experiment_folder path.
Every method should call one of these methods for any saving/loading tasks."""
import os
import pickle
import pandas as pd
import torch
import yaml
from torch.utils.tensorboard import SummaryWriter
from .training_utils import get_nets
from .utils import get_time_stamp
def get_exp_steps(experiment_folder):
exp_steps = {}
for exp_name, curr_path in exp_models_path_generator(experiment_folder):
exp_steps[exp_name] = get_all_steps(curr_path)
return exp_steps
def get_all_steps(steps_dir):
step_dict = {}
for root, dirs, files in os.walk(steps_dir):
for step_dir in dirs:
name_split_underscore = step_dir.split("_")
if len(name_split_underscore) == 1:
continue
step_dict[int(name_split_underscore[1])] = step_dir
return step_dict
def get_models(model_folder_path, step, device=None):
if step == -1:
all_steps = get_all_steps(model_folder_path)
step = int(max(all_steps.keys(), key=lambda x: int(x)))
model_path = os.path.join(model_folder_path, "step_{}".format(step))
if not os.path.exists(model_path):
return None
models_dict = {}
for root, dirs, files in os.walk(model_path):
for model_file_name in files:
model_idx = model_file_name.split("_")[1].split(".")[0]
model = load_model(os.path.join(root, model_file_name), device)
models_dict[model_idx] = model
return models_dict
def get_all_models(experiment_folder, step):
models_dict = {}
# iterate through models
for exp_name, curr_path in exp_models_path_generator(experiment_folder):
try:
models_dict[exp_name] = get_models(curr_path, step)
except:
continue
return models_dict
def cache_data(
experiment_folder, name, data, meta_dict=None, step=None, time_stamp=False
):
cache_folder = os.path.join(experiment_folder, "postprocessing", name)
if step is not None:
cache_folder = os.path.join(cache_folder, "step_{}".format(step))
if time_stamp:
cache_folder = os.path.join(cache_folder, get_time_stamp())
if not os.path.exists(cache_folder):
os.makedirs(cache_folder)
with open(os.path.join(cache_folder, "data.pkl"), "wb") as f:
pickle.dump(data, f)
if meta_dict is not None:
with open(os.path.join(cache_folder, "meta.yml"), "w") as f:
yaml.dump(meta_dict, f)
return cache_folder
def load_cached_data(experiment_folder, name, step=None, time_stamp=None):
cache_folder = os.path.join(experiment_folder, "postprocessing", name)
if step is not None:
cache_folder = os.path.join(cache_folder, "step_{}".format(step))
if time_stamp is not None:
cache_folder = os.path.join(cache_folder, time_stamp)
cached_data_path = os.path.join(cache_folder, "data.pkl")
if os.path.isfile(cached_data_path):
with open(cached_data_path, "rb") as f:
cached_data = pickle.load(f)
else:
cached_data = None
cached_meta_path = os.path.join(cache_folder, "meta.yml")
if os.path.isfile(cached_meta_path):
with open(cached_meta_path, "rb") as f:
            cached_meta_data = yaml.safe_load(f)
else:
cached_meta_data = None
return cached_data, cached_meta_data
def exp_models_path_generator(experiment_folder):
for curr_dir in os.listdir(os.path.join(experiment_folder, "models")):
root = os.path.join(experiment_folder, "models", curr_dir)
yield curr_dir, root
def save_models(models, model_name, model_params, experiment_root, curr_exp_name, step):
models_path = os.path.join(
experiment_root, "models", curr_exp_name, "step_{}".format(step)
)
if not os.path.exists(models_path):
os.makedirs(models_path)
for idx_model in range(len(models)):
torch.save(
{
"model_name": model_name,
"model_params": model_params,
"model_state_dict": models[idx_model].state_dict(),
},
os.path.join(models_path, "model_{}.pt".format(idx_model)),
)
def load_model(PATH, device=None):
if device is None:
device = torch.device("cpu")
meta_data = torch.load(PATH, map_location=device)
model = get_nets(
meta_data["model_name"], meta_data["model_params"], num_nets=1, device=device
)[0]
model.load_state_dict(meta_data["model_state_dict"])
return model
def load_configs(experiment_folder):
config_dir = {}
for root, dirs, files in os.walk(
os.path.join(experiment_folder, "runs"), topdown=False
):
if len(files) != 2:
continue
curr_dir = os.path.basename(root)
with open(os.path.join(root, "config.yml"), "rb") as f:
            config = yaml.safe_load(f)
config_dir[curr_dir] = config
config_dir[curr_dir]["net_params"] = tuple(config_dir[curr_dir]["net_params"])
    return pd.DataFrame(config_dir)
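# --- Added usage sketch (hypothetical; not in the original file) --------------
# The folder layout written by save_models() (<root>/models/<exp>/step_<n>/
# model_<i>.pt) is what get_models() and load_model() read back; `models`,
# `net_params` and `exp_root` below are assumed to exist:
#
#   save_models(models, "mlp", net_params, exp_root, "run_a", step=100)
#   restored = get_models(os.path.join(exp_root, "models", "run_a"), step=-1)
#   cache_data(exp_root, "eval_loss", {"run_a": 0.12}, step=100)
#   data, meta = load_cached_data(exp_root, "eval_loss", step=100)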
import torch
import numpy as np
import matplotlib.pyplot as plt
import pandas
import os
import seaborn as sns
from argparse import ArgumentParser
from models.utils.continual_model import ContinualModel
from datasets.utils.continual_dataset import ContinualDataset
from typing import Tuple
from utils.conf import base_path
from datasets import NAMES as DATASET_NAMES
class MainVisual:
def __init__(self):
self.markers = ['*-', '*:', '^-', '^:', 'o-', 'o:', 'v-', 'v:', 'x-', 'x:',
'o--', '*--', 'v--', '^--']
self.ptms = ["albert", "bert", "gpt2", "roberta", "xlnet"]
self.datasets = ["seq-clinc150", "seq-maven", "seq-webred"]
self.settings = ["class", "task"]
self.prob_types = ["proto", "final"]
self.methods = ["vanilla", "ewc", "hat", "er", "derpp", "joint"]
self.num_tasks = {"clinc150": 15, "maven": 16, "webred": 24}
self.bsize = [200, 500, 1000]
self.time = ["time"]
self.ft = ["forward_transfer"]
self.bt = ["backward_transfer"]
self.fgt = ["forgetting"]
self.selection = {
"method": self.methods,
"ptm": self.ptms,
"time": self.time,
"ft": self.ft,
"bt": self.bt,
"fgt": self.fgt
}
def visualize(self, xs, tags, results, x_label, y_label, out_file, title):
for i, value in enumerate(results):
plt.plot(xs, value, self.markers[i], label=tags[i])
plt.legend()
plt.xlabel(x_label, fontsize=10)
plt.ylabel(y_label, fontsize=10)
plt.title(title.split(".")[0])
plt.savefig(out_file)
plt.clf()
pass
def visualize_grouped_bar(self, x_label, y_label, hue, title, data, file_path):
sns.set_theme(style="whitegrid")
# Draw a nested barplot by species and sex
g = sns.catplot(
data=data, kind="bar",
x=x_label, y=y_label, hue=hue,
ci="sd", palette="viridis", alpha=.6, height=6
)
sns.set(rc={"figure.dpi": 300, 'savefig.dpi': 300})
g.despine(left=True)
# plt.xlabel(x_label, fontsize=15)
plt.ylabel(y_label, fontsize=15)
g.legend.set_title(hue)
# plt.title(title)
if y_label == "accuracy":
plt.ylim(0, 105)
g.savefig(file_path)
plt.clf()
def merg_data(self, datasets, setting=None, merge_all=False):
clumns = ["PLM", "Method", "forward transfer", "backward transfer", "forgetting", "time", "dataset", "task"]
all_df = None
if not merge_all:
for ds in datasets:
file_name = "{dataset}_{setting}".format(dataset=ds, setting=setting)
in_file = "./data/detail_result/{file_name}.csv".format(file_name=file_name)
last_task = "task{id}".format(id=self.num_tasks[ds])
clumns[-1] = last_task
df = pandas.read_csv(in_file)
sub_df = pandas.DataFrame(df[clumns])
sub_df = sub_df.rename(columns={last_task: "mean accuracy"})
length = len(sub_df)
sub_df["setting"] = [setting] * length
if all_df is None:
all_df = sub_df
else:
all_df = pandas.concat([all_df, sub_df])
return all_df
else:
for ds in datasets:
for set_ in self.settings:
file_name = "{dataset}_{setting}".format(dataset=ds, setting=set_)
in_file = "./data/detail_result/{file_name}.csv".format(file_name=file_name)
last_task = "task{id}".format(id=self.num_tasks[ds])
clumns[-1] = last_task
df = pandas.read_csv(in_file)
sub_df = pandas.DataFrame(df[clumns])
sub_df = sub_df.rename(columns={last_task: "accuracy"})
length = len(sub_df)
sub_df["setting"] = [set_ + "-il"] * length
if all_df is None:
all_df = sub_df
else:
                    all_df = pandas.concat([all_df, sub_df])
            return all_df
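    # --- Added usage sketch (hypothetical; not in the original file) ----------
    # merg_data expects the ./data/detail_result/<dataset>_<setting>.csv files
    # referenced above to exist:
    #
    #   mv = MainVisual()
    #   acc_df = mv.merg_data(mv.datasets, merge_all=True)
    #   mv.visualize_grouped_bar("dataset", "accuracy", "Method",
    #                            "CL accuracy", acc_df, "acc_by_dataset.png")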
# -*- coding: UTF-8 -*-
"""
This module contains functions for calculating evaluation metrics for the generated service recommendations.
"""
import numpy
import pandas
runtime_metrics = ["Training time", "Overall testing time", "Individual testing time"]
quality_metrics = ["Recall", "Precision", "F1", "# of recommendations"]
def results_as_dataframe(user_actions, recommendations):
"""
Converts the recommendation results into a pandas dataframe for easier evaluation.
@param user_actions: A list of the actually performed user actions.
@param recommendations: For each of the performed actions the list of calculated service recommendations.
@return: A pandas dataframe that has as index the performed user actions (there is one row per action). The first
column contains for each action the highest scoring recommendation, the second column contains the second best
recommendation etc.
"""
results = pandas.DataFrame(recommendations, index=pandas.Index(user_actions, name="Actual action"))
results.columns = [(r+1) for r in range(len(results.columns))]
return results
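# Added illustration (not in the original module): each row is one performed
# action, column 1 holds the top-ranked recommendation, column 2 the second
# best, and so on.
#
#   df = results_as_dataframe(["tv_on", "radio_off"],
#                             [["tv_on", "light_on"], ["radio_off", "tv_on"]])
#   # df.columns == [1, 2]; df.loc["tv_on", 1] == "tv_on"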
class QualityMetricsCalculator():
"""
This is a utility class that contains a number of methods for calculating overall quality metrics for the produced
recommendations. In general these methods produce pandas dataframes with several rows, where each row corresponds
to one "cutoff" point. For example, a cutoff "4" means that the system cuts the number of recommendations at four,
i.e. the user is shown at most four recommendations. If some post-processing method was used (e.g. show fewer
recommendations if the recommendation conflict is low), then it can happen that fewer than four recommendations
are shown. For reference, the column "# of recommendations" lists the average of the number of recommendations that
were actually shown to the user.
"""
def __init__(self, actual_actions, recommendations):
"""
Initialize the calculation of the quality metrics..
@param actual_actions: A list of strings, each representing one actual user action.
@param recommendations: A list of lists of strings with the same length as actual_actions. Each list of
strings contains the calculated recommendations for the corresponding actual user action.
@return:
"""
self.results = results_as_dataframe(actual_actions, recommendations)
def __unique_actions__(self):
"""
It can happen that one potential user action never happened, but that the corresponding service was recommended.
To be able to count these false positives, we must calculate the list of all potential actions.
"""
occurring_actions = set(self.results.index.values)
occurring_services = pandas.melt(self.results).dropna()["value"]
occurring_services = set(occurring_services.unique())
return sorted(occurring_actions | occurring_services)
def true_positives(self, action):
"""
Counts how often the given action was recommended correctly (true positives, TP).
@param action: The name of the user action for which to count true positives.
@return: A pandas dataset with column TP and several rows, first row lists #TP at cutoff "1", the second row at
cutoff "2", etc.
"""
#get all rows where the actual action corresponds to the given action
r = self.results[self.results.index == action]
if len(r) == 0:
#if there are no such rows, then we have zero true positives, fill result dataframe with zeroes
true_positives = pandas.Series(0.0, index=self.results.columns)
else:
#if recommendation matches the action, set column to "1" (true positive), else set to "0" (false negative)
r = r.applymap(lambda col: 1 if col == action else 0).fillna(0)
#count how many true positives there are in each column
r = r.sum()
#if have a true positive for n-th recommendation, then also have true positive for n+1, n+2 etc
#-> calculate cumulative sum
true_positives = r.cumsum(axis=0).apply(float)
        true_positives = pandas.DataFrame(true_positives, columns=["TP"])
        return true_positives
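    # Added illustration (not in the original class): a single matching
    # recommendation at rank 1 stays a true positive for every larger cutoff.
    #
    #   QualityMetricsCalculator(["tv_on"], [["tv_on", "light_on"]]) \
    #       .true_positives("tv_on")
    #   # -> TP column equal to 1.0 at cutoffs 1 and 2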
"""
Packages to use :
tsfresh
tsfel https://tsfel.readthedocs.io/en/latest/
sktime
feature tools : https://docs.featuretools.com/en/stable/automated_feature_engineering/handling_time.html
Cesium http://cesium-ml.org/docs/feature_table.html
Feature Tools for advanced features: https://github.com/Featuretools/predict-remaining-useful-life/blob/master/Advanced%20Featuretools%20RUL.ipynb
"""
import pandas as pd
import tsfresh
from tsfresh import extract_relevant_features, extract_features
import numpy as np
import pdb
import re
def features_time_basic(df, input_raw_path = None, dir_out = None, features_group_name = None, auxiliary_csv_path = None, drop_cols = None, index_cols = None, merge_cols_mapping = None, cat_cols = None, id_cols = None, dep_col = None, max_rows = 10):
df['date_t'] = pd.to_datetime(df['date'])
df['year'] = df['date_t'].dt.year
df['month'] = df['date_t'].dt.month
df['week'] = df['date_t'].dt.week
df['day'] = df['date_t'].dt.day
df['dayofweek'] = df['date_t'].dt.dayofweek
cat_cols = []
return df[['year', 'month', 'week', 'day', 'dayofweek'] + id_cols], cat_cols
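# --- Added usage sketch (not part of the original module) ---------------------
# features_time_basic only needs a 'date' column plus the id columns to carry
# through; the other parameters can stay at their defaults:
#
#   toy = pd.DataFrame({'date': ['2020-01-01', '2020-02-15'], 'item_id': [1, 2]})
#   feats, cat_cols = features_time_basic(toy, id_cols=['item_id'])
#   # feats columns: year, month, week, day, dayofweek, item_id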
def features_lag(df, fname):
out_df = df[['item_id', 'dept_id', 'cat_id', 'store_id', 'state_id']]
###############################################################################
# day lag 29~57 day and last year's day lag 1~28 day
day_lag = df.iloc[:,-28:]
day_year_lag = df.iloc[:,-393:-365]
day_lag.columns = [str("lag_{}_day".format(i)) for i in range(29,57)] # Rename columns
day_year_lag.columns = [str("lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
# Rolling mean(3) and (7) and (28) and (84) 29~57 day and last year's day lag 1~28 day
rolling_3 = df.iloc[:,-730:].T.rolling(3).mean().T.iloc[:,-28:]
rolling_3.columns = [str("rolling3_lag_{}_day".format(i)) for i in range(29,57)] # Rename columns
rolling_3_year = df.iloc[:,-730:].T.rolling(3).mean().T.iloc[:,-393:-365]
rolling_3_year.columns = [str("rolling3_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
rolling_7 = df.iloc[:,-730:].T.rolling(7).mean().T.iloc[:,-28:]
rolling_7.columns = [str("rolling7_lag_{}_day".format(i)) for i in range(29,57)] # Rename columns
rolling_7_year = df.iloc[:,-730:].T.rolling(7).mean().T.iloc[:,-393:-365]
rolling_7_year.columns = [str("rolling7_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
rolling_28 = df.iloc[:,-730:].T.rolling(28).mean().T.iloc[:,-28:]
rolling_28.columns = [str("rolling28_lag_{}_day".format(i)) for i in range(29,57)]
rolling_28_year = df.iloc[:,-730:].T.rolling(28).mean().T.iloc[:,-393:-365]
rolling_28_year.columns = [str("rolling28_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
rolling_84 = df.iloc[:,-730:].T.rolling(84).mean().T.iloc[:,-28:]
rolling_84.columns = [str("rolling84_lag_{}_day".format(i)) for i in range(29,57)]
rolling_84_year = df.iloc[:,-730:].T.rolling(84).mean().T.iloc[:,-393:-365]
rolling_84_year.columns = [str("rolling84_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
# monthly lag 1~18 month
month_lag = pd.DataFrame({})
for i in range(1,19):
if i == 1:
monthly = df.iloc[:,-28*i:].T.sum().T
month_lag["monthly_lag_{}_month".format(i)] = monthly
else:
monthly = df.iloc[:, -28*i:-28*(i-1)].T.sum().T
month_lag["monthly_lag_{}_month".format(i)] = monthly
# combine day lag and monthly lag
out_df = pd.concat([out_df, day_lag], axis=1)
out_df = pd.concat([out_df, day_year_lag], axis=1)
out_df = pd.concat([out_df, rolling_3], axis=1)
out_df = pd.concat([out_df, rolling_3_year], axis=1)
out_df = pd.concat([out_df, rolling_7], axis=1)
out_df = pd.concat([out_df, rolling_7_year], axis=1)
out_df = pd.concat([out_df, rolling_28], axis=1)
out_df = pd.concat([out_df, rolling_28_year], axis=1)
out_df = pd.concat([out_df, rolling_84], axis=1)
out_df = pd.concat([out_df, rolling_84_year], axis=1)
out_df = pd.concat([out_df, month_lag], axis=1)
###############################################################################
# dept_id
group_dept = df.groupby("dept_id").sum()
# day lag 29~57 day and last year's day lag 1~28 day
dept_day_lag = group_dept.iloc[:,-28:]
dept_day_year_lag = group_dept.iloc[:,-393:-365]
dept_day_lag.columns = [str("dept_lag_{}_day".format(i)) for i in range(29,57)]
dept_day_year_lag.columns = [str("dept_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
# monthly lag 1~18 month
month_dept_lag = pd.DataFrame({})
for i in range(1,19):
if i == 1:
monthly_dept = group_dept.iloc[:,-28*i:].T.sum().T
month_dept_lag["dept_monthly_lag_{}_month".format(i)] = monthly_dept
elif i >= 7 and i < 13:
continue
else:
            monthly_dept = group_dept.iloc[:, -28*i:-28*(i-1)].T.sum().T
            month_dept_lag["dept_monthly_lag_{}_month".format(i)] = monthly_dept
# combine out df
out_df = pd.merge(out_df, dept_day_lag, left_on="dept_id", right_index=True, how="left")
    out_df = pd.merge(out_df, dept_day_year_lag, left_on="dept_id", right_index=True, how="left")
import os
import pandas as pd
import mygene
from util_path import get_path
from util_dei import filter_dei
res_dir = get_path("resource/Entrez")
gene_dir = get_path("vertex/gene")
mg = mygene.MyGeneInfo()
def read_gene2ensembl():
global res_dir
g2e_df = pd.read_csv(os.path.join(res_dir, "gene2ensembl_9606.tsv"), sep="\t", header=None,
names=["Tax_ID", "Entrez_Gene_ID", "Ensembl_Gene_ID"])
unique_tax_ids = g2e_df.Tax_ID.unique()
assert(len(unique_tax_ids) == 1)
assert(unique_tax_ids[0] == 9606)
g2e_df = g2e_df.drop("Tax_ID", axis=1).drop_duplicates().reindex()
return g2e_df
def read_biomart(GRCh):
global res_dir
if GRCh == "37":
filename = "GRCh37_p13_mart_export.txt"
elif GRCh == "38":
filename = "GRCh38_p12_mart_export.txt"
else:
raise ValueError("Cannot recognize GRCh param. Got {}. Use string '37' or '38' instead.".format(GRCh))
biomart_df = pd.read_csv(os.path.join(res_dir, filename), sep="\t", dtype=str)
biomart_df = biomart_df.rename(columns={"Gene stable ID": "Ensembl_Gene_ID",
"HGNC ID": "HGNC_ID",
"Transcript stable ID": "Ensembl_Tx_ID"})
if GRCh == "37":
return biomart_df
else: # GRCh == "38"
# Note that in GRCh38 BioMart data, every HGNC ID starts with a prefix "HGNC:"
# No such prefix in GRCh37 BioMart data
if all(biomart_df.HGNC_ID.dropna().str.startswith("HGNC:")):
biomart_df.loc[:, "HGNC_ID"] = biomart_df.loc[:, "HGNC_ID"].apply(lambda x: x.split(":")[1] if not | pd.isnull(x) | pandas.isnull |
"""Parse Tecan files, group lists and fit titrations.
(Titrations are described in list.pH or list.cl file.
Builds 96 titrations and export them in txt files. In the case of 2 labelblocks
performs a global fit saving a png and printing the fitting results.)
:ref:`prtecan parse`:
* Labelblock
* Tecanfile
:ref:`prtecan group`:
* LabelblocksGroup
* TecanfilesGroup
* Titration
* TitrationAnalysis
Functions
---------
.. autofunction:: fit_titration
.. autofunction:: fz_Kd_singlesite
.. autofunction:: fz_pK_singlesite
.. autofunction:: extract_metadata
.. autofunction:: strip_lines
"""
import copy
import hashlib
import itertools
import os
import warnings
from typing import Any, Dict, List, Optional, Tuple, Union # , overload
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import scipy.stats
import seaborn as sb
from matplotlib.backends.backend_pdf import PdfPages
list_of_lines = List[List]
# bug xdoctest-3.7 #import numpy.typing as npt
def strip_lines(lines: list_of_lines) -> list_of_lines:
"""Remove empty fields/cells from lines read from a csv file.
([a,,b,,,]-->[a,b])
Parameters
----------
lines
Lines that are a list of fields, typically from a csv/xls file.
Returns
-------
Lines removed from blank cells.
"""
stripped_lines = []
for line in lines:
sl = [line[i] for i in range(len(line)) if line[i] != '']
stripped_lines.append(sl)
return stripped_lines
# def extract_metadata(lines: list_of_lines) -> Dict[str, Union[str, float, List[Any]]]:
def extract_metadata(lines: list_of_lines) -> Dict[str, Any]:
"""Extract metadata from a list of stripped lines.
First field is the *key*, remaining fields goes into a list of values::
['', 'key', '', '', 'value1', '', ..., 'valueN', ''] -->
{key: [value1, ..., valueN]}
*Label* and *Temperature* are two exceptions::
['Label: labelXX', '', '', '', '', '', '', '']
        ['', 'Temperature: XX °C', '', '', '', '', '', '']
Parameters
----------
lines
Lines that are a list of fields, typically from a csv/xls file.
Returns
-------
Metadata for Tecanfile or Labelblock.
"""
stripped_lines = strip_lines(lines)
temp = {
        'Temperature': float(line[0].split(':')[1].split('°C')[0])
for line in stripped_lines
if len(line) == 1 and 'Temperature' in line[0]
}
labl = {
'Label': line[0].split(':')[1].strip()
for line in stripped_lines
if len(line) == 1 and 'Label' in line[0]
}
m1 = {
line[0]: line[0]
for line in stripped_lines
if len(line) == 1 and 'Label' not in line[0] and 'Temperature' not in line[0]
}
m2: Dict[str, Union[str, float, List[str]]] = {
line[0]: line[1:] for line in stripped_lines if len(line) > 1
}
m2.update(m1)
m2.update(temp)
m2.update(labl)
return m2
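# Hedged doctest-style example (illustrative only; the field values are made up):
# >>> extract_metadata([['Label: Label1', '', ''], ['', 'Temperature: 25.5 °C', ''], ['Gain', '81', 'Manual']])
# {'Gain': ['81', 'Manual'], 'Temperature': 25.5, 'Label': 'Label1'}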
def fz_Kd_singlesite(K: float, p: np.ndarray, x: np.ndarray) -> np.ndarray:
"""Fit function for Cl titration."""
return (p[0] + p[1] * x / K) / (1 + x / K)
def fz_pK_singlesite(K: float, p: np.ndarray, x: np.ndarray) -> np.ndarray:
"""Fit function for pH titration."""
return (p[1] + p[0] * 10 ** (K - x)) / (1 + 10 ** (K - x))
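# Hedged examples (illustrative only): both fit functions interpolate between the
# two plateaus in p as x crosses the binding constant K.
# >>> fz_Kd_singlesite(10.0, np.array([2.0, 0.0]), np.array([0.0, 10.0]))
# array([2., 1.])
# >>> fz_pK_singlesite(7.0, np.array([1.0, 3.0]), np.array([7.0]))
# array([2.])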
def fit_titration(
kind: str,
x: np.ndarray,
y: np.ndarray,
y2: Optional[np.ndarray] = None,
residue: Optional[np.ndarray] = None,
residue2: Optional[np.ndarray] = None,
tval_conf: float = 0.95,
) -> pd.DataFrame:
"""Fit pH or Cl titration using a single-site binding model.
Returns confidence interval (default=0.95) for fitting params (cov*tval), rather than
standard error of the fit. Use scipy leastsq. Determine 3 fitting parameters:
- binding constant *K*
- and 2 plateau *SA* and *SB*.
Parameters
----------
kind
Titration type {'pH'|'Cl'}
x, y
Main dataset.
y2
Second dataset (share x with main dataset).
residue
Residues for main dataset.
residue2
Residues for second dataset.
tval_conf
Confidence level (default 0.95) for parameter estimations.
Returns
-------
Fitting results.
Raises
------
NameError
When kind is different than "pH" or "Cl".
Examples
--------
>>> fit_titration("Cl", [1, 10, 30, 100, 200], [10, 8, 5, 1, 0.1])[["K", "sK"]]
K sK
0 38.955406 30.201929
"""
if kind == 'pH':
fz = fz_pK_singlesite
elif kind == 'Cl':
fz = fz_Kd_singlesite
else:
raise NameError('kind= pH or Cl')
def compute_p0(x: np.ndarray, y: np.ndarray) -> np.ndarray:
df = pd.DataFrame({'x': x, 'y': y})
SA = df.y[df.x == min(df.x)].values[0]
SB = df.y[df.x == max(df.x)].values[0]
K = np.average([max(y), min(y)])
try:
x1, y1 = df[df['y'] >= K].values[0]
except IndexError:
x1 = np.nan
y1 = np.nan
try:
x2, y2 = df[df['y'] <= K].values[0]
except IndexError:
x2 = np.nan
y2 = np.nan
K = (x2 - x1) / (y2 - y1) * (K - y1) + x1
return np.r_[K, SA, SB]
x = np.array(x)
y = np.array(y)
if y2 is None:
def ssq1(p: np.ndarray, x: np.ndarray, y1: np.ndarray) -> np.ndarray:
return np.r_[y1 - fz(p[0], p[1:3], x)]
p0 = compute_p0(x, y)
p, cov, info, msg, success = scipy.optimize.leastsq(
ssq1, p0, args=(x, y), full_output=True, xtol=1e-11
)
else:
def ssq2(
p: np.ndarray,
x: np.ndarray,
y1: np.ndarray,
y2: np.ndarray,
rd1: np.ndarray,
rd2: np.ndarray,
) -> np.ndarray:
return np.r_[
(y1 - fz(p[0], p[1:3], x)) / rd1**2,
(y2 - fz(p[0], p[3:5], x)) / rd2**2,
]
p1 = compute_p0(x, y)
p2 = compute_p0(x, y2)
ave = np.average([p1[0], p2[0]])
p0 = np.r_[ave, p1[1], p1[2], p2[1], p2[2]]
tmp = scipy.optimize.leastsq(
ssq2, p0, full_output=True, xtol=1e-11, args=(x, y, y2, residue, residue2)
)
p, cov, info, msg, success = tmp
res = pd.DataFrame({'ss': [success]})
res['msg'] = msg
if 1 <= success <= 4:
try:
tval = (tval_conf + 1) / 2
chisq = sum(info['fvec'] * info['fvec'])
res['df'] = len(y) - len(p)
res['tval'] = scipy.stats.distributions.t.ppf(tval, res.df)
res['chisqr'] = chisq / res.df
res['K'] = p[0]
res['SA'] = p[1]
res['SB'] = p[2]
if y2 is not None:
res['df'] += len(y2)
res['tval'] = scipy.stats.distributions.t.ppf(tval, res.df)
res['chisqr'] = chisq / res.df
res['SA2'] = p[3]
res['SB2'] = p[4]
res['sSA2'] = np.sqrt(cov[3][3] * res.chisqr) * res.tval
res['sSB2'] = np.sqrt(cov[4][4] * res.chisqr) * res.tval
res['sK'] = np.sqrt(cov[0][0] * res.chisqr) * res.tval
res['sSA'] = np.sqrt(cov[1][1] * res.chisqr) * res.tval
res['sSB'] = np.sqrt(cov[2][2] * res.chisqr) * res.tval
except TypeError:
pass # if some params are not successfully determined.
return res
class Labelblock:
"""Parse a label block within a Tecan file.
Parameters
----------
tecanfile :
Object containing (has-a) this Labelblock.
lines :
Lines for this Labelblock.
Attributes
----------
tecanfile
metadata : dict
Metadata specific for this Labelblock.
data : Dict[str, float]
The 96 data values as {'well_name': value}.
Raises
------
Exception
When data do not correspond to a complete 96-well plate.
Warns
-----
Warning
When it replaces "OVER" with ``np.nan`` for any saturated value.
"""
def __init__(
self,
tecanfile: Optional['Tecanfile'],
lines: list_of_lines,
) -> None:
try:
assert lines[14][0] == '<>' and lines[23] == lines[24] == [
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
]
except AssertionError as err:
raise Exception('Cannot build Labelblock: not 96 wells?') from err
stripped = strip_lines(lines)
stripped[14:23] = []
self.tecanfile = tecanfile
self.metadata = extract_metadata(stripped)
self.data = self._extract_data(lines[15:23])
def _extract_data(self, lines: list_of_lines) -> Dict[str, float]:
"""Convert data into a dictionary.
{'A01' : value}
:
{'H12' : value}
Parameters
----------
lines
xls file read into lines.
Returns
-------
dict
Data from a label block.
Raises
------
Exception
When something went wrong. Possibly because not 96-well.
Warns
-----
When a cell contains saturated signal (converted into np.nan).
"""
rownames = tuple('ABCDEFGH')
data = {}
try:
assert len(lines) == 8
for i, row in enumerate(rownames):
assert lines[i][0] == row # e.g. "A" == "A"
for col in range(1, 13):
try:
data[row + "{0:0>2}".format(col)] = float(lines[i][col])
except ValueError:
data[row + "{0:0>2}".format(col)] = np.nan
path = self.tecanfile.path if self.tecanfile else ""
warnings.warn(
"OVER value in {0}{1:0>2} well for {2} of tecanfile: {3}".format(
row, col, self.metadata['Label'], path
)
)
except AssertionError as err:
raise Exception("Cannot extract data in Labelblock: not 96 wells?") from err
return data
KEYS = [
'Emission Bandwidth',
'Emission Wavelength',
'Excitation Bandwidth',
'Excitation Wavelength',
'Integration Time',
'Mode',
'Number of Flashes',
]
def __eq__(self, other: object) -> bool:
"""Two labelblocks are equal when metadata KEYS are identical."""
# Identical labelblocks can be grouped safely into the same titration; otherwise
# some kind of normalization (# of flashes, gain, etc.) would be
# necessary.
if not isinstance(other, Labelblock):
return NotImplemented
eq: bool = True
for k in Labelblock.KEYS:
eq *= self.metadata[k] == other.metadata[k]
# 'Gain': [81.0, 'Manual'] = 'Gain': [81.0, 'Optimal'] They are equal
eq *= self.metadata['Gain'][0] == other.metadata['Gain'][0]
# annotation error: Value of type "Union[str, float, List[str]]" is not indexable
return eq
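# Hedged usage sketch (illustrative only; assumes `tecanfile` is a parsed Tecanfile):
# the equality above is what makes it safe to pool labelblocks measured with
# identical settings, e.g.
# >>> same_settings = [lb for lb in tecanfile.labelblocks if lb == tecanfile.labelblocks[0]]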
class Tecanfile:
"""Parse a .xls file as exported from Tecan.
Parameters
----------
path
Name of the xls file.
Attributes
----------
path
metadata : dict
General metadata for Tecanfile e.g. 'Date:' or 'Shaking Duration:'.
labelblocks : List[Labelblock]
All labelblocks contained in the file.
Methods
-------
read_xls(path) :
Read xls file at path.
lookup_csv_lines(csvl, pattern='Label: Label', col=0) :
Return row index for pattern found at col.
Raises
------
FileNotFoundError
When path does not exist.
Exception
When no Labelblock is found.
"""
def __init__(self, path: str) -> None:
csvl = Tecanfile.read_xls(path)
idxs = Tecanfile.lookup_csv_lines(csvl, pattern='Label: Label', col=0)
if len(idxs) == 0:
raise Exception('No Labelblock found.')
# path
self.path = path
# metadata
self.metadata = extract_metadata(csvl[: idxs[0]])
# labelblocks
labelblocks = []
n_labelblocks = len(idxs)
idxs.append(len(csvl))
for i in range(n_labelblocks):
labelblocks.append(Labelblock(self, csvl[idxs[i] : idxs[i + 1]]))
self.labelblocks = labelblocks
def __eq__(self, other: object) -> bool:
"""Two Tecanfile are equal if their attributes are."""
# never used thus far.
# https://izziswift.com/compare-object-instances-for-equality-by-their-attributes/
return self.__dict__ == other.__dict__
def __hash__(self) -> int:
"""Define hash (related to __eq__) using self.path."""
return hash(self.path)
@classmethod
def read_xls(cls, path: str) -> list_of_lines:
"""Read first sheet of an xls file.
Parameters
----------
path
Path to .xls file.
Returns
-------
Lines.
"""
df = | pd.read_excel(path) | pandas.read_excel |
from datetime import timedelta
from operator import methodcaller
import itertools
import math
import pytest
sa = pytest.importorskip('sqlalchemy')
pytest.importorskip('psycopg2')
import os
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from datashape import dshape
from odo import odo, drop, discover
from blaze import (
data,
atan2,
by,
coalesce,
compute,
concat,
cos,
greatest,
join,
least,
radians,
sin,
sqrt,
symbol,
transform,
)
from blaze.interactive import iscorescalar
from blaze.utils import example, normalize
names = ('tbl%d' % i for i in itertools.count())
@pytest.fixture(scope='module')
def pg_ip():
return os.environ.get('POSTGRES_IP', 'localhost')
@pytest.fixture
def url(pg_ip):
return 'postgresql://postgres@{}/test::%s'.format(pg_ip)
@pytest.yield_fixture
def sql(url):
ds = dshape('var * {A: string, B: int64}')
try:
t = data(url % next(names), dshape=ds)
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
assert t.dshape == ds
t = data(odo([('a', 1), ('b', 2)], t))
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def sql_with_null(url):
ds = dshape(""" var * {name: ?string,
sex: ?string,
amount: int,
id: int,
comment: ?string}
""")
rows = [('Alice', 'F', 100, 1, 'Alice comment'),
(None, 'M', 300, 2, None),
('Drew', 'F', 100, 4, 'Drew comment'),
('Bob', 'M', 100, 5, 'Bob comment 2'),
('Drew', 'M', 200, 5, None),
('first', None, 300, 4, 'Missing info'),
(None, None, 300, 6, None)]
try:
x = url % next(names)
t = data(x, dshape=ds)
print(x)
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
assert t.dshape == ds
t = data(odo(rows, t))
try:
yield t
finally:
drop(t)
@pytest.yield_fixture(scope='module')
def nyc(pg_ip):
# odoing csv -> pandas -> postgres is more robust, as it doesn't require
# the postgres server to be on the same filesystem as the csv file.
nyc_pd = odo(example('nyc.csv'), pd.DataFrame)
try:
t = odo(nyc_pd,
'postgresql://postgres@{}/test::nyc'.format(pg_ip))
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def big_sql(url):
try:
t = data(url % next(names), dshape='var * {A: string, B: int64}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
t = odo(zip(list('a'*100), list(range(100))), t)
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def sqla(url):
try:
t = data(url % next(names), dshape='var * {A: ?string, B: ?int32}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
t = odo([('a', 1), (None, 1), ('c', None)], t)
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def sqlb(url):
try:
t = data(url % next(names), dshape='var * {A: string, B: int64}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
t = odo([('a', 1), ('b', 2)], t)
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def sql_with_dts(url):
try:
t = data(url % next(names), dshape='var * {A: datetime}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
t = odo([(d,) for d in pd.date_range('2014-01-01', '2014-02-01')], t)
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def sql_with_timedeltas(url):
try:
t = data(url % next(names), dshape='var * {N: timedelta}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
t = odo([(timedelta(seconds=n),) for n in range(10)], t)
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def sql_two_tables(url):
dshape = 'var * {a: int32}'
try:
t = data(url % next(names), dshape=dshape)
u = data(url % next(names), dshape=dshape)
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield u, t
finally:
drop(t)
drop(u)
@pytest.yield_fixture
def products(url):
try:
products = data(url % 'products',
dshape="""var * {
product_id: int64,
color: ?string,
price: float64}""",
primary_key=['product_id'])
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield products
finally:
drop(products)
@pytest.yield_fixture
def orders(url, products):
try:
orders = data(url % 'orders',
dshape="""var * {
order_id: int64,
product_id: map[int64, T],
quantity: int64}""",
foreign_keys=dict(product_id=products.data.c.product_id),
primary_key=['order_id'])
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield orders
finally:
drop(orders)
# TODO: scope these as module because I think pytest is caching sa.Table, which
# doesn't work if remove it after every run
@pytest.yield_fixture
def main(url):
try:
main = odo([(i, int(np.random.randint(10))) for i in range(13)],
url % 'main',
dshape=dshape('var * {id: int64, data: int64}'),
primary_key=['id'])
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield main
finally:
drop(main)
@pytest.yield_fixture
def pkey(url, main):
choices = [u'AAPL', u'HPQ', u'ORCL', u'IBM', u'DOW', u'SBUX', u'AMD',
u'INTC', u'GOOG', u'PRU', u'MSFT', u'AIG', u'TXN', u'DELL',
u'PEP']
n = 100
data = list(zip(range(n),
np.random.choice(choices, size=n).tolist(),
np.random.uniform(10000, 20000, size=n).tolist(),
np.random.randint(main.count().scalar(), size=n).tolist()))
try:
pkey = odo(data, url % 'pkey',
dshape=dshape('var * {id: int64, sym: string, price: float64, main: map[int64, T]}'),
foreign_keys=dict(main=main.c.id),
primary_key=['id'])
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield pkey
finally:
drop(pkey)
@pytest.yield_fixture
def fkey(url, pkey):
try:
fkey = odo([(i,
int(np.random.randint(pkey.count().scalar())),
int(np.random.randint(10000)))
for i in range(10)],
url % 'fkey',
dshape=dshape('var * {id: int64, sym_id: map[int64, T], size: int64}'),
foreign_keys=dict(sym_id=pkey.c.id),
primary_key=['id'])
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield fkey
finally:
drop(fkey)
@pytest.yield_fixture
def sql_with_float(url):
try:
t = data(url % next(names), dshape='var * {c: float64}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield t
finally:
drop(t)
@pytest.yield_fixture(scope='module')
def nyc_csv(pg_ip):
try:
t = odo(
example('nyc.csv'),
'postgresql://postgres@{}/test::nyc'.format(pg_ip),
)
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield t
finally:
drop(t)
def test_nyc_csv(nyc_csv):
t = symbol('t', discover(nyc_csv))
assert compute(t.nrows, nyc_csv, return_type='core') > 0
def test_postgres_create(sql):
assert odo(sql, list) == [('a', 1), ('b', 2)]
def test_postgres_isnan(sql_with_float):
dta = (1.0,), (float('nan'),)
table = odo(dta, sql_with_float)
sym = symbol('s', discover(dta))
assert compute(sym.isnan(), table, return_type=list) == [(False,), (True,)]
def test_insert_from_subselect(sql_with_float):
data = pd.DataFrame([{'c': 2.0}, {'c': 1.0}])
tbl = odo(data, sql_with_float)
s = symbol('s', discover(data))
odo(compute(s[s.c.isin((1.0, 2.0))].sort(), tbl, return_type='native'), sql_with_float),
tm.assert_frame_equal(
odo(sql_with_float, pd.DataFrame).iloc[2:].reset_index(drop=True),
pd.DataFrame([{'c': 1.0}, {'c': 2.0}]),
)
def test_concat(sql_two_tables):
t_table, u_table = sql_two_tables
t_data = pd.DataFrame(np.arange(5), columns=['a'])
u_data = pd.DataFrame(np.arange(5, 10), columns=['a'])
odo(t_data, t_table)
odo(u_data, u_table)
t = symbol('t', discover(t_data))
u = symbol('u', discover(u_data))
tm.assert_frame_equal(
compute(concat(t, u).sort('a'), {t: t_table, u: u_table}, return_type=pd.DataFrame),
pd.DataFrame(np.arange(10), columns=['a']),
)
def test_concat_invalid_axis(sql_two_tables):
t_table, u_table = sql_two_tables
t_data = pd.DataFrame(np.arange(5), columns=['a'])
u_data = pd.DataFrame(np.arange(5, 10), columns=['a'])
odo(t_data, t_table)
odo(u_data, u_table)
# We need to force the shape to not be a record here so we can
# create the `Concat` node with an axis=1.
t = symbol('t', '5 * 1 * int32')
u = symbol('u', '5 * 1 * int32')
with pytest.raises(ValueError) as e:
compute(concat(t, u, axis=1), {t: t_table, u: u_table}, return_type='native')
# Preserve the suggestion to use merge.
assert "'merge'" in str(e.value)
def test_timedelta_arith(sql_with_dts):
delta = timedelta(days=1)
dates = pd.Series(pd.date_range('2014-01-01', '2014-02-01'))
sym = symbol('s', discover(dates))
assert (
compute(sym + delta, sql_with_dts, return_type=pd.Series) == dates + delta
).all()
assert (
compute(sym - delta, sql_with_dts, return_type=pd.Series) == dates - delta
).all()
assert (
compute(sym - (sym - delta), sql_with_dts, return_type=pd.Series) ==
dates - (dates - delta)
).all()
@pytest.mark.parametrize('func', ('var', 'std'))
def test_timedelta_stat_reduction(sql_with_timedeltas, func):
sym = symbol('s', discover(sql_with_timedeltas))
expr = getattr(sym.N, func)()
deltas = pd.Series([timedelta(seconds=n) for n in range(10)])
expected = timedelta(
seconds=getattr(deltas.astype('int64') / 1e9, func)(ddof=expr.unbiased)
)
assert compute(expr, sql_with_timedeltas, return_type=timedelta) == expected
def test_coerce_bool_and_sum(sql):
sql = sql.data
n = sql.name
t = symbol(n, discover(sql))
expr = (t.B > 1.0).coerce(to='int32').sum()
result = compute(expr, sql).scalar()
expected = compute(t.B, sql, return_type=pd.Series).gt(1).sum()
assert result == expected
def test_distinct_on(sql):
sql = sql.data
t = symbol('t', discover(sql))
computation = compute(t[['A', 'B']].sort('A').distinct('A'), sql, return_type='native')
assert normalize(str(computation)) == normalize("""
SELECT DISTINCT ON (anon_1."A") anon_1."A", anon_1."B"
FROM (SELECT {tbl}."A" AS "A", {tbl}."B" AS "B"
FROM {tbl}) AS anon_1 ORDER BY anon_1."A" ASC
""".format(tbl=sql.name))
assert odo(computation, tuple) == (('a', 1), ('b', 2))
def test_relabel_columns_over_selection(big_sql):
t = symbol('t', discover(big_sql))
result = compute(t[t['B'] == 2].relabel(B=u'b'),
big_sql, return_type=pd.DataFrame)
expected = pd.DataFrame([['a', 2]], columns=[u'A', u'b'])
tm.assert_frame_equal(result, expected)
def test_auto_join_field(orders):
t = symbol('t', discover(orders))
expr = t.product_id.color
result = compute(expr, orders, return_type='native')
expected = """SELECT
products.color
FROM products, orders
WHERE orders.product_id = products.product_id
"""
assert normalize(str(result)) == normalize(expected)
def test_auto_join_projection(orders):
t = symbol('t', discover(orders))
expr = t.product_id[['color', 'price']]
result = compute(expr, orders, return_type='native')
expected = """SELECT
products.color,
products.price
FROM products, orders
WHERE orders.product_id = products.product_id
"""
assert normalize(str(result)) == normalize(expected)
@pytest.mark.xfail
@pytest.mark.parametrize('func', ['max', 'min', 'sum'])
def test_foreign_key_reduction(orders, products, func):
t = symbol('t', discover(orders))
expr = methodcaller(func)(t.product_id.price)
result = compute(expr, orders, return_type='native')
expected = """WITH alias as (select
products.price as price
from
products, orders
where orders.product_id = products.product_id)
select {0}(alias.price) as price_{0} from alias
""".format(func)
assert normalize(str(result)) == normalize(expected)
def test_foreign_key_chain(fkey):
t = symbol('t', discover(fkey))
expr = t.sym_id.main.data
result = compute(expr, fkey, return_type='native')
expected = """SELECT
main.data
FROM main, fkey, pkey
WHERE fkey.sym_id = pkey.id and pkey.main = main.id
"""
assert normalize(str(result)) == normalize(expected)
@pytest.mark.xfail(raises=AssertionError,
reason='CTE mucks up generation here')
@pytest.mark.parametrize('grouper', ['sym', ['sym']])
def test_foreign_key_group_by(fkey, grouper):
t = symbol('fkey', discover(fkey))
expr = by(t.sym_id[grouper], avg_price=t.sym_id.price.mean())
result = compute(expr, fkey, return_type='native')
expected = """SELECT
pkey.sym,
avg(pkey.price) AS avg_price
FROM pkey, fkey
WHERE fkey.sym_id = pkey.id
GROUP BY pkey.sym
"""
assert normalize(str(result)) == normalize(expected)
@pytest.mark.parametrize('grouper', ['sym_id', ['sym_id']])
def test_group_by_map(fkey, grouper):
t = symbol('fkey', discover(fkey))
expr = by(t[grouper], id_count=t.size.count())
result = compute(expr, fkey, return_type='native')
expected = """SELECT
fkey.sym_id,
count(fkey.size) AS id_count
FROM fkey
GROUP BY fkey.sym_id
"""
assert normalize(str(result)) == normalize(expected)
def test_foreign_key_isin(fkey):
t = symbol('fkey', discover(fkey))
expr = t.sym_id.isin([1, 2])
result = compute(expr, fkey, return_type='native')
expected = """SELECT
fkey.sym_id IN (%(sym_id_1)s, %(sym_id_2)s) AS anon_1
FROM fkey
"""
assert normalize(str(result)) == normalize(expected)
@pytest.mark.xfail(raises=AssertionError, reason='Not yet implemented')
def test_foreign_key_merge_expression(fkey):
from blaze import merge
t = symbol('fkey', discover(fkey))
expr = merge(t.sym_id.sym, t.sym_id.main.data)
expected = """
select pkey.sym, main.data
from
fkey, pkey, main
where
fkey.sym_id = pkey.id and pkey.main = main.id
"""
result = compute(expr, fkey, return_type='native')
assert normalize(str(result)) == normalize(expected)
def test_join_type_promotion(sqla, sqlb):
t, s = symbol(sqla.name, discover(sqla)), symbol(sqlb.name, discover(sqlb))
expr = join(t, s, 'B', how='inner')
result = set(map(tuple, compute(expr, {t: sqla, s: sqlb}, return_type='native').execute().fetchall()))
expected = set([(1, 'a', 'a'), (1, None, 'a')])
assert result == expected
@pytest.mark.parametrize(['n', 'column'],
[(1, 'A'), (-1, 'A'),
(1, 'B'), (-1, 'B'),
(0, 'A'), (0, 'B')])
def test_shift_on_column(n, column, sql):
sql = sql.data
t = symbol('t', discover(sql))
expr = t[column].shift(n)
result = compute(expr, sql, return_type=pd.Series)
expected = odo(sql, pd.DataFrame)[column].shift(n)
| tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2022/1/12 14:55
Desc: Eastmoney (东方财富网) Data Center - Shareholder Analysis
https://data.eastmoney.com/gdfx/
"""
import pandas as pd
import requests
from tqdm import tqdm
def stock_gdfx_free_holding_statistics_em(date: str = "20210930") -> pd.DataFrame:
"""
    Eastmoney Data Center - Shareholder Analysis - holding statistics - top 10 tradable shareholders
    https://data.eastmoney.com/gdfx/HoldingAnalyse.html
    :param date: reporting period, e.g. "20210930"
    :type date: str
    :return: holding statistics for the top 10 tradable shareholders
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "STATISTICS_TIMES,COOPERATION_HOLDER_MARK",
"sortTypes": "-1,-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_COOPFREEHOLDERS_ANALYSISNEW",
"columns": "ALL",
"source": "WEB",
"client": "WEB",
"filter": f"""(HOLDNUM_CHANGE_TYPE="001")(END_DATE='{'-'.join([date[:4], date[4:6], date[6:]])}')""",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1)):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
    big_df.columns = [
        "序号",
        "-",
        "-",
        "股东名称",
        "股东类型",
        "-",
        "统计次数",
        "公告日后涨幅统计-10个交易日-平均涨幅",
        "公告日后涨幅统计-10个交易日-最大涨幅",
        "公告日后涨幅统计-10个交易日-最小涨幅",
        "公告日后涨幅统计-30个交易日-平均涨幅",
        "公告日后涨幅统计-30个交易日-最大涨幅",
        "公告日后涨幅统计-30个交易日-最小涨幅",
        "公告日后涨幅统计-60个交易日-平均涨幅",
        "公告日后涨幅统计-60个交易日-最大涨幅",
        "公告日后涨幅统计-60个交易日-最小涨幅",
        "持有个股",
    ]
    big_df = big_df[
        [
            "序号",
            "股东名称",
            "股东类型",
            "统计次数",
            "公告日后涨幅统计-10个交易日-平均涨幅",
            "公告日后涨幅统计-10个交易日-最大涨幅",
            "公告日后涨幅统计-10个交易日-最小涨幅",
            "公告日后涨幅统计-30个交易日-平均涨幅",
            "公告日后涨幅统计-30个交易日-最大涨幅",
            "公告日后涨幅统计-30个交易日-最小涨幅",
            "公告日后涨幅统计-60个交易日-平均涨幅",
            "公告日后涨幅统计-60个交易日-最大涨幅",
            "公告日后涨幅统计-60个交易日-最小涨幅",
            "持有个股",
        ]
    ]
    big_df["统计次数"] = pd.to_numeric(big_df["统计次数"])
    big_df["公告日后涨幅统计-10个交易日-平均涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-10个交易日-平均涨幅"])
    big_df["公告日后涨幅统计-10个交易日-最大涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-10个交易日-最大涨幅"])
    big_df["公告日后涨幅统计-10个交易日-最小涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-10个交易日-最小涨幅"])
    big_df["公告日后涨幅统计-30个交易日-平均涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-30个交易日-平均涨幅"])
    big_df["公告日后涨幅统计-30个交易日-最大涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-30个交易日-最大涨幅"])
    big_df["公告日后涨幅统计-30个交易日-最小涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-30个交易日-最小涨幅"])
    big_df["公告日后涨幅统计-60个交易日-平均涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-60个交易日-平均涨幅"])
    big_df["公告日后涨幅统计-60个交易日-最大涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-60个交易日-最大涨幅"])
    big_df["公告日后涨幅统计-60个交易日-最小涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-60个交易日-最小涨幅"])
return big_df
def stock_gdfx_holding_statistics_em(date: str = "20210930") -> pd.DataFrame:
"""
    Eastmoney Data Center - Shareholder Analysis - holding statistics - top 10 shareholders
    https://data.eastmoney.com/gdfx/HoldingAnalyse.html
    :param date: reporting period, e.g. "20210930"
    :type date: str
    :return: holding statistics for the top 10 shareholders
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "STATISTICS_TIMES,COOPERATION_HOLDER_MARK",
"sortTypes": "-1,-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_COOPHOLDERS_ANALYSIS",
"columns": "ALL",
"source": "WEB",
"client": "WEB",
"filter": f"""(HOLDNUM_CHANGE_TYPE="001")(END_DATE='{'-'.join([date[:4], date[4:6], date[6:]])}')""",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1)):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
    big_df.columns = [
        "序号",
        "-",
        "-",
        "股东名称",
        "股东类型",
        "-",
        "统计次数",
        "公告日后涨幅统计-10个交易日-平均涨幅",
        "公告日后涨幅统计-10个交易日-最大涨幅",
        "公告日后涨幅统计-10个交易日-最小涨幅",
        "公告日后涨幅统计-30个交易日-平均涨幅",
        "公告日后涨幅统计-30个交易日-最大涨幅",
        "公告日后涨幅统计-30个交易日-最小涨幅",
        "公告日后涨幅统计-60个交易日-平均涨幅",
        "公告日后涨幅统计-60个交易日-最大涨幅",
        "公告日后涨幅统计-60个交易日-最小涨幅",
        "持有个股",
    ]
    big_df = big_df[
        [
            "序号",
            "股东名称",
            "股东类型",
            "统计次数",
            "公告日后涨幅统计-10个交易日-平均涨幅",
            "公告日后涨幅统计-10个交易日-最大涨幅",
            "公告日后涨幅统计-10个交易日-最小涨幅",
            "公告日后涨幅统计-30个交易日-平均涨幅",
            "公告日后涨幅统计-30个交易日-最大涨幅",
            "公告日后涨幅统计-30个交易日-最小涨幅",
            "公告日后涨幅统计-60个交易日-平均涨幅",
            "公告日后涨幅统计-60个交易日-最大涨幅",
            "公告日后涨幅统计-60个交易日-最小涨幅",
            "持有个股",
        ]
    ]
    big_df["统计次数"] = pd.to_numeric(big_df["统计次数"])
    big_df["公告日后涨幅统计-10个交易日-平均涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-10个交易日-平均涨幅"])
    big_df["公告日后涨幅统计-10个交易日-最大涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-10个交易日-最大涨幅"])
    big_df["公告日后涨幅统计-10个交易日-最小涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-10个交易日-最小涨幅"])
    big_df["公告日后涨幅统计-30个交易日-平均涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-30个交易日-平均涨幅"])
    big_df["公告日后涨幅统计-30个交易日-最大涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-30个交易日-最大涨幅"])
    big_df["公告日后涨幅统计-30个交易日-最小涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-30个交易日-最小涨幅"])
    big_df["公告日后涨幅统计-60个交易日-平均涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-60个交易日-平均涨幅"])
    big_df["公告日后涨幅统计-60个交易日-最大涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-60个交易日-最大涨幅"])
    big_df["公告日后涨幅统计-60个交易日-最小涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-60个交易日-最小涨幅"])
return big_df
def stock_gdfx_free_holding_change_em(date: str = "20210930") -> pd.DataFrame:
"""
    Eastmoney Data Center - Shareholder Analysis - holding change statistics - top 10 tradable shareholders
    https://data.eastmoney.com/gdfx/HoldingAnalyse.html
    :param date: reporting period, e.g. "20210930"
    :type date: str
    :return: holding change statistics for the top 10 tradable shareholders
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "HOLDER_NUM,HOLDER_NEW",
"sortTypes": "-1,-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_FREEHOLDERS_BASIC_INFONEW",
"columns": "ALL",
"source": "WEB",
"client": "WEB",
"filter": f"(END_DATE='{'-'.join([date[:4], date[4:6], date[6:]])}')",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1)):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
    big_df.columns = [
        "序号",
        "-",
        "-",
        "股东名称",
        "-",
        "股东类型",
        "-",
        "-",
        "-",
        "期末持股只数统计-总持有",
        "期末持股只数统计-新进",
        "期末持股只数统计-增加",
        "期末持股只数统计-减少",
        "期末持股只数统计-不变",
        "-",
        "流通市值统计",
        "持有个股",
        "-",
        "-",
    ]
    big_df = big_df[
        [
            "序号",
            "股东名称",
            "股东类型",
            "期末持股只数统计-总持有",
            "期末持股只数统计-新进",
            "期末持股只数统计-增加",
            "期末持股只数统计-不变",
            "期末持股只数统计-减少",
            "流通市值统计",
            "持有个股",
        ]
    ]
    big_df["期末持股只数统计-总持有"] = pd.to_numeric(big_df["期末持股只数统计-总持有"])
    big_df["期末持股只数统计-新进"] = pd.to_numeric(big_df["期末持股只数统计-新进"])
    big_df["期末持股只数统计-增加"] = pd.to_numeric(big_df["期末持股只数统计-增加"])
    big_df["期末持股只数统计-不变"] = pd.to_numeric(big_df["期末持股只数统计-不变"])
    big_df["期末持股只数统计-减少"] = pd.to_numeric(big_df["期末持股只数统计-减少"])
    big_df["流通市值统计"] = pd.to_numeric(big_df["流通市值统计"])
return big_df
def stock_gdfx_holding_change_em(date: str = "20210930") -> pd.DataFrame:
"""
    Eastmoney Data Center - Shareholder Analysis - holding change statistics - top 10 shareholders
    https://data.eastmoney.com/gdfx/HoldingAnalyse.html
    :param date: reporting period, e.g. "20210930"
    :type date: str
    :return: holding change statistics for the top 10 shareholders
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "HOLDER_NUM,HOLDER_NEW",
"sortTypes": "-1,-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_HOLDERS_BASIC_INFO",
"columns": "ALL",
"source": "WEB",
"client": "WEB",
"filter": f"(END_DATE='{'-'.join([date[:4], date[4:6], date[6:]])}')",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1)):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
    big_df.columns = [
        "序号",
        "-",
        "-",
        "股东名称",
        "-",
        "股东类型",
        "-",
        "-",
        "-",
        "期末持股只数统计-总持有",
        "期末持股只数统计-新进",
        "期末持股只数统计-增加",
        "期末持股只数统计-减少",
        "期末持股只数统计-不变",
        "-",
        "-",
        "持有个股",
        "流通市值统计",
    ]
    big_df = big_df[
        [
            "序号",
            "股东名称",
            "股东类型",
            "期末持股只数统计-总持有",
            "期末持股只数统计-新进",
            "期末持股只数统计-增加",
            "期末持股只数统计-不变",
            "期末持股只数统计-减少",
            "流通市值统计",
            "持有个股",
        ]
    ]
    big_df["期末持股只数统计-总持有"] = pd.to_numeric(big_df["期末持股只数统计-总持有"])
    big_df["期末持股只数统计-新进"] = pd.to_numeric(big_df["期末持股只数统计-新进"])
    big_df["期末持股只数统计-增加"] = pd.to_numeric(big_df["期末持股只数统计-增加"])
    big_df["期末持股只数统计-不变"] = pd.to_numeric(big_df["期末持股只数统计-不变"])
    big_df["期末持股只数统计-减少"] = pd.to_numeric(big_df["期末持股只数统计-减少"])
    big_df["流通市值统计"] = pd.to_numeric(big_df["流通市值统计"])
return big_df
def stock_gdfx_free_top_10_em(
symbol: str = "sh688686", date: str = "20210630"
) -> pd.DataFrame:
"""
    Eastmoney - individual stock - top 10 tradable shareholders
    https://emweb.securities.eastmoney.com/PC_HSF10/ShareholderResearch/Index?type=web&code=SH688686#sdltgd-0
    :param symbol: stock code prefixed with its market, e.g. "sh688686"
    :type symbol: str
    :param date: reporting period, e.g. "20210630"
    :type date: str
    :return: top 10 tradable shareholders
:rtype: pandas.DataFrame
"""
url = (
"https://emweb.securities.eastmoney.com/PC_HSF10/ShareholderResearch/PageSDLTGD"
)
params = {
"code": f"{symbol.upper()}",
"date": f"{'-'.join([date[:4], date[4:6], date[6:]])}",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["sdltgd"])
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
    temp_df.columns = [
        "名次",
        "-",
        "-",
        "-",
        "-",
        "股东名称",
        "股东性质",
        "股份类型",
        "持股数",
        "占总流通股本持股比例",
        "增减",
        "变动比率",
    ]
    temp_df = temp_df[
        [
            "名次",
            "股东名称",
            "股东性质",
            "股份类型",
            "持股数",
            "占总流通股本持股比例",
            "增减",
            "变动比率",
        ]
    ]
    temp_df["持股数"] = pd.to_numeric(temp_df["持股数"])
    temp_df["占总流通股本持股比例"] = pd.to_numeric(temp_df["占总流通股本持股比例"])
    temp_df["变动比率"] = pd.to_numeric(temp_df["变动比率"])
return temp_df
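# Hedged usage sketch (requires live access to the Eastmoney endpoint; the symbol
# and date shown are simply the function defaults):
# >>> stock_gdfx_free_top_10_em(symbol="sh688686", date="20210630").head()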
def stock_gdfx_top_10_em(
symbol: str = "sh688686", date: str = "20210630"
) -> pd.DataFrame:
"""
    Eastmoney - individual stock - top 10 shareholders
    https://emweb.securities.eastmoney.com/PC_HSF10/ShareholderResearch/Index?type=web&code=SH688686#sdgd-0
    :param symbol: stock code prefixed with its market, e.g. "sh688686"
    :type symbol: str
    :param date: reporting period, e.g. "20210630"
    :type date: str
    :return: top 10 shareholders
:rtype: pandas.DataFrame
"""
url = "https://emweb.securities.eastmoney.com/PC_HSF10/ShareholderResearch/PageSDGD"
params = {
"code": f"{symbol.upper()}",
"date": f"{'-'.join([date[:4], date[4:6], date[6:]])}",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["sdgd"])
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
    temp_df.columns = [
        "名次",
        "-",
        "-",
        "-",
        "-",
        "股东名称",
        "股份类型",
        "持股数",
        "占总股本持股比例",
        "增减",
        "变动比率",
    ]
    temp_df = temp_df[
        [
            "名次",
            "股东名称",
            "股份类型",
            "持股数",
            "占总股本持股比例",
            "增减",
            "变动比率",
        ]
    ]
    temp_df["持股数"] = pd.to_ | numeric(temp_df["持股数"]) | pandas.to_numeric |
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
| :class:`pandas.Series` functions and operators implementations in SDC
| Also, it contains Numba internal operators which are required for Series type handling
"""
import numba
import numpy
import operator
import pandas
import math
import sys
from numba.errors import TypingError
from numba.extending import overload, overload_method, overload_attribute
from numba.typing import signature
from numba.extending import intrinsic
from numba import (types, numpy_support, cgutils)
from numba.typed import Dict
from numba import prange
import sdc
import sdc.datatypes.common_functions as common_functions
from sdc.datatypes.common_functions import (TypeChecker, check_index_is_numeric, find_common_dtype_from_numpy_dtypes,
sdc_join_series_indexes)
from sdc.datatypes.hpat_pandas_series_rolling_types import _hpat_pandas_series_rolling_init
from sdc.datatypes.hpat_pandas_stringmethods_types import StringMethodsType
from sdc.datatypes.hpat_pandas_getitem_types import SeriesGetitemAccessorType
from sdc.hiframes.pd_series_type import SeriesType
from sdc.str_arr_ext import (StringArrayType, string_array_type, str_arr_is_na, str_arr_set_na,
num_total_chars, pre_alloc_string_array, cp_str_list_to_array)
from sdc.utils import to_array, sdc_overload, sdc_overload_method, sdc_overload_attribute
from sdc.datatypes import hpat_pandas_series_autogenerated
@sdc_overload(operator.getitem)
def hpat_pandas_series_accessor_getitem(self, idx):
"""
Pandas Series operator :attr:`pandas.Series.get` implementation
**Algorithm**: result = series[idx]
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_static_getitem_series1
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
_func_name = 'Operator getitem().'
if not isinstance(self, SeriesGetitemAccessorType):
return None
accessor = self.accessor.literal_value
if accessor == 'iloc':
if isinstance(idx, (types.List, types.Array, types.SliceType)):
def hpat_pandas_series_iloc_list_slice_impl(self, idx):
result_data = self._series._data[idx]
result_index = self._series.index[idx]
return pandas.Series(result_data, result_index, self._series._name)
return hpat_pandas_series_iloc_list_slice_impl
if isinstance(idx, (int, types.Integer)):
def hpat_pandas_series_iloc_impl(self, idx):
return self._series._data[idx]
return hpat_pandas_series_iloc_impl
def hpat_pandas_series_iloc_callable_impl(self, idx):
index = numpy.asarray(list(map(idx, self._series._data)))
return pandas.Series(self._series._data[index], self._series.index[index], self._series._name)
return hpat_pandas_series_iloc_callable_impl
raise TypingError('{} The index must be an Integer, Slice or List of Integer or a callable.\
Given: {}'.format(_func_name, idx))
if accessor == 'iat':
if isinstance(idx, (int, types.Integer)):
def hpat_pandas_series_iat_impl(self, idx):
return self._series._data[idx]
return hpat_pandas_series_iat_impl
raise TypingError('{} The index must be a Integer. Given: {}'.format(_func_name, idx))
if accessor == 'loc':
# Note: Loc return Series
# Note: Index 0 in slice not supported
# Note: Loc slice and callable with String not implement
index_is_none = (self.series.index is None or
isinstance(self.series.index, numba.types.misc.NoneType))
if isinstance(idx, types.SliceType) and index_is_none:
def hpat_pandas_series_loc_slice_noidx_impl(self, idx):
max_slice = sys.maxsize
start = idx.start
stop = idx.stop
if idx.stop == max_slice:
stop = max_slice - 1
result_data = self._series._data[start:stop+1]
result_index = numpy.arange(start, stop + 1)
return pandas.Series(result_data, result_index, self._series._name)
return hpat_pandas_series_loc_slice_noidx_impl
if isinstance(idx, (int, types.Integer, types.UnicodeType, types.StringLiteral)):
def hpat_pandas_series_loc_impl(self, idx):
index = self._series.index
mask = numpy.empty(len(self._series._data), numpy.bool_)
for i in numba.prange(len(index)):
mask[i] = index[i] == idx
return pandas.Series(self._series._data[mask], index[mask], self._series._name)
return hpat_pandas_series_loc_impl
raise TypingError('{} The index must be an Number, Slice, String, List, Array or a callable.\
Given: {}'.format(_func_name, idx))
if accessor == 'at':
if isinstance(idx, (int, types.Integer, types.UnicodeType, types.StringLiteral)):
def hpat_pandas_series_at_impl(self, idx):
index = self._series.index
mask = numpy.empty(len(self._series._data), numpy.bool_)
for i in numba.prange(len(index)):
mask[i] = index[i] == idx
return self._series._data[mask]
return hpat_pandas_series_at_impl
raise TypingError('{} The index must be a Number or String. Given: {}'.format(_func_name, idx))
raise TypingError('{} Unknown accessor. Only "loc", "iloc", "at", "iat" are supported.\
Given: {}'.format(_func_name, accessor))
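# Hedged usage sketch (an assumption about typical SDC usage, not taken from this
# file): the accessor overloads above are what let numba-jitted user code index a
# pandas Series directly, e.g.
#
#   import pandas as pd
#   from numba import njit
#
#   @njit
#   def first_element(s):
#       return s.iloc[0]
#
#   first_element(pd.Series([1.0, 2.0, 3.0]))  # -> 1.0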
@sdc_overload(operator.getitem)
def hpat_pandas_series_getitem(self, idx):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.get
Limitations
-----------
Supported ``key`` can be one of the following:
- Integer scalar, e.g. :obj:`series[0]`
- A slice, e.g. :obj:`series[2:5]`
- Another series
Examples
--------
.. literalinclude:: ../../../examples/series_getitem.py
:language: python
:lines: 27-
:caption: Getting Pandas Series elements
:name: ex_series_getitem
.. command-output:: python ./series_getitem.py
:cwd: ../../../examples
.. todo:: Fix SDC behavior and add the expected output of the > python ./series_getitem.py to the docstring
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series operator :attr:`pandas.Series.get` implementation
**Algorithm**: result = series[idx]
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_static_getitem_series1
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
_func_name = 'Operator getitem().'
if not isinstance(self, SeriesType):
return None
# Note: Getitem return Series
index_is_none = isinstance(self.index, numba.types.misc.NoneType)
index_is_none_or_numeric = index_is_none or (self.index and isinstance(self.index.dtype, types.Number))
index_is_string = not index_is_none and isinstance(self.index.dtype, (types.UnicodeType, types.StringLiteral))
if (
isinstance(idx, types.Number) and index_is_none_or_numeric or
(isinstance(idx, (types.UnicodeType, types.StringLiteral)) and index_is_string)
):
def hpat_pandas_series_getitem_index_impl(self, idx):
index = self.index
mask = numpy.empty(len(self._data), numpy.bool_)
for i in numba.prange(len(index)):
mask[i] = index[i] == idx
return | pandas.Series(self._data[mask], index[mask], self._name) | pandas.Series |
import pandas as pd
import textacy
import textblob
import en_core_web_sm
# CONJ and DET are referenced below in entity_statements()
from spacy.symbols import CONJ, DET
nlp = en_core_web_sm.load()
# Multiprocessing Imports
from dask import dataframe as dd
from dask.multiprocessing import get
from multiprocessing import cpu_count
# Sentiment Imports
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
# Local Imports
from src.utils.pandas_utils import pivot_df_to_row
##
def text_vectorize_and_cluster(text, df=None, vectorizer=None, clusterer=None,
vector_params=None, clusterer_params=None,
outlier_scores=False, one_hot_labels=False, return_df=False,
return_type='clusters'):
""" Given processed text, vectorize and cluster it. Return cluster labels or cluster labels
along with fitted vectorizer and clusterer.
Parameters
----------
text : object
Object which contains text that will be passed to the transformer's .fit_transform() method
As such, text must already be processed and in correct format.
df : Pandas DataFrame
Optional dataframe attach clustering results to
vectorizer: object
Class for text vectorization. Must follow sklearn transformer convention and
implement .fit_transform() method
E.g. CountVectorizer from sklearn
vector_params: dict[str:obj]
Dictionary to pass to vectorizer as parameters
clusterer: object
Class for clustering. Must follow sklearn estimator convention and
implement .fit_predict() method for implementing cluster assignment
clusterer_params: dict[str:obj]
Dictionary to pass to clusterer as parameters
outlier_scores: boolean
Flag to indicate outlier scores computed by clusterer. Accessed
from clusterer.outlier_scores_ attribute
one_hot_labels: boolean
Flag to indicate if cluster labels should be one hot encoded
instead of returns as a one dimensional array of ordinal
integer labels
return_df: boolean
Flag to indicate if results should be returned concatenated
with the dataframe passed to 'df' kword arg
    return_type: str in ['clusters', 'all']
        String indicating return type. Must be one of ['clusters', 'all']
clusters: Return the cluster results as a one dimensional array of ordinal
integer labels or concatenated to dataframe if return_df=True
all: Return the fitted vectorizer, clusterer and cluster label results
Returns
-------
clusters: pd.Series or pd.DataFrame
Return the cluster results as a one dimensional array of ordinal
integer labels or concatenated to dataframe if return_df=True
clusters, vectorizer, clusterer: object, object, pd.Series or pd.DataFrame
Return the fitted vectorizer, clusterer and cluster label results
"""
# Check vectorizer and clusterer for correct methods
assert "fit_transform" in dir(vectorizer), "vectorizer has no 'fit_transform' method"
assert "fit_predict" in dir(clusterer), "clusterer has no 'fit_predict' method"
if return_df:
assert isinstance(df, pd.DataFrame), "If specifying 'return_df', data must be passed to argument 'df'"
# Instantiate vectorizer with params if specified
if vector_params:
vectorizer = vectorizer(**vector_params)
# Else instantiate the vectorizer
elif vectorizer:
vectorizer = vectorizer()
# Fit and trasnform text to vectors
vectors = vectorizer.fit_transform(text)
# Instantiate vectorizer with params if specified
if clusterer_params:
clusterer = clusterer(**clusterer_params)
elif clusterer:
clusterer = clusterer()
# Fit and trasnform vectors to clusters
cluster_labels = clusterer.fit_predict(vectors)
if len(set(clusterer.labels_)) <= 1:
return print('Clusterer could not find any meaningful labels. All data would fall under one cluster')
# Create DataFrame of Cluster Labels
results = pd.DataFrame(cluster_labels, columns=['Cluster_Label'])
# Add Outlier Score if specified
if outlier_scores:
results['Outlier_Score'] = clusterer.outlier_scores_
# Add labels as dummy variables
if one_hot_labels:
one_hot_cols = pd.get_dummies(results['Cluster_Label'], prefix='Cluster_Label')
one_hot_col_names = one_hot_cols.columns.values.tolist()
results = pd.merge(results, one_hot_cols, left_index=True, right_index=True)
# Attach to data if specified
if return_df:
results = pd.merge(df, results, left_index=True, right_index=True)
# Return all or just cluster results
if return_type == 'all':
return results, vectorizer, clusterer
elif return_type == 'clusters':
return results
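# Hedged usage sketch (assumes scikit-learn is available; CountVectorizer and
# KMeans are illustrative choices, not required by this module):
#
#   from sklearn.feature_extraction.text import CountVectorizer
#   from sklearn.cluster import KMeans
#
#   docs = ["cats and dogs", "dogs and wolves", "stocks and bonds"]
#   cluster_results = text_vectorize_and_cluster(
#       docs,
#       vectorizer=CountVectorizer,
#       clusterer=KMeans,
#       clusterer_params={"n_clusters": 2, "n_init": 10},
#   )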
def dask_df_textacy_apply(df, text_col, textacy_col_name='textacy_doc', ncores=None, inplace=False):
"""
Use dask to parallelize apply textacy Doc object creation from a dataframe
Parameters
----------
df : DataFrame
Dataframe which holds the text
text_col : str
The name of the text column in the df
textacy_col_name : str
The name to give to the column with the textacy doc objects
ncores : int
Number of cores to use for multiprocessing. Defaults to all cores in cpu minus one.
inplace : bool
Whether to return the entire df with the textacy doc series concatenated
or only textacy doc series.
Default is False
Returns
-------
DataFrame / Series
Either the dataframe passed as arg with the textacy series as last column or
just the textacy column
"""
# If no number of cores to work with, default to max
    if not ncores:
        ncores = cpu_count() - 1
# Partition dask dataframe and map textacy doc apply
# Sometimes this fails because it can't infer the dtypes correctly
# meta=pd.Series(name=0, dtype='object') is a start
# This is also a start https://stackoverflow.com/questions/40019905/how-to-map-a-column-with-dask?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
# Possibly both the inner lambda apply and outer lambda df both need metadata?
    textacy_series = dd.from_pandas(df, npartitions=ncores).map_partitions(
lambda df : df[text_col].apply(lambda x : textacy.doc.Doc(x, lang=nlp))).compute(get=get)
# Name the series
textacy_series.name = textacy_col_name
# If inplace return the dataframe and textacy Series
if inplace:
return pd.concat([df, textacy_series], axis=1)
# Else return just the Textacy series
else:
return textacy_series
def load_textacy_corpus(df, text_col, metadata=True, metadata_columns=None):
# Fill text columns nulls with empty strings
df[text_col] = df[text_col].fillna('')
if metadata:
# Default to metadata columns being every column except the text column
metadata_cols = list(df.columns)
# If list is provided use those
if metadata_columns:
metadata_cols = metadata_columns
# Add text column to metadata columns
# These will constitute all the information held in the textacy corpus
metadata_columns.append(text_col)
# Subset to these
df = df[metadata_cols]
# Convert to nested dict of records
records = df.to_dict(orient='records')
# Split into text and metadata stream
text_stream, metadata_stream = textacy.io.split_records(records, text_col)
# Create Corpus
return textacy.corpus.Corpus(lang='en', texts=text_stream, metadatas=metadata_stream)
# With no metadata
else:
text_stream = (text for text in df[text_col].values)
return textacy.corpus.Corpus(lang='en', texts=text_stream)
# Entity Extraction
def corpus_entity_counts(corpus, include=None, exclude=None):
"""
Given a textacy corpus, return a dataframe of entities and their respective counts.
Parameters
----------
corpus : int
Description of arg1
include : str or Set[str]
Remove named entities whose type IS NOT in this param;
if โNUMERICโ, all numeric entity types (โDATEโ, โMONEYโ, โORDINALโ, etc.) are included
exclude : str or Set[str]
remove named entities whose type IS in this param; if โNUMERICโ,
all numeric entity types (โDATEโ, โMONEYโ, โORDINALโ, etc.) are excluded
Returns
-------
Dataframe
A pandas dataframe with entities and their respective counts, sorted by highest count
"""
from collections import Counter
# Extract all entities
entities = [list(textacy.extract.named_entities(doc, include_types=include, exclude_types=exclude))
for doc in
corpus]
# Pull all non-null entities to flattened list
non_null_entities = []
for entity in entities:
if entity:
non_null_entities.extend(entity)
# Change dtype to string so counter can distinguish
non_null_entities = [str(x) for x in non_null_entities]
# Count entities
entity_counts = Counter(non_null_entities)
# Entity Dataframe
df = (pd.DataFrame.from_dict(entity_counts, orient='index')
.reset_index()
.rename(columns={'index':'Entity', 0:'Count'})
.sort_values(by='Count', ascending=False)
.reset_index(drop=True))
return df
def entity_statements(doc, entity, ignore_entity_case=True,
min_n_words=1, max_n_words=300, return_entity=False):
"""
Extract sentences with a specified entity present in it
Modified from source code of Textacy's textacy.extract.semistructured_statements()
Args:
doc (``textacy.Doc`` or ``spacy.Doc``)
entity (str): a noun or noun phrase of some sort (e.g. "President Obama",
"global warming", "Python")
ignore_entity_case (bool): if True, entity matching is case-independent
min_n_words (int): min number of tokens allowed in a matching fragment
max_n_words (int): max number of tokens allowed in a matching fragment
Yields:
(``spacy.Span`` or ``spacy.Token``) or (``spacy.Span`` or ``spacy.Token``, ``spacy.Span`` or ``spacy.Token``):
        depending on whether return_entity is enabled or not
Notes:
Inspired by <NAME>, <NAME>, <NAME>. Visual Analytics of
Media Frames in Online News and Blogs. IEEE InfoVis Workshop on Text
Visualization. October, 2013.
Which itself was inspired by by <NAME>.; <NAME>.; <NAME>.; and
<NAME>. 2010. Portable Extraction of Partially Structured Facts from
the Web. In Proc. ICETAL 2010, LNAI 6233, 345-356. Heidelberg, Springer.
"""
if ignore_entity_case is True:
entity_toks = entity.lower().split(' ')
get_tok_text = lambda x: x.lower_
else:
entity_toks = entity.split(' ')
get_tok_text = lambda x: x.text
first_entity_tok = entity_toks[0]
n_entity_toks = len(entity_toks)
#cue = cue.lower()
#cue_toks = cue.split(' ')
#n_cue_toks = len(cue_toks)
def is_good_last_tok(tok):
if tok.is_punct:
return False
if tok.pos in {CONJ, DET}:
return False
return True
for sent in doc.sents:
for tok in sent:
# filter by entity
if get_tok_text(tok) != first_entity_tok:
continue
if n_entity_toks == 1:
the_entity = tok
the_entity_root = the_entity
elif all(get_tok_text(tok.nbor(i=i + 1)) == et for i, et in enumerate(entity_toks[1:])):
the_entity = doc[tok.i: tok.i + n_entity_toks]
the_entity_root = the_entity.root
else:
continue
if return_entity:
yield (the_entity, sent.orth_)
else:
yield (sent.orth_)
break
def list_of_entity_statements(corpus, entity):
"""
Given an entity and a textacy corpus, return a list of all the sentences in which this entity occurs
Parameters
----------
corpus : textacy Corpus object
entity : str
The entity for which to search all the sentences within the corpus
Returns
-------
entity_sentences
A list of strings, each being a sentence which contains the entity search
"""
entity_sentences = [list(entity_statements(doc, entity=entity))
for doc
in corpus
if list(entity_statements(doc, entity=entity))] # If statement that removes null sentences
entity_sentences = [item for sublist in entity_sentences for item in sublist]
return entity_sentences
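# Hedged usage sketch (assumes a dataframe `news_df` with a 'text' column; both
# names are placeholders):
#
#   corpus = load_textacy_corpus(news_df, text_col='text', metadata=False)
#   obama_sentences = list_of_entity_statements(corpus, entity='President Obama')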
# Entity Sentiment extractions
def vader_entity_sentiment(df,
textacy_col,
entity,
inplace=True,
vader_sent_types=['neg', 'neu', 'pos', 'compound'],
keep_stats=['count', 'mean', 'min', '25%', '50%', '75%', 'max']):
"""
Pull the descriptive sentiment stats of text sentence with a specified entity in it.
Parameters
----------
df : DataFrame
Dataframe which holds the text
textacy_col : str
The name to give to the column with the textacy doc objects
entity : str
The entity to search the textacy Doc object for
inplace : bool
Whether to return the entire df with the sentiment info or the sentiment info alone
Default is False
vader_sent_types : list
The type of sentiment to extract. neg: negative, pos: positive, neu: neutral, compound is
comination of all three types of all
keep_stats : list
A list of the summary statistics to keep. Default is all returned by pandas DataFrame.describe() method
Returns
-------
DataFrame
Either the dataframe passed as arg with the sentiment info as trailing columns
or the sentiment descriptive stats by itself
"""
vader_analyzer = SentimentIntensityAnalyzer()
sentiment_rows = []
for text in df[textacy_col].values:
text_entities = list(entity_statements(text, entity))
# Iterate through all sentences and get sentiment analysis
entity_sentiment_info = [vader_analyzer.polarity_scores(sentence)
for
sentence
in
text_entities]
# After taking sentiments, turn into a dataframe and describe
try:
# Indices and columns to keep
keep_stats = keep_stats
keep_cols = vader_sent_types
# Describe those columns
summary_stats = pd.DataFrame(entity_sentiment_info).describe().loc[keep_stats, keep_cols]
# Add row to list
sentiment_rows.append(pivot_df_to_row(summary_stats))
# If there's nothing to describe
except ValueError as e:
# Create a summary stats with nulls
summary_stats = pd.DataFrame(index=keep_stats, columns=keep_cols)
# Add to list of rows
sentiment_rows.append(pivot_df_to_row(summary_stats))
# Concatenate All rows together into one dataframe
sentiment_df = pd.concat(sentiment_rows).add_prefix(entity+'_')
if not inplace:
return sentiment_df.reset_index(drop=True)
else:
# Return original df with new sentiment attached
return | pd.concat([df, sentiment_df], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:core_acc] *
# language: python
# name: conda-env-core_acc-py
# ---
# # Correlation analysis
#
# This notebook examines the correlation structure in the gene expression data generated in [1_create_compendia.ipynb](../processing/1_create_compendia.ipynb).
#
# When we performed clustering on the correlation matrices (using Pearson correlation) we found that pairs of genes had either very high correlation scores (>0.5) or very low correlation scores (<0.1). As a result, gene pairs that were highly correlated clustered into a single large module. This finding is consistent with a [previous study](https://link.springer.com/article/10.1186/1471-2164-7-187), which found that KEGG (a database that contains genes or proteins annotated with specific biological processes as reported in the literature) is biased in which biological processes are represented. Figure 1C demonstrates that a large fraction of gene pairs are ribosomal relationships - in the top 0.1% most co-expressed genes, 99% belong to the ribosome pathway.
# Furthermore, protein function prediction based on co-expression drops dramatically after removing the ribosome pathway (Figure 1A, B).
#
# This notebook applies different corrections to try to remove this very dominant global signal in the data. This notebook follows from [1a_transformation_correlation_analysis.ipynb](1a_transformation_correlation_analysis.ipynb). Here we are applying dimensionality reduction techniques in addition to scaling the data. A minimal illustrative sketch of this correction is included after the data loading below.
# %load_ext autoreload
# %autoreload 2
import os
import pandas as pd
import plotnine as pn
import seaborn as sns
from sklearn import preprocessing
import matplotlib.pyplot as plt
import umap
import random
import numpy as np
import scipy
from sklearn.decomposition import PCA
from core_acc_modules import paths
# ## Set user parameters
#
# Here we set the number of PCs or singular vectors to use. We are starting with 300 since this is what [eADAGE](https://pubmed.ncbi.nlm.nih.gov/28711280/) used.
# Params
num_PCs = 300
num_PCs_log = 100
num_singular_values = 300
num_singular_values_log = 100
# Load expression data
pao1_compendium_filename = paths.PAO1_COMPENDIUM
pa14_compendium_filename = paths.PA14_COMPENDIUM
pao1_compendium = pd.read_csv(pao1_compendium_filename, sep="\t", header=0, index_col=0)
pa14_compendium = pd.read_csv(pa14_compendium_filename, sep="\t", header=0, index_col=0)
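# ## Added illustrative sketch (not part of the original analysis)
#
# A minimal example of the kind of correction described above: remove the dominant global signal
# by subtracting the reconstruction from the first few principal components, then re-compute
# gene-gene correlations. The number of components (2) and the variable names are assumptions.
pao1_scaled = preprocessing.StandardScaler().fit_transform(pao1_compendium)
pca_correction = PCA(n_components=2)
dominant_signal = pca_correction.inverse_transform(pca_correction.fit_transform(pao1_scaled))
pao1_corrected = pd.DataFrame(
    pao1_scaled - dominant_signal,
    index=pao1_compendium.index,
    columns=pao1_compendium.columns,
)
pao1_corr_corrected = pao1_corrected.corr(method="pearson")
pao1_corr_corrected.iloc[:5, :5]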
from __future__ import annotations
from typing import Optional, Union, cast
import numpy as np
from numpy.linalg import inv, matrix_rank
import pandas as pd
from linearmodels.typing import ArraySequence, Float64Array
def blocked_column_product(x: ArraySequence, s: Float64Array) -> Float64Array:
"""
Parameters
----------
x : list of ndarray
k-element list of arrays to construct the inner product
s : ndarray
Weighting matrix (k by k)
Returns
-------
ndarray
Blocked product with k x nobs rows; the number of columns is the same as
the number of columns of any member of x.
"""
k = len(x)
out = []
for i in range(k):
val = s[i, 0] * x[0]
for j in range(1, k):
val += s[i, j] * x[j]
out.append(val)
return np.vstack(out)
def blocked_diag_product(x: ArraySequence, s: Float64Array) -> Float64Array:
"""
Parameters
----------
x : list of ndarray
k-element list of arrays to construct the inner product
s : ndarray
Weighting matrix (k by k)
Returns
-------
ndarray
Blocked product. k x nobs rows and the number of columns is the same
as the total number of columns in x.
"""
k = len(x)
out = []
for i in range(k):
row = []
for j in range(k):
row.append(s[i, j] * x[j])
row_arr = np.hstack(row)
out.append(row_arr)
return np.vstack(out)
def blocked_inner_prod(x: ArraySequence, s: Float64Array) -> Float64Array:
r"""
Parameters
----------
x : list of ndarray
k-element list of arrays to construct the inner product
s : ndarray
Weighting matrix (k by k)
Returns
-------
ndarray
Weighted inner product constructed from x and s
Notes
-----
Memory efficient implementation of high-dimensional inner product
.. math::
X'(S \otimes I_n)X
where n is the number of observations in the sample
"""
k = len(x)
widths = [m.shape[1] for m in x]
s_is_diag = np.all(np.asarray((s - np.diag(np.diag(s))) == 0.0))
w0 = widths[0]
homogeneous = all([w == w0 for w in widths])
if homogeneous and not s_is_diag:
# Fast path when all x have same number of columns
# Slower than diag case when k is large since many 0s
xa = np.hstack(x)
return xa.T @ xa * np.kron(s, np.ones((w0, w0)))
cum_width = np.cumsum([0] + widths)
total = sum(widths)
out: np.ndarray = np.zeros((total, total))
for i in range(k):
xi = x[i]
sel_i = slice(cum_width[i], cum_width[i + 1])
s_ii = s[i, i]
prod = s_ii * (xi.T @ xi)
out[sel_i, sel_i] = prod
# Short circuit if s is diagonal (all off-diagonal blocks are zero)
if s_is_diag:
return out
for i in range(k):
xi = x[i]
sel_i = slice(cum_width[i], cum_width[i + 1])
for j in range(i + 1, k):
sel_j = slice(cum_width[j], cum_width[j + 1])
xj = x[j]
s_ij = s[i, j]
prod = s_ij * (xi.T @ xj)
out[sel_i, sel_j] = prod
out[sel_j, sel_i] = prod.T
return cast(np.ndarray, out)
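# Added illustrative self-check (not part of the original module): for small inputs the blocked
# computation should equal the direct, memory-hungry evaluation of X'(S kron I_n)X, where X is
# the block-diagonal matrix formed from the elements of x. Sizes and the random S are assumptions.
def _demo_blocked_inner_prod() -> None:
    rs = np.random.default_rng(0)
    n, k = 25, 3
    x = [rs.standard_normal((n, 2 + i)) for i in range(k)]
    s = rs.standard_normal((k, k))
    s = s @ s.T  # symmetric positive definite weighting matrix
    x_full = np.zeros((k * n, sum(m.shape[1] for m in x)))
    col = 0
    for i, xi in enumerate(x):
        x_full[i * n:(i + 1) * n, col:col + xi.shape[1]] = xi
        col += xi.shape[1]
    direct = x_full.T @ np.kron(s, np.eye(n)) @ x_full
    assert np.allclose(blocked_inner_prod(x, s), direct)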
def blocked_cross_prod(
x: ArraySequence, z: ArraySequence, s: Float64Array
) -> Float64Array:
r"""
Parameters
----------
x : list of ndarray
k-element list of arrays to use as the left side of the cross-product
z : list of ndarray
k-element list of arrays to use as the right side of the cross-product
s : ndarray
Weighting matrix (k by k)
Returns
-------
ndarray
Weighted cross product constructed from x and s
Notes
-----
Memory efficient implementation of high-dimensional cross product
.. math::
X'(S \otimes I_N)Z
where n is the number of observations in the sample
"""
k = len(x)
xp = []
for i in range(k):
row = []
for j in range(k):
s_ij = s[i, j]
row.append(s_ij * (x[i].T @ z[j]))
xp.append(np.concatenate(row, 1))
return np.concatenate(xp, 0)
def blocked_full_inner_product(x: Float64Array, s: Float64Array) -> Float64Array:
r"""
Parameters
----------
x : ndarray
Array of shape KT by KT
s : ndarray
Array of shape K by K
Notes
-----
Computes the quantity
.. math ::
x^\prime (S \otimes I_N)x
"""
k = s.shape[0]
t = x.shape[0] // k
sx = np.empty_like(x)
for i in range(k):
v = s[i, 0] * x[0:t]
for j in range(1, k):
v += s[i, j] * x[j * t : (j + 1) * t]
sx[i * t : (i + 1) * t] = v
return x.T @ sx
def inv_matrix_sqrt(s: Float64Array) -> Float64Array:
    # eigh returns (eigenvalues, eigenvectors); use them to form the inverse matrix square root
    vals, vecs = np.linalg.eigh(s)
    inv_sqrt_vals = 1.0 / np.sqrt(vals)
    out = vecs @ np.diag(inv_sqrt_vals) @ vecs.T
    return (out + out.T) / 2
class LinearConstraint(object):
r"""
Linear constraint for regression estimation
Parameters
----------
r : {ndarray, DataFrame}
Restriction loading matrix
q : {ndarray, Series}
Restriction value
num_params : int
Number of model parameter. Used to test for correctness
require_pandas : bool
Flag indicating whether r and q must be pandas
Notes
-----
Used to impose the constraints
.. math ::
r \beta = q
"""
def __init__(
self,
r: Union[pd.DataFrame, np.ndarray],
q: Optional[Union[pd.Series, np.ndarray]] = None,
num_params: Optional[int] = None,
require_pandas: bool = True,
) -> None:
if not isinstance(r, (pd.DataFrame, np.ndarray)):
raise TypeError("r must be an array or DataFrame")
elif require_pandas and not isinstance(r, pd.DataFrame):
raise TypeError("r must be a DataFrame")
if r.ndim != 2:
raise ValueError("r must be 2-dimensional")
r_pd = pd.DataFrame(r)
ra = np.asarray(r, dtype=np.float64)
self._r_pd = r_pd
self._ra = ra
if q is not None:
if require_pandas and not isinstance(q, pd.Series):
raise TypeError("q must be a Series")
elif not isinstance(q, (pd.Series, np.ndarray)):
raise TypeError("q must be a Series or an array")
if r.shape[0] != q.shape[0]:
raise ValueError("Constraint inputs are not shape compatible")
q_pd = pd.Series(q, index=r_pd.index)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
__doc__="""
ATLAS Higgs Machine Learning Challenge 2014
Read CERN Open Data Portal Dataset http://opendata.cern.ch/record/328
and manipulate it
- Label is changed from character to integer 0 or 1
- DetailLabel is introduced indicating subpopulations
- systematics effects are simulated
- bkg_weight_norm : manipulates the background weight
- tau_energy_scale : manipulates PRI_tau_pt and recomputes other quantities accordingly
WARNING : the variable DER_mass_MMC is not properly manipulated (the modification is linearised),
and I advocate NOT using DER_mass_MMC when doSystTauEnergyScale is enabled
There is a threshold in the original HiggsML file at 20 GeV on PRI_tau_energy.
This threshold is moved when changing sysTauEnergyScale, which is unphysical.
So if you're going to play with sysTauEnergyScale (within 0.9-1.1),
I suggest you remove events below, say, 22 GeV *after* the manipulation.
Applying doSystTauEnergyScale with sysTauEnergyScale=1. does NOT yield results identical to not applying
doSystTauEnergyScale; this is because of rounding errors and the zero-mass approximation.
doSystTauEnergyScale impacts PRI_tau_pt as well as PRI_met and PRI_met_phi
- so overall I suggest that when playing with doSystTauEnergyScale, the reference is
- not using DER_mass_MMC
- applying *after* this manipulation PRI_tau_pt>22
- run with sysTauEnergyScale=1. to have the reference
Author <NAME> LAL, Nov 2016
Modification Dec 2016 (<NAME>):
- Wrap everything into separated functions.
- V4 class now handles 1D-vector values (to improve computation efficiency).
- Fix compatibility with both python 2 and 3.
- Use pandas.DataFrame to ease computation along columns
- Loading function for the base HiggsML dataset (fetch it on the internet if needed)
Refactor March 2017 (V. Estrade):
- Split load function (cleaner)
"""
__version__ = "0.1"
__author__ = "<NAME>, and <NAME> "
import sys
import os
import gzip
import copy
import pandas as pd
import numpy as np
from .download import maybe_download
from .download import get_data_dir
COLUMN_NAMES = {
0: 'PRI_lep_1_pt',
1: 'PRI_lep_1_eta',
2: 'PRI_lep_1_phi',
3: 'PRI_lep_2_pt',
4: 'PRI_lep_2_eta',
5: 'PRI_lep_2_phi',
6: 'PRI_met',
7: 'PRI_met_phi',
8: '8',
9: '9',
10: 'relative_MET',
11: 'axial_MET',
12: 'MMC',
13: 'delta_R',
14: 'delta_eta',
15: '15',
16: '16',
17: 'Pt/Pt',
18: 'invariant_mass_visible',
19: 'invariant_mass_ll',
20: 'delta_phi',
21: 'sphericity',
22: 'transverse_sphericity',
23: '23',
24: '24',
}
RESTRICTED_COLUMNS = [0,1,2,3,4,5,6,7]
COLUMN_RENAME_FOR_SKEWING = {
'PRI_lep_1_pt': 'PRI_tau_pt',
'PRI_lep_1_eta': 'PRI_tau_eta',
'PRI_lep_1_phi': 'PRI_tau_phi',
'PRI_lep_2_pt': 'PRI_lep_pt',
'PRI_lep_2_eta': 'PRI_lep_eta',
'PRI_lep_2_phi': 'PRI_lep_phi',
}
def load_htautau(nrows=None, restricted_cols=True):
url = "http://mlphysics.ics.uci.edu/data/htautau/htautau.txt.gz"
filename = os.path.join(get_data_dir(), "htautau.txt.gz")
maybe_download(filename, url)
if restricted_cols :
data = pd.read_csv(filename, sep='\t', nrows=nrows, header=None, usecols=RESTRICTED_COLUMNS)
import numpy as np
import pandas as pd
import pytest
from src.policies.single_policy_functions import (
_identify_who_attends_because_of_a_b_schooling,
)
from src.policies.single_policy_functions import mixed_educ_policy
@pytest.fixture
def fake_states():
states = pd.DataFrame(index=np.arange(10))
states["state"] = ["Bayern", "Berlin"] * 5
# date at which schools are open in Berlin but closed in Bavaria
# date with uneven week number, i.e. where group a attends school
states["date"] = pd.Timestamp("2020-04-23")
states["educ_a_b_identifier"] = [False, True] * 5
states["occupation"] = pd.Categorical(
["school"] * 8 + ["preschool_teacher", "school_teacher"]
)
states["school_group_id_0"] = [-1] + [22] * 9
states["educ_worker"] = [False] * 8 + [True] * 2
states["age"] = np.arange(10)
return states
@pytest.fixture
def contacts(fake_states):
return pd.Series(True, index=fake_states.index)
def test_a_b_school_system_above_age_0(fake_states, contacts):
calculated = mixed_educ_policy(
states=fake_states,
contacts=contacts,
seed=123,
group_id_column="school_group_id_0",
a_b_query="occupation == 'school'",
non_a_b_attend=True,
hygiene_multiplier=1.0,
always_attend_query="state == 'Niedersachsen'", # no one
params=None,
)
expected = pd.Series([False, True] * 4 + [True, True])
pd.testing.assert_series_equal(calculated, expected)
def test_a_b_school_system_above_age_5(fake_states, contacts):
calculated = mixed_educ_policy(
states=fake_states,
contacts=contacts,
seed=123,
group_id_column="school_group_id_0",
a_b_query="occupation == 'school' & age > 5",
non_a_b_attend=True,
hygiene_multiplier=1.0,
always_attend_query="state == 'Niedersachsen'", # no one
params=None,
)
expected = pd.Series([True] * 6 + [False] + [True] * 3)
pd.testing.assert_series_equal(calculated, expected)
def test_a_b_school_system_below_age_5(fake_states, contacts):
calculated = mixed_educ_policy(
states=fake_states,
contacts=contacts,
seed=123,
group_id_column="school_group_id_0",
a_b_query="occupation == 'school' & age < 5",
non_a_b_attend=False,
hygiene_multiplier=1.0,
always_attend_query="state == 'Niedersachsen'", # no one
params=None,
)
expected = pd.Series(
[False, True, False, True, False, False, False, False, True, True]
)
pd.testing.assert_series_equal(calculated, expected)
def test_mixed_educ_policy_others_home_no_hygiene():
states = pd.DataFrame()
states["county"] = [1, 1, 2, 2, 2, 2, 2, 2]
states["educ_worker"] = [True, False, True, False, False, False, False, False]
states["school_group_id_0"] = [11, 11, 22, 22, 22, 22, 22, -1]
states["educ_a_b_identifier"] = [False, True, True, True, False, True, False, True]
states["date"] = pd.Timestamp("2021-01-04") # week 1
contacts = pd.Series([True] * 6 + [False] * 2, index=states.index)
seed = 333
res = mixed_educ_policy(
states=states,
contacts=contacts,
seed=seed,
group_id_column="school_group_id_0",
a_b_query="county == 2",
non_a_b_attend=False,
hygiene_multiplier=1.0,
always_attend_query="county == 55", # no one
params=None,
)
# zero class, closed county, teacher despite wrong week,
# wrong week, right week, wrong week, right week but not attending, not in school
expected = pd.Series([True, False, True, True, False, True, False, False])
pd.testing.assert_series_equal(res, expected)
def test_mixed_educ_policy_no_contacts():
states = pd.DataFrame()
states["educ_worker"] = [True, False, True, False, False, False, False]
states["educ_a_b_identifier"] = [False, True, False, True, False, True, False]
states["school_group_id_0"] = [11, 11, 22, 22, 22, 22, -1]
states["date"] = pd.Timestamp("2021-01-04") # week 1
states["county"] = 33
contacts = pd.Series(False, index=states.index)
seed = 333
res = mixed_educ_policy(
states=states,
contacts=contacts,
seed=seed,
group_id_column="school_group_id_0",
a_b_query=True,
non_a_b_attend=True,
hygiene_multiplier=1.0,
always_attend_query="county == 55", # no one
params=None,
)
pd.testing.assert_series_equal(res, contacts)
def test_identify_who_attends_because_of_a_b_schooling():
states = pd.DataFrame()
states["county"] = [1, 1, 2, 2, 2]
states["educ_a_b_identifier"] = [False, True, False, True, False]
states["date"] = pd.Timestamp("2021-01-04") # week number 1
# wrong county, wrong county, wrong week, right week, wrong week
expected = pd.Series([False, False, False, True, False])
#https://docs.google.com/document/d/1Y31Nt05peNIPwLo9O_TsTRAcy0GbDqGvRcHvJ4qtgzk/edit
###################### Index #######################
#---- Instructions
#---- Imports
#---- Functions
#---- File Reading
#---- Database Corrections
#---- Descriptive Tables
# ---- Number of Cyclists per Country
# ---- Mean, Mode, Median, etc.
# ---- Winners' Names per Edition
#---- Charts
# ---- Cyclists per Continent
# ---- Average Time per Round and Edition
# ---- Average km/h per Edition
# ---- Average Age per Edition
# ---- Average Time per Race
#---- Tests
# ---- Time to run the code
#===============================================================
#======================== Instructions =========================
#===============================================================
""" RUN IN THE CONSOLE: pip install dataframe_image """
#================= Errors in older pandas versions ==============
"""# Make sure the latest version of pandas is used:
# pandas updated to 1.2.0 or above
# pandas <= 1.2.0 raises:
# 'DataFrame' object has no attribute 'to_frame'
# Fixed with:
# XYZ = XYZ.to_frame()
# XYZ = XYZ.reset_index()
#https://pandas.pydata.org/docs/whatsnew/v1.2.0.html"""
#===============================================================
#========================= Imports =============================
#===============================================================
import matplotlib.pyplot as plt
import dataframe_image as dfi
import seaborn as sns
import pandas as pd
import numpy as np
import unittest
import time
import os
start = time.time()
#===============================================================
#========================= Functions ===========================
#===============================================================
#================= Validate Image Directory =====================
def directoryValidator():
if os.path.isdir("./images/") == False: #check if directory exists
os.makedirs(os.path.dirname("./images/")) #create directory
else:
pass #if it exists, all good.
#================= Image Saver =====================
image_counter = 0
def saveImages(category, graph_name, variable):
global image_counter, table_counter
directoryValidator()
image_counter += 1
if (category) == "graph":
(graph_name).savefig("./images/{}_{}.png".format(str(category),str(image_counter)))
print("\n\t Graph image saved to 'images' folder as {}_{}.png".format(str(category),str(image_counter)))
elif (category) == "table":
(graph_name).export((variable), "./images/{}_{}.png".format(str(category),str(image_counter)))
print("\n\t Table image saved to 'images' folder as {}_{}.png".format(str(category),str(image_counter)))
else:
print("\n No valid image Category found.")
#================= Athlete Age =====================
def obterIdade(nome,pais_jogo): #pais_jogo->host_country
athlete_id = 0
idade = None
contain= False
for i in athletes["Name"]:
if i == nome:
athlete_id = athletes[athletes["Name"]==nome]["Athlete_id"]
athlete_id = int(athlete_id)
contagem = sport_events["Host Country"].str.contains(pais_jogo).sum()
if contagem > 0:
contain = True
if athlete_id != 0 and contain == True:
data_nasc = athletes[athletes["Athlete_id"] == athlete_id]["Date of Birth"].values[0]
ano_nasc = str(data_nasc[-4:])
ano_nasc = int(ano_nasc)
data_jogo = int(sport_events.loc[(sport_events["Athlete_id"] == athlete_id) &
(sport_events["Host Country"] == pais_jogo), ["Year"]].values[0])
ano_jogo = int (data_jogo)
idade = ano_jogo-ano_nasc
return idade
#=============================================================================
#=================== Respective Continents, from "nation" ====================
#---- Caribbean Countries have been put into south_america
asia = ["China", "Japan", "Malaysia", "South Korea"]
africa = ["South Africa"]
europe = ["Czech Republic", "Estonia", "France", "Germany", "Great Britain", "Greece", "Italy",
"Latvia", "Netherlands", "Poland", "Russia", "Slovakia", "Spain"]
north_america = ["United States"]
oceania = ["Australia", "New Zealand"]
south_america = ["Barbados", "Colombia", "Cuba", "Trinidad and Tobago", "Venezuela"]
#================= Assign Country to a Continent =====================
def getContinent(country):
if country in asia:
continent = "Asia"
elif country in africa:
continent = "Africa"
elif country in europe:
continent = "Europe"
elif country in north_america:
continent = "North America"
elif country in oceania:
continent = "Oceania"
elif country in south_america:
continent = "South America"
else:
continent = None
return continent
#===============================================================
#==================== File Reading =============================
#===============================================================
#================= athletes.csv =====================
athletes = pd.read_csv ("./csv_files/athletes.csv")
#================= sport_events.csv =====================
sport_events = pd.read_csv ("./csv_files/sport_events.csv")
sport_events.drop(sport_events.tail(1).index,inplace=True) # remove the year 1896 row
athletes.drop(athletes.tail(1).index,inplace=True)
#===============================================================
#================= Database Corrections ========================
#===============================================================
#---- Change data types and rename columns
sport_events.columns = ["Year","Athlete_id","Race_id","Rounds","Heat","Time","Average Km/H",
"Rank","Result","Record","Host Country","Venue"]
athletes.columns = ["Athlete_id","Name","Date of Birth","Nation","Sex"]
sport_events["Time"] = sport_events["Time"].str.replace(',', '.')
sport_events["Time"] = | pd.to_numeric(sport_events["Time"],errors='coerce') | pandas.to_numeric |
#' Download StatsCan Metadata from Product Cube
#'
#' This function allows you to download product metadata from
#' Statistics Canada.
#' @param productId The Statistics Canada Product ID.
#' @keywords productId, product, metadata
#' @importFrom httr POST content content_type
#' @export
#' @examples
#' get_product_metadata('14100287')
#' cansimId = '2820087'
#' productId = '14100287'
#'
import requests
import csv
import zipfile
import io
import pandas as pd
from datetime import datetime
"""@package docstring
Download StatsCan Metadata from Product Cube
This function allows you to download product metadata from
Statistics Canada.
"""
"""Documentation for a class.
More details.
"""
class Product(object):
def __init__(self, productId, lang='en'):
self.productId = productId
self.lang = lang
self.camelLang = lang.capitalize()
self.metadata = self.get_metadata()
self.dimensions = self.get_dimensions()
## Documentation for a method.
# @param self The object pointer.
def get_metadata(self):
"""metadata
Retrieve metadata for a given productId.
"""
url = 'https://www150.statcan.gc.ca/t1/wds/rest/getCubeMetadata'
payload = [{'productId': int(self.productId)}]
print('Retrieving metadata for Product ID: ' + self.productId)
req = requests.post(
url,
json=payload
)
response = req.json()
if (response[0]['status'] == "SUCCESS"):
return(response[0]['object'])
else:
self.errors = response
print('ERROR: Metadata for Product ID ' + self.productId + ' could not be loaded.')
print('ERROR: see Product.errors() for more info')
def read_cansim_product_mapping(self, Id='all'):
if (Id == 'all'):
print("ERROR: Only one of cansimId or productId can be entered")
else:
response = requests.get(
'https://www.statcan.gc.ca/eng/developers-developpeurs/cansim_id-product_id-concordance.csv')
reader = csv.DictReader(io.StringIO(response.text))
cansim_concordance = [row for row in reader]
for row in cansim_concordance:
if row['PRODUCT_ID'] == Id:
returnFrame = row['CANSIM_ID']
return (returnFrame)
def downloadProductCube(self):
url = 'https://www150.statcan.gc.ca/t1/wds/rest/getFullTableDownloadCSV/' + self.productId + '/' + self.lang
response = requests.get(url).json()
if response['status'] == 'SUCCESS':
print('Downloading zip file of product cube....')
remote_zip = requests.get(response['object'])
root = zipfile.ZipFile(io.BytesIO(remote_zip.content))
name = self.productId + '.csv'
df = pd.read_csv(root.open(name))
return(df)
else:
print('ERROR - cannot find download url for productId: ' + self.productId)
def read_metadata(self):
d = self.dimensions
dim_names = [dim['dimensionName' + self.camelLang] for dim in self.metadata['dimension']]
# There are 6 dimensions. Some may have more or less?
print('The product has ' + str(len(d)) + ' dimensions.')
print('The dimensions are: ' + ', '.join(dim_names))
# This is the first dimension. for 14100287, this is geography
# Member is a list of all the values the dimension can take
# dimension 2 is labour force characterisitics
d[0]['member'][0].keys()
d[0]['member'][0].values()
# One can use the dims to generate a coordinate, which is a better way to download data.
# coordinate ID must have 10 digits. The last few are zeros if not defined.
# Example: Coordinate ID 1.1.1.1.1.2.0.0.0.0 is for the following:
# Canada, Population, Both sexes, 15 years and over, Estimate, Unadjusted
coordinateId = '1.1.1.1.1.2.0.0.0.0'
def get_series_info(self, coordinateId):
# coordinateId = '1.1.1.1.1.2.0.0.0.0'
self.coordinateId = coordinateId
payload = [{'productId': int(self.productId), "coordinate": self.coordinateId}]
req = requests.post(
url = 'https://www150.statcan.gc.ca/t1/wds/rest/getSeriesInfoFromCubePidCoord',
json=payload
)
if req.json()[0]['status'] == 'SUCCESS':
self.series_info = req.json()[0]['object']
self.vectorId = self.series_info['vectorId']
print('Series info stored in object.series_info')
else:
print('ERROR: Something went wrong with the API request')
def get_coordinate_data(self, n=10):
payload = [{'productId': int(self.productId), "coordinate": self.coordinateId, "latestN": n}]
req = requests.post(
url = 'https://www150.statcan.gc.ca/t1/wds/rest/getDataFromCubePidCoordAndLatestNPeriods',
json=payload
)
coord_data = req.json()
status = coord_data[0]['status']
if status == 'SUCCESS':
object_data = pd.DataFrame(coord_data[0]['object'])
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import statsmodels
from matplotlib import pyplot
from scipy import stats
import statsmodels.api as sm
import warnings
from itertools import product
import datetime as dt
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import kpss
from pandas import DataFrame
from pandas import concat
from pandas import Series
from math import sqrt
from sklearn.metrics import mean_squared_error
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
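# Added illustrative check (not part of the original analysis): frame two toy variables with
# one lag in and one step out, so each row pairs the previous observation (t-1) with the
# current one (t). The toy array below is an assumption used only for illustration.
_demo_framed = series_to_supervised(np.array([[0, 0], [1, 10], [2, 20], [3, 30]]), n_in=1, n_out=1)
print(_demo_framed)  # columns: var1(t-1), var2(t-1), var1(t), var2(t); the first row is dropped (NaN lag)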
data = pd.read_csv('Data/All_Merged.csv') # , parse_dates=[0], date_parser=dateparse
data.isna().sum()
# Inserting 0 for NA
data.fillna(0, inplace=True)
# plt.figure(figsize=[10,4])
# plt.title('BTC Price (USD) Daily')
# plt.plot(data.price, '-', label='Daily')
# Monthly
data['date'] = pd.to_datetime(data['date'])
data['date'] = data['date'].dt.tz_localize(None)
data = data.groupby([pd.Grouper(key='date', freq='M')]).first().reset_index()
data = data.set_index('date')
data['price'].fillna(method='ffill', inplace=True)
# Decomposition - only for price though!
# decomposition = sm.tsa.seasonal_decompose(data.price)
#
# trend = decomposition.trend
# seasonal = decomposition.seasonal
# residual = decomposition.resid
#
# fig = plt.figure(figsize=(10,8))
#
# plt.subplot(411)
# plt.plot(data.price, label='Original')
# plt.legend(loc='best')
# plt.subplot(412)
# plt.plot(trend, label='Trend')
# plt.legend(loc='best')
# plt.subplot(413)
# plt.plot(seasonal,label='Seasonality')
# plt.legend(loc='best')
# plt.subplot(414)
# plt.plot(residual, label='Residuals')
# plt.legend(loc='best')
#
# fig.suptitle('Decomposition of Prices Data')
# plt.show()
# Setting the data structure
reframed = series_to_supervised(data, 1, 1)
# Also removing the lagged price, as this will be created in the ARIMA
reframed.drop(reframed.columns[[0,8, 9, 10, 11, 12, 13]], axis=1, inplace=True)
print(reframed.head())
# split data
split_date = '2018-06-25'
reframed_train = reframed.loc[reframed.index <= split_date].copy()
reframed_test = reframed.loc[reframed.index > split_date].copy()
# Trying ARIMA on the original data
# This is a seasonal ARIMA (SARIMA), so it gives an extra result beyond a regular ARIMA
# A helpful reference for these parameters: https://machinelearningmastery.com/sarima-for-time-series-forecasting-in-python/
# It fits fine if you do not split into train and test.
# Initial approximation of parameters
Qs = range(0, 2)
qs = range(0, 3)
Ps = range(0, 3)
ps = range(0, 3)
D=1
d=1
parameters = product(ps, qs, Ps, Qs)
parameters_list = list(parameters)
len(parameters_list)
x_train = reframed_train.iloc[:,:-1].values
y_train = reframed_train.iloc[:,-1]
x_test = reframed_test.iloc[:,:-1].values
y_test = reframed_test.iloc[:,-1]
# Model Selection
results = []
best_aic = float("inf")
warnings.filterwarnings('ignore')
for param in parameters_list:
try:
model=sm.tsa.statespace.SARIMAX(endog=y_train, exog=x_train, order=(param[0], d, param[1]),
seasonal_order=(param[2], D, param[3], 12),enforce_stationarity=True,
enforce_invertibility=True).fit(disp=-1)
except ValueError:
print('wrong parameters:', param)
continue
aic = model.aic
if aic < best_aic:
best_model = model
best_aic = aic
best_param = param
results.append([param, model.aic])
# Best Models
result_table = pd.DataFrame(results)
result_table.columns = ['parameters', 'aic']
print(result_table.sort_values(by = 'aic', ascending=True).head())
print(best_model.summary())
# Residual plot of the best model
fig = plt.figure(figsize=(10,4))
best_model.resid.plot()
fig.suptitle('Residual Plot of the Best Model')
print("DickeyโFuller test:: p=%f" % sm.tsa.stattools.adfuller(best_model.resid)[1])
# DickeyโFuller test:: p=0.xxx -> Residuals are stationary
df_month2 = data[['price']]
future = pd.DataFrame()
"""
Description : This file implements the Drain algorithm for log parsing
Author : LogPAI team
License : MIT
"""
import hashlib
import os
import re
import pandas as pd
from datetime import datetime
from typing import List
from .log_signature import calc_signature
# A leaf node corresponds to one LogCluster
class LogCluster:
def __init__(self, template_token_list: List[str], log_id_list: List[int]):
self.template_token_list = template_token_list
self.log_id_list = log_id_list
self.template_id = None
# Tree node
class Node:
def __init__(self, childD=None, depth=0, digitOrToken=None):
self.childD = {} if childD is None else childD
self.depth = depth
self.digitOrToken = digitOrToken
self.template_count = 0
def get_template(seq1, seq2):
assert len(seq1) == len(seq2)
res = []
for t1, t2 in zip(seq1, seq2):
if t1 == t2:
res.append(t1)
else:
res.append('<*>')
return res
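# Added illustrative example (not part of the original implementation): merging two equal-length
# token sequences keeps shared tokens and masks differing positions with the "<*>" wildcard,
# which is how a cluster's template is generalised as new messages arrive.
assert get_template(["Send", "block", "blk_1"], ["Send", "block", "blk_2"]) == ["Send", "block", "<*>"]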
class LogParser:
def __init__(self, log_format, indir='./', outdir='./result/', depth=4, st=0.4,
maxChild=100, rex=None, keep_para=True):
"""
Attributes
----------
rex : regular expressions used in preprocessing (step1)
path : the input path stores the input log file name
depth : depth of all leaf nodes
st : similarity threshold
maxChild : max number of children of an internal node
log_name : the name of the input file containing raw log messages
save_path : the output path stores the file containing structured logs
"""
self.path = indir
self.depth = depth - 2
self.st = st
self.maxChild = maxChild
self.log_name = None
self.save_path = outdir
self.df_log = None
self.log_format = log_format
self.rex = [] if rex is None else rex
self.keep_para = keep_para
def tree_search(self, root, token_list):
seq_len = len(token_list)
# length layer: first branch on the sequence length
if seq_len not in root.childD:
return 0, None
len_node = root.childD[seq_len]  # node in the length layer
depth = 1
for token in token_list:
if depth >= self.depth or depth > seq_len:
break
if token in len_node.childD:
len_node = len_node.childD[token]
elif '<*>' in len_node.childD:
len_node = len_node.childD['<*>']
else:
return 0, None
depth += 1
return self.fastMatch(len_node.childD, token_list)
def addSeqToPrefixTree(self, rn, logClust):
def has_number(s):
return any(char.isdigit() for char in s)
logClust.template_id = rn.template_count  # template id equals the running count
rn.template_count += 1  # one more template under this root
seqLen = len(logClust.template_token_list)
if seqLen not in rn.childD:
firtLayerNode = Node(depth=1, digitOrToken=seqLen)
rn.childD[seqLen] = firtLayerNode
else:
firtLayerNode = rn.childD[seqLen]
parentn = firtLayerNode
currentDepth = 1
# NOTE: when the sequence has only one token the result may be incorrect
for token in logClust.template_token_list:
# Add current log cluster to the leaf node
if currentDepth >= self.depth or currentDepth > seqLen:
# if len(parentn.childD) == 0:
# parentn.childD = [logClust]
# else:
# parentn.childD.append(logClust)
break
# If token not matched in this layer of existing tree.
if token not in parentn.childD:
if not has_number(token):
if '<*>' in parentn.childD:
if len(parentn.childD) < self.maxChild:
newNode = Node(depth=currentDepth + 1, digitOrToken=token)
parentn.childD[token] = newNode
parentn = newNode
else:
parentn = parentn.childD['<*>']
else:
if len(parentn.childD) + 1 < self.maxChild:
newNode = Node(depth=currentDepth + 1, digitOrToken=token)
parentn.childD[token] = newNode
parentn = newNode
elif len(parentn.childD) + 1 == self.maxChild:
newNode = Node(depth=currentDepth + 1, digitOrToken='<*>')
parentn.childD['<*>'] = newNode
parentn = newNode
else:
parentn = parentn.childD['<*>']
else:
if '<*>' not in parentn.childD:
newNode = Node(depth=currentDepth + 1, digitOrToken='<*>')
parentn.childD['<*>'] = newNode
parentn = newNode
else:
parentn = parentn.childD['<*>']
# If the token is matched
else:
parentn = parentn.childD[token]
currentDepth += 1
# add the cluster to the leaf's logCluster list
if len(parentn.childD) == 0:
parentn.childD = [logClust]
else:
parentn.childD.append(logClust)
# seq1 is template
def seqDist(self, seq1, seq2):
assert len(seq1) == len(seq2)
simTokens = 0
numOfPar = 0
for token1, token2 in zip(seq1, seq2):
if token1 == '<*>':
numOfPar += 1
continue
if token1 == token2:
simTokens += 1
retVal = float(simTokens) / len(seq1)
return retVal, numOfPar
def fastMatch(self, logClustL, seq):
retLogClust = None
maxSim = -1
maxNumOfPara = -1
maxClust = None
maxIdx = -1  # index of the matched cluster
for i, logClust in enumerate(logClustL):
curSim, curNumOfPara = self.seqDist(logClust.template_token_list, seq)
if curSim > maxSim or (curSim == maxSim and curNumOfPara > maxNumOfPara):
maxSim = curSim
maxNumOfPara = curNumOfPara
maxClust = logClust
maxIdx = i
if maxSim < self.st:
return len(logClustL), None
else:
return maxIdx, maxClust
# if maxSim >= self.st:
# retLogClust = maxClust
# return retLogClust
# output the custom result (event ids only)
def outputEventId(self, event_id_list):
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
self.df_log['EventId'] = event_id_list
self.df_log.to_csv(os.path.join(self.save_path, self.log_name + '_structured.csv'), index=False)
def outputResult(self, logClustL):
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
log_templates = [0] * self.df_log.shape[0]
log_templateids = [0] * self.df_log.shape[0]
df_events = []
for logClust in logClustL:
template_str = ' '.join(logClust.template_token_list)
occurrence = len(logClust.log_id_list)
# template_id = hashlib.md5(template_str.encode('utf-8')).hexdigest()[0:8]
template_id = logClust.template_id
for log_id in logClust.log_id_list:
log_id -= 1
log_templates[log_id] = template_str
log_templateids[log_id] = template_id
df_events.append([template_id, template_str, occurrence])
# df_event = pd.DataFrame(df_events, columns=['EventId', 'EventTemplate', 'Occurrences'])
self.df_log['EventId'] = log_templateids
self.df_log['EventTemplate'] = log_templates
if self.keep_para:
self.df_log["ParameterList"] = self.df_log.apply(self.get_parameter_list, axis=1)
self.df_log.to_csv(os.path.join(self.save_path, self.log_name + '_structured.csv'), index=False)
occ_dict = dict(self.df_log['EventTemplate'].value_counts())
df_event = pd.DataFrame()
df_event['EventTemplate'] = self.df_log['EventTemplate'].unique()
df_event['EventId'] = df_event['EventTemplate'].map(lambda x: hashlib.md5(x.encode('utf-8')).hexdigest()[0:8])
# df_event['EventId'] = self.df_log['EventId'].unique()
df_event['Occurrences'] = df_event['EventTemplate'].map(occ_dict)
df_event.to_csv(os.path.join(self.save_path, self.log_name + '_templates.csv'), index=False,
columns=["EventId", "EventTemplate", "Occurrences"])
def printTree(self, node, dep):
pStr = ''
for i in range(dep):
pStr += '\t'
if node.depth == 0:
pStr += 'Root'
elif node.depth == 1:
pStr += '<' + str(node.digitOrToken) + '>'
else:
pStr += node.digitOrToken
print(pStr)
if node.depth == self.depth:
return 1
for child in node.childD:
self.printTree(node.childD[child], dep + 1)
def parse(self, log_name: str):
print('Parsing file: ' + os.path.join(self.path, log_name))
start_time = datetime.now()
self.log_name = log_name
# root_node = Node()
all_cluster_list = []  # all logClusters discovered so far
sig_bin = {}
# store the parsed EventId for every log line
event_id_list = []
self.load_data()
# iterate over every log line
for idx, line in self.df_log.iterrows():
log_content = line['Content']
log_id = line['LineId']
# preprocess and tokenize
log_token_list = self.preprocess(log_content).strip().split()
# compute the log signature
log_sig = calc_signature(log_token_list)
# log_sig = 0
if log_sig not in sig_bin:
sig_bin[log_sig] = Node()
# each signature has its own parse tree
this_root = sig_bin[log_sig]
# search the tree for a matching cluster
# matched_cluster = self.tree_search(root_node, log_token_list)
match_idx, matched_cluster = self.tree_search(this_root, log_token_list)
# parsed_event_id = log_sig * 100 + matched_cluster.template_id
# event_id_list.append(parsed_event_id)
# cluster_template_id = matched_cluster.template_id
if matched_cluster is None:
# no matching cluster found: create a new one
new_cluster = LogCluster(template_token_list=log_token_list, log_id_list=[log_id])
all_cluster_list.append(new_cluster)
# self.addSeqToPrefixTree(root_node, new_cluster)
self.addSeqToPrefixTree(this_root, new_cluster)
cluster_template_id = new_cluster.template_id
else:
# add the log message to the existing cluster
new_template_token_list = get_template(log_token_list, matched_cluster.template_token_list)
matched_cluster.log_id_list.append(log_id)
if ' '.join(new_template_token_list) != ' '.join(matched_cluster.template_token_list):
matched_cluster.template_token_list = new_template_token_list
cluster_template_id = matched_cluster.template_id
event_id_list.append(log_sig * 1000 + cluster_template_id)
count = idx + 1
if count % 1000 == 0 or count == len(self.df_log):
print('Processed {0:.1f}% of log lines.'.format(count * 100.0 / len(self.df_log)))
self.outputEventId(event_id_list)
# self.outputResult(all_cluster_list)
print('Parsing done. [Time taken: {!s}]'.format(datetime.now() - start_time))
def load_data(self):
headers, regex = self.generate_logformat_regex(self.log_format)
self.df_log = self.log_to_dataframe(os.path.join(self.path, self.log_name), regex, headers, self.log_format)
def preprocess(self, line):
for currentRex in self.rex:
line = re.sub(currentRex, '<*>', line)
return line
def log_to_dataframe(self, log_file, regex, headers, logformat):
""" Function to transform log file to dataframe
"""
log_messages = []
linecount = 0
with open(log_file, 'r') as fin:
for line in fin.readlines():
try:
match = regex.search(line.strip())
message = [match.group(header) for header in headers]
log_messages.append(message)
linecount += 1
except Exception as e:
pass
logdf = pd.DataFrame(log_messages, columns=headers)
import requests
import time
import string
import html5lib
import re
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
def player_scrape():
start = time.time()
abc = list(string.ascii_uppercase)
players_df = pd.DataFrame()
try:
for z in abc:
url = f"https://www.pro-football-reference.com/players/{z}/"
r = requests.get(url)
r = r.content
soup = BeautifulSoup(r, "lxml")
data = soup.find_all("p")
players = []
for i in range(len(data[:-12])):
try:
player = {}
player["player_url"] = data[i].find("a")["href"][9:-4]
player["player_name"] = data[i].find("a").text
player["player_lastyear"] = data[i].text[-4:]
player["player_position"] = data[i].text[
data[i].text.find("(") + 1 : data[i].text.find(")")
]
players.append(player)
except:
continue
df = pd.DataFrame.from_dict(players)
players_df = pd.concat([players_df, df])
time.sleep(1)
players_df["player_lastyear"] = players_df["player_lastyear"].astype("int64")
print("Done: players_df")
print("Time: ", (time.time() - start) / 60, " min")
return players_df
except Exception as e:
print("Error: " + e)
def snap_scrape(y1, y2, players_df):
start = time.time()
weeks = pd.read_csv("../nfl-weeks.csv")
try:
if players_df.empty == True:
print(
"Error: Please insert players database from the return of fucntion player_scrape()"
)
else:
snaps_df = pd.DataFrame()
players = players_df[players_df["player_lastyear"] >= y1]
players_url = players["player_url"].tolist()
for url in players_url:
for year in range(y1, y2 + 1):
p_url = f"https://www.pro-football-reference.com/players/{url}/gamelog/{year}/"
try:
r = requests.get(p_url)
r = r.content
soup = BeautifulSoup(r, "lxml")
table = soup.find("table", attrs={"id": "stats"})
headers = table.find_all(
"th",
attrs={
"data-stat": [
"game_date",
"team",
"game_location",
"opp",
"offense",
"defense",
"special_teams",
]
},
)
columns = []
for h in headers:
columns.append(h.text)
data = table.find_all("tr")[2:-1]
rows = []
for row in data:
r = []
td = row.find_all(
"td",
attrs={
"data-stat": [
"game_date",
"team",
"game_location",
"opp",
"offense",
"defense",
"special_teams",
]
},
)
for d in td:
r.append(d.text)
rows.append(r)
df = pd.DataFrame(rows, columns=columns)
df.columns = [
"date",
"team",
"home.away",
"opp",
"off.snaps",
"def.snaps",
"st.snaps",
]
df["player_url"] = url
df["season"] = year
snaps_df = pd.concat([df, snaps_df])
except:
continue
time.sleep(1)
for row in weeks.iterrows():
mask = (snaps_df["date"] >= row[1]["Start_Date"]) & (
snaps_df["date"] <= row[1]["End_Date"]
)
snaps_df.loc[mask, "week"] = row[1]["Week"]
snaps_df = snaps_df.merge(
players, how="left", left_on="player_url", right_on="player_url"
)
snaps_df.drop("player_lastyear", axis=1, inplace=True)
snaps_df["season"] = snaps_df["season"].astype(str)
snaps_df["week"] = snaps_df["week"].astype(str)
snaps_df = snaps_df.replace(
{
"team": {
"GNB": "GB",
"JAX": "JAC",
"KAN": "KC",
"LAR": "LA",
"LVR": "LV",
"NOR": "NO",
"NWE": "NE",
"SDG": "SD",
"SFO": "SF",
"TAM": "TB",
},
"opp": {
"GNB": "GB",
"JAX": "JAC",
"KAN": "KC",
"LAR": "LA",
"LVR": "LV",
"NOR": "NO",
"NWE": "NE",
"SDG": "SD",
"SFO": "SF",
"TAM": "TB",
},
}
)
snaps_df["home.away"] = snaps_df["home.away"].replace(
{"@": "away", "": "home", np.nan: "home"}
)
snaps_df[["off.snaps", "def.snaps", "st.snaps"]] = snaps_df[
["off.snaps", "def.snaps", "st.snaps"]
].fillna(0)
snaps_df[["off.snaps", "def.snaps", "st.snaps"]] = snaps_df[
["off.snaps", "def.snaps", "st.snaps"]
].replace("", 0)
snaps_df[["off.snaps", "def.snaps", "st.snaps"]] = snaps_df[
["off.snaps", "def.snaps", "st.snaps"]
].astype(int)
snaps_df["season"] = snaps_df["season"].astype(str)
replace = [" III", " II", " IV", " V", " Jr.", " Sr.", " Sr", " Jr"]
snaps_df["player_name"] = snaps_df["player_name"].str.replace(
"|".join([re.escape(s) for s in replace]), "", regex=True
)
snaps_df = snaps_df.drop_duplicates()
print("Done: Player snaps scraped.")
print("Done: ", (time.time() - start) / 60, " min")
return snaps_df
except Exception as e:
print("Error: " + e)
def injury_scrape(season):
start = time.time()
teams = [
"crd",
"atl",
"rav",
"buf",
"car",
"chi",
"cin",
"cle",
"dal",
"den",
"det",
"gnb",
"htx",
"clt",
"jax",
"kan",
"rai",
"sdg",
"ram",
"mia",
"min",
"nwe",
"nor",
"nyg",
"nyj",
"phi",
"pit",
"sfo",
"sea",
"tam",
"oti",
"was",
]
injury_df = pd.DataFrame()
for team in teams:
url = (
f"https://www.pro-football-reference.com/teams/{team}/{season}_injuries.htm"
)
r = requests.get(url)
soup = BeautifulSoup(r.content, "lxml")
table = soup.find("table", attrs={"id": "team_injuries"})
table_headers = table.find("thead").find_all("th")
columns = []
for header in range(len(table_headers)):
c1 = table_headers[header].text
c2 = table_headers[header]["data-stat"]
columns.append(c1 + ": " + c2)
columns.insert(0, "player_address")
table_rows = table.find("tbody").find_all("tr")
rows = []
for tr in range(len(table_rows)):
p = table_rows[tr].find("th").text
padd = table_rows[tr].find("th").find("a")["href"][9:-4]
td = table_rows[tr].find_all("td")
row = [
td[d].get("data-tip") if td[d].has_attr("data-tip") else np.nan
for d in range(len(td))
]
row.insert(0, p)
row.insert(0, padd)
rows.append(row)
df = pd.DataFrame(rows, columns=columns)
import os
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def redshift_url() -> str:
conn = os.environ["REDSHIFT_URL"]
return conn
@pytest.mark.skipif(not os.environ.get("REDSHIFT_URL"), reason="Do not test Redshift unless `REDSHIFT_URL` is set")
def test_redshift_without_partition(redshift_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(redshift_url, query, protocol="cursor")
# result from redshift might have different order each time
df.sort_values(by="test_int", inplace=True, ignore_index=True)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": | pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64") | pandas.Series |
# make a bootstrapped dataframe from the bootstrap index file
import pandas as pd
import os
import glob
import numpy as np  # pd.np was removed in newer pandas versions
cwd = os.getcwd()
#os.chdir(read_path)
path =r'/Users/dingwenna/AlpineOrigin/' # use your path
allFiles = glob.glob(path + "/*events.csv.gz")
frame = pd.DataFrame()
import os
from glob import glob
import pandas as pd
def get_options():
import argparse
parser = argparse.ArgumentParser(
description='takes a folder with ')
parser.add_argument('--path', required=True,
metavar="str", type=str,
help='folder where the result files can be found')
parser.add_argument('--variable_to_report', required=True,
metavar="str", type=str,
help='folder where the result files can be found')
parser.add_argument('--name', required=True,
metavar="str", type=str,
help='name of the output')
args = parser.parse_args()
return args
def remove_percentage(tab, perc):
    # Keep only rows whose val_auc_roc is within `perc` percentage points of the best value.
    max_val = tab['val_auc_roc'].max()
    tab = tab[(perc / 100.0) > max_val - tab['val_auc_roc']]
    return tab
def main():
only_keep_10_highest = True
options = get_options()
files = glob(os.path.join(options.path, 'neural_networks_model_fold_number*.csv'))
if options.variable_to_report == 'auc':
valv = 'val_auc_roc'
testv = 'test_auc_roc'
tab_global = []
for f in files:
table = pd.read_csv(f)
tab_global.append(table)
tab_best = []
for f in files:
table = pd.read_csv(f)
idx = table.groupby(['validation_fold'])['val_auc_roc'].transform(max) == table['val_auc_roc']
best_val_idx = table[idx]
if only_keep_10_highest:
best_val_idx = remove_percentage(best_val_idx, 10)
tab_best.append(best_val_idx)
table = pd.concat(tab_global, axis=0)
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
from cellgrid.core import Schema, ModelBlueprint, DataMapper
from cellgrid.ensemble.classifier import DataFrame, Series
class ModeTestClass:
def __init__(self, bp):
self.name = bp.name
self.parent = bp.parent
class SchemaTestClass(Schema):
@classmethod
def create_model(cls, bp):
return ModeTestClass(bp)
class TestSchema:
def test_add_node(self):
schema = SchemaTestClass([])
bp = ModelBlueprint('test', 'markers', 'xgb', None)
xgb = SchemaTestClass.create_model(bp)
schema.add_model(xgb)
assert xgb == schema._model_dict['test']['model']
bp2 = ModelBlueprint('test2', 'markers', 'xgb', 'test')
xgb2 = SchemaTestClass.create_model(bp2)
schema.add_model(xgb2)
assert schema._model_dict['test']['children'] == ['test2']
def test_build_and_walk(self):
bps = [
ModelBlueprint('test11', 'markers', 'xgb', 'test0'),
ModelBlueprint('test0', 'markers', 'xgb', None),
ModelBlueprint('test111', 'markers', 'xgb', 'test11'),
ModelBlueprint('test12', 'markers', 'xgb', 'test0'),
ModelBlueprint('test112', 'markers', 'xgb', 'test11'),
ModelBlueprint('test121', 'markers', 'xgb', 'test12')
]
schema = SchemaTestClass(bps)
r = [(i.name, i.level) for i in schema.walk()]
assert r == [('test0', 0),
('test11', 1),
('test12', 1),
('test111', 2),
('test112', 2),
('test121', 2),
]
class TestDataFrameAndSeries:
def test_get_col_series(self):
df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=list('ab'))
df = DataFrame(df)
s = df.get_col_series('b')
assert_series_equal(s.s, pd.Series([2, 4, 6], name='b'))
assert isinstance(s, Series)
s2 = df.get_col_series('a', index=[1])
assert_series_equal(s2.s, pd.Series([3], name='a', index=[1]))
def test_loc(self):
df_pd = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=list('ab'))
df = DataFrame(df_pd)
df_loc = df.loc(index=[0, 1])
assert_frame_equal(df_loc.df, pd.DataFrame([[1, 2], [3, 4]], columns=list('ab')))
assert isinstance(df_loc, DataFrame)
df_loc2 = df.loc()
assert_frame_equal(df_loc2.df, df_pd)
#!/usr/bin/env python
"""
BSD 2-Clause License
Copyright (c) 2021 (<EMAIL>)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import division
import argparse
import itertools
import json
import operator
import os
import re
import sys
import pickle
import math
from distutils.util import strtobool
import numpy as np
import pysam
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.backends.backend_pdf import PdfPages
from polyleven import levenshtein
from Bio import SeqIO
import seaborn as sns
import pandas as pd
from scipy import stats
###### Usage
#python plot_identity_error_alignment_normUnal.py -i basecaller1/norm_unaligned_assembly_polished basecaller2/norm_unaligned_assembly_polished basecaller3/norm_unaligned_assembly_polished -l basecaller1 basecaller2 basecaller3 -o outfolder -p appendix_outputname
#
def safe_div(x, y):
if y == 0:
return None
return x / y
plt.rcParams["patch.force_edgecolor"] = False
def plot_error_identity(df, pdf=None):
sns.set(font_scale=1)
fig = plt.figure(figsize=(13,2))
fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2)
ax = fig.add_subplot(1, 2, 1)
sns.barplot(x="basecaller", hue="genome", y="error", data=df, linewidth=0, ax=ax)
plt.xlabel("Basecallers")
plt.ylabel("Error")
plt.title("Error rate of aligned reads to reference genome")
ax.get_legend().remove()
ax = fig.add_subplot(1, 2, 2)
sns.barplot(x="basecaller", hue="genome", y="identity", data=df, linewidth=0, ax=ax)
plt.xlabel("Basecallers")
plt.ylabel("Identity")
plt.title("Identity rate of aligned reads to reference genome")
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., fontsize=10)
if pdf is not None:
pdf.savefig(fig, bbox_inches="tight")
plt.close("all")
def plot_match_mismatch_indels(df, pdf=None, stacked=True):
sns.set(font_scale=1)
fig = plt.figure(figsize=(10,5))
fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2)
if stacked:
ax = fig.add_subplot(2, 2, 1)
ax2 = ax.twiny()
#sns.barplot(x="basecaller", hue="genome", y="match", data=df, linewidth=0, ax=ax)
#plt.xlabel("Basecallers")
#plt.ylabel("%Matches")
#plt.title("Matches")
df0 = df[['basecaller', 'genome', 'mismatch', 'deletion', 'insertion', 'unaligned']]
cols = df0.columns
u, idx = np.unique(df.basecaller.tolist(), return_index=True)
order = u[np.argsort(idx)] #[u[index] for index in sorted(idx)]
df0['basecaller'] = pd.Categorical(df0.basecaller, categories=order, ordered=True) # ['f', 'a', 'w', 'h'] # prevent sorting
df0.set_index(['basecaller', 'genome'], inplace=True)
colors = plt.cm.Paired.colors
df1 = df0.unstack(level=-1) # unstack the 'Context' column
(df1['mismatch']+df1['deletion']+df1['insertion']+df1['unaligned']).plot(kind='bar', color=[colors[1], colors[0]], rot=0, ax=ax, linewidth=0)
print(df1['mismatch']+df1['deletion']+df1['insertion']+df1['unaligned'])
(df1['deletion']+df1['insertion']+df1['unaligned']).plot(kind='bar', color=[colors[3], colors[2]], rot=0, ax=ax, linewidth=0)
(df1['insertion']+df1['unaligned']).plot(kind='bar', color=[colors[5], colors[4]], rot=0, ax=ax, linewidth=0)
df1['unaligned'].plot(kind='bar', color=[colors[7], colors[6]], rot=0, ax=ax, linewidth=0)
#legend_labels = [f'{val} ({context})' for val, context in df1.columns]
ticks = []
for r in range(df.shape[0]//2):
ticks.append(r - 0.25)
ticks.append(r + 0.05)
ax.set_xticks(ticks)
ax.set_xticklabels(['lambda', 'ecoli'] * (df.shape[0]//2), rotation=45, fontsize=8)
ax.grid(axis="x")
legend_labels = []
labels = ["mismatch", "", "deletion", "", "insertion", "", "unaligned", ""]
#for val in labels:
# if val in legend_labels:
# legend_labels.append("")
# else:
# legend_labels.append(val)
#legend_labels = [f'{val} ({context})' for val, context in df1.columns]3
ax.legend(labels, bbox_to_anchor=(-0.08, 1.2), loc=2, borderaxespad=0., ncol=4, fontsize=10) #(1.05, 1)
ax.set_ylabel("mean error in %")
ax.set_xlabel("species")
ax.set_yscale('log') #,base=20)
#ax.text(0.02, -0.2, ' '.join(order), transform=ax.transAxes, fontsize=11) #horizontalalignment='center', verticalalignment='center'
ax2.set_xlim(ax.get_xlim())
ax2.set_xticks([0.02, 1, 2, 3])
ax2.set_xticklabels(order, fontsize=10)
ax.xaxis.set_ticks_position('none')
#ax2.xaxis.set_ticks_position('none')
ax2.grid(axis="x")
#ax.legend(legend_labels, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.ylabel("Proportion of errors")
else:
ax = fig.add_subplot(2, 2, 1)
sns.barplot(x="basecaller", hue="genome", y="mismatch", data=df, linewidth=0, ax=ax)
plt.xlabel("Basecallers")
plt.ylabel("Mismatches")
plt.title("Mismatches")
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xticks(fontsize=8)
#ax._legend.remove()
ax = fig.add_subplot(2, 2, 3)
sns.barplot(x="basecaller", hue="genome", y="deletion", data=df, linewidth=0, ax=ax)
plt.xlabel("Basecallers")
plt.ylabel("Deletion")
plt.title("Deletion")
ax.get_legend().remove()
plt.xticks(fontsize=8)
#ax._legend.remove()
ax = fig.add_subplot(2, 2, 4)
sns.barplot(x="basecaller", hue="genome", y="insertion", data=df, linewidth=0, ax=ax)
plt.xlabel("Basecallers")
plt.ylabel("Insertion")
plt.title("Insertion")
ax.get_legend().remove()
plt.xticks(fontsize=8)
#ax._legend.remove()
if pdf is not None:
pdf.savefig(fig, bbox_inches="tight")
plt.close("all")
def plot_boxplot(data, labels, pdf=None, title="relative read length", ylabel="read length / reference length in %", reference=None):
sns.set(font_scale=1)
fig = plt.figure(figsize=(6,2))
fig.subplots_adjust(left=0, right=1, bottom=0, top=2, hspace=0.2, wspace=0.2)
ax = fig.add_subplot(1, 1, 1)
box = plt.boxplot(data, patch_artist=True)
ticks = np.arange(1, len(labels)+1)
plt.xticks(ticks, labels, rotation=45, ha="right")
plt.ylabel(ylabel)
plt.xlabel("Basecaller")
plt.title(title)
#plt.yscale('log') #,base=20)
if reference is not None:
plt.axhline(reference, c='r')
colors = len(labels[-3:]) * ['#EAEAF2'] + 3* ["#88888C"]
#colors2 = len(labels[-3:]) * ['#DD8655'] + 3* ["#181819"]
for patch, color in zip(box['boxes'], colors):
patch.set_facecolor(color)
#med.set_facecolor(color2)
if pdf is not None:
pdf.savefig(fig, bbox_inches="tight")
plt.close("all")
def make_argparser():
parser = argparse.ArgumentParser(description='Prints summary about alignment of basecalled reads.')
parser.add_argument('-i', '--fastq', nargs="*",
help='FASTA/Q files with basecalled reads.')
parser.add_argument('-l', '--labels', nargs="*",
help='list of labels. same order as list with fastq/a files')
parser.add_argument('-o', '--out',
help='out path.')
parser.add_argument('-p', '--prefix', default="basecalled",
help='out path.')
parser.add_argument('--stacked', type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True,
help='stack error rates in plot.')
return parser
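# Example invocation (script and file names are placeholders):
# python report.py -i guppy.fastq bonito.fastq -l guppy bonito -o results/ -p basecalled --stacked True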
def median_abs_dev(x):
return(stats.median_absolute_deviation(x))
def report_errors(argv):
parser = make_argparser()
args = parser.parse_args(argv[1:])
fastq = args.fastq
basecallers = args.labels
out = args.out
prefix = args.prefix
stacked = args.stacked
with PdfPages(out + "/{}_error_alignment_rates.pdf".format(prefix)) as pdf:
lambd = []
ecoli = []
df = pd.DataFrame(columns=['basecaller', 'genome', 'match', 'mismatch', 'deletion', 'insertion', 'unaligned', 'identity', 'error', 'mqual', 'relative read length', 'aligned \% of read'])
df_std = | pd.DataFrame(columns=['basecaller', 'genome', 'match', 'mismatch', 'deletion', 'insertion', 'unaligned','identity', 'error', 'mqual', 'relative read length', 'aligned \% of read']) | pandas.DataFrame |
import os
import numpy as np
import torch
from torchvision import models
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from sklearn.model_selection import train_test_split
import pandas as pd
from itertools import chain
from sklearn import preprocessing
import torch.nn as nn
from PIL import Image
from sklearn.metrics import fbeta_score
# %%-------------------- EDA -----------------------------------------
import matplotlib.pyplot as plt
classes = ['primary', 'clear', 'agriculture', 'road', 'water',
'partly_cloudy', 'cultivation', 'habitation', 'haze', 'cloudy', 'bare_ground',
'selective_logging', 'artisanal_mine', 'blooming', 'slash_burn', 'blow_down', 'conventional_mine']
count = [37513, 28431, 12315, 8071, 7411, 7261, 4547, 3660, 2697, 2089, 862, 340, 339, 332, 209, 101, 100]
plt.barh(classes, count)
plt.title("Labels in dataset")
plt.show()
# %%-----------------------------------------------------------------
# Dictionary image labels
label=pd.read_csv('/home/ubuntu/ML2/Final Project/Data/train_v2.csv')
print(label.head())
imagestag=label['image_name'].values
imageslabel=label['tags'].values
dictionary = dict(zip(imagestag, imageslabel))
#Counting images
labels_list = list(chain.from_iterable([tags.split(" ") for tags in imageslabel]))
labels = sorted(set(labels_list))
countlabels = | pd.Series(labels_list) | pandas.Series |
"""
Module: Instagram Scrapper
Author: <NAME>
Version: 1.0.2
Last Modified: 28/11/2018 (Wednesday)
"""
from bs4 import BeautifulSoup
from SeleniumHelper import SeleniumBrowserHelper
from PostScrapper import PostScrapper
from PostScrapper import PostDetails
import pandas as pd
import json
import time
import FileHandler as fh
class InstaDetails:
def __init__(self):
self.username = None
self.verified = None
self.name = None
self.bio = None
self.website = None
self.no_of_posts = None
self.no_of_followers = None
self.no_of_following = None
self.posts = None
self.data = None
class InstaScrapper(InstaDetails):
posts_count = 0
def __init__(self):
self.html = None
self.helper = None
self.chrome_driver_path = None
self.chrome_options = None
self.soup = None
self.instagram_url = "https://www.instagram.com"
self.url_to_visit = ""
self.search_value = ""
self.selected = None
self.internet_connected = False
self.instagram_reachable = False
self.scrappable = False
self.details_extracted = False
self.characteristics = {"profile": False, "location": False, "hashtag": False}
self.posts_data = PostDetails()
def insta_search(self):
self.helper.open_url(self.instagram_url + "/instagram/")
self.helper.wait_until(delay=5, type="-class", arg="_9eogI")
self.load_page_content()
insta_search = self.helper.get_element("-class", "XTCLo")
self.search_value = input("Enter Something To Search On Instagram: ")
insta_search.send_keys(self.search_value)
self.helper.wait_until(delay=5, type="-class", arg="fuqBx")
self.load_page_content()
search_response = self.soup.find_all('a', {'class': 'yCE8d'})
choices = {}
for i, s in enumerate(search_response):
current_link = s['href']
current_value = s.find('span', {'class': 'Ap253'}).get_text()
current_response = [current_value, current_link]
choices[i] = current_response
print("Following Are The Top Results Against Your Search: ")
print("<---------Search Results-------->")
for key, value in choices.items():
print("\t", key+1, ".", value[0])
print("<------------------------------->")
option = int(input("Please Choose An Option From Search Results\nChoice: "))
self.selected = choices[option-1]
self.url_to_visit = self.selected[1]
self.check_n_set_url_characteristics(self.url_to_visit)
def start_fetching(self):
if self.characteristics["profile"]:
print("Fetching Data From Instagram For Profile: " + self.selected[0] + " !!!")
self.helper.open_url(self.instagram_url + self.url_to_visit)
self.helper.wait_until(delay=10, type="-class", arg="_9eogI")
self.load_page_content()
self.extract_details(1)
self.print_details(1)
self.fetch_posts()
self.save_details(1)
self.save_as_csv(1)
self.save_as_json(1)
elif self.characteristics["location"]:
print("Fetching Data From Instagram For Location: " + self.selected[0] + " !!!")
self.helper.open_url(self.instagram_url + self.url_to_visit)
self.helper.wait_until(delay=10, type="-class", arg="_9eogI")
self.load_page_content()
self.extract_details(3)
self.print_details(3)
self.fetch_posts()
self.save_details(3)
self.save_as_csv(3)
self.save_as_json(3)
elif self.characteristics["hashtag"]:
print("Fetching Data From Instagram For Hashtag: " + self.selected[0] + " !!!")
self.helper.open_url(self.instagram_url + self.url_to_visit)
self.helper.wait_until(delay=10, type="-class", arg="_9eogI")
self.load_page_content()
self.extract_details(2)
self.print_details(2)
self.fetch_posts()
self.save_details(2)
self.save_as_csv(2)
self.save_as_json(2)
def extract_details(self, choice):
if choice == 1:
if self.scrappable:
username = self.soup.find('h1', {'class': 'AC5d8'})
if username is None:
self.username = "Not Available"
else:
self.username = username.get_text()
name = self.soup.find('h1', {'class': 'rhpdm'})
if name is None:
self.name = "Not Available"
else:
self.name = name.get_text()
bio = self.soup.find('div', {'class': '-vDIg'}).span
if bio is None:
self.bio = "Not Available"
else:
self.bio = bio.get_text()
verified = self.soup.find('span', {'class': 'mrEK_'})
if verified is not None and verified.get_text() == "Verified":
self.verified = True
else:
self.verified = False
profile_statistics = self.soup.find_all('span', {'class': 'g47SY'})
self.no_of_posts = profile_statistics[0].get_text()
self.no_of_followers = profile_statistics[1].get_text()
self.no_of_following = profile_statistics[2].get_text()
website = self.soup.find('a', {'class': 'yLUwa'})
if website is None:
self.website = "Not Available"
else:
self.website = website.get_text()
self.details_extracted = True
elif choice == 2:
name = self.soup.find('a', {'class': 'F2iT8'})
if name is None:
self.name = "Not Available"
else:
self.name = name.get_text()
total_posts = self.soup.find('span', {'class': 'g47SY'})
if total_posts is None:
self.no_of_posts = "0"
else:
self.no_of_posts = total_posts.get_text()
self.details_extracted = True
elif choice == 3:
name = self.soup.find('h1', {'class': 'cgig_'})
if name is None:
self.name = "Not Available"
else:
self.name = name.get_text()
self.no_of_posts = "100"
self.details_extracted = True
else:
print("Content Cannot Be Fetched At The Moment!")
def check_n_set_url_characteristics(self, url):
if "explore" in url:
if "tags" in url:
self.characteristics["hashtag"] = True
elif "locations" in url:
self.characteristics["location"] = True
else:
self.characteristics["profile"] = True
def init_scrapper(self, c_path, c_args):
# print("Initializing Scrapper!!!!!!")
self.chrome_driver_path = c_path
self.chrome_options = c_args
self.helper = SeleniumBrowserHelper(self.chrome_driver_path, self.chrome_options)
# print("Checking Internet Connectivity!!!!")
self.internet_connected = self.helper.is_connected()
# self.instagram_reachable = self.helper.is_reachable(self.instagram_url, 10)
def start_scrapper(self):
print("Starting Scrapper!!!!")
if self.internet_connected:
# if self.instagram_reachable:
self.insta_search()
self.scrappable = True
self.init_posts_data()
self.start_fetching()
print("Fetching Completed!!!!")
self.save_posts_data()
time.sleep(3)
# else:
# print("Instagram Access Restricted By ISP/Network Administrator")
else:
print("No Internet!! Please Check Your Network Connectivity!!")
def close_scrapper(self):
print("Closing Scrapper In 3 Seconds! Have A Nice Day!!!")
self.helper.close_browser()
time.sleep(3)
def load_page_content(self):
self.html = self.helper.get_page_source()
self.soup = BeautifulSoup(self.html, 'html.parser')
def save_details(self, code):
if code == 1:
self.data = {
"username": self.username,
"is_verified": self.verified,
"name": self.name,
"bio": self.bio,
"website": self.website,
"posts_count": self.no_of_posts,
"followers_count": self.no_of_followers,
"following_count": self.no_of_following,
"posts": [self.posts]
}
elif code == 2:
self.data = {
"hashtag": self.name,
"posts_count": self.no_of_posts,
"posts": [self.posts]
}
elif code == 3:
self.data = {
"location": self.name,
"top_100_posts": [self.posts]
}
def fetch_posts(self):
total_posts = int(self.no_of_posts.replace(',', ''))
choice = int(input("Please Select An Option For Fetching " + self.name+"'s Posts: \n\t1. Fetch All " +
str(total_posts) + " Posts\n\t2. Fetch Top Posts Upto A Certain Limit\nChoice: "))
if choice == 1:
self.get_posts(total_posts)
elif choice == 2:
limit = int(input("How Many Posts Due To Want To Fetch Out Of "+str(total_posts)+": "))
self.get_posts(limit)
else:
print("Invalid Choice!! Please Select Again")
time.sleep(1)
self.fetch_posts()
def get_posts(self, no_of_posts):
self.posts = {}
print("Posts: ")
print("<--------------------------------------------------->")
while InstaScrapper.posts_count < no_of_posts:
self.helper.scroll_down()
self.load_page_content()
self.extract_posts(no_of_posts)
time.sleep(1)
print("<--------------------------------------------------->")
print(str(no_of_posts)+" Posts Fetched!!!")
self.posts = list(self.posts.values())
def extract_posts(self, nop):
posts = self.soup.find_all('div', {'class': 'v1Nh3'})
for p in posts:
p_link = p.a['href']
post_id = p_link[3:-1]
post = self.instagram_url + p_link
if InstaScrapper.posts_count < nop:
self.add_post(post_id, post)
else:
break
def is_post_present(self, post_id):
if post_id in self.posts:
return True
else:
return False
def add_post(self, post_id, post):
if not self.is_post_present(post_id):
self.posts[post_id] = post
InstaScrapper.posts_count += 1
print("Sr: "+str(InstaScrapper.posts_count)+", Id: "+post_id+", Url: "+post)
posts_scrapper = PostScrapper(post)
driver_args = ["--headless"]
driver_path = "/home/hussainali/chromedriver"
posts_scrapper.init_scrapper(driver_path, driver_args)
post_data = posts_scrapper.start_scrapper()
posts_scrapper.close_scrapper()
self.add_posts_data(post_data)
def init_posts_data(self):
self.posts_data.source_url = []
self.posts_data.description = []
self.posts_data.hashtags = []
self.posts_data.mentions = []
self.posts_data.emojis = []
self.posts_data.comments = []
def add_posts_data(self, post_data):
self.posts_data.source_url.append(post_data[0])
self.posts_data.description.append(post_data[1])
self.posts_data.hashtags.append(post_data[2])
self.posts_data.mentions.append(post_data[3])
self.posts_data.emojis.append(post_data[4])
self.posts_data.comments.append(post_data[5])
def save_posts_data(self):
posts_data = {
"post_url": self.posts_data.source_url,
"description": self.posts_data.description,
"hashtags": self.posts_data.hashtags,
"mentions": self.posts_data.mentions,
"emojis": self.posts_data.emojis,
"comments": self.posts_data.comments
}
fh.save_as_csv("Posts/" + self.name + "_posts_data", posts_data)
def print_details(self, code):
if self.details_extracted:
if code == 1:
print("<------------------Profile Details------------------>")
print("Username: @" + self.username)
print("Verified: ", self.verified)
print("Name: " + self.name)
print("Bio: " + self.bio)
print("Website: " + self.website)
print("Total Posts: " + self.no_of_posts)
print("Followers: " + self.no_of_followers)
print("Following: " + self.no_of_following)
print("<--------------------------------------------------->")
elif code == 2:
print("<------------------Hashtag Details------------------>")
print("Hashtag: " + self.name)
print("Total Posts: " + self.no_of_posts)
print("<--------------------------------------------------->")
elif code == 3:
print("<------------------Location Details------------------>")
print("Location: " + self.name)
print("<--------------------------------------------------->")
else:
print("Details Not Extracted Yet!!")
def save_as_csv(self, code):
if code == 1:
df = pd.DataFrame(self.data)
df.to_csv("Profiles/" + self.name + "_data.csv")
elif code == 2:
df = pd.DataFrame(self.data)
df.to_csv("Hashtags/" + self.name + "_data.csv")
elif code == 3:
df = | pd.DataFrame(self.data) | pandas.DataFrame |
import pandas as pd
import pytest
from pandera import Column, DataFrameSchema, Check
from pandera import dtypes
from pandera.errors import SchemaError
def test_numeric_dtypes():
for dtype in [
dtypes.Float,
dtypes.Float16,
dtypes.Float32,
dtypes.Float64]:
schema = DataFrameSchema({"col": Column(dtype, nullable=False)})
validated_df = schema.validate(
pd.DataFrame(
{"col": [-123.1, -7654.321, 1.0, 1.1, 1199.51, 5.1, 4.6]},
dtype=dtype.value))
assert isinstance(validated_df, pd.DataFrame)
for dtype in [
dtypes.Int,
dtypes.Int8,
dtypes.Int16,
dtypes.Int32,
dtypes.Int64]:
schema = DataFrameSchema({"col": Column(dtype, nullable=False)})
validated_df = schema.validate(
pd.DataFrame(
{"col": [-712, -4, -321, 0, 1, 777, 5, 123, 9000]},
dtype=dtype.value))
assert isinstance(validated_df, pd.DataFrame)
for dtype in [
dtypes.UInt8,
dtypes.UInt16,
dtypes.UInt32,
dtypes.UInt64]:
schema = DataFrameSchema({"col": Column(dtype, nullable=False)})
validated_df = schema.validate(
pd.DataFrame(
{"col": [1, 777, 5, 123, 9000]}, dtype=dtype.value))
assert isinstance(validated_df, pd.DataFrame)
def test_category_dtype():
schema = DataFrameSchema(
columns={
"col": Column(
dtypes.Category,
checks=[
Check(lambda s: set(s) == {"A", "B", "C"}),
Check(lambda s:
s.cat.categories.tolist() == ["A", "B", "C"]),
Check(lambda s: s.isin(["A", "B", "C"]))
],
nullable=False
),
},
coerce=False
)
validated_df = schema.validate(
pd.DataFrame(
{"col": pd.Series(["A", "B", "A", "B", "C"], dtype="category")}
)
)
assert isinstance(validated_df, pd.DataFrame)
def test_category_dtype_coerce():
columns = {
"col": Column(
dtypes.Category,
checks=Check(lambda s: set(s) == {"A", "B", "C"}),
nullable=False
),
}
with pytest.raises(SchemaError):
DataFrameSchema(columns=columns, coerce=False).validate(
pd.DataFrame(
{"col": pd.Series(["A", "B", "A", "B", "C"], dtype="object")}
)
)
validated_df = DataFrameSchema(columns=columns, coerce=True).validate(
pd.DataFrame(
{"col": pd.Series(["A", "B", "A", "B", "C"], dtype="object")}
)
)
assert isinstance(validated_df, pd.DataFrame)
def test_datetime():
schema = DataFrameSchema(
columns={
"col": Column(
dtypes.DateTime,
checks=Check(lambda s: s.min() > pd.Timestamp("2015")),
)
}
)
validated_df = schema.validate(
pd.DataFrame(
{"col": pd.to_datetime(["2019/01/01", "2018/05/21", "2016/03/10"])}
)
)
assert isinstance(validated_df, pd.DataFrame)
with pytest.raises(SchemaError):
schema.validate(
pd.DataFrame(
                {"col": pd.to_datetime(["2010/01/01"])}
            )
        )
import glob
from astropy.io import fits
import pandas as pd
class HeaderSummary:
'''
HeaderSummary does retrieving information from fits files' headers. path_list provides the search paths applied by glob.glob(). For each file, fits.open() is used to open the file. Header info is retrieved as specified in keywords.
+ Output:
- self.table is pandas.DataFrame table.
+ Inputs:
- path_list = a list of path to be searched for files with glob.glob
> Ex. path_list = ['/Users/kb/15347/HST/*/*flt.fits','/Users/kb/15664/HST/*/*flt.fits']
- keywords = a dict of (key:value) = extension number : header keyword to be searched for.
> if None, keywords will set to {0:['ROOTNAME','DATE-OBS','FILTER','EXPSTART','EXPTIME','SUBARRAY'],}
- sort_by = a list of keywords to be sorted using pandas.DataFrame.sort_values(by=sort_by)
> if None, this will set to ['EXPSTART']
> ascending and reseting indices
- do_sort = True to sort self.table
- colname = a list of column names, running parallel to keywords
> pandas.DataFrame(df,columns=colname)
- add_filepath = True to append the filepaths to the last column
+ Compute:
- self.compute() to prepare the output self.table
+ Save:
- self.table.to_csv() from pandas.DataFrame method.
'''
def __init__(self,path_list,keywords=None,sort_by=None,do_sort=True,colname=None,add_filepath=True):
self.path_list = path_list
self.keywords = keywords
self.sort_by = sort_by
self.do_sort = do_sort
self.colname = colname
self.add_filepath = add_filepath
if self.keywords is None:
self.keywords = self._keywords()
if self.sort_by is None:
self.sort_by = ['EXPSTART']
if self.colname is None:
self.colname = self._colname()
def compute(self):
out = []
for jj,j in enumerate(self.path_list):
t = glob.glob(j)
for ii,i in enumerate(t):
filepath = i
tt = fits.open(filepath)
ttt = []
for extnum in self.keywords.keys():
for keyword in self.keywords[extnum]:
ttt.append(tt[extnum].header[keyword])
if self.add_filepath:
ttt.append(filepath)
out.append(ttt)
if self.add_filepath:
self.colname.append('FILEPATH')
self.table = | pd.DataFrame(out,columns=self.colname,) | pandas.DataFrame |
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
def read_file(filename):
labels = ["futures", "title", "wait", "exec", "duration", "us_future", "queue", "numa_sensitive", "num_threads", "info_string", "libcds"]
data = | pd.read_csv(filename, sep=',', header=None) | pandas.read_csv |
from pathlib import Path
import numpy as np
import pandas as pd
from functools import reduce
from loguru import logger
from datetime import datetime, timedelta
from siuba import _, filter, gather, group_by, ungroup, mutate, summarize, arrange
# plots
import matplotlib.pyplot as plt
import plotnine as p9
p9.theme_set(p9.theme_linedraw()) # default theme
from mizani.breaks import date_breaks
from mizani.formatters import date_format
from adtk.data import validate_series
from adtk.visualization import plot
from adtk.detector import LevelShiftAD, PersistAD, GeneralizedESDTestAD, SeasonalAD, AutoregressionAD
from adtk.transformer import DoubleRollingAggregate  # used by _shifted_adtk_ts below
from tsmoothie.smoother import DecomposeSmoother
from src import utils
conn = utils.connect_athena(path='../configs/athena.yaml')
# -------------- #
### INDEX:
### 1. Initialize functions
### 2. Outlier detection functions
### 3. Imputation functions
### 4. Level shift correction functions
### 5. Plot functions
### 6. Process functions
### 7. Run functions
### 8. Start process
# -------------- #
### 1. Initialize functions
def _validate_series(df, column_name):
"""
A function to validate series
Parameters
----------
df : data frame
Data frame with column to validare. The data frame requires
columns region_slug and date to be sorted. Date is defined as
the index.
column_name : str
Name of the column to validate
"""
logger.debug(f"... validating {column_name}...\n")
y_df = df.sort_values(['date'])[['date', column_name]].set_index('date')
s = validate_series(y_df)
return s
### 2. Outlier detection functions
def _outlier_persist_ad(s, target_column_name, c_param, window_param = 7):
try :
persist_ad = PersistAD(c=c_param, side='both', window = window_param)
anomalies = persist_ad.fit_detect(s)
except :
logger.debug('!! No Persist !!')
anomalies = s
anomalies[target_column_name] = 0
finally :
anomalies = anomalies \
.rename(columns={target_column_name:'anomaly_persist'}) \
.reset_index()
return anomalies
def _outlier_seasonal_ad(s, target_column_name, c_param):
try :
seasonal_ad = SeasonalAD(c=c_param, side="both")
anomalies = seasonal_ad.fit_detect(s)
except :
logger.debug('!! No Seasonal !!')
anomalies = s
anomalies[target_column_name] = False
finally :
anomalies = anomalies \
.rename(columns={target_column_name:'anomaly_seasonal'}) \
.reset_index()
return anomalies
def _outlier_autregr_ad(s, target_column_name, c_param, n_steps_param = 1, step_size_param=7):
try :
autoregression_ad = AutoregressionAD(n_steps=n_steps_param, step_size=step_size_param, c=c_param)
anomalies = autoregression_ad.fit_detect(s)
except :
logger.debug('!! No Seasonal !!')
anomalies = s
anomalies[target_column_name] = 0
finally :
anomalies = anomalies \
.rename(columns={target_column_name:'anomaly_autor'}) \
.reset_index()
return anomalies
def _c_trun(c_param):
if c_param <= 1.5:
c_trun = 1.5
elif c_param >= 3:
c_trun = 3
else:
c_trun = c_param
logger.debug('C truncated: ' + str(c_trun))
return c_trun
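# Worked example: _c_trun clips the sensitivity parameter to the range [1.5, 3]:
# _c_trun(1.2) -> 1.5, _c_trun(2.4) -> 2.4, _c_trun(5.0) -> 3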
def _anomalies_detector(s, target_column_name, c_param):
"""
The function runs three algorithms to detect outliers.
Parameters
----------
s : adtk object
Validated serie output of _validate_series()
target_column_name : str
Target column name to detect outliers
Output
----------
s : data frame
Number of times an observation is indentified as an outlier
"""
c_trunc = _c_trun(c_param)
logger.debug(f'C_TRUNC {c_trunc}')
# implementation of methodologies
anomalies = _outlier_persist_ad(s, target_column_name, c_trunc) \
.merge(_outlier_seasonal_ad(s, target_column_name, c_trunc)) \
.merge(_outlier_autregr_ad( s, target_column_name, c_trunc)) \
.fillna(0)
# sum of identification per observation
anomalies['anomaly_sum'] = \
(anomalies['anomaly_persist']) + \
(anomalies['anomaly_seasonal'] == True) + \
(anomalies['anomaly_autor'])
# excpetions for identification
    anomalies.loc[anomalies.date <= '2020-03-31', 'anomaly_sum'] = 0
    anomalies.loc[(anomalies.date >= '2020-12-15') & (anomalies.date <= '2021-01-15'), 'anomaly_sum'] = 0
anomalies.head(2)
return(anomalies[anomalies.anomaly_sum > 0])
# 2. Find anomalies
def _find_anomalies(df, anomaly_vote_minimun, target_column_name, c_param, print_report=True):
"""
The function implements the process of identification.
Parameters
----------
df : data frame
Data with tci observations
anomaly_vote_minimun : double
Minimum value to accept and anomaly
target_column_name : str
Target column name to detect outliers
print_report: bool
Flag to print results plots
Output
----------
df_anomaly : data frame
Observations per date indetify at least once as an outlier by
the method persis, seasonl or autoregressive identification
anomalies_date:
Description
"""
logger.debug("\n... finding outliers ...")
# validate series
s = _validate_series(df, target_column_name)
# plot(s)
# join anomialies detector
df_anomaly = df.merge(_anomalies_detector(s, target_column_name, c_param), how = 'left')
#print(df_anomaly.head())
anomalies_cnt = sum(df_anomaly.anomaly_sum >= anomaly_vote_minimun)
anomalies_date = df_anomaly[df_anomaly.anomaly_sum >= anomaly_vote_minimun].date.to_list()
logger.debug('Number of anomalies found: ' + str(anomalies_cnt) + '\n')
logger.debug(anomalies_date)
# print report flag
if print_report:
logger.debug('... printing anomalies report ...\n')
print(_plot_anomalies(df_anomaly,
observed_column = target_column_name,
anomalies_cnt=anomalies_cnt))
return(df_anomaly, anomalies_date)
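# Usage sketch (column names are assumptions; 'tci' stands for the observed metric):
# df_anomaly, anomaly_dates = _find_anomalies(df, anomaly_vote_minimun=2,
#                                             target_column_name='tci',
#                                             c_param=3.0, print_report=False)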
### 3. Imputation functions
def _decompose_lowess(variable_smooth, missing_values, smooth_fraction):
"""
The function decompose the serie and smooths using local
regression or lowess curve.
Parameters
----------
variable_smooth : serie
Data with tci observations
missing_values : serie
Minimum value to accept and anomaly
smooth_fraction : double
Target column name to detect outliers
Output
----------
result : serie
Observations per date indetify at least once as an outlier by
the method persis, seasonl or autoregressive identification
"""
variable_smooth= np.log1p(variable_smooth)
# operate smoothing
smoother = DecomposeSmoother(smooth_type='lowess',
periods=7,
smooth_fraction=smooth_fraction)
smoother.smooth(variable_smooth)
result = variable_smooth
smooth_result = smoother.smooth_data[0]
result[missing_values] = smooth_result[missing_values]
# removing negatives
result = np.expm1(result)
return result
def _impute_anomalies(observed_column,
date_column,
anomaly_sum_column,
anomaly_vote_minimun,
smooth_fraction = 0.4,
print_plot = False):
"""
The function runs the process to detect anomalies and impute them.
Parameters
----------
observed_column : data frame
Description
date_column : str
Description
nomaly_sum_column : str
Description
anomaly_vote_minimun : str
Description
smooth_fraction : str
Description
Output
----------
df_impute: data frame
Observations of tci imputed.
"""
logger.debug("\n... imputing outliers ...")
# building data frame to impute
df_impute = pd.DataFrame({
'date': date_column,
'observed_column': observed_column,
'anomaly_sum': anomaly_sum_column,
'observed_missing': observed_column
})
# create missing values
df_impute.loc[df_impute.anomaly_sum >= anomaly_vote_minimun, 'observed_missing'] = None
df_impute.loc[df_impute.observed_column < 0, 'observed_missing'] = None
df_impute = df_impute.set_index('date')
# algorithms to impute
df_impute = df_impute \
.assign(RollingMean = df_impute.observed_missing \
.fillna(df_impute.observed_missing \
.rolling(30, min_periods=1,) \
.mean()) ) \
.assign(RollingMedian = df_impute.observed_missing \
.fillna(df_impute.observed_missing \
.rolling(30, min_periods=1,) \
.median()) ) \
.assign(Polinomial = df_impute.observed_missing \
.interpolate(method='polynomial', order = 5)) \
.assign(Loess = _decompose_lowess(df_impute.observed_column,
df_impute.observed_missing.isna(),
smooth_fraction = smooth_fraction))
if print_plot:
print(_plot_imputation(df_impute))
return df_impute
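# Usage sketch, chaining the detection step above (column names are assumptions):
# df_imputed = _impute_anomalies(observed_column=df_anomaly['tci'],
#                                date_column=df_anomaly['date'],
#                                anomaly_sum_column=df_anomaly['anomaly_sum'],
#                                anomaly_vote_minimun=2)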
### 4. Level shift correction functions
def _c_param(region_slug,
athena_path = '~/shared/spd-sdv-omitnik-waze/corona',
c_metric = 'min',
f_metric = 1):
c_region = pd.read_csv(athena_path + '/cleaning/data/staging/cities_c_iqr.csv')
if sum(c_region.region_slug == region_slug) > 0:
if f_metric < 100:
c_param = (c_region[c_region.region_slug == region_slug][f"c_{c_metric}"].to_list()[0])*f_metric
elif f_metric == 100:
c_param = f_metric
else :
if f_metric < 100:
c_param = 3.0*f_metric #c_region[f"c_{c_metric}"].median()[0])*f_metric
elif f_metric == 100:
c_param = f_metric
logger.debug(f'C {c_metric}: ' + str(c_param))
return c_param
def _level_shift_detection(s,
c_param = 3.0,
window_param = 14,
print_plot = False):
"""
Level shift or change point detection. This function uses the function
LevelShiftAD from ADTK library.
Parameters
----------
s : validated serie object
Description
    c_param : dbl, default 3.0
        Description
    window_param : int, default 14
        Description
    print_plot: bool, default False
        Description
"""
#logger.debug(f"... detecting shift c{c_param}-w{window_param}...")
level_shift_ad = LevelShiftAD(c=c_param, side='both', window=window_param)
shifts = level_shift_ad.fit_detect(s)
if print_plot:
plot(s, anomaly=shifts, anomaly_color='red')
return shifts
def _run_shift_grid(s, observed_variable, c_param, low_grid = .20, upp_grid = .60):
"""
The function runs a grid for several c parameters to detect level shifts.
Parameters
----------
s : serie object
Validated serie object
observed_variable : validated serie object
Description
c_param : dbl
Description
low_grid: dbl, default .20
Description
upp_grid: dbl, default .60
Description
"""
logger.debug(f"... shift level running grid ...\n")
logger.debug(f'C_LS {c_param}')
shift_l = list()
# grid for values list
for cp in [round(c_param-c_param*(upp_grid), 4),
round(c_param-c_param*(low_grid), 4),
round(c_param, 4),
round(c_param+c_param*(low_grid), 4),
round(c_param+c_param*(upp_grid), 4) ]:
for wdw in [14, 15, 16, 17, 18]:
shift = _level_shift_detection(s, c_param = cp,
window_param=wdw,
print_plot = False) \
.rename(columns={observed_variable:f'shift_c{cp}_w{wdw}'})
shift_l.append(shift)
#len(shift_l)
df_grid = reduce(lambda df1, df2: df1.merge(df2, on='date'), shift_l)
df_grid.shape
logger.debug(f"Total combinations: {len(shift_l)}\n")
return df_grid
def _shifted_adtk_ts(s, column_name, agg="std", window=(3,3), diff="l2", print_plot=True):
# shift ts level
s_transformed = DoubleRollingAggregate(
agg=agg,
window=window,
diff=diff).transform(s).rename(columns={column_name:'adtk_shift'})
if print_plot:
plot(pd.concat([s, s_transformed], axis=1))
return s_transformed
def _shift_sum(df_shift,
ls_search_start_2020 = '2020-03-31',
ls_search_end_2020 = '2020-12-15',
ls_search_start_2021 = None,
ls_search_end_2021 = None):
logger.debug(f'LS 2020 Start {ls_search_start_2020} - {ls_search_end_2020}')
logger.debug(f'LS 2021 Start {ls_search_start_2021} - {ls_search_end_2021}')
df_shift_sum = (df_shift.reset_index()
>> filter((_.date > ls_search_start_2020) & (_.date < ls_search_end_2020 ))
>> gather('variable', 'value', -_.date)
>> filter(_.variable.str.startswith('shift'))
>> group_by('date')
>> summarize(shift_sum = _.value.sum())
>> filter(_.shift_sum > 0)
>> arrange('date')
)
return df_shift_sum
def _rolling_manual_sum(tab, days_before= 0, days_after = 7):
rolling_sum = list()
for dat in tab.date:
date_init = dat + timedelta(days=days_before)
date_end = dat + timedelta(days=days_after)
#logger.debug( str(dat ) + ' to ' + str(dat + timedelta(days=7)))
sum_sum = tab[(tab.date >= date_init) & (tab.date < date_end)].suma.sum()
rolling_sum.append(sum_sum)
return rolling_sum
def _initial_shift_date(df_shift_sum):
shift_init = df_shift_sum[df_shift_sum.shift_sum == df_shift_sum.shift_sum.max()].date.min()
logger.debug(f'Shift found at {shift_init}')
return shift_init
def _linear_interpolate_ts(shifted_column, date_column):
shifted_column[shifted_column < 0] = None
df = | pd.DataFrame() | pandas.DataFrame |
import pandas
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn import preprocessing
from setlist import setlist
import sys
import os
path=os.getcwd()
# str.strip() removes a set of characters, not a suffix; drop the trailing folder explicitly.
if path.endswith('complete_model'):
    path = path[:-len('complete_model')]
sys.path.append(path)
from helper import svm,misc_helper
class train_test_generator:
def generate(self):
dataFrame = pandas.read_csv('../../CSV_Data/master_dataset.csv')
feature_columns = list(dataFrame.columns.values)[0:-1]
features,target = misc_helper.split_feature_target(dataFrame)
train,test,train_target,test_target = train_test_split(features,target,test_size = 0.2,stratify=target)
train,test = misc_helper.get_scaled_data(train,test)
#Initial Datasets
train = pandas.DataFrame(train,columns=feature_columns)
train.to_csv('datasets/train.csv',index=False)
train_target = pandas.DataFrame(train_target,columns=['label'])
train_target.to_csv('datasets/train_target.csv',index=False)
test = pandas.DataFrame(test,columns=feature_columns)
test.to_csv('datasets/test.csv',index=False)
test_target = pandas.DataFrame(test_target,columns=['label'])
test_target.to_csv('datasets/test_target.csv',index=False)
#
train_target_sets = train_target.copy(deep=True)
test_target_sets = test_target.copy(deep=True)
for i in range(len(setlist)):
            train_target_sets.loc[train_target['label'].isin(setlist[i]), 'label'] = str(i)
train_target_sets.to_csv('datasets/train_target_sets.csv',index=False)
for i in range(len(setlist)):
            test_target_sets.loc[test_target['label'].isin(setlist[i]), 'label'] = str(i)
test_target_sets.to_csv('datasets/test_target_sets.csv',index=False)
#Diving into sets
train_sets_features = [[] for i in range(len(setlist)) if len(setlist[i]) > 1]
train_sets_targets = [[] for i in range(len(setlist)) if len(setlist[i]) > 1]
test_sets_features = [[] for i in range(len(setlist)) if len(setlist[i]) > 1]
test_sets_targets = [[] for i in range(len(setlist)) if len(setlist[i]) > 1]
for index,row in train.iterrows():
setIndex = int(train_target_sets['label'][index])
if setIndex < len(train_sets_features):
train_sets_features[setIndex].append(row)
train_sets_targets[setIndex].append(train_target['label'][index])
for index,row in test.iterrows():
setIndex = int(test_target_sets['label'][index])
if setIndex < len(test_sets_features):
test_sets_features[setIndex].append(row)
test_sets_targets[setIndex].append(test_target['label'][index])
for i in range(len(train_sets_features)):
df = pandas.DataFrame(train_sets_features[i],columns=feature_columns)
df.to_csv('datasets/train_set_'+str(i),index=False)
df = pandas.DataFrame(train_sets_targets[i],columns=['label'])
df.to_csv('datasets/train_target_set_'+str(i),index=False)
df = pandas.DataFrame(test_sets_features[i],columns=feature_columns)
df.to_csv('datasets/test_set_'+str(i),index=False)
df = | pandas.DataFrame(test_sets_targets[i],columns=['label']) | pandas.DataFrame |
from matplotlib import colors
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import pandas as pd
import numpy as np
import matplotlib.gridspec as gridspec
import matplotlib.offsetbox as offsetbox
import palettable
from collections import defaultdict
class CoMut:
'''A user-created :class: `CoMut` object.
Params:
-----------
Attributes:
-----------
samples: list
List of samples that defines the sample order. It is set
by the first data set added. Samples from later data sets
are checked and reordered against this attribute.
axes: dict
Container containing plotted axes objects after plot_comut()
is called. Axes objects can be accessed and changed to change
the CoMut.
figure: matplotlib figure object
Figure that the CoMut is plotted on
_plots: dict
Container for plot information, including data, visual
params (ie color maps), plot type, and plot name.
_side_plots: dict of dicts
Container for side plot information. Values are side plot
data, keys are the name of the central CoMut plot the side
plot is paired with.'''
def __init__(self):
# user accessible attributes
self.samples = None
self.axes = {}
self.figure = None
# attributes for manipulation and storage
self._plots = {}
self._side_plots = defaultdict(dict)
@classmethod
def _get_default_categorical_cmap(cls, n_cats):
'''Returns the default color map for n categories.
If 10 or fewer, uses vivid_10 from palettable. If more than 10,
uses a segmented rainbow colormap.
Params:
-------
n_cats: int
The number of categories in the data.
Returns:
--------
cmap: list of colors'''
if n_cats <= 10:
cmap = palettable.cartocolors.qualitative.Vivid_10.mpl_colors
else:
hsv_cmap = plt.get_cmap('hsv')
cmap = [hsv_cmap(i/n_cats) for i in range(n_cats)]
return cmap
@classmethod
def _get_triangles(cls, x_base, y_base, tri_padding, height, width):
'''Returns np arrays of triangle coordinates
Params:
-------
x_base, y_base: floats
The x and y coordinates of the base of the triangle
tri_padding: float
The space between triangles
height, width: float
Height and width of the box enclosing the triangles.
Returns:
--------
(tri_1_coords, tri_2_coords): tuple of np arrays
Tuple of triangle coordinates as np arrays.'''
tri_1_coords = [[x_base, y_base + tri_padding],
[x_base, y_base + height],
[x_base + width - tri_padding, y_base + height]]
tri_2_coords = [[x_base + tri_padding, y_base],
[x_base + width, y_base],
[x_base + width, y_base + height - tri_padding]]
return (np.array(tri_1_coords), np.array(tri_2_coords))
@classmethod
def _sort_list_by_list(cls, value_list, value_order):
'''Sort an value list by a specified value
order, otherwise sort alphabetically at end.
Params:
-------
value_list: list-like
values to sort, eg ['nonsense', 'amp']
value_order: list-like
List of values that specify sort order.
Values not in this list will be sorted alphabetically
and placed at the end of the list.
Returns:
--------
sorted_values: list
Values sorted by the value order specified and
alphabetically otherwise.'''
# extract subset of alts that are specified in value_order
subset = [value for value in value_list if value in value_order]
other = [value for value in value_list if value not in value_order]
# sort subset according to value order list, otherwise alphabetical
sorted_subset = sorted(subset, key=lambda x: value_order.index(x))
sorted_other = sorted(other)
# join the two subsets
sorted_values = sorted_subset + sorted_other
return sorted_values
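    # Worked example:
    # _sort_list_by_list(['amp', 'del', 'nonsense'], ['nonsense', 'amp'])
    # -> ['nonsense', 'amp', 'del']  (listed values first in the given order, the rest alphabetical)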
@classmethod
def _parse_categorical_data(cls, data, category_order, sample_order,
value_order, priority):
'''Parses tidy dataframe into a gene x sample dataframe
of tuples for plotting
Params:
-------
data: pandas dataframe
Dataframe from add_categorical_data or add_continuous_data
category_order: list-like
category_order from add_categorical_data
sample_order: list-like
Order of samples, from left to right.
value_order: list-like:
value_order from add_categorical_data
priority: list-like
priority from add_categorical_data
Returns:
--------
parsed_data: pandas dataframe, shape (categories, samples)
Dataframe of tuples depicting values for each sample in
each category.'''
# create parsed data storage
parsed_data = pd.DataFrame(index=category_order, columns=sample_order)
# subset data to categories and samples to avoid handling large dataframes
data = data[(data['category'].isin(category_order)) &
(data['sample'].isin(sample_order))]
# fill in parsed dataframe
for category in category_order:
for sample in sample_order:
sample_category_data = data[(data['category'] == category) &
(data['sample'] == sample)]
# if data is empty, the sample does not have a value in category
if len(sample_category_data) == 0:
parsed_data.loc[category, sample] = ()
# if length 1 just put the value
elif len(sample_category_data) == 1:
value = sample_category_data['value'].values[0]
parsed_data.loc[category, sample] = (value,)
# if length 2, sort by value order then convert to tuple
elif len(sample_category_data) == 2:
values = sample_category_data['value'].values
sorted_values = cls._sort_list_by_list(values, value_order)
parsed_data.loc[category, sample] = tuple(sorted_values)
# if more than two, apply priority, sort, then convert to tuple.
else:
values = sample_category_data['value'].values
present_priorities = [v for v in values if v in priority]
# just put 'Multiple' if no priorities or more than two
if len(present_priorities) == 0 or len(present_priorities) > 2:
parsed_data.loc[category, sample] = ('Multiple',)
# always plot a priority if present
elif len(present_priorities) == 1:
df_entry = present_priorities + ['Multiple']
sorted_df_entry = cls._sort_list_by_list(df_entry, value_order)
parsed_data.loc[category, sample] = tuple(sorted_df_entry)
# plot two priorities if present, ignoring others
elif len(present_priorities) == 2:
df_entry = cls._sort_list_by_list(present_priorities, value_order)
parsed_data.loc[category, sample] = tuple(df_entry)
return parsed_data
def _check_samples(self, samples):
'''Checks that samples are a subset of samples
currently associated with the CoMut object.
Params:
-------
samples: list-like
A list of sample names.'''
if not set(samples).issubset(set(self.samples)):
extra = set(samples) - set(self.samples)
raise ValueError('Unknown samples {} given. All added samples'
' must be a subset of either first samples'
' added or samples specified with'
' comut.samples'.format(extra))
def add_categorical_data(self, data, name=None, category_order=None,
value_order=None, mapping=None, borders=None,
priority=None, tick_style='normal'):
'''Add categorical data to the CoMut object.
Params:
-------
data: pandas dataframe
A tidy dataframe containing data. Required columns are
sample, category, and value. Other columns are ignored.
Example:
-------
sample | category | value
----------------------------
Sample_1 | TP53 | Missense
Sample_1 | Gender | Male
name: str
The name of the dataset being added. Used to references axes.
Example:
--------
example_comut = comut.CoMut()
example_comut.add_categorical_data(data, name = 'Mutation type')
category_order: list-like
Order of category to plot, from top to bottom. Only these
categories are plotted.
Example:
--------
example_comut = comut.CoMut()
example_comut.add_categorical_data(data, category_order = ['TP53', 'BRAF'])
value_order: list-like
Order of plotting of values in a single patch, from left
triangle to right triangle.
Example:
--------
value_order = ['Amp', 'Missense']
If Amp and Missense exist in the same category and sample, Amp
will be drawn as left triangle, Missense as right.
mapping: dict
Mapping of values to patch properties. The dict can either specify
only the facecolor or other patches properties.
Note:
-----
Three additional values are required to fully specify mapping:
'Absent', which determines the color for samples without value
for a name (default white).
'Multiple', which determines the color for samples with more than
two values in that category (default brown).
'Not Available', which determines the patch properties when a sample's
value is 'Not Available'.
borders: list-like
List of values that should be plotted as borders, not patches.
Example:
--------
example_comut = comut.CoMut()
example_comut.add_categorical_data(data, borders = ['LOH'])
priority: list-like
Ordered list of priorities for values. The function will attempt
to preserve values in this list, subverting the "Multiple"
assignment.
Example:
--------
example_comut.add_categorical_data(data, priority = ['Amp'])
If Amp exists alongside two other values, it will be drawn as
Amp + Multiple (two triangles), instead of Multiple.
tick_style: str, default='normal', 'italic', 'oblique'
Tick style to be used for the y axis ticks (category names).
Returns:
--------
None'''
# check that required columns exist
req_cols = {'sample', 'category', 'value'}
if not req_cols.issubset(data.columns):
missing_cols = req_cols - set(data.columns)
msg = ', '.join(list(missing_cols))
raise ValueError('Data missing required columns: {}'.format(msg))
# check that samples are a subset of current samples.
samples = list(data['sample'].drop_duplicates())
if self.samples is None:
self.samples = samples
else:
self._check_samples(samples)
# set defaults
if name is None:
name = len(self._plots)
if borders is None:
borders = []
if priority is None:
priority = []
if value_order is None:
value_order = []
# default category order to all categories uniquely present in data
# in the order they appear
if category_order is None:
category_order = list(data['category'].drop_duplicates())
# build default color map, uses vivid
unique_values = set(data['value'])
if mapping is None:
mapping = {}
# define default borders
for value in borders:
mapping[value] = {'facecolor': 'none', 'edgecolor': 'black', 'linewidth': 1}
# assign colors to other unique values
non_border = [val for val in unique_values if val not in borders]
default_cmap = self._get_default_categorical_cmap(len(non_border))
            for i, value in enumerate(non_border):
mapping[value] = {'facecolor': default_cmap[i]}
mapping['Absent'] = {'facecolor': 'white'}
mapping['Multiple'] = {'facecolor': palettable.colorbrewer.qualitative.Set1_7.mpl_colors[6]}
mapping['Not Available'] = {'facecolor': 'none', 'edgecolor': 'black', 'linewidth': 1}
elif isinstance(mapping, dict):
# copy the user mapping to avoid overwriting their mapping variable
mapping = mapping.copy()
# update user color map with reserved values if not present
if 'Not Available' not in mapping:
mapping['Not Available'] = {'facecolor': 'none', 'edgecolor': 'black', 'linewidth': 1}
if 'Absent' not in mapping:
mapping['Absent'] = {'facecolor': 'white'}
if 'Multiple' not in mapping:
mapping['Multiple'] = {'facecolor': palettable.colorbrewer.qualitative.Set1_7.mpl_colors[6]}
# check that all alt types present in data are in mapping
if not unique_values.issubset(mapping.keys()):
missing_cats = unique_values - set(mapping.keys())
raise ValueError('Categories present in dataframe {}'
' are missing from mapping'.format(missing_cats))
# if passed values aren't kwargs, convert to patches kwargs
for key, value in mapping.items():
if not isinstance(value, dict):
if key in borders:
mapping[key] = {'facecolor': 'none', 'edgecolor': value}
else:
mapping[key] = {'facecolor': value}
# check that borders have facecolor - None
for border in borders:
if mapping[border]['facecolor'] != 'none':
raise ValueError('Border category {} must have facecolor'
' = \'none\''.format(border))
else:
raise ValueError('Invalid mapping. Mapping must be a dict.')
# parse data into dataframe of tuples as required for plotting
parsed_data = self._parse_categorical_data(data, category_order, self.samples,
value_order, priority)
# store plot data
plot_data = {'data': parsed_data, 'patches_options': mapping,
'tick_style': tick_style, 'borders': borders, 'type': 'categorical'}
self._plots[name] = plot_data
return None
def add_continuous_data(self, data, mapping='binary', tick_style='normal',
value_range=None, cat_mapping=None, name=None):
'''Add a sample level continuous data to the CoMut object
Params:
-----------
data: pandas dataframe
A tidy dataframe containing data. Required columns are
sample, category, and value. Other columns are ignored.
Currently, only one category is allowed.
mapping: str, colors.LinearSegmentedColormap, default 'binary'
A mapping of continuous value to color. Can be defined as
matplotlib colormap (str) or a custom LinearSegmentedColormap
Samples with missing information are colored according to 'Absent'.
value_range: tuple or list
min and max value of the data. Data will be normalized using
this range to fit (0, 1). Defaults to the range of the data.
cat_mapping: dict
Mapping from a discrete category to patch color. Primarily used
to override defaults for 'Absent' and 'Not Available' but can
be used to mix categorical and continuous values in the same data.
name: str
The name of the dataset being added. Used to references axes.
defaults to the integer index of the plot being added.
tick_style: str, default='normal', 'italic', 'oblique'
Tick style to be used for the y axis ticks (category names).
Returns:
--------
None'''
# check that required columns exist
req_cols = {'sample', 'category', 'value'}
if not req_cols.issubset(data.columns):
missing_cols = req_cols - set(data.columns)
msg = ', '.join(list(missing_cols))
raise ValueError('Data missing required columns: {}'.format(msg))
# check that samples are a subset of object samples.
samples = list(data['sample'].drop_duplicates())
if self.samples is None:
self.samples = samples
else:
self._check_samples(samples)
# check that only one category is in the dataframe
if len(set(data['category'])) > 1:
raise ValueError('Only one category is allowed for continuous data')
# make default name
if name is None:
name = len(self._plots)
if value_range is None:
data_max = pd.to_numeric(data['value'], 'coerce').max()
data_min = | pd.to_numeric(data['value'], 'coerce') | pandas.to_numeric |
#
# Copyright ยฉ 2021 Uncharted Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from os import path
import math
import pandas as pd
from common_primitives.column_parser import ColumnParserPrimitive
from d3m import exceptions
from d3m.metadata import base as metadata_base
from distil_primitives_contrib.time_series_binner import TimeSeriesBinnerPrimitive
import utils as test_utils
class TimeSeriesBinnerPrimitiveTestCase(unittest.TestCase):
_dataset_path = path.abspath(
path.join(path.dirname(__file__), "timeseries_resource_dataset_2")
)
# _dataset_path = '/Users/vkorapaty/data/datasets/seed_datasets_current/LL1_736_population_spawn_MIN_METADATA/TRAIN/dataset_TRAIN'
def test_hyperparams_integer_bin(self) -> None:
timeseries_df = self._load_data(
"singleGroupData",
value_indices=[3],
parsing_hyperparams={"exclude_columns": [1]},
)
hyperparams_class = TimeSeriesBinnerPrimitive.metadata.query()[
"primitive_code"
]["class_type_arguments"]["Hyperparams"]
ts_binner = TimeSeriesBinnerPrimitive(
hyperparams=hyperparams_class.defaults().replace(
{
"grouping_key_col": 1,
"time_col": 2,
"value_cols": [3],
"binning_starting_value": "min",
}
)
)
result = ts_binner.produce(inputs=timeseries_df).value
self._compare_dataframes(
result,
[3, 4],
["species", "day", "count"],
[
["cas9_VBBA", "cas9_VBBA", "cas9_VBBA"],
[8, 11, 16],
[85920, 85574, 88357],
],
)
def test_no_hyperparams_integer_bin(self) -> None:
timeseries_df = self._load_data(
"singleGroupData",
value_indices=[3],
parsing_hyperparams={"exclude_columns": [1]},
)
hyperparams_class = TimeSeriesBinnerPrimitive.metadata.query()[
"primitive_code"
]["class_type_arguments"]["Hyperparams"]
ts_binner = TimeSeriesBinnerPrimitive(hyperparams=hyperparams_class.defaults())
result = ts_binner.produce(inputs=timeseries_df).value
self._compare_dataframes(
result,
[4, 4],
["species", "day", "count"],
[
["cas9_VBBA", "cas9_VBBA", "cas9_VBBA"],
[4, 10, 15, 16],
[28810, 113989, 86925, 30127],
],
)
def test_timestamp_downsampling_bin(self) -> None:
timeseries_df = self._load_data(
"singleGroupDataTimestamp",
value_indices=[3],
date_time_index=2,
parsing_hyperparams={
"exclude_columns": [1, 2],
"parse_semantic_types": (
"http://schema.org/Integer",
"http://schema.org/DateTime",
),
},
)
hyperparams_class = TimeSeriesBinnerPrimitive.metadata.query()[
"primitive_code"
]["class_type_arguments"]["Hyperparams"]
ts_binner = TimeSeriesBinnerPrimitive(
hyperparams=hyperparams_class.defaults().replace({"granularity": "years"})
)
timeseries_df["day"] = pd.to_datetime(timeseries_df["day"])
result = ts_binner.produce(inputs=timeseries_df).value
df = pd.DataFrame({"year": [2020, 2021], "month": [12, 12], "day": [31, 31]})
self._compare_dataframes(
result,
[2, 4],
["species", "day", "count"],
[["cas9_VBBA", "cas9_VBBA"], | pd.to_datetime(df) | pandas.to_datetime |
# -*- coding: utf-8 -*-
# Copyright StateOfTheArt.quant.
#
# * Commercial Usage: please contact <EMAIL>
# * Non-Commercial Usage:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pdb
import torch
import pandas as pd
import featurizer.functions.time_series_functions as tsf
def macd(tensor, fastperiod=12, slowperiod=26, signalperiod=9):
#DIF = tsf.ema(tensor, fastperiod) - tsf.ema(tensor, slowperiod)
#DEA = tsf.ema(DIF, signalperiod)
    #MACD = (DIF - DEA) * 1 # Here is 1 rather than the traditional 2
import talib
tensor_np = tensor.cpu().detach().numpy()
tensor_df = pd.DataFrame(tensor_np)
DIF = tensor_df.apply(lambda x: talib.MACD(x, fastperiod=fastperiod, slowperiod=slowperiod, signalperiod=signalperiod)[0])
DEA = tensor_df.apply(lambda x: talib.MACD(x, fastperiod=fastperiod, slowperiod=slowperiod, signalperiod=signalperiod)[1])
MACD = tensor_df.apply(lambda x: talib.MACD(x, fastperiod=fastperiod, slowperiod=slowperiod, signalperiod=signalperiod)[2])
DIF_ts = torch.tensor(DIF.values, dtype=tensor.dtype, device=tensor.device)
DEA_ts = torch.tensor(DEA.values, dtype=tensor.dtype, device=tensor.device)
MACD_ts = torch.tensor(MACD.values, dtype=tensor.dtype, device=tensor.device)
#
    return DIF_ts, DEA_ts, MACD_ts
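# Usage sketch (assumption: inputs are 2-D tensors, rows = time steps, columns = securities;
# talib expects float64 data):
# close = torch.randn(250, 4, dtype=torch.float64).cumsum(dim=0) + 100
# dif, dea, macd_hist = macd(close)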
def macdext(close_ts, fastperiod=12, fastmatype=0, slowperiod=26, slowmatype=0, signalperiod=9, signalmatype=0):
import talib
close_np = close_ts.cpu().detach().numpy()
close_df = pd.DataFrame(close_np)
DIF = close_df.apply(lambda x: talib.MACDEXT(x, fastperiod=12, fastmatype=0, slowperiod=26, slowmatype=0, signalperiod=9, signalmatype=0)[0])
DEA = close_df.apply(lambda x: talib.MACDEXT(x, fastperiod=12, fastmatype=0, slowperiod=26, slowmatype=0, signalperiod=9, signalmatype=0)[1])
MACD = close_df.apply(lambda x: talib.MACDEXT(x, fastperiod=12, fastmatype=0, slowperiod=26, slowmatype=0, signalperiod=9, signalmatype=0)[2])
DIF_ts = torch.tensor(DIF.values, dtype=close_ts.dtype, device=close_ts.device)
DEA_ts = torch.tensor(DEA.values, dtype=close_ts.dtype, device=close_ts.device)
MACD_ts = torch.tensor(MACD.values, dtype=close_ts.dtype, device=close_ts.device)
return DIF_ts, DEA_ts, MACD_ts
def macdfix(close_ts, signalperiod=9): # fixed fastperiod=12 and slowperiod=26
import talib
close_np = close_ts.cpu().detach().numpy()
close_df = pd.DataFrame(close_np)
DIF = close_df.apply(lambda x: talib.MACDFIX(x, signalperiod=9)[0])
DEA = close_df.apply(lambda x: talib.MACDFIX(x, signalperiod=9)[1])
MACD = close_df.apply(lambda x: talib.MACDFIX(x, signalperiod=9)[2])
DIF_ts = torch.tensor(DIF.values, dtype=close_ts.dtype, device=close_ts.device)
DEA_ts = torch.tensor(DEA.values, dtype=close_ts.dtype, device=close_ts.device)
MACD_ts = torch.tensor(MACD.values, dtype=close_ts.dtype, device=close_ts.device)
return DIF_ts, DEA_ts, MACD_ts
def ppo(close_ts, fastperiod=12, slowperiod=26, matype=0):
import talib
close_np = close_ts.cpu().detach().numpy()
close_df = pd.DataFrame(close_np)
PPO = close_df.apply(lambda x: talib.PPO(x, fastperiod=12, slowperiod=26, matype=0))
PPO_ts = torch.tensor(PPO.values, dtype=close_ts.dtype, device=close_ts.device)
return PPO_ts
def rsi(tensor, timeperiod):
import talib
tensor_np = tensor.cpu().detach().numpy()
tensor_df = pd.DataFrame(tensor_np)
tensor_df = tensor_df.apply(lambda x: talib.RSI(x, timeperiod=timeperiod))
output_tensor = torch.tensor(tensor_df.values, dtype=tensor.dtype, device=tensor.device)
return output_tensor
def bbands(tensor, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0):
import talib
tensor_np = tensor.cpu().detach().numpy()
tensor_df = pd.DataFrame(tensor_np)
upperband = tensor_df.apply(lambda x: talib.BBANDS(x, timeperiod=timeperiod, nbdevup=nbdevup, nbdevdn=nbdevdn, matype=matype)[0])
middleband = tensor_df.apply(lambda x: talib.BBANDS(x, timeperiod=timeperiod, nbdevup=nbdevup, nbdevdn=nbdevdn, matype=matype)[1])
lowerband = tensor_df.apply(lambda x: talib.BBANDS(x, timeperiod=timeperiod, nbdevup=nbdevup, nbdevdn=nbdevdn, matype=matype)[2])
upperband_ts = torch.tensor(upperband.values, dtype=tensor.dtype, device=tensor.device)
middleband_ts = torch.tensor(middleband.values, dtype=tensor.dtype, device=tensor.device)
lowerband_ts = torch.tensor(lowerband.values, dtype=tensor.dtype, device=tensor.device)
return upperband_ts, middleband_ts, lowerband_ts
def kdj(high_ts, low_ts, close_ts, fastk_period=9, slowk_period=3, slowd_period=3):
range_ts = tsf.rolling_max(high_ts, window=fastk_period) - tsf.rolling_min(low_ts, window=fastk_period)
RSV = (close_ts - tsf.rolling_min(low_ts, fastk_period).squeeze(-1)) / torch.clamp(range_ts.squeeze(-1),min=1)
K = tsf.ema(RSV, window=slowk_period).squeeze(-1)
D = tsf.ema(K, slowd_period).squeeze(-1)
J = 3*K - 2*D
return RSV, K, D, J
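# Usage sketch (assumption: 2-D float tensors with matching shapes, rows = time steps):
# rsv, k, d, j = kdj(high_ts, low_ts, close_ts, fastk_period=9)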
def candleup(open_ts, close_ts, high_ts):
output = torch.max(open_ts, close_ts)
output_ts = high_ts - output
return output_ts
def candledown(open_ts, close_ts, low_ts):
output = torch.min(open_ts, close_ts)
output_ts = output - low_ts
return output_ts
def atr(high_ts, low_ts, close_ts, timeperiod=14):
true_range = torch.max(high_ts, tsf.shift(close_ts, window=1)) - torch.min(low_ts, tsf.shift(close_ts, window=1))
atr = tsf.rolling_mean_(true_range, window=timeperiod)
return atr
def natr(high_ts, low_ts, close_ts, timeperiod=14):
true_range = torch.max(high_ts, tsf.shift(close_ts, window=1)) - torch.min(low_ts, tsf.shift(close_ts, window=1))
TRange_max = tsf.rolling_max(true_range, window=timeperiod)
TRange_min = tsf.rolling_min(true_range, window=timeperiod)
natr = tsf.rolling_mean_((true_range - TRange_min) / (TRange_max - TRange_min), window=timeperiod)
return natr
def dmi(high_ts, low_ts, close_ts, timeperiod=14):
up = high_ts - tsf.shift(high_ts, window=1)
down = tsf.shift(low_ts, window=1) - low_ts
zero = torch.zeros_like(high_ts, dtype=high_ts.dtype, device=high_ts.device)
PDM = torch.where(up>torch.max(down, zero), up, zero)
MDM = torch.where(down>torch.max(up, zero), down, zero)
TR14 = tsf.rolling_mean_(torch.max(high_ts, tsf.shift(close_ts, window=1)) - torch.min(low_ts, tsf.shift(close_ts, window=1)), window=14)
PDI = PDM / TR14[-1] * 100
MDI = MDM / TR14[-1] * 100
DX = torch.abs(PDI - MDI) / torch.abs(PDI + MDI)
ADX = tsf.ema(DX, window=timeperiod)
ADXR = tsf.ema(ADX, window=timeperiod)
return PDM, MDM, PDI, MDI, DX, ADX, ADXR
def apo(close_ts, fastperiod=12, slowperiod=26, matype=0):
import talib
close_np = close_ts.cpu().detach().numpy()
close_df = pd.DataFrame(close_np)
    apo = close_df.apply(lambda x: talib.APO(x, fastperiod=fastperiod, slowperiod=slowperiod, matype=matype))
apo_ts = torch.tensor(apo.values, dtype=close_ts.dtype, device=close_ts.device)
return apo_ts
def cci(high_ts, low_ts, close_ts, timeperiod=14):
TP = (high_ts + low_ts + close_ts) / 3
cci = (TP - tsf.rolling_mean_(TP, window=timeperiod)) / (0.015 * tsf.rolling_std(TP, window=timeperiod))
return cci
def cmo(close_ts, timeperiod=14):
import talib
close_np = close_ts.cpu().detach().numpy()
close_df = pd.DataFrame(close_np)
    cmo = close_df.apply(lambda x: talib.CMO(x, timeperiod=timeperiod))
CMO_ts = torch.tensor(cmo.values, dtype=close_ts.dtype, device=close_ts.device)
return CMO_ts
def mfi(high_ts, low_ts, close_ts, total_turnover_ts, timeperiod=14):
TP = (high_ts + low_ts + close_ts) / 3
MF = TP * total_turnover_ts
zero = torch.zeros_like(high_ts)
PMF = torch.where(MF > tsf.shift(MF, window=1), MF, zero)
NMF = torch.where(MF < tsf.shift(MF, window=1), MF, zero)
MR = tsf.rolling_sum_(PMF, window=timeperiod) / tsf.rolling_sum_(NMF, window=timeperiod)
MFI = 100 - (100 / (1 + MR))
return MFI
def stochrsi(close_ts, timeperiod=14, fastk_period=5, fastd_period=3, fastd_matype=0):
import talib
close_np = close_ts.cpu().detach().numpy()
close_df = pd.DataFrame(close_np)
    fastk = close_df.apply(lambda x: talib.STOCHRSI(x, timeperiod=timeperiod, fastk_period=fastk_period, fastd_period=fastd_period, fastd_matype=fastd_matype)[0])
    fastd = close_df.apply(lambda x: talib.STOCHRSI(x, timeperiod=timeperiod, fastk_period=fastk_period, fastd_period=fastd_period, fastd_matype=fastd_matype)[1])
fastk_ts = torch.tensor(fastk.values, dtype=close_ts.dtype, device=close_ts.device)
fastd_ts = torch.tensor(fastd.values, dtype=close_ts.dtype, device=close_ts.device)
return fastk_ts, fastd_ts
def trix(close_ts, timeperiod=30):
import talib
close_np = close_ts.cpu().detach().numpy()
close_df = pd.DataFrame(close_np)
    trix = close_df.apply(lambda x: talib.TRIX(x, timeperiod=timeperiod))
trix_ts = torch.tensor(trix.values, dtype=close_ts.dtype, device=close_ts.device)
return trix_ts
def uos(high_ts, low_ts, close_ts, timeperiod1=7, timeperiod2=14, timeperiod3=28, timeperiod4=6):
TH = torch.max(high_ts, tsf.shift(close_ts, window=1))
TL = torch.min(low_ts, tsf.shift(close_ts, window=1))
ACC1 = tsf.rolling_sum_(close_ts-TL, window=timeperiod1) / tsf.rolling_sum_(TH-TL, window=timeperiod1)
ACC2 = tsf.rolling_sum_(close_ts-TL, window=timeperiod2) / tsf.rolling_sum_(TH-TL, window=timeperiod2)
ACC3 = tsf.rolling_sum_(close_ts-TL, window=timeperiod3) / tsf.rolling_sum_(TH-TL, window=timeperiod3)
UOS = (ACC1*timeperiod2*timeperiod3 + ACC2*timeperiod1*timeperiod3 + ACC3*timeperiod1*timeperiod2) * 100 / (timeperiod1*timeperiod2 + timeperiod1*timeperiod3 + timeperiod2*timeperiod3)
MAUOS = tsf.ema(UOS, window=timeperiod4)
return UOS, MAUOS
def wr(high_ts, low_ts, close_ts, timeperiod=14):
HT = tsf.rolling_max(high_ts, window=timeperiod)
LT = tsf.rolling_min(low_ts, window=timeperiod)
WR = (HT - close_ts) / (HT - LT) * 100
return WR
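# Worked note on wr(): a close at the rolling high gives WR = 0 and a close at
# the rolling low gives WR = 100, i.e. a positive-scale variant of Williams %R
# rather than the usual -100..0 convention.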
def dema(close_ts, timeperiod=30):
import talib
close_np = close_ts.cpu().detach().numpy()
close_df = pd.DataFrame(close_np)
    dema = close_df.apply(lambda x: talib.DEMA(x, timeperiod=timeperiod))
dema_ts = torch.tensor(dema.values, dtype=close_ts.dtype, device=close_ts.device)
return dema_ts
def HT_trendline(close_ts):
import talib
close_np = close_ts.cpu().detach().numpy()
close_df = pd.DataFrame(close_np)
HT_trendline = close_df.apply(lambda x: talib.HT_TRENDLINE(x))
HT_trendline_ts = torch.tensor(HT_trendline.values, dtype=close_ts.dtype, device=close_ts.device)
return HT_trendline_ts
def kama(close_ts, timeperiod=30):
import talib
close_np = close_ts.cpu().detach().numpy()
close_df = | pd.DataFrame(close_np) | pandas.DataFrame |
import json
from random import shuffle
from pathlib import Path
from tqdm import tqdm
import numpy as np
import pandas as pd
import torch
from torch_scatter import scatter
from torch_geometric.data import Data, InMemoryDataset, Batch
from sklearn.utils.class_weight import compute_class_weight
from utils import one_of_k_encoding, get_residue_features
try:
import rdkit
from rdkit import Chem, RDConfig
from rdkit.Chem import ChemicalFeatures
from rdkit.Chem.rdchem import HybridizationType as HT
from rdkit.Chem.rdchem import BondType as BT
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
except:
rdkit, Chem, RDConfig, ChemicalFeatures, HT, BT = 6 * [None]
print('Please install rdkit for data processing')
def get_mol_nodes_edges(mol):
# Read node features
N = mol.GetNumAtoms()
atom_type = []
atomic_number = []
aromatic = []
hybridization = []
# num_hs = []
for atom in mol.GetAtoms():
atom_type.append(atom.GetSymbol())
atomic_number.append(atom.GetAtomicNum())
aromatic.append(1 if atom.GetIsAromatic() else 0)
hybridization.append(atom.GetHybridization())
# Read edge features
row, col, edge_type = [], [], []
for bond in mol.GetBonds():
start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
row += [start, end]
col += [end, start]
edge_type += 2 * [bond.GetBondType()]
edge_index = torch.LongTensor([row, col])
edge_type = [one_of_k_encoding(t, [BT.SINGLE, BT.DOUBLE, BT.TRIPLE, BT.AROMATIC]) for t in edge_type]
edge_attr = torch.FloatTensor(edge_type)
perm = (edge_index[0] * N + edge_index[1]).argsort()
edge_index = edge_index[:, perm]
edge_attr = edge_attr[perm]
row, col = edge_index
# Concat node fetures
hs = (torch.tensor(atomic_number, dtype=torch.long) == 1).to(torch.float)
num_hs = scatter(hs[row], col, dim_size=N).tolist()
x_atom_type = [one_of_k_encoding(t, ['H', 'C', 'N', 'O', 'F', 'S', 'Cl', 'Br', 'I']) for t in atom_type]
x_hybridization = [one_of_k_encoding(h, [HT.SP, HT.SP2, HT.SP3]) for h in hybridization]
x2 = torch.tensor([atomic_number, aromatic, num_hs], dtype=torch.float).t().contiguous()
x = torch.cat([torch.FloatTensor(x_atom_type), torch.FloatTensor(x_hybridization), x2], dim=-1)
return x, edge_index, edge_attr
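# Minimal usage sketch (the SMILES string is an arbitrary example, not taken
# from any dataset file; requires rdkit to be importable).
def _mol_graph_example():
    mol = Chem.MolFromSmiles('CCO')  # ethanol: 3 heavy atoms, 2 bonds
    x, edge_index, edge_attr = get_mol_nodes_edges(mol)
    # x: (num_atoms, num_node_features), edge_index: (2, 2 * num_bonds)
    return x.shape, edge_index.shape, edge_attr.shape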
def get_pro_nodes_edges(protein_seq, contact_map):
# add node information
feat = []
for residue in protein_seq:
residue_features = get_residue_features(residue)
feat.append(residue_features)
node_attr = torch.FloatTensor(feat)
# add main_chain information
m_index_row, m_index_col, m_edge_attr = [], [], []
for i in range(len(protein_seq) - 1):
m_index_row += [i, i + 1]
m_index_col += [i + 1, i]
        m_edge_attr.append([1, 1, 0, 0, 0, 0, 0, 1])  # see the contact-map edge-feature layout built below
m_edge_attr.append([1, 1, 0, 0, 0, 0, 0, 1])
# read edge features from contactmap.txt
edge_attr = []
index_row, index_col = np.where(contact_map > 0)
index_row, index_col = index_row.tolist(), index_col.tolist()
for i, j in zip(index_row, index_col):
main_chain = 0 # int(np.abs(i - j) == 1)
prob = contact_map[i, j]
reversed_prob = 1 - prob
# prob level range
l1 = int(0 <= prob < 0.3)
l2 = int(0.3 <= prob < 0.5)
l3 = int(0.5 <= prob < 0.7)
        l4 = int(0.7 <= prob < 0.9)
l5 = int(0.9 <= prob <= 1)
edge_attr.append([main_chain, prob, reversed_prob, l1, l2, l3, l4, l5])
edge_index = torch.LongTensor([m_index_row + index_row, m_index_col + index_col])
edge_attr = torch.FloatTensor(m_edge_attr + edge_attr)
# print(node_attr.shape, edge_index.shape, edge_attr.shape)
# assert edge_index.shape[1] == edge_attr.shape[0]
return node_attr, edge_index, edge_attr
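# Minimal usage sketch (a hypothetical 4-residue peptide with a uniform dummy
# contact map; real contact maps come from the protein_maps_dict checkpoint and
# get_residue_features is assumed to accept one-letter residue codes).
def _pro_graph_example():
    seq = 'ACDE'
    contact_map = np.full((len(seq), len(seq)), 0.2)
    node_attr, edge_index, edge_attr = get_pro_nodes_edges(seq, contact_map)
    return node_attr.shape, edge_index.shape, edge_attr.shape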
# for small interaction dataset do not need to consider storage space
def proteinmol2graph(mol, protein_seq, contact_map):
if mol is None: return None
# Extrat molecular and protein's features
node_attr, edge_index, edge_attr = get_mol_nodes_edges(mol)
pro_node_attr, pro_edge_index, pro_edge_attr = get_pro_nodes_edges(protein_seq, contact_map)
# Build pyg data
data = Data(
x=node_attr, edge_index=edge_index, edge_attr=edge_attr, # pos=pos,
pro_x=pro_node_attr, pro_edge_index=pro_edge_index, pro_edge_attr=pro_edge_attr,
y=None, # None as a placeholder
# id=None,
)
return data
class BindingDBProMolInteactionDataset(InMemoryDataset):
train_val_test_GLAMs = {'train': 0, 'val': 0, 'test': 0}
def __init__(self, root, transform=None, pre_transform=None, pre_filter=None):
super(BindingDBProMolInteactionDataset, self).__init__(root, transform, pre_transform, pre_filter)
self.data, self.slices = torch.load(self.processed_paths[0])
self.train_val_test_GLAMs = torch.load(self.processed_paths[1])
self.mol_datas = torch.load(self.processed_paths[2])
self.pro_datas = torch.load(self.processed_paths[3])
n_trn, n_v, n_t = self.train_val_test_GLAMs.values()
self.train, self.val, self.test = self[:n_trn], self[n_trn:n_trn + n_v], self[n_trn + n_v:n_trn + n_v + n_t]
self.train = self.train.shuffle()
# smi[0] for unpack smi from [['smi1'], ['smi2']...]
self.mol_num_node_features = self.mol_datas[self[0].smi].x.shape[1]
self.mol_num_edge_features = self.mol_datas[self[0].smi].edge_attr.shape[1]
self.pro_num_node_features = self.pro_datas[self[0].pro].x.shape[1]
self.pro_num_edge_features = self.pro_datas[self[0].pro].edge_attr.shape[1]
@property
def raw_file_names(self):
return ['bindingdb/train.txt', 'bindingdb/dev.txt', 'bindingdb/test.txt',
'bindingdb/pro_contact_map/protein_maps_dict.ckpt']
@property
def processed_file_names(self):
return ['bindingdb_src_7.3/interaction_processed.pt', 'bindingdb_src_7.3/train_val_test_GLAMs.pt',
'bindingdb_src_7.3/mol_processed.pt', 'bindingdb_src_7.3/pro_processed.pt', ]
def process(self):
# mkdir processed/pdbbind
Path.mkdir(Path(self.processed_paths[0]).parent, exist_ok=True)
# load all unique mol and protein
train = pd.read_csv(self.raw_paths[0], sep=' ')
dev = pd.read_csv(self.raw_paths[1], sep=' ')
test = pd.read_csv(self.raw_paths[2], sep=' ')
unique_pros = set(pd.concat([train, dev, test])['target_sequence'].to_list())
unique_smis = set(pd.concat([train, dev, test])['compound_iso_smiles'].to_list())
unique_smis = [Chem.MolToSmiles(Chem.MolFromSmiles(smi), isomericSmiles=True) for smi in unique_smis]
# mol preprocess and save
mol_data_dict = {}
for i, smi in tqdm(enumerate(unique_smis)):
mol = Chem.MolFromSmiles(smi)
node_attr, edge_index, edge_attr = get_mol_nodes_edges(mol)
data = Data(x=node_attr, edge_index=edge_index, edge_attr=edge_attr)
mol_data_dict[smi] = data
torch.save(mol_data_dict, Path(self.processed_paths[2]))
# pro preprocess and save
protein_maps_dict_path = Path(self.raw_paths[3])
        protein_maps_dict = torch.load(protein_maps_dict_path)
pro_data_dict = {}
for i, pro in tqdm(enumerate(unique_pros)):
if pro not in protein_maps_dict.keys(): continue # skipped some removed protein
contact_map = protein_maps_dict[pro]
pro_node_attr, pro_edge_index, pro_edge_attr = get_pro_nodes_edges(pro, contact_map)
data = Data(x=pro_node_attr, edge_index=pro_edge_index, edge_attr=pro_edge_attr, )
pro_data_dict[pro] = data
torch.save(pro_data_dict, Path(self.processed_paths[3]))
# mol-pro inteaction save
data_list = []
for i, dataset_type in enumerate(self.train_val_test_GLAMs): # 0, 1, 2 for train, val, test
data_path = self.raw_paths[i]
data_pd = | pd.read_csv(data_path, sep=' ') | pandas.read_csv |
"""
Site preprocessing script.
Written by <NAME>.
Winter 2020
"""
import os
import configparser
import json
import csv
import math
import glob
import pandas as pd
import geopandas as gpd
import pyproj
from shapely.geometry import Polygon, MultiPolygon, mapping, shape, MultiLineString, LineString
from shapely.ops import transform, unary_union, nearest_points
import fiona
from fiona.crs import from_epsg
import rasterio
from rasterio.mask import mask
from rasterstats import zonal_stats
import networkx as nx
from rtree import index
import numpy as np
import random
CONFIG = configparser.ConfigParser()
CONFIG.read(os.path.join(os.path.dirname(__file__), 'script_config.ini'))
BASE_PATH = CONFIG['file_locations']['base_path']
DATA_RAW = os.path.join(BASE_PATH, 'raw')
DATA_INTERMEDIATE = os.path.join(BASE_PATH, 'intermediate')
DATA_PROCESSED = os.path.join(BASE_PATH, 'processed')
def find_country_list(continent_list):
"""
This function produces country information by continent.
Parameters
----------
continent_list : list
Contains the name of the desired continent, e.g. ['Africa']
Returns
-------
countries : list of dicts
Contains all desired country information for countries in
the stated continent.
"""
glob_info_path = os.path.join(BASE_PATH, 'global_information.csv')
countries = pd.read_csv(glob_info_path, encoding = "ISO-8859-1")
countries = countries[countries.exclude != 1]
if len(continent_list) > 0:
data = countries.loc[countries['continent'].isin(continent_list)]
else:
data = countries
output = []
for index, country in data.iterrows():
output.append({
'country_name': country['country'],
'iso3': country['ISO_3digit'],
'iso2': country['ISO_2digit'],
'regional_level': country['lowest'],
'region': country['region']
})
return output
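# Usage sketch: fetch the metadata rows for one continent (requires the
# global_information.csv file referenced above to be present).
def _country_list_example():
    africa = find_country_list(['Africa'])
    # each entry: {'country_name', 'iso3', 'iso2', 'regional_level', 'region'}
    return africa[:3]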
def process_coverage_shapes(country):
"""
Load in coverage maps, process and export for each country.
Parameters
----------
    country : dict
        Contains specific country information, including the 'iso3' and 'iso2' codes.
"""
iso3 = country['iso3']
iso2 = country['iso2']
technologies = [
'GSM',
'3G',
'4G'
]
for tech in technologies:
folder_coverage = os.path.join(DATA_INTERMEDIATE, iso3, 'coverage')
filename = 'coverage_{}.shp'.format(tech)
path_output = os.path.join(folder_coverage, filename)
if os.path.exists(path_output):
continue
print('----')
print('Working on {} in {}'.format(tech, iso3))
filename = 'Inclusions_201812_{}.shp'.format(tech)
folder = os.path.join(DATA_RAW, 'mobile_coverage_explorer',
'Data_MCE')
inclusions = gpd.read_file(os.path.join(folder, filename))
if iso2 in inclusions['CNTRY_ISO2']:
filename = 'MCE_201812_{}.shp'.format(tech)
folder = os.path.join(DATA_RAW, 'mobile_coverage_explorer',
'Data_MCE')
coverage = gpd.read_file(os.path.join(folder, filename))
coverage = coverage.loc[coverage['CNTRY_ISO3'] == iso3]
else:
filename = 'OCI_201812_{}.shp'.format(tech)
folder = os.path.join(DATA_RAW, 'mobile_coverage_explorer',
'Data_OCI')
coverage = gpd.read_file(os.path.join(folder, filename))
coverage = coverage.loc[coverage['CNTRY_ISO3'] == iso3]
if len(coverage) > 0:
print('Dissolving polygons')
coverage['dissolve'] = 1
coverage = coverage.dissolve(by='dissolve', aggfunc='sum')
coverage = coverage.to_crs('epsg:3857')
print('Excluding small shapes')
coverage['geometry'] = coverage.apply(clean_coverage,axis=1)
print('Removing empty and null geometries')
coverage = coverage[~(coverage['geometry'].is_empty)]
coverage = coverage[coverage['geometry'].notnull()]
print('Simplifying geometries')
coverage['geometry'] = coverage.simplify(
tolerance = 0.005,
preserve_topology=True).buffer(0.0001).simplify(
tolerance = 0.005,
preserve_topology=True
)
coverage = coverage.to_crs('epsg:4326')
if not os.path.exists(folder_coverage):
os.makedirs(folder_coverage)
coverage.to_file(path_output, driver='ESRI Shapefile')
return #print('Processed coverage shapes')
def process_regional_coverage(country):
"""
    This function estimates the area covered by each cellular
technology.
Parameters
----------
country : dict
Contains specific country parameters.
Returns
-------
output : dict
Results for cellular coverage by each technology for
each region.
"""
level = country['regional_level']
iso3 = country['iso3']
gid_level = 'GID_{}'.format(level)
filename = 'regions_{}_{}.shp'.format(level, iso3)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'regions')
path = os.path.join(folder, filename)
regions = gpd.read_file(path)
technologies = [
'GSM',
'3G',
'4G'
]
output = {}
for tech in technologies:
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'coverage')
path = os.path.join(folder, 'coverage_{}.shp'.format(tech))
if os.path.exists(path):
coverage = gpd.read_file(path, encoding="utf-8")
segments = gpd.overlay(regions, coverage, how='intersection')
tech_coverage = {}
for idx, region in segments.iterrows():
area_km2 = round(area_of_polygon(region['geometry']) / 1e6)
tech_coverage[region[gid_level]] = area_km2
output[tech] = tech_coverage
return output
def get_regional_data(country):
"""
Extract regional data including luminosity and population.
Parameters
----------
    country : dict
        Contains specific country information (e.g. 'iso3' and 'regional_level').
"""
iso3 = country['iso3']
level = country['regional_level']
gid_level = 'GID_{}'.format(level)
path_output = os.path.join(DATA_INTERMEDIATE, iso3, 'regional_coverage.csv')
if os.path.exists(path_output):
return #print('Regional data already exists')
path_country = os.path.join(DATA_INTERMEDIATE, iso3,
'national_outline.shp')
coverage = process_regional_coverage(country)
single_country = gpd.read_file(path_country)
# print('----')
# print('working on {}'.format(iso3))
path_settlements = os.path.join(DATA_INTERMEDIATE, iso3,
'settlements.tif')
filename = 'regions_{}_{}.shp'.format(level, iso3)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'regions')
path = os.path.join(folder, filename)
regions = gpd.read_file(path)
results = []
for index, region in regions.iterrows():
with rasterio.open(path_settlements) as src:
affine = src.transform
array = src.read(1)
array[array <= 0] = 0
population_summation = [d['sum'] for d in zonal_stats(
region['geometry'],
array,
stats=['sum'],
nodata=0,
affine=affine)][0]
area_km2 = round(area_of_polygon(region['geometry']) / 1e6)
if 'GSM' in [c for c in coverage.keys()]:
if region[gid_level] in coverage['GSM']:
coverage_GSM_km2 = coverage['GSM'][region[gid_level]]
else:
coverage_GSM_km2 = 0
else:
coverage_GSM_km2 = 0
if '3G' in [c for c in coverage.keys()]:
if region[gid_level] in coverage['3G']:
coverage_3G_km2 = coverage['3G'][region[gid_level]]
else:
coverage_3G_km2 = 0
else:
coverage_3G_km2 = 0
if '4G' in [c for c in coverage.keys()]:
if region[gid_level] in coverage['4G']:
coverage_4G_km2 = coverage['4G'][region[gid_level]]
else:
coverage_4G_km2 = 0
else:
coverage_4G_km2 = 0
results.append({
'GID_0': region['GID_0'],
'GID_id': region[gid_level],
'GID_level': gid_level,
# 'mean_luminosity_km2': luminosity_summation / area_km2 if luminosity_summation else 0,
'population': population_summation,
# 'pop_under_10_pop': pop_under_10_pop,
'area_km2': area_km2,
'population_km2': population_summation / area_km2 if population_summation else 0,
# 'pop_adults_km2': ((population_summation - pop_under_10_pop) /
# area_km2 if pop_under_10_pop else 0),
'coverage_GSM_percent': round(coverage_GSM_km2 / area_km2 * 100 if coverage_GSM_km2 else 0, 1),
'coverage_3G_percent': round(coverage_3G_km2 / area_km2 * 100 if coverage_3G_km2 else 0, 1),
'coverage_4G_percent': round(coverage_4G_km2 / area_km2 * 100 if coverage_4G_km2 else 0, 1),
})
# print('Working on backhaul')
backhaul_lut = estimate_backhaul(iso3, country['region'], '2025')
# print('Working on estimating sites')
results = estimate_sites(results, iso3, backhaul_lut)
results_df = pd.DataFrame(results)
results_df.to_csv(path_output, index=False)
# print('Completed {}'.format(single_country.NAME_0.values[0]))
return #print('Completed night lights data querying')
def find_pop_under_10(region, iso3):
"""
Find the estimated population under 10 years old.
Parameters
----------
region : pandas series
The region being modeled.
iso3 : string
ISO3 country code.
Returns
-------
population : int
Population sum under 10 years of age.
"""
path = os.path.join(DATA_INTERMEDIATE, iso3, 'under_10')
all_paths = glob.glob(path + '/*.tif')
population = []
for path in all_paths:
with rasterio.open(path) as src:
affine = src.transform
array = src.read(1)
array[array <= 0] = 0
population_summation = [d['sum'] for d in zonal_stats(
region['geometry'],
array,
stats=['sum'],
nodata=0,
affine=affine)][0]
if population_summation is not None:
population.append(population_summation)
return sum(population)
def estimate_sites(data, iso3, backhaul_lut):
"""
Estimate the sites by region.
Parameters
----------
data : dataframe
Pandas df with regional data.
iso3 : string
ISO3 country code.
backhaul_lut : dict
Lookup table of backhaul composition.
Returns
-------
output : list of dicts
All regional data with estimated sites.
"""
output = []
existing_site_data_path = os.path.join(DATA_INTERMEDIATE, iso3, 'sites', 'sites.csv')
existing_site_data = {}
if os.path.exists(existing_site_data_path):
site_data = pd.read_csv(existing_site_data_path)
site_data = site_data.to_dict('records')
for item in site_data:
existing_site_data[item['GID_id']] = item['sites']
population = 0
for region in data:
        if region['population'] is None:
continue
population += int(region['population'])
path = os.path.join(DATA_RAW, 'wb_mobile_coverage', 'wb_population_coverage_2G.csv')
coverage = pd.read_csv(path, encoding='latin-1')
coverage = coverage.loc[coverage['Country ISO3'] == iso3]
    if len(coverage) > 0:
coverage = coverage['2020'].values[0]
else:
coverage = 0
population_covered = population * (coverage / 100)
path = os.path.join(DATA_RAW, 'real_site_data', 'site_counts.csv')
towers = pd.read_csv(path, encoding = "ISO-8859-1")
towers = towers.loc[towers['iso3'] == iso3]
towers = towers['sites'].values[0]
if np.isnan(towers):
towers = 0
towers_per_pop = 0
else:
towers_per_pop = towers / population_covered
tower_backhaul_lut = estimate_backhaul_type(backhaul_lut)
data = sorted(data, key=lambda k: k['population_km2'], reverse=True)
covered_pop_so_far = 0
for region in data:
#first try to use actual data
if len(existing_site_data) > 0:
sites_estimated_total = existing_site_data[region['GID_id']]
if region['area_km2'] > 0:
sites_estimated_km2 = sites_estimated_total / region['area_km2']
else:
sites_estimated_km2 = 0
        # otherwise, without real site data, fall back to per-population estimates
else:
if covered_pop_so_far < population_covered:
sites_estimated_total = region['population'] * towers_per_pop
sites_estimated_km2 = region['population_km2'] * towers_per_pop
else:
sites_estimated_total = 0
sites_estimated_km2 = 0
backhaul_fiber = 0
backhaul_copper = 0
backhaul_wireless = 0
backhaul_satellite = 0
for i in range(1, int(round(sites_estimated_total)) + 1):
num = random.uniform(0, 1)
if num <= tower_backhaul_lut['fiber']:
backhaul_fiber += 1
elif tower_backhaul_lut['fiber'] < num <= tower_backhaul_lut['copper']:
backhaul_copper += 1
elif tower_backhaul_lut['copper'] < num <= tower_backhaul_lut['microwave']:
backhaul_wireless += 1
elif tower_backhaul_lut['microwave'] < num:
backhaul_satellite += 1
output.append({
'GID_0': region['GID_0'],
'GID_id': region['GID_id'],
'GID_level': region['GID_level'],
# 'mean_luminosity_km2': region['mean_luminosity_km2'],
'population': region['population'],
# 'pop_under_10_pop': region['pop_under_10_pop'],
'area_km2': region['area_km2'],
'population_km2': region['population_km2'],
# 'pop_adults_km2': region['pop_adults_km2'],
'coverage_GSM_percent': region['coverage_GSM_percent'],
'coverage_3G_percent': region['coverage_3G_percent'],
'coverage_4G_percent': region['coverage_4G_percent'],
'total_estimated_sites': sites_estimated_total,
'total_estimated_sites_km2': sites_estimated_km2,
'sites_3G': sites_estimated_total * (region['coverage_3G_percent'] /100),
'sites_4G': sites_estimated_total * (region['coverage_4G_percent'] /100),
'backhaul_fiber': backhaul_fiber,
'backhaul_copper': backhaul_copper,
'backhaul_wireless': backhaul_wireless,
'backhaul_satellite': backhaul_satellite,
})
        if region['population'] is None:
continue
covered_pop_so_far += region['population']
return output
def estimate_backhaul(iso3, region, year):
"""
Get the correct backhaul composition for the region.
Parameters
----------
iso3 : string
ISO3 country code.
region : string
The continent the country is part of.
year : int
The year of the backhaul composition desired.
Returns
-------
output : list of dicts
All regional data with estimated sites.
"""
output = []
path = os.path.join(BASE_PATH, 'raw', 'gsma', 'backhaul.csv')
backhaul_lut = pd.read_csv(path)
backhaul_lut = backhaul_lut.to_dict('records')
for item in backhaul_lut:
if region == item['Region'] and int(item['Year']) == int(year):
output.append({
'tech': item['Technology'],
'percentage': int(item['Value']),
})
return output
def estimate_backhaul_type(backhaul_lut):
"""
Process the tower backhaul lut.
Parameters
----------
backhaul_lut : dict
Lookup table of backhaul composition.
Returns
-------
output : dict
Tower backhaul lookup table.
"""
output = {}
preference = [
'fiber',
'copper',
'microwave',
'satellite'
]
perc_so_far = 0
for tech in preference:
for item in backhaul_lut:
if tech == item['tech'].lower():
perc = item['percentage']
output[tech] = (perc + perc_so_far) / 100
perc_so_far += perc
return output
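# Worked example (hypothetical percentages): a lut of fiber=10, copper=20,
# microwave=60, satellite=10 yields the cumulative thresholds
# {'fiber': 0.1, 'copper': 0.3, 'microwave': 0.9, 'satellite': 1.0}, which is
# what the uniform random draw in estimate_sites() is compared against.
def _backhaul_type_example():
    lut = [
        {'tech': 'Fiber', 'percentage': 10},
        {'tech': 'Copper', 'percentage': 20},
        {'tech': 'Microwave', 'percentage': 60},
        {'tech': 'Satellite', 'percentage': 10},
    ]
    return estimate_backhaul_type(lut)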
def area_of_polygon(geom):
"""
    Returns the geodesic area of a polygon, assuming the geometry is in
    WGS84 (lon/lat) coordinates.
Parameters
----------
geom : shapely geometry
A shapely geometry object.
Returns
-------
poly_area : int
        Area of the polygon in square metres.
"""
geod = pyproj.Geod(ellps="WGS84")
poly_area, poly_perimeter = geod.geometry_area_perimeter(
geom
)
return abs(int(poly_area))
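# Usage sketch: a 1 x 1 degree square at the equator covers roughly 12,300 km2,
# so the helper returns square metres, hence the / 1e6 conversions used
# throughout this script.
def _area_example():
    square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    return area_of_polygon(square) / 1e6  # approx. 12,300 km2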
def length_of_line(geom):
"""
Returns the length of a linestring. Assume WGS84 as crs.
Parameters
----------
geom : shapely geometry
A shapely geometry object.
Returns
-------
total_length : int
        Length of the linestring in metres.
"""
geod = pyproj.Geod(ellps="WGS84")
total_length = geod.line_length(*geom.xy)
return abs(int(total_length))
def estimate_numers_of_sites(linear_regressor, x_value):
"""
Function to predict the y value from the stated x value.
Parameters
----------
linear_regressor : object
Linear regression object.
x_value : float
The stated x value we want to use to predict y.
Returns
-------
result : float
The predicted y value.
"""
if not x_value == 0:
result = linear_regressor.predict(x_value)
result = result[0,0]
else:
result = 0
return result
def exclude_small_shapes(x):
"""
Remove small multipolygon shapes.
Parameters
---------
x : polygon
Feature to simplify.
Returns
-------
MultiPolygon : MultiPolygon
Shapely MultiPolygon geometry without tiny shapes.
"""
# if its a single polygon, just return the polygon geometry
if x.geometry.geom_type == 'Polygon':
return x.geometry
# if its a multipolygon, we start trying to simplify
# and remove shapes if its too big.
elif x.geometry.geom_type == 'MultiPolygon':
area1 = 0.01
area2 = 50
# dont remove shapes if total area is already very small
if x.geometry.area < area1:
return x.geometry
# remove bigger shapes if country is really big
if x['GID_0'] in ['CHL','IDN']:
threshold = 0.01
elif x['GID_0'] in ['RUS','GRL','CAN','USA']:
threshold = 0.01
elif x.geometry.area > area2:
threshold = 0.1
else:
threshold = 0.001
# save remaining polygons as new multipolygon for
# the specific country
new_geom = []
for y in x.geometry:
if y.area > threshold:
new_geom.append(y)
return MultiPolygon(new_geom)
def clean_coverage(x):
"""
    Cleans the coverage polygons by removing small multipolygon shapes.
Parameters
---------
x : polygon
Feature to simplify.
Returns
-------
MultiPolygon : MultiPolygon
Shapely MultiPolygon geometry without tiny shapes.
"""
# if its a single polygon, just return the polygon geometry
if x.geometry.geom_type == 'Polygon':
if x.geometry.area > 1e7:
return x.geometry
# if its a multipolygon, we start trying to simplify and
# remove shapes if its too big.
elif x.geometry.geom_type == 'MultiPolygon':
threshold = 1e7
# save remaining polygons as new multipolygon for
# the specific country
new_geom = []
for y in x.geometry:
if y.area > threshold:
new_geom.append(y)
return MultiPolygon(new_geom)
def estimate_core_nodes(iso3, pop_density_km2, settlement_size):
"""
This function identifies settlements which exceed a desired settlement
size. It is assumed fiber exists at settlements over, for example,
20,000 inhabitants.
Parameters
----------
iso3 : string
ISO 3 digit country code.
pop_density_km2 : int
Population density threshold for identifying built up areas.
settlement_size : int
        Overall settlement size assumption, e.g. 20,000 inhabitants.
Returns
-------
output : list of dicts
Identified major settlements as Geojson objects.
"""
path = os.path.join(DATA_INTERMEDIATE, iso3, 'settlements.tif')
with rasterio.open(path) as src:
data = src.read()
threshold = pop_density_km2
data[data < threshold] = 0
data[data >= threshold] = 1
polygons = rasterio.features.shapes(data, transform=src.transform)
shapes_df = gpd.GeoDataFrame.from_features(
[
{'geometry': poly, 'properties':{'value':value}}
for poly, value in polygons
if value > 0
],
crs='epsg:4326'
)
stats = zonal_stats(shapes_df['geometry'], path, stats=['count', 'sum'])
stats_df = pd.DataFrame(stats)
nodes = pd.concat([shapes_df, stats_df], axis=1).drop(columns='value')
nodes = nodes[nodes['sum'] >= settlement_size]
nodes['geometry'] = nodes['geometry'].centroid
nodes = get_points_inside_country(nodes, iso3)
output = []
for index, item in enumerate(nodes.to_dict('records')):
output.append({
'type': 'Feature',
'geometry': mapping(item['geometry']),
'properties': {
'network_layer': 'core',
'id': 'core_{}'.format(index),
'node_number': index,
}
})
return output
def get_points_inside_country(nodes, iso3):
"""
Check settlement locations lie inside target country.
Parameters
----------
nodes : dataframe
A geopandas dataframe containing settlement nodes.
iso3 : string
ISO 3 digit country code.
Returns
-------
nodes : dataframe
A geopandas dataframe containing settlement nodes.
"""
filename = 'national_outline.shp'
path = os.path.join(DATA_INTERMEDIATE, iso3, filename)
national_outline = gpd.read_file(path)
bool_list = nodes.intersects(national_outline.unary_union)
nodes = pd.concat([nodes, bool_list], axis=1)
nodes = nodes[nodes[0] == True].drop(columns=0)
return nodes
def generate_agglomeration_lut(country):
"""
Generate a lookup table of agglomerations.
Parameters
----------
country : dict
        Contains all country specific information.
"""
iso3 = country['iso3']
regional_level = country['regional_level']
GID_level = 'GID_{}'.format(regional_level)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'agglomerations')
if not os.path.exists(folder):
os.makedirs(folder)
path_output = os.path.join(folder, 'agglomerations.shp')
if os.path.exists(path_output):
return print('Agglomeration processing has already completed')
print('Working on {} agglomeration lookup table'.format(iso3))
filename = 'regions_{}_{}.shp'.format(regional_level, iso3)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'regions')
path = os.path.join(folder, filename)
regions = gpd.read_file(path, crs="epsg:4326")
path_settlements = os.path.join(DATA_INTERMEDIATE, iso3, 'settlements.tif')
settlements = rasterio.open(path_settlements, 'r+')
settlements.nodata = 255
settlements.crs = {"epsg:4326"}
folder_tifs = os.path.join(DATA_INTERMEDIATE, iso3, 'agglomerations', 'tifs')
if not os.path.exists(folder_tifs):
os.makedirs(folder_tifs)
for idx, region in regions.iterrows():
bbox = region['geometry'].envelope
geo = gpd.GeoDataFrame()
geo = gpd.GeoDataFrame({'geometry': bbox}, index=[idx])
coords = [json.loads(geo.to_json())['features'][0]['geometry']]
#chop on coords
out_img, out_transform = mask(settlements, coords, crop=True)
# Copy the metadata
out_meta = settlements.meta.copy()
out_meta.update({"driver": "GTiff",
"height": out_img.shape[1],
"width": out_img.shape[2],
"transform": out_transform,
"crs": 'epsg:4326'})
path_output = os.path.join(folder_tifs, region[GID_level] + '.tif')
with rasterio.open(path_output, "w", **out_meta) as dest:
dest.write(out_img)
print('Completed settlement.tif regional segmentation')
nodes, missing_nodes = find_nodes(country, regions)
missing_nodes = get_missing_nodes(country, regions, missing_nodes, 10, 10)
nodes = nodes + missing_nodes
nodes = gpd.GeoDataFrame.from_features(nodes, crs='epsg:4326')
bool_list = nodes.intersects(regions['geometry'].unary_union)
nodes = pd.concat([nodes, bool_list], axis=1)
nodes = nodes[nodes[0] == True].drop(columns=0)
agglomerations = []
print('Identifying agglomerations')
for idx1, region in regions.iterrows():
seen = set()
for idx2, node in nodes.iterrows():
if node['geometry'].intersects(region['geometry']):
agglomerations.append({
'type': 'Feature',
'geometry': mapping(node['geometry']),
'properties': {
'id': idx1,
'GID_0': region['GID_0'],
GID_level: region[GID_level],
'population': node['sum'],
}
})
seen.add(region[GID_level])
if len(seen) == 0:
agglomerations.append({
'type': 'Feature',
'geometry': mapping(region['geometry'].centroid),
'properties': {
'id': 'regional_node',
'GID_0': region['GID_0'],
GID_level: region[GID_level],
'population': 1,
}
})
agglomerations = gpd.GeoDataFrame.from_features(
[
{
'geometry': item['geometry'],
'properties': {
'id': item['properties']['id'],
'GID_0':item['properties']['GID_0'],
GID_level: item['properties'][GID_level],
'population': item['properties']['population'],
}
}
for item in agglomerations
],
crs='epsg:4326'
)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'agglomerations')
path_output = os.path.join(folder, 'agglomerations' + '.shp')
agglomerations.to_file(path_output)
agglomerations['lon'] = agglomerations['geometry'].x
agglomerations['lat'] = agglomerations['geometry'].y
agglomerations = agglomerations[['lon', 'lat', GID_level, 'population']]
agglomerations.to_csv(os.path.join(folder, 'agglomerations.csv'), index=False)
return print('Agglomerations layer complete')
def process_existing_fiber(country):
"""
Load and process existing fiber data.
Parameters
----------
country : dict
        Contains all country specific information.
"""
iso3 = country['iso3']
iso2 = country['iso2'].lower()
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'network_existing')
if not os.path.exists(folder):
os.makedirs(folder)
filename = 'core_edges_existing.shp'
path_output = os.path.join(folder, filename)
if os.path.exists(path_output):
return print('Existing fiber already processed')
path = os.path.join(DATA_RAW, 'afterfiber', 'afterfiber.shp')
shape = fiona.open(path)
data = []
for item in shape:
if item['properties']['iso2'].lower() == iso2.lower():
if item['geometry']['type'] == 'LineString':
if int(item['properties']['live']) == 1:
data.append({
'type': 'Feature',
'geometry': {
'type': 'LineString',
'coordinates': item['geometry']['coordinates'],
},
'properties': {
'operators': item['properties']['operator'],
'source': 'existing'
}
})
if item['geometry']['type'] == 'MultiLineString':
if int(item['properties']['live']) == 1:
try:
geom = MultiLineString(item['geometry']['coordinates'])
for line in geom:
data.append({
'type': 'Feature',
'geometry': mapping(line),
'properties': {
'operators': item['properties']['operator'],
'source': 'existing'
}
})
except:
# some geometries are incorrect from data source
# exclude to avoid issues
pass
if len(data) == 0:
return print('No existing infrastructure')
data = gpd.GeoDataFrame.from_features(data)
data.to_file(path_output, crs='epsg:4326')
return print('Existing fiber processed')
def find_nodes_on_existing_infrastructure(country):
"""
Find those agglomerations which are within a buffered zone of
existing fiber links.
Parameters
----------
country : dict
        Contains all country specific information.
"""
iso3 = country['iso3']
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'network_existing')
filename = 'core_nodes_existing.shp'
path_output = os.path.join(folder, filename)
if os.path.exists(path_output):
return print('Already found nodes on existing infrastructure')
else:
        if not os.path.exists(os.path.dirname(path_output)):
            os.makedirs(os.path.dirname(path_output))
path = os.path.join(folder, 'core_edges_existing.shp')
if not os.path.exists(path):
return print('No existing infrastructure')
existing_infra = gpd.read_file(path, crs='epsg:4326')
existing_infra = existing_infra.to_crs(epsg=3857)
existing_infra['geometry'] = existing_infra['geometry'].buffer(5000)
existing_infra = existing_infra.to_crs(epsg=4326)
# shape_output = os.path.join(DATA_INTERMEDIATE, iso3, 'network', 'core_edges_buffered.shp')
# existing_infra.to_file(shape_output, crs='epsg:4326')
path = os.path.join(DATA_INTERMEDIATE, iso3, 'agglomerations', 'agglomerations.shp')
agglomerations = gpd.read_file(path, crs='epsg:4326')
bool_list = agglomerations.intersects(existing_infra.unary_union)
agglomerations = pd.concat([agglomerations, bool_list], axis=1)
agglomerations = agglomerations[agglomerations[0] == True].drop(columns=0)
agglomerations['source'] = 'existing'
agglomerations.to_file(path_output, crs='epsg:4326')
return print('Found nodes on existing infrastructure')
def find_nodes(country, regions):
"""
Find key nodes.
Parameters
----------
country : dict
        Contains all country specific information.
regions : dataframe
All regions to be assessed.
Returns
-------
interim : list of dicts
Contains geojson dicts for nodes.
missing_nodes : list
Contains the id of regions with missing nodes.
"""
iso3 = country['iso3']
regional_level = country['regional_level']
GID_level = 'GID_{}'.format(regional_level)
threshold = country['pop_density_km2']
settlement_size = country['settlement_size']
folder_tifs = os.path.join(DATA_INTERMEDIATE, iso3, 'agglomerations', 'tifs')
interim = []
missing_nodes = set()
print('Working on gathering data from regional rasters')
for idx, region in regions.iterrows():
path = os.path.join(folder_tifs, region[GID_level] + '.tif')
with rasterio.open(path) as src:
data = src.read()
data[data < threshold] = 0
data[data >= threshold] = 1
polygons = rasterio.features.shapes(data, transform=src.transform)
shapes_df = gpd.GeoDataFrame.from_features(
[
{'geometry': poly, 'properties':{'value':value}}
for poly, value in polygons
if value > 0
],
crs='epsg:4326'
)
geojson_region = [
{
'geometry': region['geometry'],
'properties': {
GID_level: region[GID_level]
}
}
]
gpd_region = gpd.GeoDataFrame.from_features(
[
{'geometry': poly['geometry'],
'properties':{
GID_level: poly['properties'][GID_level]
}}
for poly in geojson_region
], crs='epsg:4326'
)
if len(shapes_df) == 0:
continue
nodes = gpd.overlay(shapes_df, gpd_region, how='intersection')
stats = zonal_stats(shapes_df['geometry'], path, stats=['count', 'sum'])
stats_df = | pd.DataFrame(stats) | pandas.DataFrame |
from typing import List, Union, Dict, Any, Tuple
import os
import json
from glob import glob
from dataclasses import dataclass
import functools
import argparse
from sklearn import metrics
import torch
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.metrics import precision_recall_fscore_support
from datasets import ClassLabel, load_dataset, load_metric
from utils import *
from dataset_configs import *
tqdm.pandas() # enable progress_apply
def flatten_dataset_to_table(dataset) -> pd.DataFrame:
"""Convert the HF Dataset to a Pandas DataFrame"""
results = []
for e_id, example in enumerate(tqdm(dataset)):
cur_len = len(example["words"])
results.extend(
[
[
e_id,
i,
example["words"][i],
example["labels"][i],
example["block_ids"][i],
example["line_ids"][i],
]
for i in range(cur_len)
]
)
return pd.DataFrame(
results,
columns=["sample_id", "word_id", "word", "label", "block_id", "line_id"],
)
def load_dataset_and_flatten(dataset_path) -> pd.DataFrame:
if os.path.exists(dataset_path.replace(".json", ".cached.csv")):
return pd.read_csv(dataset_path.replace(".json", ".cached.csv"))
else:
dataset = load_dataset("json", data_files=dataset_path, field="data")
df = flatten_dataset_to_table(dataset["train"])
df.to_csv(dataset_path.replace(".json", ".cached.csv"), index=None)
return df
def _preprocess_prediction_table(
test_df, pred_df, most_frequent_category=None, label_mapping: Dict = None
) -> pd.DataFrame:
"""Merge the prediction table with the original gt table
to 1) fetch the gt and 2) insert some "un-tokenized" tokens
"""
merged_df = test_df.merge(
pred_df.loc[:, ["sample_id", "word_id", "pred"]],
how="outer",
on=["sample_id", "word_id"],
)
if label_mapping is not None:
merged_df["pred"] = merged_df["pred"].map(label_mapping)
if most_frequent_category is None:
most_frequent_category = test_df["label"].value_counts().index[0]
merged_df["pred"] = merged_df["pred"].fillna(
most_frequent_category
) # fill in the most frequent category
return merged_df
@dataclass
class ModelConfig:
task_name: str = ""
model_name: str = ""
variant: str = ""
def put_model_config_at_the_first(func):
@functools.wraps(func)
def wrap(self, *args, **kwargs):
df = func(self, *args, **kwargs)
columns = df.columns
return df[["task_name", "model_name", "variant"] + list(columns[:-3])]
return wrap
class SingleModelPrediction:
"""Methods for processing the "test_predictions" tables for an individual model"""
def __init__(
self,
df,
label_space,
model_config: ModelConfig,
gt_name="label",
pred_name="pred",
used_metric="entropy",
):
self.df = df
self.label_space = label_space
self.gt_name = gt_name
self.pred_name = pred_name
self.model_config = model_config
self.used_metric = used_metric
@classmethod
def from_raw_prediction_table(
cls,
test_df,
pred_df,
label_space,
model_config,
most_frequent_category: int = None,
label_mapping=None,
used_metric="entropy",
**kwargs,
):
merged_df = _preprocess_prediction_table(
test_df,
pred_df,
most_frequent_category,
label_mapping=label_mapping,
)
return cls(
merged_df, label_space, model_config, used_metric=used_metric, **kwargs
)
def groupby(self, level):
assert level in ["block", "line"]
return self.df.groupby(["sample_id", f"{level}_id"])
def calculate_per_category_scores(self):
_scores = precision_recall_fscore_support(
self.df[self.gt_name],
self.df[self.pred_name],
labels=self.label_space,
zero_division=0,
)
_scores = pd.DataFrame(
_scores,
columns=self.label_space,
index=["precision", "recall", "f-score", "support"],
)
return _scores
def calculate_accuracy_for_group(self, gp, score_average="micro"):
accuracy = (gp[self.gt_name] == gp[self.pred_name]).mean()
precision, recall, fscore, _ = precision_recall_fscore_support(
gp[self.gt_name],
gp[self.pred_name],
average=score_average,
labels=self.label_space,
zero_division=0,
)
return {
"accuracy": accuracy,
"precision": precision,
"recall": recall,
"fscore": fscore,
}
def calculate_gini_score_for_group(self, gp):
cts = gp[self.pred_name].value_counts()
if len(cts) == 1:
return 0
else:
return 1 - ((cts / cts.sum()) ** 2).sum()
def calculate_entropy_for_group(self, gp):
cts = gp[self.pred_name].value_counts()
if len(cts) == 1:
return 0
else:
prob = cts / cts.sum()
entropy = -(prob * np.log2(prob)).sum()
return entropy
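    # Worked example: a block predicted as [0, 0, 1, 1] has
    # gini = 1 - (0.5**2 + 0.5**2) = 0.5 and entropy = 1.0 bit, while a block
    # with a single predicted class scores 0 under both disagreement metrics.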
def create_page_level_accuracy_report(self) -> pd.DataFrame:
return (
self.df.groupby("sample_id")
.apply(self.calculate_accuracy_for_group)
.apply(pd.Series)
)
def create_page_level_gini_report(self, level="block") -> pd.Series:
gini = self.groupby(level=level).apply(self.calculate_gini_score_for_group)
gini = (
gini.to_frame()
.rename(columns={0: "gini"})
.reset_index()
.groupby("sample_id")
.gini.mean()
)
return gini
def create_page_level_entropy_report(self, level="block") -> pd.Series:
entropy = self.groupby(level=level).apply(self.calculate_entropy_for_group)
entropy = (
entropy.to_frame()
.rename(columns={0: "entropy"})
.reset_index()
.groupby("sample_id")
.entropy.mean()
)
return entropy
def create_page_level_ami_report(self) -> pd.DataFrame:
ami = (
self.df.groupby("sample_id")
.apply(
lambda gp: metrics.adjusted_mutual_info_score(
gp[self.gt_name], gp[self.pred_name]
)
)
.to_frame()
.rename(columns={0: "ami"})
)
return ami
def create_page_level_overall_report(self) -> pd.DataFrame:
report = self.create_page_level_accuracy_report()
report["gini"] = self.create_page_level_gini_report()
report["entropy"] = self.create_page_level_entropy_report()
return report
def create_all_page_accuracy_report(self) -> pd.Series:
return pd.Series(
self.calculate_accuracy_for_group(self.df, score_average="macro")
)
def create_all_page_ami_report(self) -> pd.Series:
return pd.Series(self.create_page_level_ami_report().mean())
def create_all_page_gini_report(self, level="block") -> pd.Series:
gini = self.create_page_level_gini_report(level=level)
report = pd.Series(
{
f"gini_{level}_average": gini.mean(),
f"gini_{level}_std": gini.std(),
f"gini_{level}_nonzero": gini[gini > 0].count(),
}
)
return report
def create_all_page_entropy_report(self, level="block") -> pd.Series:
entropy = self.create_page_level_entropy_report(level=level)
report = pd.Series(
{
f"entropy_{level}_average": entropy.mean(),
f"entropy_{level}_std": entropy.std(),
f"entropy_{level}_nonzero": entropy[entropy > 0].count(),
}
)
return report
def create_all_page_overall_report(self, add_line_level_gini=False) -> pd.Series:
report = self.create_all_page_accuracy_report()
if self.used_metric == "gini":
gini = self.create_all_page_gini_report()
if add_line_level_gini:
gini = gini.append(self.create_all_page_gini_report(level="line"))
report = report.append(gini)
elif self.used_metric == "entropy":
entropy = self.create_all_page_entropy_report()
if add_line_level_gini:
entropy = entropy.append(
self.create_all_page_entropy_report(level="line")
)
report = report.append(entropy)
report = report.append(self.create_all_page_ami_report())
return report
def majority_voting_postprocessing(self, level) -> "SingleModelPrediction":
"""This method attempts to use majority voting for model predictions within each
group (level) to improve the accuracy. It will firstly use groupby the elements
within each group, then find the most common class in the predicted categoires,
and replace the others as the predicted category.
"""
# It might take a while
df = (
self.groupby(level=level)
.progress_apply(
lambda gp: gp.assign(pred=gp[self.pred_name].value_counts().index[0])
)
.reset_index(drop=True)
)
return self.__class__(
df,
**{key: getattr(self, key) for key in self.__dict__.keys() if key != "df"},
)
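# Usage sketch (hypothetical paths and label space): build a prediction object
# from the flattened test split plus one model's prediction table, then
# summarise it; majority_voting_postprocessing('block') can be applied first to
# smooth token-level predictions.
#   test_df = load_dataset_and_flatten('data/test.json')
#   pred_df = pd.read_csv('runs/some-model/test_predictions.csv')
#   smp = SingleModelPrediction.from_raw_prediction_table(
#       test_df, pred_df, label_space=[0, 1, 2, 3],
#       model_config=ModelConfig('task', 'some-model', 'default'))
#   report = smp.create_all_page_overall_report()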
@dataclass
class MultiModelPrediction:
"""Methods for processing the "test_predictions" tables for multiple models
within a Experiment
"""
predictions: List[SingleModelPrediction]
name: str
def create_per_category_report(self) -> pd.DataFrame:
reports = []
for prediction in self.predictions:
report = prediction.calculate_per_category_scores()
report["task_name"] = prediction.model_config.task_name
report["model_name"] = prediction.model_config.model_name
report["variant"] = prediction.model_config.variant
reports.append(report)
return | pd.concat(reports) | pandas.concat |
from sklearn.metrics import mutual_info_score
import matplotlib.pyplot as plt
import networkx as nx
from math import log
import numpy as np
import pandas as pd
import os
from helpers.timer import Timer
def mim(size, bins, data):
"""
Calculates the mutual information matrix.
Input:
The number of genes, the number of bins and the matrix (genes, samples).
Output:
        Upper-triangular matrix (np.array) with the pairwise mutual information values.
"""
matrix = np.zeros((size, size), dtype = "float64")
for i in np.arange(size):
x = data[i].copy()
for j in np.arange(i + 1, size):
y = data[j].copy()
matrix[i][j] = sum_mi(x, y, bins)
return matrix
def sum_mi(x, y, bins):
"""
Computes the mutual information score of the discrete probability
variable of each pair of genes.
Input:
Data array of each gene and the number of bins for the
discretization process.
Output:
Mutual information score variable (int).
"""
c_xy = np.histogram2d(x, y, bins)[0]
mi = mutual_info_score(None, None, contingency=c_xy)
return mi
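# Worked example (illustrative only): identical vectors give a mutual
# information equal to the entropy of their binned marginal, while independent
# noise scores close to zero (values are in nats, as returned by sklearn).
def _sum_mi_example(bins=8):
    rng = np.random.default_rng(0)
    x = rng.normal(size=500)
    high_mi = sum_mi(x, x, bins)
    low_mi = sum_mi(x, rng.normal(size=500), bins)
    return high_mi, low_mi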
def threshold_calculation(matrix, bins):
"""
Computes the threshold to make a clearer mutual information matrix.
Input:
First MIM computation and the amount of bins to calculate the MIM
of the permutation.
Output:
        Threshold value (float).
"""
n_perm = 2
n_genes, n_cases = matrix.shape
permutations = np.zeros((n_perm, n_genes, n_cases))
# Execution of the permutation
for perm in np.arange(n_perm):
# Shuffle the matrix
perm_matrix = [matrix[i][np.random.permutation(n_genes)] for i in np.arange(n_cases)]
perm_matrix = np.vstack((perm_matrix))
# Execution of the MIM computation
dummy = mim(n_genes, bins, perm_matrix)
# Save permutation
permutations[perm] = dummy
return np.amax(np.mean(permutations, axis = 0))
def lioness_algorithm(data):
"""
LIONESS algorithm.
Input:
data (pd.DataFrame): Numeric matrix with samples in columns.
Output:
Sample-specific matrix.
"""
with Timer('Reading data...'):
columns = data.columns.to_numpy()
rows = data.index.to_numpy()
samples = len(columns)
genes = len(rows)
        bins = round(1 + 3.22 * log(genes))  # Sturges-like binning rule (natural-log variant)
data_np = data.to_numpy()
with Timer("Computing agg..."):
agg = mim(genes, bins, data_np)
#threshold
I_0 = threshold_calculation(agg, bins)
id = np.where(agg < I_0)
agg[id] = 0
agg = agg.flatten()
reg = np.array([rows for i in np.arange(genes)]).flatten()
tar = np.array([[x for i in np.arange(genes)] for x in rows]).flatten()
output = | pd.DataFrame({"reg": reg, "tar": tar}) | pandas.DataFrame |
from IPython.core.error import UsageError
from mock import MagicMock
import numpy as np
from nose.tools import assert_equals, assert_is
import pandas as pd
from pandas.testing import assert_frame_equal
from sparkmagic.livyclientlib.exceptions import BadUserDataException
from sparkmagic.utils.utils import parse_argstring_or_throw, records_to_dataframe
from sparkmagic.utils.constants import SESSION_KIND_PYSPARK
from sparkmagic.utils.dataframe_parser import (
DataframeHtmlParser,
cell_contains_dataframe,
CellComponentType,
cell_components_iter,
CellOutputHtmlParser,
)
import unittest
def test_parse_argstring_or_throw():
parse_argstring = MagicMock(side_effect=UsageError("OOGABOOGABOOGA"))
try:
parse_argstring_or_throw(
MagicMock(), MagicMock(), parse_argstring=parse_argstring
)
assert False
except BadUserDataException as e:
assert_equals(str(e), str(parse_argstring.side_effect))
parse_argstring = MagicMock(side_effect=ValueError("AN UNKNOWN ERROR HAPPENED"))
try:
parse_argstring_or_throw(
MagicMock(), MagicMock(), parse_argstring=parse_argstring
)
assert False
except ValueError as e:
assert_is(e, parse_argstring.side_effect)
def test_records_to_dataframe_missing_value_first():
result = """{"z":100, "y":50}
{"z":25, "nullv":1.0, "y":10}"""
df = records_to_dataframe(result, SESSION_KIND_PYSPARK, True)
expected = pd.DataFrame(
[{"z": 100, "nullv": None, "y": 50}, {"z": 25, "nullv": 1, "y": 10}],
columns=["z", "nullv", "y"],
)
assert_frame_equal(expected, df)
def test_records_to_dataframe_coercing():
result = """{"z":"100", "y":"2016-01-01"}
{"z":"25", "y":"2016-01-01"}"""
df = records_to_dataframe(result, SESSION_KIND_PYSPARK, True)
expected = pd.DataFrame(
[
{"z": 100, "y": np.datetime64("2016-01-01")},
{"z": 25, "y": np.datetime64("2016-01-01")},
],
columns=["z", "y"],
)
assert_frame_equal(expected, df)
def test_records_to_dataframe_no_coercing():
result = """{"z":"100", "y":"2016-01-01"}
{"z":"25", "y":"2016-01-01"}"""
df = records_to_dataframe(result, SESSION_KIND_PYSPARK, False)
expected = pd.DataFrame(
[{"z": "100", "y": "2016-01-01"}, {"z": "25", "y": "2016-01-01"}],
columns=["z", "y"],
)
assert_frame_equal(expected, df)
def test_records_to_dataframe_missing_value_later():
result = """{"z":25, "nullv":1.0, "y":10}
{"z":100, "y":50}"""
df = records_to_dataframe(result, SESSION_KIND_PYSPARK, True)
expected = pd.DataFrame(
[{"z": 25, "nullv": 1, "y": 10}, {"z": 100, "nullv": None, "y": 50}],
columns=["z", "nullv", "y"],
)
| assert_frame_equal(expected, df) | pandas.testing.assert_frame_equal |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = | concat([a, b], keys=["key0", "key1"], names=["lvl0"]) | pandas.concat |
# -*- coding: utf-8 -*-
import io
import json
import pandas as pd
import ijson
import codecs
import warnings
from commons import read_csv_with_encoding, read_json_with_encoding, distance
TRANSMILENIO_FILENAME = "bogota/transmilenio"
BOGOTA_INTEREST_POINTS = "bogota/bogota_interest_points.json"
def nearestBusStop(point, transmilenio):
d = 99999999
n = ""
    for i in transmilenio:
p = distance(point["latitude"], point["longitude"], transmilenio[i]["lat"], transmilenio[i]["lon"])
if p < d:
d = p
n = transmilenio[i]["name"]
return n
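# Usage sketch (illustrative coordinates; assumes read_json_with_encoding returns
# the station dict keyed by id, with "lat"/"lon"/"name" fields per station):
#   stations = read_json_with_encoding(TRANSMILENIO_FILENAME)
#   nearest = nearestBusStop({"latitude": 4.65, "longitude": -74.05}, stations)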
def read_transmilenios_as_df():
a = read_json_with_encoding(TRANSMILENIO_FILENAME)
to_array = lambda e: [e["lat"], e["lon"], e["name"]]
b = list(zip(*map(to_array, a.values())))
return pd.DataFrame({'name': b[2], 'lat': b[0], 'lon': b[1]})
"""
This approach does not work with large files; the problem is in commons.read_json_with_encoding when the
ast.literal_eval method is used. It fails because the amount of memory required is too high.
USAGE: read_nodes_from_geojson("bogota/urb_sirena_interest_points.geojson")
"""
def read_nodes_from_geojson(filename):
a = read_json_with_encoding(filename)
r = []
for b in a["features"]:
if "type" in b:
if "Feature" in b["type"]:
r.append(b)
p = []
for b in r:
if "properties" in b:
if "amenity" in b["properties"]:
p.append(b)
c = []
for b in p:
name = None
lat = None
lon = None
if "name" in b["properties"]:
name = b["properties"]["name"]
if b["geometry"]["type"] == "Point":
lat = b["geometry"]["coordinates"][1]
lon = b["geometry"]["coordinates"][0]
geo = None
else:
geo = b["geometry"]
c.append([name, b["properties"]["amenity"], lat, lon, geo])
x = list(zip(*c))
z = | pd.DataFrame({'name': x[0], 'amenity': x[1], 'lat': x[2], "lon": x[3], "geometry": x[4]}) | pandas.DataFrame |
import datetime
import itertools
import json
import logging
import os
import sqlite3
from sqlite3 import DatabaseError
from typing import Optional, List, Dict, Tuple
import networkx as nx
import numpy as np
import pandas as pd
from ipyleaflet import Map, ScaleControl, FullScreenControl, Polyline, Icon, Marker, Circle, TileLayer, LayerGroup
from ipywidgets import HTML
from pandas.io.sql import DatabaseError as PandasDatabaseError
from scipy.spatial import KDTree
from scipy.stats import norm
from pyridy import config
from pyridy.osm import OSM
from pyridy.osm.utils import is_point_within_line_projection, project_point_onto_line
from pyridy.utils import Sensor, AccelerationSeries, LinearAccelerationSeries, MagnetometerSeries, OrientationSeries, \
GyroSeries, RotationSeries, GPSSeries, PressureSeries, HumiditySeries, TemperatureSeries, WzSeries, LightSeries, \
SubjectiveComfortSeries, AccelerationUncalibratedSeries, MagnetometerUncalibratedSeries, GyroUncalibratedSeries, \
GNSSClockMeasurementSeries, GNSSMeasurementSeries, NMEAMessageSeries
from pyridy.utils.device import Device
from pyridy.utils.tools import generate_random_color
logger = logging.getLogger(__name__)
class RDYFile:
def __init__(self, path: str = "", sync_method: str = "timestamp", cutoff: bool = True,
timedelta_unit: str = 'timedelta64[ns]',
strip_timezone: bool = True, name=""):
"""
Parameters
----------
path: str
Path to the Ridy File
sync_method: str
Sync method to be applied
cutoff: bool, default: True
            If True, cuts the measurements off precisely at the timestamps when the measurement was started and
            stopped. By default, Ridy measurement files can contain several seconds of measurements recorded
            before/after the button press
timedelta_unit: str
            NumPy timedelta unit to be applied
strip_timezone: bool, default: True
Strips timezone from timestamps as np.datetime64 does not support timezones
name: str
            Name of the file; defaults to the filename if not provided
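
        Examples
        --------
        A minimal usage sketch (the path is hypothetical)::

            f = RDYFile("measurements/ride_01.rdy", sync_method="device_time")
            gps = f.measurements[GPSSeries]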
"""
self.path = path
self.name: Optional[str] = name
self.extension: Optional[str] = ""
if sync_method is not None and sync_method not in ["timestamp", "device_time", "gps_time", "ntp_time"]:
            raise ValueError(
                "sync_method argument must be 'timestamp', 'device_time', 'gps_time' or 'ntp_time', not %s" % sync_method)
self.sync_method = sync_method
self.cutoff = cutoff
self.timedelta_unit = timedelta_unit
self.strip_timezone = strip_timezone
# Ridy App Info
self.ridy_version: Optional[str] = None
self.ridy_version_code: Optional[int] = None
# RDY File Infos
self.rdy_format_version: Optional[float] = None
self.rdy_info_name: Optional[str] = None
self.rdy_info_sex: Optional[str] = None
self.rdy_info_age: Optional[int] = None
self.rdy_info_height: Optional[float] = None
self.rdy_info_weight: Optional[float] = None
self.t0: Optional[np.datetime64] = None
self.cs_matrix_string: Optional[str] = None
self.cs_matrix: Optional[np.ndarray] = None # TODO
self.timestamp_when_started: Optional[int] = None
self.timestamp_when_stopped: Optional[int] = None
self.ntp_timestamp: Optional[int] = None
self.ntp_date_time: Optional[np.datetime64] = None
self.device: Optional[Device] = None
self.duration: Optional[float] = None
# Sensors
self.sensors: Optional[List[Sensor]] = []
# Measurement Series
self.measurements = {AccelerationSeries: AccelerationSeries(),
AccelerationUncalibratedSeries: AccelerationUncalibratedSeries(),
LinearAccelerationSeries: LinearAccelerationSeries(),
MagnetometerSeries: MagnetometerSeries(),
MagnetometerUncalibratedSeries: MagnetometerUncalibratedSeries(),
OrientationSeries: OrientationSeries(),
GyroSeries: GyroSeries(),
GyroUncalibratedSeries: GyroUncalibratedSeries(),
RotationSeries: RotationSeries(),
GPSSeries: GPSSeries(),
GNSSClockMeasurementSeries: GNSSClockMeasurementSeries(),
GNSSMeasurementSeries: GNSSMeasurementSeries(),
NMEAMessageSeries: NMEAMessageSeries(),
PressureSeries: PressureSeries(),
TemperatureSeries: TemperatureSeries(),
HumiditySeries: HumiditySeries(),
LightSeries: LightSeries(),
WzSeries: WzSeries(),
SubjectiveComfortSeries: SubjectiveComfortSeries()}
# OSM Data (set by Campaign)
self.osm: Optional[OSM] = None
self.matched_nodes = [] # Nodes from Map Matching
self.matched_ways = [] # Ways from Map Matching
if self.path:
self.load_file(self.path)
if self.timestamp_when_started and self.timestamp_when_stopped:
self.duration = (self.timestamp_when_stopped - self.timestamp_when_started) * 1e-9
if self.sync_method:
self._synchronize()
else:
            logger.warning("RDYFile instantiated without a path")
pass
# def __getitem__(self, idx):
# key = list(self.measurements.keys())[idx]
# return self.measurements[key]
def __iter__(self):
"""
Returns
-------
FileIterator
"""
return FileIterator(self)
def __repr__(self):
return "Filename: %s, T0: %s, Duration: %s" % (self.name,
str(self.t0),
str(datetime.timedelta(seconds=self.duration)))
def _do_candidate_search(self, osm_xy: np.ndarray, track_xy: np.ndarray, hor_acc: np.ndarray):
""" Internal method to search for candidate edges for map matching
"""
# Find the closest coordinates using KDTrees
kd_tree_osm = KDTree(osm_xy)
kd_tree_track = KDTree(track_xy)
# Indices of OSM nodes that are close to each respective GPS point within radius r
indices = kd_tree_track.query_ball_tree(kd_tree_osm, r=100)
c_dict = {} # Dict with node candidates for each GPS coord
edges = [] # Candidate edges with emission probabilities
# Perform search for node candidate on all GPS coords
for i, idxs in enumerate(indices):
# Get unique ways based on indices
c_ways = list(set(list(itertools.chain(*[self.osm.nodes[idx].ways for idx in idxs]))))
# Find candidate line segments
c_segs = []
for w in c_ways:
n = w.nodes
segs = [] # List of suitable line segments
for n1, n2 in zip(n, n[1:]):
x1, y1 = n1.attributes["x"], n1.attributes["y"]
x2, y2 = n2.attributes["x"], n2.attributes["y"]
# Only take those line segment into consideration where the perpendicular projection
# of the GPS coords lies inside the line segment
b = is_point_within_line_projection(line=[[x1, y1], [x2, y2]], point=track_xy[i])
if b:
# Point of orthogonal intersection
p, d = project_point_onto_line(line=[[x1, y1], [x2, y2]], point=track_xy[i])
if d < hor_acc[i]:
p_lon, p_lat = self.osm.utm_proj(p[0], p[1], inverse=True)
segs.append([d, p_lon, p_lat, n1, n2, w.id, None, i])
if segs:
segs = np.array(segs)
i_min = np.argmin(
segs[:, 0]) # Select candidate line segment based on smallest perpendicular distance
n1, n2 = segs[i_min, 3], segs[i_min, 4]
e1 = list(self.osm.G.edges(n1.id, keys=True))
e2 = list(self.osm.G.edges(n2.id, keys=True))
inter = list(set(e1).intersection(e2))
if len(inter) == 0: # TODO
c_seg_e = e1[0]
else:
c_seg_e = inter[0]
c_seg = segs[i_min]
c_segs.append(c_seg)
edges.append([c_seg_e, hor_acc[i], c_seg[0]])
c_dict[i] = {"c_ways": c_ways, "c_segs": c_segs}
# Calculate emission probabilities for each edge candidate
c_edges = {}
if edges:
edges = np.array(edges, dtype='object')
e_probs = norm.pdf(edges[:, 2].astype(float), np.zeros(len(edges)), edges[:, 1].astype(float))
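            # Emission model: a zero-mean normal density evaluated at the perpendicular
            # distance d, with the reported GPS horizontal accuracy as the standard
            # deviation, i.e. e_prob = N(d; mu=0, sigma=hor_acc).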
for i, e_prob in enumerate(e_probs):
if edges[i][0] not in c_edges:
c_edges[edges[i][0]] = {"e_prob": [e_prob]}
else:
c_edges[edges[i][0]]["e_prob"].append(e_prob)
return c_dict, c_edges
def _synchronize(self):
""" Internal method that synchronizes the timestamps to a given sync timestamp
"""
if self.sync_method == "timestamp":
for m in self.measurements.values():
m.synchronize("timestamp", self.timestamp_when_started, timedelta_unit=self.timedelta_unit)
elif self.sync_method == "device_time":
if self.t0:
for m in self.measurements.values():
m.synchronize("device_time", self.timestamp_when_started, self.t0,
timedelta_unit=self.timedelta_unit)
else:
logger.warning("(%s) t0 is None, falling back to timestamp synchronization" % self.name)
self.sync_method = "timestamp"
self._synchronize()
elif self.sync_method == "gps_time":
if len(self.measurements[GPSSeries]) > 0:
sync_timestamp = self.measurements[GPSSeries].time[0]
utc_sync_time = self.measurements[GPSSeries].utc_time[0]
for i, t in enumerate(self.measurements[
GPSSeries].utc_time):
# The first utc_time value ending with 000 is a real GPS measurement
if str(t)[-3:] == "000":
utc_sync_time = t
sync_timestamp = self.measurements[GPSSeries].time[i]
break
sync_time = np.datetime64(int(utc_sync_time * 1e6), "ns")
for m in self.measurements.values():
m.synchronize("gps_time", sync_timestamp, sync_time, timedelta_unit=self.timedelta_unit)
else:
logger.warning("(%s) No GPS time recording, falling back to device_time synchronization" % self.name)
self.sync_method = "device_time"
self._synchronize()
elif self.sync_method == "ntp_time":
if self.ntp_timestamp and self.ntp_date_time:
for m in self.measurements.values():
m.synchronize("ntp_time", self.ntp_timestamp, self.ntp_date_time,
timedelta_unit=self.timedelta_unit)
else:
logger.warning("(%s) No ntp timestamp and datetime, falling back to device_time synchronization" %
self.name)
self.sync_method = "device_time"
self._synchronize()
else:
raise ValueError(
"sync_method must 'timestamp', 'device_time', 'gps_time' or 'ntp_time' not %s" % self.sync_method)
pass
def create_map(self, t_lim: Tuple[np.datetime64, np.datetime64] = None, show_hor_acc: bool = False) -> Map:
""" Creates an ipyleaflet Map using OpenStreetMap and OpenRailwayMap to show the GPS track of the
measurement file
Parameters
----------
t_lim: tuple, default: None
Time limit as a tuple of np.datetime64 to show only parts of the GPS track that are within the specified
time interval
show_hor_acc : bool, default: False
            If True, shows the horizontal accuracy for each measurement point using circles. The likelihood
            that the real position is within the circle is defined as 68 %
Returns
-------
Map
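
        Examples
        --------
        A minimal usage sketch (the file name is hypothetical and the file must
        contain GPS recordings)::

            f = RDYFile("measurement.rdy")
            m = f.create_map(show_hor_acc=True)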
"""
gps_series = self.measurements[GPSSeries]
coords = gps_series.to_ipyleaflef()
time = gps_series.time
hor_acc = gps_series.hor_acc
if coords == [[]]:
logger.warning("(%s) Cant create map, GPSSeries is empty!" % self.name)
else:
if t_lim:
if type(t_lim) != tuple:
raise ValueError("t_lim must be a tuple of np.datetime64")
if t_lim[0] > t_lim[1]:
raise ValueError("The first datetime for t_lim must be smaller than the second!")
mask = (gps_series.time >= t_lim[0]) & (gps_series.time <= t_lim[1])
coords = [c for i, c in enumerate(coords) if mask[i]]
time = [t for i, t in enumerate(gps_series.time) if mask[i]]
hor_acc = [h for i, h in enumerate(gps_series.hor_acc) if mask[i]]
color = generate_random_color("HEX")
m = Map(center=self.determine_track_center()[::-1],
zoom=12,
scroll_wheel_zoom=True,
basemap=config.OPEN_STREET_MAP_DE)
m.add_control(ScaleControl(position='bottomleft'))
m.add_control(FullScreenControl())
# Add map
m.add_layer(config.OPEN_RAILWAY_MAP)
file_polyline = Polyline(locations=coords, color=color, fill=False, weight=4, dash_array='10, 10')
m.add_layer(file_polyline)
start_marker = Marker(location=tuple(coords[0]), draggable=False, icon=config.START_ICON)
end_marker = Marker(location=tuple(coords[-1]), draggable=False, icon=config.END_ICON)
start_message = HTML()
end_message = HTML()
start_message.value = "<p>Start:</p><p>" + self.name + "</p><p>" \
+ str(time[0] or 'n/a') + "</p><p>" \
+ str(getattr(self.device, "manufacturer", "n/a")) + "; " \
+ str(getattr(self.device, "model", "n/a")) + "</p>"
end_message.value = "<p>End:</p><p>" + self.name + "</p><p>" \
+ str(time[-1] or 'n/a') + "</p><p>" \
+ str(getattr(self.device, "manufacturer", "n/a")) + "; " \
+ str(getattr(self.device, "model", "n/a")) + "</p>"
start_marker.popup = start_message
end_marker.popup = end_message
m.add_layer(start_marker)
m.add_layer(end_marker)
if show_hor_acc:
circles = []
for c, h in zip(coords, hor_acc):
circle = Circle()
circle.location = (c[0], c[1])
circle.radius = int(h)
circle.color = "#00549F"
circle.fill_color = "#00549F"
circle.weight = 3
circle.fill_opacity = 0.1
circles.append(circle)
l_circles = LayerGroup(layers=circles)
m.add_layer(l_circles)
return m
def determine_track_center(self, gps_series: Optional[GPSSeries] = None) -> (float, float):
""" Determines the geographical center of the GPSSeries, returns None if the GPSSeries is emtpy.
Parameters
----------
gps_series: GPSSeries, default: None
If not None, takes the given GPSSeries to determine the track center
Returns
-------
float, float
"""
if not gps_series:
gps_series = self.measurements[GPSSeries]
if gps_series.is_empty():
logger.warning("(%s) Cant determine track center, GPSSeries is empty!" % self.name)
else:
center_lon = (gps_series.lon.max() + gps_series.lon.min()) / 2
center_lat = (gps_series.lat.max() + gps_series.lat.min()) / 2
            logger.info("Geographic center of track: Lon: %s, Lat: %s" % (str(center_lon), str(center_lat)))
return center_lon, center_lat
    def do_map_matching(self, v_thres: float = 1, algorithm: str = "pyridy", alpha: float = 1.0, beta: float = 1.0):
""" Performs map matching of the GPS track to closest OSM nodes/ways
Parameters
----------
alpha: float, default: 1.0
            Weighting parameter for the map matching algorithm. Alpha represents the default length of an edge in
            the graph
beta: float, default: 1.0
Beta is a scaling factor for the emission probabilities.
algorithm: str, default: pyridy
            Algorithm to be used; can be "pyridy" or "nx". The pyridy algorithm additionally takes into account how
            switches can be traversed
v_thres: float
Speed threshold, GPS points measured with a velocity below v_thres [m/s] will not be considered
Returns
-------
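
        Examples
        --------
        A minimal sketch (assumes ``self.osm`` was attached by the Campaign and the
        file contains a usable GPS track)::

            f.do_map_matching(v_thres=1.0, algorithm="pyridy")
            matched_ways = f.matched_ways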
"""
if algorithm not in ["nx", "pyridy"]:
raise ValueError("(%s) Algorithm must be either nx or pyridy, not %s" % (self.name, algorithm))
if self.osm:
# Prepare data
gps_coords = self.measurements[GPSSeries]
lon = gps_coords.lon[gps_coords.speed > v_thres]
lat = gps_coords.lat[gps_coords.speed > v_thres]
hor_acc = gps_coords.hor_acc[gps_coords.speed > v_thres]
n_gps = len(lon)
x, y = self.osm.utm_proj(lon, lat)
track_xy = np.vstack([x, y]).T
osm_xy = self.osm.get_coords(frmt="xy")
c_dict, c_edges = self._do_candidate_search(osm_xy, track_xy, hor_acc)
# Initialize edge weights
for e in self.osm.G.edges:
self.osm.G.edges[e]["c_weight"] = 1*alpha
for k in c_edges.keys():
self.osm.G.edges[k]["c_weight"] = 1 / (1 + beta*sum(c_edges[k]["e_prob"]))
# Perform map matching
s_n = None
for i in range(n_gps):
if len(c_dict[i]["c_segs"]) > 0:
s_n = c_dict[i]["c_segs"][np.array(c_dict[i]["c_segs"])[:, 0].argmin()][3].id
break
e_n = None
for i in reversed(range(n_gps)):
if len(c_dict[i]["c_segs"]) > 0:
e_n = c_dict[i]["c_segs"][np.array(c_dict[i]["c_segs"])[:, 0].argmin()][4].id
break
if not s_n or not e_n:
logger.warning("(%s) Map matching failed, no start or end node found!" % self.name)
else:
# Use Dijkstra's shortest path to perform map matching, but use a weighting based on emission
# probabilities instead of node distances
if algorithm == 'pyridy':
# Matched node ids
_, _, m_n_ids = self.osm.get_shortest_path(source=s_n, target=e_n, weight="c_weight")
else:
m_n_ids = nx.shortest_path(self.osm.G, source=s_n, target=e_n, weight="c_weight")
self.matched_nodes = [self.osm.node_dict[n] for n in m_n_ids] # Matched nodes
m_w_ids = [self.osm.G[n1][n2][0]["way_id"] for n1, n2 in zip(m_n_ids, m_n_ids[1:])]
self.matched_ways = list(set([self.osm.way_dict[w_id] for w_id in m_w_ids])) # Mapped Ways
logger.info("(%s) Found %d nodes that match the files GPS track!" % (self.name,
len(self.matched_nodes)))
else:
logger.warning("(%s) Can't do map matching since no file contains no OSM data" % self.name)
pass
def get_integrity_report(self):
""" Returns a dict that contains information which measurement types are available in the file
Returns
-------
dict
"""
report = {}
for k, v in self.measurements.items():
if len(v) > 0:
report[k.__name__] = True
else:
report[k.__name__] = False
        attr = self.__dict__.copy()
        if self.device is not None:
            attr.update(self.device.__dict__.copy())
        for a in ["db_con", "measurements", "device", "sensors"]:
            attr.pop(a, None)  # not every attribute is guaranteed to be set, so avoid KeyError
pass
report.update(attr)
return report
def load_file(self, path: str):
""" Loads a single Ridy file located at path
Parameters
----------
path Path to the ridy file
"""
logger.info("Loading file: %s" % path)
_, self.extension = os.path.splitext(path)
_, self.name = os.path.split(path)
if self.extension == ".rdy":
with open(path, 'r') as file:
rdy = json.load(file)
if 'Ridy_Version' in rdy:
self.ridy_version = rdy['Ridy_Version']
else:
logger.info("No Ridy_Version in file: %s" % self.name)
self.ridy_version = None
if 'Ridy_Version_Code' in rdy:
self.ridy_version_code = rdy['Ridy_Version_Code']
else:
logger.info("No Ridy_Version_Code in file: %s" % self.name)
self.ridy_version_code = None
if 'RDY_Format_Version' in rdy:
self.rdy_format_version = rdy['RDY_Format_Version']
else:
logger.info("No RDY_Format_Version in file: %s" % self.name)
self.rdy_format_version = None
if 'RDY_Info_Name' in rdy:
self.rdy_info_name = rdy['RDY_Info_Name']
else:
logger.info("No RDY_Info_Name in file: %s" % self.name)
self.rdy_info_name = None
if 'RDY_Info_Sex' in rdy:
self.rdy_info_sex = rdy['RDY_Info_Sex']
else:
logger.info("No RDY_Info_Sex in file: %s" % self.name)
self.rdy_info_sex = None
if 'RDY_Info_Age' in rdy:
self.rdy_info_age = rdy['RDY_Info_Age']
else:
logger.info("No RDY_Info_Age in file: %s" % self.name)
self.rdy_info_age = None
if 'RDY_Info_Height' in rdy:
self.rdy_info_height = rdy['RDY_Info_Height']
else:
logger.info("No RDY_Info_Height in file: %s" % self.name)
self.rdy_info_height = None
if 'RDY_Info_Weight' in rdy:
self.rdy_info_weight = rdy['RDY_Info_Weight']
else:
logger.info("No RDY_Info_Weight in file: %s" % self.name)
self.rdy_info_weight = None
if 't0' in rdy:
if self.strip_timezone:
t0 = datetime.datetime.fromisoformat(rdy['t0']).replace(tzinfo=None)
self.t0 = np.datetime64(t0)
else:
self.t0 = np.datetime64(rdy['t0'])
else:
self.t0 = None
logger.info("No t0 in file: %s" % self.name)
if 'cs_matrix_string' in rdy:
self.cs_matrix_string = rdy['cs_matrix_string']
else:
self.cs_matrix_string = None
logger.info("No t0 in file: %s" % self.name)
if 'timestamp_when_started' in rdy:
self.timestamp_when_started = rdy['timestamp_when_started']
else:
self.timestamp_when_started = None
logger.info("No timestamp_when_started in file: %s" % self.name)
if 'timestamp_when_stopped' in rdy:
self.timestamp_when_stopped = rdy['timestamp_when_stopped']
else:
self.timestamp_when_stopped = None
logger.info("No timestamp_when_stopped in file: %s" % self.name)
if 'ntp_timestamp' in rdy:
self.ntp_timestamp = rdy['ntp_timestamp']
else:
self.ntp_timestamp = None
logger.info("No ntp_timestamp in file: %s" % self.name)
if 'ntp_date_time' in rdy:
if self.strip_timezone:
ntp_datetime_str = rdy['ntp_date_time']
if ntp_datetime_str:
ntp_date_time = datetime.datetime.fromisoformat(ntp_datetime_str).replace(tzinfo=None)
self.ntp_date_time = np.datetime64(ntp_date_time)
else:
self.ntp_date_time = None
else:
                    self.ntp_date_time = np.datetime64(rdy['ntp_date_time'])
else:
self.ntp_date_time = None
logger.info("No ntp_date_time in file: %s" % self.name)
if "device" in rdy:
self.device = Device(**rdy['device_info'])
else:
logger.info("No device information in file: %s" % self.name)
if "sensors" in rdy:
for sensor in rdy['sensors']:
self.sensors.append(Sensor(**sensor))
else:
logger.info("No sensor descriptions in file: %s" % self.name)
if "acc_series" in rdy:
self.measurements[AccelerationSeries] = AccelerationSeries(rdy_format_version=self.rdy_format_version,
**rdy['acc_series'])
else:
logger.info("No Acceleration Series in file: %s" % self.name)
if "acc_uncal_series" in rdy:
self.measurements[AccelerationUncalibratedSeries] = AccelerationUncalibratedSeries(
rdy_format_version=self.rdy_format_version, **rdy['acc_uncal_series'])
else:
logger.info("No uncalibrated Acceleration Series in file: %s" % self.name)
if "lin_acc_series" in rdy:
self.measurements[LinearAccelerationSeries] = LinearAccelerationSeries(
rdy_format_version=self.rdy_format_version,
**rdy['lin_acc_series'])
else:
logger.info("No Linear Acceleration Series in file: %s" % self.name)
if "mag_series" in rdy:
self.measurements[MagnetometerSeries] = MagnetometerSeries(rdy_format_version=self.rdy_format_version,
**rdy['mag_series'])
else:
logger.info("No Magnetometer Series in file: %s" % self.name)
if "mag_uncal_series" in rdy:
self.measurements[MagnetometerUncalibratedSeries] = MagnetometerUncalibratedSeries(
rdy_format_version=self.rdy_format_version, **rdy['mag_uncal_series'])
else:
logger.info("No uncalibrated Magnetometer Series in file: %s" % self.name)
if "orient_series" in rdy:
self.measurements[OrientationSeries] = OrientationSeries(rdy_format_version=self.rdy_format_version,
**rdy['orient_series'])
else:
logger.info("No Orientation Series in file: %s" % self.name)
if "gyro_series" in rdy:
self.measurements[GyroSeries] = GyroSeries(rdy_format_version=self.rdy_format_version,
**rdy['gyro_series'])
else:
logger.info("No Gyro Series in file: %s" % self.name)
if "gyro_uncal_series" in rdy:
self.measurements[GyroUncalibratedSeries] = GyroUncalibratedSeries(
rdy_format_version=self.rdy_format_version, **rdy['gyro_uncal_series'])
else:
logger.info("No uncalibrated Gyro Series in file: %s" % self.name)
if "rot_series" in rdy:
self.measurements[RotationSeries] = RotationSeries(rdy_format_version=self.rdy_format_version,
**rdy['rot_series'])
else:
logger.info("No Rotation Series in file: %s" % self.name)
if "gps_series" in rdy:
self.measurements[GPSSeries] = GPSSeries(rdy_format_version=self.rdy_format_version,
**rdy['gps_series'])
else:
logger.info("No GPS Series in file: %s" % self.name)
if "gnss_series" in rdy:
self.measurements[GNSSMeasurementSeries] = GNSSMeasurementSeries(
rdy_format_version=self.rdy_format_version, **rdy['gnss_series'])
else:
logger.info("No GPS Series in file: %s" % self.name)
if "gnss_clock_series" in rdy:
self.measurements[GNSSClockMeasurementSeries] = GNSSClockMeasurementSeries(
rdy_format_version=self.rdy_format_version, **rdy['gnss_clock_series'])
else:
logger.info("No GNSS Clock Series in file: %s" % self.name)
if "nmea_series" in rdy:
self.measurements[NMEAMessageSeries] = NMEAMessageSeries(
rdy_format_version=self.rdy_format_version, **rdy['nmea_series'])
else:
logger.info("No GPS Series in file: %s" % self.name)
if "pressure_series" in rdy:
self.measurements[PressureSeries] = PressureSeries(rdy_format_version=self.rdy_format_version,
**rdy['pressure_series'])
else:
logger.info("No Pressure Series in file: %s" % self.name)
if "temperature_series" in rdy:
self.measurements[TemperatureSeries] = TemperatureSeries(rdy_format_version=self.rdy_format_version,
**rdy['temperature_series'])
else:
logger.info("No Temperature Series in file: %s" % self.name)
if "humidity_series" in rdy:
self.measurements[HumiditySeries] = HumiditySeries(rdy_format_version=self.rdy_format_version,
**rdy['humidity_series'])
else:
logger.info("No Humidity Series in file: %s" % self.name)
if "light_series" in rdy:
self.measurements[LightSeries] = LightSeries(rdy_format_version=self.rdy_format_version,
**rdy['light_series'])
else:
logger.info("No Light Series in file: %s" % self.name)
if "wz_series" in rdy:
self.measurements[WzSeries] = WzSeries(rdy_format_version=self.rdy_format_version,
**rdy['wz_series'])
else:
logger.info("No Wz Series in file: %s" % self.name)
if "subjective_comfort_series" in rdy:
self.measurements[SubjectiveComfortSeries] = SubjectiveComfortSeries(
rdy_format_version=self.rdy_format_version,
**rdy['subjective_comfort_series'])
else:
logger.info("No Subjective Comfort Series in file: %s" % self.name)
pass
elif self.extension == ".sqlite":
db_con = sqlite3.connect(path)
try:
info: Dict = dict(pd.read_sql_query("SELECT * from measurement_information_table", db_con))
except (DatabaseError, PandasDatabaseError) as e:
logger.error(e)
# Older files can contain wrong table name
try:
info = dict(pd.read_sql_query("SELECT * from measurment_information_table", db_con))
except (DatabaseError, PandasDatabaseError) as e:
logger.error(
"DatabaseError occurred when accessing measurement_information_table, file: %s" % self.name)
logger.error(e)
info = {}
try:
sensor_df = pd.read_sql_query("SELECT * from sensor_descriptions_table", db_con)
for _, row in sensor_df.iterrows():
self.sensors.append(Sensor(**dict(row)))
except (DatabaseError, PandasDatabaseError) as e:
logger.error(
"DatabaseError occurred when accessing sensor_descriptions_table, file: %s" % self.name)
logger.error(e)
try:
device_df = pd.read_sql_query("SELECT * from device_information_table", db_con)
self.device = Device(**dict(device_df))
except (DatabaseError, PandasDatabaseError) as e:
logger.error("DatabaseError occurred when accessing device_information_table, file: %s" % self.name)
logger.error(e)
self.device = Device()
# Info
if 'ridy_version' in info and len(info['ridy_version']) > 1:
logger.info("Measurement information table contains more than 1 row!")
if 'ridy_version' in info and len(info['ridy_version']) > 0:
self.ridy_version = info['ridy_version'].iloc[-1]
if 'ridy_version_code' in info and len(info['ridy_version_code']) > 0:
self.ridy_version_code = info['ridy_version_code'].iloc[-1]
if 'rdy_format_version' in info and len(info['rdy_format_version']) > 0:
self.rdy_format_version = info['rdy_format_version'].iloc[-1]
if 'rdy_info_name' in info and len(info['rdy_info_name']) > 0:
self.rdy_info_name = info['rdy_info_name'].iloc[-1]
if 'rdy_info_sex' in info and len(info['rdy_info_sex']) > 0:
self.rdy_info_sex = info['rdy_info_sex'].iloc[-1]
if 'rdy_info_age' in info and len(info['rdy_info_age']) > 0:
self.rdy_info_age = info['rdy_info_age'].iloc[-1]
if 'rdy_info_height' in info and len(info['rdy_info_height']) > 0:
self.rdy_info_height = info['rdy_info_height'].iloc[-1]
if 'rdy_info_weight' in info and len(info['rdy_info_weight']) > 0:
self.rdy_info_weight = info['rdy_info_weight'].iloc[-1]
if 't0' in info and len(info['t0']) > 0:
if self.strip_timezone:
t0 = datetime.datetime.fromisoformat(info['t0'].iloc[-1]).replace(tzinfo=None)
self.t0 = np.datetime64(t0)
else:
self.t0 = np.datetime64(info['t0'].iloc[-1])
if 'cs_matrix_string' in info and len(info['cs_matrix_string']) > 0:
self.cs_matrix_string = info['cs_matrix_string'].iloc[-1]
            if 'timestamp_when_started' in info and len(info['timestamp_when_started']) > 0:
self.timestamp_when_started = info['timestamp_when_started'].iloc[-1]
if 'timestamp_when_stopped' in info and len(info['timestamp_when_stopped']) > 0:
self.timestamp_when_stopped = info['timestamp_when_stopped'].iloc[-1]
if 'ntp_timestamp' in info and len(info['ntp_timestamp']) > 0:
self.ntp_timestamp = info['ntp_timestamp'].iloc[-1]
if 'ntp_date_time' in info and len(info['ntp_date_time']) > 0:
if self.strip_timezone:
ntp_datetime_str = info['ntp_date_time'].iloc[-1]
if ntp_datetime_str:
ntp_date_time = datetime.datetime.fromisoformat(ntp_datetime_str).replace(tzinfo=None)
self.ntp_date_time = np.datetime64(ntp_date_time)
else:
self.ntp_date_time = None
else:
self.ntp_date_time = np.datetime64(info['ntp_date_time'].iloc[-1])
# Measurements
try:
acc_df = pd.read_sql_query("SELECT * from acc_measurements_table", db_con)
self.measurements[AccelerationSeries] = AccelerationSeries(rdy_format_version=self.rdy_format_version,
**dict(acc_df))
except (DatabaseError, PandasDatabaseError) as e:
logger.error("DatabaseError occurred when accessing acc_measurements_table, file: %s" % self.name)
logger.error(e)
try:
acc_uncal_df = pd.read_sql_query("SELECT * from acc_uncal_measurements_table", db_con)
self.measurements[AccelerationUncalibratedSeries] = AccelerationUncalibratedSeries(
rdy_format_version=self.rdy_format_version, **dict(acc_uncal_df))
except (DatabaseError, PandasDatabaseError) as e:
logger.error("DatabaseError occurred when accessing acc_uncal_measurements_table, file: %s" % self.name)
logger.error(e)
try:
lin_acc_df = pd.read_sql_query("SELECT * from lin_acc_measurements_table", db_con)
self.measurements[LinearAccelerationSeries] = LinearAccelerationSeries(
rdy_format_version=self.rdy_format_version,
**dict(lin_acc_df))
except (DatabaseError, PandasDatabaseError) as e:
logger.error(
"DatabaseError occurred when accessing lin_acc_measurements_table, file: %s" % self.name)
logger.error(e)
try:
mag_df = pd.read_sql_query("SELECT * from mag_measurements_table", db_con)
self.measurements[MagnetometerSeries] = MagnetometerSeries(rdy_format_version=self.rdy_format_version,
**dict(mag_df))
except (DatabaseError, PandasDatabaseError) as e:
logger.error("DatabaseError occurred when accessing mag_measurements_table, file: %s" % self.name)
logger.error(e)
try:
mag_uncal_df = pd.read_sql_query("SELECT * from mag_uncal_measurements_table", db_con)
self.measurements[MagnetometerUncalibratedSeries] = MagnetometerUncalibratedSeries(
rdy_format_version=self.rdy_format_version, **dict(mag_uncal_df))
except (DatabaseError, PandasDatabaseError) as e:
logger.error("DatabaseError occurred when accessing mag_uncal_measurements_table, file: %s" % self.name)
logger.error(e)
try:
orient_df = pd.read_sql_query("SELECT * from orient_measurements_table", db_con)
self.measurements[OrientationSeries] = OrientationSeries(rdy_format_version=self.rdy_format_version,
**dict(orient_df))
except (DatabaseError, PandasDatabaseError) as e:
logger.error(
"DatabaseError occurred when accessing orient_measurements_table, file: %s" % self.name)
logger.error(e)
try:
gyro_df = pd.read_sql_query("SELECT * from gyro_measurements_table", db_con)
self.measurements[GyroSeries] = GyroSeries(rdy_format_version=self.rdy_format_version,
**dict(gyro_df))
except (DatabaseError, PandasDatabaseError) as e:
logger.error("DatabaseError occurred when accessing gyro_measurements_table, file: %s" % self.name)
logger.error(e)
try:
gyro_uncal_df = pd.read_sql_query("SELECT * from gyro_uncal_measurements_table", db_con)
self.measurements[GyroUncalibratedSeries] = GyroUncalibratedSeries(
rdy_format_version=self.rdy_format_version, **dict(gyro_uncal_df))
except (DatabaseError, PandasDatabaseError) as e:
logger.error(
"DatabaseError occurred when accessing gyro_uncal_measurements_table, file: %s" % self.name)
logger.error(e)
try:
rot_df = pd.read_sql_query("SELECT * from rot_measurements_table", db_con)
self.measurements[RotationSeries] = RotationSeries(rdy_format_version=self.rdy_format_version,
**dict(rot_df))
except (DatabaseError, PandasDatabaseError) as e:
logger.error("DatabaseError occurred when accessing rot_measurements_table, file: %s" % self.name)
logger.error(e)
try:
gps_df = pd.read_sql_query("SELECT * from gps_measurements_table", db_con)
self.measurements[GPSSeries] = GPSSeries(rdy_format_version=self.rdy_format_version,
**dict(gps_df))
except (DatabaseError, PandasDatabaseError) as e:
logger.error("DatabaseError occurred when accessing gps_measurements_table, file: %s" % self.name)
logger.error(e)
try:
gnss_df = pd.read_sql_query("SELECT * from gnss_measurement_table", db_con)
self.measurements[GNSSMeasurementSeries] = GNSSMeasurementSeries(
rdy_format_version=self.rdy_format_version, **dict(gnss_df))
except (DatabaseError, PandasDatabaseError) as e:
logger.error("DatabaseError occurred when accessing gnss_measurement_table, file: %s" % self.name)
logger.error(e)
try:
gnss_clock_df = | pd.read_sql_query("SELECT * from gnss_clock_measurement_table", db_con) | pandas.read_sql_query |
import json
import math
import numpy as np
import os.path
import pandas as pd
import skimage.io
import sys
from xview3.utils.grid_index import GridIndex
distance_thresh = 10
def merge(preds):
for i, pred in enumerate(preds):
if 'input_idx' in pred.columns:
pred = pred.drop(columns=['input_idx'])
pred.insert(len(pred.columns), 'input_idx', [i]*len(pred))
pred = pd.concat(preds).reset_index()
new_scores = {}
for scene_id in pred.scene_id.unique():
print(scene_id)
cur = pred[pred.scene_id == scene_id]
# Insert into grid index.
grid_index = GridIndex(distance_thresh)
for index, row in cur.iterrows():
grid_index.insert((row.detect_scene_row, row.detect_scene_column), index)
# Set score of each point to the average over highest-scoring neighbors from each input_idx.
for index, row in cur.iterrows():
rect = [
row.detect_scene_row - distance_thresh,
row.detect_scene_column - distance_thresh,
row.detect_scene_row + distance_thresh,
row.detect_scene_column + distance_thresh,
]
best = [0.0]*len(preds)
for other_index in grid_index.search(rect):
other = pred.loc[other_index]
dx = other.detect_scene_column - row.detect_scene_column
dy = other.detect_scene_row - row.detect_scene_row
distance = math.sqrt(dx*dx+dy*dy)
if distance > distance_thresh:
continue
best[other.input_idx] = max(best[other.input_idx], other.score)
best[row.input_idx] = row.score
new_scores[index] = np.mean(best)
print('set scores')
for index, score in new_scores.items():
pred.loc[index, 'score'] = score
pred = pred.drop(columns=['index'])
return pred
if __name__ == "__main__":
in_paths = sys.argv[1:-1]
out_path = sys.argv[-1]
preds = []
for i, in_path in enumerate(in_paths):
pred = | pd.read_csv(in_path) | pandas.read_csv |
# -*- coding: utf-8 -*-
import sys, os
import pandas as pd
import openpyxl
from openpyxl.styles import PatternFill
import numpy as np
from collections import defaultdict
from scanner_map import searchKey, CertifiedManufacturerModelNameCTDict, CertifiedManufacturerCTDict, TrueManufacturerModelNameCTDict, TrueManufacturerCTDict
from scanner_map import ScannerType, CertifiedManufacturerModelNameICADict, CertifiedManufacturerICADict, TrueManufacturerModelNameICADict, TrueManufacturerICADict
from datetime import datetime
from openpyxl.utils import get_column_letter
from openpyxl.styles import Font, Color, Border, Side
from openpyxl.styles.differential import DifferentialStyle
from openpyxl.formatting import Rule
def highlight_columns(sheet, columns=[], color='A5A5A5', offset=2):
for col in columns:
cell = sheet.cell(1, col+offset)
cell.fill = PatternFill(start_color=color, end_color=color, fill_type = 'solid')
return sheet
def merge_defaultdicts(d,d1):
for k,v in d1.items():
if (k in d):
d[k].update(d1[k])
else:
d[k] = d1[k]
return d
def covertDate(date_str):
month_lookup = defaultdict(lambda: None, {'JAN':1, 'FEB':2, 'MAR':3, 'APR':4, 'MAY':5, 'JUN':6, 'JUL':7, 'AUG':8,'SEP':9, 'OCT':10,'NOV':11, 'DEC':12})
day = str(date_str[0:2])
    month = str(month_lookup[date_str[2:5]]).zfill(2)  # zero-pad so single-digit months parse correctly with '%m'
year = date_str[5:9]
s = year + month + day
return datetime.strptime(s, '%Y%m%d')
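# Illustrative example: covertDate('01JAN2019') -> datetime.datetime(2019, 1, 1, 0, 0)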
def checkModalities(modList0, modList1):
for m0 in modList0:
for m1 in modList1:
if m0==m1:
return True
return False
def splitScannerList(filepath_scanner):
#filepath_scanner = 'H:/cloud/cloud_data/Projects/CACSFilter/data/scanner/scanner_correspondence_V05_manual.xlsx'
df_scanner = pd.read_excel(filepath_scanner, 'linear', index_col=0)
df_missing_CT = pd.DataFrame(columns=df_scanner.columns)
df_missing_XA = pd.DataFrame(columns=df_scanner.columns)
df_missing = df_scanner[(df_scanner['ECRF_MISSING']==True) & (df_scanner['ITT']!=2)]
for index, row in df_missing.iterrows():
if 'DICOM XA' in row['ManualCheck']:
df_missing_XA = df_missing_XA.append(row)
if 'DICOM CT' in row['ManualCheck']:
df_missing_CT = df_missing_CT.append(row)
    # Open the scanner workbook in append mode
writer = pd.ExcelWriter(filepath_scanner, engine="openpyxl", mode="a")
# Update CT sheet
sheet_name = 'ECRF_MISSING_CT'
workbook = writer.book
df_missing_CT.to_excel(writer, sheet_name=sheet_name, index=False)
sheet = workbook[sheet_name]
# Update XA sheet
sheet_name = 'ECRF_MISSING_XA'
workbook = writer.book
df_missing_XA.to_excel(writer, sheet_name=sheet_name, index=False)
sheet = workbook[sheet_name]
writer.save()
# Read discharge data
filepath_dicom = 'H:/cloud/cloud_data/Projects/CACSFilter/data/scanner/discharge_dicom_27082020_OT.xlsx'
filepath_ecrf_study = 'H:/cloud/cloud_data/Projects/CACSFilter/data/scanner/ecrf_study_20200827.xlsx'
filepath_scanner_old = 'H:/cloud/cloud_data/Projects/CACSFilter/data/scanner/scanner_correspondence_V04_manual.xlsx'
filepath_scanner = 'H:/cloud/cloud_data/Projects/CACSFilter/data/scanner/scanner_correspondence.xlsx'
df_dicom = pd.read_excel(filepath_dicom, 'linear', index_col=0)
#df_dicom=df_dicom[0:1000]
df_dicom.replace(to_replace=[np.nan], value='', inplace=True)
df_ecrf = pd.read_excel(filepath_ecrf_study, 'Tabelle1')
#df_ecrf=df_ecrf[0:1000]
df_scanner_old = pd.read_excel(filepath_scanner_old, 'linear', index_col=0)
df_scanner_old.replace(to_replace=[np.nan], value='', inplace=True)
columns_scanner_rename=['PatientID', 'Site', 'ITT', 'RD_MB', '1. Date of CT', 'Date of ICA scan',
'Date of ICA scan 2', 'Date of staged PCI 1', 'Date of staged PCI 2',
'Date of staged PCI 3', 'duplicate entry', 'FFR', 'MRI_visite',
'Date of Echo', 'Date of PET', 'Date of SPECT:', 'Date of FU_CT-scan',
'Date cec_ct', 'Date pet ct', 'Date ldct', 'ldct 3m', 'ldct 6m',
'ldct 12m', 'Date FU ICA scan']
columns_scanner=['PatientID', 'Site', 'ITT', 'RD_MB',
'1. Date of CT', '1. Date of CT StudyInstanceUID',
'Date of ICA scan', 'Date of ICA scan StudyInstanceUID',
'Date of ICA scan 2', 'Date of ICA scan 2 StudyInstanceUID',
'Date of staged PCI 1', 'Date of staged PCI 1 StudyInstanceUID',
'Date of staged PCI 2', 'Date of staged PCI 2 StudyInstanceUID',
'Date of staged PCI 3', 'Date of staged PCI 3 StudyInstanceUID',
'duplicate entry',
'FFR', 'FFR StudyInstanceUID',
'MRI_visite',
'Date of Echo', 'Date of Echo StudyInstanceUID',
'Date of PET', 'Date of PET StudyInstanceUID',
'Date of SPECT:', 'Date of SPECT: StudyInstanceUID',
'Date of FU_CT-scan', 'Date of FU_CT-scan StudyInstanceUID',
'Date cec_ct', 'Date cec_ct StudyInstanceUID',
'Date pet ct', 'Date pet ct StudyInstanceUID',
'Date ldct', 'Date ldct StudyInstanceUID',
'ldct 3m', 'ldct 3m StudyInstanceUID',
'ldct 6m', 'ldct 6m StudyInstanceUID',
'ldct 12m', 'ldct 12m StudyInstanceUID',
'Date FU ICA scan', 'Date FU ICA scan StudyInstanceUID']
columns_scanner_missing = [x for x in columns_scanner if x not in columns_scanner_rename]
#columns_result = ['OK', 'DICOM_MISSING', 'ECRF_MISSING', 'DICOM_ECRF_MISMATCH']
columns_result = ['DICOM_MISSING', 'ECRF_MISSING', 'ECRF_MISSING_SeriesInstanceUID']
columns_ecrf=['Patient identifier', 'Centre name (mnpctrname)', 'ITT', 'RD_MB', '1. Date of CT', 'Date of ICA scan',
'Date of ICA scan 2', 'Date of staged PCI 1', 'Date of staged PCI 2',
'Date of staged PCI 3', 'duplicate entry ', 'FFR', 'MRI_visite',
'Date of Echo', 'Date of PET', 'Date of SPECT:', 'Date of FU_CT-scan:',
'Date cec_ct', 'Date pet ct', 'Date ldct:', 'ldct 3m', 'ldct 6m',
'ldct 12m', 'Date FU ICA scan:']
dates_required = ['1. Date of CT', 'Date of ICA scan', 'Date of ICA scan 2', 'Date of staged PCI 1', 'Date of staged PCI 2',
'Date of staged PCI 3']
modalities_required = defaultdict(lambda: None, {'1. Date of CT': ['CT'], 'Date of ICA scan': ['XA'], 'Date of ICA scan 2': ['XA'],
'Date of staged PCI 1': ['XA'], 'Date of staged PCI 2': ['XA'], 'Date of staged PCI 3': ['XA']})
dates_sidestudy = ['FFR','Date of Echo', 'Date of PET', 'Date of SPECT:', 'Date of FU_CT-scan',
'Date cec_ct', 'Date pet ct', 'Date ldct', 'ldct 3m', 'ldct 6m','ldct 12m', 'Date FU ICA scan']
modalities_sidestudy = defaultdict(lambda: None, {'FFR': ['XA'], 'Date of Echo': ['US'], 'Date of PET': ['CT','PT'], 'Date of SPECT:': ['CT','NM'], 'Date of FU_CT-scan': ['CT'],
'Date cec_ct': ['CT'], 'Date pet ct': ['PT'], 'Date ldct': ['CT'], 'ldct 3m': ['CT'], 'ldct 6m': ['CT'],'ldct 12m': ['CT'],
'Date FU ICA scan': ['XA']})
dates_all = dates_required + dates_sidestudy
# f = 'H:/cloud/cloud_data/Projects/BIOQIC/08_Research/PACSServer/date.sas7bdat'
# f = 'C:/Users/bernifoellmer/Downloads/SASVisualForecasting_sampledatasets/skinproduct_vfdemo.sas7bdat'
# db = pd.read_sas(f)
# Create dataframe with patient per line
df_scanner = pd.DataFrame()
df_dicom_study = df_dicom.drop_duplicates(subset=['StudyInstanceUID'], ignore_index=True)
# Convert modalities into list of modalities
df_dicom_study.reset_index(drop=True,inplace=True)
for index, row in df_dicom_study.iterrows():
print(index)
#sys.exit()
df = df_dicom[df_dicom['StudyInstanceUID']==row['StudyInstanceUID']]
modList=list(set(list(df['Modality'])))
modList_str = ','.join(modList)
df_dicom_study.loc[index, 'Modality'] = modList_str
df_ecrf_study = df_ecrf.rename(columns = dict(zip(columns_ecrf, columns_scanner_rename)))
df_ecrf_study = df_ecrf_study[columns_scanner_rename]
# Convert date
for ecrf_date in dates_all:
for index, row in df_ecrf_study.iterrows():
date_str = df_ecrf_study.loc[index, ecrf_date]
#print('ecrf_date', ecrf_date)
#print('index', index)
#print('date_str', date_str)
if (type(date_str)==str) and (not date_str=='.'):
df_ecrf_study.loc[index, ecrf_date] = covertDate(date_str)
# date_str = df_ecrf_study.loc[277, 'FFR']
# d=covertDate(date_str)
colmax=[]
for index_ecrf, row_ecrf in df_ecrf_study.iterrows():
#sys.exit()
df_patient = df_dicom_study[df_dicom_study['PatientID']==row_ecrf['PatientID']]
df_patient.sort_values('StudyDate', inplace=True)
df_patient.reset_index(inplace=True)
print('index_ecrf:', index_ecrf)
s = row_ecrf[columns_scanner_rename]
for index, row in df_patient.iterrows():
# Filter wrong ManufacturerModelName and Manufacturer
TrueManufacturerList = [row['Manufacturer']]
TrueManufacturer_str = ','.join(list(set(TrueManufacturerList)))
TrueManufacturer = searchKey(TrueManufacturerCTDict, TrueManufacturer_str)
TrueManufacturerModelNameList = [row['ManufacturerModelName']]
TrueManufacturerModelName_str = ','.join(list(set(TrueManufacturerModelNameList)))
TrueManufacturerModelName = searchKey(TrueManufacturerModelNameCTDict, TrueManufacturerModelName_str)
s['StudyDate' + '_' + str(index).zfill(2)] = datetime.strptime(str(row['StudyDate']), '%Y%m%d')
s['StudyInstanceUID' + '_' + str(index).zfill(2)] = row['StudyInstanceUID']
s['Modality' + '_' + str(index).zfill(2)] = row['Modality']
s['TrueManufacturer' + '_' + str(index).zfill(2)] = TrueManufacturer
s['TrueManufacturerModelName' + '_' + str(index).zfill(2)] = TrueManufacturerModelName
s['EcrfFound' + '_' + str(index).zfill(2)] = ''
if len(s.keys())>len(colmax):
colmax = list(s.keys())
df_scanner = df_scanner.append(s, ignore_index=True, sort=True)
# Add columns_scanner_missing
# Reindex columns
# df_scanner = df_scanner[colmax]
# df_scanner['DICOM_MISSING']=False
# df_scanner['ECRF_MISSING']=False
#for x in columns_scanner_missing: df_scanner[x]=''
colmax2=colmax.copy()
for x in columns_scanner_missing:
if ' StudyInstanceUID' in x:
#sys.exit()
k = x[0:-17]
idx = colmax2.index(k)
colmax2 = colmax2[0:idx+1] + [x] + colmax2[idx+1:]
for x in columns_scanner_missing: df_scanner[x]=''
df_scanner = df_scanner[colmax2]
df_scanner['DICOM_MISSING']=False
df_scanner['ECRF_MISSING']=False
df_scanner['ManualCheck']=''
df_scanner['Solved']=''
df_scanner['EI']=''
df_scanner['MB']=''
df_scanner['BF']=''
colmax2 = colmax2 + ['DICOM_MISSING', 'ECRF_MISSING', 'ManualCheck', 'Solved', 'EI', 'MB', 'BF']
# Create color dataframe
df_scanner_color = df_scanner.copy()
# Check dates from ecrf
columns_study = [c for c in df_scanner.columns if 'StudyDate' in c]
columns_study_mod = [c for c in df_scanner.columns if 'Modality' in c]
columns_study_id = [c for c in df_scanner.columns if 'StudyInstanceUID_' in c]
columns_study_found= [c for c in df_scanner.columns if 'Found in ecrf' in c]
modalities_all = merge_defaultdicts(modalities_required , modalities_sidestudy)
for index, row in df_scanner.iterrows():
print('index', index)
#if index==103:
# sys.exit()
dates_dicom = list(row[columns_study])
mod_dicom = list(row[columns_study_mod])
for k in dates_all:
if(not pd.isnull(row[k])) and (not row[k]=='.'):
if not row[k] in dates_dicom and k in dates_required:
df_scanner_color.loc[index, k] = 'RED'
df_scanner.loc[index, 'DICOM_MISSING'] = True
#print(index, 'ECRF_MISSING')
#sys.exit()
else:
#if index==844:
# sys.exit()
idx = [i for i,x in enumerate(dates_dicom) if x == row[k]]
#idx_mod = [i for i in idx if modalities_all[k] in mod_dicom[i]]
idx_mod = [i for i in idx if checkModalities(modalities_all[k], mod_dicom[i].split(','))]
for i in idx_mod:
k_study = k + ' StudyInstanceUID'
dicom_study = columns_study_id[i]
if df_scanner.loc[index, k_study] == '':
df_scanner.loc[index, k_study] = df_scanner.loc[index, k_study] + df_scanner.loc[index, dicom_study]
else:
# if index==103:
# print('add')
df_scanner_color.loc[index, k] = 'RED'
df_scanner.loc[index, k_study] = df_scanner.loc[index, k_study] + ',' + df_scanner.loc[index, dicom_study]
df_scanner = df_scanner[colmax2]
# # Check dates from ecrf
# columns_study = newlist = [c for c in df_scanner.columns if 'StudyDate' in c]
# columns_study_mod = newlist = [c for c in df_scanner.columns if 'Modality' in c]
# for index, row in df_scanner.iterrows():
# print('index', index)
# #sys.exit()
# dates_dicom = list(row[columns_study])
# mod_dicom = list(row[columns_study_mod])
# for k in dates_required:
# if not pd.isnull(row[k]):
# if not row[k] in dates_dicom:
# df_scanner_color.loc[index, k] = 'RED'
# df_scanner.loc[index, 'ECRF_MISSING'] = True
# Check dates from dicom
columns_study = [c for c in df_scanner.columns if 'StudyDate' in c]
columns_study_mod = [c for c in df_scanner.columns if 'Modality' in c]
for index, row in df_scanner.iterrows():
#if row['PatientID']=='09-BEL-0002':
# sys.exit()
#sys.exit()
print('index', index)
columns_date_ecrf = dates_required + dates_sidestudy
dates_ecrf = list(row[columns_date_ecrf])
dates_index = [(not pd.isnull(x)) and (not x=='.') for x in dates_ecrf]
mod_ecrf=[]
for i,x in enumerate(columns_date_ecrf):
if dates_index[i]:
mod_ecrf.append(modalities_all[x])
else:
mod_ecrf.append('')
#mod_ecrf = [modalities_all[x] for i,x in enumerate(columns_date_ecrf) if dates_index[i]]
#dates_ecrf = list(row[columns_date_ecrf])
for e, k in enumerate(columns_study):
#if k=='StudyDate_00':
# sys.exit()
if not | pd.isnull(row[k]) | pandas.isnull |
from src.config import CENSUS_KEY
import json
import requests
import pandas as pd
from typing import Dict, List, Tuple, Optional, Collection
from src import data
def download_census_data(geo_ls=["zip", "county"]) -> data.Wrapper:
'''
Top level function to run the queries
'''
# Census tables
detailed_table = 'https://api.census.gov/data/2018/acs/acs5?'
subject_table = 'https://api.census.gov/data/2018/acs/acs5/subject?'
# define race instance
# Values name format: topic_property_subproperty...
# B03002_003E: Does not include people of hispanic/latino origin
race_variables = {'B03002_001E': 'race_total',
'B03002_005E': 'race_native',
'B03002_004E': 'race_black', 'B03002_003E': 'race_white',
'B03002_009E': 'race_twoplus_total',
'B03002_007E': 'race_pacific',
'B03002_008E': 'race_other', 'B03002_006E': 'race_asian',
'B03002_012E': 'race_hispaniclatino_total'}
# race_functions = [processRaceData]
# variable does not need to be defined, but it is for readability
race = CensusRequest("race", detailed_table, race_variables)
# define poverty instance
poverty_variables = {'S1701_C01_001E': 'poverty_population_total',
'S1701_C02_001E': 'poverty_population_poverty',
'S1701_C02_002E': 'poverty_population_poverty_child'}
# If additional subdivision are needed
# 'S1701_C02_003E' = AGE!!Under 18 years!! Under 5 years!!
# 'S1701_C02_004E' = AGE!!Under 18 years!! 5 to 17 years!!
# poverty_functions = [processPovertyData]
poverty = CensusRequest("poverty", subject_table, poverty_variables)
return get_census_data_list([race, poverty], geo_ls)
def get_census_response(table_url: str,
get_ls: Collection[str],
geo: str,
mock_response: str = None) -> List[List[str]]:
'''
Concatenates url string and returns response from census api query
input:
table_url (str): census api table url
get_ls (ls): list of tables to get data from
geo (str): geographic area and filter
output:
list of rows. first row is header:
[NAME, <elements of get_ls>, state, county/zip header]
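    example:
        an illustrative call (requires a valid CENSUS_KEY):
        rows = get_census_response('https://api.census.gov/data/2018/acs/acs5/subject?',
                                   ['S1701_C01_001E'], 'county')
        # rows[0] == ['NAME', 'S1701_C01_001E', 'state', 'county']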
'''
if geo == 'zip':
geo_parameter = 'zip code tabulation area:*'
elif geo == 'county':
geo_parameter = 'county:*&in=state:17'
else:
raise ValueError('Unsupported geography type: ' + geo)
get = 'NAME,' + ",".join(get_ls)
url = f'{table_url}get={get}&for={geo_parameter}&key={CENSUS_KEY}'
if mock_response:
with open(mock_response, 'r') as mock_response_file:
try:
data_table = json.load(mock_response_file)
except json.JSONDecodeError as e:
print("Error reading json from mock data: {} (line {} col {})".format(e.msg, e.lineno, e.colno)) # noqa: E501
data_table = []
else:
response = requests.get(url)
try:
data_table = response.json()
except json.JSONDecodeError:
print("Error reading json from census response. Make sure you have a valid census key. Census Response: " + response.text) # noqa: E501
data_table = []
return data_table
def create_percentages(df: pd.DataFrame, total_col_str: str) -> pd.DataFrame:
'''
Calculates percentages and removes NaN for dict conversion
Returns calculated percent_df
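    Example (illustrative values):
        df = pd.DataFrame({'race_total': [100], 'race_white': [60]})
        create_percentages(df, 'race_total')  # -> one column 'race_white' with value 0.6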
'''
# divides df by total column to calculate percentages
divide_by_total = lambda x: x / df[total_col_str] # noqa: E731, E501
# Casts to type float64 for numpy interoperability
percent_df: pd.DataFrame = df.apply(divide_by_total) \
.drop(total_col_str, axis=1) \
.astype('float64')
# Rounds to save space
percent_df = percent_df.round(6)
return percent_df
class CensusRequest:
def __init__(self,
metric: str,
table_url: str,
variables: Dict[str, str]) -> None:
self.metric = metric
self.table_url = table_url
self.variables = variables
def county_fips(reverse=False) -> Dict[str, str]:
'''
Requests county fips from census API and returns list of IL county FIPS
input: reverse (bool) reverses keys and values in output
output: il_json (dict) {'county name': 'fip'}
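    example (illustrative entry): {'Cook County, Illinois': '17031', ...};
        with reverse=True: {'17031': 'Cook County, Illinois', ...}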
'''
url = 'https://api.census.gov/data/2010/dec/sf1?get=NAME&for=county:*'
response = requests.get(url)
# filter for IL
def il_county_filter(x):
return x[1] == '17'
response_json = response.json()
if reverse:
il_json = {county[1] + county[2]: county[0]
for county in response_json if il_county_filter(county)}
else:
il_json = {county[0]: county[1] + county[2]
for county in response_json if il_county_filter(county)}
return il_json
def majority(series: pd.Series) -> Optional[str]:
'''
Returns majority race demographic
for each geo_area
If no majority, returns 'majority_minority'
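    Example (illustrative values):
        majority(pd.Series({'race_white': 0.62, 'race_black': 0.38}))  -> 'race_white'
        majority(pd.Series({'race_white': 0.45, 'race_black': 0.40}))  -> 'majority_minority'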
'''
# indexes max value, returns NA for NA rows
idx = series.idxmax()
try:
value = series[idx]
except KeyError:
# if NA row, idx = NA
return None
if value >= 0.5:
return idx
else:
return 'majority_minority'
def dataframe_and_bins_from_census_rows(all_rows: List[List[str]], geography_type: str, request: CensusRequest) -> Tuple[pd.DataFrame, Dict[str, Dict[str, List[float]]]]: # noqa: E501
# Translate the census variables into our descriptive names
columns = [request.variables.get(column_name, column_name)
for column_name in all_rows[0]]
dataframe = pd.DataFrame(columns=columns, data=all_rows[1:])
# Without forcing the types, the numbers end up as strings
dataframe = dataframe.astype('string') # without this, you get "TypeError: object cannot be converted to an IntegerDtype" during when we convert to integers # noqa: E501
conversion_dict = {v: "Int64" for v in request.variables.values()}
dataframe = dataframe.astype(conversion_dict)
bins = {}
if geography_type == "county":
fip_series = dataframe.loc[:, 'state'] + dataframe.loc[:, 'county']
fip_series.rename('FIPS', inplace=True)
dataframe = pd.concat([dataframe, fip_series], axis=1)
dataframe = dataframe.set_index('FIPS').drop(['state', 'county'], axis=1) # noqa: E501
elif geography_type == "zip":
dataframe = dataframe.set_index('zip code tabulation area') \
.drop(['NAME', 'state'], axis=1) \
.filter(regex='^(6[0-2])[0-9]+', axis=0)
else:
raise ValueError("Unsupported geography type: " + geography_type)
if request.metric == "race":
numeric_columns = request.variables.values()
bins = {
'quantiles': {
'race_data': data.calculate_quantiles_bins(dataframe, numeric_columns) # noqa: E501
},
'natural_breaks': {
'race_data': data.calculate_natural_breaks_bins(dataframe, numeric_columns) # noqa: E501
}
}
race_df = dataframe.loc[:, numeric_columns]
pct_df = create_percentages(race_df, 'race_total')
bins['quantiles']['race_data']['race_percentages'] = \
data.calculate_quantiles_bins(pct_df, pct_df.columns)
bins['natural_breaks']['race_data']['race_percentages'] = \
data.calculate_natural_breaks_bins(pct_df, pct_df.columns)
# creates series of majority race demographics
majority_series = pct_df.apply(majority, axis=1)
majority_series.name = 'race_majority'
# converts NAN to None, for proper JSON encoding
        working_df = pct_df.where(pd.notnull(pct_df), None)
# IRS CA in-migration from other states (2012-2018)
import pandas as pd
import numpy as np
import os
import re
inmig_12_18 = os.listdir('CA In 12_18')
outmig_12_18 = os.listdir('CA Out 12_18')
master_df = pd.DataFrame()
years = []
folders = ['CA In 12_18/', 'CA Out 12_18/']
types = ['In', 'Out']
counter = 0
for folder in [inmig_12_18, outmig_12_18]:
temp_df = pd.DataFrame()
for file in folder:
# Identifying year from different file names
num = re.findall('\d+', file)[0][-2:]
yr = int('20' + num)
print(yr)
df = pd.read_csv(folders[counter] + file, usecols=[0,1,3,5])
df.columns = ['Origin', 'State_FIPS', 'State', 'Exemptions']
df = df[df.Origin == 6].drop('Origin', axis=1)
df = df[(df.State_FIPS != 6) & (df.State_FIPS < 57)]
years += [yr]*len(df)
print(yr, len(df))
temp_df = pd.concat([temp_df, df])
temp_df['Type'] = [types[counter]] * len(temp_df)
    master_df = pd.concat([master_df, temp_df])
"""
__author__ = <NAME>
"""
import attr
import numpy as np
import pandas as pd
from attr.validators import instance_of
from pysight.nd_hist_generator.movie import Movie, FrameChunk
from collections import deque, namedtuple
from typing import Tuple, Union
from numba import jit, uint8, int64
@attr.s(slots=True)
class CensorCorrection(object):
raw = attr.ib(validator=instance_of(dict))
data = attr.ib(validator=instance_of(pd.DataFrame))
movie = attr.ib(validator=instance_of(Movie))
all_laser_pulses = attr.ib()
nano_flim_list = attr.ib(init=False)
flim = attr.ib(default=False, validator=instance_of(bool))
reprate = attr.ib(default=80e6, validator=instance_of(float))
binwidth = attr.ib(default=800e-12, validator=instance_of(float))
laser_offset = attr.ib(default=3.5, validator=instance_of(float))
num_of_channels = attr.ib(default=1, validator=instance_of(int))
@property
def bins_bet_pulses(self) -> int:
return int(np.ceil(1 / (self.reprate * self.binwidth)))
@property
def offset(self):
return int(np.floor(self.laser_offset * 10 ** -9 / self.binwidth))
def run(self):
"""
Main pipeline for the censor correction part.
"""
if self.flim:
print("Starting the censor correction...")
self.create_arr_of_hists_deque()
else:
print("FLIM deactivated, no censor correction performed.")
def __gen_laser_pulses_deque(self) -> np.ndarray:
"""
If data has laser pulses - return them. Else - simulate them with an offset
"""
start_time = 0
step = self.bins_bet_pulses
volumes_in_movie = self.movie.gen_of_volumes()
if (
self.all_laser_pulses == 0 and self.flim == False
): # no 'Laser' data was recorded
for vol in volumes_in_movie:
yield np.arange(
start=start_time + self.offset,
stop=vol.end_time,
step=step,
dtype=np.uint64,
)
elif self.all_laser_pulses == 0 and self.flim == True:
pass
else:
for vol in volumes_in_movie:
yield self.all_laser_pulses[
(self.all_laser_pulses >= vol.abs_start_time - step)
& (self.all_laser_pulses <= vol.end_time + step)
] + self.offset
def __get_bincount_deque(self):
print("Movie object created. Generating the bincount deque...")
bincount_deque = deque()
laser_pulses_deque = self.__gen_laser_pulses_deque()
volumes_in_movie = self.movie.gen_of_volumes()
for idx, vol in enumerate(volumes_in_movie):
censored = CensoredVolume(
df=vol.data,
vol=vol,
offset=self.offset,
laser_pulses=next(laser_pulses_deque),
)
dig, bincount = censored.gen_bincount()
pos_idx = np.where(dig >= 0)[0]
dig = dig[pos_idx]
pos_photons = censored.df.iloc[pos_idx, -1].values.T
if len(pos_photons) == 0:
data_dict = {
"photon_hist": np.zeros((self.bins_bet_pulses, 1), dtype=np.uint8),
"bincount": bincount,
"num_empty_hists": bincount[0],
}
return data_dict
photon_hist = np.zeros(
(self.bins_bet_pulses, pos_photons.shape[0]), dtype=np.uint8
)
for laser_idx, photon in enumerate(np.nditer(pos_photons)):
start_time = censored.laser_pulses[dig[laser_idx]]
try:
end_time = censored.laser_pulses[dig[laser_idx] + 1]
except IndexError: # photons out of laser pulses
continue
else:
photon_hist[:, laser_idx] = np.histogram(
photon, bins=np.arange(start_time, end_time + 1, dtype="uint64")
)[0]
data_dict = {
"photon_hist": photon_hist,
"bincount": bincount,
"num_empty_hists": bincount[0],
}
assert (
data_dict["num_empty_hists"] >= 0
), "Sum of bincount: {}, number of photons: {}".format(
sum(bincount), laser_idx
)
bincount_deque.append(data_dict)
return bincount_deque
def find_temporal_structure_deque(self):
temp_struct_deque = deque()
laser_pulses_deque = self.__gen_laser_pulses_deque()
volumes_in_movie = self.movie.gen_of_volumes()
for idx, vol in enumerate(volumes_in_movie):
censored = CensoredVolume(
df=vol.data,
vol=vol,
offset=self.offset,
laser_pulses=next(laser_pulses_deque),
binwidth=self.binwidth,
reprate=self.reprate,
)
temp_struct_deque.append(censored.find_temp_structure())
return temp_struct_deque
def __worker_arr_of_hists(self, vol):
censored = CensoredVolume(
df=vol.data,
vol=vol,
offset=self.offset,
binwidth=self.binwidth,
reprate=self.reprate,
)
return censored.gen_arr_of_hists()
def create_arr_of_hists_deque(self):
"""
For each volume generate a single matrix with the same size as the underlying volume,
which contains a histogram of photons in their laser pulses for each pixel.
:return: deque() that contains an array of histograms in each place
"""
self.nano_flim_list = [] # each cell contains a different data channel
for chan in range(1, self.num_of_channels + 1):
print("Starting channel number {}: ".format(chan))
volumes_in_movie = self.movie.gen_of_volumes(channel_num=chan)
self.nano_flim_list.append(
[self.__worker_arr_of_hists(vol) for vol in volumes_in_movie]
)
def create_array_of_hists_deque(self):
"""
Go through each volume in the deque and find the laser pulses for each pixel, creating a summed histogram per pixel.
:return:
"""
temp_struct_deque = deque()
laser_pulses_deque = self.__gen_laser_pulses_deque()
volumes_in_movie = self.movie.gen_of_volumes()
for idx, vol in enumerate(volumes_in_movie):
censored = CensoredVolume(
df=vol.data,
vol=vol,
offset=self.offset,
laser_pulses=next(laser_pulses_deque),
binwidth=self.binwidth,
reprate=self.reprate,
)
temp_struct_deque.append(censored.gen_array_of_hists())
return temp_struct_deque
def __gen_labels(self, size: int, label: Union[int, float]) -> np.ndarray:
"""
Create labels for the ML algorithm. Label value must be an integer.
:size: Number of elements
:return: np.ndarray
"""
if isinstance(label, int): # fixed power during the session
return np.ones(size, dtype=np.uint8) * label
elif isinstance(
label, float
): # `label` contains the frequency of the triangular wave
pass
def learn_histograms(
self, label: Union[int, float], power: int, folder_to_save: str
):
"""
Implement the machine learning algorithm on the data.
:param label: Label of ML algorithm.
:param power: How much power was injected to the Qubig. For saving the file.
:return: data, labels
"""
from sklearn import svm, metrics
import pathlib
# Start by generating the data and arranging it properly for the machine
bincount = self.__get_bincount_deque()
print("Bincount done. Adding all data to a single matrix.")
data = np.empty((self.bins_bet_pulses, 0))
for vol in bincount:
data = np.concatenate(
(data, vol["photon_hist"]), axis=1
) # the histograms with photons in them
data = np.concatenate(
(
data,
np.zeros(
(self.bins_bet_pulses, vol["num_empty_hists"]), dtype=np.uint8
),
),
axis=1,
) # empty hists
data = data.T
n_samples = data.shape[0]
labels = self.__gen_labels(n_samples, label)
classifier = svm.SVC(gamma=0.001)
labels[1] = 10 # toying around
print("Fitting the data...")
classifier.fit(data[: n_samples // 2], labels[: n_samples // 2])
# Predictions
expected = labels[n_samples // 2 :]
predicted = classifier.predict(data[n_samples // 2 :])
print("Number of samples is %s." % n_samples)
print(
"Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted))
)
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
# Save the data for future use
folder_as_path = pathlib.Path(folder_to_save)
filename = str(folder_as_path / "{}p_label_{}.npy".format(power, label))
self.__save_data(data=data, filename=filename)
return data, labels
def __save_data(self, data: np.ndarray, filename: str):
"""
Save the data array for future training.
:param data: Data to be learnt.
:param filename: Including dir
:return:
"""
print("Saving to {}...".format(filename))
with open(filename, "wb") as f:
np.save(f, data)
def append_laser_line(self):
"""
Add a final laser line to the laser signal input.
"""
last_laser_row = pd.DataFrame(
{
"abs_time": self.raw["Laser"]["abs_time"].iat[-1]
+ self.bins_bet_pulses,
"edge": 0,
"sweep": self.raw["Laser"]["sweep"].iat[-1],
"time_rel_sweep": self.raw["Laser"]["time_rel_sweep"].iat[-1]
+ self.bins_bet_pulses,
},
index=[self.raw["Laser"].shape[0]],
)
        self.raw["Laser"] = pd.concat([self.raw["Laser"], last_laser_row])
import calendar
import math
import re
from datetime import datetime, timedelta, date
import pandas as pd
import pytz
from catalyst.exchange.exchange_errors import InvalidHistoryFrequencyError, \
InvalidHistoryFrequencyAlias
def get_date_from_ms(ms):
"""
    The date from the number of milliseconds from the epoch.
Parameters
----------
ms: int
Returns
-------
datetime
"""
return datetime.fromtimestamp(ms / 1000.0)
def get_seconds_from_date(date):
"""
The number of seconds from the epoch.
Parameters
----------
date: datetime
Returns
-------
int
"""
epoch = datetime.utcfromtimestamp(0)
epoch = epoch.replace(tzinfo=pytz.UTC)
return int((date - epoch).total_seconds())
def get_delta(periods, data_frequency):
"""
Get a time delta based on the specified data frequency.
Parameters
----------
periods: int
data_frequency: str
Returns
-------
timedelta
"""
return timedelta(minutes=periods) \
if data_frequency == 'minute' else timedelta(days=periods)
def get_periods_range(freq, start_dt=None, end_dt=None, periods=None):
"""
Get a date range for the specified parameters.
Parameters
----------
start_dt: datetime
end_dt: datetime
freq: str
Returns
-------
DateTimeIndex
"""
if freq == 'minute':
freq = 'T'
elif freq == 'daily':
freq = 'D'
if start_dt is not None and end_dt is not None and periods is None:
return pd.date_range(start_dt, end_dt, freq=freq)
elif periods is not None and (start_dt is not None or end_dt is not None):
_, unit_periods, unit, _ = get_frequency(freq)
adj_periods = periods * unit_periods
# TODO: standardize time aliases to avoid any mapping
unit = 'd' if unit == 'D' else 'h' if unit == 'H' else 'm'
delta = pd.Timedelta(adj_periods, unit)
if start_dt is not None:
return pd.date_range(
start=start_dt,
end=start_dt + delta,
freq=freq,
closed='left',
)
else:
return pd.date_range(
start=end_dt - delta,
end=end_dt,
freq=freq,
)
else:
raise ValueError(
'Choose only two parameters between start_dt, end_dt '
'and periods.'
)
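# Example (sketch): asking for 3 periods of a 5-minute candle from a start date
# spans a 15-minute window sampled every 5 minutes, e.g.
#   get_periods_range('5T', start_dt=pd.Timestamp('2018-1-1', tz='utc'), periods=3)
#   # -> DatetimeIndex(['2018-01-01 00:00', '2018-01-01 00:05', '2018-01-01 00:10'])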
def get_periods(start_dt, end_dt, freq):
"""
The number of periods in the specified range.
Parameters
----------
start_dt: datetime
end_dt: datetime
freq: str
Returns
-------
int
"""
return len(get_periods_range(start_dt=start_dt, end_dt=end_dt, freq=freq))
def get_start_dt(end_dt, bar_count, data_frequency, include_first=True):
"""
The start date based on specified end date and data frequency.
Parameters
----------
end_dt: datetime
bar_count: int
data_frequency: str
include_first
Returns
-------
datetime
"""
periods = bar_count
if periods > 1:
delta = get_delta(periods, data_frequency)
start_dt = end_dt - delta
if not include_first:
start_dt += get_delta(1, data_frequency)
else:
start_dt = end_dt
return start_dt
def get_period_label(dt, data_frequency):
"""
The period label for the specified date and frequency.
Parameters
----------
dt: datetime
data_frequency: str
Returns
-------
str
"""
if data_frequency == 'minute':
return '{}-{:02d}'.format(dt.year, dt.month)
else:
return '{}'.format(dt.year)
def get_month_start_end(dt, first_day=None, last_day=None):
"""
The first and last day of the month for the specified date.
Parameters
----------
dt: datetime
first_day: datetime
last_day: datetime
Returns
-------
datetime, datetime
"""
month_range = calendar.monthrange(dt.year, dt.month)
if first_day:
month_start = first_day
else:
month_start = pd.to_datetime(datetime(
dt.year, dt.month, 1, 0, 0, 0, 0
), utc=True)
if last_day:
month_end = last_day
else:
month_end = pd.to_datetime(datetime(
dt.year, dt.month, month_range[1], 23, 59, 0, 0
), utc=True)
if month_end > pd.Timestamp.utcnow():
month_end = pd.Timestamp.utcnow().floor('1D')
return month_start, month_end
def get_year_start_end(dt, first_day=None, last_day=None):
"""
The first and last day of the year for the specified date.
Parameters
----------
dt: datetime
first_day: datetime
last_day: datetime
Returns
-------
datetime, datetime
"""
year_start = first_day if first_day \
else pd.to_datetime(date(dt.year, 1, 1), utc=True)
year_end = last_day if last_day \
else pd.to_datetime(date(dt.year, 12, 31), utc=True)
if year_end > pd.Timestamp.utcnow():
year_end = pd.Timestamp.utcnow().floor('1D')
return year_start, year_end
def get_frequency(freq, data_frequency=None, supported_freqs=['D', 'H', 'T']):
"""
Takes an arbitrary candle size (e.g. 15T) and converts to the lowest
common denominator supported by the data bundles (e.g. 1T). The data
bundles only support 1T and 1D frequencies. If another frequency
is requested, Catalyst must request the underlying data and resample.
Notes
-----
We're trying to use Pandas convention for frequency aliases.
Parameters
----------
freq: str
data_frequency: str
Returns
-------
str, int, str, str
"""
if data_frequency is None:
data_frequency = 'daily' if freq.upper().endswith('D') else 'minute'
if freq == 'minute':
unit = 'T'
candle_size = 1
elif freq == 'daily':
unit = 'D'
candle_size = 1
else:
freq_match = re.match(r'([0-9].*)?(m|M|d|D|h|H|T)', freq, re.M | re.I)
if freq_match:
candle_size = int(freq_match.group(1)) if freq_match.group(1) \
else 1
unit = freq_match.group(2)
else:
raise InvalidHistoryFrequencyError(frequency=freq)
# TODO: some exchanges support H and W frequencies but not bundles
# Find a way to pass-through these parameters to exchanges
# but resample from minute or daily in backtest mode
# see catalyst/exchange/ccxt/ccxt_exchange.py:242 for mapping between
    # Pandas offset aliases (used by Catalyst) and the CCXT timeframes
if unit.lower() == 'd':
unit = 'D'
alias = '{}D'.format(candle_size)
if data_frequency == 'minute':
data_frequency = 'daily'
elif unit.lower() == 'm' or unit == 'T':
unit = 'T'
alias = '{}T'.format(candle_size)
data_frequency = 'minute'
elif unit.lower() == 'h':
data_frequency = 'minute'
if 'H' in supported_freqs:
unit = 'H'
alias = '{}H'.format(candle_size)
else:
candle_size = candle_size * 60
alias = '{}T'.format(candle_size)
else:
raise InvalidHistoryFrequencyAlias(freq=freq)
return alias, candle_size, unit, data_frequency
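# Example (sketch) of how a few representative aliases resolve under the rules
# above (derived by tracing this function, not an official Catalyst reference):
#   get_frequency('15T')   -> ('15T', 15, 'T', 'minute')
#   get_frequency('4H')    -> ('4H', 4, 'H', 'minute')    # 'H' is in the default supported_freqs
#   get_frequency('daily') -> ('1D', 1, 'D', 'daily')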
def from_ms_timestamp(ms):
return pd.to_datetime(ms, unit='ms', utc=True)
def get_epoch():
    return pd.to_datetime('1970-1-1', utc=True)
#!/usr/bin/env python
#
# parse multiple HDF5 files and print graph
#
import pandas as pd
import matplotlib
matplotlib.use('AGG')
import matplotlib.pyplot as plt
from pandas import ExcelWriter
print('Import data from HDF5 files')
store1 = pd.HDFStore("E:\\home\\2014\\07\\logfile_20140701.hdf5", mode='r')
print(store1.root.series.tarom.Total_Charge_Current.data[0:10])
store2 = pd.HDFStore("E:\\home\\2014\\07\\logfile_20140702.hdf5", mode='r')
print(store2.root.series.tarom.Total_Charge_Current.data[0:10])
store3 = pd.HDFStore("E:\\home\\2014\\07\\logfile_20140703.hdf5", mode='r')
print(store3.root.series.tarom.Total_Charge_Current.data[0:10])
store4 = pd.HDFStore("E:\\home\\2014\\07\\logfile_20140704.hdf5", mode='r')
print(store4.root.series.tarom.Total_Charge_Current.data[0:10])
# Example accessing HDF5 using MyRaspiHome framework
# -------------------------------------------------------------
# from data_accessor.data_accessor_hdf5 import DataAccessorHDF5
# da = DataAccessorHDF5()
# da.open("E:\\home\\2014\\07\\logfile_20140701.hdf5", 'r')
# parameter = ('Total_Charge_Current',)
# data = da.read('tarom', parameter)
# print(data)
# da.close()
# convert to pandas dataframes
s1 = pd.Series.from_array(store1.root.series.tarom.Total_Charge_Current.data)
df1 = pd.DataFrame(s1)
s2 = pd.Series.from_array(store2.root.series.tarom.Total_Charge_Current.data)
df2 = pd.DataFrame(s2)
s3 = pd.Series.from_array(store3.root.series.tarom.Total_Charge_Current.data)
df3 = pd.DataFrame(s3)
s4 = pd.Series.from_array(store4.root.series.tarom.Total_Charge_Current.data)
df4 = pd.DataFrame(s4)
print('Merge data')
merged = pd.merge(df1, df2)
print('Print graph')
figure = plt.figure()
#merged.plot(kind='area', stacked=False)
df1.plot(kind='area', stacked=False)
#df2.plot(kind='area', stacked=False)
#df3.plot(kind='area', stacked=False)
#df4.plot(kind='area', stacked=False)
plt.grid(True)
plt.savefig('test.png')
plt.close(figure)
print('To excel')
with ExcelWriter('output.xlsx') as writer:
    merged.to_excel(writer)
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from distutils.version import LooseVersion
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
from datetime import datetime
plt.rcParams['font.size'] = 6
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
graphs_path = root_path+'/boundary_effect/graph/'
if not os.path.exists(graphs_path):
os.makedirs(graphs_path)
time = pd.read_csv(root_path+'/time_series/MonthlyRunoffWeiRiver.csv')['Time']
time = time.values
time = [datetime.strptime(t,'%Y/%m') for t in time]
time = [t.strftime('%b %Y') for t in time]
# print(time)
# CHECK 1: is VMD shift-invariant?
# If yes, any shifted copy of an IMF from a VMD decomposition, similar to a
# shifted copy of the original time series, should be maintained.
# For example, given the sunspot time series x (of length 792) we can
# generate a 1-step advanced copy of the original time series as follows:
# x0=(1:791)
# x1=(2:792) this is a 1-step advanced version of x0
# Obviously, shift-invariancy is preserved between x0 and x1 since
# x0(2:791)=x1(1:790)
# For shift-invariancy to be preserved for VMD, we would observe, for
# example, that the VMD IMF1 components for x0 (imf1 of x0) and x1 (imf1 of
# x1) should be exact copies of one another, advanced by a single step.
# i.e., x0_imf(2:791,1) should equal x1_imf(1:790,1) if shift-invariancy
# is preserved.
# As in the case for VMD shown below, we can see that x0_imf(2:791,1) is basically
# equal to x1_imf(1:790,1) except for a few samples close to the beginning and
# end of x0 and x1. Interestingly, we see a low level of error close to the
# beginning of the time series and a high level of error close to the end of
# the time series, of high importance in operational forecasting tasks.
# The errors along the middle range are zeros indicating VMD is
# shift-invariant.
# We argue that the errors close to the boundaries are
# caused by the boundary effect, which is the exact problem this study was designed
# to solve.
# CHECK 2: The impact of appending data points to a time series and then
# performing VMD, analogous to the case in operational forecasting when new
# data becomes available and an updated forecast is made using the newly
# arrived data.
# Ideally, for forecasting situations, when new data is appended to a time
# series and some preprocessing is performed, it should not have an impact
# on previous measurements of the pre-processed time series.
# For example, if IMF1_1:N represents the IMF1, which has N total
# measurements and was derived by applying VMD to x_1:N, then we would expect
# that when we perform VMD after x is appended with another measurement,
# i.e., x_1:N+1, resulting in IMF1_1:N+1, the first 1:N measurements in
# IMF1_1:N+1 are equal to IMF1_1:N. In other words,
# IMF1_1:N+1[1:N]=IMF1_1:N[1:N].
# We see that this is not the case. Appending an additional observation to the
# time series results in updated VMD components that are entirely
# different from the original (not yet updated) VMD components.
# Interestingly, we see a high level of error at the boundaries of the time
# series, of high importance in operational forecasting tasks.
x0_imf = pd.read_csv(root_path+'/boundary_effect/vmd-decompositions-huaxian/x0_imf.csv')
x1_imf = pd.read_csv(root_path+'/boundary_effect/vmd-decompositions-huaxian/x1_imf.csv')
x_1_552_imf = pd.read_csv(root_path+"/boundary_effect/vmd-decompositions-huaxian/x_1_552_imf.csv")
x_1_791_imf = pd.read_csv(root_path+'/boundary_effect/vmd-decompositions-huaxian/x_1_791_imf.csv')
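# A minimal numeric sketch of the two checks described above (assumptions: the
# first column of each *_imf frame holds IMF1, and x_1_552_imf / x_1_791_imf are
# the decompositions of the first 552 and 791 samples respectively).
imf_col = 0  # assumed position of IMF1 in the csv files
# CHECK 1: compare x0_imf(2:791,1) with x1_imf(1:790,1)
shift_err = x0_imf.iloc[1:791, imf_col].to_numpy() - x1_imf.iloc[0:790, imf_col].to_numpy()
print('CHECK 1 max abs error (shift-invariance):', np.abs(shift_err).max())
# CHECK 2: compare the overlapping 1:552 samples before and after appending data
append_err = x_1_791_imf.iloc[0:552, imf_col].to_numpy() - x_1_552_imf.iloc[0:552, imf_col].to_numpy()
print('CHECK 2 max abs error (appended data):', np.abs(append_err).max())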
import argparse
import logging
import sys
from typing import List
import pandas as pd
from analysis.src.python.data_analysis.model.column_name import IssuesColumns, SubmissionColumns
from analysis.src.python.data_analysis.utils.df_utils import merge_dfs
from analysis.src.python.data_analysis.utils.statistics_utils import get_statistics_by_group
def calculate_issues_change_statistics(df_issues_statistics: pd.DataFrame,
issues_classes: List[str]):
""" Calculate issues count diff between previous and current attempt in one submissions series. """
df_issues_statistics = df_issues_statistics.sort_values([SubmissionColumns.ATTEMPT])
issues_change_statistics = {
SubmissionColumns.ID: df_issues_statistics[SubmissionColumns.ID].values,
}
for issue_class in issues_classes:
issues_change_statistics[issue_class] = []
previous_submission_issues_statistics = None
for _, submission_issues_statistics in df_issues_statistics.iterrows():
for issue_class in issues_classes:
if previous_submission_issues_statistics is None:
diff = submission_issues_statistics[issue_class]
else:
diff = submission_issues_statistics[issue_class] - previous_submission_issues_statistics[issue_class]
issues_change_statistics[issue_class].append(diff)
previous_submission_issues_statistics = submission_issues_statistics
return pd.DataFrame.from_dict(issues_change_statistics)
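# Illustrative sketch (toy frame; 'MagicNumber' stands in for a real issue class):
#   toy = pd.DataFrame({SubmissionColumns.ID: [10, 11],
#                       SubmissionColumns.ATTEMPT: [1, 2],
#                       'MagicNumber': [3, 1]})
#   calculate_issues_change_statistics(toy, ['MagicNumber'])
#   # -> 'MagicNumber' diffs [3, -2]: the first attempt keeps its raw count,
#   #    later attempts report the change from the previous attempt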
def get_submissions_issues_change_statistics(submissions_path: str,
issues_statistics_path: str,
issues_change_statistics_path: str,
issues_path: str,
chunk_size=20000):
""" Calculate issues count diff between previous and current attempt in all submissions series. """
df_submissions = pd.read_csv(submissions_path)
df_issues_statistics = pd.read_csv(issues_statistics_path)
    df_issues = pd.read_csv(issues_path)
from results_2013_2014.state_legislature_scrape_2013_2014 import scrape_results as sr1314
from results_2015.state_legislature_scrape_2015 import scrape_results as sr15
from results_2016.state_legislature_scrape_2016 import scrape_results as sr16
from results_2016.state_legislature_scrape_2016_ny import scrape_results as sr16_ny
from results_2017.state_legislature_scrape_2017 import scrape_results as sr17
import pandas as pd
import json
if __name__ == '__main__':
#files containing web scraper outputs, used as inputs to create election information file
outfile_2013_2014 = 'results_2013_2014/election_results_2013_2014.csv'
outfile_2015 = 'results_2015/2015_election_results.csv'
outfile_2016 = 'results_2016/2016_election_results.csv'
outfile_2016_ny = 'results_2016/2016_election_results_ny.csv'
outfile_2017_nj = 'results_2017/nj2017.csv'
outfile_2017_va = 'results_2017/va2017.csv'
#toggle to re-scrape the results from ballotpedia. If false,
#intermediate data files (results of web-scraping) are used
#to create election information file
rescrape = False
if rescrape:
#scripts to run web scrapes for the various years. url_file_xxx contains
#the webpage urls. See year directories for details
url_file_2013_2014 = 'results_2013_2014/2013_2014_urls.csv'
race_results_2013_2014 = sr1314(url_file_2013_2014, outfile_2013_2014)
url_file_2015 = 'results_2015/2015_urls.csv'
race_results_2015 = sr15(url_file_2015, outfile_2015)
url_file_2016 = 'results_2016/2016_urls.csv'
race_results_2016 = sr16(url_file_2016, outfile_2016)
#new york 2016 was formatted differently than the other 2016 results years,
#so has a seperate scraping script
url_file_2016_ny = 'results_2016/2016_urls_ny.csv'
race_results_2016_ny = sr16_ny(url_file_2016_ny, outfile_2016_ny)
url_file_2017 = 'results_2017/2017_urls.csv'
race_results_2017 = sr17(outfile_2017_nj, outfile_2017_va)
#state name -> abbreviation dictionary file
states = json.load(open('name_to_abbrev.json'))
#file where the election info will be saved
all_elections_outfile = 'post2013_state_legislative_elections.csv'
#%%
dfs_old_style = [pd.read_csv(x) for x in [outfile_2013_2014, outfile_2015, outfile_2016_ny]] + \
[pd.read_csv(outfile_2017_nj, header=None, names=['Year', 'State', 'District', 'Party', 'Name', 'Votes', 'Winner', 'Incumbent'])]
dfs_new_style = [pd.read_csv(x) for x in [outfile_2016, outfile_2017_va]]
    all = pd.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# HEREHEREHERE
#############################################################################
#
# /home/git/clones/external/SAS_3DSpectrographs/py/gratingequation.py
# ; read-quoted-char-radix
#emacs helpers
# (insert (format "\n# " (buffer-file-name)))
#
# (set-input-method 'TeX' t)
# (toggle-input-method)
# (ediff-current-file)
# (wg-python-fix-pdbrc)
# (find-file-other-frame "./.pdbrc")
# (wg-python-fix-pdbrc) # PDB DASH DEBUG end-comments
#
# (setq mypdbcmd (concat (buffer-file-name) "<args...>"))
# (progn (wg-python-fix-pdbrc) (pdb mypdbcmd))
#
# (wg-astroconda-pdb) # IRAF27
# (wg-astroconda3-pdb) # CONDA Python3
#
# (set-background-color "light blue")
# (wg-python-toc)
#
# conda install jupyterlab
# conda install -c conda-forge voila
# conda install nodejs
# jupyter labextension install @jupyter-widgets/jupyterlab-manager
#
# M-x set-input-mode RET TeX (then toggle-input-mode )
#
# (wg-python-toc)
#
#
# __doc__ = """
#
# __author__ = '<NAME>'
#
# __version__ = '0.1'
#
# class GratingException(Exception):
# def __init__(self,message,errors=None):
# @staticmethod
# def __format__(e):
#
# class Grating: # Grating(object) if inherited
# def __init__(self,alpha : "degrees", # Grating::__init__()
# def setalpha(self,newalpha: "degrees"): # Grating::setalpha()
# def setmode(self,newmode: "integer"): # Grating::setmode()
# def setgrating(self,linesmm:float): # Grating::setgrating()
# def setsize(self,length:float ,width:float): # Grating::setsize()
# def setblaze(self,blaze: float): # Grating::setblaze()
# def setlmm(self,lmm: float): # Grating::setlmm()
# def difftable(self,df: pd.DataFrame, key: str): # Grating::difftable()
# def debug(self,msg="",os=sys.stderr): # Grating::debug()
# def grating_quation(self, waverange,step = 0) -> 'radians': # Grating::grating_quation()
# def report(self): # Grating::report()
# def csv(self,fname: 'string'): # Grating::csv()
# def groovedepth(self): # Grating::groovedepth()
# def startplot(self): # Grating::startplot
# def plot(self,keys=[]): # Grating::plot
# def littrow_equation(self,ฮฑ): # Grating.littrow_equation()
# def peakAngle(self,ฮป: "angstroms" ): # Grating::peakAngle()
# def phi(self,ฮป : "angstroms") -> 'degrees': # Grating::phi()
#
# if __name__ == "__main__":
#
#
#
#
#############################################################################
import optparse
import re
import sys
import numpy as np
import pandas as pd
from numpy import sin,cos,arcsin,arccos,radians,degrees
import matplotlib.pyplot as plt
# (wg-python-graphics)
__doc__ = """
gratingequation.py [options]
options:
-p, --plot bool make a Matplotlib plot
-r, --report bool make a 'report' to stdout
-c, --csv <pathname> bool produce a CSV file to path/file name.
-a --alpha float incidence angle
-b --blaze float degrees blaze angle
-m --mode int mode
-l --lmm int lines per mm
This is a basic stand-alone program that is a useful regression
testbed for the Grating class. It permits playing with grating
equations. The idea is to collect attributes of the grating and
provide a set of equations and other functions to compute values for a
grating design. It will produce plots and tables.
Again, this is a regression test for the Grating class. It shows
some of the features (OK I was lazy and did not fully regression
test!). Use it as you will.
<NAME>'s favorite grating web site:
https://www.spectrogon.com/product-services/gratings/grating-design-tool/
handy notes about Pandas
https://pandas.pydata.org/Pandas_Cheat_Sheet.pdf
The red dashed line is the angle of incidence.
The green dotted line is the normal.
the blue dotted line is the angle of diffraction.
"""
__copyright__ = """Copyright 2020 <NAME>.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Open Source Initiative Approved License: MIT
"""
__author__ = '<NAME>'
__version__ = '0.1'
__all__ = ['GratingException','Grating'] # expose these things
##############################################################################
# GratingException
#
##############################################################################
class GratingException(Exception):
"""Special exception to allow differentiated capture of exceptions"""
def __init__(self,message,errors=None):
super(GratingException,self).__init__("Grating "+ message)
self.errors = errors
@staticmethod
def __format__(e):
return "Grating" % e
# GratingException
##############################################################################
# Grating
#
##############################################################################
class Grating: # Grating(object) if inherited
""" Permit more than one instance per run.
ALL units in centimeters. 5000 angstroms is 5e-5 cm.
Gratings are lines per cm
Mode is integer (signed)
blaze is stated in degrees (per manufacture's data, we convert to radians)
"""
# https://en.jeulin.fr/simple-radial-slits-212076.html
ovioslits_cm = np.array([10.0, 20.0, 30.0, 40.0, 50.0, 70.0, 100.0,
150.0, 200.0, 300.0, 500.0, 700.0])/10000.0 # in cm
latex1 = """\\frac{m\\lambda}{d} &= sin(\\alpha) + sin(\\beta)"""
print1 = "mฮป/d = sin(ฮฒ) + sin(ฮฑ)" # print a basic equation (unicode to rescue)
specrange = np.arange(3300, 8000, 100) # units: angstroms every 10nm useful default range
def __init__(self,alpha : "degrees", # Grating::__init__()
m : "Mode [integer]",
lmm : "lines per mm",
blaze : "degrees",
slit : "microns",
length: "mm" = 25,
width : "mm" = 25):
"""Describe the grating in conventional terms, we use CM and angstroms
as our basis.
Key traits of a Grating:
alpha rotation of grating w.r.t. grating normal
m mode
d count of lines per mm
blaze The blaze angle for this grating
length length of the physical grating surface
width width of the physical grating surface
Developed internally to each instance
wave the wavelength range per call, reflect the last one
dispersion accumulate data for different settings
df Pandas dataframe to accumulate/manage data. Key is alpha+mode+lmm
"""
#super(base,self).__init__()
#self.
self.alpha = alpha # rotation of grating w.r.t. grating normal
self.m = m # mode
self.lmm = float(lmm) # remember lines per mm
self.d = 1.0/(self.lmm * 10) # inverse count of lines per mm
self.blaze = blaze # The blaze angle for this grating
self.length = length # length of the physical grating surface
self.width = width # width of the physical grating surface
self.wave = [] # the wavelength range per call, reflect the last one
self.dispersion = {} # accumulate data for different settings
self.df = None # manage data as a Pandas dataframe
self.notes = [] # strings of user notes
self._fig = None # plot figure.
### Grating.__init__()
def setalpha(self,newalpha: "degrees"): # Grating::setalpha()
"""Update alpha for another go."""
self.alpha = newalpha
return self
### Grating.setalpha()
def setmode(self,newmode: "integer"): # Grating::setmode()
"""Set the new mode"""
self.m = newmode
return self
### Grating.setmode()
def setgrating(self,linesmm:float): # Grating::setgrating()
"""Set the d = lines/mm"""
self.d = float(linesmm)
return self
### Grating.setgrating()
def setsize(self,length:float ,width:float): # Grating::setsize()
"""Set the length/width of physical grating"""
self.length = length
self.width = width
return self
### Grating.setsize()
def setblaze(self,blaze: float): # Grating::setblaze()
"""Set the blaze width"""
self.blaze = float(blaze)
return self
### Grating.setblaze()
def setlmm(self,lmm: float): # Grating::setlmm()
"""Set the lines/mm and remember to update d"""
self.lmm = float(lmm)
self.d = 1.0/(self.lmm * 10) # inverse count of lines per mm
return self
### Grating.setlmm()
def difftable(self,df: pd.DataFrame, key: str): # Grating::difftable()
"""Report forward differences on a column. The column is a 'key'
comprised of alpha and mode"""
col = degrees(df[key].values) # get the angles in radians
fdiff = col[1:] - col[:-1] # first forward diff using np
ratios = fdiff[1:]/fdiff[:-1] # reduce scale.
tmp = pd.DataFrame({'Diff' : fdiff[1:], 'Ratio' : ratios}, index=self.wave[1:-1]*1e8)
print(tmp)
return self
### Grating.difftable()
def debug(self,msg="",os=sys.stderr): # Grating::debug()
"""Help with momentary debugging, file to fit."""
print("Grating - %s " % msg, file=os)
for key,value in self.__dict__.items():
if(key[0] != '_'):
print("%20s = %s" % (key,value),file=os)
return self
### Grating.debug()
__Grating_debug = debug # preserve our debug name if we're inherited
def grating_quation(self, waverange = None,step = 0) -> 'radians': # Grating::grating_quation()
"""Return ฮฒ from applying the grating equation to a numpy array of wavelengths
(cm), given the conditions that are held constant of this class.
"""
np.seterr(invalid='ignore') # asking for a broad range, sin will blow up.
if(waverange is None):
waverange = self.specrange # provide a decent default visual span
m = float(self.m)
self.wave = waverange / 1.e8 # convert to cm
sinalpha = sin(radians(self.alpha)) # constant
sinb = m * (self.wave / self.d) + sinalpha # spread out to watch.
ฮฒ = arcsin(sinb) # beta
key = """ฮฑ={:5.2f}, m={:2d} lmm={:5.2f}""".format(self.alpha,self.m,int(self.lmm))
self.dispersion[key] = degrees(ฮฒ) # save 'last' result
np.seterr(invalid=None) # reset for other parts of the code.
return ฮฒ
### Grating.grating_quation()
def report(self): # Grating::report()
"""Create a pandas df, make a report by wavelength."""
if(self.dispersion != []):
| pd.set_option('display.max_rows', None) | pandas.set_option |
# -*- coding: utf-8 -*-
# Load All Packages
import numpy as np, pandas as pd
import xgboost as xgb
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from keras.models import Sequential
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from keras.layers import Input, Dense, Embedding, SpatialDropout1D
from keras.layers import GRU, Bidirectional, GlobalAveragePooling1D
from keras.preprocessing import text, sequence
import warnings
warnings.filterwarnings('ignore')
import os
os.environ['OMP_NUM_THREADS'] = '4'
np.random.seed(2018)
# Change the Root Path Here
rootPath = '/Users/ValarMorghulis/Johnson_Johnson/FinalCode/'
# read data
class unspsc():
# Default Path, these paths can be replaced when initializating
def __init__(self,SapPath = rootPath + 'defaultInput/csvFiles/UNSPSC Full Data Update 2.csv',
PrhdaPath= rootPath + 'defaultInput/csvFiles/Prhda.csv',
GmdnPath = rootPath + 'defaultInput/csvFiles/GMDN_desp.csv',
embedPath = rootPath + 'defaultInput/wordEmbeddingMartix/crawl-300d-2M.vec',
weightsPath = rootPath + 'defaultInput/preTrainedWeights/',
wordEmPath = rootPath + 'defaultInput/wordEmbeddingMartix/'):
self.SapPath,self.PrhdaPath = SapPath, PrhdaPath
self.EMBEDDING_FILE,self.GmdnPath = embedPath, GmdnPath
self.weightsPath,self.wordEmPath = weightsPath, wordEmPath
def dataPre(self):
"""
Prepare all Data to be used.
return: two files - Full data and Y train data
Unit Test Passed
"""
# Load All data
rawData = pd.read_csv(self.SapPath,error_bad_lines=False,encoding='latin1')
rawPRDHA = pd.read_csv(self.PrhdaPath,error_bad_lines=False,delimiter='\t')
# Only Select these fields
columns = ['Breit','Brgew','Hoehe','Laeng','Volum','Zzwerks','Ntgew','Material Description',
'Material','Ean11','Gmdnptdefinition','Gmdnptname','Unspsc','Prdha']
filterData,filterPRDHA = rawData[columns],rawPRDHA[['Prdha','Minor_name','Major_name']]
# 93 - UNSPSC 73 - UNSPSC
filterData = filterData.dropna().reset_index(drop=True)
filterPRDHA['Prdha'] = filterPRDHA['Prdha'].astype('O')
def prdha_zero(x):
x, num = str(x), len(str(x))
if num != 18:
return '0' * (18 - num) + str(x)
else:
return str(x)
# Fill 18 Digits and Extract First 3 char
filterData['Prdha'] = filterData['Prdha'].apply(prdha_zero)
filterPRDHA['Prdha'] = filterPRDHA['Prdha'].apply(prdha_zero)
# Merge All data
filterAll = pd.merge(filterData, filterPRDHA, right_on='Prdha', left_on='Prdha', how='inner')
filterAll['Material_top3'] = filterAll['Material'].apply(lambda x: x[:3])
y_train = pd.factorize(filterAll['Unspsc'])
print('Data Preparation Finished')
return filterAll,y_train
def Token(self,filterAll,Filed):
'''
Word Embedding files here.
:param Filed: support "material","gmdn","prdha"
:return:
Unit Test Passed
'''
if Filed == 'material':
pre_train = filterAll['Material Description'].apply(str).str.lower()
pre_test = filterAll['Material Description'].apply(str).str.lower()
            max_features = 4000  # If you want to change this, check the embedding matrix length first
if Filed == 'gmdn':
rawGMDN = pd.read_csv(self.GmdnPath, error_bad_lines=False, encoding='utf8')
pre_train = rawGMDN['Short Desp'].apply(str).str.lower()
pre_test = filterAll['Gmdnptname'].apply(str).str.lower()
max_features = 700
if Filed == 'prdha':
rawPRDHA = pd.read_csv(self.PrhdaPath, error_bad_lines=False, delimiter='\t')
pre_train = rawPRDHA['Minor_name'].apply(str).str.lower()
pre_test = filterAll['Minor_name'].apply(str).str.lower()
max_features = 1000
tokenizer = text.Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(pre_train))
x_train = unspsc.tokenSide(self,tokenizer,pre_train)
x_test = unspsc.tokenSide(self,tokenizer,pre_test)
return x_train, x_test, tokenizer, max_features
def TokenInput(self,dataset,Filed):
'''
Word Embedding files here.
:param Filed: support "material","gmdn","prdha"
:return:
Unit Test Passed
'''
if Filed == 'material':
pre_train = filterAll['Material Description'].apply(str).str.lower()
pre_test = dataset['Material Description'].apply(str).str.lower()
max_features = 4000
if Filed == 'gmdn':
rawGMDN = pd.read_csv(self.GmdnPath, error_bad_lines=False, encoding='utf8')
pre_train = rawGMDN['Short Desp'].apply(str).str.lower()
pre_test = dataset['Gmdnptname'].apply(str).str.lower()
max_features = 700
if Filed == 'prdha':
            rawPRDHA = pd.read_csv(self.PrhdaPath, error_bad_lines=False, delimiter='\t')
# -*- coding: utf-8 -*-
"""
Zerodha Kite Connect - candlestick pattern scanner
@author: <NAME> (http://rasuquant.com/wp/)
"""
from kiteconnect import KiteConnect
import pandas as pd
import datetime as dt
import os
import time
import numpy as np
from technicalta import *
#cwd = os.chdir("D:\\Udemy\\Zerodha KiteConnect API\\1_account_authorization")
apikey = '<KEY>'
#generate trading session
'''access_token = open("access_token.txt",'r').read()
key_secret = open("api_key.txt",'r').read().split()
kite = KiteConnect(api_key=key_secret[0])
kite.set_access_token(access_token)
#get dump of all NSE instruments
instrument_dump = kite.instruments("NSE")
instrument_df = pd.DataFrame(instrument_dump)
'''
def instrumentLookup(instrument_df,symbol):
"""Looks up instrument token for a given script from instrument dump"""
try:
return instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]
except:
return -1
def fetchOHLC(ticker,interval,duration):
"""extracts historical data and outputs in the form of dataframe"""
instrument = instrumentLookup(instrument_df,ticker)
data = pd.DataFrame(kite.historical_data(instrument,dt.date.today()-dt.timedelta(duration), dt.date.today(),interval))
data.set_index("date",inplace=True)
return data
def doji(ohlc_df):
"""returns dataframe with doji candle column"""
df = ohlc_df.copy()
    avg_candle_size = abs(df["close"] - df["open"]).median()
df["doji"] = abs(df["close"] - df["open"]) <= (0.05 * avg_candle_size)
return df
def maru_bozu(ohlc_df):
"""returns dataframe with maru bozu candle column"""
df = ohlc_df.copy()
avg_candle_size = abs(df["close"] - df["open"]).median()
df["h-c"] = df["high"]-df["close"]
df["l-o"] = df["low"]-df["open"]
df["h-o"] = df["high"]-df["open"]
df["l-c"] = df["low"]-df["close"]
df["maru_bozu"] = np.where((df["close"] - df["open"] > 2*avg_candle_size) & \
(df[["h-c","l-o"]].max(axis=1) < 0.005*avg_candle_size),"maru_bozu_green",
np.where((df["open"] - df["close"] > 2*avg_candle_size) & \
(abs(df[["h-o","l-c"]]).max(axis=1) < 0.005*avg_candle_size),"maru_bozu_red",False))
df.drop(["h-c","l-o","h-o","l-c"],axis=1,inplace=True)
return df
def hammer(ohlc_df):
"""returns dataframe with hammer candle column"""
df = ohlc_df.copy()
df["hammer"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["close"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["open"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df
def shooting_star(ohlc_df):
"""returns dataframe with shooting star candle column"""
df = ohlc_df.copy()
df["sstar"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["high"] - df["close"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["high"] - df["open"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df
def levels(ohlc_day):
"""returns pivot point and support/resistance levels"""
high = round(ohlc_day["high"][-1],2)
low = round(ohlc_day["low"][-1],2)
close = round(ohlc_day["close"][-1],2)
pivot = round((high + low + close)/3,2)
r1 = round((2*pivot - low),2)
r2 = round((pivot + (high - low)),2)
r3 = round((high + 2*(pivot - low)),2)
s1 = round((2*pivot - high),2)
s2 = round((pivot - (high - low)),2)
s3 = round((low - 2*(high - pivot)),2)
return (pivot,r1,r2,r3,s1,s2,s3)
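# Worked example (toy numbers, illustration only): with high=110, low=100, close=105
#   pivot = (110 + 100 + 105) / 3 = 105.0
#   r1 = 2*105 - 100 = 110.0,  r2 = 105 + (110 - 100) = 115.0,  r3 = 110 + 2*(105 - 100) = 120.0
#   s1 = 2*105 - 110 = 100.0,  s2 = 105 - (110 - 100) = 95.0,   s3 = 100 - 2*(110 - 105) = 90.0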
def trend(ohlc_df,n):
"function to assess the trend by analyzing each candle"
df = ohlc_df.copy()
df["up"] = np.where(df["low"]>=df["low"].shift(1),1,0)
df["dn"] = np.where(df["high"]<=df["high"].shift(1),1,0)
if df["close"][-1] > df["open"][-1]:
if df["up"][-1*n:].sum() >= 0.7*n:
return "uptrend"
elif df["open"][-1] > df["close"][-1]:
if df["dn"][-1*n:].sum() >= 0.7*n:
return "downtrend"
else:
return None
def res_sup(ohlc_df,ohlc_day):
"""calculates closest resistance and support levels for a given candle"""
level = ((ohlc_df["close"][-1] + ohlc_df["open"][-1])/2 + (ohlc_df["high"][-1] + ohlc_df["low"][-1])/2)/2
p,r1,r2,r3,s1,s2,s3 = levels(ohlc_day)
l_r1=level-r1
l_r2=level-r2
l_r3=level-r3
l_p=level-p
l_s1=level-s1
l_s2=level-s2
l_s3=level-s3
lev_ser = pd.Series([l_p,l_r1,l_r2,l_r3,l_s1,l_s2,l_s3],index=["p","r1","r2","r3","s1","s2","s3"])
sup = lev_ser[lev_ser>0].idxmin()
res = lev_ser[lev_ser>0].idxmax()
return (eval('{}'.format(res)), eval('{}'.format(sup)))
def candle_type(ohlc_df):
"""returns the candle type of the last candle of an OHLC DF"""
'''ohlc_df['open']=int(ohlc_df['open'])
ohlc_df['close']=int(ohlc_df['close'])
ohlc_df['high']=int(ohlc_df['high'])
ohlc_df['low']=int(ohlc_df['low'])'''
candle = None
if doji(ohlc_df)["doji"][-1] == True:
candle = "doji"
if maru_bozu(ohlc_df)["maru_bozu"][-1] == "maru_bozu_green":
candle = "maru_bozu_green"
if maru_bozu(ohlc_df)["maru_bozu"][-1] == "maru_bozu_red":
candle = "maru_bozu_red"
if shooting_star(ohlc_df)["sstar"][-1] == True:
candle = "shooting_star"
if hammer(ohlc_df)["hammer"][-1] == True:
candle = "hammer"
return candle
def candle_pattern(ohlc_df,ohlc_day):
"""returns the candle pattern identified"""
pattern = None
signi = "low"
avg_candle_size = abs(ohlc_df["close"] - ohlc_df["open"]).median()
sup, res = res_sup(ohlc_df,ohlc_day)
if (sup - 1.5*avg_candle_size) < ohlc_df["close"][-1] < (sup + 1.5*avg_candle_size):
signi = "HIGH"
if (res - 1.5*avg_candle_size) < ohlc_df["close"][-1] < (res + 1.5*avg_candle_size):
signi = "HIGH"
if candle_type(ohlc_df) == 'doji' \
and ohlc_df["close"][-1] > ohlc_df["close"][-2] \
and ohlc_df["close"][-1] > ohlc_df["open"][-1]:
pattern = "doji_bullish"
if candle_type(ohlc_df) == 'doji' \
and ohlc_df["close"][-1] < ohlc_df["close"][-2] \
and ohlc_df["close"][-1] < ohlc_df["open"][-1]:
pattern = "doji_bearish"
if candle_type(ohlc_df) == "maru_bozu_green":
pattern = "maru_bozu_bullish"
if candle_type(ohlc_df) == "maru_bozu_red":
pattern = "maru_bozu_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" and candle_type(ohlc_df) == "hammer":
pattern = "hanging_man_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" and candle_type(ohlc_df) == "hammer":
pattern = "hammer_bullish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" and candle_type(ohlc_df) == "shooting_star":
pattern = "shooting_star_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" \
and candle_type(ohlc_df) == "doji" \
and ohlc_df["high"][-1] < ohlc_df["close"][-2] \
and ohlc_df["low"][-1] > ohlc_df["open"][-2]:
pattern = "harami_cross_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" \
and candle_type(ohlc_df) == "doji" \
and ohlc_df["high"][-1] < ohlc_df["open"][-2] \
and ohlc_df["low"][-1] > ohlc_df["close"][-2]:
pattern = "harami_cross_bullish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" \
and candle_type(ohlc_df) != "doji" \
and ohlc_df["open"][-1] > ohlc_df["high"][-2] \
and ohlc_df["close"][-1] < ohlc_df["low"][-2]:
pattern = "engulfing_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" \
and candle_type(ohlc_df) != "doji" \
and ohlc_df["close"][-1] > ohlc_df["high"][-2] \
and ohlc_df["open"][-1] < ohlc_df["low"][-2]:
pattern = "engulfing_bullish"
return "Significance - {}, Pattern - {}".format(signi,pattern)
##############################################################################################
tickers = ["ZEEL","WIPRO","VEDL","ULTRACEMCO","UPL","TITAN","TECHM","TATASTEEL",
"TATAMOTORS","TCS","SUNPHARMA","SBIN","SHREECEM","RELIANCE","POWERGRID",
"ONGC","NESTLEIND","NTPC","MARUTI","M&M","LT","KOTAKBANK","JSWSTEEL","INFY",
"INDUSINDBK","IOC","ITC","ICICIBANK","HDFC","HINDUNILVR","HINDALCO",
"HEROMOTOCO","HDFCBANK","HCLTECH","GRASIM","GAIL","EICHERMOT","DRREDDY",
"COALINDIA","CIPLA","BRITANNIA","INFRATEL","BHARTIARTL","BPCL","BAJAJFINSV",
"BAJFINANCE","BAJAJ-AUTO","AXISBANK","ASIANPAINT","ADANIPORTS","IDEA",
"MCDOWELL-N","UBL","NIACL","SIEMENS","SRTRANSFIN","SBILIFE","PNB",
"PGHH","PFC","PEL","PIDILITIND","PETRONET","PAGEIND","OFSS","NMDC","NHPC",
"MOTHERSUMI","MARICO","LUPIN","L&TFH","INDIGO","IBULHSGFIN","ICICIPRULI",
"ICICIGI","HINDZINC","HINDPETRO","HAVELLS","HDFCLIFE","HDFCAMC","GODREJCP",
"GICRE","DIVISLAB","DABUR","DLF","CONCOR","COLPAL","CADILAHC","BOSCHLTD",
"BIOCON","BERGEPAINT","BANKBARODA","BANDHANBNK","BAJAJHLDNG","DMART",
"AUROPHARMA","ASHOKLEY","AMBUJACEM","ADANITRANS","ACC",
"WHIRLPOOL","WABCOINDIA","VOLTAS","VINATIORGA","VBL","VARROC","VGUARD",
"UNIONBANK","UCOBANK","TRENT","TORNTPOWER","TORNTPHARM","THERMAX","RAMCOCEM",
"TATAPOWER","TATACONSUM","TVSMOTOR","TTKPRESTIG","SYNGENE","SYMPHONY",
"SUPREMEIND","SUNDRMFAST","SUNDARMFIN","SUNTV","STRTECH","SAIL","SOLARINDS",
"SHRIRAMCIT","SCHAEFFLER","SANOFI","SRF","SKFINDIA","SJVN","RELAXO",
"RAJESHEXPO","RECLTD","RBLBANK","QUESS","PRESTIGE","POLYCAB","PHOENIXLTD",
"PFIZER","PNBHOUSING","PIIND","OIL","OBEROIRLTY","NAM-INDIA","NATIONALUM",
"NLCINDIA","NBCC","NATCOPHARM","MUTHOOTFIN","MPHASIS","MOTILALOFS","MINDTREE",
"MFSL","MRPL","MANAPPURAM","MAHINDCIE","M&MFIN","MGL","MRF","LTI","LICHSGFIN",
"LTTS","KANSAINER","KRBL","JUBILANT","JUBLFOOD","JINDALSTEL","JSWENERGY",
"IPCALAB","NAUKRI","IGL","IOB","INDHOTEL","INDIANB","IBVENTURES","IDFCFIRSTB",
"IDBI","ISEC","HUDCO","HONAUT","HAL","HEXAWARE","HATSUN","HEG","GSPL",
"GUJGASLTD","GRAPHITE","GODREJPROP","GODREJIND","GODREJAGRO","GLENMARK",
"GLAXO","GILLETTE","GMRINFRA","FRETAIL","FCONSUMER","FORTIS","FEDERALBNK",
"EXIDEIND","ESCORTS","ERIS","ENGINERSIN","ENDURANCE","EMAMILTD","EDELWEISS",
"EIHOTEL","LALPATHLAB","DALBHARAT","CUMMINSIND","CROMPTON","COROMANDEL","CUB",
"CHOLAFIN","CHOLAHLDNG","CENTRALBK","CASTROLIND","CANBK","CRISIL","CESC",
"BBTC","BLUEDART","BHEL","BHARATFORG","BEL","BAYERCROP","BATAINDIA",
"BANKINDIA","BALKRISIND","ATUL","ASTRAL","APOLLOTYRE","APOLLOHOSP",
"AMARAJABAT","ALKEM","APLLTD","AJANTPHARM","ABFRL","ABCAPITAL","ADANIPOWER",
"ADANIGREEN","ADANIGAS","ABBOTINDIA","AAVAS","AARTIIND","AUBANK","AIAENG","3MINDIA"]
def main():
for ticker in tickers:
try:
ohlc = fetchOHLC(ticker, '5minute',5)
ohlc_day = fetchOHLC(ticker, 'day',30)
ohlc_day = ohlc_day.iloc[:-1,:]
cp = candle_pattern(ohlc,ohlc_day)
print(ticker, ": ",cp)
except:
print("skipping for ",ticker)
'''
# Continuous execution
starttime=time.time()
timeout = time.time() + 60*60*1 # 60 seconds times 60 meaning the script will run for 1 hr
while time.time() <= timeout:
try:
print("passthrough at ",time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
main()
time.sleep(300 - ((time.time() - starttime) % 300.0)) # 300 second interval between each new execution
except KeyboardInterrupt:
print('\n\nKeyboard exception received. Exiting.')
exit()'''
from pprint import pprint
def AlphaData_fxintraday(frombase,to,interval):
import requests
import json
from pprint import pprint
global apikey
url="https://www.alphavantage.co/query?function=FX_INTRADAY&from_symbol={}&to_symbol={}&interval={}min&apikey={}".format(frombase,to,interval,apikey)
data=requests.get(url).json()
#pprint(dict(data['Time Series FX ({}min)'.format(interval)]))#['2020-07-31 20:20:00'])#['4. close'])
import pandas as pd
try:
if data:
data=data['Time Series FX ({}min)'.format(interval)]
df=pd.DataFrame(data).T
df['open']=df['1. open']
df['high']=df['2. high']
df['low']=df['3. low']
df['close']=df['4. close']
df=df.drop(['1. open','2. high','3. low', '4. close'], axis=1)
return df#data['Time Series FX ({}min)'.format(interval)]
except:
print("An exception occurred")
frombase=['EUR','USD','GBP','AUD','EUR']
to=['USD','JPY','CAD','CNY','CHF','HKD','GBP','KRW']
'''
for j in frombase:
for i in to:
pprint('{}/{} in process'.format(i,j))
data=AlphaData_intraday(i,j,60)
pprint('{}/{} Done'.format(i,j))
time.sleep(30)
'''
def AlphaData_fxdaily(frombase,to):
import requests
import json
from pprint import pprint
global apikey
url="https://www.alphavantage.co/query?function=FX_DAILY&from_symbol={}&to_symbol={}&apikey={}".format(frombase,to,apikey)
#url="https://www.alphavantage.co/query?function=FX_INTRADAY&from_symbol={}&to_symbol={}&interval={}min&apikey={}".format(frombase,to,interval,apikey)
data=requests.get(url).json()
#pprint(dict(data['Time Series FX ({}min)'.format(interval)]))#['2020-07-31 20:20:00'])#['4. close'])
import pandas as pd
try:
if data:
data=data['Time Series FX (Daily)']
df=pd.DataFrame(data).T
df['open']=df['1. open']
df['high']=df['2. high']
df['low']=df['3. low']
df['close']=df['4. close']
df=df.drop(['1. open','2. high','3. low', '4. close'], axis=1)
return df#data['Time Series FX ({}min)'.format(interval)]
except:
print("An exception occurred")
'''
for j in frombase:
for i in to:
pprint('{}/{} in process'.format(i,j))
dataintra=AlphaData_intraday(i,j,5)
datadaily=AlphaData_daily(i,j)
pprint(dataintra)
if len(dataintra) > 0:
if len(datadaily) > 0 :
pprint(candle_type(dataintra))
#cp = candle_pattern(dataintra,datadaily)
pprint('{}/{} Done'.format(i,j))
time.sleep(5)'''
'''
for j in frombase:
for i in to:
pprint('{}/{} in process'.format(i,j))
data=AlphaData_daily(i,j)
pprint('{}/{} Done'.format(i,j))
time.sleep(5)
'''
def AlphaData_intraday(symbol,interval):
import requests
import json
from pprint import pprint
global apikey
url="https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol={}&interval={}min&apikey={}".format(symbol,interval,apikey)
data=requests.get(url).json()
#pprint(dict(data['Time Series FX ({}min)'.format(interval)]))#['2020-07-31 20:20:00'])#['4. close'])
import pandas as pd
try:
if data:
data=data['Time Series ({}min)'.format(interval)]
df=pd.DataFrame(data).T
df['open']=df['1. open']
df['high']=df['2. high']
df['low']=df['3. low']
df['close']=df['4. close']
            df['volume']=df['5. volume']
df=df.drop(['1. open','2. high','3. low', '4. close','5. volume'], axis=1)
df['open']=pd.to_numeric(df['open'])
df['high']=pd.to_numeric(df['high'])
df['low']=pd.to_numeric(df['low'])
df['close']=pd.to_numeric(df['close'])
df['volume']=pd.to_numeric(df['volume'])
return df#data['Time Series FX ({}min)'.format(interval)]
except:
print("An exception occurred")
def AlphaData_daily(symbol):
import requests
import json
from pprint import pprint
global apikey
url="https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&apikey={}".format(symbol,apikey)
#url="https://www.alphavantage.co/query?function=FX_INTRADAY&from_symbol={}&to_symbol={}&interval={}min&apikey={}".format(frombase,to,interval,apikey)
data=requests.get(url).json()
#pprint(dict(data['Time Series FX ({}min)'.format(interval)]))#['2020-07-31 20:20:00'])#['4. close'])
import pandas as pd
try:
if data:
data=data['Time Series (Daily)']
df=pd.DataFrame(data).T
df['open']=df['1. open']
df['high']=df['2. high']
df['low']=df['3. low']
df['close']=df['4. close']
df['volume']=df['5. volume']
df=df.drop(['1. open','2. high','3. low', '4. close','5. volume'], axis=1)
df['open']=pd.to_numeric(df['open'])
df['high']=pd.to_numeric(df['high'])
df['low']=pd.to_numeric(df['low'])
df['close']=pd.to_numeric(df['close'])
df['volume']=pd.to_numeric(df['volume'])
return df#data['Time Series FX ({}min)'.format(interval)]
except:
print("An exception occurred")
''''
for i in to:
pprint('{}/{} in process'.format(i,j))
dataintra=AlphaData_intraday(i,5)
datadaily=AlphaData_daily(i)
pprint(dataintra)
if len(dataintra) > 0:
if len(datadaily) > 0 :
pprint(candle_type(dataintra))
#cp = candle_pattern(dataintra,datadaily)
pprint('{}/{} Done'.format(i,j))
time.sleep(5)'''
ticks=['atvi','adbe','amd','alxn','algn','goog','googl','amzn','amgn','adi','anss','aapl','amat','asml','adsk','adp','bidu','biib','bmrn','bkng','avgo','cdns','cdw','cern','chtr','chkp','ctas','csco','ctxs','ctsh','cmcsa','cprt','cost','csx','dxcm','docu','dltr','ebay','ea','exc','expe','fb','fast','fisv','fox','foxa','gild','idxx','ilmn','incy','intc','intu','isrg','jd','klac','lrcx','lbtya','lbtyk','lulu','mar','mxim','meli','mchp','mu','msft','mrna','mdlz','mnst','ntap','ntes','nflx','nvda','nxpi','orly','pcar','payx','pypl','pep','qcom','regn','rost','sgen','siri','swks','splk','sbux','snps','tmus','ttwo','tsla','txn','khc','tcom','ulta','vrsn','vrsk','vrtx','wba','wdc','wday','xel','xlnx','zm']
patterns=['Two Crows',
'Three Black Crows',
'Three Inside Up/Down',
'Three-Line Strike',
'Three Outside Up/Down',
'Three Stars In The South',
'Three Advancing White Soldiers',
'Abandoned Baby',
'Advance Block',
'Belt-hold',
'Breakaway',
'Closing Marubozu',
'Concealing Baby Swallow',
'Counterattack',
'Dark Cloud Cover',
'Doji',
'Doji Star',
'Dragonfly Doji',
'Engulfing Pattern',
'Evening Doji Star',
'Evening Star',
'Up/Down-gap side-by-side white lines',
'Gravestone Doji',
'Hammer',
'Hanging Man',
'Harami Pattern',
'Harami Cross Pattern',
'High-Wave Candle',
'Hikkake Pattern',
'Modified Hikkake Pattern',
'Homing Pigeon',
'Identical Three Crows',
'In-Neck Pattern',
'Inverted Hammer',
'Kicking',
'Kicking - bull/bear',
'Ladder Bottom',
'Long Legged Doji',
'Long Line Candle',
'Marubozu',
'Matching Low',
'Mat Hold',
'Morning Doji Star',
'Morning Star',
'On-Neck Pattern',
'Piercing Pattern',
'Rickshaw Man',
'Rising/Falling Three Methods',
'Separating Lines',
'Shooting Star',
'Short Line Candle',
'Spinning Top',
'Stalled Pattern',
'Stick Sandwich',
'Takuri',
'Tasuki Gap',
'Thrusting Pattern',
'Tristar Pattern',
'Unique 3 River',
'Upside Gap Two Crows',
'Upside/Downside Gap Three Methods']
def texterconversion(text):
tex=text.replace('/','').replace('-','_').replace(' ','_').replace('(','').replace(')','')
return tex
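# Example: texterconversion maps the human-readable names in `patterns` onto the identifiers
# used by technical_lib below, e.g.
# texterconversion('Three Inside Up/Down') -> 'Three_Inside_UpDown'
# texterconversion('Dark Cloud Cover') -> 'Dark_Cloud_Cover'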
def technical_lib(technical,df):
open=df['open']
high=df['high']
low=df['low']
close=df['close']
if technical == 'Two_Crows':
tech=Two_Crows(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Three_Black_Crows':
tech=Three_Black_Crows(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Three_Inside_UpDown':
tech=Three_Inside_UpDown(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Three_Line_Strike':
tech=Three_Line_Strike(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Three_Outside_UpDown':
tech=Three_Outside_UpDown(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Three_Stars_In_The_South':
tech=Three_Stars_In_The_South(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Three_Advancing_White_Soldiers':
tech=Three_Advancing_White_Soldiers(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Abandoned_Baby':
tech=Abandoned_Baby(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Advance_Block':
tech=Advance_Block(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Belt_hold':
tech=Belt_hold(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
elif technical == 'Breakaway':
tech=Breakaway(open,high,low,close)
tech=pd.DataFrame(tech)
tech[0]=(np.where(tech[0] > 0,'{}_bullish'.format(technical),np.where(tech[0] < 0,'{}_bearish'.format(technical),0)))
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
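# Note: as used in the validators below, add_CLA_column accepts either a single table name
# (e.g. add_CLA_column(dfs, 'OC2')) or a list of names (e.g. add_CLA_column(dfs, ['AD1', 'OC3'])),
# returning one DataFrame per requested table.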
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for mother status is invalid.',
affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
valid_values = ['0','1']
# prepare to merge
oc3.reset_index(inplace=True)
header.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end)
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left')
# Raise error if provided <MOTHER> is not a valid value.
value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values))
# If not provided
female = (merged['SEX']=='1')
eps_in_year = (merged['EPS_COUNT']>0)
none_provided = (merged['ACTIV'].isna()& merged['ACCOM'].isna()& merged['IN_TOUCH'].isna())
# If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided
mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided)))
# That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error.
error_locs_eps = merged.loc[mask, 'index_eps']
error_locs_header = merged.loc[mask, 'index_er']
error_locs_oc3 = merged.loc[mask, 'index']
return {'Header':error_locs_header.dropna().unique().tolist(),
'OC3':error_locs_oc3.dropna().unique().tolist()}
return error, _validate
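# Minimal invocation sketch (assumption: `dfs` is a dict of DataFrames keyed by table name plus a
# 'metadata' dict, which is what every _validate below expects; the frames here are placeholders):
# error_def, check = validate_165()
# issues = check({'Header': header_df, 'Episodes': episodes_df, 'OC3': oc3_df,
#                 'metadata': {'collection_start': '01/04/2020', 'collection_end': '31/03/2021'}})
# `issues` maps table names to lists of offending row indices.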
def validate_1014():
error = ErrorDefinition(
code='1014',
description='UASC information is not required for care leavers',
affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
uasc = dfs['UASC']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
# prepare to merge
oc3.reset_index(inplace=True)
uasc.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
date_check = (
((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end))
| ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end))
| ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna())
)
episodes['EPS'] = date_check
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
# inner merge to take only episodes of children which are also found on the uasc table
merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD',
how='left')
# adding suffixes with the secondary merge here does not go so well yet.
some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna())
mask = (merged['EPS_COUNT'] == 0) & some_provided
error_locs_uasc = merged.loc[mask, 'index_sc']
error_locs_oc3 = merged.loc[mask, 'index']
return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()}
return error, _validate
# !# not sure what this rule is actually supposed to be getting at - description is confusing
def validate_197B():
error = ErrorDefinition(
code='197B',
description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
(
(oc2['DOB'] + pd.DateOffset(years=4) == start) # ???
| (oc2['DOB'] + pd.DateOffset(years=17) == start)
)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
& oc2['SDQ_REASON'].isna()
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_157():
error = ErrorDefinition(
code='157',
description="Child is aged 4 years or over at the beginning of the year or 16 years or under at the end of the "
"year and Strengths and Difficulties Questionnaire (SDQ) 1 has been recorded as the reason for no "
"Strengths and Difficulties Questionnaire (SDQ) score.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
oc2['CONTINUOUSLY_LOOKED_AFTER']
& (oc2['DOB'] + pd.DateOffset(years=4) <= start)
& (oc2['DOB'] + pd.DateOffset(years=16) >= endo)
& oc2['SDQ_SCORE'].isna()
& (oc2['SDQ_REASON'] == 'SDQ1')
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_357():
error = ErrorDefinition(
code='357',
description='If this is the first episode ever for this child, reason for new episode must be S. '
'Check whether there is an episode immediately preceding this one, which has been left out. '
'If not the reason for new episode code must be amended to S.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
eps = dfs['Episodes']
eps['DECOM'] = pd.to_datetime(eps['DECOM'], format='%d/%m/%Y', errors='coerce')
eps = eps.loc[eps['DECOM'].notnull()]
first_eps = eps.loc[eps.groupby('CHILD')['DECOM'].idxmin()]
errs = first_eps[first_eps['RNE'] != 'S'].index.to_list()
return {'Episodes': errs}
return error, _validate
def validate_117():
error = ErrorDefinition(
code='117',
description='Date of decision that a child should/should no longer be placed for adoption is beyond the current collection year or after the child ceased to be looked after.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_PLACED', 'DEC', 'REC', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placed_adoption = dfs['PlacedAdoption']
collection_end = dfs['metadata']['collection_end']
# datetime
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# Drop nans and continuing episodes
episodes = episodes.dropna(subset=['DECOM'])
episodes = episodes[episodes['REC'] != 'X1']
episodes = episodes.loc[episodes.groupby('CHILD')['DECOM'].idxmax()]
# prepare to merge
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
p4a_cols = ['DATE_PLACED', 'DATE_PLACED_CEASED']
# latest episodes
merged = episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
mask = (
(merged['DATE_PLACED'] > collection_end)
| (merged['DATE_PLACED'] > merged['DEC'])
| (merged['DATE_PLACED_CEASED'] > collection_end)
| (merged['DATE_PLACED_CEASED'] > merged['DEC'])
)
# If provided <DATE_PLACED> and/or <DATE_PLACED_CEASED> must not be > <COLLECTION_END_DATE> or <DEC> of latest episode where <REC> not = 'X1'
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_118():
error = ErrorDefinition(
code='118',
description='Date of decision that a child should no longer be placed for adoption is before the current collection year or before the date the child started to be looked after.',
affected_fields=['DATE_PLACED_CEASED', 'DECOM', 'LS']
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
code_list = ['V3', 'V4']
# datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
filter_by_ls = episodes[~(episodes['LS'].isin(code_list))]
earliest_episode_idxs = filter_by_ls.groupby('CHILD')['DECOM'].idxmin()
earliest_episodes = episodes[episodes.index.isin(earliest_episode_idxs)]
# prepare to merge
placed_adoption.reset_index(inplace=True)
earliest_episodes.reset_index(inplace=True)
# merge
merged = earliest_episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
# drop rows where DATE_PLACED_CEASED is not provided
merged = merged.dropna(subset=['DATE_PLACED_CEASED'])
# If provided <DATE_PLACED_CEASED> must not be prior to <COLLECTION_START_DATE> or <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
mask = (merged['DATE_PLACED_CEASED'] < merged['DECOM']) | (merged['DATE_PLACED_CEASED'] < collection_start)
# error locations
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_352():
error = ErrorDefinition(
code='352',
description='Child who started to be looked after was aged 18 or over.',
affected_fields=['DECOM', 'RNE'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
care_start = episodes_merged['RNE'].str.upper().astype(str).isin(['S'])
started_over_18 = episodes_merged['DOB18'] <= episodes_merged['DECOM']
error_mask = care_start & started_over_18
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_209():
error = ErrorDefinition(
code='209',
description='Child looked after is of school age and should not have an unknown Unique Pupil Number (UPN) code of UN1.',
affected_fields=['UPN', 'DOB']
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
collection_start = dfs['metadata']['collection_start']
# convert to datetime
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
yr = collection_start.year - 1
reference_date = pd.to_datetime('31/08/' + str(yr), format='%d/%m/%Y', errors='coerce')
# If <DOB> >= 4 years prior to 31/08/YYYY then <UPN> should not be 'UN1' Note: YYYY in this instance refers to the year prior to the collection start (for collection year 2019-2020, it would be looking at the 31/08/2018).
mask = (reference_date >= (header['DOB'] + pd.offsets.DateOffset(years=4))) & (header['UPN'] == 'UN1')
# error locations
error_locs_header = header.index[mask]
return {'Header': error_locs_header.tolist()}
return error, _validate
def validate_198():
error = ErrorDefinition(
code='198',
description="Child has not been looked after continuously for at least 12 months at 31 March but a reason "
"for no Strengths and Difficulties (SDQ) score has been completed. ",
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_REASON'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_185():
error = ErrorDefinition(
code='185',
description="Child has not been looked after continuously for at least 12 months at " +
"31 March but a Strengths and Difficulties (SDQ) score has been completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_186():
error = ErrorDefinition(
code='186',
description="Children aged 4 or over at the start of the year and children aged under 17 at the " +
"end of the year and who have been looked after for at least 12 months continuously " +
"should have a Strengths and Difficulties (SDQ) score completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_start_str = dfs['metadata']['collection_start']
collection_end_str = dfs['metadata']['collection_end']
collection_start = pd.to_datetime(collection_start_str, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2 = add_CLA_column(dfs, 'OC2')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
oc2['17th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=17)
error_mask = (
(oc2['4th_bday'] <= collection_start)
& (oc2['17th_bday'] > collection_end)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_187():
error = ErrorDefinition(
code='187',
description="Child cannot be looked after continuously for 12 months at " +
"31 March (OC2) and have any of adoption or care leavers returns completed.",
affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR', # AD1
'IN_TOUCH', 'ACTIV', 'ACCOM'], # OC3
)
def _validate(dfs):
if (
'OC3' not in dfs
or 'AD1' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
ad1, oc3 = add_CLA_column(dfs, ['AD1', 'OC3'])
# OC3
should_be_blank = ['IN_TOUCH', 'ACTIV', 'ACCOM']
oc3_mask = oc3['CONTINUOUSLY_LOOKED_AFTER'] & oc3[should_be_blank].notna().any(axis=1)
oc3_error_locs = oc3[oc3_mask].index.to_list()
# AD1
should_be_blank = ['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR']
ad1_mask = ad1['CONTINUOUSLY_LOOKED_AFTER'] & ad1[should_be_blank].notna().any(axis=1)
ad1_error_locs = ad1[ad1_mask].index.to_list()
return {'AD1': ad1_error_locs,
'OC3': oc3_error_locs}
return error, _validate
def validate_188():
error = ErrorDefinition(
code='188',
description="Child is aged under 4 years at the end of the year, "
"but a Strengths and Difficulties (SDQ) score or a reason "
"for no SDQ score has been completed. ",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_end_str = dfs['metadata']['collection_end']
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
error_mask = (
(oc2['4th_bday'] > collection_end)
& oc2[['SDQ_SCORE', 'SDQ_REASON']].notna().any(axis=1)
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_190():
error = ErrorDefinition(
code='190',
description="Child has not been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been completed.",
affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
, # OC2
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_blank = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
mask = ~oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_blank].notna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_191():
error = ErrorDefinition(
code='191',
description="Child has been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been left blank.",
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'], # OC2
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_present = ['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE']
mask = oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_present].isna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_607():
error = ErrorDefinition(
code='607',
description='Child ceased to be looked after in the year, but mother field has not been completed.',
affected_fields=['DEC', 'REC', 'MOTHER', 'LS', 'SEX']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
code_list = ['V3', 'V4']
# convert to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# CEASED_TO_BE_LOOKED_AFTER = DEC is not null and REC is filled but not equal to X1
CEASED_TO_BE_LOOKED_AFTER = merged['DEC'].notna() & ((merged['REC'] != 'X1') & merged['REC'].notna())
# and <LS> not = 'V3' or 'V4'
check_LS = ~(merged['LS'].isin(code_list))
# and <DEC> is in <CURRENT_COLLECTION_YEAR
check_DEC = (collection_start <= merged['DEC']) & (merged['DEC'] <= collection_end)
# Where <CEASED_TO_BE_LOOKED_AFTER> = 'Y', and <LS> not = 'V3' or 'V4' and <DEC> is in <CURRENT_COLLECTION_YEAR> and <SEX> = '2' then <MOTHER> should be provided.
mask = CEASED_TO_BE_LOOKED_AFTER & check_LS & check_DEC & (merged['SEX'] == '2') & (merged['MOTHER'].isna())
header_error_locs = merged.loc[mask, 'index_er']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_210():
error = ErrorDefinition(
code='210',
description='Children looked after for more than a week at 31 March should not have an unknown Unique Pupil Number (UPN) code of UN4.',
affected_fields=['UPN', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_end = dfs['metadata']['collection_end']
# convert to datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
yr = collection_end.year
reference_date = pd.to_datetime('24/03/' + str(yr), format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
# the logical way is to merge left on UPN but that will be a one to many merge and may not go as well as a many to one merge that we've been doing.
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# If <UPN> = 'UN4' then no episode may have <DECOM> >= 24/03/YYYY. Note: YYYY refers to the current collection year.
mask = (merged['UPN'] == 'UN4') & (merged['DECOM'] >= reference_date)
# error locations
error_locs_header = merged.loc[mask, 'index_er']
error_locs_eps = merged.loc[mask, 'index_eps']
return {'Episodes': error_locs_eps.tolist(), 'Header': error_locs_header.unique().tolist()}
return error, _validate
def validate_1010():
error = ErrorDefinition(
code='1010',
description='This child has no episodes loaded for current year even though there was an open episode of '
+ 'care at the end of the previous year, and care leaver data has been entered.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
oc3 = dfs['OC3']
# convert DECOM to datetime, drop missing/invalid sort by CHILD then DECOM,
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last = episodes_last.dropna(subset=['DECOM']).sort_values(['CHILD', 'DECOM'], ascending=True)
# Keep only the final episode for each child (ie where the following row has a different CHILD value)
episodes_last = episodes_last[
episodes_last['CHILD'].shift(-1) != episodes_last['CHILD']
]
# Keep only the final episodes that were still open
episodes_last = episodes_last[episodes_last['DEC'].isna()]
# The remaining children ought to have episode data in the current year if they are in OC3
has_current_episodes = oc3['CHILD'].isin(episodes['CHILD'])
has_open_episode_last = oc3['CHILD'].isin(episodes_last['CHILD'])
error_mask = ~has_current_episodes & has_open_episode_last
validation_error_locations = oc3.index[error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_525():
error = ErrorDefinition(
code='525',
description='A child for whom the decision to be placed for adoption has been reversed cannot be adopted during the year.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR',
'LS_ADOPTR']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs or 'AD1' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
ad1 = dfs['AD1']
# prepare to merge
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = placed_adoption.merge(ad1, on='CHILD', how='left', suffixes=['_placed', '_ad1'])
# If <DATE_PLACED_CEASED> not Null, then <DATE_INT>; <DATE_MATCH>; <FOSTER_CARE>; <NB_ADOPTR>; <SEX_ADOPTR>; and <LS_ADOPTR> should not be provided
mask = merged['DATE_PLACED_CEASED'].notna() & (
merged['DATE_INT'].notna() | merged['DATE_MATCH'].notna() | merged['FOSTER_CARE'].notna() |
merged['NB_ADOPTR'].notna() | merged['SEX_ADOPTR'].notna() | merged['LS_ADOPTR'].notna())
# error locations
pa_error_locs = merged.loc[mask, 'index_placed']
ad_error_locs = merged.loc[mask, 'index_ad1']
# return result
return {'PlacedAdoption': pa_error_locs.tolist(), 'AD1': ad_error_locs.tolist()}
return error, _validate
def validate_335():
error = ErrorDefinition(
code='335',
description='The current foster value (0) suggests that child is not adopted by current foster carer, but last placement is A2, A3, or A5. Or the current foster value (1) suggests that child is adopted by current foster carer, but last placement is A1, A4 or A6.',
affected_fields=['PLACE', 'FOSTER_CARE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'AD1' not in dfs:
return {}
else:
episodes = dfs['Episodes']
ad1 = dfs['AD1']
# prepare to merge
episodes.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = episodes.merge(ad1, on='CHILD', how='left', suffixes=['_eps', '_ad1'])
# Where <PL> = 'A2', 'A3' or 'A5' and <REC> = 'E1', 'E11', 'E12' <FOSTER_CARE> should not be '0'; Where <PL> = 'A1', 'A4' or 'A6' and <REC> = 'E1', 'E11', 'E12' <FOSTER_CARE> should not be '1'.
mask = (
merged['REC'].isin(['E1', 'E11', 'E12']) & (
(merged['PLACE'].isin(['A2', 'A3', 'A5']) & (merged['FOSTER_CARE'].astype(str) == '0'))
| (merged['PLACE'].isin(['A1', 'A4', 'A6']) & (merged['FOSTER_CARE'].astype(str) == '1'))
)
)
eps_error_locs = merged.loc[mask, 'index_eps']
ad1_error_locs = merged.loc[mask, 'index_ad1']
# use .unique since join is many to one
return {'Episodes': eps_error_locs.tolist(), 'AD1': ad1_error_locs.unique().tolist()}
return error, _validate
def validate_215():
error = ErrorDefinition(
code='215',
description='Child has care leaver information but one or more data items relating to children looked after for 12 months have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK',
'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
)
def _validate(dfs):
if 'OC3' not in dfs or 'OC2' not in dfs:
return {}
else:
oc3 = dfs['OC3']
oc2 = dfs['OC2']
# prepare to merge
oc3.reset_index(inplace=True)
oc2.reset_index(inplace=True)
merged = oc3.merge(oc2, on='CHILD', how='left', suffixes=['_3', '_2'])
# If any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided then <CONVICTED>; <HEALTH_CHECK>; <IMMUNISATIONS>; <TEETH_CHECK>; <HEALTH_ASSESSMENT>; <SUBSTANCE MISUSE>; <INTERVENTION_RECEIVED>; <INTERVENTION_OFFERED>; should not be provided
mask = (merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna()) & (
merged['CONVICTED'].notna() | merged['HEALTH_CHECK'].notna() | merged['IMMUNISATIONS'].notna() |
merged['TEETH_CHECK'].notna() | merged['HEALTH_ASSESSMENT'].notna() | merged[
'SUBSTANCE_MISUSE'].notna() | merged['INTERVENTION_RECEIVED'].notna() | merged[
'INTERVENTION_OFFERED'].notna())
# error locations
oc3_error_locs = merged.loc[mask, 'index_3']
oc2_error_locs = merged.loc[mask, 'index_2']
return {'OC3': oc3_error_locs.tolist(), 'OC2': oc2_error_locs.tolist()}
return error, _validate
def validate_399():
error = ErrorDefinition(
code='399',
description='Mother field, review field or participation field are completed but '
+ 'child is looked after under legal status V3 or V4.',
affected_fields=['MOTHER', 'LS', 'REVIEW', 'REVIEW_CODE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs or 'Reviews' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
reviews = dfs['Reviews']
code_list = ['V3', 'V4']
# prepare to merge
episodes['index_eps'] = episodes.index
header['index_hdr'] = header.index
reviews['index_revs'] = reviews.index
# merge
merged = (episodes.merge(header, on='CHILD', how='left')
.merge(reviews, on='CHILD', how='left'))
# If <LS> = 'V3' or 'V4' then <MOTHER>, <REVIEW> and <REVIEW_CODE> should not be provided
mask = merged['LS'].isin(code_list) & (
merged['MOTHER'].notna() | merged['REVIEW'].notna() | merged['REVIEW_CODE'].notna())
# Error locations
eps_errors = merged.loc[mask, 'index_eps']
header_errors = merged.loc[mask, 'index_hdr'].unique()
revs_errors = merged.loc[mask, 'index_revs'].unique()
return {'Episodes': eps_errors.tolist(),
'Header': header_errors.tolist(),
'Reviews': revs_errors.tolist()}
return error, _validate
def validate_189():
error = ErrorDefinition(
code='189',
description='Child is aged 17 years or over at the beginning of the year, but an Strengths and Difficulties '
+ '(SDQ) score or a reason for no Strengths and Difficulties (SDQ) score has been completed.',
affected_fields=['DOB', 'SDQ_SCORE', 'SDQ_REASON']
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
collection_start = dfs['metadata']['collection_start']
# datetime format allows appropriate comparison between dates
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# If <DOB> >17 years prior to <COLLECTION_START_DATE> then <SDQ_SCORE> and <SDQ_REASON> should not be provided
mask = ((oc2['DOB'] + pd.offsets.DateOffset(years=17)) <= collection_start) & (
oc2['SDQ_REASON'].notna() | oc2['SDQ_SCORE'].notna())
# That is, raise error if collection_start > DOB + 17years
oc_error_locs = oc2.index[mask]
return {'OC2': oc_error_locs.tolist()}
return error, _validate
def validate_226():
error = ErrorDefinition(
code='226',
description='Reason for placement change is not required.',
affected_fields=['REASON_PLACE_CHANGE', 'PLACE']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
code_list = ['T0', 'T1', 'T2', 'T3', 'T4']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# create column to see previous REASON_PLACE_CHANGE
episodes = episodes.sort_values(['CHILD', 'DECOM'])
episodes['PREVIOUS_REASON'] = episodes.groupby('CHILD')['REASON_PLACE_CHANGE'].shift(1)
# If <PL> = 'T0'; 'T1'; 'T2'; 'T3' or 'T4' then <REASON_PLACE_CHANGE> should be null in current episode and current episode - 1
mask = episodes['PLACE'].isin(code_list) & (
episodes['REASON_PLACE_CHANGE'].notna() | episodes['PREVIOUS_REASON'].notna())
# error locations
error_locs = episodes.index[mask]
return {'Episodes': error_locs.tolist()}
return error, _validate
def validate_358():
error = ErrorDefinition(
code='358',
description='Child with this legal status should not be under 10.',
affected_fields=['DECOM', 'DOB', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['J1', 'J2', 'J3']
# convert dates to datetime format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# Where <LS> = 'J1' or 'J2' or 'J3' then <DOB> should be 10 or more years prior to <DECOM>
mask = merged['LS'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=10) > merged['DECOM'])
# That is, raise error if the child was under 10 at DECOM (i.e. DECOM < DOB + 10 years)
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_407():
error = ErrorDefinition(
code='407',
description='Reason episode ceased is Special Guardianship Order, but child has reached age 18.',
affected_fields=['DEC', 'DOB', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['E45', 'E46', 'E47', 'E48']
# convert dates to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# If <REC> = 'E45' or 'E46' or 'E47' or 'E48' then <DOB> must be < 18 years prior to <DEC>
mask = merged['REC'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=18) < merged['DEC'])
# That is, raise error if DEC > DOB + 18 years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_1007():
error = ErrorDefinition(
code='1007',
description='Care leaver information is not required for 17- or 18-year-olds who are still looked after.',
affected_fields=['DEC', 'REC', 'DOB', 'IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_end = dfs['metadata']['collection_end']
# convert dates to datetime format
oc3['DOB'] = pd.to_datetime(oc3['DOB'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
oc3.reset_index(inplace=True)
merged = episodes.merge(oc3, on='CHILD', how='left', suffixes=['_eps', '_oc3'])
# If <DOB> < 19 and >= to 17 years prior to <COLLECTION_END_DATE> and current episode <DEC> and or <REC> not provided then <IN_TOUCH>, <ACTIV> and <ACCOM> should not be provided
check_age = (merged['DOB'] + pd.offsets.DateOffset(years=17) <= collection_end) & (
merged['DOB'] + pd.offsets.DateOffset(years=19) > collection_end)
# That is, check that 17<=age<19
check_dec_rec = merged['REC'].isna() | merged['DEC'].isna()
# if either DEC or REC are absent
mask = check_age & check_dec_rec & (
merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna())
# Then raise an error if either IN_TOUCH, ACTIV, or ACCOM have been provided too
# error locations
oc3_error_locs = merged.loc[mask, 'index_oc3']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'OC3': oc3_error_locs.unique().tolist()}
return error, _validate
def validate_442():
error = ErrorDefinition(
code='442',
description='Unique Pupil Number (UPN) field is not completed.',
affected_fields=['UPN', 'LS']
)
def _validate(dfs):
if ('Episodes' not in dfs) or ('Header' not in dfs):
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
code_list = ['V3', 'V4']
# merge left on episodes to get all children for which episodes have been recorded even if they do not exist on the header.
merged = episodes.merge(header, on=['CHILD'], how='left', suffixes=['_eps', '_er'])
# Where any episode present, with an <LS> not = 'V3' or 'V4' then <UPN> must be provided
mask = (~merged['LS'].isin(code_list)) & merged['UPN'].isna()
episode_error_locs = merged.loc[mask, 'index_eps']
header_error_locs = merged.loc[mask, 'index_er']
return {'Episodes': episode_error_locs.tolist(),
# Select unique values since many episodes are joined to one header
# and multiple errors will be raised for the same index.
'Header': header_error_locs.dropna().unique().tolist()}
return error, _validate
def validate_344():
error = ErrorDefinition(
code='344',
description='The record shows the young person has died or returned home to live with parent(s) or someone with parental responsibility for a continuous period of 6 months or more, but activity and/or accommodation on leaving care have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'DIED' or 'RHOM' then <ACTIV> and <ACCOM> should not be provided
mask = ((oc3['IN_TOUCH'] == 'DIED') | (oc3['IN_TOUCH'] == 'RHOM')) & (
oc3['ACTIV'].notna() | oc3['ACCOM'].notna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_345():
error = ErrorDefinition(
code='345',
description='The data collection record shows the local authority is in touch with this young person, but activity and/or accommodation data items are zero.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'Yes' then <ACTIV> and <ACCOM> must be provided
mask = (oc3['IN_TOUCH'] == 'YES') & (oc3['ACTIV'].isna() | oc3['ACCOM'].isna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_384():
error = ErrorDefinition(
code='384',
description='A child receiving respite care cannot be in a long-term foster placement ',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# Where <LS> = 'V3' or 'V4' then <PL> must not be 'U1' or 'U4'
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
(episodes['PLACE'] == 'U1') | (episodes['PLACE'] == 'U4'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_390():
error = ErrorDefinition(
code='390',
description='Reason episode ceased is adopted but child has not been previously placed for adoption.',
affected_fields=['PLACE', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# If <REC> = 'E11' or 'E12' then <PL> must be one of 'A3', 'A4', 'A5' or 'A6'
mask = ((episodes['REC'] == 'E11') | (episodes['REC'] == 'E12')) & ~(
(episodes['PLACE'] == 'A3') | (episodes['PLACE'] == 'A4') | (episodes['PLACE'] == 'A5') | (
episodes['PLACE'] == 'A6'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_378():
error = ErrorDefinition(
code='378',
description='A child who is placed with parent(s) cannot be looked after under a single period of accommodation under Section 20 of the Children Act 1989.',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# the & sign supercedes the ==, so brackets are necessary here
mask = (episodes['PLACE'] == 'P1') & (episodes['LS'] == 'V2')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_398():
error = ErrorDefinition(
code='398',
description='Distance field completed but child looked after under legal status V3 or V4.',
affected_fields=['LS', 'HOME_POST', 'PL_POST']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
episodes['HOME_POST'].notna() | episodes['PL_POST'].notna())
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_451():
error = ErrorDefinition(
code='451',
description='Child is still freed for adoption, but freeing orders could not be applied for since 30 December 2005.',
affected_fields=['DEC', 'REC', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = episodes['DEC'].isna() & episodes['REC'].isna() & (episodes['LS'] == 'D1')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_519():
error = ErrorDefinition(
code='519',
description='Data entered on the legal status of adopters shows civil partnership couple, but data entered on genders of adopters does not show it as a couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = (ad1['LS_ADOPTR'] == 'L2') & (
(ad1['SEX_ADOPTR'] != 'MM') & (ad1['SEX_ADOPTR'] != 'FF') & (ad1['SEX_ADOPTR'] != 'MF'))
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_520():
error = ErrorDefinition(
code='520',
description='Data entry on the legal status of adopters shows different gender married couple but data entry on genders of adopters shows it as a same gender couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
# check condition
mask = (ad1['LS_ADOPTR'] == 'L11') & (ad1['SEX_ADOPTR'] != 'MF')
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_522():
error = ErrorDefinition(
code='522',
description='Date of decision that the child should be placed for adoption must be on or before the date that a child should no longer be placed for adoption.',
affected_fields=['DATE_PLACED', 'DATE_PLACED_CEASED']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
# Convert to datetimes
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# Boolean mask
mask = placed_adoption['DATE_PLACED_CEASED'] > placed_adoption['DATE_PLACED']
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_563():
error = ErrorDefinition(
code='563',
description='The child should no longer be placed for adoption but the date of the decision that the child should be placed for adoption is blank',
affected_fields=['DATE_PLACED', 'REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
mask = placed_adoption['REASON_PLACED_CEASED'].notna() & placed_adoption['DATE_PLACED_CEASED'].notna() & \
placed_adoption['DATE_PLACED'].isna()
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_544():
error = ErrorDefinition(
code='544',
description="Any child who has conviction information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['CONVICTED', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
convict = oc2['CONVICTED'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = convict & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_634():
error = ErrorDefinition(
code='634',
description='There are entries for previous permanence options, but child has not started to be looked after from 1 April 2016 onwards.',
affected_fields=['LA_PERM', 'PREV_PERM', 'DATE_PERM', 'DECOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PrevPerm' not in dfs:
return {}
else:
episodes = dfs['Episodes']
prevperm = dfs['PrevPerm']
collection_start = dfs['metadata']['collection_start']
# convert date field to appropriate format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# the maximum date has the highest possibility of satisfying the condition
episodes['LAST_DECOM'] = episodes.groupby('CHILD')['DECOM'].transform('max')
# prepare to merge
episodes.reset_index(inplace=True)
prevperm.reset_index(inplace=True)
merged = prevperm.merge(episodes, on='CHILD', how='left', suffixes=['_prev', '_eps'])
# If <PREV_PERM> or <LA_PERM> or <DATE_PERM> provided, then at least 1 episode must have a <DECOM> later than 01/04/2016
mask = (merged['PREV_PERM'].notna() | merged['DATE_PERM'].notna() | merged['LA_PERM'].notna()) & (
merged['LAST_DECOM'] < collection_start)
eps_error_locs = merged.loc[mask, 'index_eps']
prevperm_error_locs = merged.loc[mask, 'index_prev']
# return {'PrevPerm':prevperm_error_locs}
return {'Episodes': eps_error_locs.unique().tolist(), 'PrevPerm': prevperm_error_locs.unique().tolist()}
return error, _validate
def validate_158():
error = ErrorDefinition(
code='158',
description='If a child has been recorded as receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be left blank.',
affected_fields=['INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
error_mask = oc2['INTERVENTION_RECEIVED'].astype(str).eq('1') & oc2['INTERVENTION_OFFERED'].notna()
error_locations = oc2.index[error_mask]
return {'OC2': error_locations.tolist()}
return error, _validate
def validate_133():
error = ErrorDefinition(
code='133',
description="Data entry for accommodation after leaving care is invalid. If reporting on a child's accommodation after leaving care, the data entry must be valid",
affected_fields=['ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
valid_codes = ['B1', 'B2', 'C1', 'C2', 'D1', 'D2', 'E1', 'E2', 'G1', 'G2', 'H1', 'H2', 'K1', 'K2', 'R1',
'R2', 'S2', 'T1', 'T2', 'U1', 'U2', 'V1', 'V2', 'W1', 'W2', 'X2', 'Y1', 'Y2', 'Z1', 'Z2',
'0']
error_mask = ~oc3['ACCOM'].isna() & ~oc3['ACCOM'].isin(valid_codes)
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.tolist()}
return error, _validate
def validate_565():
error = ErrorDefinition(
code='565',
description='The date that the child started to be missing or away from placement without authorisation has been completed but whether the child was missing or away from placement without authorisation has not been completed.',
affected_fields=['MISSING', 'MIS_START']
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
mask = missing['MIS_START'].notna() & missing['MISSING'].isna()
error_locations = missing.index[mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_433():
error = ErrorDefinition(
code='433',
description='The reason for new episode suggests that this is a continuation episode, but the episode does not start on the same day as the last episode finished.',
affected_fields=['RNE', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['original_index'] = episodes.index
# convert to datetime so that sorting is chronological rather than string-based
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
episodes.sort_values(['CHILD', 'DECOM', 'DEC'], inplace=True)
episodes[['PREVIOUS_DEC', 'PREVIOUS_CHILD']] = episodes[['DEC', 'CHILD']].shift(1)
rne_is_ongoing = episodes['RNE'].str.upper().astype(str).isin(['P', 'L', 'T', 'U', 'B'])
date_mismatch = episodes['PREVIOUS_DEC'] != episodes['DECOM']
missing_date = episodes['PREVIOUS_DEC'].isna() | episodes['DECOM'].isna()
same_child = episodes['PREVIOUS_CHILD'] == episodes['CHILD']
error_mask = rne_is_ongoing & (date_mismatch | missing_date) & same_child
error_locations = episodes['original_index'].loc[error_mask].sort_values()
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_437():
error = ErrorDefinition(
code='437',
description='Reason episode ceased is child has died or is aged 18 or over but there are further episodes.',
affected_fields=['REC'],
)
# !# potential false negatives, as this only operates on the current year's data
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes.sort_values(['CHILD', 'DECOM'], inplace=True)
episodes[['NEXT_DECOM', 'NEXT_CHILD']] = episodes[['DECOM', 'CHILD']].shift(-1)
# drop rows with missing DECOM as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
ceased_e2_e15 = episodes['REC'].str.upper().astype(str).isin(['E2', 'E15'])
has_later_episode = episodes['CHILD'] == episodes['NEXT_CHILD']
error_mask = ceased_e2_e15 & has_later_episode
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_547():
error = ErrorDefinition(
code='547',
description="Any child who has health promotion information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
healthck = oc2['HEALTH_CHECK'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = healthck & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_635():
error = ErrorDefinition(
code='635',
description='There are entries for date of order and local authority code where previous permanence option was arranged but previous permanence code is Z1',
affected_fields=['LA_PERM', 'DATE_PERM', 'PREV_PERM']
)
def _validate(dfs):
if 'PrevPerm' not in dfs:
return {}
else:
prev_perm = dfs['PrevPerm']
# raise and error if either LA_PERM or DATE_PERM are present, yet PREV_PERM is absent.
mask = ((prev_perm['LA_PERM'].notna() | prev_perm['DATE_PERM'].notna()) & prev_perm['PREV_PERM'].isna())
error_locations = prev_perm.index[mask]
return {'PrevPerm': error_locations.to_list()}
return error, _validate
def validate_550():
error = ErrorDefinition(
code='550',
description='A placement provider code of PR0 can only be associated with placement P1.',
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = (episodes['PLACE'] != 'P1') & episodes['PLACE_PROVIDER'].eq('PR0')
validation_error_locations = episodes.index[mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_217():
error = ErrorDefinition(
code='217',
description='Children who are placed for adoption with current foster carers (placement types A3 or A5) must have a reason for new episode of S, T or U.',
affected_fields=['PLACE', 'DECOM', 'RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
max_decom_allowed = pd.to_datetime('01/04/2015', format='%d/%m/%Y', errors='coerce')
reason_new_ep = ['S', 'T', 'U']
place_codes = ['A3', 'A5']
mask = (episodes['PLACE'].isin(place_codes) & (episodes['DECOM'] >= max_decom_allowed)) & ~episodes[
'RNE'].isin(reason_new_ep)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_518():
error = ErrorDefinition(
code='518',
description='If reporting legal status of adopters is L4 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L4') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_517():
error = ErrorDefinition(
code='517',
description='If reporting legal status of adopters is L3 then the genders of adopters should be coded as MF. MF = the adopting couple are male and female.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L3') & ~AD1['SEX_ADOPTR'].isin(['MF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_558():
error = ErrorDefinition(
code='558',
        description='If a child has been adopted, then the decision to place them for adoption has not been disrupted and the date of the decision that a child should no longer be placed for adoption should be left blank. If the REC code is either E11 or E12 then the DATE PLACED CEASED date should not be provided',
affected_fields=['DATE_PLACED_CEASED', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
rec_codes = ['E11', 'E12']
placeEpisodes = episodes[episodes['REC'].isin(rec_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED_CEASED'].notna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_453():
error = ErrorDefinition(
code='453',
description='Contradiction between placement distance in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_DISTANCE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['PL_DISTANCE'] = pd.to_numeric(episodes['PL_DISTANCE'], errors='coerce')
episodes_last['PL_DISTANCE'] = pd.to_numeric(episodes_last['PL_DISTANCE'], errors='coerce')
# drop rows with missing DECOM before finding idxmin/max, as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
episodes_last = episodes_last.dropna(subset=['DECOM'])
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_dist = abs(episodes_merged['PL_DISTANCE'] - episodes_merged['PL_DISTANCE_last']) >= 0.2
error_mask = in_both_years & same_rne & last_year_open & different_pl_dist
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
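# Illustrative sketch (not part of the rule set) of the cross-year comparison idiom used by
# validate_453 and validate_452: take each child's earliest episode in the current year and
# latest episode in the previous year, then merge the two on CHILD. Values are hypothetical.
#
#     import pandas as pd
#     cur = pd.DataFrame({'CHILD': ['A', 'A'],
#                         'DECOM': pd.to_datetime(['2020-05-01', '2020-06-01']),
#                         'PL_DISTANCE': [10.0, 12.0]})
#     last = pd.DataFrame({'CHILD': ['A'],
#                          'DECOM': pd.to_datetime(['2019-11-01']),
#                          'PL_DISTANCE': [25.0]})
#     first_cur = cur.loc[cur.groupby('CHILD')['DECOM'].idxmin()]
#     last_prev = last.loc[last.groupby('CHILD')['DECOM'].idxmax()]
#     merged = first_cur.merge(last_prev, on='CHILD', how='left', suffixes=('', '_last'), indicator=True)
#     # abs(10.0 - 25.0) >= 0.2, so child A's first episode of the current year would be flagged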
def validate_516():
error = ErrorDefinition(
code='516',
        description='The episode data submitted for this child does not show that he/she was with their former foster carer(s) during the year. If the code in the reason episode ceased is E45 or E46 the child must have a placement code of U1 to U6.',
affected_fields=['REC', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
place_codes = ['U1', 'U2', 'U3', 'U4', 'U5', 'U6']
rec_codes = ['E45', 'E46']
error_mask = episodes['REC'].isin(rec_codes) & ~episodes['PLACE'].isin(place_codes)
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_511():
error = ErrorDefinition(
code='511',
description='If reporting that the number of person(s) adopting the looked after child is two adopters then the code should only be MM, FF or MF. MM = the adopting couple are both males; FF = the adopting couple are both females; MF = The adopting couple are male and female.',
affected_fields=['NB_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
mask = AD1['NB_ADOPTR'].astype(str).eq('2') & AD1['SEX_ADOPTR'].isin(['M1', 'F1'])
validation_error_mask = mask
validation_error_locations = AD1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_524():
error = ErrorDefinition(
code='524',
description='If reporting legal status of adopters is L12 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L12') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_441():
error = ErrorDefinition(
code='441',
description='Participation method indicates child was 4 years old or over at the time of the review, but the date of birth and review date indicates the child was under 4 years old.',
affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
reviews = dfs['Reviews']
reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce')
reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce')
reviews = reviews.dropna(subset=['REVIEW', 'DOB'])
mask = reviews['REVIEW_CODE'].isin(['PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7']) & (
reviews['REVIEW'] < reviews['DOB'] + pd.offsets.DateOffset(years=4))
validation_error_mask = mask
validation_error_locations = reviews.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_184():
error = ErrorDefinition(
code='184',
description='Date of decision that a child should be placed for adoption is before the child was born.',
        affected_fields=['DATE_PLACED',  # PlacedAdoption
'DOB'], # Header
)
def _validate(dfs):
if 'Header' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
child_record = dfs['Header']
placed_for_adoption = dfs['PlacedAdoption']
all_data = (placed_for_adoption
.reset_index()
.merge(child_record, how='left', on='CHILD', suffixes=[None, '_P4A']))
all_data['DATE_PLACED'] = pd.to_datetime(all_data['DATE_PLACED'], format='%d/%m/%Y', errors='coerce')
all_data['DOB'] = pd.to_datetime(all_data['DOB'], format='%d/%m/%Y', errors='coerce')
mask = (all_data['DATE_PLACED'] >= all_data['DOB']) | all_data['DATE_PLACED'].isna()
validation_error = ~mask
validation_error_locations = all_data[validation_error]['index'].unique()
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_612():
error = ErrorDefinition(
code='612',
description="Date of birth field has been completed but mother field indicates child is not a mother.",
affected_fields=['SEX', 'MOTHER', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
error_mask = (
((header['MOTHER'].astype(str) == '0') | header['MOTHER'].isna())
& (header['SEX'].astype(str) == '2')
& header['MC_DOB'].notna()
)
validation_error_locations = header.index[error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_552():
"""
This error checks that the first adoption episode is after the last decision !
If there are multiple of either there may be unexpected results !
"""
error = ErrorDefinition(
code="552",
description="Date of Decision to place a child for adoption should be on or prior to the date that the child was placed for adoption.",
# Field that defines date of decision to place a child for adoption is DATE_PLACED and the start of adoption is defined by DECOM with 'A' placement types.
affected_fields=['DATE_PLACED', 'DECOM'],
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
# get the required datasets
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
# keep index values so that they stay the same when needed later on for error locations
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
adoption_eps = episodes[episodes['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])].copy()
# find most recent adoption decision
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# remove rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption['DATE_PLACED'].notna()]
placed_adoption_inds = placed_adoption.groupby('CHILD')['DATE_PLACED'].idxmax(skipna=True)
last_decision = placed_adoption.loc[placed_adoption_inds]
# first time child started adoption
adoption_eps["DECOM"] = pd.to_datetime(adoption_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
adoption_eps = adoption_eps[adoption_eps['DECOM'].notna()]
adoption_eps_inds = adoption_eps.groupby('CHILD')['DECOM'].idxmin(skipna=True)
# full information of first adoption
first_adoption = adoption_eps.loc[adoption_eps_inds]
# date of decision and date of start of adoption (DECOM) have to be put in one table
merged = first_adoption.merge(last_decision, on=['CHILD'], how='left', suffixes=['_EP', '_PA'])
            # flag rows where the decision date (DATE_PLACED) falls after the start of the adoption placement (DECOM)
            decided_after_placed = merged["DECOM"] < merged["DATE_PLACED"]
# find the corresponding location of error values per file.
episode_error_locs = merged.loc[decided_after_placed, 'index_EP']
placedadoption_error_locs = merged.loc[decided_after_placed, 'index_PA']
return {"PlacedAdoption": placedadoption_error_locs.to_list(), "Episodes": episode_error_locs.to_list()}
return error, _validate
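# Hedged usage sketch for validate_552 on hypothetical data (note that the validator calls
# reset_index(inplace=True), so the frames passed in are modified):
#
#     import pandas as pd
#     placed = pd.DataFrame({'CHILD': ['A'], 'DATE_PLACED': ['01/06/2020']})
#     eps = pd.DataFrame({'CHILD': ['A'], 'PLACE': ['A4'], 'DECOM': ['01/05/2020']})
#     error_defn, error_func = validate_552()
#     error_func({'PlacedAdoption': placed, 'Episodes': eps})
#     # -> {'PlacedAdoption': [0], 'Episodes': [0]}  (the placement starts before the decision date)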
def validate_551():
error = ErrorDefinition(
code='551',
description='Child has been placed for adoption but there is no date of the decision that the child should be placed for adoption.',
affected_fields=['DATE_PLACED', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
place_codes = ['A3', 'A4', 'A5', 'A6']
placeEpisodes = episodes[episodes['PLACE'].isin(place_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED'].isna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_557():
error = ErrorDefinition(
code='557',
description="Child for whom the decision was made that they should be placed for adoption has left care " +
"but was not adopted and information on the decision that they should no longer be placed for " +
"adoption items has not been completed.",
affected_fields=['DATE_PLACED_CEASED', 'REASON_PLACED_CEASED', # PlacedAdoption
'PLACE', 'LS', 'REC'], # Episodes
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'PlacedAdoption' not in dfs:
return {}
else:
eps = dfs['Episodes']
placed = dfs['PlacedAdoption']
eps = eps.reset_index()
placed = placed.reset_index()
child_placed = eps['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])
order_granted = eps['LS'].isin(['D1', 'E1'])
not_adopted = ~eps['REC'].isin(['E11', 'E12']) & eps['REC'].notna()
placed['ceased_incomplete'] = (
placed['DATE_PLACED_CEASED'].isna() | placed['REASON_PLACED_CEASED'].isna()
)
eps = eps[(child_placed | order_granted) & not_adopted]
eps = eps.merge(placed, on='CHILD', how='left', suffixes=['_EP', '_PA'], indicator=True)
            eps = eps[(eps['_merge'] == 'left_only') | eps['ceased_incomplete'].fillna(False)]
EP_errors = eps['index_EP']
PA_errors = eps['index_PA'].dropna()
return {
'Episodes': EP_errors.to_list(),
'PlacedAdoption': PA_errors.to_list(),
}
return error, _validate
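# Illustrative sketch (not part of the rule set) of the indicator=True idiom used by
# validate_557: a left merge marks episodes with no PlacedAdoption record at all as
# 'left_only', while matched rows can still be flagged for incomplete cessation details.
#
#     import pandas as pd
#     eps = pd.DataFrame({'CHILD': ['A', 'B']})
#     placed = pd.DataFrame({'CHILD': ['A'], 'DATE_PLACED_CEASED': [None]})
#     m = eps.merge(placed, on='CHILD', how='left', indicator=True)
#     m['_merge'].tolist()   # ['both', 'left_only'] -> child B has no PlacedAdoption row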
def validate_207():
error = ErrorDefinition(
code='207',
description='Mother status for the current year disagrees with the mother status already recorded for this child.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
mother_is_different = header_merged['MOTHER'].astype(str) != header_merged['MOTHER_last'].astype(str)
mother_was_true = header_merged['MOTHER_last'].astype(str) == '1'
error_mask = in_both_years & mother_is_different & mother_was_true
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_523():
error = ErrorDefinition(
code='523',
description="Date of decision that the child should be placed for adoption should be the same date as the decision that adoption is in the best interest (date should be placed).",
affected_fields=['DATE_PLACED', 'DATE_INT'],
)
def _validate(dfs):
if ("AD1" not in dfs) or ("PlacedAdoption" not in dfs):
return {}
else:
placed_adoption = dfs["PlacedAdoption"]
ad1 = dfs["AD1"]
# keep initial index values to be reused for locating errors later on.
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
# convert to datetime to enable comparison
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format="%d/%m/%Y",
errors='coerce')
ad1["DATE_INT"] = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce')
# drop rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption["DATE_PLACED"].notna()]
ad1 = ad1[ad1["DATE_INT"].notna()]
# bring corresponding values together from both dataframes
            # suffix order follows merge order: left (PlacedAdoption) columns get '_PA', right (AD1) columns get '_AD'
            merged_df = placed_adoption.merge(ad1, on=['CHILD'], how='inner', suffixes=["_PA", "_AD"])
# find error values
different_dates = merged_df['DATE_INT'] != merged_df['DATE_PLACED']
# map error locations to corresponding indices
pa_error_locations = merged_df.loc[different_dates, 'index_PA']
ad1_error_locations = merged_df.loc[different_dates, 'index_AD']
return {"PlacedAdoption": pa_error_locations.to_list(), "AD1": ad1_error_locations.to_list()}
return error, _validate
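# Illustrative sketch (not part of the rule set) of how reset_index() before the merge in
# validate_523 keeps each file's original row positions, so mismatching rows can be reported
# back against the correct file. Values are hypothetical.
#
#     import pandas as pd
#     pa = pd.DataFrame({'CHILD': ['A'], 'DATE_PLACED': ['01/02/2020']}).reset_index()
#     ad1 = pd.DataFrame({'CHILD': ['A'], 'DATE_INT': ['05/02/2020']}).reset_index()
#     m = pa.merge(ad1, on='CHILD', how='inner', suffixes=['_PA', '_AD'])
#     # the dates differ, so m['index_PA'] is reported against PlacedAdoption
#     # and m['index_AD'] against AD1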
def validate_3001():
error = ErrorDefinition(
code='3001',
description='Where care leavers information is being returned for a young person around their 17th birthday, the accommodation cannot be with their former foster carer(s).',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
oc3 = dfs['OC3']
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
header['DOB17'] = header['DOB'] + pd.DateOffset(years=17)
oc3_merged = oc3.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
accom_foster = oc3_merged['ACCOM'].str.upper().astype(str).isin(['Z1', 'Z2'])
age_17_in_year = (oc3_merged['DOB17'] <= collection_end) & (oc3_merged['DOB17'] >= collection_start)
error_mask = accom_foster & age_17_in_year
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.to_list()}
return error, _validate
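# Illustrative sketch (not part of the rule set) of the "17th birthday falls inside the
# collection year" window test used by validate_3001. Dates are hypothetical.
#
#     import pandas as pd
#     dob = pd.to_datetime('15/06/2003', format='%d/%m/%Y')
#     dob17 = dob + pd.DateOffset(years=17)                # 15/06/2020
#     start = pd.to_datetime('01/04/2020', format='%d/%m/%Y')
#     end = pd.to_datetime('31/03/2021', format='%d/%m/%Y')
#     (start <= dob17) & (dob17 <= end)                    # True -> birthday is in the collection year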
def validate_389():
error = ErrorDefinition(
code='389',
description='Reason episode ceased is that child transferred to care of adult social care services, but child is aged under 16.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB16'] = header['DOB'] + pd.DateOffset(years=16)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_asc = episodes_merged['REC'].str.upper().astype(str).isin(['E7'])
ceased_over_16 = episodes_merged['DOB16'] <= episodes_merged['DEC']
error_mask = ceased_asc & ~ceased_over_16
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_387():
error = ErrorDefinition(
code='387',
description='Reason episode ceased is child moved into independent living arrangement, but the child is aged under 14.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB14'] = header['DOB'] + pd.DateOffset(years=14)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_indep = episodes_merged['REC'].str.upper().astype(str).isin(['E5', 'E6'])
ceased_over_14 = episodes_merged['DOB14'] <= episodes_merged['DEC']
dec_present = episodes_merged['DEC'].notna()
error_mask = ceased_indep & ~ceased_over_14 & dec_present
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_452():
error = ErrorDefinition(
code='452',
description='Contradiction between local authority of placement code in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
            # drop rows with missing DECOM before finding idxmin/max, as invalid/missing values can lead to errors
            episodes = episodes.dropna(subset=['DECOM'])
            episodes_last = episodes_last.dropna(subset=['DECOM'])
            episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
            episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_la = episodes_merged['PL_LA'].astype(str) != episodes_merged['PL_LA_last'].astype(str)
error_mask = in_both_years & same_rne & last_year_open & different_pl_la
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_386():
error = ErrorDefinition(
code='386',
description='Reason episode ceased is adopted but child has reached age 18.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = (
episodes
.reset_index()
.merge(header, how='left', on=['CHILD'], suffixes=('', '_header'), indicator=True)
.set_index('index')
.dropna(subset=['DOB18', 'DEC'])
)
ceased_adopted = episodes_merged['REC'].str.upper().astype(str).isin(['E11', 'E12'])
ceased_under_18 = episodes_merged['DOB18'] > episodes_merged['DEC']
error_mask = ceased_adopted & ~ceased_under_18
error_locations = episodes_merged.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_363():
error = ErrorDefinition(
code='363',
description='Child assessment order (CAO) lasted longer than 7 days allowed in the Children Act 1989.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
L2_eps = episodes[episodes['LS'] == 'L3'].copy()
L2_eps['original_index'] = L2_eps.index
L2_eps = L2_eps[L2_eps['DECOM'].notna()]
L2_eps.loc[L2_eps['DEC'].isna(), 'DEC'] = collection_end_str
L2_eps['DECOM'] = pd.to_datetime(L2_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
L2_eps = L2_eps.dropna(subset=['DECOM'])
L2_eps['DEC'] = pd.to_datetime(L2_eps['DEC'], format='%d/%m/%Y', errors='coerce')
        L2_eps = L2_eps.sort_values(['CHILD', 'DECOM'])  # keep the sorted result; sort_values is not in-place
L2_eps['index'] = pd.RangeIndex(0, len(L2_eps))
L2_eps['index+1'] = L2_eps['index'] + 1
L2_eps = L2_eps.merge(L2_eps, left_on='index', right_on='index+1',
how='left', suffixes=[None, '_prev'])
L2_eps = L2_eps[['original_index', 'DECOM', 'DEC', 'DEC_prev', 'CHILD', 'CHILD_prev', 'LS']]
L2_eps['new_period'] = (
(L2_eps['DECOM'] > L2_eps['DEC_prev'])
| (L2_eps['CHILD'] != L2_eps['CHILD_prev'])
)
L2_eps['duration'] = (L2_eps['DEC'] - L2_eps['DECOM']).dt.days
L2_eps['period_id'] = L2_eps['new_period'].astype(int).cumsum()
L2_eps['period_duration'] = L2_eps.groupby('period_id')['duration'].transform(sum)
error_mask = L2_eps['period_duration'] > 7
return {'Episodes': L2_eps.loc[error_mask, 'original_index'].to_list()}
return error, _validate
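# Illustrative sketch (not part of the rule set) of the period-grouping idiom used by
# validate_363 (and validate_364 below): a cumulative sum over the new_period flag gives every
# run of back-to-back episodes a shared period_id, so their durations can be pooled before
# applying the day limit. Values are hypothetical.
#
#     import pandas as pd
#     d = pd.DataFrame({'new_period': [True, False, True], 'duration': [4, 2, 3]})
#     d['period_id'] = d['new_period'].astype(int).cumsum()       # [1, 1, 2]
#     d.groupby('period_id')['duration'].transform(sum)           # [6, 6, 3]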
def validate_364():
error = ErrorDefinition(
code='364',
description='Sections 41-46 of Police and Criminal Evidence (PACE; 1984) severely limits ' +
'the time a child can be detained in custody in Local Authority (LA) accommodation.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
J2_eps = episodes[episodes['LS'] == 'J2'].copy()
J2_eps['original_index'] = J2_eps.index
J2_eps['DECOM'] = pd.to_datetime(J2_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
J2_eps = J2_eps[J2_eps['DECOM'].notna()]
J2_eps.loc[J2_eps['DEC'].isna(), 'DEC'] = collection_end_str
J2_eps['DEC'] = pd.to_datetime(J2_eps['DEC'], format='%d/%m/%Y', errors='coerce')
        J2_eps = J2_eps.sort_values(['CHILD', 'DECOM'])  # keep the sorted result; sort_values is not in-place
J2_eps['index'] = pd.RangeIndex(0, len(J2_eps))
J2_eps['index_prev'] = J2_eps['index'] + 1
J2_eps = J2_eps.merge(J2_eps, left_on='index', right_on='index_prev',
how='left', suffixes=[None, '_prev'])
J2_eps = J2_eps[['original_index', 'DECOM', 'DEC', 'DEC_prev', 'CHILD', 'CHILD_prev', 'LS']]
J2_eps['new_period'] = (
(J2_eps['DECOM'] > J2_eps['DEC_prev'])
| (J2_eps['CHILD'] != J2_eps['CHILD_prev'])
)
J2_eps['duration'] = (J2_eps['DEC'] - J2_eps['DECOM']).dt.days
J2_eps['period_id'] = J2_eps['new_period'].astype(int).cumsum()
J2_eps['period_duration'] = J2_eps.groupby('period_id')['duration'].transform(sum)
error_mask = J2_eps['period_duration'] > 21
return {'Episodes': J2_eps.loc[error_mask, 'original_index'].to_list()}
return error, _validate
def validate_365():
error = ErrorDefinition(
code='365',
        description='Any individual short-term respite placement must not exceed 17 days.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
episodes.loc[episodes['DEC'].isna(), 'DEC'] = collection_end_str
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
over_17_days = episodes['DEC'] > episodes['DECOM'] + pd.DateOffset(days=17)
error_mask = (episodes['LS'] == 'V3') & over_17_days
return {'Episodes': episodes.index[error_mask].to_list()}
return error, _validate
def validate_367():
error = ErrorDefinition(
code='367',
description='The maximum amount of respite care allowable is 75 days in any 12-month period.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
        V3_eps = episodes[episodes['LS'] == 'V3'].copy()  # copy so the helper columns added below do not touch the original frame
V3_eps = V3_eps.dropna(subset=['DECOM']) # missing DECOM should get fixed before looking for this error
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
V3_eps['DECOM_dt'] = pd.to_datetime(V3_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
V3_eps['DEC_dt'] = pd.to_datetime(V3_eps['DEC'], format='%d/%m/%Y', errors='coerce')
# truncate episode start/end dates to collection start/end respectively
V3_eps.loc[V3_eps['DEC'].isna() | (V3_eps['DEC_dt'] > collection_end), 'DEC_dt'] = collection_end
V3_eps.loc[V3_eps['DECOM_dt'] < collection_start, 'DECOM_dt'] = collection_start
V3_eps['duration'] = (V3_eps['DEC_dt'] - V3_eps['DECOM_dt']).dt.days
V3_eps = V3_eps[V3_eps['duration'] > 0]
V3_eps['year_total_duration'] = V3_eps.groupby('CHILD')['duration'].transform(sum)
error_mask = V3_eps['year_total_duration'] > 75
return {'Episodes': V3_eps.index[error_mask].to_list()}
return error, _validate
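# Illustrative sketch (not part of the rule set) of the date clipping used by validate_367:
# episode start/end dates are truncated to the collection year so only days inside the year
# count towards the 75-day respite limit. Dates are hypothetical.
#
#     import pandas as pd
#     start = pd.to_datetime('01/04/2020', format='%d/%m/%Y')
#     end = pd.to_datetime('31/03/2021', format='%d/%m/%Y')
#     decom = pd.to_datetime('15/03/2020', format='%d/%m/%Y')     # began before the year
#     dec = pd.NaT                                                # still open
#     decom_clipped = max(decom, start)                           # 01/04/2020
#     dec_clipped = end if pd.isna(dec) or dec > end else dec     # 31/03/2021
#     (dec_clipped - decom_clipped).days                          # 364 days counted this year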
def validate_440():
error = ErrorDefinition(
code='440',
description='Participation method indicates child was under 4 years old at the time of the review, but date of birth and review date indicates the child was 4 years old or over.',
affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
reviews = dfs['Reviews']
reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce')
reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce')
mask = reviews['REVIEW_CODE'].eq('PN0') & (
reviews['REVIEW'] > reviews['DOB'] + pd.offsets.DateOffset(years=4))
validation_error_mask = mask
validation_error_locations = reviews.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_445():
error = ErrorDefinition(
code='445',
description='D1 is not a valid code for episodes starting after December 2005.',
affected_fields=['LS', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
max_decom_allowed = pd.to_datetime('31/12/2005', format='%d/%m/%Y', errors='coerce')
mask = episodes['LS'].eq('D1') & (episodes['DECOM'] > max_decom_allowed)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_446():
error = ErrorDefinition(
code='446',
description='E1 is not a valid code for episodes starting before December 2005.',
affected_fields=['LS', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
min_decom_allowed = pd.to_datetime('01/12/2005', format='%d/%m/%Y', errors='coerce')
mask = episodes['LS'].eq('E1') & (episodes['DECOM'] < min_decom_allowed)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_208():
error = ErrorDefinition(
code='208',
description='Unique Pupil Number (UPN) for the current year disagrees with the Unique Pupil Number (UPN) already recorded for this child.',
affected_fields=['UPN'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
upn_is_different = header_merged['UPN'].str.upper().astype(str) != header_merged[
'UPN_last'].str.upper().astype(str)
upn_not_recorded = header_merged['UPN'].str.upper().astype(str).isin(['UN2', 'UN3', 'UN4', 'UN5', 'UN6']) & \
header_merged['UPN_last'].str.upper().astype(str).isin(['UN1'])
error_mask = in_both_years & upn_is_different & ~upn_not_recorded
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
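# Illustrative sketch (not part of the rule set) of the UPN exception applied by validate_208:
# a change from 'UN1' (unknown) to one of 'UN2'-'UN6' is tolerated, any other change is not.
# Values are hypothetical.
#
#     cur, prev = 'UN2', 'UN1'
#     different = cur != prev                                          # True
#     excused = cur in {'UN2', 'UN3', 'UN4', 'UN5', 'UN6'} and prev == 'UN1'
#     flag = different and not excused                                 # False -> not reported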
def validate_204():
error = ErrorDefinition(
code='204',
description='Ethnic origin code disagrees with the ethnic origin already recorded for this child.',
affected_fields=['ETHNIC'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
ethnic_is_different = header_merged['ETHNIC'].astype(str).str.upper() != header_merged[
'ETHNIC_last'].astype(str).str.upper()
error_mask = in_both_years & ethnic_is_different
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_203():
error = ErrorDefinition(
code='203',
description='Date of birth disagrees with the date of birth already recorded for this child.',
affected_fields=['DOB'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
header_last['DOB'] = pd.to_datetime(header_last['DOB'], format='%d/%m/%Y', errors='coerce')
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
dob_is_different = header_merged['DOB'].astype(str) != header_merged['DOB_last'].astype(str)
error_mask = in_both_years & dob_is_different
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_530():
error = ErrorDefinition(
code='530',
description="A placement provider code of PR4 cannot be associated with placement P1.",
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = episodes['PLACE'].eq('P1') & episodes['PLACE_PROVIDER'].eq('PR4')
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_571():
error = ErrorDefinition(
code='571',
description='The date that the child ceased to be missing or away from placement without authorisation is before the start or after the end of the collection year.',
affected_fields=['MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
missing['fMIS_END'] = pd.to_datetime(missing['MIS_END'], format='%d/%m/%Y', errors='coerce')
end_date_before_year = missing['fMIS_END'] < collection_start
end_date_after_year = missing['fMIS_END'] > collection_end
error_mask = end_date_before_year | end_date_after_year
error_locations = missing.index[error_mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_1005():
error = ErrorDefinition(
code='1005',
description='The end date of the missing episode or episode that the child was away from placement without authorisation is not a valid date.',
affected_fields=['MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
missing['fMIS_END'] = pd.to_datetime(missing['MIS_END'], format='%d/%m/%Y', errors='coerce')
missing_end_date = missing['MIS_END'].isna()
invalid_end_date = missing['fMIS_END'].isna()
error_mask = ~missing_end_date & invalid_end_date
error_locations = missing.index[error_mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_1004():
error = ErrorDefinition(
code='1004',
description='The start date of the missing episode or episode that the child was away from placement without authorisation is not a valid date.',
affected_fields=['MIS_START'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
missing['fMIS_START'] = pd.to_datetime(missing['MIS_START'], format='%d/%m/%Y', errors='coerce')
missing_start_date = missing['MIS_START'].isna()
invalid_start_date = missing['fMIS_START'].isna()
error_mask = missing_start_date | invalid_start_date
error_locations = missing.index[error_mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_202():
error = ErrorDefinition(
code='202',
description='The gender code conflicts with the gender already recorded for this child.',
affected_fields=['SEX'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
sex_is_different = header_merged['SEX'].astype(str) != header_merged['SEX_last'].astype(str)
error_mask = in_both_years & sex_is_different
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_621():
error = ErrorDefinition(
code='621',
description="Motherโs field has been completed but date of birth shows that the mother is younger than her child.",
affected_fields=['DOB', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
header['MC_DOB'] = pd.to_datetime(header['MC_DOB'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
mask = (header['MC_DOB'] > header['DOB']) | header['MC_DOB'].isna()
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_556():
error = ErrorDefinition(
code='556',
description='Date of decision that the child should be placed for adoption should be on or prior to the date that the freeing order was granted.',
affected_fields=['DATE_PLACED', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placedAdoptions['DATE_PLACED'] = pd.to_datetime(placedAdoptions['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
episodes = episodes.reset_index()
D1Episodes = episodes[episodes['LS'] == 'D1']
            merged = D1Episodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED'] > merged['DECOM']]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_393():
error = ErrorDefinition(
code='393',
description='Child is looked after but mother field is not completed.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header_female = header[header['SEX'].astype(str) == '2']
applicable_episodes = episodes[~episodes['LS'].str.upper().isin(['V3', 'V4'])]
error_mask = header_female['CHILD'].isin(applicable_episodes['CHILD']) & header_female['MOTHER'].isna()
error_locations = header_female.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_NoE():
error = ErrorDefinition(
code='NoE',
description='This child has no episodes loaded for previous year even though child started to be looked after before this current year.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last = dfs['Episodes_last']
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
episodes_before_year = episodes[episodes['DECOM'] < collection_start]
episodes_merged = episodes_before_year.reset_index().merge(episodes_last, how='left', on=['CHILD'],
indicator=True).set_index('index')
episodes_not_matched = episodes_merged[episodes_merged['_merge'] == 'left_only']
error_mask = episodes.index.isin(episodes_not_matched.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_356():
error = ErrorDefinition(
code='356',
description='The date the episode ceased is before the date the same episode started.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
error_mask = episodes['DEC'].notna() & (episodes['DEC'] < episodes['DECOM'])
return {'Episodes': episodes.index[error_mask].to_list()}
return error, _validate
def validate_611():
error = ErrorDefinition(
code='611',
description="Date of birth field is blank, but child is a mother.",
affected_fields=['MOTHER', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
validation_error_mask = header['MOTHER'].astype(str).isin(['1']) & header['MC_DOB'].isna()
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_1009():
error = ErrorDefinition(
code='1009',
description='Reason for placement change is not a valid code.',
affected_fields=['REASON_PLACE_CHANGE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'CARPL',
'CLOSE',
'ALLEG',
'STAND',
'APPRR',
'CREQB',
'CREQO',
'CHILD',
'LAREQ',
'PLACE',
'CUSTOD',
'OTHER'
]
mask = episodes['REASON_PLACE_CHANGE'].isin(code_list) | episodes['REASON_PLACE_CHANGE'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
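# Illustrative sketch (not part of the rule set) of the code-list pattern shared by
# validate_1009 and the other "valid code" rules below: a value passes if it is in the code
# list or is missing; only entered-but-unrecognised values are flagged. Values are hypothetical.
#
#     import pandas as pd
#     s = pd.Series(['CARPL', 'XXXX', None])
#     ok = s.isin(['CARPL', 'CLOSE', 'OTHER']) | s.isna()   # [True, False, True]
#     s.index[~ok].tolist()                                 # [1] -> only 'XXXX' is flagged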
def validate_1006():
error = ErrorDefinition(
code='1006',
description='Missing type invalid.',
affected_fields=['MISSING'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
missing_from_care = dfs['Missing']
code_list = ['M', 'A']
mask = missing_from_care['MISSING'].isin(code_list) | missing_from_care['MISSING'].isna()
validation_error_mask = ~mask
validation_error_locations = missing_from_care.index[validation_error_mask]
return {'Missing': validation_error_locations.tolist()}
return error, _validate
def validate_631():
error = ErrorDefinition(
code='631',
description='Previous permanence option not a valid value.',
affected_fields=['PREV_PERM'],
)
def _validate(dfs):
if 'PrevPerm' not in dfs:
return {}
previous_permanence = dfs['PrevPerm']
code_list = ['P1', 'P2', 'P3', 'P4', 'Z1']
mask = previous_permanence['PREV_PERM'].isin(code_list) | previous_permanence['PREV_PERM'].isna()
validation_error_mask = ~mask
validation_error_locations = previous_permanence.index[validation_error_mask]
return {'PrevPerm': validation_error_locations.tolist()}
return error, _validate
def validate_196():
error = ErrorDefinition(
code='196',
description='Strengths and Difficulties (SDQ) reason is not a valid code.',
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
code_list = ['SDQ1', 'SDQ2', 'SDQ3', 'SDQ4', 'SDQ5']
mask = oc2['SDQ_REASON'].isin(code_list) | oc2['SDQ_REASON'].isna()
validation_error_mask = ~mask
validation_error_locations = oc2.index[validation_error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_177():
error = ErrorDefinition(
code='177',
description='The legal status of adopter(s) code is not a valid code.',
affected_fields=['LS_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['L0', 'L11', 'L12', 'L2', 'L3', 'L4']
mask = adoptions['LS_ADOPTR'].isin(code_list) | adoptions['LS_ADOPTR'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_176():
error = ErrorDefinition(
code='176',
description='The gender of adopter(s) at the date of adoption code is not a valid code.',
affected_fields=['SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['M1', 'F1', 'MM', 'FF', 'MF']
mask = adoptions['SEX_ADOPTR'].isin(code_list) | adoptions['SEX_ADOPTR'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_175():
error = ErrorDefinition(
code='175',
description='The number of adopter(s) code is not a valid code.',
affected_fields=['NB_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['1', '2']
mask = adoptions['NB_ADOPTR'].astype(str).isin(code_list) | adoptions['NB_ADOPTR'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_132():
error = ErrorDefinition(
code='132',
description='Data entry for activity after leaving care is invalid.',
affected_fields=['ACTIV'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
care_leavers = dfs['OC3']
code_list = [
'F1',
'P1',
'F2',
'P2',
'F4',
'P4',
'F5',
'P5',
'G4',
'G5',
'G6',
'0'
]
mask = care_leavers['ACTIV'].astype(str).isin(code_list) | care_leavers['ACTIV'].isna()
validation_error_mask = ~mask
validation_error_locations = care_leavers.index[validation_error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_131():
error = ErrorDefinition(
code='131',
description='Data entry for being in touch after leaving care is invalid.',
affected_fields=['IN_TOUCH'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
care_leavers = dfs['OC3']
code_list = [
'YES',
'NO',
'DIED',
'REFU',
'NREQ',
'RHOM'
]
mask = care_leavers['IN_TOUCH'].isin(code_list) | care_leavers['IN_TOUCH'].isna()
validation_error_mask = ~mask
validation_error_locations = care_leavers.index[validation_error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_120():
error = ErrorDefinition(
code='120',
description='The reason for the reversal of the decision that the child should be placed for adoption code is not valid.',
affected_fields=['REASON_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
placed_adoptions = dfs['PlacedAdoption']
code_list = ['RD1', 'RD2', 'RD3', 'RD4']
mask = placed_adoptions['REASON_PLACED_CEASED'].isin(code_list) | placed_adoptions[
'REASON_PLACED_CEASED'].isna()
validation_error_mask = ~mask
validation_error_locations = placed_adoptions.index[validation_error_mask]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_114():
error = ErrorDefinition(
code='114',
description='Data entry to record the status of former carer(s) of an adopted child is invalid.',
affected_fields=['FOSTER_CARE'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['0', '1']
mask = adoptions['FOSTER_CARE'].astype(str).isin(code_list) | adoptions['FOSTER_CARE'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_178():
error = ErrorDefinition(
code='178',
description='Placement provider code is not a valid code.',
affected_fields=['PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list_placement_provider = ['PR0', 'PR1', 'PR2', 'PR3', 'PR4', 'PR5']
code_list_placement_with_no_provider = ['T0', 'T1', 'T2', 'T3', 'T4', 'Z1']
place_provider_needed_and_correct = episodes['PLACE_PROVIDER'].isin(code_list_placement_provider) & ~episodes[
'PLACE'].isin(code_list_placement_with_no_provider)
place_provider_not_provided = episodes['PLACE_PROVIDER'].isna()
place_provider_not_needed = episodes['PLACE_PROVIDER'].isna() & episodes['PLACE'].isin(
code_list_placement_with_no_provider)
mask = place_provider_needed_and_correct | place_provider_not_provided | place_provider_not_needed
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_103():
error = ErrorDefinition(
code='103',
description='The ethnicity code is either not valid or has not been entered.',
affected_fields=['ETHNIC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
header = dfs['Header']
code_list = [
'WBRI',
'WIRI',
'WOTH',
'WIRT',
'WROM',
'MWBC',
'MWBA',
'MWAS',
'MOTH',
'AIND',
'APKN',
'ABAN',
'AOTH',
'BCRB',
'BAFR',
'BOTH',
'CHNE',
'OOTH',
'REFU',
'NOBT'
]
mask = header['ETHNIC'].isin(code_list)
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_143():
error = ErrorDefinition(
code='143',
description='The reason for new episode code is not a valid code.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = ['S', 'P', 'L', 'T', 'U', 'B']
mask = episodes['RNE'].isin(code_list) | episodes['RNE'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_144():
error = ErrorDefinition(
code='144',
description='The legal status code is not a valid code.',
affected_fields=['LS'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'C1',
'C2',
'D1',
'E1',
'V2',
'V3',
'V4',
'J1',
'J2',
'J3',
'L1',
'L2',
'L3'
]
mask = episodes['LS'].isin(code_list) | episodes['LS'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_145():
error = ErrorDefinition(
code='145',
description='Category of need code is not a valid code.',
affected_fields=['CIN'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'N1',
'N2',
'N3',
'N4',
'N5',
'N6',
'N7',
'N8',
]
mask = episodes['CIN'].isin(code_list) | episodes['CIN'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_146():
error = ErrorDefinition(
code='146',
description='Placement type code is not a valid code.',
affected_fields=['PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'A3',
'A4',
'A5',
'A6',
'H5',
'K1',
'K2',
'P1',
'P2',
'P3',
'R1',
'R2',
'R3',
'R5',
'S1',
'T0',
'T1',
'T2',
'T3',
'T4',
'U1',
'U2',
'U3',
'U4',
'U5',
'U6',
'Z1'
]
mask = episodes['PLACE'].isin(code_list) | episodes['PLACE'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_149():
error = ErrorDefinition(
code='149',
description='Reason episode ceased code is not valid. ',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'E11',
'E12',
'E2',
'E3',
'E4A',
'E4B',
'E13',
'E41',
'E45',
'E46',
'E47',
'E48',
'E5',
'E6',
'E7',
'E8',
'E9',
'E14',
'E15',
'E16',
'E17',
'X1'
]
mask = episodes['REC'].isin(code_list) | episodes['REC'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_167():
error = ErrorDefinition(
code='167',
description='Data entry for participation is invalid or blank.',
affected_fields=['REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
review = dfs['Reviews']
code_list = ['PN0', 'PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7']
        mask = (review['REVIEW'].notna() & review['REVIEW_CODE'].isin(code_list)) | (
                review['REVIEW'].isna() & review['REVIEW_CODE'].isna())
validation_error_mask = ~mask
validation_error_locations = review.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_101():
error = ErrorDefinition(
code='101',
description='Gender code is not valid.',
affected_fields=['SEX'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
header = dfs['Header']
code_list = ['1', '2']
mask = header['SEX'].astype(str).isin(code_list)
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_141():
error = ErrorDefinition(
code='141',
description='Date episode began is not a valid date.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce').notna()
na_location = episodes['DECOM'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
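# Illustrative sketch (not part of the rule set) of the errors='coerce' idiom shared by
# validate_141 and the other "not a valid date" rules: impossible dates parse to NaT, and only
# rows that were filled in but failed to parse are flagged. Values are hypothetical.
#
#     import pandas as pd
#     s = pd.Series(['01/02/2020', '31/02/2020', None])
#     parsed = pd.to_datetime(s, format='%d/%m/%Y', errors='coerce')
#     bad = parsed.isna() & s.notna()    # only the impossible 31/02/2020 is flagged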
def validate_147():
error = ErrorDefinition(
code='147',
description='Date episode ceased is not a valid date.',
affected_fields=['DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce').notna()
na_location = episodes['DEC'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_171():
error = ErrorDefinition(
code='171',
description="Date of birth of mother's child is not a valid date.",
affected_fields=['MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
mask = pd.to_datetime(header['MC_DOB'], format='%d/%m/%Y', errors='coerce').notna()
na_location = header['MC_DOB'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_102():
error = ErrorDefinition(
code='102',
description='Date of birth is not a valid date.',
affected_fields=['DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
mask = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce').notna()
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_112():
error = ErrorDefinition(
code='112',
description='Date should be placed for adoption is not a valid date.',
affected_fields=['DATE_INT'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce').notna()
na_location = ad1['DATE_INT'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = ad1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_115():
error = ErrorDefinition(
code='115',
description="Date of Local Authority's (LA) decision that a child should be placed for adoption is not a valid date.",
affected_fields=['DATE_PLACED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
adopt = dfs['PlacedAdoption']
mask = pd.to_datetime(adopt['DATE_PLACED'], format='%d/%m/%Y', errors='coerce').notna()
na_location = adopt['DATE_PLACED'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = adopt.index[validation_error_mask]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_116():
error = ErrorDefinition(
code='116',
description="Date of Local Authority's (LA) decision that a child should no longer be placed for adoption is not a valid date.",
affected_fields=['DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
adopt = dfs['PlacedAdoption']
mask = pd.to_datetime(adopt['DATE_PLACED_CEASED'], format='%d/%m/%Y', errors='coerce').notna()
na_location = adopt['DATE_PLACED_CEASED'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = adopt.index[validation_error_mask]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_392c():
error = ErrorDefinition(
code='392c',
description='Postcode(s) provided are invalid.',
affected_fields=['HOME_POST', 'PL_POST'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
home_provided = episodes['HOME_POST'].notna()
home_details = merge_postcodes(episodes, "HOME_POST")
home_valid = home_details['pcd'].notna()
pl_provided = episodes['PL_POST'].notna()
pl_details = merge_postcodes(episodes, "PL_POST")
pl_valid = pl_details['pcd'].notna()
error_mask = (home_provided & ~home_valid) | (pl_provided & ~pl_valid)
return {'Episodes': episodes.index[error_mask].tolist()}
return error, _validate
def validate_213():
error = ErrorDefinition(
code='213',
description='Placement provider information not required.',
affected_fields=['PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = df['PLACE'].isin(['T0', 'T1', 'T2', 'T3', 'T4', 'Z1']) & df['PLACE_PROVIDER'].notna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_168():
error = ErrorDefinition(
code='168',
description='Unique Pupil Number (UPN) is not valid. If unknown, default codes should be UN1, UN2, UN3, UN4 or UN5.',
affected_fields=['UPN'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
df = dfs['Header']
mask = df['UPN'].str.match(r'(^((?![IOS])[A-Z]){1}(\d{12}|\d{11}[A-Z]{1})$)|^(UN[1-5])$', na=False)
mask = ~mask
return {'Header': df.index[mask].tolist()}
return error, _validate
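# Hypothetical examples of how the UPN pattern in validate_168 behaves:
#
#     'A123456789012'   -> valid (a letter other than I, O or S followed by 12 digits)
#     'H12345678901A'   -> valid (11 digits then a final letter)
#     'I123456789012'   -> invalid (I, O and S are not allowed as the first character)
#     'UN1'             -> valid "unknown" default code; 'UN9' would be flagged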
def validate_388():
error = ErrorDefinition(
code='388',
description='Reason episode ceased is coded new episode begins, but there is no continuation episode.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
df['DECOM'] = pd.to_datetime(df['DECOM'], format='%d/%m/%Y', errors='coerce')
df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce')
            df['DECOM'] = df['DECOM'].fillna(pd.to_datetime('01/01/1901', format='%d/%m/%Y'))  # early sentinel keeps the column as datetimes
df = df.sort_values(['CHILD', 'DECOM'])
df['DECOM_NEXT_EPISODE'] = df.groupby(['CHILD'])['DECOM'].shift(-1)
# The max DECOM for each child is also the one with no next episode
# And we also add the skipna option
# grouped_decom_by_child = df.groupby(['CHILD'])['DECOM'].idxmax(skipna=True)
no_next = df.DECOM_NEXT_EPISODE.isna() & df.CHILD.notna()
# Dataframe with the maximum DECOM removed
max_decom_removed = df[~no_next]
# Dataframe with the maximum DECOM only
max_decom_only = df[no_next]
# Case 1: If reason episode ceased is coded X1 there must be a subsequent episode
# starting on the same day.
case1 = max_decom_removed[(max_decom_removed['REC'] == 'X1') &
(max_decom_removed['DEC'].notna()) &
(max_decom_removed['DECOM_NEXT_EPISODE'].notna()) &
(max_decom_removed['DEC'] != max_decom_removed['DECOM_NEXT_EPISODE'])]
# Case 2: If an episode ends but the child continues to be looked after, a new
# episode should start on the same day.The reason episode ceased code of
# the episode which ends must be X1.
case2 = max_decom_removed[(max_decom_removed['REC'] != 'X1') &
(max_decom_removed['REC'].notna()) &
(max_decom_removed['DEC'].notna()) &
(max_decom_removed['DECOM_NEXT_EPISODE'].notna()) &
(max_decom_removed['DEC'] == max_decom_removed['DECOM_NEXT_EPISODE'])]
# Case 3: If a child ceases to be looked after reason episode ceased code X1 must
# not be used.
case3 = max_decom_only[(max_decom_only['DEC'].notna()) &
(max_decom_only['REC'] == 'X1')]
mask_case1 = case1.index.tolist()
mask_case2 = case2.index.tolist()
mask_case3 = case3.index.tolist()
mask = mask_case1 + mask_case2 + mask_case3
mask.sort()
return {'Episodes': mask}
return error, _validate
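# Illustrative sketch of the groupby/shift pattern used in validate_388: after sorting by CHILD and
# DECOM, DECOM_NEXT_EPISODE holds the start date of the child's following episode (NaT for the last one).
# The sample data is made up.
def _example_next_episode_shift():
    epi = pd.DataFrame({
        'CHILD': ['A', 'A', 'B'],
        'DECOM': pd.to_datetime(['2020-01-01', '2020-03-01', '2020-02-01']),
    })
    epi = epi.sort_values(['CHILD', 'DECOM'])
    epi['DECOM_NEXT_EPISODE'] = epi.groupby('CHILD')['DECOM'].shift(-1)
    return epi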
def validate_113():
error = ErrorDefinition(
code='113',
description='Date matching child and adopter(s) is not a valid date.',
affected_fields=['DATE_MATCH'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = pd.to_datetime(ad1['DATE_MATCH'], format='%d/%m/%Y', errors='coerce').notna()
na_location = ad1['DATE_MATCH'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = ad1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_134():
error = ErrorDefinition(
code='134',
description='Data on adoption should not be entered for the OC3 cohort.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR',
'SEX_ADOPTR', 'LS_ADOPTR'],
)
def _validate(dfs):
if 'OC3' not in dfs or 'AD1' not in dfs:
return {}
else:
oc3 = dfs['OC3']
ad1 = dfs['AD1']
ad1['ad1_index'] = ad1.index
all_data = ad1.merge(oc3, how='left', on='CHILD')
na_oc3_data = (
all_data['IN_TOUCH'].isna() &
all_data['ACTIV'].isna() &
all_data['ACCOM'].isna()
)
na_ad1_data = (
all_data['DATE_INT'].isna() &
all_data['DATE_MATCH'].isna() &
all_data['FOSTER_CARE'].isna() &
all_data['NB_ADOPTR'].isna() &
all_data['SEX_ADOPTR'].isna() &
all_data['LS_ADOPTR'].isna()
)
validation_error = ~na_oc3_data & ~na_ad1_data
validation_error_locations = all_data.loc[validation_error, 'ad1_index'].unique()
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_119():
error = ErrorDefinition(
code='119',
description='If the decision is made that a child should no longer be placed for adoption, then the date of this decision and the reason why this decision was made must be completed.',
affected_fields=['REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
adopt = dfs['PlacedAdoption']
na_placed_ceased = adopt['DATE_PLACED_CEASED'].isna()
na_reason_ceased = adopt['REASON_PLACED_CEASED'].isna()
validation_error = (na_placed_ceased & ~na_reason_ceased) | (~na_placed_ceased & na_reason_ceased)
validation_error_locations = adopt.index[validation_error]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_159():
error = ErrorDefinition(
code='159',
description='If a child has been recorded as not receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be completed as well.',
affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
mask1 = oc2['SUBSTANCE_MISUSE'].astype(str) == '1'
mask2 = oc2['INTERVENTION_RECEIVED'].astype(str) == '0'
mask3 = oc2['INTERVENTION_OFFERED'].isna()
validation_error = mask1 & mask2 & mask3
validation_error_locations = oc2.index[validation_error]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_142():
error = ErrorDefinition(
code='142',
description='A new episode has started, but the previous episode has not ended.',
affected_fields=['DEC', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
df['DECOM'] = pd.to_datetime(df['DECOM'], format='%d/%m/%Y', errors='coerce')
df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce')
            # Leave missing DECOM as NaT; rows without a DECOM are excluded below by the .notna() filter.
last_episodes = df.sort_values('DECOM').reset_index().groupby(['CHILD'])['index'].last()
ended_episodes_df = df.loc[~df.index.isin(last_episodes)]
ended_episodes_df = ended_episodes_df[(ended_episodes_df['DEC'].isna() | ended_episodes_df['REC'].isna()) &
ended_episodes_df['CHILD'].notna() & ended_episodes_df[
'DECOM'].notna()]
mask = ended_episodes_df.index.tolist()
return {'Episodes': mask}
return error, _validate
def validate_148():
error = ErrorDefinition(
code='148',
description='Date episode ceased and reason episode ceased must both be coded, or both left blank.',
affected_fields=['DEC', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce')
mask = ((df['DEC'].isna()) & (df['REC'].notna())) | ((df['DEC'].notna()) & (df['REC'].isna()))
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_151():
error = ErrorDefinition(
code='151',
        description="All data items relating to a child's adoption must be coded or left blank.",
        affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
na_date_int = ad1['DATE_INT'].isna()
na_date_match = ad1['DATE_MATCH'].isna()
na_foster_care = ad1['FOSTER_CARE'].isna()
na_nb_adoptr = ad1['NB_ADOPTR'].isna()
na_sex_adoptr = ad1['SEX_ADOPTR'].isna()
na_lsadoptr = ad1['LS_ADOPTR'].isna()
ad1_not_null = (
~na_date_int & ~na_date_match & ~na_foster_care & ~na_nb_adoptr & ~na_sex_adoptr & ~na_lsadoptr)
validation_error = (
~na_date_int | ~na_date_match | ~na_foster_care | ~na_nb_adoptr | ~na_sex_adoptr | ~na_lsadoptr) & ~ad1_not_null
validation_error_locations = ad1.index[validation_error]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_182():
error = ErrorDefinition(
code='182',
description='Data entries on immunisations, teeth checks, health assessments and substance misuse problem identified should be completed or all OC2 fields should be left blank.',
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'CONVICTED',
'HEALTH_CHECK', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
mask1 = (
oc2['IMMUNISATIONS'].isna() |
oc2['TEETH_CHECK'].isna() |
oc2['HEALTH_ASSESSMENT'].isna() |
oc2['SUBSTANCE_MISUSE'].isna()
)
mask2 = (
oc2['CONVICTED'].isna() &
oc2['HEALTH_CHECK'].isna() &
oc2['INTERVENTION_RECEIVED'].isna() &
oc2['INTERVENTION_OFFERED'].isna()
)
validation_error = mask1 & ~mask2
validation_error_locations = oc2.index[validation_error]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_214():
error = ErrorDefinition(
code='214',
description='Placement location information not required.',
affected_fields=['PL_POST', 'URN'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = df['LS'].isin(['V3', 'V4']) & ((df['PL_POST'].notna()) | (df['URN'].notna()))
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_222():
error = ErrorDefinition(
code='222',
description='Ofsted Unique reference number (URN) should not be recorded for this placement type.',
affected_fields=['URN'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
place_code_list = ['H5', 'P1', 'P2', 'P3', 'R1', 'R2', 'R5', 'T0', 'T1', 'T2', 'T3', 'T4', 'Z1']
mask = (df['PLACE'].isin(place_code_list)) & (df['URN'].notna()) & (df['URN'] != 'XXXXXX')
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_366():
error = ErrorDefinition(
code='366',
description='A child cannot change placement during the course of an individual short-term respite break.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = (df['LS'] == 'V3') & (df['RNE'] != 'S')
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_628():
error = ErrorDefinition(
code='628',
description='Motherhood details are not required for care leavers who have not been looked after during the year.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs or 'OC3' not in dfs:
return {}
else:
hea = dfs['Header']
epi = dfs['Episodes']
oc3 = dfs['OC3']
hea = hea.reset_index()
oc3_no_nulls = oc3[oc3[['IN_TOUCH', 'ACTIV', 'ACCOM']].notna().any(axis=1)]
hea_merge_epi = hea.merge(epi, how='left', on='CHILD', indicator=True)
hea_not_in_epi = hea_merge_epi[hea_merge_epi['_merge'] == 'left_only']
cohort_to_check = hea_not_in_epi.merge(oc3_no_nulls, how='inner', on='CHILD')
error_cohort = cohort_to_check[cohort_to_check['MOTHER'].notna()]
error_list = list(set(error_cohort['index'].to_list()))
error_list.sort()
return {'Header': error_list}
return error, _validate
def validate_164():
error = ErrorDefinition(
code='164',
description='Distance is not valid. Please check a valid postcode has been entered.',
affected_fields=['PL_DISTANCE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
distance = pd.to_numeric(df['PL_DISTANCE'], errors='coerce')
# Use a bit of tolerance in these bounds
distance_valid = distance.gt(-0.2) & distance.lt(1001.0)
mask = ~is_short_term & ~distance_valid
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_169():
error = ErrorDefinition(
code='169',
description='Local Authority (LA) of placement is not valid or is missing. Please check a valid postcode has been entered.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
# Because PL_LA is derived, it will always be valid if present
mask = ~is_short_term & df['PL_LA'].isna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_179():
error = ErrorDefinition(
code='179',
description='Placement location code is not a valid code.',
affected_fields=['PL_LOCATION'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
# Because PL_LOCATION is derived, it will always be valid if present
mask = ~is_short_term & df['PL_LOCATION'].isna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_1015():
error = ErrorDefinition(
code='1015',
description='Placement provider is own provision but child not placed in own LA.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
local_authority = dfs['metadata']['localAuthority']
placement_fostering_or_adoption = df['PLACE'].isin([
'A3', 'A4', 'A5', 'A6', 'U1', 'U2', 'U3', 'U4', 'U5', 'U6',
])
own_provision = df['PLACE_PROVIDER'].eq('PR1')
is_short_term = df['LS'].isin(['V3', 'V4'])
is_pl_la = df['PL_LA'].eq(local_authority)
checked_episodes = ~placement_fostering_or_adoption & ~is_short_term & own_provision
checked_episodes = checked_episodes & df['LS'].notna() & df['PLACE'].notna()
mask = checked_episodes & ~is_pl_la
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_411():
error = ErrorDefinition(
code='411',
description='Placement location code disagrees with LA of placement.',
affected_fields=['PL_LOCATION'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
local_authority = dfs['metadata']['localAuthority']
mask = df['PL_LOCATION'].eq('IN') & df['PL_LA'].ne(local_authority)
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_420():
error = ErrorDefinition(
code='420',
description='LA of placement completed but child is looked after under legal status V3 or V4.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
mask = is_short_term & df['PL_LA'].notna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_355():
error = ErrorDefinition(
code='355',
description='Episode appears to have lasted for less than 24 hours',
affected_fields=['DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
            mask = df['DEC'].notna() & (df['DECOM'].astype(str) == df['DEC'].astype(str))
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_586():
error = ErrorDefinition(
code='586',
        description="Dates of missing periods are before child's date of birth.",
affected_fields=['MIS_START'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
df = dfs['Missing']
df['DOB'] = pd.to_datetime(df['DOB'], format='%d/%m/%Y', errors='coerce')
df['MIS_START'] = pd.to_datetime(df['MIS_START'], format='%d/%m/%Y', errors='coerce')
error_mask = df['MIS_START'].notna() & (df['MIS_START'] <= df['DOB'])
return {'Missing': df.index[error_mask].to_list()}
return error, _validate
def validate_630():
error = ErrorDefinition(
code='630',
description='Information on previous permanence option should be returned.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'PrevPerm' not in dfs or 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
pre = dfs['PrevPerm']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
epi = epi.reset_index()
# Form the episode dataframe which has an 'RNE' of 'S' in this financial year
epi_has_rne_of_S_in_year = epi[(epi['RNE'] == 'S') & (epi['DECOM'] >= collection_start)]
# Merge to see
# 1) which CHILD ids are missing from the PrevPerm file
# 2) which CHILD are in the prevPerm file, but don't have the LA_PERM/DATE_PERM field completed where they should be
# 3) which CHILD are in the PrevPerm file, but don't have the PREV_PERM field completed.
merged_epi_preperm = epi_has_rne_of_S_in_year.merge(pre, on='CHILD', how='left', indicator=True)
error_not_in_preperm = merged_epi_preperm['_merge'] == 'left_only'
error_wrong_values_in_preperm = (merged_epi_preperm['PREV_PERM'] != 'Z1') & (
merged_epi_preperm[['LA_PERM', 'DATE_PERM']].isna().any(axis=1))
error_null_prev_perm = (merged_epi_preperm['_merge'] == 'both') & (merged_epi_preperm['PREV_PERM'].isna())
error_mask = error_not_in_preperm | error_wrong_values_in_preperm | error_null_prev_perm
error_list = merged_epi_preperm[error_mask]['index'].to_list()
error_list = list(set(error_list))
error_list.sort()
return {'Episodes': error_list}
return error, _validate
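# Illustrative sketch of the merge(indicator=True) pattern used in validate_630: children present in
# Episodes but missing from PrevPerm come back with _merge == 'left_only'. Sample data is made up.
def _example_left_only_merge():
    epi = pd.DataFrame({'CHILD': ['A', 'B']})
    pre = pd.DataFrame({'CHILD': ['A'], 'PREV_PERM': ['Z1']})
    merged = epi.merge(pre, on='CHILD', how='left', indicator=True)
    return merged[merged['_merge'] == 'left_only']  # row for child 'B'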
def validate_501():
error = ErrorDefinition(
code='501',
description='A new episode has started before the end date of the previous episode.',
affected_fields=['DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi = epi.reset_index()
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi['DEC'] = pd.to_datetime(epi['DEC'], format='%d/%m/%Y', errors='coerce')
epi = epi.sort_values(['CHILD', 'DECOM'])
epi_lead = epi.shift(1)
epi_lead = epi_lead.reset_index()
m_epi = epi.merge(epi_lead, left_on='index', right_on='level_0', suffixes=('', '_prev'))
error_cohort = m_epi[(m_epi['CHILD'] == m_epi['CHILD_prev']) & (m_epi['DECOM'] < m_epi['DEC_prev'])]
error_list = error_cohort['index'].to_list()
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_502():
error = ErrorDefinition(
code='502',
        description="Last year's record ended with an open episode. The date on which that episode started does not match the start date of the first episode on this year's record.",
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi_last = dfs['Episodes_last']
epi = epi.reset_index()
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi_last['DECOM'] = pd.to_datetime(epi_last['DECOM'], format='%d/%m/%Y', errors='coerce')
epi_last_no_dec = epi_last[epi_last['DEC'].isna()]
epi_min_decoms_index = epi[['CHILD', 'DECOM']].groupby(['CHILD'])['DECOM'].idxmin()
epi_min_decom_df = epi.loc[epi_min_decoms_index, :]
merged_episodes = epi_min_decom_df.merge(epi_last_no_dec, on='CHILD', how='inner')
error_cohort = merged_episodes[merged_episodes['DECOM_x'] != merged_episodes['DECOM_y']]
error_list = error_cohort['index'].to_list()
error_list = list(set(error_list))
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_153():
error = ErrorDefinition(
code='153',
description="All data items relating to a child's activity or accommodation after leaving care must be coded or left blank.",
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
oc3 = dfs['OC3']
oc3_not_na = (
oc3['IN_TOUCH'].notna() &
oc3['ACTIV'].notna() &
oc3['ACCOM'].notna()
)
oc3_all_na = (
oc3['IN_TOUCH'].isna() &
oc3['ACTIV'].isna() &
oc3['ACCOM'].isna()
)
validation_error = ~oc3_not_na & ~oc3_all_na
validation_error_locations = oc3.index[validation_error]
return {'OC3': validation_error_locations.to_list()}
return error, _validate
def validate_166():
error = ErrorDefinition(
code='166',
description="Date of review is invalid or blank.",
affected_fields=['REVIEW'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
review = dfs['Reviews']
error_mask = pd.to_datetime(review['REVIEW'], format='%d/%m/%Y', errors='coerce').isna()
validation_error_locations = review.index[error_mask]
return {'Reviews': validation_error_locations.to_list()}
return error, _validate
def validate_174():
error = ErrorDefinition(
code='174',
description="Mother's child date of birth is recorded but gender shows that the child is a male.",
affected_fields=['SEX', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
child_is_male = header['SEX'].astype(str) == '1'
mc_dob_recorded = header['MC_DOB'].notna()
error_mask = child_is_male & mc_dob_recorded
validation_error_locations = header.index[error_mask]
return {'Header': validation_error_locations.to_list()}
return error, _validate
def validate_180():
error = ErrorDefinition(
code='180',
description="Data entry for the strengths and difficulties questionnaire (SDQ) score is invalid.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
oc2['SDQ_SCORE'] = pd.to_numeric(oc2['SDQ_SCORE'], errors='coerce')
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['SDQ_SCORE'].isin(range(41))
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_181():
error = ErrorDefinition(
code='181',
description="Data items relating to children looked after continuously for 12 months should be completed with a 0 or 1.",
affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
code_list = ['0', '1']
fields_of_interest = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
error_mask = (
oc2[fields_of_interest].notna()
& ~oc2[fields_of_interest].astype(str).isin(['0', '1'])
).any(axis=1)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_192():
error = ErrorDefinition(
code='192',
description="Child has been identified as having a substance misuse problem but the additional item on whether an intervention was received has been left blank.",
affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
misuse = oc2['SUBSTANCE_MISUSE'].astype(str) == '1'
intervention_blank = oc2['INTERVENTION_RECEIVED'].isna()
error_mask = misuse & intervention_blank
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_193():
error = ErrorDefinition(
code='193',
description="Child not identified as having a substance misuse problem but at least one of the two additional items on whether an intervention were offered and received have been completed.",
affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
no_substance_misuse = oc2['SUBSTANCE_MISUSE'].isna() | (oc2['SUBSTANCE_MISUSE'].astype(str) == '0')
intervention_not_blank = oc2['INTERVENTION_RECEIVED'].notna() | oc2['INTERVENTION_OFFERED'].notna()
error_mask = no_substance_misuse & intervention_not_blank
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_197a():
error = ErrorDefinition(
code='197a',
description="Reason for no Strengths and Difficulties (SDQ) score is not required if Strengths and Difficulties Questionnaire score is filled in.",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
sdq_filled_in = oc2['SDQ_SCORE'].notna()
reason_filled_in = oc2['SDQ_REASON'].notna()
error_mask = sdq_filled_in & reason_filled_in
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_567():
error = ErrorDefinition(
code='567',
description='The date that the missing episode or episode that the child was away from placement without authorisation ended is before the date that it started.',
affected_fields=['MIS_START', 'MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
mis = dfs['Missing']
mis['MIS_START'] = pd.to_datetime(mis['MIS_START'], format='%d/%m/%Y', errors='coerce')
mis['MIS_END'] = pd.to_datetime(mis['MIS_END'], format='%d/%m/%Y', errors='coerce')
mis_error = mis[mis['MIS_START'] > mis['MIS_END']]
return {'Missing': mis_error.index.to_list()}
return error, _validate
def validate_304():
error = ErrorDefinition(
code='304',
description='Date unaccompanied asylum-seeking child (UASC) status ceased must be on or before the 18th birthday of a child.',
affected_fields=['DUC'],
)
def _validate(dfs):
if 'UASC' not in dfs:
return {}
else:
uasc = dfs['UASC']
uasc['DOB'] = pd.to_datetime(uasc['DOB'], format='%d/%m/%Y', errors='coerce')
uasc['DUC'] = pd.to_datetime(uasc['DUC'], format='%d/%m/%Y', errors='coerce')
            mask = uasc['DUC'].notna() & (uasc['DUC'] > uasc['DOB'] + pd.offsets.DateOffset(years=18))
            return {'UASC': uasc.index[mask].to_list()}
    return error, _validate
#!/usr/bin/env python
# coding: utf-8
import simpy
import datetime
import pandas as pd
import logging
from enum import Enum
import random
from itertools import repeat
from ruamel.yaml import YAML
from datetime import timedelta
log_filename = "logs-10.log"
mainLogger = logging.getLogger()
fhandler = logging.FileHandler(filename=log_filename, mode='w')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fhandler.setFormatter(formatter)
mainLogger.addHandler(fhandler)
mainLogger.setLevel(logging.DEBUG)
mainLogger.debug("test")
class Metric(Enum):
RW = "Requests Waiting"
BS = "Busy Slots"
AU = "Active Users"
class User:
def __init__(self, id, scenario, world):
self.id = id
self.scenario = scenario
self._world = world
self.taskid = 0
self.create()
# Start the run process everytime an instance is created.
# create itself as a processs
self.action = self._world.env.process(self.run())
def create(self):
self.enteringAt = self._world.env.now
self.name = "User-%03d" % self.id
mainLogger.info(f"user created {self.name}")
self._world.user_monitor.report_new_user(self)
def run_old(self):
while True:
self.taskid += 1
for task in self.scenario.tasks:
taskname = task['Name']
task_duration = task['Duration']
mark = self._world.env.now
mainLogger.debug(f"{self.name} starts task {taskname} at %d" % mark)
if 'Res' in task:
self._world.user_monitor.report_start(
self.name,
self.scenario,
taskname,
self.taskid)
# We yield the process that process() returns
# to wait for it to finish
amount = task['Res']
yield self._world.env.process(self.process_task(task_duration, amount))
self._world.user_monitor.report_stop(
self.name,
self.scenario,
taskname,
self.taskid)
else:
# wait some time even if no tracked
yield self._world.env.timeout(task_duration)
mainLogger.debug(f"{self.name} ends task {taskname} at %d" % mark)
def run(self):
scenario = self.scenario
mainLogger.debug(f"entering scenario: {scenario['name']}")
mainLogger.debug(f"steps: {scenario['steps']}")
if 'init' in scenario['steps']:
mainLogger.debug("has init")
mainLogger.debug("run_step_tasks init")
process = self.run_step_tasks(scenario['steps']['init']['tasks'])
yield self._world.env.process(process)
if 'loop' in scenario['steps']:
mainLogger.debug("has loop")
step_loop = scenario['steps']['loop']
if 'repeat' in step_loop:
counter = 0
while counter < step_loop['repeat']:
mainLogger.debug("run_step_tasks loop")
process = self.run_step_tasks(scenario['steps']['loop']['tasks'])
yield self._world.env.process(process)
counter += 1
else:
mainLogger.debug("run_step_tasks loop infinite")
process = self.run_step_tasks(scenario['steps']['loop']['tasks'])
yield self._world.env.process(process)
if 'finally' in scenario['steps']:
mainLogger.debug("has finally")
mainLogger.debug("run_step_tasks finally")
process = self.run_step_tasks(scenario['steps']['finally']['tasks'])
yield self._world.env.process(process)
def run_step_tasks(self, tasks):
mainLogger.debug(f"entering run_step_tasks {tasks}")
for task in tasks:
mainLogger.debug(f"run_step_tasks::task: {task}")
yield self._world.env.process(self.run_task(task))
def run_task(self, task):
mainLogger.debug(f"entering run_task {task} id:{self.taskid}")
max_count = 1
if 'repeat' in task:
max_count = task['repeat']
counter = 0
while counter < max_count:
self.taskid += 1
mainLogger.debug(f"run task {task['name']} for {task['duration']}")
if 'resources' in task:
res_amount = task['resources']
if 'parallel' in task:
mainLogger.debug("run_task in parallel")
res_amount = res_amount * task['parallel']
mainLogger.debug(f"task resources amount {res_amount}")
self._world.user_monitor.report_start(
self.name,
self.scenario['name'],
task['name'],
self.taskid)
process = self.process_task(task['duration'], res_amount)
yield self._world.env.process(process)
self._world.user_monitor.report_stop(
self.name,
self.scenario['name'],
task['name'],
self.taskid)
mainLogger.debug("task processing completed")
else:
mainLogger.debug(f"wait after task for {task['duration']}")
yield self._world.env.timeout(task['duration'])
mainLogger.debug("wait after task completed")
if 'wait' in task:
mainLogger.debug(f"manual task for {task['wait']}")
yield self._world.env.timeout(task['wait'])
mainLogger.debug("manual task completed")
# increment counter
counter += 1
def process_task(self, duration, amount):
mainLogger.debug("entering process task at %d" % self._world.env.now)
with Job(self._world.res, amount) as req:
yield req
yield self._world.env.timeout(duration)
mainLogger.debug("exiting process task at %d" % self._world.env.now)
class Clock:
def __init__(self, tick_interval):
self.tick_interval = tick_interval
self.base_epoch = datetime.datetime.now().timestamp()
mainLogger.info(f"Clock created - base {self.base_epoch}")
def to_date(self, tick):
delta = tick * self.tick_interval
datetime_time = datetime.datetime.fromtimestamp(self.base_epoch) + delta
return datetime_time
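# Quick sketch: with a 1-minute tick, simulation time 90 maps to "now + 90 minutes".
def _example_clock_usage():
    clock = Clock(timedelta(minutes=1))
    return clock.to_date(90)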
class UsersMonitor:
def __init__(self, world):
self._world = world
# init parameters are self reported
# start and stop events
self.start_data = []
self.stop_data = []
# list of users
self.users = []
def report_new_user(self, user):
self.users.append(user)
def report_start(self, username, scenarioname, taskname, taskid):
mark = self._world.env.now
self.start_data.append(
dict(
StartMark=mark,
Start=self._world.clock.to_date(mark),
Username=username,
Scenario=scenarioname,
Task=taskname,
TaskId=taskid
)
)
def report_stop(self, username, scenarioname, taskname, taskid):
mark = self._world.env.now
self.stop_data.append(
dict(
FinishMark=mark,
Finish=self._world.clock.to_date(mark),
Username=username,
Scenario=scenarioname,
Task=taskname,
TaskId=taskid
)
)
def collect(self):
        df_start = pd.DataFrame(self.start_data)
        df_stop = pd.DataFrame(self.stop_data)
        # Assumed completion: pair each start event with its stop event to get one row per executed task.
        return df_start.merge(df_stop, on=['Username', 'Scenario', 'Task', 'TaskId'], how='left')
import pandas as pd
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import time
import multiprocessing as mp
start_time=time.time()
def svm(location1, location2):
    # Train an RBF-kernel SVM on the labelled training file and predict fraud types for the test file.
    data = pd.read_csv(location1)
    data_columns = data.columns
    xtrain = data[data_columns[data_columns != 'typeoffraud']]
    ytrain = data['typeoffraud']
    data1 = pd.read_csv(location2)
    data1_columns = data1.columns
    xtest = data1[data1_columns[data1_columns != 'typeoffraud']]
    from sklearn import svm as sklearn_svm  # local import, aliased so it does not shadow this function
    clf = sklearn_svm.SVC(kernel='rbf')
    clf.fit(xtrain, ytrain)
    ypredict = clf.predict(xtest)
    rel = list(zip(ypredict))
    pp = pd.DataFrame(data=rel, columns=['label'])
    pp.to_csv('label.csv', index=False)
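# Example invocation (the CSV paths are placeholders): trains an RBF SVM on the first file and
# writes predicted labels for the second file to label.csv in the working directory.
# svm('fraud_train.csv', 'fraud_test.csv')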
###########################################################################################################
def maketags(location2,location3):
    e = pd.read_csv(location2)
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 14 19:01:45 2021
@author: David
"""
from pathlib import Path
from datetime import datetime as dt
import zipfile
import os.path
import numpy as np
import scipy.signal as sig
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
from matplotlib import gridspec
import seaborn as sea
import fig_util
from IPython.display import display, Image
SLATE = (0.15, 0.15, 0.15)
WD_ARR = {
1: 'Montag',
2: 'Dienstag',
3: 'Mittwoch',
4: 'Donnerstag',
5: 'Freitag',
6: 'Samstag',
7: 'Sonntag'
}
OUTPUT_DIR = '..\\output\\RNowcast\\anim\\'
OUTPUT_DIR = 'D:\\COVID-19\\output\\RNowcast\\anim\\'
ARCHIVE_FPATH = '..\\data\\RKI\\Nowcasting\\Nowcast_R_{:s}.csv'
ARCHIVE_ZIP_URL = 'https://github.com/robert-koch-institut/SARS-CoV-2-Nowcasting_und_-R-Schaetzung/archive/refs/heads/main.zip'
#'https://github.com/robert-koch-institut/SARS-CoV-2-Nowcasting_und_-R-Schaetzung/raw/main/Archiv/Nowcast_R_{:s}.csv'
SPECIFIC_DAY = None
#SPECIFIC_DAY = '2021-09-24'
#SPECIFIC_DAY = '2021-10-08'
#SPECIFIC_DAY = '2021-11-12'
INPUT_DATA_RANGE = ['2021-03-16', dt.now().strftime('%Y-%m-%d')]
PLOT_MAX_DATE = '2021-12-31'
DO_EXTRAPOLATION = False
if not SPECIFIC_DAY is None:
INPUT_DATA_RANGE[1] = SPECIFIC_DAY
dataset_date_range = pd.date_range(*INPUT_DATA_RANGE)
r_idx_min = dataset_date_range[0] - pd.DateOffset(days=4)
r_idx = pd.date_range(r_idx_min, dataset_date_range[-5].strftime('%Y-%m-%d'))
r_cols = pd.Int64Index(range(4, 4+7*6, 1))
Path(OUTPUT_DIR).mkdir(parents=True, exist_ok=True)
# %%
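# Build a "reporting triangle": one row per disease-onset date, one column per reporting delay
# (4 to 45 days); each cell accumulates the nowcast case estimate that was published at that delay.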
rep_tri = pd.DataFrame(
data=np.zeros((r_idx.size, r_cols.size)),
index=r_idx,
columns=r_cols)
datasets = {}
for i in range(dataset_date_range.size):
dataset_date = dataset_date_range[i]
dataset_date_str = dataset_date.strftime('%Y-%m-%d')
print(dataset_date_str)
#if os.path.isfile(ARCHIVE_FPATH.format(dataset_date_str)):
try:
data = pd.read_csv(
ARCHIVE_FPATH.format(dataset_date_str),
index_col = 'Datum',
parse_dates = True
)
except ValueError:
# two steps:
data = pd.read_csv(
ARCHIVE_FPATH.format(dataset_date_str),
parse_dates = True,
sep=';', decimal=',',
skip_blank_lines=False
)
extra_rows = data.index.size - data.index[data.Datum.isna()][0]
data = pd.read_csv(
ARCHIVE_FPATH.format(dataset_date_str),
index_col = 'Datum',
parse_dates = True,
sep=';', decimal=',',
date_parser=lambda x: dt.strptime(x, '%d.%m.%Y'),
skipfooter=extra_rows, encoding='UTF-8'
)
        data.rename(columns={'Schätzer_Neuerkrankungen': 'PS_COVID_Faelle'},
inplace=True)
last_dataset = data.loc[:,['PS_COVID_Faelle']].copy()
last_dataset['Iso Weekdays'] = last_dataset.index.map(lambda d: d.isoweekday())
last_dataset['Date Offset'] = (dataset_date - last_dataset.index).days
datasets[dataset_date_str] = last_dataset
comm_rows = r_idx.intersection(data.index)
data = data.loc[comm_rows]
d_cols = (dataset_date-data.index).days
data['Offset'] = d_cols
comm_cols = d_cols.intersection(r_cols)
max_offset = comm_cols.max()
data = data.loc[data['Offset'] <= max_offset, ['Offset', 'PS_COVID_Faelle']]
data = data.pivot(columns='Offset', values='PS_COVID_Faelle')
data.fillna(0, inplace=True)
rep_tri.loc[data.index, comm_cols] += data.loc[:, comm_cols]
(na_cols, na_rows) = np.tril_indices(rep_tri.shape[0], -1)
if any(na_cols >= r_cols.size):
max_cols = np.nonzero(na_cols >= r_cols.size)[0][0]
na_cols = na_cols[:max_cols]
na_rows = na_rows[:max_cols]
rep_tri2 = rep_tri.to_numpy().copy()
rep_tri2[r_idx.size-1-na_rows, na_cols] = np.nan
rep_tri3 = rep_tri.copy()
rep_tri3.loc[:,:] = rep_tri2
rep_tri4 = rep_tri3.iloc[:-14, :].div(rep_tri3.apply(lambda s: s[pd.Series.last_valid_index(s)], axis=1), axis=0)
# %%
q10_dist = pd.DataFrame(index=r_cols, columns=range(1,7,1))
lq_dist = pd.DataFrame(index=r_cols, columns=range(1,7,1))
med_dist = pd.DataFrame(index=r_cols, columns=range(1,7,1))
uq_dist = pd.DataFrame(index=r_cols, columns=range(1,7,1))
q90_dist = pd.DataFrame(index=r_cols, columns=range(1,7,1))
max_days_offset = r_cols.max()
for i in range(7):
iwd = rep_tri4.index[i].isoweekday()
rep_tri5 = rep_tri4.iloc[i::7]
tri5_med = rep_tri5.median(axis=0)
rep_tri5 = rep_tri5.loc[(((rep_tri5-tri5_med) > 1) | (rep_tri5-tri5_med < -1)).sum(axis=1)==0]
rep_tri5 *= 100
test = rep_tri5.iloc[:,0:11].melt(var_name='Datenstand in "n Tage nach Datum des Nowcasts"', value_name='Nowcast Korrekturfaktor in %')
test = test.loc[~test['Nowcast Korrekturfaktor in %'].isna()]
fig = plt.figure(figsize=(16, 9))
gs = gridspec.GridSpec(2, 1, figure=fig,
height_ratios = [7, 1],
hspace = 0.1)
ax = fig.add_subplot(gs[0, 0])
    fig.suptitle('COVID-19 - Variation des RKI Nowcasts der Fallzahlen über Datenstand-Alter nach Wochentag: {:s}'.format(WD_ARR[iwd]),
horizontalalignment='center',
verticalalignment='center',
fontsize=21, color=SLATE, y=0.91)
sea.violinplot(x='Datenstand in "n Tage nach Datum des Nowcasts"',
y='Nowcast Korrekturfaktor in %',
data=test,
scale="count")
ax.set_ylim([0, 160])
ax.yaxis.set_major_locator(MultipleLocator(20))
ax.yaxis.set_minor_locator(MultipleLocator(5))
ax.set_xlim([-1, 11])
ax.tick_params(which='minor', length=0, width=0, pad=10)
ax.tick_params(axis=u'both', labelsize=16, labelcolor = SLATE)
ax.grid(True, which='major', axis='both', linestyle='-', color=(0.85,0.85,0.85))
ax.grid(True, which='minor', axis='both', linestyle='-', color=(0.95,0.95,0.95))
ax.set_ylabel('Nowcast Korrekturfaktor in %', fontdict={'fontsize': 18}, fontsize=18, color = SLATE, labelpad=14)
ax.set_xlabel('Datenstand in "n Tage nach Datum des Nowcasts"', fontdict={'fontsize': 18}, fontsize=18, color = SLATE, labelpad=14)
ax2 = fig.add_subplot(gs[1, 0])
ax2.axis('off')
if dataset_date_range[0].year == dataset_date_range[-1].year:
Datenstand_range_str = (
dataset_date_range[0].strftime('%d.%m.-') +
dataset_date_range[-1].strftime('%d.%m.%Y') )
else:
Datenstand_range_str = (
dataset_date_range[0].strftime('%d.%m.%y-') +
dataset_date_range[-1].strftime('%d.%m.%Y') )
plt.text(0, 0.05,
'Datenquelle:\n' +
             'Robert Koch-Institut (RKI), an der Heiden, Matthias (2021): SARS-CoV-2-Nowcasting und -R-Schaetzung, Berlin: Zenodo. DOI:10.5281/zenodo.4680400\n'+
'URL: https://github.com/robert-koch-institut/SARS-CoV-2-Nowcasting_und_-R-Schaetzung ; ' +
'Abfragedatum/Datenstand: ' + Datenstand_range_str + ';\n' +
'Datenlizenz CC-BY 4.0 International; eigene Berechnung/eigene Darstellung',
fontsize=11.5)
if True:
exp_full_fname = '{:s}{:s}_{:d}_{:s}.png'.format(
OUTPUT_DIR + '..\\', 'Nowcast_Var', iwd, WD_ARR[iwd])
print('Saving ' + exp_full_fname)
try:
fig_util.set_size(fig, (1920.0/100.0, 1080.0/100.0), dpi=100, pad_inches=0.35)
except:
fig_util.force_fig_size(fig, (1920.0, 1080.0), dpi=100, pad_inches=0.35)
fig.savefig(exp_full_fname, dpi=100, bbox_inches='tight', pad_inches=0.35)
display(Image(filename=exp_full_fname))
plt.close()
else:
plt.show()
q10_dist.loc[:, iwd] = 0.01 * rep_tri5.quantile(0.1, axis=0)
lq_dist.loc[:, iwd] = 0.01 * rep_tri5.quantile(0.25, axis=0)
med_dist.loc[:, iwd] = 0.01 * rep_tri5.median(axis=0)
uq_dist.loc[:, iwd] = 0.01 * rep_tri5.quantile(0.75, axis=0)
q90_dist.loc[:, iwd] = 0.01 * rep_tri5.quantile(0.9, axis=0)
#input_matrix[np.tril_indices(input_matrix.shape[0], -1)] = np.nan
# %%
for i in range(dataset_date_range.size):
dataset_date = dataset_date_range[i]
dataset_date_str = dataset_date.strftime('%Y-%m-%d')
print(dataset_date_str)
last_dataset = datasets[dataset_date_str]
last_dataset['Med NowNowcast'] = last_dataset.apply(lambda r: r['PS_COVID_Faelle'] if r['Date Offset'] > max_days_offset else r['PS_COVID_Faelle'] / med_dist[r['Iso Weekdays']][r['Date Offset']], axis=1)
#last_dataset['Q1 NowNowcast'] = last_dataset.apply(lambda r: r['PS_COVID_Faelle'] if r['Date Offset'] > max_days_offset else r['PS_COVID_Faelle'] / lq_dist[r['Iso Weekdays']][r['Date Offset']], axis=1)
#last_dataset['Q3 NowNowcast'] = last_dataset.apply(lambda r: r['PS_COVID_Faelle'] if r['Date Offset'] > max_days_offset else r['PS_COVID_Faelle'] / uq_dist[r['Iso Weekdays']][r['Date Offset']], axis=1)
last_dataset['Med NowNowcast 7d MA'] = np.hstack((
np.full((3), np.nan),
sig.correlate(last_dataset['Med NowNowcast'], np.full((7), 1.0/7), method='direct', mode='valid'),
np.full((3), np.nan)))
# last_dataset['Q1 NowNowcast 7d MA'] = np.hstack((
# np.full((3), np.nan),
# sig.correlate(last_dataset['Q1 NowNowcast'], np.full((7), 1.0/7), method='direct', mode='valid'),
# np.full((3), np.nan)))
# last_dataset['Q3 NowNowcast 7d MA'] = np.hstack((
# np.full((3), np.nan),
# sig.correlate(last_dataset['Q3 NowNowcast'], np.full((7), 1.0/7), method='direct', mode='valid'),
# np.full((3), np.nan)))
last_dataset['Nowcast 7d MA'] = np.hstack((
np.full((3), np.nan),
sig.correlate(last_dataset['PS_COVID_Faelle'], np.full((7), 1.0/7), method='direct', mode='valid'),
np.full((3), np.nan)))
v = last_dataset['Med NowNowcast 7d MA'].to_numpy()
v = v[4:] / v[:-4]
v = np.hstack((
np.full((6), np.nan),
v[:-2]))
last_dataset['R (Med NowNowcast 7d MA)'] = v
v = 2.0**(sig.correlate(np.log2(v), np.full((7), 1.0/7), method='direct', mode='valid'))
v = np.hstack((
np.full((3), np.nan),
v,
np.full((3), np.nan)))
last_dataset['Rgeom (Med NowNowcast 7d MA)'] = v
# v1 = last_dataset['Q1 NowNowcast 7d MA'].to_numpy()
# v3 = last_dataset['Q3 NowNowcast 7d MA'].to_numpy()
# vmin = np.vstack((v1, v3)).max(axis=0)
# vmax = np.vstack((v1, v3)).max(axis=0)
# vlo = vmin[4:] / vmax[:-4]
# vhi = vmax[4:] / vmin[:-4]
# vlo = np.hstack((
# np.full((3), np.nan),
# vlo,
# np.full((1), np.nan)))
# vhi = np.hstack((
# np.full((3), np.nan),
# vhi,
# np.full((1), np.nan)))
# last_dataset['R (Q3 NowNowcast 7d MA)'] = vhi
# last_dataset['R (Q1 NowNowcast 7d MA)'] = vlo
# vlo = 2.0**(sig.correlate(np.log2(vlo), np.full((7), 1.0/7), method='direct', mode='valid'))
# vhi = 2.0**(sig.correlate(np.log2(vhi), np.full((7), 1.0/7), method='direct', mode='valid'))
# vlo = np.hstack((
# np.full((3), np.nan),
# vlo,
# np.full((3), np.nan)))
# vhi = np.hstack((
# np.full((3), np.nan),
# vhi,
# np.full((3), np.nan)))
# last_dataset['Rgeom (Q3 NowNowcast 7d MA)'] = vhi
# last_dataset['Rgeom (Q1 NowNowcast 7d MA)'] = vlo
v = last_dataset['Nowcast 7d MA'].to_numpy()
v = v[4:] / v[:-4]
v = np.hstack((
np.full((6), np.nan),
v[:-2]))
last_dataset['R (Nowcast 7d MA)'] = v
v = 2.0**(sig.correlate(np.log2(v), np.full((7), 1.0/7), method='direct', mode='valid'))
v = np.hstack((
np.full((3), np.nan),
v,
np.full((3), np.nan)))
last_dataset['Rgeom (Nowcast 7d MA)'] = v
datasets[dataset_date_str] = last_dataset
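# Illustrative sketch of the R estimate built above (synthetic input, simplified index alignment):
# R(t) is the ratio of the 7-day moving average of new cases to the same average four days earlier;
# 'Rgeom' additionally smooths R with a centred 7-day geometric mean. Not used by the code below.
def _example_r_from_cases(cases):
    ma7 = pd.Series(cases, dtype=float).rolling(7, center=True).mean()
    r = ma7 / ma7.shift(4)
    r_geom = 2.0 ** np.log2(r).rolling(7, center=True).mean()
    return r, r_geom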
# %%
fidz = datasets[INPUT_DATA_RANGE[0]]['Rgeom (Med NowNowcast 7d MA)'].first_valid_index()
total_idz = datasets[INPUT_DATA_RANGE[1]]['Rgeom (Med NowNowcast 7d MA)'].index[12:-4].copy()
test = pd.DataFrame(index=total_idz, columns=dataset_date_range.copy())
for dataset_date in dataset_date_range:
dataset_date_str = dataset_date.strftime('%Y-%m-%d')
cur_dataset = datasets[dataset_date_str]
comm_idz = total_idz.intersection(cur_dataset.index)
test.loc[comm_idz, dataset_date] = cur_dataset.loc[comm_idz, 'Rgeom (Med NowNowcast 7d MA)']
test_s = test.subtract(test.iloc[:, -1], axis=0)
# if np.isnan(test_s.iloc[0,0]):
# first_nnz_idx = np.nonzero(~test_s.iloc[:,0].isna().to_numpy())[0][0]
# test_s = test_s.iloc[first_nnz_idx:,:]
first_nz_idx = np.nonzero(test_s.iloc[:,0].isna().to_numpy())[0][0]-1
test_s = test_s.iloc[first_nz_idx:,:-1]
test_s['Datum'] = test_s.index.copy()
test_s = test_s.melt(value_name='Error', var_name='Report Date', id_vars='Datum').dropna()
test_s['Offset'] = (test_s['Report Date'] - test_s['Datum']).dt.days
test_s.drop(columns=['Report Date'], inplace=True)
test_s.loc[:, 'Error'] = pd.to_numeric(test_s.Error)
test_s = -test_s.pivot(index='Datum', columns='Offset', values='Error')
max_err = test_s.apply(lambda c: c.dropna().max(), axis=0)
min_err = test_s.apply(lambda c: c.dropna().min(), axis=0)
med_err = test_s.apply(lambda c: c.dropna().median(), axis=0)
q25_err = test_s.apply(lambda c: c.dropna().quantile(0.25), axis=0)
q75_err = test_s.apply(lambda c: c.dropna().quantile(0.75), axis=0)
q025_err = test_s.apply(lambda c: c.dropna().quantile(0.025), axis=0)
q975_err = test_s.apply(lambda c: c.dropna().quantile(0.975), axis=0)
iq50_err = (q75_err - q25_err)
iq95_err = (q975_err - q025_err)
#test2 = test.div(test.iloc[:,-1], axis=0)
#first_nz_idx = np.nonzero((test_s.iloc[:,0]!=1).to_numpy())[0][0]
# test2 = test2.iloc[first_nz_idx:,:]
# test2a = test2.iloc[:-(31+12), :]
# test3 = pd.DataFrame(index = test2a.index, columns = range(12, 100))
# for d in test2a.index:
# v = pd.DataFrame(data = test2a.loc[d, :].to_numpy().copy(),
# index = (test2a.columns - d).days,
# columns = ['data'])
# com_cols = test3.columns.intersection(v.index)
# test3.loc[d, com_cols] = v.loc[com_cols, 'data']-1
# error_band_md = test3.apply(lambda c: c.dropna().quantile(0.5) , axis=0)
# error_band_q1 = test3.apply(lambda c: c.dropna().quantile(0.25) , axis=0)
# error_band_q3 = test3.apply(lambda c: c.dropna().quantile(0.75) , axis=0)
# error_band_max = test3.apply(lambda c: c.dropna().max(), axis=0)
# error_band_min = test3.apply(lambda c: c.dropna().min(), axis=0)
# error_band_lo = error_band_md - 1.5 * (error_band_q3 - error_band_q1)
# error_band_hi = error_band_md + 1.5 * (error_band_q3 - error_band_q1)
# %%
band_data_med = pd.DataFrame(index = r_idx, columns=dataset_date_range)
band_data_min = pd.DataFrame(index = r_idx, columns=dataset_date_range)
band_data_max = pd.DataFrame(index = r_idx, columns=dataset_date_range)
band_data_iq95_lo = pd.DataFrame(index = r_idx, columns=dataset_date_range)
band_data_iq95_hi = pd.DataFrame(index = r_idx, columns=dataset_date_range)
band_data_iq50_lo = pd.DataFrame(index = r_idx, columns=dataset_date_range)
band_data_iq50_hi = pd.DataFrame(index = r_idx, columns=dataset_date_range)
max_num_entries = (dataset_date_range[-1]-dataset_date_range[0]).days
max_lut = max_err.index.max()
min_lut = max_err.index.min()
# max_err = test_s.apply(lambda c: c.dropna().max(), axis=0)
# min_err = test_s.apply(lambda c: c.dropna().min(), axis=0)
# med_err = test_s.apply(lambda c: c.dropna().median(), axis=0)
# q25_err = test_s.apply(lambda c: c.dropna().quantile(0.25), axis=0)
# q75_err = test_s.apply(lambda c: c.dropna().quantile(0.75), axis=0)
# iq_err = 1.5 * (q75_err - q25_err)
for i in range(dataset_date_range.size):
dataset_date = dataset_date_range[i]
dataset_date_str = dataset_date.strftime('%Y-%m-%d')
v = pd.DataFrame(datasets[dataset_date_str]['Rgeom (Med NowNowcast 7d MA)'].iloc[-max_num_entries:].dropna())
v.rename(columns={'Rgeom (Med NowNowcast 7d MA)': 'Data'}, inplace=True)
cur_idx = v.index
com_idx = r_idx.intersection(cur_idx)
if com_idx.size == 0:
continue
v = v.loc[com_idx]
cur_idx = v.index
v['Offset'] = (dataset_date - cur_idx).days
cur_idx = v.index
com_idx = r_idx.intersection(cur_idx)
# vmed = v['Data']
vmed = v['Data'] + v.apply(lambda r: 0.0 if r['Offset'] > max_lut else med_err[r['Offset']], axis=1)
vmax = v['Data'] + v.apply(lambda r: 0.0 if r['Offset'] > max_lut else max_err[r['Offset']], axis=1)
vmin = v['Data'] + v.apply(lambda r: 0.0 if r['Offset'] > max_lut else min_err[r['Offset']], axis=1)
vq25 = v['Data'] + v.apply(lambda r: 0.0 if r['Offset'] > max_lut else q25_err[r['Offset']], axis=1)
vq75 = v['Data'] + v.apply(lambda r: 0.0 if r['Offset'] > max_lut else q75_err[r['Offset']], axis=1)
vq025 = v['Data'] + v.apply(lambda r: 0.0 if r['Offset'] > max_lut else q025_err[r['Offset']], axis=1)
vq975 = v['Data'] + v.apply(lambda r: 0.0 if r['Offset'] > max_lut else q975_err[r['Offset']], axis=1)
band_data_med.loc[com_idx, dataset_date] = vmed.loc[com_idx]
band_data_min.loc[com_idx, dataset_date] = vmin.loc[com_idx]
band_data_max.loc[com_idx, dataset_date] = vmax.loc[com_idx]
band_data_iq50_lo.loc[com_idx, dataset_date] = vq25.loc[com_idx]
band_data_iq50_hi.loc[com_idx, dataset_date] = vq75.loc[com_idx]
band_data_iq95_lo.loc[com_idx, dataset_date] = vq025.loc[com_idx]
band_data_iq95_hi.loc[com_idx, dataset_date] = vq975.loc[com_idx]
# %%
plt.rc('axes', axisbelow=True)
if False:
# testX = test.subtract(test.iloc[:,-1], axis=0)
# band_max = testX.apply(lambda r: r.dropna().max(), axis=1).max()
# band_min = testX.apply(lambda r: r.dropna().min(), axis=1).min()
# band_q75 = testX.apply(lambda r: r.dropna().quantile(0.75), axis=1)
# band_q25 = testX.apply(lambda r: r.dropna().quantile(0.25), axis=1)
# band_iq = 1.5 * (band_q75 - band_q25).max()
# band_pm = np.max([-band_min, band_max])
fig = plt.figure(figsize=(16,9))
gs = gridspec.GridSpec(2, 1, figure=fig,
height_ratios = [14, 3],
hspace = 0.1)
ax = fig.add_subplot(gs[0, 0])
if dataset_date_range[0].year == dataset_date_range[-1].year:
Datenstand_range_str = (
dataset_date_range[0].strftime('%d.%m.-') +
dataset_date_range[-1].strftime('%d.%m.%Y') )
else:
Datenstand_range_str = (
dataset_date_range[0].strftime('%d.%m.%y-') +
dataset_date_range[-1].strftime('%d.%m.%Y') )
    fig.suptitle('COVID-19 - Original Punktschätzer des RKI 7-Tage-R nach Erkrankungsdatum - {:s}'.format(
Datenstand_range_str),
horizontalalignment='center',
verticalalignment='center',
fontsize=21, color=SLATE, y=0.91)
for i in range(dataset_date_range.size):
dataset_date = dataset_date_range[i]
dataset_date_str = dataset_date.strftime('%Y-%m-%d')
xidz = datasets[dataset_date_str].index[-max_num_entries:]
v = datasets[dataset_date_str]['R (Nowcast 7d MA)'].iloc[-max_num_entries:]
#y1 = v * (1 - IQmax)
#y2 = v * (1 + IQmax)
#y1 = datasets[dataset_date_str]['R (Q1 NowNowcast 7d MA)'].iloc[-56:]
#y2 = datasets[dataset_date_str]['R (Q3 NowNowcast 7d MA)'].iloc[-56:]
#plt.fill_between(xidz, y1, y2, facecolor=(0.3, 0.3, 0.3), alpha=0.5)
plt.plot(v)
ax.set_ylim([0.6,1.5])
ax.yaxis.set_major_locator(MultipleLocator(0.1))
ax.yaxis.set_minor_locator(MultipleLocator(0.02))
#ax.set_xlim([r_idx[0], r_idx[-1]])
ax.set_xlim([
pd.to_datetime(r_idx[0]),
pd.to_datetime(PLOT_MAX_DATE)
])
date_form = DateFormatter("%d.%m.\n%Y")
ax.xaxis.set_major_formatter(date_form)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=2, byweekday=0))
ax.xaxis.set_minor_locator(mdates.DayLocator())
ax.tick_params(which='minor', length=0, width=0)
ax.tick_params(axis=u'both', labelsize=16, labelcolor = SLATE)
ax.grid(True, which='major', axis='both', linestyle='-', color=(0.85,0.85,0.85))
ax.grid(True, which='minor', axis='both', linestyle='-', color=(0.95,0.95,0.95))
ax.set_ylabel('7-Tage Reproduktionszahl R', fontdict={'fontsize': 18}, fontsize=18, color = SLATE, labelpad=14)
    ax.set_xlabel('Geschätztes Erkrankungsdatum', fontdict={'fontsize': 18}, fontsize=18, color = SLATE, labelpad=14)
ax2 = fig.add_subplot(gs[1, 0])
ax2.axis('off')
plt.text(0, 0.05,
'Datenquelle:\n' +
             'Robert Koch-Institut (RKI), an der Heiden, Matthias (2021): SARS-CoV-2-Nowcasting und -R-Schaetzung, Berlin: Zenodo. DOI:10.5281/zenodo.4680400\n'+
'URL: https://github.com/robert-koch-institut/SARS-CoV-2-Nowcasting_und_-R-Schaetzung ; ' +
'Abfragedatum/Datenstand: ' + Datenstand_range_str + '; eigene Berechnung/eigene Darstellung; \n' +
'Datenlizenz CC-BY 4.0 International',
fontsize=11.5)
exp_full_fname = '{:s}{:s}.png'.format(
OUTPUT_DIR + '..\\', 'Nowcasts_RKI_orig')
print('Saving ' + exp_full_fname)
fig_util.set_size(fig, (1920.0/100.0, 1080.0/100.0), dpi=100, pad_inches=0.35)
fig.savefig(exp_full_fname, dpi=100, bbox_inches='tight', pad_inches=0.35)
display(Image(filename=exp_full_fname))
plt.close()
# testX = test.subtract(test.iloc[:,-1], axis=0)
# band_max = testX.apply(lambda r: r.dropna().max(), axis=1).max()
# band_min = testX.apply(lambda r: r.dropna().min(), axis=1).min()
# band_q75 = testX.apply(lambda r: r.dropna().quantile(0.75), axis=1)
# band_q25 = testX.apply(lambda r: r.dropna().quantile(0.25), axis=1)
# band_iq = 1.5 * (band_q75 - band_q25).max()
# band_pm = np.max([-band_min, band_max])
fig = plt.figure(figsize=(16,9))
gs = gridspec.GridSpec(2, 1, figure=fig,
height_ratios = [14, 3],
hspace = 0.1)
ax = fig.add_subplot(gs[0, 0])
if dataset_date_range[0].year == dataset_date_range[-1].year:
Datenstand_range_str = (
dataset_date_range[0].strftime('%d.%m.-') +
dataset_date_range[-1].strftime('%d.%m.%Y') )
else:
Datenstand_range_str = (
dataset_date_range[0].strftime('%d.%m.%y-') +
dataset_date_range[-1].strftime('%d.%m.%Y') )
fig.suptitle('COVID-19 - Punktschätzer$^{{*)}}$ des RKI 7-Tage-R nach Erkrankungsdatum - {:s}'.format(
Datenstand_range_str),
horizontalalignment='center',
verticalalignment='center',
fontsize=21, color=SLATE, y=0.91)
for i in range(dataset_date_range.size):
dataset_date = dataset_date_range[i]
dataset_date_str = dataset_date.strftime('%Y-%m-%d')
xidz = datasets[dataset_date_str].index[-max_num_entries:]
v = datasets[dataset_date_str]['Rgeom (Nowcast 7d MA)'].iloc[-max_num_entries:]
#y1 = v * (1 - IQmax)
#y2 = v * (1 + IQmax)
#y1 = datasets[dataset_date_str]['R (Q1 NowNowcast 7d MA)'].iloc[-56:]
#y2 = datasets[dataset_date_str]['R (Q3 NowNowcast 7d MA)'].iloc[-56:]
#plt.fill_between(xidz, y1, y2, facecolor=(0.3, 0.3, 0.3), alpha=0.5)
plt.plot(v)
ax.set_ylim([0.6,1.5])
ax.yaxis.set_major_locator(MultipleLocator(0.1))
ax.yaxis.set_minor_locator(MultipleLocator(0.02))
#ax.set_xlim([r_idx[0], r_idx[-1]])
ax.set_xlim([
pd.to_datetime(r_idx[0]),
pd.to_datetime(PLOT_MAX_DATE)
])
date_form = DateFormatter("%d.%m.\n%Y")
ax.xaxis.set_major_formatter(date_form)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=2, byweekday=0))
ax.xaxis.set_minor_locator(mdates.DayLocator())
ax.tick_params(which='minor', length=0, width=0)
ax.tick_params(axis=u'both', labelsize=16, labelcolor = SLATE)
ax.grid(True, which='major', axis='both', linestyle='-', color=(0.85,0.85,0.85))
ax.grid(True, which='minor', axis='both', linestyle='-', color=(0.95,0.95,0.95))
ax.set_ylabel('7-Tage Reproduktionszahl R', fontdict={'fontsize': 18}, fontsize=18, color = SLATE, labelpad=14)
ax.set_xlabel('Geschätztes Erkrankungsdatum', fontdict={'fontsize': 18}, fontsize=18, color = SLATE, labelpad=14)
ax2 = fig.add_subplot(gs[1, 0])
ax2.axis('off')
plt.text(0, 0.05,
'Datenquelle:\n' +
         'Robert Koch-Institut (RKI), an der Heiden, Matthias (2021): SARS-CoV-2-Nowcasting und -R-Schaetzung, Berlin: Zenodo. DOI:10.5281/zenodo.4680400\n'+
'URL: https://github.com/robert-koch-institut/SARS-CoV-2-Nowcasting_und_-R-Schaetzung ; ' +
'Abfragedatum/Datenstand: ' + Datenstand_range_str + '; eigene Berechnung/eigene Darstellung; \n' +
'Datenlizenz CC-BY 4.0 International '+
'$^{*)}$ gleitender geometrischer Mittelwert (Wurzel der Produkte)',
fontsize=11.5)
exp_full_fname = '{:s}{:s}.png'.format(
OUTPUT_DIR + '..\\', 'Nowcasts_RKI_geom')
print('Saving ' + exp_full_fname)
fig_util.set_size(fig, (1920.0/100.0, 1080.0/100.0), dpi=100, pad_inches=0.35)
fig.savefig(exp_full_fname, dpi=100, bbox_inches='tight', pad_inches=0.35)
display(Image(filename=exp_full_fname))
plt.close()
fig = plt.figure(figsize=(16,9))
gs = gridspec.GridSpec(2, 1, figure=fig,
height_ratios = [14, 3],
hspace = 0.1)
ax = fig.add_subplot(gs[0, 0])
fig.suptitle('COVID-19 - Wochentagkorrigierter Punktschätzer$^{{*)}}$ des RKI 7-Tage-R nach Erkrankungsdatum - {:s}'.format(
Datenstand_range_str),
horizontalalignment='center',
verticalalignment='center',
fontsize=21, color=SLATE, y=0.91)
# y1 = band_data_min.apply(lambda r: r.dropna().min(), axis=1).dropna()
# y2 = band_data_max.apply(lambda r: r.dropna().max(), axis=1).dropna()
# x = y1.index
# plt.fill_between(x, y1, y2, facecolor=(0.0, 0.0, 0.0), alpha=0.2)
# y1 = band_data_iq_min.apply(lambda r: r.dropna().min(), axis=1).dropna()
# y2 = band_data_iq_max.apply(lambda r: r.dropna().max(), axis=1).dropna()
# x = y1.index
# plt.fill_between(x, y1, y2, facecolor=(1.0, 0.0, 0.0), alpha=0.8)
# y1 = band_data_q25.apply(lambda r: r.dropna().min(), axis=1).dropna()
# y2 = band_data_q75.apply(lambda r: r.dropna().max(), axis=1).dropna()
# x = y1.index
# plt.fill_between(x, y1, y2, facecolor=(0.4, 0.4, 1.0), alpha=0.8)
for i in range(dataset_date_range.size):
dataset_date = dataset_date_range[i]
dataset_date_str = dataset_date.strftime('%Y-%m-%d')
xidz = datasets[dataset_date_str].index[-max_num_entries:]
v = datasets[dataset_date_str]['Rgeom (Med NowNowcast 7d MA)'].iloc[-max_num_entries:]
plt.plot(v) #, 'k-') #, linewidth=0.5)
ax.set_ylim([0.6,1.5])
ax.yaxis.set_major_locator(MultipleLocator(0.1))
ax.yaxis.set_minor_locator(MultipleLocator(0.02))
#ax.set_xlim([r_idx[0], r_idx[-1]])
ax.set_xlim([
pd.to_datetime(r_idx[0]),
pd.to_datetime(PLOT_MAX_DATE)
])
date_form = DateFormatter("%d.%m.\n%Y")
ax.xaxis.set_major_formatter(date_form)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=2, byweekday=0))
ax.xaxis.set_minor_locator(mdates.DayLocator())
ax.tick_params(which='minor', length=0, width=0)
ax.tick_params(axis=u'both', labelsize=16, labelcolor = SLATE)
ax.grid(True, which='major', axis='both', linestyle='-', color=(0.85,0.85,0.85))
ax.grid(True, which='minor', axis='both', linestyle='-', color=(0.95,0.95,0.95))
ax.set_ylabel('7-Tage Reproduktionszahl R', fontdict={'fontsize': 18}, fontsize=18, color = SLATE, labelpad=14)
ax.set_xlabel('Geschätztes Erkrankungsdatum', fontdict={'fontsize': 18}, fontsize=18, color = SLATE, labelpad=14)
ax2 = fig.add_subplot(gs[1, 0])
ax2.axis('off')
plt.text(0, 0.05,
'Datenquelle:\n' +
         'Robert Koch-Institut (RKI), an der Heiden, Matthias (2021): SARS-CoV-2-Nowcasting und -R-Schaetzung, Berlin: Zenodo. DOI:10.5281/zenodo.4680400\n'+
'URL: https://github.com/robert-koch-institut/SARS-CoV-2-Nowcasting_und_-R-Schaetzung ; ' +
'Abfragedatum/Datenstand: ' + Datenstand_range_str + '; eigene Berechnung/eigene Darstellung;\n' +
'Datenlizenz CC-BY 4.0 International '+
'$^{*)}$ gleitender geometrischer Mittelwert (Wurzel der Produkte)',
fontsize=11.5)
exp_full_fname = '{:s}{:s}.png'.format(
OUTPUT_DIR + '..\\', 'Nowcasts_RKI_korr')
print('Saving ' + exp_full_fname)
fig_util.set_size(fig, (1920.0/100.0, 1080.0/100.0), dpi=100, pad_inches=0.35)
fig.savefig(exp_full_fname, dpi=100, bbox_inches='tight', pad_inches=0.35)
display(Image(filename=exp_full_fname))
plt.close()
# %%
ext_r_idx = pd.date_range(r_idx[0], r_idx[-1]-pd.DateOffset(days=1))
old_boundaries = pd.DataFrame(index = ext_r_idx, columns=[
'min', 'max', 'iq50_lo', 'iq50_hi', 'iq95_lo', 'iq95_hi', 'med'])
for i in range(max_num_entries+1):
# i = max_num_entries
dataset_date = dataset_date_range[i]
dataset_date_str = dataset_date.strftime('%Y-%m-%d')
if pd.to_numeric(band_data_min.iloc[:,i].dropna()).size == 0:
continue
y1 = pd.to_numeric(band_data_min.iloc[:,i].dropna())
y2 = pd.to_numeric(band_data_max.iloc[:,i].dropna())
old_boundaries.loc[y1.index[-1], 'min'] = y1.iloc[-1]
old_boundaries.loc[y2.index[-1], 'max'] = y2.iloc[-1]
y1 = pd.to_numeric(band_data_iq95_lo.iloc[:,i].dropna())
y2 = pd.to_numeric(band_data_iq95_hi.iloc[:,i].dropna())
old_boundaries.loc[y1.index[-1], 'iq95_lo'] = y1.iloc[-1]
old_boundaries.loc[y2.index[-1], 'iq95_hi'] = y2.iloc[-1]
y1 = pd.to_numeric(band_data_iq50_lo.iloc[:,i].dropna())
y2 = pd.to_numeric(band_data_iq50_hi.iloc[:,i].dropna())
old_boundaries.loc[y1.index[-1], 'iq50_lo'] = y1.iloc[-1]
old_boundaries.loc[y2.index[-1], 'iq50_hi'] = y2.iloc[-1]
y = pd.to_numeric(band_data_med.iloc[:,i].dropna())
old_boundaries.loc[y.index[-1], 'med'] = y.iloc[-1]
# extrapolation of last 3 days
# for i in range(max_num_entries-2, max_num_entries+1):
# for j in range(-10,-4.shape[1]):
# p = np.polyfit([*range(-9,-3)], pd.to_numeric(old_boundaries.iloc[-9:-3, j]).to_numpy(),2)
# old_boundaries.iloc[-3:, j] = np.polyval(p, [-3,-2,-1])
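# Hedged sketch of the column-wise extrapolation outlined in the commented
# block above: fit np.polyfit over the last fully observed positions of one
# boundary column and overwrite the trailing rows with np.polyval. The window,
# tail length and polynomial degree below are illustrative defaults, not
# values taken from this script.
def _extrapolate_tail(col, fit_slice=slice(-9, -3), tail=3, deg=2):
    y = pd.to_numeric(col.iloc[fit_slice]).to_numpy()
    x = np.arange(fit_slice.start, fit_slice.stop)
    coeffs = np.polyfit(x, y, deg)
    col.iloc[-tail:] = np.polyval(coeffs, np.arange(-tail, 0))
    return col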
# %%
if DO_EXTRAPOLATION:
    p = np.polyfit([*range(-10,-4)], pd.to_numeric(band_data_min.iloc[-10:-4, -1]).to_numpy(), 2)
from datetime import timedelta
import numpy as np
import pytest
from pandas import Categorical, DataFrame, NaT, Period, Series, Timedelta, Timestamp
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_pytimedelta(self):
# GH#8209
ser = Series([np.nan, Timedelta("1 days")], index=["A", "B"])
result = ser.fillna(timedelta(1))
expected = Series(Timedelta("1 days"), index=["A", "B"])
tm.assert_series_equal(result, expected)
def test_fillna_period(self):
# GH#13737
ser = Series([Period("2011-01", freq="M"), Period("NaT", freq="M")])
res = ser.fillna(Period("2012-01", freq="M"))
exp = Series([Period("2011-01", freq="M"), Period("2012-01", freq="M")])
tm.assert_series_equal(res, exp)
assert res.dtype == "Period[M]"
def test_fillna_dt64_timestamp(self):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
# reg fillna
result = ser.fillna(Timestamp("20130104"))
expected = Series(
[
                Timestamp("20130101"),
                Timestamp("20130101"),
                Timestamp("20130104"),
                Timestamp("20130103 9:01:01"),
            ]
        )
        tm.assert_series_equal(result, expected)
"""dev_env_for_beta_app"""
code='dev_env_for_beta_app'
from elasticsearch import Elasticsearch
es = Elasticsearch([{'host': 'elastic-helm-elasticsearch-coordinating-only'}])
sample_user_id='607077a405164b0001e72f69'
log='https://apibeta.bighaat.com/crop/api/logerror/create-recommendation-error-log?message={}&api-version=1.0'
"""local_beta_app"""
#code='local_app_beta'
#from elasticsearch import Elasticsearch
#es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
#sample_user_id='607077a405164b0001e72f69'
#log='https://apibeta.bighaat.com/crop/api/logerror/create-recommendation-error-log?message={}&api-version=1.0'
"""local_prod_app"""
#code='local_app_prod'
#from elasticsearch import Elasticsearch
#es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
#sample_user_id='5fb0ced5cd15bf00013f68d0'
#log='https://api.bighaat.com/crop/api/logerror/create-recommendation-error-log?message={}&api-version=1.0'
"""beta_env_for_prod_app"""
#code='beta_env_for_prod_app'
#from elasticsearch import Elasticsearch
#es = Elasticsearch([{'host': 'elastic-helm-elasticsearch-coordinating-only'}])
#sample_user_id='5fb0ced5cd15bf00013f68d0'
#log='https://api.bighaat.com/crop/api/logerror/create-recommendation-error-log?message={}&api-version=1.0'
"""prod_env_for_prod_app"""
#code='prod_env_for_prod_app'
#from elasticsearch import Elasticsearch
#es = Elasticsearch([{'host': 'elastic-helm-elasticsearch-coordinating-only'}])
#sample_user_id='5fb0ced5cd15bf00013f68d0'
#log='https://api.bighaat.com/crop/api/logerror/create-recommendation-error-log?message={}&api-version=1.0'
auth={'apiKey': '135BE3F9-B1D8-4A14-A999-303286C32275'}
from flask import Flask
app = Flask(__name__)
from pandas.io.json import json_normalize
#import numpy as np
import pandas as pd
from nltk.corpus import stopwords
#import random
import time
#from datetime import datetime,timedelta
STOP_WORDS=stopwords.words('english')
#es = Elasticsearch('https://prodelastic.bighaat.cf/')
#es = Elasticsearch([{'host': 'elastic-helm-elasticsearch-coordinating-only'}])
#es = Elasticsearch("https://elastic:[email protected]:9200/", verify_certs=False, connection_class=RequestsHttpConnection)
from flask import request
import warnings
warnings.filterwarnings("ignore")
from elasticsearch.helpers import scan
import requests
import datetime
def logger(message):
message_dict={
'code':code,
'message':message}
r=requests.post(log.format(str(message_dict)),headers=auth)
return r.text
def big_read(index):
es_response = scan(
es,
index=index,
doc_type='_doc',
query={"query": { "match_all" : {}}}
)
return es_response
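# Hedged usage sketch for big_read(): scan() yields raw hits, so the documents
# are typically collected into a DataFrame before further processing. The
# index name below is only an example.
def _example_scan_to_frame(index='posts_sync'):
    hits = [hit['_source'] for hit in big_read(index)]
    return pd.DataFrame(hits)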
#global_user={}
#res = es.search(index="users_sync", body={"size":10000,"query":{"match_all":{}}})
#users = json_normalize(res['hits']['hits'])
def cropDocRecommend(u):
try:
logger("CropDoc started for user:"+u)
userId=u
b={
'query':{
'match':{"_id": userId}
}
}
res = es.search(index="crop_doc_disease", body=b)
q=res['hits']['hits'][0]['_source']['0']
if q==None:
logger("Exit CropDoc for user:"+u+"with 0 results")
return {'total_cd':[]}
"""3 latest disease searched by user in crop doc"""
# q=",".join(q[-3:])
q=q.split(",")
if ' ' in q:
q.remove(' ')
q=q[-3:]
#q='Vkasetu'
q1=[w for w in q if not w in STOP_WORDS]
should1=[]
for i in q1:
should1.append({
"fuzzy":{"title": {"value":i,"fuzziness":10}}
})
b1={
"query": {
"bool": {
"should": should1,
"must":{'match':{'isDeleted':False }}
}
}
}
post_dict_list=[]
res1= es.search(index='posts_search_sync',body=b1)
print("Keyword Search:\n")
for hit in res1['hits']['hits']:
# print(str(hit['_score']) + "\t" + hit['_source']['title'])
post_dict_list.append({'score_post':hit['_score'],'postId':hit['_id'],'postTitle_post':hit['_source']['title']})
q2=[w for w in q if not w in STOP_WORDS]
should2=[]
for i in q2:
should2.append({
"fuzzy":{"comment": {"value":i,"fuzziness":10}}
})
b2={
"query": {
"bool": {
"should": should2,
"must":{'match':{'isDeleted':False }}
}
}
}
        comment_dict_list=[]
res2= es.search(index='comments_sync',body=b2)
for hit in res2['hits']['hits']:
# print(str(hit['_score']) + "\t" + hit['_source']['postId']+"\t" + hit['_source']['comment'] )
b={
'query':{
'match':{"_id":hit['_source']['postId'] }
}
}
try:
p=es.search(index='posts_sync',body=b)['hits']['hits'][0]['_source']['title']
comment_dict_list.append({'score_comment':hit['_score'],'postId':hit['_source']['postId'],'postTitle_comment':p,'comment':hit['_source']['comment']})
except:
print("Comment's post is deleted")
q3=[w for w in q if not w in STOP_WORDS]
should3=[]
for i in q3:
should3.append({
"fuzzy":{"description": {"value":i,"fuzziness":10}}
})
b3={
"query": {
"bool": {
"should": should3,
"must":{'match':{'isDeleted':False }}
}
}
}
description_dict_list=[]
res3= es.search(index='posts_search_sync',body=b3)
# print("Keyword Search:\n")
for hit in res3['hits']['hits']:
# print(str(hit['_score']) + "\t" + hit['_source']['title'])
description_dict_list.append({'score_description':hit['_score'],'postId':hit['_id'],'postTitle_description':hit['_source']['title'],'description':hit['_source']['description']})
if len(post_dict_list)!=0:
df1=pd.DataFrame(post_dict_list)
else:
df1=pd.DataFrame(columns=['score_post', 'postId', 'postTitle_post'])
if len(description_dict_list)!=0:
df2=pd.DataFrame(description_dict_list)
else:
df2=pd.DataFrame(columns=['score_description', 'postId', 'postTitle_description','description'])
if len(comment_dict_list)!=0:
df3=pd.DataFrame(comment_dict_list)
else:
df3=pd.DataFrame(columns=['score_comment', 'postId', 'postTitle_comment', 'comment'])
combo_pcd=df1.merge(df2,how='inner',on='postId').merge(df3,how='inner',on='postId')
combo_pd=df1.merge(df2,how='inner',on='postId')
combo_pc=df1.merge(df3,how='inner',on='postId')
combo_cd=df2.merge(df3,how='inner',on='postId')
id_top=list(combo_pcd['postId'])+list(combo_pd['postId'])+list(combo_pc['postId'])+list(combo_cd['postId'])
combo=pd.concat([combo_pcd,combo_pd,combo_pc,combo_cd])
df1=df1[df1['postId'].apply(lambda x: x not in id_top)]
df2=df2[df2['postId'].apply(lambda x: x not in id_top)]
df3=df3[df3['postId'].apply(lambda x: x not in id_top)]
final=pd.concat([combo,df1,df2,df3])
# final=final[start:fraction_size]
# print(final['postId'].tolist(),'cropdoc')
logger("Exit CropDoc for user:"+u+"with"+str(len(final['postId']))+" "+str(final['postId']))
return {'total_cd':final['postId'].tolist()}
except Exception as e:
logger("Some error in corpdoc "+str(e))
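# Hedged sketch of the Elasticsearch query pattern used above: every search
# term becomes a fuzzy clause under bool/should, combined with a must clause
# that filters out deleted documents. Field name and fuzziness are
# illustrative defaults.
def _fuzzy_should_query(terms, field='title', fuzziness=10):
    should = [{'fuzzy': {field: {'value': t, 'fuzziness': fuzziness}}}
              for t in terms]
    return {'query': {'bool': {'should': should,
                               'must': {'match': {'isDeleted': False}}}}}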
def user_profile_activity(posts_vec,u):
try:
logger("Started Profile_activity recom for usser"+u)
user_id=u
# logger(str(total_cd))
b={
'query':{
'match':{"_id": user_id}
}
}
vec_user=es.search(body=b,index='users_profile_vec')['hits']['hits'][0]['_source']
vec_user_activity=es.search(body=b,index='users_activity_vec')['hits']['hits'][0]['_source']
b={
'query':{
'match':{"_id": "aabab"}
}
}
col_names=es.search(index="cols", body=b)['hits']['hits'][0]['_source']
language=col_names['language'].split(",")
crop_names=col_names['crop_names'].split(",")
        for i in vec_user:
            vec_user[i] = [vec_user[i]]
lan=pd.DataFrame.from_dict(vec_user)[language]
if lan.sum().sum()!=0:
lan=lan.div(lan.sum(axis=1),axis=0)
crops=pd.DataFrame.from_dict(vec_user)[crop_names]
if crops.sum().sum()!=0:
crops=crops.div(crops.sum(axis=1),axis=0)
vec_user=pd.concat([pd.DataFrame.from_dict(vec_user)[['x','y']],lan,crops],axis=1)
        for i in vec_user_activity:
            vec_user_activity[i] = [vec_user_activity[i]]
lan=pd.DataFrame.from_dict(vec_user_activity)[language]
if lan.sum().sum()!=0:
lan=lan.div(lan.sum(axis=1),axis=0)
crops=pd.DataFrame.from_dict(vec_user_activity)[crop_names]
if crops.sum().sum()!=0:
crops=crops.div(crops.sum(axis=1),axis=0)
vec_user_activity=pd.concat([pd.DataFrame.from_dict(vec_user_activity)[['x','y']],lan,crops],axis=1)
# res = es.search(index="posts_search_sync", body={"size":10000,"query":{"match_all":{}}})
# posts_vec = json_normalize(res['hits']['hits'])
# posts_vec.columns=pd.Series(posts_vec.columns).apply(lambda x:x.split('.')[-1])
# posts_vec=posts_vec[posts_vec['isDeleted']==False]
vec_user[language+crop_names]=vec_user[language+crop_names]+vec_user_activity[language+crop_names]
# l=vec_user[language].sample(1,weights=vec_user[language].values[0],axis=1).columns[0]
# c=vec_user[crop_names].sample(1,weights=vec_user[crop_names].values[0],axis=1).columns[0]
crop_list=vec_user[crop_names].T.sort_values(by=0,ascending=False)
crop_list=crop_list[crop_list!=0].dropna().index.tolist()
language_list=vec_user[language].T.sort_values(by=0,ascending=False)
language_list=language_list[language_list!=0].dropna().index.tolist()
pv=posts_vec.copy()
pv=pv[pv['userId']!=u]
# l2_list=[]
# for l in language_list:
# l1_list=[]
# for c in crop_list:
# p=pv[(pv['language']==l) & (pv['cropName']==c)]
# p=p.sort_values(by='actionUpdatedAt',ascending=False)
# l1_list.append(p['_id'].tolist())
# l2_list.append(l1_list)
# if len(p)!=0:
# try:
# post_list.extend(p['_id'].tolist())
# except:
# pass
# user_own_post=posts_vec.copy()
# user_own_post=user_own_post[user_own_post['userId']==u]
# user_own_post['actionUpdatedAt']=pd.to_datetime(user_own_post['actionUpdatedAt'])
# user_own_post=user_own_post[user_own_post['actionUpdatedAt']-datetime.utcnow()<timedelta(days=3)]
# user_own_post_list=user_own_post['_id'].tolist()[:5]
p=pd.DataFrame()
for l in language_list:
for c in crop_list:
pt=pv[(pv['language']==l) & (pv['cropName']==c)]
p=pd.concat([p,pt])
p=p.sort_values(by='actionUpdatedAt',ascending=False)
posts=p
# for l1 in l2_list:
# len_list=[]
# for i in l1:
# len_list.append(len(i))
# m=max(len_list)
# for i in range(m):
# for j in l1:
# if i<len(j):
# post_list.append(j[i])
if crop_list==[]:
posts=pv[pv.language.isin(language_list)].sort_values(by='actionUpdatedAt',ascending=False)
# print(total_cd)
# post_list=[x for x in post_list if x not in total_cd+global_user[u]]
# for post in post_list[start:]:
# if post not in total_cd+global_user[u]:
# final.append(post)
# if len(final)==fraction_size-start:
# break
# print(final)
# post_list=user_own_post_list+post_list
posts_crop_doc_list=cropDocRecommend(u)['total_cd']
posts_crop_doc=pv[pv['_id'].isin(posts_crop_doc_list)]
        posts_act_doc = pd.concat([posts, posts_crop_doc])
import numpy as np
import pandas as pd
from keras import backend as K
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Activation, BatchNormalization, Dense, Input
from keras.models import Model
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.linear_model import ARDRegression, Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
def root_mean_squared_error(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_true - y_pred)))
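# Hedged usage sketch: the RMSE defined above can be passed straight to
# model.compile() as a custom Keras loss. The single Dense layer and the
# optimizer are placeholders, not the stacking model used below.
def _build_rmse_model(input_dim):
    inputs = Input(shape=(input_dim,))
    outputs = Dense(1)(inputs)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer="adam", loss=root_mean_squared_error)
    return model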
if __name__ == "__main__":
NUM_FOLDS = 50
SEED = 1000
shigeria_pred1 = np.load("shigeria_pred1.npy")
shigeria_pred2 = np.load("shigeria_pred2.npy")
shigeria_pred3 = np.load("shigeria_pred3.npy")
shigeria_pred4 = np.load("shigeria_pred4.npy")
shigeria_pred5 = np.load("shigeria_pred5.npy")
shigeria_pred6 = np.load("shigeria_pred6.npy")
shigeria_pred7 = np.load("shigeria_pred7.npy")
shigeria_pred8 = np.load("shigeria_pred8.npy")
shigeria_pred9 = np.load("shigeria_pred9.npy")
shigeria_pred10 = np.load("shigeria_pred10.npy")
upura_pred = np.load("upura_pred.npy")
takuoko_exp085 = np.load("takuoko_exp085.npy")
takuoko_exp096 = np.load("takuoko_exp096.npy")
takuoko_exp105 = np.load("takuoko_exp105.npy")
takuoko_exp108 = np.load("takuoko_exp108.npy")
takuoko_exp184 = np.load("takuoko_exp184.npy")
X_train_svd = np.load("X_train_all.npy")
X_test_svd = np.load("X_test_all.npy")
train_idx = np.load("train_idx.npy", allow_pickle=True)
svd1 = TruncatedSVD(n_components=3, n_iter=10, random_state=42)
svd1.fit(X_train_svd)
X_train_svd = svd1.transform(X_train_svd)
X_test_svd = svd1.transform(X_test_svd)
X_test = pd.DataFrame(
{
"shigeria_pred1": shigeria_pred1.reshape(-1),
"shigeria_pred2": shigeria_pred2.reshape(-1),
"shigeria_pred3": shigeria_pred3.reshape(-1),
"shigeria_pred4": shigeria_pred4.reshape(-1),
"shigeria_pred5": shigeria_pred5.reshape(-1),
"shigeria_pred6": shigeria_pred6.reshape(-1),
"shigeria_pred7": shigeria_pred7.reshape(-1),
"shigeria_pred8": shigeria_pred8.reshape(-1),
"shigeria_pred9": shigeria_pred9.reshape(-1),
"shigeria_pred10": shigeria_pred10.reshape(-1),
"upura": upura_pred,
"takuoko_exp085": takuoko_exp085,
"takuoko_exp096": takuoko_exp096,
"takuoko_exp105": takuoko_exp105,
"takuoko_exp108": takuoko_exp108,
"takuoko_exp184": takuoko_exp184,
}
)
X_test = pd.concat(
[
X_test,
pd.DataFrame(
X_test_svd, columns=[f"svd_{c}" for c in range(X_test_svd.shape[1])]
),
],
axis=1,
)
# upura oof
pred_val000 = pd.read_csv("../input/commonlit-oof/pred_val000.csv")
# shigeria oof
andrey_df = pd.read_csv("../input/commonlitstackingcsv/roberta_base_itpt.csv")
andrey_df2 = pd.read_csv("../input/commonlitstackingcsv/attention_head_nopre.csv")
    andrey_df3 = pd.read_csv("../input/commonlitstackingcsv/attention_head_itpt.csv")
import pandas as pd
import bioframe
import pyranges as pr
import numpy as np
from io import StringIO
def bioframe_to_pyranges(df):
pydf = df.copy()
pydf.rename(
{"chrom": "Chromosome", "start": "Start", "end": "End"},
axis="columns",
inplace=True,
)
return pr.PyRanges(pydf)
def pyranges_to_bioframe(pydf):
df = pydf.df
df.rename(
{"Chromosome": "chrom", "Start": "start", "End": "end", "Count": "n_intervals"},
axis="columns",
inplace=True,
)
return df
def pyranges_overlap_to_bioframe(pydf):
## convert the df output by pyranges join into a bioframe-compatible format
df = pydf.df.copy()
df.rename(
{
"Chromosome": "chrom_1",
"Start": "start_1",
"End": "end_1",
"Start_b": "start_2",
"End_b": "end_2",
},
axis="columns",
inplace=True,
)
df["chrom_1"] = df["chrom_1"].values.astype("object") # to remove categories
df["chrom_2"] = df["chrom_1"].values
return df
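# Hedged round-trip sketch for the converters above; the interval values are
# arbitrary examples.
def _example_roundtrip():
    df = pd.DataFrame(
        {"chrom": ["chr1", "chr1"], "start": [1, 10], "end": [5, 20]}
    )
    pydf = bioframe_to_pyranges(df)  # rename to PyRanges' Chromosome/Start/End
    merged = pyranges_to_bioframe(pydf.merge(count=True))
    return merged  # back to chrom/start/end with an n_intervals column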
chroms = ["chr12", "chrX"]
def mock_bioframe(num_entries=100):
pos = np.random.randint(1, 1e7, size=(num_entries, 2))
df = pd.DataFrame()
df["chrom"] = np.random.choice(chroms, num_entries)
df["start"] = np.min(pos, axis=1)
df["end"] = np.max(pos, axis=1)
df.sort_values(["chrom", "start"], inplace=True)
return df
############# tests #####################
def test_select():
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
region1 = "chr1:4-10"
df_result = pd.DataFrame([["chr1", 4, 5]], columns=["chrom", "start", "end"])
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX:4-6"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
def test_expand():
fake_bioframe = pd.DataFrame(
{"chrom": ["chr1", "chr1", "chr2"], "start": [1, 50, 100], "end": [5, 55, 200]}
)
fake_chromsizes = {"chr1": 60, "chr2": 300}
expand_bp = 10
fake_expanded = bioframe.expand(fake_bioframe.copy(), expand_bp, fake_chromsizes)
print(fake_expanded)
assert fake_expanded.iloc[0].start == 0 # don't expand below zero
assert (
fake_expanded.iloc[1].end == fake_chromsizes["chr1"]
) # don't expand above chromsize
assert (
fake_expanded.iloc[2].end == fake_bioframe.iloc[2].end + expand_bp
) # expand end normally
assert (
fake_expanded.iloc[2].start == fake_bioframe.iloc[2].start - expand_bp
) # expand start normally
def test_overlap():
### test consistency of overlap(how='inner') with pyranges.join ###
### note does not test overlap_start or overlap_end columns of bioframe.overlap
df1 = mock_bioframe()
df2 = mock_bioframe()
assert df1.equals(df2) == False
p1 = bioframe_to_pyranges(df1)
p2 = bioframe_to_pyranges(df2)
pp = pyranges_overlap_to_bioframe(p1.join(p2, how=None))[
["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
]
bb = bioframe.overlap(df1, df2, how="inner")[
["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
]
pp = pp.sort_values(
["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
ignore_index=True)
bb = bb.sort_values(
["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
ignore_index=True)
pd.testing.assert_frame_equal(bb, pp, check_dtype=False, check_exact=False)
print("overlap elements agree")
### test overlap on= [] ###
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[["chr1", 6, 10, "+", "dog"], ["chrX", 7, 10, "-", "dog"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_2"].values)) == 3
b = bioframe.overlap(
df1,
df2,
on=["strand"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_2"].values)) == 2
b = bioframe.overlap(
df1,
df2,
on=None,
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_2"].values)) == 0
### test overlap 'left', 'outer', and 'right'
b = bioframe.overlap(
df1,
df2,
on=None,
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 5
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="inner",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 0
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="right",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 2
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
def test_cluster():
df1 = pd.DataFrame(
[["chr1", 1, 5], ["chr1", 3, 8], ["chr1", 8, 10], ["chr1", 12, 14],],
columns=["chrom", "start", "end"],
)
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 1])
).all() # the last interval does not overlap the first three
df_annotated = bioframe.cluster(df1, min_dist=2)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 0])
).all() # all intervals part of the same cluster
df_annotated = bioframe.cluster(df1, min_dist=None)
assert (
df_annotated["cluster"].values == np.array([0, 0, 1, 2])
).all() # adjacent intervals not clustered
df1.iloc[0, 0] = "chrX"
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([2, 0, 0, 1])
).all() # do not cluster intervals across chromosomes
# test consistency with pyranges (which automatically sorts df upon creation and uses 1-based indexing for clusters)
assert (
(bioframe_to_pyranges(df1).cluster(count=True).df["Cluster"].values - 1)
== bioframe.cluster(df1.sort_values(["chrom", "start"]))["cluster"].values
).all()
# test on=[] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert (
bioframe.cluster(df1, on=["animal"])["cluster"].values == np.array([0, 1, 0, 2])
).all()
assert (
bioframe.cluster(df1, on=["strand"])["cluster"].values == np.array([0, 1, 1, 2])
).all()
assert (
bioframe.cluster(df1, on=["location", "animal"])["cluster"].values
== np.array([0, 2, 1, 3])
).all()
def test_merge():
df1 = pd.DataFrame(
[["chr1", 1, 5], ["chr1", 3, 8], ["chr1", 8, 10], ["chr1", 12, 14],],
columns=["chrom", "start", "end"],
)
# the last interval does not overlap the first three with default min_dist=0
assert (bioframe.merge(df1)["n_intervals"].values == np.array([3, 1])).all()
# adjacent intervals are not clustered with min_dist=none
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values == np.array([2, 1, 1])
).all()
# all intervals part of one cluster
assert (
bioframe.merge(df1, min_dist=2)["n_intervals"].values == np.array([4])
).all()
df1.iloc[0, 0] = "chrX"
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values
== np.array([1, 1, 1, 1])
).all()
assert (
bioframe.merge(df1, min_dist=0)["n_intervals"].values == np.array([2, 1, 1])
).all()
# total number of intervals should equal length of original dataframe
mock_df = mock_bioframe()
assert np.sum(bioframe.merge(mock_df, min_dist=0)["n_intervals"].values) == len(
mock_df
)
# test consistency with pyranges
pd.testing.assert_frame_equal(
pyranges_to_bioframe(bioframe_to_pyranges(df1).merge(count=True)),
bioframe.merge(df1),
check_dtype=False,
check_exact=False,
)
# test on=['chrom',...] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert len(bioframe.merge(df1, on=None)) == 2
assert len(bioframe.merge(df1, on=["strand"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location", "animal"])) == 4
d = """ chrom start end animal n_intervals
0 chr1 3 10 cat 2
1 chr1 3 8 dog 1
2 chrX 6 10 cat 1"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df, bioframe.merge(df1, on=["animal"]), check_dtype=False,
)
def test_complement():
df1 = pd.DataFrame(
[["chr1", 1, 5], ["chr1", 3, 8], ["chr1", 8, 10], ["chr1", 12, 14]],
columns=["chrom", "start", "end"],
)
df1_chromsizes = {"chr1": 100, "chrX": 100}
df1_complement = pd.DataFrame(
[["chr1", 0, 1], ["chr1", 10, 12], ["chr1", 14, 100], ['chrX', 0, 100]],
columns=["chrom", "start", "end"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, chromsizes=df1_chromsizes), df1_complement
)
### test complement with two chromosomes ###
df1.iloc[0, 0] = "chrX"
df1_complement = pd.DataFrame(
[
["chr1", 0, 3],
["chr1", 10, 12],
["chr1", 14, 100],
["chrX", 0, 1],
["chrX", 5, 100],
],
columns=["chrom", "start", "end"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, chromsizes=df1_chromsizes), df1_complement
)
def test_closest():
df1 = pd.DataFrame([["chr1", 1, 5],], columns=["chrom", "start", "end"])
df2 = pd.DataFrame(
[["chr1", 4, 8], ["chr1", 10, 11]], columns=["chrom", "start", "end"]
)
### closest(df1,df2,k=1) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 4 8 0"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### closest(df1,df2, ignore_overlaps=True)) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, ignore_overlaps=True))
### closest(df1,df2,k=2) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 4 8 0
1 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=2))
### closest(df2,df1) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 4 8 chr1 1 5 0
1 chr1 10 11 chr1 1 5 5 """
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.closest(df2, df1))
### change first interval to new chrom ###
df2.iloc[0, 0] = "chrA"
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### test other return arguments ###
df2.iloc[0, 0] = "chr1"
d = """
index_1 index_2 have_overlap overlap_start overlap_end distance
0 0 0 True 4 5 0
1 0 1 False <NA> <NA> 5
"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.closest(
df1,
df2,
k=2,
return_overlap=True,
return_index=True,
return_input=False,
return_distance=True,
),
check_dtype=False,
)
def test_coverage():
#### coverage does not exceed length of original interval
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
    df2 = pd.DataFrame([["chr1", 2, 10]], columns=["chrom", "start", "end"])
import pandas as pd
import numpy as np
import sqlite3
from retrobiocat_web.retro.generation.node_analysis import rdkit_smile
def convert_to_rdkit(smi):
try:
new_smi = rdkit_smile(smi)
return new_smi
except:
return None
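# Hedged usage sketch: convert_to_rdkit() canonicalises a SMILES string via
# retrobiocat's rdkit_smile wrapper and returns None when parsing fails. The
# SMILES below are arbitrary examples.
def _example_canonicalise():
    ok = convert_to_rdkit('C1=CC=CC=C1')    # benzene -> canonical SMILES
    bad = convert_to_rdkit('not-a-smiles')  # unparsable -> None
    return ok, bad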
def load_data(path, cols, sep, smi_col):
print(f'Load path: {path}')
try:
        data = pd.read_csv(path, sep=sep)
import os
import json
from dotenv import load_dotenv
import pandas as pd
from web3 import Web3
from pathlib import Path
class BlockheadMarketPlace:
"""
Attributes:
nft_contract: string
Contract's Application Binary Interface (ABI) represented
in JSON format for the NFT contract. See Contracts/nft.sol
marketplace_contract: string
            Contract's Application Binary Interface (ABI) represented
in JSON format for the NFT Marketplace contract. See
Contracts/nftMarket.sol
"""
w3 = None
nft_address = None
nft_contract = None
marketplace_address = None
marketplace_contract = None
def __init__(self, web3_provider_uri : str):
load_dotenv()
self.w3 = Web3(Web3.HTTPProvider(web3_provider_uri))
self.nft_contract_address = os.getenv("NFT_CONTRACT_ADDRESS")
self.marketplace_address = os.getenv("NFT_MARKET_CONTRACT_ADDRESS")
self.nft_contract = \
self.__load_contract("./blockhead/abi/nft_abi.json", self.nft_contract_address)
self.marketplace_contract = \
self.__load_contract("./blockhead/abi/nft_marketplace_abi.json", self.marketplace_address)
def mint_token(self,
from_address : str,
token_uri : str) -> str:
"""Mints a token on the blockhain
Minting entails assigning a unique identifier to the nft located
at the uri. This process is governed by an ERC-721 compliant
contract. See Contracts/nft.sol.
Args:
w3: Web3 Provider
Web3 Provider object enabling access to the blockchain
from_address: string
Blockchain address of the current user
token_uri:
Unique Resource Identifier that identifies the digital NFT
Returns:
A token id
"""
transaction = self.nft_contract.functions.createToken(token_uri)
tx_hash = transaction.transact({"from": from_address, "gas": 1000000})
receipt = self.w3.eth.waitForTransactionReceipt(tx_hash)
rich_logs = self.nft_contract.events.TokenCreated().processReceipt(receipt)
token_id = rich_logs[0]['args']['itemId']
return token_id
def create_market_item(self,
from_address: str,
token_id : int,
price_wei : int,
listing_price_wei : int) -> int:
""" Establishes an NFT for sale on the Blockhead NFT MarketPlace
Puts an NFT that has been tokenized using the NFT contract
(Contracts/nft.sol) for sale on the Blockhead NFT Marketplace
governed by the Marketplace Contract (Contracts/nftMarket.sol).
The Blockhead NFT Marketplace has permissions to access tokens
created on the NFT Contract.
Putting up for sale requires submitting the listing price, which is the
cost of putting the NFT up for sale.
Args:
from_address: string
Blockchain address of the current user, or seller, of the NFT
token_id: int
Unique identifier of the minted NFT as governed by the
NFT contract (Contracts/nft.sol)
price_wei: int
                The NFT purchase price, in wei, set by the seller.
listing_price_wei: int
The amount being submitted to cover the listing price, which is
the cost of listing an NFT. This value is established by the
Blockhead NFT Marketplace. See Contracts/nftMarket.sol
Returns:
An item id which represents the unique index of the NFT on the
Blockhead NFT Marketplace.
"""
transaction = self.marketplace_contract.functions.createMarketItem(
self.nft_contract_address, int(token_id), int(price_wei))
tx_hash = transaction.transact(
{
"from": from_address,
"gas": 1000000,
"value": int(listing_price_wei)
})
receipt = self.w3.eth.waitForTransactionReceipt(tx_hash)
rich_logs = self.marketplace_contract.events.MarketItemCreated().processReceipt(receipt)
item_id = rich_logs[0]['args']['itemId']
return item_id
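    # Hedged usage sketch for the two methods above (addresses, token URI and
    # prices are placeholder values, not real accounts or contract settings):
    #
    #   market = BlockheadMarketPlace("http://127.0.0.1:8545")
    #   token_id = market.mint_token(seller_address, "ipfs://<metadata-cid>")
    #   item_id = market.create_market_item(
    #       seller_address,
    #       token_id,
    #       price_wei=Web3.toWei(1, "ether"),
    #       listing_price_wei=Web3.toWei(0.025, "ether"))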
def get_listed_items_for_account(self, from_address : str, show_sold : bool = True) -> pd.DataFrame:
""" Retrieves the current account's listed items.
Args:
from_address: string
Blockchain address of the current user, or seller, of the NFT
Returns:
Unsold NFTs returned as an array of MarketItem as a DataFrame
"""
transaction = self.marketplace_contract.functions.fetchItemsCreated()
items = transaction.call(
{
"from": from_address
})
items_df = pd.DataFrame(items)
if not items_df.empty:
items_df.columns = ['Token Id', 'Contract', 'Blockhead Id', 'Minter', 'Owner', 'Cost', 'Sold']
items_df.drop(['Contract'], axis=1, inplace=True)
if not show_sold:
items_df = items_df[items_df["Sold"] == False]
items_df['Minter'] = items_df.apply(lambda row : self.__simplify_address(row['Minter']), axis=1)
items_df['Owner'] = items_df.apply(lambda row : self.__simplify_address(row['Owner']), axis=1)
return items_df
def get_bought_items_for_account(self, from_address : str) -> pd.DataFrame:
""" Retrieves the current account's bought items.
Args:
from_address: string
Blockchain address of the current user, or seller, of the NFT
Returns:
Bought NFTs returned as an array of MarketItem as a DataFrame
"""
transaction = self.marketplace_contract.functions.fetchMyNFTs()
items = transaction.call(
{
"from": from_address
})
items_df = pd.DataFrame(items)
if not items_df.empty:
items_df.columns = ['Token Id', 'Contract', 'Blockhead Id', 'Minter', 'Owner', 'Cost', 'Sold']
items_df.drop(['Contract'], axis=1, inplace=True)
items_df['Minter'] = items_df.apply(lambda row : self.__simplify_address(row['Minter']), axis=1)
items_df['Owner'] = items_df.apply(lambda row : self.__simplify_address(row['Owner']), axis=1)
return items_df
def get_nfts_for_sale(self) -> pd.DataFrame:
""" Retrieves unsold NFTs on the Blockhead MarketPlace
Returns:
Unsold NFTs returned as an array of MarketItem in JSON format
"""
transaction = self.marketplace_contract.functions.fetchMarketItems()
data = transaction.call()
        unsold_items_df = pd.DataFrame(data)
import numpy as np
import os
import csv
import requests
import pandas as pd
import time
import datetime
from stockstats import StockDataFrame as Sdf
from ta import add_all_ta_features
from ta.utils import dropna
from ta import add_all_ta_features
from ta.utils import dropna
from config import config
def load_dataset(*, file_name: str) -> pd.DataFrame:
"""
load csv dataset from path
:return: (df) pandas dataframe
"""
#_data = pd.read_csv(f"{config.DATASET_DIR}/{file_name}")
_data = pd.read_csv(file_name)
return _data
def data_split(df,start,end):
"""
split the dataset into training or testing using date
:param data: (df) pandas dataframe, start, end
:return: (df) pandas dataframe
"""
data = df[(df.datadate >= start) & (df.datadate < end)]
data=data.sort_values(['datadate','tic'],ignore_index=True)
#data = data[final_columns]
data.index = data.datadate.factorize()[0]
return data
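# Hedged usage sketch for data_split(): datadate is assumed to be stored as a
# sortable integer such as 20190101, matching the comparisons above. The
# cutoff dates are illustrative.
def _example_train_test_split(df):
    train = data_split(df, 20090101, 20190101)
    test = data_split(df, 20190101, 20210101)
    return train, test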
def calcualte_price(df):
"""
    calculate adjusted close price, open-high-low price and volume
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
data = df.copy()
data = data[['datadate', 'tic', 'prccd', 'ajexdi', 'prcod', 'prchd', 'prcld', 'cshtrd']]
data['ajexdi'] = data['ajexdi'].apply(lambda x: 1 if x == 0 else x)
data['adjcp'] = data['prccd'] / data['ajexdi']
data['open'] = data['prcod'] / data['ajexdi']
data['high'] = data['prchd'] / data['ajexdi']
data['low'] = data['prcld'] / data['ajexdi']
data['volume'] = data['cshtrd']
data = data[['datadate', 'tic', 'adjcp', 'open', 'high', 'low', 'volume']]
data = data.sort_values(['tic', 'datadate'], ignore_index=True)
return data
def add_technical_indicator(df):
"""
    calculate technical indicators
    use stockstats package to add technical indicators
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
stock = Sdf.retype(df.copy())
stock['close'] = stock['adjcp']
unique_ticker = stock.tic.unique()
macd = pd.DataFrame()
rsi = pd.DataFrame()
cci = pd.DataFrame()
    dx = pd.DataFrame()
import pandas as pd
from trading_calendars import get_calendar
def get_benchmark_returns(symbol, first_date, last_date):
cal = get_calendar('NYSE')
dates = cal.sessions_in_range(first_date, last_date)
    data = pd.DataFrame(0.0, index=dates, columns=['close'])
import os
import h5py
import matplotlib.pyplot as plt
from pathlib import Path
from time import time, strftime
import pandas as pd
import numpy as np
import scipy.ndimage as ndi
import argparse
from rabbitccs.data.utilities import load, save, print_orthogonal
from rabbitccs.inference.thickness_analysis import _local_thickness
if __name__ == '__main__':
# ******************************** 3D case ************************************
start = time()
    # base_path = Path('../../../Data/µCT')
    base_path = Path('/media/dios/dios2/RabbitSegmentation/µCT/Full dataset')
filter_size = 12
parser = argparse.ArgumentParser()
parser.add_argument('--masks', type=Path, default=base_path / 'Predictions_FPN_Resnet18_OA')
parser.add_argument('--th_maps', type=Path, default=base_path / f'thickness_median{filter_size}_missing')
parser.add_argument('--plot', type=bool, default=True)
parser.add_argument('--save_h5', type=bool, default=True)
parser.add_argument('--batch_id', type=int, default=None)
    parser.add_argument('--resolution', type=tuple, default=(3.2, 3.2, 3.2))  # in µm
    #parser.add_argument('--resolution', type=tuple, default=(12.8, 12.8, 12.8))  # in µm
parser.add_argument('--mode', type=str,
choices=['med2d_dist3d_lth3d', 'stacked_2d', 'med2d_dist2d_lth3d'],
default='med2d_dist3d_lth3d')
parser.add_argument('--max_th', type=float, default=None) # in ยตm
parser.add_argument('--median', type=int, default=filter_size)
parser.add_argument('--completed', type=int, default=0)
args = parser.parse_args()
# Sample list
samples = os.listdir(args.masks)
samples.sort()
if args.batch_id is not None:
samples = [samples[args.batch_id]]
elif args.completed > 0:
samples = samples[args.completed:]
# Save paths
args.th_maps.mkdir(exist_ok=True)
(args.th_maps / 'visualization').mkdir(exist_ok=True)
(args.th_maps / 'h5').mkdir(exist_ok=True)
results = {'Sample': [], 'Mean thickness': [], 'Median thickness': [], 'Thickness STD': [],'Maximum thickness': []}
t = strftime(f'%Y_%m_%d_%H_%M')
# Loop for samples
for sample in samples:
time_sample = time()
print(f'Processing sample {sample}')
# Load prediction
pred, files = load(str(args.masks / sample), axis=(1, 2, 0,))
# Downscale
#pred = (ndi.zoom(pred, 0.25) > 126).astype(np.bool)
if args.plot:
print_orthogonal(pred, savepath=str(args.th_maps / 'visualization' / (sample + '_pred.png')))
# Median filter
pred = ndi.median_filter(pred, size=args.median)
if args.plot:
print_orthogonal(pred, savepath=str(args.th_maps / 'visualization' / (sample + '_median.png')))
# Thickness analysis
# Create array of correct size
th_map = _local_thickness(pred, mode=args.mode, spacing_mm=args.resolution, stack_axis=1,
thickness_max_mm=args.max_th)
if args.plot:
print_orthogonal(th_map, savepath=str(args.th_maps / 'visualization' / (sample + '_th_map.png')),
cmap='hot')
plt.hist(x=th_map[np.nonzero(th_map)].flatten(), bins='auto')
plt.show()
# Save resulting thickness map with bmp and h5py
save(str(args.th_maps / sample), sample, th_map, dtype='.bmp')
# H5PY save
if args.save_h5:
savepath = args.th_maps / 'h5' / (sample + '.h5')
h5 = h5py.File(str(savepath), 'w')
h5.create_dataset('data', data=th_map)
h5.close()
# Update results
th_map = th_map[np.nonzero(th_map)].flatten()
results['Sample'].append(sample)
results['Mean thickness'].append(np.mean(th_map))
results['Median thickness'].append(np.median(th_map))
results['Maximum thickness'].append(np.max(th_map))
results['Thickness STD'].append(np.std(th_map))
# Save results to excel
writer = pd.ExcelWriter(str(args.th_maps / ('Results_' + t)) + '.xlsx')
    df1 = pd.DataFrame(results)
import requests
from lxml import etree
from urllib.parse import urljoin
from pandas import DataFrame, read_html, concat
from bs4 import BeautifulSoup
import re
from tqdm import tqdm
def _getLinksFromPage(url, textcrib=None, hrefcrib=True):
page = requests.get(url)
#The file we have grabbed in this case is a web page - that is, an HTML file
#We can get the content of the page and parse it
soup=BeautifulSoup(page.content, "html5lib")
#BeautifulSoup has a routine - find_all() - that will find all the HTML tags of a particular sort
#Links are represented in HTML pages in the form <a href="http//example.com/page.html">link text</a>
#Grab all the <a> (anchor) tags...
if textcrib:
souplinks=soup.find_all('a', text=re.compile(textcrib))
elif hrefcrib:
souplinks=soup.find_all('a', href=re.compile(hrefcrib))
else:
souplinks=soup.find_all('a')
return souplinks
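# Hedged usage sketch for _getLinksFromPage(): filter either on the link text
# or on the href. The URL and regex patterns below are illustrative.
def _example_link_scrape():
    url = 'http://www.parliament.uk/mps-lords-and-offices/standards-and-financial-interests/'
    by_text = _getLinksFromPage(url, textcrib='Registers published')
    by_href = _getLinksFromPage(url, hrefcrib='cmallparty.*htm')
    return [(a.text, a.get('href')) for a in by_text + by_href]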
def _get_most_recent_APPG_link(url=None):
if url is None:
url='http://www.parliament.uk/mps-lords-and-offices/standards-and-financial-interests/parliamentary-commissioner-for-standards/registers-of-interests/register-of-all-party-party-parliamentary-groups/'
links = _getLinksFromPage(url,textcrib='Registers published')
#get the most recent link
recent = sorted([link.text for link in links])[-1]
link = [link for link in links if link.text == recent][0]
url='http://www.parliament.uk'+link['href']
links = _getLinksFromPage(url,hrefcrib='pa/cm/cmallparty.*htm')
return links[0]
def getDetails(dfs,gid):
df=dfs[0][:]
df.set_index(0,inplace=True)
df=df.T
df['gid']=gid
return df.reset_index(drop=True)
def getOfficers(dfs, gid):
df=dfs[1][:]
df.columns=['Property','Name', 'Party']
df.set_index('Property',inplace=True)
df=df[2:]
df['gid']=gid
return df.reset_index(drop=True)
def getContacts(dfs,gid):
df=dfs[2][:]
df.rename(columns=df[0], inplace=True)
df['gid']=gid
return df[1:].reset_index(drop=True)
def getAGM(dfs,gid):
df=dfs[3].loc[:]
df['gid']=gid
return df.pivot(index='gid',columns=0,values=1).reset_index()
def getRegBenefits(dfs,gid):
df=dfs[4][:]
df.rename(columns=df.iloc[0], inplace=True)
df['gid']=gid
if len(df)>2:
df.columns=['Source', 'Value', 'Received', 'Registered', 'gid']
return df[3:]
def getInKindBenefits(dfs,gid):
if len(dfs)>5:
df=dfs[5][:]
df.rename(columns=df.iloc[1], inplace=True)
df=df[2:]
df['gid']=gid
else: return DataFrame()
return df
def scraper(url=None, conn=None, exists=None, to_csv=False):
if not url:
a = _get_most_recent_APPG_link()
print('Using most recent file I can find: {}'.format(a.text))
url = a['href']
#url="https://www.publications.parliament.uk/pa/cm/cmallparty/170502/contents.htm"
df_details=DataFrame() #getDetails(dfs,gid)
df_officers=DataFrame() #getOfficers
df_contacts=DataFrame() #getContacts(dfs,gid)
df_AGM=DataFrame() #getAGM(dfs,gid)
df_regBenefits=DataFrame() #getRegBenefits(dfs,gid)
    df_inKindBenefits = DataFrame()
import os
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import InterruptionAnalysis as ia
readpath = './data/edgedir-sim'
data = pd.read_csv('./data/timeseries.csv', index_col = 0)
votedata = pd.read_csv('./data/vote-data.csv')
votedata.set_index('pID', inplace = True)
surveydata = pd.read_csv('./data/speakingTime-data.csv', index_col = 0)
surveydata.set_index('pID', inplace = True)
a = 0.99
gIDs = pd.unique(data['gID'])
# pylint: disable=W0201
from statsmodels.compat.python import iteritems, string_types, range
import numpy as np
from statsmodels.tools.decorators import cache_readonly
import pandas as pd
from . import var_model as _model
from . import util
from . import plotting
FULL_SAMPLE = 0
ROLLING = 1
EXPANDING = 2
def _get_window_type(window_type):
if window_type in (FULL_SAMPLE, ROLLING, EXPANDING):
return window_type
elif isinstance(window_type, string_types):
window_type_up = window_type.upper()
if window_type_up in ('FULL SAMPLE', 'FULL_SAMPLE'):
return FULL_SAMPLE
elif window_type_up == 'ROLLING':
return ROLLING
elif window_type_up == 'EXPANDING':
return EXPANDING
raise Exception('Unrecognized window type: %s' % window_type)
class DynamicVAR(object):
"""
Estimates time-varying vector autoregression (VAR(p)) using
equation-by-equation least squares
Parameters
----------
data : pandas.DataFrame
lag_order : int, default 1
window : int
window_type : {'expanding', 'rolling'}
min_periods : int or None
Minimum number of observations to require in window, defaults to window
size if None specified
trend : {'c', 'nc', 'ct', 'ctt'}
TODO
Returns
-------
**Attributes**:
coefs : WidePanel
items : coefficient names
major_axis : dates
minor_axis : VAR equation names
"""
def __init__(self, data, lag_order=1, window=None, window_type='expanding',
trend='c', min_periods=None):
self.lag_order = lag_order
self.names = list(data.columns)
self.neqs = len(self.names)
self._y_orig = data
# TODO: deal with trend
self._x_orig = _make_lag_matrix(data, lag_order)
self._x_orig['intercept'] = 1
(self.y, self.x, self.x_filtered, self._index,
self._time_has_obs) = _filter_data(self._y_orig, self._x_orig)
self.lag_order = lag_order
self.trendorder = util.get_trendorder(trend)
self._set_window(window_type, window, min_periods)
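    # Hedged usage sketch; `returns` stands for a hypothetical DataFrame of
    # stationary series indexed by date:
    #
    #   dvar = DynamicVAR(returns, lag_order=2, window=60,
    #                     window_type='rolling')
    #   dvar.coefs   # time-varying coefficients (items = coefficient names)
    #   dvar.resid   # per-equation residuals as a DataFrame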
def _set_window(self, window_type, window, min_periods):
self._window_type = _get_window_type(window_type)
if self._is_rolling:
if window is None:
raise Exception('Must pass window when doing rolling '
'regression')
if min_periods is None:
min_periods = window
else:
window = len(self.x)
if min_periods is None:
min_periods = 1
self._window = int(window)
self._min_periods = min_periods
@cache_readonly
def T(self):
"""
Number of time periods in results
"""
return len(self.result_index)
@property
def nobs(self):
# Stub, do I need this?
data = dict((eq, r.nobs) for eq, r in iteritems(self.equations))
return pd.DataFrame(data)
@cache_readonly
def equations(self):
eqs = {}
for col, ts in iteritems(self.y):
model = pd.ols(y=ts, x=self.x, window=self._window,
window_type=self._window_type,
min_periods=self._min_periods)
eqs[col] = model
return eqs
@cache_readonly
def coefs(self):
"""
Return dynamic regression coefficients as WidePanel
"""
data = {}
for eq, result in iteritems(self.equations):
data[eq] = result.beta
panel = pd.WidePanel.fromDict(data)
# Coefficient names become items
return panel.swapaxes('items', 'minor')
@property
def result_index(self):
return self.coefs.major_axis
@cache_readonly
def _coefs_raw(self):
"""
Reshape coefficients to be more amenable to dynamic calculations
Returns
-------
coefs : (time_periods x lag_order x neqs x neqs)
"""
coef_panel = self.coefs.copy()
del coef_panel['intercept']
coef_values = coef_panel.swapaxes('items', 'major').values
coef_values = coef_values.reshape((len(coef_values),
self.lag_order,
self.neqs, self.neqs))
return coef_values
@cache_readonly
def _intercepts_raw(self):
"""
Similar to _coefs_raw, return intercept values in easy-to-use matrix
form
Returns
-------
intercepts : (T x K)
"""
return self.coefs['intercept'].values
@cache_readonly
def resid(self):
data = {}
for eq, result in iteritems(self.equations):
data[eq] = result.resid
        return pd.DataFrame(data)
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next one are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
        # Only categoricals with the same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
        # Only categoricals with the same ordering information can be compared
        cat_unordered = cat.set_ordered(False)
        self.assertFalse((cat > cat).any())
        def f():
            cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
        # comparison with numpy.array will raise in both directions, but only
        # on newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
        # The following works via '__array_priority__ = 1000'
        # and only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
        # Make sure that unequal comparisons take the categories order into
        # account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
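    # Illustrative sketch, not part of the original suite: scalar comparisons
    # on an ordered categorical follow the declared category order rather than
    # lexical order, so with categories=["c", "b", "a"] the value "a" ranks
    # highest. The method name is introduced here for illustration only.
    def test_ordered_scalar_comparison_sketch(self):
        cat = Categorical(list("abc"), categories=list("cba"), ordered=True)
        self.assert_numpy_array_equal(cat < "b",
                                      np.array([False, False, True]))
        self.assert_numpy_array_equal(cat > "b",
                                      np.array([True, False, False]))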
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ใใใใ', u'ใใใใใ', u'ใใใใใใใ']
* 20)
expected = u"""\
[ใใใใ, ใใใใใ, ใใใใใใใ, ใใใใ, ใใใใใ, ..., ใใใใใ, ใใใใใใใ, ใใใใ, ใใใใใ, ใใใใใใใ]
Length: 60
Categories (3, object): [ใใใใ, ใใใใใ, ใใใใใใใ]""" # noqa
self.assertEqual(_rep(c), expected)
        # unicode option should not affect Categorical, as it doesn't care
        # about the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ใใใใ', u'ใใใใใ', u'ใใใใใใใ']
* 20)
expected = u"""[ใใใใ, ใใใใใ, ใใใใใใใ, ใใใใ, ใใใใใ, ..., ใใใใใ, ใใใใใใใ, ใใใใ, ใใใใใ, ใใใใใใใ]
Length: 60
Categories (3, object): [ใใใใ, ใใใใใ, ใใใใใใใ]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
    def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
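    # Illustrative sketch, not part of the original suite: min/max are only
    # defined for ordered categoricals; calling them on an unordered one is
    # assumed to raise a TypeError in this pandas version. The method name is
    # introduced here for illustration only.
    def test_ordered_min_max_sketch(self):
        ordered = Categorical(["a", "c", "b"], ordered=True)
        self.assertEqual(ordered.min(), "a")
        self.assertEqual(ordered.max(), "c")
        unordered = Categorical(["a", "c", "b"], ordered=False)
        self.assertRaises(TypeError, unordered.min)
        self.assertRaises(TypeError, unordered.max)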
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
        # deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
        cat = Categorical(["a", "b", "c", "a"], ordered=True)