import re
import numpy as np
import pandas as pd
import pytest
from woodwork import DataTable
from woodwork.logical_types import (
URL,
Boolean,
Categorical,
CountryCode,
Datetime,
Double,
Filepath,
FullName,
Integer,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PhoneNumber,
SubRegionCode,
ZIPCode
)
def test_datatable_physical_types(sample_df):
dt = DataTable(sample_df)
assert isinstance(dt.physical_types, dict)
assert set(dt.physical_types.keys()) == set(sample_df.columns)
for k, v in dt.physical_types.items():
assert isinstance(k, str)
assert v == sample_df[k].dtype
def test_sets_category_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series(['a', 'b', 'c'], name=column_name),
pd.Series(['a', None, 'c'], name=column_name),
pd.Series(['a', np.nan, 'c'], name=column_name),
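# The rest of test_sets_category_dtype_on_init is truncated in this excerpt. Below is a
# minimal, hypothetical sketch of the kind of check such a test makes, based on the
# pre-accessor woodwork DataTable API; the exact assertions in the original are an assumption.
def _sketch_category_dtype_check(series_list, column_name):
    for series in series_list:
        dt = DataTable(pd.DataFrame(series), logical_types={column_name: Categorical})
        assert dt.to_dataframe()[column_name].dtype == 'category'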
from scipy import stats
import pandas as pd
import numpy as np
path_mutlivariate_feat_imps = '/n/groups/patel/samuel/EWAS/feature_importances_paper/'
Environmental = ['Clusters_Alcohol', 'Clusters_Diet', 'Clusters_Education', 'Clusters_ElectronicDevices',
'Clusters_Employment', 'Clusters_FamilyHistory', 'Clusters_Eyesight', 'Clusters_Mouth',
'Clusters_GeneralHealth', 'Clusters_Breathing', 'Clusters_Claudification', 'Clusters_GeneralPain',
'Clusters_ChestPain', 'Clusters_CancerScreening', 'Clusters_Medication', 'Clusters_Hearing',
'Clusters_Household', 'Clusters_MentalHealth', 'Clusters_OtherSociodemographics',
'Clusters_PhysicalActivityQuestionnaire', 'Clusters_SexualFactors', 'Clusters_Sleep', 'Clusters_SocialSupport',
'Clusters_SunExposure', 'Clusters_EarlyLifeFactors', 'Clusters_Smoking']
Biomarkers = ['Clusters_PhysicalActivity', 'Clusters_HandGripStrength', 'Clusters_BrainGreyMatterVolumes', 'Clusters_BrainSubcorticalVolumes',
'Clusters_HeartSize', 'Clusters_HeartPWA', 'Clusters_ECGAtRest', 'Clusters_AnthropometryImpedance',
'Clusters_UrineBiochemistry', 'Clusters_BloodBiochemistry', 'Clusters_BloodCount',
'Clusters_EyeAutorefraction', 'Clusters_EyeAcuity', 'Clusters_EyeIntraoculaPressure',
'Clusters_BraindMRIWeightedMeans', 'Clusters_Spirometry', 'Clusters_BloodPressure',
'Clusters_AnthropometryBodySize', 'Clusters_ArterialStiffness', 'Clusters_CarotidUltrasound',
'Clusters_BoneDensitometryOfHeel', 'Clusters_HearingTest', 'Clusters_CognitiveFluidIntelligence', 'Clusters_CognitiveMatrixPatternCompletion',
'Clusters_CognitiveNumericMemory', 'Clusters_CognitivePairedAssociativeLearning', 'Clusters_CognitivePairsMatching', 'Clusters_CognitiveProspectiveMemory',
'Clusters_CognitiveReactionTime', 'Clusters_CognitiveSymbolDigitSubstitution', 'Clusters_CognitiveTowerRearranging', 'Clusters_CognitiveTrailMaking']
Pathologies = ['medical_diagnoses_%s' % letter for letter in ['A', 'B', 'C', 'D', 'E',
'F', 'G', 'H', 'I', 'J',
'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z']]
Clusters = []
All = Environmental + Biomarkers + Pathologies #+ ['Genetics']
organs = ['*', '*instances01', '*instances1.5x', '*instances23', 'Abdomen', 'AbdomenLiver', 'AbdomenPancreas', 'Arterial', 'ArterialCarotids', 'ArterialPulseWaveAnalysis', 'Biochemistry', 'BiochemistryBlood', 'BiochemistryUrine', 'Brain', 'BrainCognitive', 'BrainMRI', 'Eyes', 'EyesAll', 'EyesFundus', 'EyesOCT', 'Hearing', 'Heart', 'HeartECG', 'HeartMRI', 'ImmuneSystem', 'Lungs', 'Musculoskeletal', 'MusculoskeletalFullBody', 'MusculoskeletalHips', 'MusculoskeletalKnees', 'MusculoskeletalScalars', 'MusculoskeletalSpine', 'PhysicalActivity']
path_heritability = '/n/groups/patel/Alan/Aging/Medical_Images/GWAS_hits_Age'
def Create_data(corr_type, model):
df_corr_env = pd.DataFrame(columns = ['env_dataset', 'organ_1', 'organ_2', 'corr', 'sample_size'])
for env_dataset in All:
print("Env dataset : ", env_dataset)
for organ_1 in organs:
try:
    df_1 = pd.read_csv(path_mutlivariate_feat_imps + 'FeatureImp_%s_%s_%s.csv' % (env_dataset, organ_1, model))
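# The loop body above is truncated in this excerpt. A hedged, standalone sketch of how a
# correlation row between two organs' feature importances might be computed; the helper
# itself and the column names 'variable' and 'feature_importance' are assumptions, not the
# original implementation.
def _corr_between_feature_importances(df_1, df_2, corr_type='pearson'):
    merged = df_1.merge(df_2, on='variable', suffixes=('_1', '_2')).dropna()
    x, y = merged['feature_importance_1'], merged['feature_importance_2']
    if corr_type == 'pearson':
        corr, _ = stats.pearsonr(x, y)
    else:
        corr, _ = stats.spearmanr(x, y)
    return corr, len(merged)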
from pandas import Series
from sklearn.preprocessing import MinMaxScaler
# define contrived series
data = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
series = Series(data)
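# A plausible continuation of this snippet showing the usual MinMaxScaler workflow;
# the variable names below are assumptions.
values = series.values.reshape(-1, 1)            # the scaler expects a 2-D array
scaler = MinMaxScaler(feature_range=(0, 1))
scaler.fit(values)
print('Min: %f, Max: %f' % (scaler.data_min_[0], scaler.data_max_[0]))
normalized = scaler.transform(values)            # scaled to [0, 1]
inversed = scaler.inverse_transform(normalized)  # back to the original scale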
# Initially downloaded after conversation with <NAME>/<NAME>
#
# url for download: https://www.dropbox.com/s/ftwkozmk373t45s/fmi_report_data.py?dl=0
#
# Downloaded 2021-09-15
import xml.etree.ElementTree as et
from glob import glob
import pandas as pd
from os import path
import os
import datetime
'''
This script walks a folder of Foundation Medicine (FMI) XML files, parses the mutation
information returned in the FMI patient reports, summarizes it in one Excel file, and
outputs CSVs for further analysis.
'''
cwd = os.getcwd()
fmi_dir = '/mnt/'
xml = fmi_dir + '*.xml'
files = [path.basename(x) for x in glob(xml)]
global todays_date
now = datetime.datetime.now()
todays_date = now.strftime("%Y-%m-%d")
def report_variable_to_frame(root):
report_detail = []
report_dict = {}
for result in root.iter('{http://foundationmedicine.com/compbio/variant-report-external}variant-report'):
result_dict = result.attrib
report_detail.append(result_dict)
for i in enumerate(report_detail):
report_dict[i[0]] = (i[1])
patient_report_detail = pd.DataFrame.from_dict(report_dict, orient='index',
columns = ['test-request', 'specimen', 'disease', 'disease-ontology', 'gender', 'pathology-diagnosis', 'tissue-of-origin', 'purity-assessment'])
treq = ttype = recd = seqd = fullname = mrn = dob = ''
colld = biop = ordmd = facnm = facid = spec = ''  # facility name/id are referenced below
diag = dis = disOnt = pathd = orig = pure = qual = ''
for repId in root.iter('ReportId'):
treq = repId.text
for test in root.iter('TestType'):
ttype = test.text
for receipt in root.iter('ReceivedDate'):
recd = receipt.text
dt = datetime.datetime.strptime(recd, "%Y-%m-%d")
seqd = dt.strftime("%m-%d-%Y")
for colldate in root.iter('CollDate'):
colld = colldate.text
dt = datetime.datetime.strptime(colld, "%Y-%m-%d")
biop = dt.strftime("%m-%d-%Y")
for doctor in root.iter('OrderingMD'):
ordmd = doctor.text
for ptntname in root.iter('FullName'):
fullname = ptntname.text
for mrnumber in root.iter('MRN'):
mrn = mrnumber.text
for birthdate in root.iter('DOB'):
dob = birthdate.text
for inst in root.iter('MedFacilName'):
facnm = inst.text
for facil in root.iter('MedFacilID'):
facid = facil.text
for site in root.iter('SpecSite'):
spec = site.text
for submDiag in root.iter('SubmittedDiagnosis'):
diag = submDiag.text
for qc in root.iter('{http://foundationmedicine.com/compbio/variant-report-external}quality-control'):
qual = qc.attrib
qcstat = qual.get('status', 0)
dd = {'ReportId': [treq], 'TestType': [ttype], 'Disease': [dis], 'DiseaseOntology': [disOnt], 'PathologyDiagnosis': [pathd],
'SubmittedDiagnosis': [diag], 'OrderingMD': [ordmd], 'Patient': [fullname], 'MRN': [mrn], 'DOB': [dob], 'SpecimenSite': [spec], 'TissueOfOrigin': [orig],
'DateSequenced': [seqd], 'DateCollected': [biop], 'Facility': [facnm], 'FacilityId': [facid], 'Purity': [pure], 'QualityControl': [qcstat]}
patient_report_frame = pd.DataFrame.from_dict(dd, orient='columns')
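# Illustrative aggregation sketch (not part of the original excerpt): parse each XML in
# `files`, collect the per-report frames, and write one summary CSV. It assumes
# report_variable_to_frame returns patient_report_frame; the helper name and output
# filename are assumptions.
def _summarize_reports(file_list):
    frames = []
    for fname in file_list:
        root = et.parse(os.path.join(fmi_dir, fname)).getroot()
        frames.append(report_variable_to_frame(root))
    summary = pd.concat(frames, ignore_index=True)
    summary.to_csv('fmi_report_summary_%s.csv' % todays_date, index=False)
    return summary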
# -*- coding: utf-8 -*-
import os
from glob import glob
import numpy as np
import pandas as pd
import torch as t
from torch import optim
from collections import OrderedDict
from typing import Tuple
from pathlib import Path
from datetime import datetime as dt
from dataclasses import dataclass
from tqdm import tqdm
os.environ['KMP_DUPLICATE_LIB_OK']='True'
np.random.seed(2021)
@dataclass
class M4Config:
pathDatasetOrg: str
pathDatasetDump: str
pathResult: str
seasonal_patterns: list
horizons: list
horizons_map: dict
frequencies: list
frequency_map: dict
history_size: dict
iterations: dict
layer_size: int
layers: int
stacks: int
batch_size: int
learning_rate: float
# Ensemble parameters
repeats: int
lookbacks: list
losses: list
def __init__(self):
self.pathDatasetOrg = r'C:/Users/taeni/Documents/GitHub/nbeats_reproduce/datasets/m4/'
self.pathDatasetDump = r'C:/Users/taeni/Documents/GitHub/nbeats_reproduce/datasets/m4/'
self.pathResult = r'C:/Users/taeni/Documents/GitHub/nbeats_reproduce/results/m4/'
self.seasonal_patterns = ['Yearly', 'Quarterly', 'Monthly',
'Weekly', 'Daily', 'Hourly']
self.horizons = [6, 8, 18, 13, 14, 48]
self.horizons_map = {
'Yearly': 6,
'Quarterly': 8,
'Monthly': 18,
'Weekly': 13,
'Daily': 14,
'Hourly': 48
}
self.frequencies = [1, 4, 12, 1, 1, 24]
self.frequency_map = {
'Yearly': 1,
'Quarterly': 4,
'Monthly': 12,
'Weekly': 1,
'Daily': 1,
'Hourly': 24
}
self.history_size = {
'Yearly': 1.5,
'Quarterly': 1.5,
'Monthly': 1.5,
'Weekly': 10,
'Daily': 10,
'Hourly': 10
}
self.iterations = {
'Yearly': 15000,
'Quarterly': 15000,
'Monthly': 15000,
'Weekly': 5000,
'Daily': 5000,
'Hourly': 5000
}
# generic
self.layers = 4
self.stacks = 30
self.layer_size = 512
# trend
self.trend_layers = 4
self.trend_blocks = 3
self.trend_layer_size = 256
self.trend_degree_of_polynomial = 2
# seasonality
self.seasonality_layers = 4
self.seasonality_blocks = 3
self.seasonality_layer_size = 2048
self.seasonality_num_of_harmonics = 1
# build
self.batch_size = 1024
self.learning_rate = 0.001
# Ensemble parameters
self.repeats = 10
self.lookbacks = [2, 3, 4, 5, 6, 7]
self.losses = ['MASE', 'MAPE', 'SMAPE']
class M4Dataset:
info = pd.DataFrame()
ids: np.ndarray
groups: np.ndarray
frequencies: np.ndarray
horizons: np.ndarray
trainset: np.ndarray
testset: np.ndarray
def __init__(self, path_org, path_dump):
self.pathDatasetOrg = path_org
self.pathDatasetDump = path_dump
info = pd.read_csv(path_org + "M4-info.csv")
self.info = info
self.ids = info.M4id.values
self.groups = info.SP.values
self.frequencies = info.Frequency.values
self.horizons = info.Horizon.values
def build_cache(files: str, cache_path: str) -> None:
ts_dict = OrderedDict(list(zip(info.M4id.values, [[]] * len(info.M4id.values))))
for f in glob(os.path.join(path_org, files)):
dataset = pd.read_csv(f)
dataset.set_index(dataset.columns[0], inplace=True)
for m4id, row in dataset.iterrows():
values = row.values
ts_dict[m4id] = values[~np.isnan(values)]
np.array(list(ts_dict.values())).dump(cache_path)
if not os.path.isfile(path_dump + "train.npz"):
print("Dump train datset process...")
build_cache(os.path.join(path_org,'Train/*-train.csv'),
os.path.join(path_dump, 'train.npz'))
else:
print("Skip train dataset process... train.npz")
if not os.path.isfile(path_dump + "test.npz"):
print("Dump test datset process...")
build_cache(os.path.join(path_org,'Test/*-test.csv'),
os.path.join(path_dump, 'test.npz'))
else:
print("Skip test dataset process... test.npz")
self.trainset = np.load(os.path.join(path_dump, 'train.npz'),
allow_pickle=True)
self.testset = np.load(os.path.join(path_dump, 'test.npz'),
allow_pickle=True)
###############################################################################
class NBeatsBlock(t.nn.Module):
"""
N-BEATS block which takes a basis function as an argument.
"""
def __init__(self,
input_size,
theta_size: int,
basis_function: t.nn.Module,
layers: int,
layer_size: int):
"""
N-BEATS block.
:param input_size: Insample size.
:param theta_size: Number of parameters for the basis function.
:param basis_function: Basis function which takes the parameters and produces backcast and forecast.
:param layers: Number of layers.
:param layer_size: Layer size.
"""
super().__init__()
self.layers = t.nn.ModuleList([t.nn.Linear(in_features=input_size, out_features=layer_size)] +
[t.nn.Linear(in_features=layer_size, out_features=layer_size)
for _ in range(layers - 1)])
self.basis_parameters = t.nn.Linear(in_features=layer_size, out_features=theta_size)
self.basis_function = basis_function
def forward(self, x: t.Tensor) -> Tuple[t.Tensor, t.Tensor]:
block_input = x
for layer in self.layers:
block_input = t.relu(layer(block_input))
basis_parameters = self.basis_parameters(block_input)
return self.basis_function(basis_parameters)
class NBeats(t.nn.Module):
"""
N-Beats Model.
"""
def __init__(self, blocks: t.nn.ModuleList):
super().__init__()
self.blocks = blocks
def forward(self, x: t.Tensor, input_mask: t.Tensor) -> t.Tensor:
residuals = x.flip(dims=(1,)) # left<->right
input_mask = input_mask.flip(dims=(1,)) # left<->right
forecast = x[:, -1:]
for i, block in enumerate(self.blocks):
backcast, block_forecast = block(residuals)
residuals = (residuals - backcast) * input_mask
forecast = forecast + block_forecast
return forecast
class GenericBasis(t.nn.Module):
"""
Generic basis function.
"""
def __init__(self, backcast_size: int, forecast_size: int):
super().__init__()
self.backcast_size = backcast_size
self.forecast_size = forecast_size
def forward(self, theta: t.Tensor):
return theta[:, :self.backcast_size], theta[:, -self.forecast_size:]
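# Illustrative shape check (an addition, not part of the original script): a single
# generic block maps a (batch, input_size) window to a (backcast, forecast) pair, and a
# one-block NBeats model returns only the forecast. The sizes below are arbitrary.
def _demo_generic_block_shapes():
    block = NBeatsBlock(input_size=12,
                        theta_size=12 + 6,
                        basis_function=GenericBasis(backcast_size=12, forecast_size=6),
                        layers=4,
                        layer_size=64)
    x = t.randn(8, 12)                  # batch of 8 insample windows
    backcast, forecast = block(x)
    assert backcast.shape == (8, 12) and forecast.shape == (8, 6)
    model = NBeats(t.nn.ModuleList([block]))
    yhat = model(x, t.ones_like(x))     # all-ones mask = fully observed windows
    assert yhat.shape == (8, 6)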
class TrendBasis(t.nn.Module):
"""
Polynomial function to model trend.
"""
def __init__(self, degree_of_polynomial: int, backcast_size: int, forecast_size: int):
super().__init__()
self.polynomial_size = degree_of_polynomial + 1 # degree of polynomial with constant term
self.backcast_time = t.nn.Parameter(
t.tensor(np.concatenate([np.power(np.arange(backcast_size, dtype=float) / backcast_size, i)[None, :]
for i in range(self.polynomial_size)]), dtype=t.float32),
requires_grad=False)
self.forecast_time = t.nn.Parameter(
t.tensor(np.concatenate([np.power(np.arange(forecast_size, dtype=float) / forecast_size, i)[None, :]
for i in range(self.polynomial_size)]), dtype=t.float32), requires_grad=False)
def forward(self, theta: t.Tensor):
backcast = t.einsum('bp,pt->bt', theta[:, self.polynomial_size:], self.backcast_time)
forecast = t.einsum('bp,pt->bt', theta[:, :self.polynomial_size], self.forecast_time)
return backcast, forecast
class SeasonalityBasis(t.nn.Module):
"""
Harmonic functions to model seasonality.
"""
def __init__(self, harmonics: int, backcast_size: int, forecast_size: int):
super().__init__()
self.frequency = np.append(np.zeros(1, dtype=np.float32),
np.arange(harmonics, harmonics / 2 * forecast_size,
dtype=np.float32) / harmonics)[None, :]
backcast_grid = -2 * np.pi * (
np.arange(backcast_size, dtype=np.float32)[:, None] / forecast_size) * self.frequency
forecast_grid = 2 * np.pi * (
np.arange(forecast_size, dtype=np.float32)[:, None] / forecast_size) * self.frequency
self.backcast_cos_template = t.nn.Parameter(t.tensor(np.transpose(np.cos(backcast_grid)), dtype=t.float32),
requires_grad=False)
self.backcast_sin_template = t.nn.Parameter(t.tensor(np.transpose(np.sin(backcast_grid)), dtype=t.float32),
requires_grad=False)
self.forecast_cos_template = t.nn.Parameter(t.tensor(np.transpose(np.cos(forecast_grid)), dtype=t.float32),
requires_grad=False)
self.forecast_sin_template = t.nn.Parameter(t.tensor(np.transpose(np.sin(forecast_grid)), dtype=t.float32),
requires_grad=False)
def forward(self, theta: t.Tensor):
params_per_harmonic = theta.shape[1] // 4
backcast_harmonics_cos = t.einsum('bp,pt->bt', theta[:, 2 * params_per_harmonic:3 * params_per_harmonic],
self.backcast_cos_template)
backcast_harmonics_sin = t.einsum('bp,pt->bt', theta[:, 3 * params_per_harmonic:], self.backcast_sin_template)
backcast = backcast_harmonics_sin + backcast_harmonics_cos
forecast_harmonics_cos = t.einsum('bp,pt->bt',
theta[:, :params_per_harmonic], self.forecast_cos_template)
forecast_harmonics_sin = t.einsum('bp,pt->bt', theta[:, params_per_harmonic:2 * params_per_harmonic],
self.forecast_sin_template)
forecast = forecast_harmonics_sin + forecast_harmonics_cos
return backcast, forecast
# util
def median_ensemble(experiment_path: str,
summary_filter: str = '*',
forecast_file: str = 'forecast.csv',
group_by: str = 'id'):
"""
Build a median ensemble from files found in the experiment path.
:param experiment_path: Experiment path.
:param summary_filter: Filter which experiment instances should be included in ensemble.
:param forecast_file: Name of the file with results.
:param group_by: Grouping key.
:return: Pandas dataframe with median forecasts.
"""
return pd.concat([pd.read_csv(file)
for file in
tqdm(glob(os.path.join(experiment_path, summary_filter + forecast_file)))], sort=False) \
.set_index(group_by).groupby(level=group_by, sort=False).median().values
def group_ids(ids: np.ndarray, groups: np.ndarray, group_name: str) -> np.ndarray:
"""
Filter ids array by group indices and clean it from NaNs.
:param ids: Ids to filter.
:param groups: Timeseries groups.
:param group_name: Group name to filter by.
:return: Filtered and cleaned timeseries - ids.
"""
ids = np.array([v for v in ids[groups == group_name]], dtype=object)
return ids
def group_values(values: np.ndarray, groups: np.ndarray, group_name: str) -> np.ndarray:
"""
Filter values array by group indices and clean it from NaNs.
:param values: Values to filter.
:param groups: Timeseries groups.
:param group_name: Group name to filter by.
:return: Filtered and cleaned timeseries - values.
"""
values = np.array([v for v in values[groups == group_name]], dtype=object)
return values
def do_sample(ts, insample_size, outsample_size, batch_size, window_sampling_limit):
insample = np.zeros((batch_size, insample_size))
insample_mask = np.zeros((batch_size, insample_size))
outsample = np.zeros((batch_size, outsample_size))
outsample_mask = np.zeros((batch_size, outsample_size))
sampled_ts_indices = np.random.randint(len(ts), size=batch_size)
for i, sampled_index in enumerate(sampled_ts_indices):
sampled_timeseries = ts[sampled_index]
cut_point = np.random.randint(low=max(1, len(sampled_timeseries) - window_sampling_limit),
high=len(sampled_timeseries),
size=1)[0]
insample_window = sampled_timeseries[max(0, cut_point - insample_size):cut_point]
insample[i, -len(insample_window):] = insample_window
insample_mask[i, -len(insample_window):] = 1.0
outsample_window = sampled_timeseries[
cut_point:min(len(sampled_timeseries), cut_point + outsample_size)]
outsample[i, :len(outsample_window)] = outsample_window
outsample_mask[i, :len(outsample_window)] = 1.0
return insample, insample_mask, outsample, outsample_mask
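# Illustrative usage (an addition, not part of the original script): sample training
# windows from two ragged series; zero-padded positions are flagged by the masks.
def _demo_do_sample():
    ts = [np.arange(100, dtype=float), np.arange(50, dtype=float)]
    x, x_mask, y, y_mask = do_sample(ts, insample_size=12, outsample_size=6,
                                     batch_size=4, window_sampling_limit=20)
    assert x.shape == (4, 12) and y.shape == (4, 6)
    assert set(np.unique(x_mask)) <= {0.0, 1.0}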
def last_insample_window(ts, insample_size):
"""
The last window of insample size of all timeseries.
This function does not support batching and does not reshuffle timeseries.
:return: Last insample window of all timeseries. Shape "timeseries, insample size"
"""
insample = np.zeros((len(ts), insample_size))
insample_mask = np.zeros((len(ts), insample_size))
for i, series in enumerate(ts):  # loop variable renamed to avoid shadowing the `ts` argument
    ts_last_window = series[-insample_size:]
    insample[i, -len(ts_last_window):] = ts_last_window
    insample_mask[i, -len(ts_last_window):] = 1.0
return insample, insample_mask
def default_device() -> t.device:
"""
PyTorch default device is GPU when available, CPU otherwise.
:return: Default device.
"""
return t.device('cuda' if t.cuda.is_available() else 'cpu')
def to_tensor(array: np.ndarray) -> t.Tensor:
"""
Convert numpy array to tensor on default device.
:param array: Numpy array to convert.
:return: PyTorch tensor on default device.
"""
return t.tensor(array, dtype=t.float32).to(default_device())
# loss function
def divide_no_nan(a, b):
"""
a/b where the resulted NaN or Inf are replaced by 0.
"""
result = a / b
result[result != result] = .0
result[result == np.inf] = .0
return result
def mape_loss(forecast: t.Tensor, target: t.Tensor, mask: t.Tensor) -> t.float:
"""
MAPE loss as defined in: https://en.wikipedia.org/wiki/Mean_absolute_percentage_error
:param forecast: Forecast values. Shape: batch, time
:param target: Target values. Shape: batch, time
:param mask: 0/1 mask. Shape: batch, time
:return: Loss value
"""
weights = divide_no_nan(mask, target)
return t.mean(t.abs((forecast - target) * weights))
def smape_2_loss(forecast, target, mask) -> t.float:
"""
sMAPE loss as defined in https://robjhyndman.com/hyndsight/smape/ (Makridakis 1993)
:param forecast: Forecast values. Shape: batch, time
:param target: Target values. Shape: batch, time
:param mask: 0/1 mask. Shape: batch, time
:return: Loss value
"""
return 200 * t.mean(divide_no_nan(t.abs(forecast - target),
t.abs(forecast.data) + t.abs(target.data)) * mask)
def mase_loss(insample: t.Tensor, freq: int,
forecast: t.Tensor, target: t.Tensor, mask: t.Tensor) -> t.float:
"""
MASE loss as defined in "Scaled Errors" https://robjhyndman.com/papers/mase.pdf
:param insample: Insample values. Shape: batch, time_i
:param freq: Frequency value
:param forecast: Forecast values. Shape: batch, time_o
:param target: Target values. Shape: batch, time_o
:param mask: 0/1 mask. Shape: batch, time_o
:return: Loss value
"""
masep = t.mean(t.abs(insample[:, freq:] - insample[:, :-freq]), dim=1)
masked_masep_inv = divide_no_nan(mask, masep[:, None])
return t.mean(t.abs(target - forecast) * masked_masep_inv)
def __loss_fn(loss_name: str):
def loss(x, freq, forecast, target, target_mask):
if loss_name == 'MAPE':
return mape_loss(forecast, target, target_mask)
elif loss_name == 'MASE':
return mase_loss(x, freq, forecast, target, target_mask)
elif loss_name == 'SMAPE':
return smape_2_loss(forecast, target, target_mask)
else:
raise Exception(f'Unknown loss function: {loss_name}')
return loss
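# Tiny numeric sanity check (an addition, not part of the original script): for a flat
# forecast of 10 against targets 8 and 12, sMAPE = 200 * mean(2/18, 2/22) ≈ 20.2.
# The insample series and frequency arguments are only used by MASE, so None is fine here.
def _demo_smape_loss():
    forecast = t.tensor([[10.0, 10.0]])
    target = t.tensor([[8.0, 12.0]])
    mask = t.ones_like(target)
    loss = __loss_fn('SMAPE')(None, None, forecast, target, mask)
    assert abs(float(loss) - 20.2) < 0.1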
# metric
def mase(forecast: np.ndarray, insample: np.ndarray, outsample: np.ndarray, frequency: int) -> np.ndarray:
"""
MASE loss as defined in "Scaled Errors" https://robjhyndman.com/papers/mase.pdf
:param forecast: Forecast values. Shape: batch, time_o
:param insample: Insample values. Shape: batch, time_i
:param outsample: Target values. Shape: batch, time_o
:param frequency: Frequency value
:return: Same shape array with error calculated for each time step
"""
return np.mean(np.abs(forecast - outsample)) / np.mean(np.abs(insample[:-frequency] - insample[frequency:]))
def nd(forecast: np.ndarray, target: np.ndarray) -> float:
"""
Normalized deviation as defined in https://www.cs.utexas.edu/~rofuyu/papers/tr-mf-nips.pdf
:param forecast: Forecast values. Shape: batch, time
:param target: Target values. Shape: batch, time
:return: Error value
"""
return np.mean(np.abs(target - forecast)) / np.mean(np.abs(target))
def nrmse(forecast: np.ndarray, target: np.ndarray) -> float:
"""
Normalized RMSE as defined in https://www.cs.utexas.edu/~rofuyu/papers/tr-mf-nips.pdf
:param forecast: Forecast values. Shape: batch, time
:param target: Target values. Shape: batch, time
:return: Error values
"""
return np.sqrt(np.mean(np.power((forecast - target), 2))) / (np.mean(np.abs(target)))
def mape(forecast: np.ndarray, target: np.ndarray) -> np.ndarray:
"""
MAPE loss as defined in: https://en.wikipedia.org/wiki/Mean_absolute_percentage_error
:param forecast: Predicted values.
:param target: Target values.
:return: Same shape array with error calculated for each time step
"""
return 100 * np.abs(forecast - target) / target
def smape_1(forecast: np.ndarray, target: np.ndarray) -> np.ndarray:
"""
sMAPE loss as defined in "Appendix A" of
http://www.forecastingprinciples.com/files/pdf/Makridakia-The%20M3%20Competition.pdf
:param forecast: Forecast values. Shape: batch, time
:param target: Target values. Shape: batch, time
:return: Same shape array with error calculated for each time step
"""
return 200 * np.abs(forecast - target) / (target + forecast)
def smape_2(forecast: np.ndarray, target: np.ndarray) -> np.ndarray:
"""
sMAPE loss as defined in https://robjhyndman.com/hyndsight/smape/ (Makridakis 1993)
:param forecast: Forecast values. Shape: batch, time
:param target: Target values. Shape: batch, time
:return: Same shape array with sMAPE calculated for each time step of each timeseries.
"""
denom = np.abs(target) + np.abs(forecast)
# divide by 1.0 instead of 0.0, in case when denom is zero the enumerator will be 0.0 anyway.
denom[denom == 0.0] = 1.0
return 200 * np.abs(forecast - target) / denom
def check_directorys(f: str) -> None:
if not Path(f).is_dir():
print(f"create directory: {f.split(sep='/')[-1]}")
Path(f).mkdir(parents=True, exist_ok=True)
###############################################################################
# init
# M4 Experiments
# Models: seasonal -> lookback -> loss
def m4experiments(cfg: M4Config, dataset: M4Dataset, model_type='generic') -> None:
trainset = dataset.trainset
for seasonal_pattern in cfg.seasonal_patterns:
for j, lookback in enumerate(cfg.lookbacks):
for k, loss in enumerate(cfg.losses):
history_size_in_horizons = cfg.history_size[seasonal_pattern]
horizon = cfg.horizons_map[seasonal_pattern]
input_size = lookback * horizon
timeseries_frequency=cfg.frequency_map[seasonal_pattern]
# Data sampling
train_ids = group_values(dataset.ids, dataset.groups, seasonal_pattern)
train_values = group_values(trainset, dataset.groups, seasonal_pattern)
#test_values = group_values(testset, dataset.groups, seasonal_pattern)
timeseries = [ts for ts in train_values]
window_sampling_limit = int(history_size_in_horizons * horizon)
batch_size = cfg.batch_size
insample_size = input_size
outsample_size = horizon
if model_type == 'generic':
model = NBeats(t.nn.ModuleList([NBeatsBlock(input_size=insample_size,
theta_size=insample_size + outsample_size,
basis_function=GenericBasis(backcast_size=insample_size,
forecast_size=outsample_size),
layers=cfg.layers,
layer_size=cfg.layer_size)
for _ in range(cfg.stacks)]))
elif model_type == 'interpretable':
trend_block = NBeatsBlock(input_size=insample_size,
theta_size=2 * (cfg.trend_degree_of_polynomial + 1),
basis_function=TrendBasis(degree_of_polynomial=cfg.trend_degree_of_polynomial,
backcast_size=insample_size,
forecast_size=outsample_size),
layers=cfg.trend_layers,
layer_size=cfg.trend_layer_size)
seasonality_block = NBeatsBlock(input_size=insample_size,
theta_size=4 * int(
np.ceil(cfg.seasonality_num_of_harmonics / 2 * outsample_size) - (cfg.seasonality_num_of_harmonics - 1)),
basis_function=SeasonalityBasis(harmonics=cfg.seasonality_num_of_harmonics,
backcast_size=insample_size,
forecast_size=outsample_size),
layers=cfg.seasonality_layers,
layer_size=cfg.seasonality_layer_size)
model = NBeats(t.nn.ModuleList([trend_block for _ in range(cfg.trend_blocks)] +
[seasonality_block for _ in range(cfg.seasonality_blocks)]))
else:
print(f"There is no {model_type} model-!!")
return
model = model.to(default_device())
learning_rate = cfg.learning_rate
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
training_loss_fn = __loss_fn(loss)
iterations = cfg.iterations[seasonal_pattern]
lr_decay_step = iterations // 3
if lr_decay_step == 0:
lr_decay_step = 1
forecasts = []
for i in range(1, iterations + 1):
model.train()
training_set = do_sample(timeseries,
insample_size,
outsample_size,
batch_size,
window_sampling_limit)
x, x_mask, y, y_mask = map(to_tensor, training_set)
optimizer.zero_grad() # init gradients before back-propagation
forecast = model(x, x_mask)
training_loss = training_loss_fn(x, timeseries_frequency, forecast, y, y_mask)
if np.isnan(float(training_loss)):
break
training_loss.backward()
t.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
for param_group in optimizer.param_groups:
param_group["lr"] = learning_rate * 0.5 ** (i // lr_decay_step)
if iterations <= 15 or i % 100 == 0:
f = f'./steps/{model_type}-{seasonal_pattern}-{lookback}-{loss}/'
check_directorys(f)
f += f'weight_iter_{i}.pth'
print('Save Model:', f)
t.save(model, f)
print(f'iter:{i}/{iterations} \t loss:{training_loss:.3f}')
# Evaluate
x, x_mask = map(to_tensor,
last_insample_window(timeseries, insample_size))
model.eval()
with t.no_grad():
forecasts.extend(model(x, x_mask).cpu().detach().numpy())
forecasts_df = pd.DataFrame(forecasts,
columns=[f'V{i + 1}' for i in range(horizon)])
forecasts_df.index = train_ids
forecasts_df.index.name = 'id'
f = cfg.pathResult + f'{model_type}/'
check_directorys(f)
f += dt.now().strftime("%Y-%m-%d %H%M%S")
f += f'+{seasonal_pattern}+{lookback}+{loss}+forecast.csv'
print(f'Dump start: {f}')
forecasts_df.to_csv(f)
def summarize_groups(groups, scores):
"""
Re-group scores respecting M4 rules.
:param scores: Scores per group.
:return: Grouped scores.
"""
scores_summary = OrderedDict()
def group_count(group_name):
return len(np.where(groups == group_name)[0])
weighted_score = {}
for g in ['Yearly', 'Quarterly', 'Monthly']:
weighted_score[g] = scores[g] * group_count(g)
scores_summary[g] = scores[g]
others_score = 0
others_count = 0
for g in ['Weekly', 'Daily', 'Hourly']:
others_score += scores[g] * group_count(g)
others_count += group_count(g)
weighted_score['Others'] = others_score
scores_summary['Others'] = others_score / others_count
average = np.sum(list(weighted_score.values())) / len(groups)
scores_summary['Average'] = average
return scores_summary
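# Worked example (an addition, not part of the original script) of the M4 re-grouping
# rule: Yearly/Quarterly/Monthly are reported as-is, Weekly/Daily/Hourly are pooled into
# 'Others', and 'Average' weights every group score by its series count.
def _demo_summarize_groups():
    groups = np.array(['Yearly', 'Yearly', 'Weekly', 'Daily'])
    scores = {'Yearly': 10.0, 'Quarterly': 0.0, 'Monthly': 0.0,
              'Weekly': 20.0, 'Daily': 40.0, 'Hourly': 0.0}
    summary = summarize_groups(groups, scores)
    assert summary['Others'] == 30.0   # (20*1 + 40*1 + 0*0) / 2
    assert summary['Average'] == 20.0  # (10*2 + 0 + 0 + 60) / 4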
def m4evaluate(cfg: M4Config, dataset: M4Dataset, target_path: str) -> Tuple[dict, dict]:
# Need path check later...
target_path = cfg.pathResult + f'/{target_path}/'
forecast = median_ensemble(experiment_path = target_path,
summary_filter = '**',
forecast_file = 'forecast.csv',
group_by = 'id')
forecast = np.array([v[~np.isnan(v)] for v in forecast], dtype=object)
groups = dataset.groups
grouped_smapes = {group_name: np.mean(smape_2(forecast=group_values(values=forecast,
groups=groups,
group_name=group_name),
target=group_values(values=dataset.testset,
groups=groups,
group_name=group_name)))
for group_name in np.unique(groups)}
grouped_smapes = summarize_groups(groups, grouped_smapes)
grouped_owa = OrderedDict()
naive2_forecasts = pd.read_csv(os.path.join(cfg.pathDatasetOrg, 'submission-Naive2.csv')).values[:, 1:].astype(np.float32)
naive2_forecasts = np.array([v[~np.isnan(v)] for v in naive2_forecasts], dtype=object)
model_mases = {}
naive2_smapes = {}
naive2_mases = {}
for group_name in np.unique(groups):
model_forecast = group_values(forecast,
groups,
group_name)
naive2_forecast = group_values(naive2_forecasts,
groups,
group_name)
target = group_values(dataset.testset,
groups,
group_name)
# all timeseries within group have same frequency
frequency = dataset.frequencies[groups == group_name][0]
insample = group_values(dataset.trainset,
groups,
group_name)
model_mases[group_name] = np.mean([mase(forecast=model_forecast[i],
insample=insample[i],
outsample=target[i],
frequency=frequency) for i in range(len(model_forecast))])
naive2_mases[group_name] = np.mean([mase(forecast=naive2_forecast[i],
insample=insample[i],
outsample=target[i],
frequency=frequency) for i in range(len(model_forecast))])
naive2_smapes[group_name] = np.mean(smape_2(naive2_forecast, target))
grouped_model_mases = summarize_groups(groups, model_mases)
grouped_naive2_smapes = summarize_groups(groups, naive2_smapes)
grouped_naive2_mases = summarize_groups(groups, naive2_mases)
for k in grouped_model_mases.keys():
grouped_owa[k] = (grouped_model_mases[k] / grouped_naive2_mases[k] +
grouped_smapes[k] / grouped_naive2_smapes[k]) / 2
def round_all(d):
return dict(map(lambda kv: (kv[0], np.round(kv[1], 3)), d.items()))
return round_all(grouped_smapes), round_all(grouped_owa)
if __name__ == '__main__':
# working directory
os.chdir(r"C:\Users\taeni\Documents\GitHub\nbeats_reproduce/")
# Set Config
m4cfg = M4Config()
m4cfg.lookbacks = [2, 4, 7]
m4cfg.iterations = {'Yearly': 5,
'Quarterly': 5,
'Monthly': 5,
'Weekly': 5,
'Daily': 5,
'Hourly': 5
}
# M4 Dataset
m4dataset = M4Dataset(path_org = m4cfg.pathDatasetOrg,
path_dump = m4cfg.pathDatasetDump)
# M4 Experiments
print("Experiment - generic")
m4experiments(m4cfg, m4dataset, 'generic')
print("Experiment - interpretable")
m4experiments(m4cfg, m4dataset, 'interpretable')
# M4 Evaluate
eval_generic = m4evaluate(m4cfg, m4dataset, 'generic')
eval_generic = pd.DataFrame(eval_generic, index=['SMAPE', 'OWA'])
eval_interpretable = m4evaluate(m4cfg, m4dataset, 'interpretable')
eval_interpretable = pd.DataFrame(eval_interpretable, index=['SMAPE', 'OWA'])
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed but you almost always don't want to do it
# (tables.NaturalNameWarning)
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (strings are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
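# Note (illustrative sketch): `dropna=True` drops rows that are entirely NaN
# at write time, while `dropna=False` keeps them (and the default is to keep
# them, per GH 9382). The default can also be steered via the option,
# roughly:
#   pd.set_option("io.hdf.dropna_table", False)
#   store.append("key", df)  # all-NaN rows are retained
# Rows containing any string or datetime column are always written.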
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test that the file is properly closed after a
# read with KeyError, before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like an invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates an additional data_column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize on the values block alongside a data_column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data column)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
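# Note (illustrative): the "doc example" pattern above boils down to
# declaring the queryable columns up front and then filtering on disk,
# roughly:
#   store.append("df_dc", df_dc, data_columns=["B", "C", "string"])
#   store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
# where a list of where terms is combined with AND.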
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# index level name duplicates a column name
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# duplicate names across index levels
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully named
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
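# Note (illustrative): with data_columns=True the MultiIndex levels ("date",
# "id") are stored as queryable data columns, which is why a level name can
# appear directly in the where clause, e.g. `store.select("df", "id == 1")`.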
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store (put) an empty frame directly
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
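# Note (illustrative): timedelta64 data columns can be compared in a where
# clause either against a Timedelta expression or a frequency-style string,
# e.g. roughly:
#   store.select("df", "C < pd.Timedelta('-3D')")
#   store.select("df", "C < '-3D'")
# both of which are exercised above; NaT rows may need dropping by hand.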
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that we can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all columns as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# a NaN not in the first position - float with NaN is ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
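# Note (illustrative): where clauses are evaluated with access to the local
# namespace, so a plain Python variable can be referenced by name, e.g.
#   np_zero = np.float64(0)
#   store.select("df", where=["A > np_zero"])
# as in the GH 11283 case above.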
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
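# Note (illustrative sketch): for table-format stores, selection can be
# streamed instead of materialised in one go, roughly:
#   for chunk in store.select("df", chunksize=100):
#       process(chunk)  # `process` is a hypothetical consumer
# read_hdf supports the same chunksize/iterator arguments, but only for
# table format - fixed-format nodes raise TypeError (checked above).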
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and a where clause entirely past the end of range
# (selects nothing)
where = "index > '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
assert len(results) == 1
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = "index <= '{beg_dt}' & index >= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
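    # Illustrative sketch (not an actual test): the chunked-select pattern these
    # tests exercise, assuming a table "df" with a datetime index already appended.
    #     where = "index >= '2000-01-01' & index <= '2000-01-02'"
    #     chunks = store.select("df", where=where, chunksize=10000)
    #     result = concat(list(chunks))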
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self, setup_path):
# GH 3499, losing frequency info on index recreation
df = DataFrame(
dict(A=Series(range(3), index=date_range("2000-1-1", periods=3, freq="H")))
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "data")
store.put("data", df, format="table")
result = store.get("data")
tm.assert_frame_equal(df, result)
for attr in ["freq", "tz", "name"]:
for idx in ["index", "columns"]:
assert getattr(getattr(df, idx), attr, None) == getattr(
getattr(result, idx), attr, None
)
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("data", df2)
assert store.get_storer("data").info["index"]["freq"] is None
# this is ok
_maybe_remove(store, "df2")
df2 = DataFrame(
dict(
A=Series(
range(3),
index=[
Timestamp("20010101"),
Timestamp("20010102"),
Timestamp("20020101"),
],
)
)
)
store.append("df2", df2)
df3 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("df2", df3)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes2(self, setup_path):
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df = DataFrame(
dict(
A=Series(
range(3), index=date_range("2000-1-1", periods=3, freq="H")
)
)
)
df.to_hdf(path, "data", mode="w", append=True)
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
df2.to_hdf(path, "data", append=True)
idx = date_range("2000-1-1", periods=3, freq="H")
idx.name = "foo"
df = DataFrame(dict(A=Series(range(3), index=idx)))
df.to_hdf(path, "data", mode="w", append=True)
assert read_hdf(path, "data").index.name == "foo"
with catch_warnings(record=True):
idx2 = date_range("2001-1-1", periods=3, freq="H")
idx2.name = "bar"
df2 = DataFrame(dict(A=Series(range(3), index=idx2)))
df2.to_hdf(path, "data", append=True)
assert read_hdf(path, "data").index.name is None
def test_frame_select(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
with pytest.raises(ValueError):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(self, setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", "(index>df.index[3] & " 'index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
with pytest.raises(NotImplementedError):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self, setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select("df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self, setup_path):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
# not implemented
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A'] | columns=['B']")
# in theory we could deal with this
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A','B'] & columns=['C']")
def test_string_select(self, setup_path):
# GH 2973
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df["x"] = "none"
            df.loc[df.index[2:7], "x"] = ""
store.append("df", df, data_columns=["x"])
result = store.select("df", "x=none")
expected = df[df.x == "none"]
tm.assert_frame_equal(result, expected)
result = store.select("df", "x!=none")
expected = df[df.x != "none"]
tm.assert_frame_equal(result, expected)
df2 = df.copy()
df2.loc[df2.x == "", "x"] = np.nan
store.append("df2", df2, data_columns=["x"])
result = store.select("df2", "x!=none")
expected = df2[isna(df2.x)]
tm.assert_frame_equal(result, expected)
# int ==/!=
df["int"] = 1
            df.loc[df.index[2:7], "int"] = 2
store.append("df3", df, data_columns=["int"])
result = store.select("df3", "int=2")
expected = df[df.int == 2]
tm.assert_frame_equal(result, expected)
result = store.select("df3", "int!=2")
expected = df[df.int != 2]
tm.assert_frame_equal(result, expected)
def test_read_column(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# GH 17912
# HDFStore.select_column should raise a KeyError
# exception if the key is not a valid store
with pytest.raises(KeyError, match="No object named df in the file"):
store.select_column("df", "index")
store.append("df", df)
# error
with pytest.raises(
KeyError, match=re.escape("'column [foo] not found in the table'")
):
store.select_column("df", "foo")
with pytest.raises(Exception):
store.select_column("df", "index", where=["index>5"])
# valid
result = store.select_column("df", "index")
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
with pytest.raises(ValueError):
store.select_column("df", "values_block_0")
# a data column
df2 = df.copy()
df2["string"] = "foo"
store.append("df2", df2, data_columns=["string"])
result = store.select_column("df2", "string")
tm.assert_almost_equal(result.values, df2["string"].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3["string"] = "foo"
            df3.loc[df3.index[4:6], "string"] = np.nan
store.append("df3", df3, data_columns=["string"])
result = store.select_column("df3", "string")
tm.assert_almost_equal(result.values, df3["string"].values)
# start/stop
result = store.select_column("df3", "string", start=2)
tm.assert_almost_equal(result.values, df3["string"].values[2:])
result = store.select_column("df3", "string", start=-2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:])
result = store.select_column("df3", "string", stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[:2])
result = store.select_column("df3", "string", stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[:-2])
result = store.select_column("df3", "string", start=2, stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[2:-2])
result = store.select_column("df3", "string", start=-2, stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({"A": np.random.randn(10), "B": "foo"})
store.append("df4", df4, data_columns=True)
expected = df4["B"]
result = store.select_column("df4", "B")
tm.assert_series_equal(result, expected)
def test_coordinates(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append("df", df)
# all
c = store.select_as_coordinates("df")
assert (c.values == np.arange(len(df.index))).all()
# get coordinates back & test vs frame
_maybe_remove(store, "df")
df = DataFrame(dict(A=range(5), B=range(5)))
store.append("df", df)
c = store.select_as_coordinates("df", ["index<3"])
assert (c.values == np.arange(3)).all()
result = store.select("df", where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates("df", ["index>=3", "index<=4"])
assert (c.values == np.arange(2) + 3).all()
result = store.select("df", where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
_maybe_remove(store, "df1")
_maybe_remove(store, "df2")
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
c = store.select_as_coordinates("df1", ["A>0", "B>0"])
df1_result = store.select("df1", c)
df2_result = store.select("df2", c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(setup_path) as store:
df = DataFrame(
np.random.randn(1000, 2), index=date_range("20000101", periods=1000)
)
store.append("df", df)
c = store.select_column("df", "index")
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# boolean
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# invalid
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df), dtype="float64"))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df) + 1))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5)
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5, stop=10)
# selection with filter
selection = date_range("20000101", periods=500)
result = store.select("df", where="index in selection")
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append("df2", df)
result = store.select("df2", where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select("df2", where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select("df2", start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
df = | concat([df1, df2], axis=1) | pandas.concat |
"""
Script to perform tests by use of the pytest suite
"""
import os
from os import path
import logging
import pandas as pd
import joblib
import churn_library as cl
import constants
def test_import(test_dataframe):
"""
tests data import - Uses the test_dataframe fixture to read in the csv
"""
try:
assert test_dataframe.shape[0] > 0
assert test_dataframe.shape[1] > 0
except AssertionError as err:
logging.error(
"Testing import_data: The file doesn't appear to have rows or columns")
raise err
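# Hedged sketch of the fixture assumed by test_import (the fixture name comes from
# the signature above; the import_data helper and its argument are assumptions and
# would normally live in conftest.py):
#     @pytest.fixture(scope="module")
#     def test_dataframe():
#         return cl.import_data(constants.DATA_PATH)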
def test_create_churn_col():
"""
tests create_churn_col
"""
    dataframe = pd.DataFrame(columns=['Attrition_Flag'])
"""
Get AWS spot price information
"""
import datetime
import time
import boto3
import pandas as pd
from decisionengine.framework.modules import Transform
# default values
REGION = "us-west-2"
INSTANCE_TYPES = ["m3.2xlarge", "c3.2xlarge"]
HISTORY_OBSERVATION_TIME = 3600 # observe spot price for the last hour
# HISTORY_START_TIME = HISTORY_END_TIME.replace(day=(HISTORY_END_TIME.day-1)) # 1 day ago
PRODUCT_DESCRIPTIONS = ["Linux/UNIX"]
DRY_RUN = False
MAX_RESULTS = 1000
AVAILABILITY_ZONE = "" # any
class SpotPriceData:
"""
Spot Price data element
"""
def __init__(self, sp_data):
"""
:type sp_data: :obj:`dict`
:arg sp_data: spot price data
"""
self.data = sp_data
self.data["Timestamp"] = sp_data["Timestamp"].isoformat()
def __eq__(self, other):
"""
overrides comparison method
"""
if not isinstance(other, SpotPriceData):
return False
return (self.data["AvailabilityZone"], self.data["InstanceType"]) == (
other.data["AvailabilityZone"],
other.data["InstanceType"],
)
def __ne__(self, other):
return not self.__eq__(other)
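# Illustrative example (values are made up): two records for the same zone and
# instance type compare equal, which is how spot_price_summary below keeps only
# the most recent price per (AvailabilityZone, InstanceType) pair.
#     a = SpotPriceData({"AvailabilityZone": "us-west-2a", "InstanceType": "m3.2xlarge",
#                        "SpotPrice": "0.081", "Timestamp": datetime.datetime(2020, 1, 1)})
#     b = SpotPriceData({"AvailabilityZone": "us-west-2a", "InstanceType": "m3.2xlarge",
#                        "SpotPrice": "0.085", "Timestamp": datetime.datetime(2020, 1, 2)})
#     assert a == b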
class AWSSpotPriceForRegion:
"""
Spot price data and methods
"""
def __init__(self, region=REGION, profile_name=None):
"""
:type region: :obj:`str`
:arg region: AWS region name
:type profile_name: :obj:`str`
:arg profile_name: legal AWS profile name
"""
if profile_name:
session = boto3.session.Session(profile_name=profile_name, region_name=region)
self.ec2 = session.client("ec2", region_name=region)
else:
self.ec2 = boto3.client("ec2", region_name=region)
self.account_name = profile_name
t = time.time()
self.start_time = datetime.datetime.utcfromtimestamp(t - HISTORY_OBSERVATION_TIME)
self.end_time = datetime.datetime.utcfromtimestamp(t)
        self.instance_types = INSTANCE_TYPES
self.dry_run = DRY_RUN
self.max_results = MAX_RESULTS
self.product_descriptions = PRODUCT_DESCRIPTIONS
self.availability_zone = AVAILABILITY_ZONE
def init_query(
self,
spot_price_history_start_time=None,
spot_price_history_end_time=None,
instance_types=INSTANCE_TYPES,
product_descriptions=PRODUCT_DESCRIPTIONS,
dry_run=DRY_RUN,
        max_results=MAX_RESULTS,
availability_zone=AVAILABILITY_ZONE,
):
"""
Init AWS spot price query
:type spot_price_history_start_time: :obj:`str`
:arg spot_price_history_start_time: price since.
:type spot_price_history_end_time: :obj:`str`
:arg spot_price_history_end_time: price till.
:type instance_types: :obj:`list`
:arg instance_types: list of AWS instance types to query spot price for.
:type dry_run: :obj:`bool`
:arg dry_run: as described in boto3 documentation.
        :type max_results: :obj:`int`
        :arg max_results: maximum number of results to return.
"""
if spot_price_history_start_time:
self.start_time = spot_price_history_start_time
if spot_price_history_end_time:
self.end_time = spot_price_history_end_time
        self.instance_types = instance_types
        self.dry_run = dry_run
        self.max_results = max_results
self.product_descriptions = product_descriptions
self.availability_zone = availability_zone
def get_price(self, logger):
"""
Get AWS spot prices.
"""
try:
rc = self.ec2.describe_spot_price_history(
DryRun=self.dry_run,
StartTime=self.start_time,
EndTime=self.end_time,
                InstanceTypes=self.instance_types,
ProductDescriptions=self.product_descriptions,
Filters=[],
AvailabilityZone=self.availability_zone,
MaxResults=self.max_results,
NextToken="",
)
except Exception:
logger.exception("Exception in AWSSpotPrice call to get_price")
return None
price_history = rc.get("SpotPriceHistory")
if len(price_history) == 0:
price_history = None
return price_history
def spot_price_summary(self, spot_price_history):
"""
Returns the current spot prices per
availability zone and instance type
:type spot_price_history: :obj:`list`
:arg spot_price_history: list of dictonaries
:rtype: :obj:`list`: list of spot price data (:class:`SpotPriceData`)
"""
ll = []
for item in spot_price_history:
item["AccountName"] = self.account_name
spd = SpotPriceData(item)
if spd not in ll:
# append if there is no element with given
# availability zone and instance type
ll.append(spd)
else:
# replace spot price by the most recent
i = ll.index(spd)
ll[i].data["Timestamp"] = spd.data["Timestamp"]
ll[i].data["SpotPrice"] = spd.data["SpotPrice"]
return ll
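# Sketch of the per-region flow used by the transform below (the profile name and
# logger are placeholders):
#     sp = AWSSpotPriceForRegion("us-west-2", profile_name="billing-profile")
#     sp.init_query(instance_types=["m3.2xlarge", "c3.2xlarge"])
#     history = sp.get_price(logger)
#     if history:
#         latest = sp.spot_price_summary(history)  # one entry per (zone, instance type)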
@Transform.consumes(spot_occupancy_config=pd.DataFrame)
@Transform.produces(provisioner_resource_spot_prices=pd.DataFrame)
class AWSSpotPrice(Transform.Transform):
def __init__(self, config):
super().__init__(config)
def transform(self, data_block):
"""
Gets data from AWS
:rtype: pandas frame (:class:`pd.DataFramelist`)
"""
self.logger.debug("in AWSSpotPrice transform")
account_dict = data_block.get("spot_occupancy_config").to_dict()
self.logger.debug(f"account_dict {account_dict}")
sp_data = []
for account in account_dict:
for region, instances in account_dict[account].items():
spot_price_info = AWSSpotPriceForRegion(region, profile_name=account)
spot_price_info.init_query(instance_types=instances)
spot_price_history = spot_price_info.get_price(self.logger)
if spot_price_history:
sp_data += spot_price_info.spot_price_summary(spot_price_history)
sp_list = [i.data for i in sp_data]
column_names = [
"AccountName",
"AvailabilityZone",
"InstanceType",
"ProductDescription",
"SpotPrice",
"Timestamp",
]
return {"provisioner_resource_spot_prices": | pd.DataFrame(sp_list, columns=column_names) | pandas.DataFrame |
# coding: utf-8
import warnings
warnings.filterwarnings('ignore')
#reading the text file containing x and y coordinates of 654 customer locations
import pandas as pd
df = pd.read_csv('p654.txt', skiprows=1, skipfooter=22, engine='python', delimiter = r"\s+", names=['x', 'y', 'c'])
import numpy as np
import utils.gen_cutouts as gc
from sklearn import metrics
import pandas as pd
import ipdb
import matplotlib
from matplotlib import pyplot as plt
matplotlib.rcParams['mathtext.fontset'] = 'stixsans'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
MEAN_TEMP = 2.726 * (10**6)
DEFAULT_FONT = 24
import os
from global_settings import DATA_PATH, FULL_DATA_PATH, FULL_DATA_LABEL_PATH, CNN_MODEL_OUTPUT_DIR, CACHE_FULLDF, CACHE_MAPPED_HALOS, CACHE_FULLDF_DIST2EDGE_CAL
import os
def prepare_data_class(dir_test, num_frequency=3, get_all_components=False, label_fname="1025_hashalo_freq%03i.npy" % 148,
balanced=False,
suffix=""):
"""
read data from dir_test, and prepare data with different noise level (components)
"""
freqs=[90,148,219]
def _load_help(name_format):
paths = [os.path.join(dir_test, name_format%freq) for freq in freqs]
ret = [np.load(p) for p in paths]
#print(paths)
return ret
# set file names for data
#y_data = np.load(dir_test + "1025_hashalo_freq%03i.npy"%148) # y data (labels)
y_data = np.load(os.path.join(dir_test, label_fname))
y_data[y_data > 1] = 1
y_data = y_data.astype(float)
nsamples = len(y_data)
#load data into dictionary
x_data_all = {}
# load data
k2uk = 1.0e6
Tcmb = 2.726
#load noise (for SPT-3G 1500 sq deg patch, it's [2.8,2.6,6.6]uK-arcmin)
noises = [np.load(os.path.join(dir_test, "noise_1uK-arcmin{}{}.npy".format(s, suffix))) for s in ["_90","_150", "_220"]]
noises = [noises[0]*2.8, noises[1]*2.6, noises[2]*6.6]
#samples has CMB+TSZ
try:
com = ['samples','ksz','ir_pts','rad_pts','dust']
x_data_all['base'] = _load_help("1025_samples_freq%03i{}.npy".format(suffix))
ksz_comp = _load_help("1025_ksz_freq%03i{}.npy".format(suffix))
x_data_all['ksz'] = [x_data_all['base'][i] + ksz_comp[i] for i in range(3)]
ir_comp = _load_help("1025_ir_pts_freq%03i{}.npy".format(suffix))
x_data_all['ir'] = [x_data_all['ksz'][i] + ir_comp[i] for i in range(3)]
rad_comp = _load_help("1025_rad_pts_freq%03i{}.npy".format(suffix))
x_data_all['rad'] = [x_data_all['ir'][i] + rad_comp[i] for i in range(3)]
dust_comp = _load_help("1025_dust_freq%03i{}.npy".format(suffix))
x_data_all['dust'] = [x_data_all['rad'][i] + dust_comp[i] for i in range(3)]
except Exception as err:
print("error: ", err)
print("reading only the composite")
x_data_all['dust'] = _load_help("1025_skymap_freq%03i{}.npy".format(suffix))
#return x_data_all['dust'], y_data
x_data = {}
for com1 in x_data_all.keys():
# add noise
x_data[com1] = np.empty((nsamples,num_frequency,10,10),dtype=np.float64)
if num_frequency == 3:
for i in range(3):
x_data[com1][:,i,:,:] = np.squeeze(x_data_all[com1][i]*k2uk*Tcmb) + noises[i]
else:
x_data[com1][:,0,:,:] = -np.squeeze(x_data_all[com1][2]*k2uk*Tcmb) - noises[2]
x_data[com1][:,0,:,:] += np.squeeze(x_data_all[com1][1]*k2uk*Tcmb) + noises[1]
if num_frequency > 1:
x_data[com1][:,1,:,:] = -np.squeeze(x_data_all[com1][2]*k2uk*Tcmb) - noises[2]
x_data[com1][:,1,:,:] += np.squeeze(x_data_all[com1][0]*k2uk*Tcmb) + noises[0]
if balanced:
n_pos = int(y_data.sum())
idx = np.arange(nsamples)
idx = np.concatenate([idx[y_data==0.0][:n_pos], idx[y_data==1.0]])
x_data = {k: x_data[k][idx] for k in x_data.keys()}
return x_data if get_all_components else x_data['dust'], y_data[idx], idx
return x_data if get_all_components else x_data['dust'], y_data
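# Hedged usage sketch (the directory and .npy layout are assumptions encoded in the
# loader above):
#     x, y = prepare_data_class("/path/to/cutouts", num_frequency=3)
#     # x: (nsamples, 3, 10, 10) noise-added maps, y: binary has-halo labels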
def prepare_data_class2(dir_test, num_frequency=3, component="skymap", label_fname="1025_hashalo_freq%03i.npy" % 148,
balanced=False,
use_noise=True,
get_test_idx=False,
suffix=""):
"""
read data from dir_test, and prepare data with different noise level (components)
"""
freqs=[90,148,219]
def _load_help(name_format):
paths = [os.path.join(dir_test, name_format%freq) for freq in freqs]
ret = [np.load(p) for p in paths]
#print(paths)
return ret
# set file names for data
y_data = np.load(os.path.join(dir_test, label_fname))
y_data[y_data > 1] = 1
y_data = y_data.astype(float)
nsamples = len(y_data)
#load data into dictionary
x_data_all = {}
# load data
k2uk = 1.0e6
Tcmb = 2.726
#load noise (for SPT-3G 1500 sq deg patch, it's [2.8,2.6,6.6]uK-arcmin)
if use_noise:
noises = [np.load(os.path.join(dir_test, "noise_1uK-arcmin{}{}.npy".format(s, suffix))) for s in ["_90","_150", "_220"]]
noises = [noises[0]*2.8, noises[1]*2.6, noises[2]*6.6]
else:
noises = [0., 0., 0.]
#samples has CMB+TSZ
x_data_all[component] = _load_help("1025_{}_freq%03i{}.npy".format(component, suffix))
x_data = {}
for com1 in x_data_all.keys():
# add noise
x_data[com1] = np.empty((nsamples,num_frequency,10,10),dtype=np.float64)
if num_frequency == 3:
for i in range(3):
x_data[com1][:,i,:,:] = np.squeeze(x_data_all[com1][i]*k2uk*Tcmb) + noises[i]
else:
x_data[com1][:,0,:,:] = -np.squeeze(x_data_all[com1][2]*k2uk*Tcmb) - noises[2]
x_data[com1][:,0,:,:] += np.squeeze(x_data_all[com1][1]*k2uk*Tcmb) + noises[1]
if num_frequency > 1:
x_data[com1][:,1,:,:] = -np.squeeze(x_data_all[com1][2]*k2uk*Tcmb) - noises[2]
x_data[com1][:,1,:,:] += np.squeeze(x_data_all[com1][0]*k2uk*Tcmb) + noises[0]
splits = np.asarray([0.8, 0.2])
splits = np.round(splits / splits.sum() * nsamples).astype(int).cumsum()
split_idx = np.split(np.arange(nsamples),splits[:-1])
x_data, x_test = {k: x_data[k][split_idx[0]] for k in x_data.keys()}, {k: x_data[k][split_idx[-1]] for k in x_data.keys()}
y_data, y_test = y_data[split_idx[0]], y_data[split_idx[-1]]
nsamples = len(y_data)
if balanced:
n_pos = int(y_data.sum())
idx = np.arange(nsamples)
idx = np.concatenate([idx[y_data==0.0][:n_pos], idx[y_data==1.0]])
x_data = {k: x_data[k][idx] for k in x_data.keys()}
if get_test_idx: return x_data[component], y_data[idx], x_test[component], y_test, idx, split_idx[-1]
return x_data[component], y_data[idx], x_test[component], y_test, idx
if get_test_idx:
return x_data[component], y_data, x_test[component], y_test, split_idx[-1]
return x_data[component], y_data, x_test[component], y_test
class DataHolder:
def __init__(self, data, label, idx):
self.data = data
self.label = label
self.idx = idx
def get(self, which, ratio=None, incl_idx=False):
curr_idx = self.idx[which]
y_data = self.label[curr_idx]
if ratio is not None:
n_pos = int(y_data.sum())
idx = np.arange(len(y_data))
idx = np.concatenate([idx[y_data == 0.0][:int(ratio * n_pos)], idx[y_data == 1.0]])
curr_idx = curr_idx[idx]
if incl_idx:
return self.data[curr_idx], self.label[curr_idx], curr_idx
return self.data[curr_idx], self.label[curr_idx]
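# Intended access pattern for DataHolder (the holder object is produced by
# DataGetter.get_data below; ratio keeps roughly `ratio` negatives per positive):
#     x_train, y_train = holder.get("train", ratio=20)
#     x_valid, y_valid, valid_idx = holder.get("valid", incl_idx=True)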
class DataGetter:
WO_DUST_MAPPING = ("dust", ['samples', 'ksz', 'ir_pts', 'rad_pts'])
def __init__(self, dir_test, overlap=False):
self.dir_test = dir_test
self.overlap = overlap
self.halocounter = gc.HalosCounter(overlap=overlap)
df = self.halocounter.get_complete_df()
if overlap:
df = df.reset_index().rename(columns={"index": "cutout_id"})
test_idx = df[(df['cutout_ra'] >= 0.5 * 90) & (df['cutout_dec'] > 0.5 * 90)].index
train_idx = df[~df.index.isin(test_idx)].index
n_samples = len(train_idx)
splits = np.asarray([0.65, 0.1])
splits = np.round(splits / splits.sum() * n_samples).astype(int).cumsum()
#print(splits)
#print(train_idx, len(train_idx))
split_idx = np.split(train_idx, splits[:-1])
split_idx = [split_idx[0], split_idx[1], test_idx]
#print(len(split_idx[0]), len(split_idx[1]), len(split_idx[2]))
#print(split_idx[0], split_idx[1], split_idx[2])
else:
n_samples = df.shape[0]
splits = np.asarray([0.7, 0.1, 0.2]) # (train ratio, valid ratio, test ratio)
splits = np.round(splits / splits.sum() * n_samples).astype(int).cumsum()
split_idx = np.split(np.arange(n_samples), splits[:-1])
#print(list(map(len, split_idx)), df.shape)
self.split_idx = {"train":split_idx[0], 'valid':split_idx[1], 'test':split_idx[2]}
pass
def get_labels(self, thres=5e13, which='full'):
if isinstance(thres, float) or isinstance(thres, int):
thres = ("%0.0e"%(thres)).replace("+", "")
label_fname = {"5e13": "m5e13_z0.25_y.npy", "2e14":"m2e14_z0.5_y.npy"}[thres]
y_data = np.load(os.path.join(self.dir_test, label_fname))
y_data[y_data > 1] = 1
y_data = y_data.astype(float)
if which == 'full': return y_data
return y_data[self.split_idx[which]]
def get_data(self, component, thres=5e13, use_noise=False, num_frequency=3):
suffix = "_overlap" if self.overlap else ""
freqs = [90, 148, 219]
def _load_help(name_format):
paths = [os.path.join(self.dir_test, name_format % freq) for freq in freqs]
return [np.load(p) for p in paths]
y_data = self.get_labels(thres, which='full')
nsamples = len(y_data)
x_data_all = {}
# load data
k2uk = 1.0e6
Tcmb = 2.726
# load noise (for SPT-3G 1500 sq deg patch, it's [2.8,2.6,6.6]uK-arcmin)
if use_noise:
noises = [np.load(os.path.join(self.dir_test, "noise_1uK-arcmin{}{}.npy".format(s, suffix))) for s in
["_90", "_150", "_220"]]
noises = [noises[0] * 2.8, noises[1] * 2.6, noises[2] * 6.6]
else:
noises = [0., 0., 0.]
# samples has CMB+TSZ
if isinstance(component, str):
x_data_all[component] = _load_help("1025_{}_freq%03i{}.npy".format(component, suffix))
elif isinstance(component,tuple):
component, lc = component
x_data_all[component] = _load_help("1025_{}_freq%03i{}.npy".format(lc[0], suffix))
for cc in lc[1:]:
tx = _load_help("1025_{}_freq%03i{}.npy".format(cc, suffix))
assert len(tx) == len(x_data_all[component])
x_data_all[component] = [x_data_all[component][i] + tx[i] for i in range(len(tx))]
x_data = {}
for com1 in x_data_all.keys():
# add noise
x_data[com1] = np.empty((nsamples, num_frequency, 10, 10), dtype=np.float64)
if num_frequency == 3:
for i in range(3):
x_data[com1][:, i, :, :] = np.squeeze(x_data_all[com1][i] * k2uk * Tcmb) + noises[i]
else:
x_data[com1][:, 0, :, :] = -np.squeeze(x_data_all[com1][2] * k2uk * Tcmb) - noises[2]
x_data[com1][:, 0, :, :] += np.squeeze(x_data_all[com1][1] * k2uk * Tcmb) + noises[1]
if num_frequency > 1:
x_data[com1][:, 1, :, :] = -np.squeeze(x_data_all[com1][2] * k2uk * Tcmb) - noises[2]
x_data[com1][:, 1, :, :] += np.squeeze(x_data_all[com1][0] * k2uk * Tcmb) + noises[0]
return DataHolder(x_data[component], y_data, self.split_idx)
def get_full_df(self):
df = self.halocounter.get_complete_df().reset_index().rename(columns={"index":"cutout_id"})
for k, idx in self.split_idx.items():
df.loc[idx, "which_set"] = k
return df
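# Putting DataGetter and DataHolder together (component and threshold values are
# illustrative):
#     getter = DataGetter(DATA_PATH, overlap=False)
#     holder = getter.get_data("skymap", thres=5e13, use_noise=True)
#     x_train, y_train = holder.get("train")
#     full_df = getter.get_full_df()  # cutout_id plus which_set assignment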
class IndexMapper:
#map cutout_id to the index location
def __init__(self, overlap=False):
self.overlap = overlap
        o = DataGetter(DATA_PATH, overlap=overlap)  # dir is only stored; pass overlap through to the split
self.split_idx = o.split_idx
self.full_idx = gc.HalosCounter(overlap=overlap).get_complete_df().index
self.split_idx = {"train":self.full_idx[self.split_idx['train']],
"valid":self.full_idx[self.split_idx['valid']],
"test":self.full_idx[self.split_idx['test']],
"full":self.full_idx}
self.reverse_idx = {'train':{}, 'valid':{}, 'test':{}, 'full':{}}
for k in self.split_idx.keys():
idx = self.split_idx[k]
for i, d in enumerate(idx):
self.reverse_idx[k][d] = i
def get(self, i, which='test'):
return self.reverse_idx[which][i]
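# Example: map a cutout_id to its position inside the shuffled test arrays
# (the id 123 is a made-up value):
#     mapper = IndexMapper(overlap=False)
#     pos = mapper.get(123, which="test")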
def eval(models, get_test_func, model_weight_paths=None, pred_only=False):
y_prob_avg = None
y_probs = []
x_test, y_test = get_test_func()
num_nets = len(models)
for i in range(num_nets):
model = models[i]
if model_weight_paths is not None:
model.load_weights(model_weight_paths[i])
y_prob = model.predict(x_test)
y_probs.append(y_prob.squeeze())
y_prob_avg = y_prob if y_prob_avg is None else y_prob + y_prob_avg
y_probs = np.stack(y_probs, 0)
y_prob_avg /= float(num_nets)
y_pred = (y_prob_avg > 0.5).astype('int32').squeeze() # binary classification
if pred_only:
return y_prob_avg
return summary_results_class(y_probs, y_test), y_pred, y_prob_avg, y_test, x_test, models
def summary_results_class(y_probs, y_test, threshold=0.5, log_roc=False, show_f1=True):
"""
y_probs: a list of independent predictions
y_test: true label
threshold: predict the image to be positive when the prediction > threshold
"""
# measure confusion matrix
if show_f1:
threshold, maxf1 = get_F1(y_probs.mean(0),y_test)
threshold = threshold - 1e-7
cm = pd.DataFrame(0, index=['pred0','pred1'], columns=['actual0','actual1'])
cm_std = pd.DataFrame(0, index=['pred0', 'pred1'], columns=['actual0', 'actual1'])
#memorizing the number of samples in each case (true positive, false positive, etc.)
tp_rate, tn_rate = np.zeros(len(y_probs)), np.zeros(len(y_probs))
for actual_label in range(2):
for pred_label in range(2):
cnt = np.zeros(len(y_probs))
for i in range(len(y_probs)):
cnt[i] = np.sum(np.logical_and(y_test == actual_label, (y_probs[i] > threshold) == pred_label))
cm.loc["pred%d"%pred_label,"actual%d"%actual_label] = cnt.mean()
cm_std.loc["pred%d" % pred_label, "actual%d" % actual_label] = cnt.std()
print("Confusion matrix (cnts)",cm)
print("Confusion matrix (stdev of cnts)", cm_std)
#Measuring the true positive and negative rates,
#since the false positive/negative rates are always 1 minus these,
#they are not printed and have the same standard deviation
for i in range(len(y_probs)):
pred_i = y_probs[i] > threshold
tp_rate[i] = np.sum(np.logical_and(y_test==1, pred_i==1)) / np.sum(pred_i==1)
tn_rate[i] = np.sum(np.logical_and(y_test==0, pred_i==0)) / np.sum(pred_i == 0)
print("True Positive (rate): {0:0.4f} ({1:0.4f})".format(tp_rate.mean(), tp_rate.std()))
print("True Negative (rate): {0:0.4f} ({1:0.4f})".format(tn_rate.mean(), tn_rate.std()))
def vertical_averaging_help(xs, ys, xlen=101):
"""
Interpolate the ROC curves to the same grid on x-axis
"""
numnets = len(xs)
xvals = np.linspace(0,1,xlen)
yinterp = np.zeros((len(ys),len(xvals)))
for i in range(numnets):
yinterp[i,:] = np.interp(xvals, xs[i], ys[i])
return xvals, yinterp
fprs, tprs = [], []
for i in range(len(y_probs)):
fpr, tpr, _ = metrics.roc_curve(y_test, y_probs[i], pos_label=1)
fprs.append(fpr)
tprs.append(tpr)
new_fprs, new_tprs = vertical_averaging_help(fprs, tprs)
# measure Area Under Curve (AUC)
y_prob_mean = y_probs.mean(0)
auc = metrics.roc_auc_score(y_test, y_prob_mean)
try:
auc = metrics.roc_auc_score(y_test, y_prob_mean)
print()
print("AUC:", auc)
except Exception as err:
print(err)
auc = np.nan
#Take the percentiles for of the ROC curves at each point
new_tpr_mean, new_tpr_5, new_tpr_95 = new_tprs.mean(0), np.percentile(new_tprs, 95, 0), np.percentile(new_tprs, 5, 0)
# plot ROC curve
plt.figure(figsize=[12,8])
lw = 2
plt.plot(new_fprs, new_tpr_mean, color='darkorange',
lw=lw, label='ROC curve (area = %0.4f)' % metrics.auc(new_fprs, new_tpr_mean))
if len(y_probs) > 1:
plt.plot(new_fprs, new_tpr_95, color='yellow',
lw=lw, label='ROC curve 5%s (area = %0.4f)' % ("%", metrics.auc(new_fprs, new_tpr_95)))
plt.plot(new_fprs, new_tpr_5, color='yellow',
lw=lw, label='ROC curve 95%s (area = %0.4f)' % ("%", metrics.auc(new_fprs, new_tpr_5)))
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate', fontsize=16)
plt.ylabel('True Positive Rate', fontsize=16)
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right", fontsize=16)
plt.grid()
plt.show()
#If log flag is set, plot also the log of the ROC curves within some reasonable range
if log_roc:
# plot ROC curve
plt.figure(figsize=[12,8])
lw = 2
plt.plot(np.log(new_fprs), np.log(new_tpr_mean), color='darkorange',
lw=lw, label='ROC curve (area = %0.4f)' % metrics.auc(new_fprs, new_tpr_mean))
if len(y_probs) > 1:
plt.plot(np.log(new_fprs), np.log(new_tpr_95), color='yellow',
lw=lw, label='ROC curve 5%s (area = %0.4f)' % ("%", metrics.auc(new_fprs, new_tpr_95)))
plt.plot(np.log(new_fprs), np.log(new_tpr_5), color='yellow',
lw=lw, label='ROC curve 95%s (area = %0.4f)' % ("%", metrics.auc(new_fprs, new_tpr_5)))
#plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([-5, -3])
plt.ylim([-1, 0.2])
plt.xlabel('Log False Positive Rate', fontsize=16)
plt.ylabel('Log True Positive Rate', fontsize=16)
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right", fontsize=16)
plt.grid()
plt.show()
return (auc,maxf1) if show_f1 else auc, (tp_rate.mean(),tn_rate.mean()), new_fprs, new_tprs
#=======================================================Prediction criteria here
import numpy as np
import pickle
import pandas as pd
from scipy.optimize import minimize
def _get_Fbeta(y, yhat, beta=1., debug=False, get_raw=False):
TP = ((y == 1) & (yhat == 1)).sum()
FP = ((y == 0) & (yhat == 1)).sum()
TN = ((y == 0) & (yhat == 0)).sum()
FN = ((y == 1) & (yhat == 0)).sum()
if debug: print("TP: {}, FP:{}, TN:{}, FN:{}".format(TP, FP, TN, FN))
if FP+TP == 0 or TP + FN==0 or TP == 0: return -1.
precision = (TP) / (FP + TP).astype(float)
recall = (TP) / (TP + FN).astype(float)
if debug:
print("TP={}; FP={}; TN={}; FN={}; precision={};recall={}".format(((y == 1) & (yhat == 1)).sum(),
((y == 0) & (yhat == 1)).sum(),
((y == 0) & (yhat == 0)).sum(),
((y == 1) & (yhat == 0)).sum(), precision,
recall))
    # standard F-beta: (1 + beta^2) * P * R / (beta^2 * P + R); beta=1 reduces to F1
    fbeta = (1 + beta ** 2) * (precision * recall) / (beta ** 2 * precision + recall)
    if get_raw: return precision, recall, fbeta
    return fbeta
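# Worked example of the score above (threshold already applied to yhat):
#     y    = np.array([1, 1, 0, 0, 1])
#     yhat = np.array([1, 0, 0, 1, 1])
#     # TP=2, FP=1, FN=1 -> precision=2/3, recall=2/3, F1 = 2*P*R/(P+R) = 2/3
#     _get_Fbeta(y, yhat)  # ~0.667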
def get_F1(y_pred, y, xlim=None, method='cnn', mass_thresh='5e13', plot=True,
save_path=None, xlabel=None, get_raw=False, font=DEFAULT_FONT):
plt.rcParams.update({'font.size': font})
if xlim is None:
xlim = (0, 0.997)
x = np.linspace(xlim[0], xlim[1])
elif isinstance(xlim, tuple):
x = np.linspace(xlim[0], xlim[1])
else:
x = xlim
Fscore = lambda x: _get_Fbeta(y, (y_pred > x).astype(int))
y = np.asarray([Fscore(xx) for xx in x]).clip(0.)
if plot:
f = plt.figure(figsize=(8, 5))
plt.plot(x, y)
plt.xlim(x[0], x[-1])
if xlabel:
plt.xlabel(xlabel)
else:
plt.xlabel('%s Threshold' % ("CNN Prob" if method == 'cnn' else "MF S/N"))
plt.ylabel('F1 Score', fontsize=font)
if save_path is not None: plt.savefig(save_path, dpi=500, bbox_inches='tight')
plt.show(block=True)
if get_raw: return x, y
return x[np.argmax(y)], np.max(y)
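# Typical calls (y_prob is the ensemble probability, y the binary labels):
#     best_thresh, best_f1 = get_F1(y_prob, y, plot=False)
#     thresholds, f1_curve = get_F1(y_prob, y, get_raw=True, plot=False)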
def stack_F1(xmf, ymf, xcnn, ycnn, save_path=None, font=DEFAULT_FONT, title="", hist={}, nxbins=20):
lns = []
fig = plt.figure(figsize=(8,5))
ax1 = fig.add_subplot(111)
ax2 = ax1.twiny()
plt.rcParams.update({'font.size': font})
lns.extend(ax1.plot(xmf, ymf, label='MF', color='purple'))
ax1.set_xlabel("MF S/N Ratio")
ax1.set_ylabel("F1 Score")
lns.extend(ax2.plot(xcnn, ycnn, label='CNN', color='black'))
ax2.set_xlabel("CNN Prob")
import matplotlib.patches as mpatches
if 'MF' in hist:
assert 'CNN' in hist
ax12 = ax1.twinx()
bins = np.linspace(*ax1.get_xlim(), num=nxbins)
#ax12.set_ylim(0, 100000)
ax12.hist(hist['MF'][~pd.isnull(hist['MF'])], alpha=0.3, bins=bins, color='purple',
#weights=np.ones(len(hist['MF']))/len(hist['MF'])
)
lns.append(mpatches.Patch(color='purple', label='MF Score Dist.', alpha=0.3))
ax12.set_yscale('log')
ax22 = ax2.twinx()
bins = np.linspace(*ax2.get_xlim(), num=nxbins)
#ax22.set_ylim(0, 100000)
ax22.hist(hist['CNN'], alpha=0.3, bins=bins, color='black',
#weights=np.ones(len(hist['CNN']))/len(hist['CNN'])
)
lns.append(mpatches.Patch(color='black', label='CNN Score Dist.', alpha=0.3))
ax22.set_yscale('log')
if ax12.get_ylim()[1] > ax22.get_ylim()[1]:
ax22.set_ylim(ax12.get_ylim())
else:
ax12.set_ylim(ax22.get_ylim())
#ylim1 = ax12.get_ylim()
#ylim2 = ax22.get_ylim()
#ylim = (min(ylim1[0], ylim2[0]), max(ylim1[1], ylim2[1]))
#print(ylim)
#for _temp in [ax12, ax22]:
#_temp.set_ylim(ylim)
#_temp.set_yscale('log')
ax12.set_ylabel("Counts", fontsize=font)
#ax12.set_yscale('log')
labs = [l.get_label() for l in lns]
plt.title(title)
plt.legend(lns, labs, loc='lower center', prop={"size":font-8})
if save_path is not None: plt.savefig(save_path, dpi=500, bbox_inches="tight")
return
def get_F1_CNN_and_MF(vdf, col_cnn='score_wdust (trained>%s)', col_mf ='mf_peaksig', col_label='Truth(>%s)',
mass_thresh='5e13', method='and', save_path=None, font=DEFAULT_FONT):
plt.rcParams.update({'font.size': font})
import itertools
if mass_thresh == '5e13':
cnn_range = (0, 0.997)
mf_range = (3, 15)
else:
#cnn_range = (0.4, 0.8)
#mf_range = (3, 15)
cnn_range = (0.2, 0.9)
mf_range = (3, 25)
cnn_range = np.linspace(cnn_range[0], cnn_range[1])
mf_range = np.linspace(mf_range[0], mf_range[1])
#criteria = itertools.product(cnn_range, mf_range)
criteria = [(c,m) for c in cnn_range for m in mf_range]
if method == 'or':
Fscore = lambda cc, mc: _get_Fbeta(vdf[col_label], ((vdf[col_cnn] > cc) | (vdf[col_mf] > mc)).astype(int))
elif method == 'and':
Fscore = lambda cc, mc: _get_Fbeta(vdf[col_label], ((vdf[col_cnn] > cc) & (vdf[col_mf] > mc)).astype(int))
elif method == 'rankproduct':
rnk_cnn = vdf[col_cnn].rank() / len(vdf)
rnk_mf = vdf[col_mf].rank() / float(vdf[col_mf].count())
return get_F1(rnk_cnn * rnk_mf, vdf[col_label], xlim=(0.7, .985), xlabel='rank product', save_path=save_path, font=font)
cnn_x = np.asarray([c[0] for c in criteria])
mf_y = np.asarray([c[1] for c in criteria])
vals = np.asarray([Fscore(cc,mc) for cc,mc in criteria])
cm = plt.cm.get_cmap('YlGn')
sc = plt.scatter(cnn_x, mf_y, c=vals, cmap=cm)
plt.scatter(*criteria[np.argmax(vals)], s=100, c='black', marker='x', linewidth=3)
cbar = plt.colorbar(sc)
cbar.set_label("F1 Score", rotation=270, labelpad=20)
plt.xlabel("CNN Threshold")
plt.ylabel("MF Threshold")
if save_path is not None: plt.savefig(save_path, dpi=500, bbox_inches="tight")
return criteria[np.argmax(vals)], np.max(vals)
import glob
import ipdb
class PytorchResultReader(object):
def __init__(self, data_dir = CNN_MODEL_OUTPUT_DIR,
exp_name="ratio1-20_convbody=R-50-C4_SGD_lr=0.005_wd=0.003_steps=1000-4000_comp=skymap",
xlim=(0.5, 0.7)):
self.data_dir = data_dir
if exp_name is None:
exp_name = [f for f in os.listdir(self.data_dir) if "_lr" in f and '_wd' in f]
self.exp_names = [exp_name] if isinstance(exp_name,str) else exp_name
labels = pd.read_pickle(FULL_DATA_LABEL_PATH)
test_labels, val_labels = labels[labels['which'] == 'test'], labels[labels['which'] == 'valid']
np.random.seed(0)
self.labels = test_labels.iloc[np.random.permutation(len(test_labels))]
np.random.seed(0)
self.val_labels = val_labels.iloc[np.random.permutation(len(val_labels))]
self.xlim = xlim
def _read_one(self, exp_name, xlim=None):
dir_path = os.path.join(self.data_dir, exp_name)
fs = sorted(glob.glob(os.path.join(dir_path, 'results', 'epoch*.pkl')),
key=lambda x: int(x.replace(".pkl", "").split("epoch")[1]))
if len(fs) ==0: return None, None, None
        dfs = {w: pd.DataFrame(columns=['acc', 'loss', 'F1', "F1_thres"], dtype=float)
import pandas as pd
import os
import sys
sys.path.append('..')
from subprocess import getstatusoutput
from gpu import config
class DataBase(object):
""" An abstract class for datasets.
"""
def __init__(self, file, reset=False):
self.name = 'None'
self.file = file
self.columns = []
    def set(self, nr, key, value, save=True):
        # positional row update; avoids chained-indexing assignment that may not persist
        self.df.iloc[nr, self.df.columns.get_loc(key)] = value
        if save:
            self.local()
def local(self):
self.df.to_csv(self.file, index=None, header=True)
def __getitem__(self, index):
return self.df.iloc[index].values
def __len__(self):
return self.df.shape[0]
def __new__(cls, *args, **kwargs):
"""singleton mode
"""
if not hasattr(cls, '_instance'):
cls._instance = super().__new__(cls)
return cls._instance
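# Because of the singleton __new__ above, repeated construction yields the same
# object (illustrative):
#     a = GpuData('gpu.csv')
#     b = GpuData('gpu.csv')
#     assert a is b  # __init__ still re-runs on the shared instance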
class _GpuCore(object):
""" GPU information for each GPU.
include gpu memary in using/free/total
and each process on this gpu
"""
def __init__(self, nr=-1):
self.nr = nr
# procs --> precesses
# for each processing:
# pid : this processing pid
# name : command line
# gpu_mem :
self.procs = []
self.total = 0
self.used = 0
self.free = 0
def data(self):
total = int(self.total.split()[0].strip())
used = int(self.used.split()[0].strip())
free = int(self.free.split()[0].strip())
processes = [[x['pid'], x['name'],
int(x['gpu_mem'].split()[0].strip()) ] for x in self.procs]
return total, used, free, processes
def updata(self):
''' console command : sudo nvidia-smi -q -i 5 -d PIDS,MEMORY
output:
GPU 00000000:0B:00.0
FB Memory Usage
Total : 11172 MiB
Used : 10795 MiB
Free : 377 MiB
Processes
Process ID : 44070
Type : C
Name : /home/ljg/anaconda3/envs/tensorflow/bin/python
Used GPU Memory : 10785 MiB
'''
stat, output = getstatusoutput('nvidia-smi -q -i %d -d PIDS,MEMORY' % self.nr)
if stat == 0:
output = [x.strip() for x in output.strip().split('\n')]
print(output)
mem_inx = output.index('FB Memory Usage')
# memeary
self.total = output[mem_inx+1].split(':')[-1].strip() # 11172 MiB
self.used = output[mem_inx+2].split(':')[-1].strip()
self.free = output[mem_inx+3].split(':')[-1].strip()
# processes
try:
proc_inx = output.index('Processes')
for inx in range(proc_inx+1, len(output), 4):
# find a process
if output[inx].split(':')[0].strip() == 'Process ID':
P = {}
P['pid'] = int(output[inx].split(':')[1].strip())
P['name'] = output[inx+2].split(':')[1].strip()
P['gpu_mem'] = output[inx+3].split(':')[1].strip()
self.procs.append(P)
except Exception as e:
# No processes
print(e)
pass
# updata GpuData
pass
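# Rough usage of a single-GPU probe (requires nvidia-smi on the host):
#     core = _GpuCore(nr=0)
#     core.updata()  # parses `nvidia-smi -q -i 0 -d PIDS,MEMORY`
#     total, used, free, procs = core.data()  # MiB values plus [pid, name, gpu_mem] rows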
class GPUs(DataBase):
""" runtime GPU status.
"""
def __init__(self):
super(GPUs, self).__init__(None, reset=False)
self._gpus = []
for i in range(config.NR_GPU):
gpu = _GpuCore(nr=i)
gpu.updata()
self._gpus.append(gpu)
def __getitem__(self, inx):
return self._gpus[inx]
def updata(self):
for i in range(config.NR_GPU):
self._gpus[i].updata()
class GpuData(DataBase):
def __init__(self, file, reset=False):
super(GpuData, self).__init__(file, reset)
self.name = 'gpu database'
self.file = file
# GPU status
# nr : Nomber of gpu range from 0 to 9
# status : free/requested/using/release/other
# onwer : who is using this gpu.
# start. : time of requesting this gpu
# end. : time of auto free this gpu
# why. : why auto free this gpu, [requested, released]
#
self.columns = ['nr', 'status', 'onwer', 'start', 'end', 'why']
if not reset:
if not os.path.exists(self.file):
self.reset()
else:
self.df = pd.read_csv(file)
else:
self.reset()
def reset(self):
""" Reset gpu database """
if os.path.exists(self.file):
os.remove(self.file)
        self.df = pd.DataFrame(columns=self.columns)
# pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.core.common import is_categorical_dtype
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
from pandas.tslib import NaT
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path,write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
reader_114 = StataReader(self.dta1_114)
with warnings.catch_warnings(record=True) as w:
parsed_114_data = reader_114.data()
reader_114 = StataReader(self.dta1_114)
parsed_114_read = reader_114.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
reader_114 = StataReader(self.dta1_114)
parsed_114 = reader_114.read()
reader_117 = StataReader(self.dta1_117)
parsed_117 = reader_117.read()
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
tm.assert_equal(len(w), 3)
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self.dta3_117)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta4(self):
parsed_113 = self.read_dta(self.dta4_113)
parsed_114 = self.read_dta(self.dta4_114)
parsed_115 = self.read_dta(self.dta4_115)
parsed_117 = self.read_dta(self.dta4_117)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_read_write_dta10(self):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer', 'floating',
'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = pd.DataFrame(np.random.randn(5,4), columns=list('abcd'))
df.ix[2, 'a':'c'] = np.nan
df_copy = df.copy()
df.to_stata('test.dta', write_index=False)
tm.assert_frame_equal(df, df_copy)
def test_encoding(self):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
if compat.PY3:
expected = raw.kreis1849[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, compat.string_types)
else:
expected = raw.kreis1849.str.decode("latin-1")[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, unicode)
with tm.ensure_clean() as path:
encoded.to_stata(path,encoding='latin-1', write_index=False)
reread_encoded = read_stata(path, encoding='latin-1')
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
columns=['good', 'b_d', '_8number', 'astringwithmorethan32characters_'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
original = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_1',
'astringwithmorethan32characters_2',
'+',
'-',
'short',
'delete'])
formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_',
'_0astringwithmorethan32character',
'_',
'_1_',
'_short',
'_delete'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
tm.assert_equal(len(w), 1) # should get a warning for that format.
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta13(self):
s1 = Series(2**9, dtype=np.int16)
s2 = Series(2**17, dtype=np.int32)
s3 = Series(2**33, dtype=np.int64)
original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
original.index.name = 'index'
formatted = original
formatted['int64'] = formatted['int64'].astype(np.float64)
with | tm.ensure_clean() | pandas.util.testing.ensure_clean |
import tensorflow as tf
import pandas as pd
import random
import numpy as np
import tensorflow.keras.backend as K
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.neural_network import MLPClassifier
class Main:
if __name__ == '__main__':
usersToTrainLabelEncoder = pd.read_csv('data/featureset.csv', usecols=['USER_ID'])
print(usersToTrainLabelEncoder)
ROW_COUNT = 4842367
ALL_INDICES = [i for i in range(ROW_COUNT)]
random.shuffle(ALL_INDICES)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 200)
data = pd.read_csv('data/output.csv', nrows=200000)
data = data.drop(columns=["DATASET", "SENTENCE_ID"])
train, test = train_test_split(data, test_size=0.01, shuffle=True)
train_current_key_code=pd.get_dummies(train['KEYCODE'], prefix='current')
train_prev_current_key_code= pd.get_dummies(train['KEYCODE_PREV'], prefix='prev')
train_tri_current_key_code= | pd.get_dummies(train['KEYCODE_TRI'], prefix='tri') | pandas.get_dummies |
from contextlib import nullcontext as does_not_raise
from functools import partial
import pandas as pd
from pandas.testing import assert_series_equal
from solarforecastarbiter import datamodel
from solarforecastarbiter.reference_forecasts import persistence
from solarforecastarbiter.conftest import default_observation
import pytest
def load_data_base(data, observation, data_start, data_end):
# slice doesn't care about closed or interval label
# so here we manually adjust start and end times
if 'instant' in observation.interval_label:
pass
elif observation.interval_label == 'ending':
data_start += pd.Timedelta('1s')
elif observation.interval_label == 'beginning':
data_end -= pd.Timedelta('1s')
return data[data_start:data_end]
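# Minimal, hedged sketch of the interval-label boundary adjustment used above, on
# invented data: 'ending' labels exclude the left edge, 'beginning' labels the right.
def _interval_label_slicing_example():
    index = pd.date_range(start='20190404 1200', periods=13, freq='5min')
    data = pd.Series(100., index=index)
    start, end = index[0], index[-1]
    ending_slice = data[start + pd.Timedelta('1s'):end]
    beginning_slice = data[start:end - pd.Timedelta('1s')]
    return ending_slice, beginning_slice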
@pytest.fixture
def powerplant_metadata():
"""1:1 AC:DC"""
modeling_params = datamodel.FixedTiltModelingParameters(
ac_capacity=200, dc_capacity=200, temperature_coefficient=-0.3,
dc_loss_factor=3, ac_loss_factor=0,
surface_tilt=30, surface_azimuth=180)
metadata = datamodel.SolarPowerPlant(
name='Albuquerque Baseline', latitude=35.05, longitude=-106.54,
elevation=1657.0, timezone='America/Denver',
modeling_parameters=modeling_params)
return metadata
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190404 1400'),
('ending', 'right', '20190404 1400'),
('instant', None, '20190404 1359')
])
def test_persistence_scalar(site_metadata, interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = | pd.Timestamp('20190404 1300', tz=tz) | pandas.Timestamp |
import gzip
import pickle5 as pickle
# import pickle
from collections import defaultdict
import numpy as np
import pandas as pd
import os
from copy import deepcopy
import datetime
import neat
from tensorflow.python.framework.ops import default_session
from scipy.optimize import curve_fit
from ongoing.prescriptors.base import BasePrescriptor, PRED_CASES_COL, CASES_COL, NPI_COLUMNS, NPI_MAX_VALUES
import ongoing.prescriptors.base as base
path = '5days-results-2d-1-hidden'
num_checkpoint = 26
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
CHECKPOINTS_PREFIX = os.path.join(ROOT_DIR, 'neat-checkpoint-')
# CONFIG_FILE = os.path.join(ROOT_DIR, '{}/config-prescriptor-multiobjective'.format(path))
CONFIG_FILE = os.path.join(ROOT_DIR, '{}/config-prescriptor-{}'.format(path, num_checkpoint))
TMP_PRED_FILE_NAME = os.path.join(ROOT_DIR, 'tmp_predictions_for_prescriptions', 'preds.csv')
TMP_PRESCRIPTION_FILE = os.path.join(ROOT_DIR, 'tmp_prescription.csv')
# Number of days the prescriptors will look at in the past.
# Larger values here may make convergence slower, but give
# prescriptors more context. The number of inputs of each neat
# network will be NB_LOOKBACK_DAYS * (NPI_COLUMNS + 1) + NPI_COLUMNS.
# The '1' is for previous case data, and the final NPI_COLUMNS
# is for IP cost information.
NB_LOOKBACK_DAYS = 21
# Number of countries to use for training. Again, lower numbers
# here will make training faster, since there will be fewer
# input variables, but could potentially miss out on useful info.
NB_EVAL_COUNTRIES = 10
# Number of prescriptions to make per country.
# This can be set based on how many solutions in PRESCRIPTORS_FILE
# we want to run and on time constraints.
NB_PRESCRIPTIONS = 10
# Number of days to fix prescribed IPs before changing them.
# This could be a useful toggle for decision makers, who may not
# want to change policy every day. Increasing this value also
# can speed up the prescriptor, at the cost of potentially less
# interesting prescriptions.
ACTION_DURATION = 14
# Range of days the prescriptors will be evaluated on.
# To save time during training, this range may be significantly
# shorter than the maximum days a prescriptor can be evaluated on.
EVAL_START_DATE = '2020-08-01'
EVAL_END_DATE = '2020-08-02'
# Maximum number of generations to run (unlimited if None)
NB_GENERATIONS = 200
# Path to file containing neat prescriptors. Here we simply use a
# recent checkpoint of the population from train_prescriptor.py,
# but this is likely not the most complementary set of prescriptors.
# Many approaches can be taken to generate/collect more diverse sets.
# Note: this set can contain up to 10 prescriptors for evaluation.
# PRESCRIPTORS_FILE = os.path.join(ROOT_DIR, '{}/neat-checkpoint-{}'.format(path, num_checkpoint))
PRESCRIPTORS_FILE = os.path.join(ROOT_DIR, '{}/neat-checkpoint-{}_short_pickle4'.format(path, num_checkpoint))
def dominates(one, other):
"""Return true if each objective of *one* is not strictly worse than
the corresponding objective of *other* and at least one objective is
strictly better.
"""
not_equal = False
for self_wvalue, other_wvalue in zip(one, other):
if self_wvalue > other_wvalue:
not_equal = True
elif self_wvalue < other_wvalue:
return False
return not_equal
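# Minimal, hedged illustration of the dominance test above: fitness tuples are compared
# element-wise with larger values being better; the tuples below are invented and mimic
# the negated costs used in this module.
def _dominates_example():
    assert dominates((-1.0, -2.0), (-1.0, -3.0))      # equal first objective, better second
    assert not dominates((-1.0, -3.0), (-1.0, -2.0))  # strictly worse in one objective
    assert not dominates((-1.0, -2.0), (-1.0, -2.0))  # identical tuples never dominate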
def sortNondominatedNSGA2(pop_arr, k, first_front_only=False):
"""Sort the first *k* *individuals* into different nondomination levels
using the "Fast Nondominated Sorting Approach" proposed by Deb et al.,
see [Deb2002]_. This algorithm has a time complexity of :math:`O(MN^2)`,
where :math:`M` is the number of objectives and :math:`N` the number of
individuals.
:param individuals: A list of individuals to select from.
:param k: The number of individuals to select.
:param first_front_only: If :obj:`True` sort only the first front and
exit.
:returns: A list of Pareto fronts (lists), the first list includes
nondominated individuals.
.. [Deb2002] Deb, Pratab, Agarwal, and Meyarivan, "A fast elitist
non-dominated sorting genetic algorithm for multi-objective
optimization: NSGA-II", 2002.
"""
if k == 0:
return []
map_fit_ind = defaultdict(list)
for ind in pop_arr:
map_fit_ind[ind.fitness_mult].append(ind)
fits = list(map_fit_ind.keys())
current_front = []
next_front = []
dominating_fits = defaultdict(int)
dominated_fits = defaultdict(list)
# Rank first Pareto front
for i, fit_i in enumerate(fits):
for fit_j in fits[i + 1:]:
if dominates(fit_i, fit_j):
dominating_fits[fit_j] += 1
dominated_fits[fit_i].append(fit_j)
elif dominates(fit_j, fit_i):
dominating_fits[fit_i] += 1
dominated_fits[fit_j].append(fit_i)
if dominating_fits[fit_i] == 0:
current_front.append(fit_i)
fronts = [[]]
for fit in current_front:
fronts[-1].extend(map_fit_ind[fit])
pareto_sorted = len(fronts[-1])
# Rank the next front until all individuals are sorted or
# the given number of individual are sorted.
if not first_front_only:
N = min(len(pop_arr), k)
while pareto_sorted < N:
fronts.append([])
for fit_p in current_front:
for fit_d in dominated_fits[fit_p]:
dominating_fits[fit_d] -= 1
if dominating_fits[fit_d] == 0:
next_front.append(fit_d)
pareto_sorted += len(map_fit_ind[fit_d])
fronts[-1].extend(map_fit_ind[fit_d])
current_front = next_front
next_front = []
return fronts
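# Minimal, hedged usage sketch: individuals only need a hashable ``fitness_mult`` tuple;
# the dummy class and fitness values below are invented for illustration.
def _sort_nondominated_example():
    class _Ind:
        def __init__(self, fitness_mult):
            self.fitness_mult = fitness_mult
    pop = [_Ind((1.0, 2.0)), _Ind((2.0, 1.0)), _Ind((0.5, 0.5))]
    fronts = sortNondominatedNSGA2(pop, k=len(pop))
    # fronts[0] holds the two mutually nondominated individuals, fronts[1] the dominated one
    return fronts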
def assignCrowdingDist(individuals):
"""Assign a crowding distance to each individual's fitness.
It is done per front.
"""
if len(individuals) == 0:
return
distances = [0.0] * len(individuals)
crowd = [(ind.fitness_mult, i) for i, ind in enumerate(individuals)]
nobj = len(individuals[0].fitness_mult)
for i in range(nobj):
crowd.sort(key=lambda element: element[0][i])
distances[crowd[0][1]] = float("inf")
distances[crowd[-1][1]] = float("inf")
if crowd[-1][0][i] == crowd[0][0][i]:
continue
norm = nobj * float(crowd[-1][0][i] - crowd[0][0][i])
for prev, cur, next in zip(crowd[:-2], crowd[1:-1], crowd[2:]):
distances[cur[1]] += (next[0][i] - prev[0][i]) / norm
# find max and min distance
max_val = -float("inf")
min_val = float("inf")
flag_plus_inf = False
flag_minus_inf = False
for dist in distances:
if dist != float("inf") and max_val < dist:
max_val = dist
pass
if dist != -float("inf") and min_val > dist:
min_val = dist
pass
if dist == float("inf"):
flag_plus_inf = True
elif dist == -float("inf"):
flag_minus_inf = True
pass
# set values equal to inf to be max + 0.5
# set values equal to -inf to be max - 0.5
# and rescale the rest
if flag_plus_inf:
max_val += 0.5
if flag_minus_inf:
min_val -= 0.5
for i in range(0, len(distances)):
if distances[i] == float("inf"):
distances[i] = 1.
elif distances[i] == -float("inf"):
distances[i] = 0.
else:
distances[i] = (distances[i] - min_val) / (max_val - min_val)
pass
pass
for i, dist in enumerate(distances):
individuals[i].crowding_dist = dist / 2
pass
pass
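# Minimal, hedged usage sketch with invented fitness values: after the call each
# individual carries a ``crowding_dist`` in [0, 0.5], and the boundary points of the
# front receive the largest value.
def _crowding_distance_example():
    class _Ind:
        def __init__(self, fitness_mult):
            self.fitness_mult = fitness_mult
    front = [_Ind((0.0, 3.0)), _Ind((1.0, 2.0)), _Ind((3.0, 0.0))]
    assignCrowdingDist(front)
    return [ind.crowding_dist for ind in front]  # -> [0.5, 0.0, 0.5]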
def get_best_n_points(n, x_arr, y_arr):
# 1. fit the curve
# define the true objective function
def objective(x, a, b, c):
return a + b / (c - x)
# fit curve
popt, _ = curve_fit(objective, x_arr, y_arr)
# get coefficients
a, b, c = popt
# define a sequence of inputs between the smallest and largest known inputs
x_line = np.arange(min(x_arr), max(x_arr), 1)
# calculate the output for the range
y_line = objective(x_line, a, b, c)
# 2. find arc length
arc_len_arr = []
for pos in range(0, len(x_line) - 1):
p1 = np.array([x_line[pos], y_line[pos]])
p2 = np.array([x_line[pos + 1], y_line[pos + 1]])
arc_len_arr.append(np.linalg.norm(p2 - p1))
arc_len_arr = np.array(arc_len_arr)
# distance delta
d = sum(arc_len_arr) / (n-1)
# cumul_sum of art length
arc_len_arr_cum = np.cumsum(arc_len_arr)
# 3. choose ref. points
# positions of reference points
points_pos = [0]
for i in range(1, (n-1)):
dist = abs(arc_len_arr_cum - i * d)
points_pos.append(np.argmin(dist) + 1)
pass
points_pos.append(len(x_line) - 1)
ref_points = np.array([x_line[points_pos], y_line[points_pos]]).T
# 4. approximate ref. points
all_my_points = np.array([x_arr, y_arr]).T
chosen_points = []
for ref_point in ref_points:
dist = np.linalg.norm((all_my_points - ref_point), axis=1)
pos = np.argmin(dist)
chosen_points.append(pos)
pass
ref_points_pos = points_pos
return chosen_points
class Neat(BasePrescriptor):
def __init__(self, seed=base.SEED, eval_start_date=EVAL_START_DATE, eval_end_date=EVAL_END_DATE,
nb_eval_countries=NB_EVAL_COUNTRIES, nb_lookback_days=NB_LOOKBACK_DAYS, nb_prescriptions=NB_PRESCRIPTIONS, nb_generations=NB_GENERATIONS,
action_duration=ACTION_DURATION, config_file=CONFIG_FILE, prescriptors_file=PRESCRIPTORS_FILE, hist_df=None, verbose=True):
super().__init__(seed=seed)
self.eval_start_date = pd.to_datetime(eval_start_date, format='%Y-%m-%d')
self.eval_end_date = pd.to_datetime(eval_end_date, format='%Y-%m-%d')
self.nb_eval_countries = nb_eval_countries
self.nb_lookback_days = nb_lookback_days
self.nb_prescriptions = nb_prescriptions
self.nb_generations = nb_generations
self.action_duration = action_duration
self.config_file = config_file
self.prescriptors_file = prescriptors_file
self.hist_df = hist_df
self.verbose = verbose
def fit(self, hist_df=None):
if hist_df is not None:
self.hist_df = hist_df
# As a heuristic, use the top NB_EVAL_COUNTRIES w.r.t. ConfirmedCases
# so far as the geos for evaluation.
eval_geos = list(self.hist_df.groupby('GeoID').max()['ConfirmedCases'].sort_values(
ascending=False).head(self.nb_eval_countries).index)
if self.verbose:
print("Nets will be evaluated on the following geos:", eval_geos)
# Pull out historical data for all geos
past_cases = {}
past_ips = {}
for geo in eval_geos:
geo_df = self.hist_df[self.hist_df['GeoID'] == geo]
past_cases[geo] = np.maximum(0, np.array(geo_df[CASES_COL]))
past_ips[geo] = np.array(geo_df[NPI_COLUMNS])
# Gather values for scaling network output
ip_max_values_arr = np.array([NPI_MAX_VALUES[ip] for ip in NPI_COLUMNS])
# Load configuration.
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
self.config_file)
# Create the population, which is the top-level object for a NEAT run.
p = neat.Population(config)
if self.verbose:
# Add a stdout reporter to show progress in the terminal.
p.add_reporter(neat.StdOutReporter(show_species_detail=True))
# Add statistics reporter to provide extra info about training progress.
stats = neat.StatisticsReporter()
p.add_reporter(stats)
# Add checkpointer to save population every generation and every 10 minutes.
p.add_reporter(neat.Checkpointer(generation_interval=1,
time_interval_seconds=600,
filename_prefix=CHECKPOINTS_PREFIX))
# Function that evaluates the fitness of each prescriptor model, equal costs
def eval_genomes_multy_ones(genomes, config):
# Every generation sample a different set of costs per geo,
# so that over time solutions become robust to different costs.
cost_df = base.generate_costs(self.hist_df, mode='equal')
cost_df = base.add_geo_id(cost_df)
geo_costs = {}
for geo in eval_geos:
costs = cost_df[cost_df['GeoID'] == geo]
cost_arr = np.array(costs[NPI_COLUMNS])[0]
geo_costs[geo] = cost_arr
# Evaluate each individual
for genome_id, genome in genomes:
if genome.fitness is not None:
continue
# Create net from genome
net = neat.nn.FeedForwardNetwork.create(genome, config)
# Set up dictionary to keep track of prescription
df_dict = {'CountryName': [], 'RegionName': [], 'Date': []}
for ip_col in NPI_COLUMNS:
df_dict[ip_col] = []
# Set initial data
eval_past_cases = deepcopy(past_cases)
eval_past_ips = deepcopy(past_ips)
# Compute prescribed stringency incrementally
stringency = np.zeros(config.genome_config.num_outputs)
# Make prescriptions one day at a time, feeding resulting
# predictions from the predictor back into the prescriptor.
for date in pd.date_range(self.eval_start_date, self.eval_end_date):
date_str = date.strftime("%Y-%m-%d")
# Prescribe for each geo
for geo in eval_geos:
# Prepare input data. Here we use log to place cases
# on a reasonable scale; many other approaches are possible.
X_cases = np.log(eval_past_cases[geo][-self.nb_lookback_days:] + 1)
X_ips = eval_past_ips[geo][-self.nb_lookback_days:]
X_costs = geo_costs[geo]
X = np.concatenate([X_cases.flatten(),
X_ips.flatten(),
X_costs])
# Get prescription
prescribed_ips = net.activate(X)
# Map prescription to integer outputs
prescribed_ips = (prescribed_ips * ip_max_values_arr).round()
# Add it to prescription dictionary
country_name, region_name = (geo.split(' / ') + [np.nan])[:2]
df_dict['CountryName'].append(country_name)
df_dict['RegionName'].append(region_name)
df_dict['Date'].append(date_str)
for ip_col, prescribed_ip in zip(NPI_COLUMNS, prescribed_ips):
df_dict[ip_col].append(prescribed_ip)
# Update stringency. This calculation could include division by
# the number of IPs and/or number of geos, but that would have
# no effect on the ordering of candidate solutions.
stringency += prescribed_ips
# Create dataframe from prescriptions.
pres_df = pd.DataFrame(df_dict)
# Make prediction given prescription for all countries
pred_df = self.get_predictions(self.eval_start_date.strftime("%Y-%m-%d"), date_str, pres_df)
# Update past data with new day of prescriptions and predictions
pres_df = base.add_geo_id(pres_df)
pred_df = base.add_geo_id(pred_df)
new_pres_df = pres_df[pres_df['Date'] == date_str]
new_pred_df = pred_df[pred_df['Date'] == date_str]
for geo in eval_geos:
geo_pres = new_pres_df[new_pres_df['GeoID'] == geo]
geo_pred = new_pred_df[new_pred_df['GeoID'] == geo]
# Append array of prescriptions
pres_arr = np.array([geo_pres[ip_col].values[0] for ip_col in NPI_COLUMNS]).reshape(1, -1)
eval_past_ips[geo] = np.concatenate([eval_past_ips[geo], pres_arr])
# Append predicted cases
eval_past_cases[geo] = np.append(eval_past_cases[geo],
geo_pred[PRED_CASES_COL].values[0])
                # Compute fitness. There are many possibilities for computing fitness and
                # ranking candidates. Here each genome is scored with a tuple of objectives
                # to be maximized: the negated per-NPI prescribed stringencies accumulated
                # above and the negated mean number of predicted new cases (the mean is
                # taken over all geos and days to keep the scale reasonable). Negation turns
                # the underlying minimization into a maximization for the non-dominated
                # sorting used during selection. Note that the degenerate solution of all
                # ips 0, i.e. zero stringency, is still a corner of this objective space,
                # so a different formulation may be required for more interesting behavior.
new_cases = pred_df[PRED_CASES_COL].mean().mean()
fitness_mult = list(-stringency)
fitness_mult.append(-new_cases)
genome.fitness_mult = tuple(fitness_mult)
if self.verbose:
print('Evaluated Genome', genome_id)
print('New cases:', new_cases)
print('Stringency:', stringency)
print('Fitness:', genome.fitness_mult)
# Function that evaluates the fitness of each prescriptor model, equal costs
def eval_genomes_2d_ones(genomes, config):
# Every generation sample a different set of costs per geo,
# so that over time solutions become robust to different costs.
cost_df = base.generate_costs(self.hist_df, mode='equal')
cost_df = base.add_geo_id(cost_df)
geo_costs = {}
for geo in eval_geos:
costs = cost_df[cost_df['GeoID'] == geo]
cost_arr = np.array(costs[NPI_COLUMNS])[0]
geo_costs[geo] = cost_arr
# Evaluate each individual
for genome_id, genome in genomes:
# Create net from genome
net = neat.nn.FeedForwardNetwork.create(genome, config)
# Set up dictionary to keep track of prescription
df_dict = {'CountryName': [], 'RegionName': [], 'Date': []}
for ip_col in NPI_COLUMNS:
df_dict[ip_col] = []
# Set initial data
eval_past_cases = deepcopy(past_cases)
eval_past_ips = deepcopy(past_ips)
# Compute prescribed stringency incrementally
stringency = 0.
# Make prescriptions one day at a time, feeding resulting
# predictions from the predictor back into the prescriptor.
for date in pd.date_range(self.eval_start_date, self.eval_end_date):
date_str = date.strftime("%Y-%m-%d")
# Prescribe for each geo
for geo in eval_geos:
# Prepare input data. Here we use log to place cases
# on a reasonable scale; many other approaches are possible.
X_cases = np.log(eval_past_cases[geo][-self.nb_lookback_days:] + 1)
X_ips = eval_past_ips[geo][-self.nb_lookback_days:]
X_costs = geo_costs[geo]
X = np.concatenate([X_cases.flatten(),
X_ips.flatten(),
X_costs])
# Get prescription
prescribed_ips = net.activate(X)
# Map prescription to integer outputs
prescribed_ips = (prescribed_ips * ip_max_values_arr).round()
# Add it to prescription dictionary
country_name, region_name = (geo.split(' / ') + [np.nan])[:2]
df_dict['CountryName'].append(country_name)
df_dict['RegionName'].append(region_name)
df_dict['Date'].append(date_str)
for ip_col, prescribed_ip in zip(NPI_COLUMNS, prescribed_ips):
df_dict[ip_col].append(prescribed_ip)
# Update stringency. This calculation could include division by
# the number of IPs and/or number of geos, but that would have
# no effect on the ordering of candidate solutions.
stringency += np.sum(geo_costs[geo] * prescribed_ips)
# Create dataframe from prescriptions.
pres_df = pd.DataFrame(df_dict)
# Make prediction given prescription for all countries
pred_df = self.get_predictions(self.eval_start_date.strftime("%Y-%m-%d"), date_str, pres_df)
# Update past data with new day of prescriptions and predictions
pres_df = base.add_geo_id(pres_df)
pred_df = base.add_geo_id(pred_df)
new_pres_df = pres_df[pres_df['Date'] == date_str]
new_pred_df = pred_df[pred_df['Date'] == date_str]
for geo in eval_geos:
geo_pres = new_pres_df[new_pres_df['GeoID'] == geo]
geo_pred = new_pred_df[new_pred_df['GeoID'] == geo]
# Append array of prescriptions
pres_arr = np.array([geo_pres[ip_col].values[0] for ip_col in NPI_COLUMNS]).reshape(1, -1)
eval_past_ips[geo] = np.concatenate([eval_past_ips[geo], pres_arr])
# Append predicted cases
eval_past_cases[geo] = np.append(eval_past_cases[geo],
geo_pred[PRED_CASES_COL].values[0])
                # Compute fitness. There are many possibilities for computing fitness and
                # ranking candidates. Here each genome is scored with two objectives to be
                # maximized: the negated mean number of predicted new cases (mean over all
                # geos and days, to keep the scale reasonable) and the negated total
                # cost-weighted stringency accumulated above. Negation turns the underlying
                # minimization into a maximization for the non-dominated sorting used during
                # selection. Note that the degenerate solution of all ips 0, i.e. zero
                # stringency, is still a corner of this objective space, so a different
                # formulation may be required for more interesting behavior.
new_cases = pred_df[PRED_CASES_COL].mean().mean()
genome.fitness_mult = (-new_cases, -stringency)
if self.verbose:
print('Evaluated Genome', genome_id)
print('New cases:', new_cases)
print('Stringency:', stringency)
print('Fitness:', genome.fitness_mult)
# Function that evaluates the fitness of each prescriptor model, random costs
def eval_genomes_2d(genomes, config):
# Every generation sample a different set of costs per geo,
# so that over time solutions become robust to different costs.
cost_df = base.generate_costs(self.hist_df, mode='random')
cost_df = base.add_geo_id(cost_df)
geo_costs = {}
for geo in eval_geos:
costs = cost_df[cost_df['GeoID'] == geo]
cost_arr = np.array(costs[NPI_COLUMNS])[0]
geo_costs[geo] = cost_arr
# Evaluate each individual
for genome_id, genome in genomes:
# Create net from genome
net = neat.nn.FeedForwardNetwork.create(genome, config)
# Set up dictionary to keep track of prescription
df_dict = {'CountryName': [], 'RegionName': [], 'Date': []}
for ip_col in NPI_COLUMNS:
df_dict[ip_col] = []
# Set initial data
eval_past_cases = deepcopy(past_cases)
eval_past_ips = deepcopy(past_ips)
# Compute prescribed stringency incrementally
stringency = 0.
# Make prescriptions one day at a time, feeding resulting
# predictions from the predictor back into the prescriptor.
for date in pd.date_range(self.eval_start_date, self.eval_end_date):
date_str = date.strftime("%Y-%m-%d")
# Prescribe for each geo
for geo in eval_geos:
# Prepare input data. Here we use log to place cases
# on a reasonable scale; many other approaches are possible.
X_cases = np.log(eval_past_cases[geo][-self.nb_lookback_days:] + 1)
X_ips = eval_past_ips[geo][-self.nb_lookback_days:]
X_costs = geo_costs[geo]
X = np.concatenate([X_cases.flatten(),
X_ips.flatten(),
X_costs])
# Get prescription
prescribed_ips = net.activate(X)
# Map prescription to integer outputs
prescribed_ips = (prescribed_ips * ip_max_values_arr).round()
# Add it to prescription dictionary
country_name, region_name = (geo.split(' / ') + [np.nan])[:2]
df_dict['CountryName'].append(country_name)
df_dict['RegionName'].append(region_name)
df_dict['Date'].append(date_str)
for ip_col, prescribed_ip in zip(NPI_COLUMNS, prescribed_ips):
df_dict[ip_col].append(prescribed_ip)
# Update stringency. This calculation could include division by
# the number of IPs and/or number of geos, but that would have
# no effect on the ordering of candidate solutions.
stringency += np.sum(geo_costs[geo] * prescribed_ips)
# Create dataframe from prescriptions.
pres_df = | pd.DataFrame(df_dict) | pandas.DataFrame |
import os
from datetime import datetime
import time
import tqdm
import pickle
import pandas as pd
import random
from sklearn.preprocessing import LabelEncoder
import numpy as np
import torch
import math
import copy
import random
from multiprocessing import Pool
import multiprocessing
from collections import Counter
class Preprocess:
def __init__(self,args):
self.args = args
self.train_data = None
self.test_data = None
def get_train_data(self):
return self.train_data
def get_test_data(self):
return self.test_data
def split_data(self, data, ratio=0.7, shuffle=True, seed=0):
"""
split data into two parts with a given ratio.
"""
if self.args.cv_strategy :
if self.args.sep_grade :
# valid_user_path = os.path.join(self.args.data_dir,'cv_strategy',"cv_train_2.pkl")
valid_user_path = os.path.join(self.args.data_dir,"cv_valid_index.pickle")
if os.path.exists(valid_user_path):
with open(valid_user_path,"rb") as file :
valid_idx = pickle.load(file)
data_1 = data[~data['userID'].isin(valid_idx)]
data_2 = data[data['userID'].isin(valid_idx)]
else :
valid_user_path = os.path.join(self.args.data_dir,"cv_valid_index.pickle")
if os.path.exists(valid_user_path):
with open(valid_user_path,"rb") as file :
valid_idx = pickle.load(file)
data_1 = data[~data['userID'].isin(valid_idx)]
data_2 = data[data['userID'].isin(valid_idx)]
# else :
# train_user_path = os.path.join(self.args.data_dir,"cv_train_index.pickle")
# valid_user_path = os.path.join(self.args.data_dir,"cv_valid_index.pickle")
# with open(train_user_path,"rb") as file :
# train_idx = pickle.load(file)
# with open(valid_user_path,"rb") as file :
# valid_idx = pickle.load(file)
# data_1 = data[data['userID'].isin(train_idx)]
# data_2 = data[data['userID'].isin(valid_idx)]
else :
idx_list = list(set(data['userID']))
size = int(len(idx_list) * ratio)
train_idx = random.sample(idx_list,size)
data_1 = data[data['userID'].isin(train_idx)]
data_2 = data[~data['userID'].isin(train_idx)]
return data_1, data_2
def __save_labels(self, encoder, name):
le_path = os.path.join(self.args.asset_dir, name + '_classes.npy')
np.save(le_path, encoder.classes_)
def __preprocessing(self, df, is_train = True):
# Encoding the Categorical Embedding
cols = list(set(self.args.cate_col + self.args.temp_col)) # not to encode twice
if not os.path.exists(self.args.asset_dir):
os.makedirs(self.args.asset_dir)
other = []
for col in cols:
le = LabelEncoder()
if col in ['assessmentItemID', 'testId', 'KnowledgeTag', 'grade', 'last_problem', 'problem_number'] :
if is_train:
#For UNKNOWN class
a = df[col].unique().tolist()
a = sorted(a) if str(type(a[0])) == "<class 'int'>" else a
a = a + ['unknown']
le.fit(a)
self.__save_labels(le, col)
else:
label_path = os.path.join(self.args.asset_dir,col+'_classes.npy')
le.classes_ = np.load(label_path)
df[col]= df[col].astype(str)
df[col] = df[col].apply(lambda x: x if x in le.classes_ else 'unknown')
                # assume all columns are categorical
df[col]= df[col].astype(str)
test = le.transform(df[col])
df[col] = test
else :
if is_train :
unq = df[col].unique().tolist()
unq = sorted(unq) if str(type(unq[0])) == "<class 'int'>" else unq
other += list(map(lambda x : col+'_'+str(x),unq))
df[col] = df[col].apply(lambda x : col+'_'+str(x))
else :
label_path = os.path.join(self.args.asset_dir,'other_classes.npy')
le.classes_ = np.load(label_path)
df[col]= df[col].astype(str)
df[col] = df[col].apply(lambda x : col+'_'+x)
df[col] = df[col].apply(lambda x: x if x in le.classes_ else 'unknown')
if other :
other += ['unknown']
le = LabelEncoder()
le.fit(other)
self.__save_labels(le, 'other')
label_path = os.path.join(self.args.asset_dir,'other_classes.npy')
if os.path.exists(label_path):
le.classes_ = np.load(label_path)
for col in cols:
if col in ['assessmentItemID', 'testId', 'KnowledgeTag', 'grade', 'last_problem', 'problem_number'] :
continue
else :
df[col]= df[col].astype(str)
test = le.transform(df[col])
df[col] = test
if not is_train and self.args.sep_grade:
ddf = df[df['answerCode']==-1]
df = df[df.set_index(['userID','grade']).index.isin(ddf.set_index(['userID','grade']).index)]
return df
def __feature_engineering(self, df,file_name, is_train):
        data_path = os.path.join(self.args.asset_dir,f"{file_name[:-4]}_FE.pkl") # excluding .csv
if os.path.exists(data_path):
df = pd.read_pickle(data_path)
else :
df.sort_values(by=['userID','Timestamp'], inplace=True)
df['hour'] = df['Timestamp'].dt.hour
df['dow'] = df['Timestamp'].dt.dayofweek
diff = df.loc[:, ['userID','Timestamp']].groupby('userID').diff().fillna(pd.Timedelta(seconds=0))
diff = diff.fillna(pd.Timedelta(seconds=0))
diff = diff['Timestamp'].apply(lambda x: x.total_seconds())
            # time spent solving each item
df['elapsed'] = diff
df['elapsed'] = df['elapsed'].apply(lambda x : x if x <650 and x >=0 else 0)
df['grade']=df['testId'].apply(lambda x : int(x[1:4])//10)
df['mid'] = df['testId'].apply(lambda x : int(x[-3:]))
df['problem_number'] = df['assessmentItemID'].apply(lambda x : int(x[-3:]))
if is_train :
sub_data_path = os.path.join("/opt/ml/input/data/train_dataset/test_data.csv") # .csv빼고
sub_df = pd.read_csv(sub_data_path)
full_df = pd.concat([df,sub_df[sub_df['answerCode']!= -1]])
else :
                sub_data_path = os.path.join(self.args.asset_dir,"train_data_FE.csv") # excluding .csv
sub_df = pd.read_csv(sub_data_path)
full_df = pd.concat([df[df['answerCode']!= -1],sub_df])
correct_t = full_df.groupby(['testId'])['answerCode'].agg(['mean', 'sum'])
correct_t.columns = ["test_mean", 'test_sum']
correct_k = full_df.groupby(['KnowledgeTag'])['answerCode'].agg(['mean', 'sum'])
correct_k.columns = ["tag_mean", 'tag_sum']
correct_a = full_df.groupby(['assessmentItemID'])['answerCode'].agg(['mean', 'sum'])
correct_a.columns = ["ass_mean", 'ass_sum']
correct_p = full_df.groupby(['problem_number'])['answerCode'].agg(['mean', 'sum'])
correct_p.columns = ["prb_mean", 'prb_sum']
correct_h = full_df.groupby(['hour'])['answerCode'].agg(['mean', 'sum'])
correct_h.columns = ["hour_mean", 'hour_sum']
correct_d = full_df.groupby(['dow'])['answerCode'].agg(['mean', 'sum'])
correct_d.columns = ["dow_mean", 'dow_sum']
df = pd.merge(df, correct_t, on=['testId'], how="left")
df = pd.merge(df, correct_k, on=['KnowledgeTag'], how="left")
df = pd.merge(df, correct_a, on=['assessmentItemID'], how="left")
df = pd.merge(df, correct_p, on=['problem_number'], how="left")
df = pd.merge(df, correct_h, on=['hour'], how="left")
df = pd.merge(df, correct_d, on=['dow'], how="left")
o_df = full_df[full_df['answerCode']==1]
x_df = full_df[full_df['answerCode']==0]
elp_k = full_df.groupby(['KnowledgeTag'])['elapsed'].agg('mean').reset_index()
elp_k.columns = ['KnowledgeTag',"tag_elp"]
elp_k_o = o_df.groupby(['KnowledgeTag'])['elapsed'].agg('mean').reset_index()
elp_k_o.columns = ['KnowledgeTag', "tag_elp_o"]
elp_k_x = x_df.groupby(['KnowledgeTag'])['elapsed'].agg('mean').reset_index()
elp_k_x.columns = ['KnowledgeTag', "tag_elp_x"]
df = pd.merge(df, elp_k, on=['KnowledgeTag'], how="left")
df = pd.merge(df, elp_k_o, on=['KnowledgeTag'], how="left")
df = pd.merge(df, elp_k_x, on=['KnowledgeTag'], how="left")
ass_k = full_df.groupby(['assessmentItemID'])['elapsed'].agg('mean').reset_index()
ass_k.columns = ['assessmentItemID',"ass_elp"]
ass_k_o = o_df.groupby(['assessmentItemID'])['elapsed'].agg('mean').reset_index()
ass_k_o.columns = ['assessmentItemID',"ass_elp_o"]
ass_k_x = x_df.groupby(['assessmentItemID'])['elapsed'].agg('mean').reset_index()
ass_k_x.columns = ['assessmentItemID',"ass_elp_x"]
df = pd.merge(df, ass_k, on=['assessmentItemID'], how="left")
df = pd.merge(df, ass_k_o, on=['assessmentItemID'], how="left")
df = | pd.merge(df, ass_k_x, on=['assessmentItemID'], how="left") | pandas.merge |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import necessary packages
import re
import json
import collections
import numpy as np
import pandas as pd
from collections import Counter
def read_txt(path_txt: str) -> np.array:
"""
Read the TXT file and convert it into numpy array
:param path_txt: The path of the TXT file
:return txt: The TXT info as numpy array
"""
txt = pd.read_table(path_txt, encoding='utf-8', keep_default_na=False)
txt = np.array(txt)
return txt
def read_csv(path_txt: str) -> np.array:
"""
Read the CSV file and convert it into numpy array
:param path_txt: The path of the CSV file
:return txt: The CSV info as numpy array
"""
csv = pd.read_csv(path_txt, encoding='utf-8', keep_default_na=False)
csv = np.array(csv)
return csv
def read_xls(path_txt: str) -> np.array:
"""
Read the XLS file and convert it into numpy array
:param path_txt: The path of the XLS file
:return txt: The XLS info as the numpy array
"""
xls = pd.read_excel(path_txt, keep_default_na=False)
xls = np.array(xls)
return xls
def read_xls_sheet(path_txt: str, sheet_name: str) -> np.array:
"""
Read the xls' sheet file and convert it into numpy array
:param sheet_name: The sheet name of the xls file
:param path_txt: The path of the xls file
:return txt: The xls sheet info as numpy array
"""
xls = | pd.read_excel(path_txt, header=None, sheet_name=sheet_name, keep_default_na=False) | pandas.read_excel |
import json
from pathlib import Path
import pandas as pd
from os import path
from collections import Counter
from configs import Level, LEVEL_MAP, PROCESS_METRICS_FIELDS
from db.QueryBuilder import get_level_refactorings
from refactoring_statistics.plot_utils import box_plot_seaborn
from refactoring_statistics.query_utils import retrieve_columns, get_metrics_stable_level
from utils.log import log_init, log_close, log
import datetime
import time
INPUT_DIRECTORY = "results/predictions/reproduction/"
SAVE_DIRECTORY = "results/Evaluation/reproduction/"
# metrics
CLASS_METRICS_Fields = ["classCbo",
# "classLcom", to large for plotting
"classLCC",
"classTCC",
"classRfc",
"classWmc"]
CLASS_ATTRIBUTES_QTY_Fields = ["classUniqueWordsQty",
"classNumberOfMethods",
"classStringLiteralsQty",
"classNumberOfPublicFields",
"classVariablesQty",
# "classLoc" to large for plotting
]
ALL_METRICS = CLASS_METRICS_Fields + CLASS_ATTRIBUTES_QTY_Fields + PROCESS_METRICS_FIELDS
# import all json files in the given directory and return them as pd dataframe
def import_evaluation(dir_path: str):
path_list = Path(dir_path).glob('**/*.json')
evaluation_data = pd.DataFrame()
prediction_data = pd.DataFrame()
for file_path in path_list:
with open(str(file_path), 'r') as file:
current_data = json.load(file)
current_evaluation = json.loads(current_data["test_scores"])
current_predictions = json.loads(current_data["test_results"])
current_predictions["model_name"] = current_evaluation["model_name"]
current_predictions["refactoring_name"] = current_evaluation["refactoring type"]
current_evaluation['level'] = get_refactoring_level(current_evaluation["refactoring type"])
current_predictions['level'] = get_refactoring_level(current_evaluation["refactoring type"])
prediction_data = prediction_data.append(current_predictions, ignore_index=True)
evaluation_data = evaluation_data.append(current_evaluation, ignore_index=True)
return evaluation_data, prediction_data
def extract_predictions(current_prediction_raw):
return list(zip(current_prediction_raw["db_id"].values(), current_prediction_raw["label"].values(),
current_prediction_raw["prediction"].values()))
# extract all test scores from the given data
def extract_columns(data, scores):
return data[["model_name", "refactoring type", "validation_sets", "level"] + scores]
def get_top_k(dict, k):
if len(dict.items()) > 0:
k = min(len(dict.items()), k)
top_k = sorted(dict.items(), key=lambda x: abs(x[1]) if not isinstance(x[1], list) else abs(x[1][0]),
reverse=True)[:k]
return top_k
else:
return []
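# Minimal, hedged illustration with invented scores: entries are ranked by absolute
# magnitude, and list-valued entries by the absolute value of their first element.
def _get_top_k_example():
    scores = {"classCbo": -0.7, "classWmc": 0.2, "classRfc": [0.5, 0.1]}
    return get_top_k(scores, 2)  # -> [('classCbo', -0.7), ('classRfc', [0.5, 0.1])]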
def extract_top_k_feature_importance(data_features, k):
features = data_features["feature_importance"]
features_top_k = []
for index, value in features.items():
current_features = json.loads(value)
current_top_k = get_top_k(current_features, k)
features_top_k.append(current_top_k)
return features_top_k
def extract_top_k_permutation_importance(data_features, k):
features = data_features["permutation_importance"]
features_top_k = []
for index, value in features.items():
current_features = json.loads(value)
for key, current_validation in current_features.items():
current_top_k = get_top_k(current_validation, k)
features_top_k.append(current_top_k)
return features_top_k
def extract_top_k_coef(data_features, k):
features = data_features["feature_coefficients"]
features_top_k = []
for index, value in features.items():
current_features = json.loads(value)
current_top_k = get_top_k(current_features, k)
features_top_k.append(current_top_k)
return features_top_k
def get_refactoring_level(refactoring_name):
for level in Level:
refactorings = LEVEL_MAP[level]
if refactoring_name in refactorings:
return level
return Level.NONE
# extract all feature importances from the given data
# Also enrich the feature importance by filtering for the top 1, 5 and 10 features and filtering for > 1.% features
def add_feature_importances(data, feature_importances):
data_features = extract_columns(data, feature_importances)
# extract top k
for k in [1, 5, 10]:
data_features[f"feature_importance Top-{k}"] = extract_top_k_feature_importance(data_features, k)
data_features[f"feature_coefficients Top-{k}"] = extract_top_k_coef(data_features, k)
data_features[f"permutation_importance Top-{k}"] = extract_top_k_permutation_importance(data_features, k)
return data_features
def extract_feature_importances_statistic(data_features, feature_importances):
columns = []
for k in [1, 5, 10]:
for importance in feature_importances:
columns += [f"{importance} Top-{k}"]
grouped_importances_model_level = data_features.groupby(['model_name', 'level'])[columns].agg(count_appearances)
grouped_importances_model = data_features.groupby(['model_name'])[columns].agg(count_appearances)
return | pd.concat([grouped_importances_model_level, grouped_importances_model]) | pandas.concat |
import json
import networkx as nx
import numpy as np
import os
import pandas as pd
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
from config import logger, config
def read_profile_data():
profile_na = np.zeros(67)
profile_na[0] = -1
profile_na = pd.DataFrame(profile_na.reshape(1, -1))
profile_df = pd.read_csv(config.profile_file)
profile_na.columns = profile_df.columns
profile_df = profile_df.append(profile_na)
return profile_df
def merge_raw_data():
tr_queries = pd.read_csv(config.train_query_file, parse_dates=['req_time'])
te_queries = pd.read_csv(config.test_query_file, parse_dates=['req_time'])
tr_plans = pd.read_csv(config.train_plan_file, parse_dates=['plan_time'])
te_plans = pd.read_csv(config.test_plan_file, parse_dates=['plan_time'])
tr_click = pd.read_csv(config.train_click_file)
trn = tr_queries.merge(tr_click, on='sid', how='left')
trn = trn.merge(tr_plans, on='sid', how='left')
trn = trn.drop(['click_time'], axis=1)
trn['click_mode'] = trn['click_mode'].fillna(0)
tst = te_queries.merge(te_plans, on='sid', how='left')
tst['click_mode'] = -1
df = pd.concat([trn, tst], axis=0, sort=False)
df = df.drop(['plan_time'], axis=1)
df = df.reset_index(drop=True)
df['weekday'] = df['req_time'].dt.weekday
df['day'] = df['req_time'].dt.day
df['hour'] = df['req_time'].dt.hour
df = df.drop(['req_time'], axis=1)
logger.info('total data size: {}'.format(df.shape))
logger.info('data columns: {}'.format(', '.join(df.columns)))
return df
def extract_plans(df):
plans = []
for sid, plan in tqdm(zip(df['sid'].values, df['plans'].values)):
try:
p = json.loads(plan)
for x in p:
x['sid'] = sid
plans.extend(p)
except:
pass
return pd.DataFrame(plans)
def generate_od_features(df):
feat = df[['o','d']].drop_duplicates()
feat = feat.merge(df.groupby('o')[['day', 'hour', 'pid', 'click_mode']].nunique().reset_index(), how='left', on='o')
feat.rename(columns={'day': 'o_nunique_day',
'hour': 'o_nunique_hour',
'pid': 'o_nunique_pid',
'click_mode': 'o_nunique_click'}, inplace=True)
feat = feat.merge(df.groupby('d')[['day', 'hour', 'pid', 'click_mode']].nunique().reset_index(), how='left', on='d')
feat.rename(columns={'day': 'd_nunique_day',
'hour': 'd_nunique_hour',
'pid': 'd_nunique_pid',
'click_mode': 'd_nunique_click'}, inplace=True)
feat = feat.merge(df.groupby(['o', 'd'])[['day', 'hour', 'pid', 'click_mode']].nunique().reset_index(), how='left', on=['o', 'd'])
feat.rename(columns={'day': 'od_nunique_day',
'hour': 'od_nunique_hour',
'pid': 'od_nunique_pid',
'click_mode': 'od_nunique_click'}, inplace=True)
return feat
def generate_pid_features(df):
feat = df.groupby('pid')[['hour', 'day']].nunique().reset_index()
feat.rename(columns={'hour': 'pid_nunique_hour', 'day': 'pid_nunique_day'}, inplace=True)
feat['nunique_hour_d_nunique_day'] = feat['pid_nunique_hour'] / feat['pid_nunique_day']
feat = feat.merge(df.groupby('pid')[['o', 'd']].nunique().reset_index(), how='left', on='pid')
feat.rename(columns={'o': 'pid_nunique_o', 'd': 'pid_nunique_d'}, inplace=True)
feat['nunique_o_d_nunique_d'] = feat['pid_nunique_o'] / feat['pid_nunique_d']
return feat
def generate_od_cluster_features(df):
G = nx.Graph()
G.add_nodes_from(df['o'].unique().tolist())
G.add_nodes_from(df['d'].unique().tolist())
edges = df[['o','d']].apply(lambda x: (x[0],x[1]), axis=1).tolist()
G.add_edges_from(edges)
cluster = nx.clustering(G)
cluster_df = pd.DataFrame([{'od': key, 'cluster': cluster[key]} for key in cluster.keys()])
return cluster_df
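# Minimal, hedged usage sketch with invented od pairs (and assuming the pandas behaviour
# this module already relies on): the three pairs below form a triangle, so every node
# gets a clustering coefficient of 1.0.
def _od_cluster_example():
    demo = pd.DataFrame({'o': ['a', 'a', 'b'], 'd': ['b', 'c', 'c']})
    return generate_od_cluster_features(demo)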
def gen_od_feas(data):
data['o1'] = data['o'].apply(lambda x: float(x.split(',')[0]))
data['o2'] = data['o'].apply(lambda x: float(x.split(',')[1]))
data['d1'] = data['d'].apply(lambda x: float(x.split(',')[0]))
data['d2'] = data['d'].apply(lambda x: float(x.split(',')[1]))
data = data.drop(['o', 'd'], axis=1)
return data
def gen_plan_feas(data):
n = data.shape[0]
mode_list_feas = np.zeros((n, 12))
max_dist, min_dist, mean_dist, std_dist = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
max_price, min_price, mean_price, std_price = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
max_eta, min_eta, mean_eta, std_eta = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
min_dist_mode, max_dist_mode, min_price_mode, max_price_mode, min_eta_mode, max_eta_mode, first_mode = np.zeros(
(n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
mode_texts = []
for i, plan in tqdm(enumerate(data['plans'].values)):
try:
cur_plan_list = json.loads(plan)
except:
cur_plan_list = []
if len(cur_plan_list) == 0:
mode_list_feas[i, 0] = 1
first_mode[i] = 0
max_dist[i] = -1
min_dist[i] = -1
mean_dist[i] = -1
std_dist[i] = -1
max_price[i] = -1
min_price[i] = -1
mean_price[i] = -1
std_price[i] = -1
max_eta[i] = -1
min_eta[i] = -1
mean_eta[i] = -1
std_eta[i] = -1
min_dist_mode[i] = -1
max_dist_mode[i] = -1
min_price_mode[i] = -1
max_price_mode[i] = -1
min_eta_mode[i] = -1
max_eta_mode[i] = -1
mode_texts.append('word_null')
else:
distance_list = []
price_list = []
eta_list = []
mode_list = []
for tmp_dit in cur_plan_list:
distance_list.append(int(tmp_dit['distance']))
if tmp_dit['price'] == '':
price_list.append(0)
else:
price_list.append(int(tmp_dit['price']))
eta_list.append(int(tmp_dit['eta']))
mode_list.append(int(tmp_dit['transport_mode']))
mode_texts.append(
' '.join(['word_{}'.format(mode) for mode in mode_list]))
distance_list = np.array(distance_list)
price_list = np.array(price_list)
eta_list = np.array(eta_list)
mode_list = np.array(mode_list, dtype='int')
mode_list_feas[i, mode_list] = 1
distance_sort_idx = np.argsort(distance_list)
price_sort_idx = np.argsort(price_list)
eta_sort_idx = np.argsort(eta_list)
max_dist[i] = distance_list[distance_sort_idx[-1]]
min_dist[i] = distance_list[distance_sort_idx[0]]
mean_dist[i] = np.mean(distance_list)
std_dist[i] = np.std(distance_list)
max_price[i] = price_list[price_sort_idx[-1]]
min_price[i] = price_list[price_sort_idx[0]]
mean_price[i] = np.mean(price_list)
std_price[i] = np.std(price_list)
max_eta[i] = eta_list[eta_sort_idx[-1]]
min_eta[i] = eta_list[eta_sort_idx[0]]
mean_eta[i] = np.mean(eta_list)
std_eta[i] = np.std(eta_list)
first_mode[i] = mode_list[0]
max_dist_mode[i] = mode_list[distance_sort_idx[-1]]
min_dist_mode[i] = mode_list[distance_sort_idx[0]]
max_price_mode[i] = mode_list[price_sort_idx[-1]]
min_price_mode[i] = mode_list[price_sort_idx[0]]
max_eta_mode[i] = mode_list[eta_sort_idx[-1]]
min_eta_mode[i] = mode_list[eta_sort_idx[0]]
feature_data = pd.DataFrame(mode_list_feas)
feature_data.columns = ['mode_feas_{}'.format(i) for i in range(12)]
feature_data['max_dist'] = max_dist
feature_data['min_dist'] = min_dist
feature_data['mean_dist'] = mean_dist
feature_data['std_dist'] = std_dist
feature_data['max_price'] = max_price
feature_data['min_price'] = min_price
feature_data['mean_price'] = mean_price
feature_data['std_price'] = std_price
feature_data['max_eta'] = max_eta
feature_data['min_eta'] = min_eta
feature_data['mean_eta'] = mean_eta
feature_data['std_eta'] = std_eta
feature_data['max_dist_mode'] = max_dist_mode
feature_data['min_dist_mode'] = min_dist_mode
feature_data['max_price_mode'] = max_price_mode
feature_data['min_price_mode'] = min_price_mode
feature_data['max_eta_mode'] = max_eta_mode
feature_data['min_eta_mode'] = min_eta_mode
feature_data['first_mode'] = first_mode
logger.info('mode tfidf...')
tfidf_enc = TfidfVectorizer(ngram_range=(1, 2))
tfidf_vec = tfidf_enc.fit_transform(mode_texts)
svd_enc = TruncatedSVD(n_components=10, n_iter=20, random_state=2019)
mode_svd = svd_enc.fit_transform(tfidf_vec)
mode_svd = pd.DataFrame(mode_svd)
mode_svd.columns = ['svd_mode_{}'.format(i) for i in range(10)]
data = pd.concat([data, feature_data, mode_svd], axis=1)
data = data.drop(['plans'], axis=1)
return data
def gen_profile_feas(data):
profile_data = read_profile_data()
x = profile_data.drop(['pid'], axis=1).values
svd = TruncatedSVD(n_components=20, n_iter=20, random_state=2019)
svd_x = svd.fit_transform(x)
svd_feas = pd.DataFrame(svd_x)
svd_feas.columns = ['svd_fea_{}'.format(i) for i in range(20)]
svd_feas['pid'] = profile_data['pid'].values
data['pid'] = data['pid'].fillna(-1)
data = data.merge(svd_feas, on='pid', how='left')
return data
def group_weekday_and_hour(row):
if row['weekday'] == 0 or row['weekday'] == 6:
w = 0
else:
w = row['weekday']
if row['hour'] > 7 and row['hour'] < 18: # 7:00 - 18:00
h = row['hour']
elif row['hour'] >= 18 and row['hour'] < 21: # 18:00 - 21:00
h = 1
elif row['hour'] >= 21 or row['hour'] < 6: # 21:00 - 6:00
h = 0
else: # 6:00 - 7:00
h = 2
return str(w) + '_' + str(h)
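# Minimal, hedged illustration: the bucketing above collapses weekends and night hours,
# e.g. a Saturday (weekday 6) at 22:00 maps to the '0_0' bucket.
def _group_weekday_and_hour_example():
    return group_weekday_and_hour({'weekday': 6, 'hour': 22})  # -> '0_0'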
def gen_ratio_feas(data):
data['dist-d-eta'] = data['mean_dist'] / data['mean_eta']
data['price-d-dist'] = data['mean_price'] / data['mean_dist']
data['price-d-eta'] = data['mean_price'] / data['mean_eta']
data['o1-d-d1'] = data['o1'] / data['d1']
data['o2-d-d2'] = data['o2'] / data['d2']
return data
def gen_fly_dist_feas(data):
data['fly-dist'] = ((data['d1'] - data['o1'])**2 + (data['d2'] - data['o2'])**2)**0.5
data['fly-dist-d-dist'] = data['fly-dist'] / data['mean_dist']
data['fly-dist-d-eta'] = data['fly-dist'] / data['mean_eta']
data['price-d-fly-dist'] = data['mean_price'] / data['fly-dist']
return data
def gen_aggregate_profile_feas(data):
aggr = data.groupby('pid')['sid'].agg(['count'])
aggr.columns = ['%s_%s' % ('sid', col) for col in aggr.columns.values]
aggr = aggr.reset_index()
aggr.loc[aggr['pid'] == -1.0,'sid_count'] = 0 # reset in case pid == -1
data = data.merge(aggr, how='left', on=['pid'])
return data
def gen_pid_feat(data):
feat = pd.read_csv(config.pid_feature_file)
data = data.merge(feat, how='left', on='pid')
return data
def gen_od_feat(data):
feat = pd.read_csv(config.od_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d'])
sid = pd.concat((tr_sid, te_sid))
logger.info('sid shape={}'.format(sid.shape))
feat = sid.merge(feat, how='left', on=['o','d']).drop(['o','d'], axis=1)
logger.info('feature shape={}'.format(feat.shape))
logger.info('feature columns={}'.format(feat.columns))
data = data.merge(feat, how='left', on='sid')
click_cols = [c for c in feat.columns if c.endswith('click')]
data.drop(click_cols, axis=1, inplace=True)
return data
def gen_od_cluster_feat(data):
feat = pd.read_csv(config.od_cluster_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d'])
sid = pd.concat((tr_sid, te_sid))
f = feat.copy()
feat = sid.merge(feat, how='left', left_on='o', right_on='od').drop(['od','o'], axis=1)
feat.rename(columns={'cluster': 'o_cluster'}, inplace=True)
feat = feat.merge(f, how='left', left_on='d', right_on='od').drop(['od','d'], axis=1)
feat.rename(columns={'cluster': 'd_cluster'}, inplace=True)
data = data.merge(feat, how='left', on='sid')
return data
def gen_od_eq_feat(data):
data['o1-eq-d1'] = (data['o1'] == data['d1']).astype(int)
data['o2-eq-d2'] = (data['o2'] == data['d2']).astype(int)
data['o-eq-d'] = data['o1-eq-d1']*data['o2-eq-d2']
data['o1-m-o2'] = np.abs(data['o1'] - data['o2'])
data['d1-m-d2'] = np.abs(data['d1'] - data['d2'])
data['od_area'] = data['o1-m-o2']*data['d1-m-d2']
data['od_ratio'] = data['o1-m-o2']/data['d1-m-d2']
return data
def gen_od_mode_cnt_feat(data):
feat = pd.read_csv(config.od_mode_cnt_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d'])
sid = | pd.concat((tr_sid, te_sid)) | pandas.concat |
import numpy as np
import pandas as pd
import scipy.stats as stats
from faker import Faker
from preparation.transformers.base import BaseTransformer
MAPS = {}
class CategoricalTransformer(BaseTransformer):
mapping = None
intervals = None
dtype = None
def __init__(self, anonymize=False, fuzzy=False, clip=False):
self.anonymize = anonymize
self.fuzzy = fuzzy
self.clip = clip
def _get_faker(self):
if isinstance(self.anonymize, (tuple, list)):
category, *args = self.anonymize
else:
category = self.anonymize
args = tuple()
try:
faker_method = getattr(Faker(), category)
def faker():
return faker_method(*args)
return faker
except AttributeError as attrerror:
error = 'Category "{}" couldn\'t be found on faker'.format(self.anonymize)
raise ValueError(error) from attrerror
def _anonymize(self, data):
faker = self._get_faker()
uniques = data.unique()
fake_data = [faker() for x in range(len(uniques))]
mapping = dict(zip(uniques, fake_data))
MAPS[id(self)] = mapping
return data.map(mapping)
@staticmethod
def _get_intervals(data):
frequencies = data.value_counts(dropna=False).reset_index()
# Sort also by index to make sure that results are always the same
name = data.name or 0
sorted_freqs = frequencies.sort_values([name, 'index'], ascending=False)
frequencies = sorted_freqs.set_index('index', drop=True)[name]
start = 0
end = 0
elements = len(data)
intervals = dict()
for value, frequency in frequencies.items():
prob = frequency / elements
end = start + prob
mean = (start + end) / 2
std = (end - mean) / 24
intervals[value] = (start, end, mean, std)
start = end
return intervals
def fit(self, data):
self.mapping = dict()
self.dtype = data.dtype
if isinstance(data, np.ndarray):
data = pd.Series(data)
if self.anonymize:
data = self._anonymize(data)
self.intervals = self._get_intervals(data)
def _get_value(self, category):
start, end, mean, std = self.intervals[category]
min_value = (start - mean) / std
max_value = (end - mean) / std
if self.fuzzy:
return stats.truncnorm.rvs(min_value, max_value, loc=mean, scale=std)
return mean
def transform(self, data):
if not isinstance(data, pd.Series):
data = pd.Series(data)
if self.anonymize:
data = data.map(MAPS[id(self)])
return data.fillna(np.nan).apply(self._get_value).to_numpy()
def _normalize(self, data):
if self.clip:
return data.clip(0, 1)
return np.mod(data, 1)
def reverse_transform(self, data):
if not isinstance(data, pd.Series):
if len(data.shape) > 1:
data = data[:, 0]
data = pd.Series(data)
data = self._normalize(data)
result = pd.Series(index=data.index, dtype=self.dtype)
for category, values in self.intervals.items():
start, end = values[:2]
result[(start < data) & (data < end)] = category
return result
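# Minimal, hedged round-trip sketch with an invented column, assuming the pandas version
# this module was written against (value_counts().reset_index() exposing an 'index' column).
def _categorical_transformer_example():
    column = pd.Series(['a', 'b', 'a', 'c'])
    transformer = CategoricalTransformer()
    transformer.fit(column)
    encoded = transformer.transform(column)            # one float inside each category's interval
    decoded = transformer.reverse_transform(encoded)   # back to 'a', 'b', 'a', 'c'
    return encoded, decoded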
class OneHotEncodingTransformer(BaseTransformer):
dummy_na = None
dummies = None
def __init__(self, error_on_unknown=True):
self.error_on_unknown = error_on_unknown
@staticmethod
def _prepare_data(data):
if isinstance(data, list):
data = np.array(data)
if len(data.shape) > 2:
raise ValueError('Unexpected format.')
if len(data.shape) == 2:
if data.shape[1] != 1:
raise ValueError('Unexpected format.')
data = data[:, 0]
return data
def fit(self, data):
data = self._prepare_data(data)
self.dummy_na = pd.isnull(data).any()
self.dummies = list(pd.get_dummies(data, dummy_na=self.dummy_na).columns)
def transform(self, data):
data = self._prepare_data(data)
dummies = | pd.get_dummies(data, dummy_na=self.dummy_na) | pandas.get_dummies |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from gensim.models import LdaModel
from gensim.matutils import Sparse2Corpus
from scipy import sparse
from itertools import product
from random import shuffle
from time import time
import spacy
import logging
pd.set_option('display.expand_frame_repr', False)
np.random.seed(42)
nlp = spacy.load('en')
logging.basicConfig(
filename='gensim.log',
level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%H:%M:%S')
def format_time(t):
m_, s = divmod(t, 60)
h, m = divmod(m_, 60)
return f'{h:>02.0f}:{m:>02.0f}:{s:>02.0f}'
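# Minimal, hedged illustration: elapsed seconds rendered as HH:MM:SS.
def _format_time_example():
    assert format_time(3725) == '01:02:05'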
clean_text = Path('clean_text.txt')
# experiment setup
cols = ['vocab_size', 'test_vocab', 'min_df', 'max_df', 'binary', 'num_topics', 'passes', 'perplexity']
experiment_path = Path('experiments')
# get text files
clean_docs = clean_text.read_text().split('\n')
print('\n', len(clean_docs))
train_docs, test_docs = train_test_split(clean_docs, test_size=.1)
# dtm params
min_dfs = [50, 100, 250, 500]
max_dfs = [.1, .25, .5, 1.0]
binarys = [True, False]
dtm_params = list(product(*[min_dfs, max_dfs, binarys]))
n = len(dtm_params)
shuffle(dtm_params)
topicss = [3, 5, 7, 10, 15, 20, 25, 50]
passess = [1, 25]
model_params = list(product(*[topicss, passess]))
corpus = id2word = train_corpus = train_tokens = test_corpus = vocab_size = test_vocab = None
start = time()
for i, (min_df, max_df, binary) in enumerate(dtm_params, 1):
print(min_df, max_df, binary)
result = []
vocab_path = experiment_path / str(min_df) / str(max_df) / str(int(binary))
if vocab_path.exists():
continue
else:
vocab_path.mkdir(exist_ok=True, parents=True)
vectorizer = CountVectorizer(min_df=min_df,
max_df=max_df,
binary=binary)
train_dtm = vectorizer.fit_transform(train_docs)
train_corpus = Sparse2Corpus(train_dtm, documents_columns=False)
train_tokens = vectorizer.get_feature_names()
test_dtm = vectorizer.transform(test_docs)
test_corpus = Sparse2Corpus(test_dtm, documents_columns=False)
test_vocab = test_dtm.count_nonzero()
dtm = vectorizer.fit_transform(clean_docs)
sparse.save_npz(vocab_path / f'dtm.npz', dtm)
tokens = vectorizer.get_feature_names()
vocab_size = len(tokens)
pd.Series(tokens).to_csv(vocab_path / f'tokens.csv', index=False)
id2word = | pd.Series(tokens) | pandas.Series |
"""Concept analysis functionality.
For details on the workflow of a concept analysis see
:py:meth:`ConceptAnalysis.analysis`.
In short:
:Input: All of
- The *concept* (defined via concept data)
- The *main model*
- The *layers* to analyse and compare
:Output: All of
- The *layer* hosting the best embedding,
- The *best embedding*,
- The *quality metric values* for the best embedding
"""
# Copyright (c) 2020 Continental Automotive GmbH
import enum
import logging
import os
from typing import Tuple, Dict, Any, Sequence, Callable, List, Optional, Union
import numpy as np
import pandas as pd
import torch
from hybrid_learning.datasets import data_visualization as datavis
from . import visualization as vis
from .concepts import ConceptTypes, Concept, SegmentationConcept2D
from .embeddings import ConceptEmbedding
# For type hints:
from .models import ConceptDetectionModel2D, ConceptDetection2DTrainTestHandle
LOGGER = logging.getLogger(__name__)
class EmbeddingReduction(enum.Enum):
"""Aggregator callables to get the mean from a list of embeddings."""
MEAN_NORMALIZED_DIST = (ConceptEmbedding.mean,)
"""Embedding with distance function the mean of those of the
normed representations"""
MEAN_DIST = (ConceptEmbedding.mean_by_distance,)
"""Embedding with distance the mean of the distance functions"""
MEAN_ANGLE = (ConceptEmbedding.mean_by_angle,)
"""Embedding with distance function the mean of the distance functions
weighted by cosine distance of the normal vectors"""
DEFAULT = MEAN_NORMALIZED_DIST
"""The default instance to be used."""
def __init__(self,
func: Callable[[Sequence[ConceptEmbedding]],
ConceptEmbedding]):
"""The init routine for enum members makes function available as
instance fields.
It is automatically called for all defined enum instances.
"""
self.function: Callable[[Sequence[ConceptEmbedding]],
ConceptEmbedding] = func
"""Actual function that reduces a list of embeddings to a new one.
.. note::
The function is manually saved as attribute during ``__init__``
due to the following issue:
Enums currently do not support functions as values, as explained in
`this
<https://stackoverflow.com/questions/40338652>`_ and
`this discussion
<https://mail.python.org/pipermail/python-ideas/2017-April/045435.html>`_.
The chosen workaround follows
`this suggestion <https://stackoverflow.com/a/30311492>`_
*(though the code is not used)*.
"""
def __call__(self,
embeddings: Sequence[ConceptEmbedding]) -> ConceptEmbedding:
"""Call aggregation function behind the instance on the embeddings."""
return self.function(embeddings)
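# Example (sketch): given ``embs``, any sequence of ConceptEmbedding instances
# (placeholder name), the enum members are callable reducers:
#
#     mean_emb = EmbeddingReduction.DEFAULT(embs)
#     # DEFAULT is an alias of MEAN_NORMALIZED_DIST, so this is equivalent to
#     # EmbeddingReduction.MEAN_NORMALIZED_DIST(embs)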
class ConceptAnalysis:
r"""Handle for conducting a concept embedding analysis.
Saves the analysis settings and can run a complete analysis.
The core methods are:
- :py:meth:`analysis`: plain analysis (collect
:math:`\text{cross_val_runs}\cdot\text{num_val_splits}`
embeddings for each layer in :py:attr:`layer_infos`)
- :py:meth:`best_embedding`: aggregate embeddings of an analysis per layer,
then choose best one
- :py:meth:`best_embedding_with_logging`: combination of the latter two
with automatic logging and result saving
"""
def __init__(self,
concept: Concept,
model: torch.nn.Module,
layer_infos: Union[Dict[str, Dict[str, Any]],
Sequence[str]] = None,
cross_val_runs: int = 1,
num_val_splits: int = 5,
emb_reduction: EmbeddingReduction = EmbeddingReduction.DEFAULT,
show_train_progress_bars: bool = True,
concept_model_args: Dict[str, Any] = None,
train_val_args: Dict[str, Any] = None,
):
"""Init.
:param concept: concept to find the embedding of
:param model: the DNN
:param layer_infos: information about the layers in which to look for
the best concept embedding; it may be given either as sequence of
layer IDs or as dict where the indices are the layer keys in
the model's :py:meth:`torch.nn.Module.named_modules` dict;
used keys:
- kernel_size: fixed kernel size to use for this layer
(overrides value from ``concept_model_args``)
- lr: learning rate to use
:param num_val_splits: the number of validation splits to use for
each cross-validation run
:param cross_val_runs: for a layer, several concept models are
trained in different runs; the runs differ by model initialization,
and the validation data split;
``cross_val_runs`` is the number of cross-validation runs,
i.e. collections of runs with num_val_splits distinct validation
sets each
:param emb_reduction: aggregation function to reduce list of
embeddings to one
:param show_train_progress_bars: whether to show the training
progress bars of the models
:param concept_model_args: dict with arguments for the concept model
initialization
:param train_val_args: any further arguments to initialize the concept
model handle
"""
if not concept.type == ConceptTypes.SEGMENTATION:
raise NotImplementedError(
("Analysis only available for segmentation concepts,"
"but concept was of type {}").format(concept.type))
self.concept: Concept = concept
"""The concept to find the embedding for."""
self.model: torch.nn.Module = model
"""The model in which to find the embedding."""
self.layer_infos: Dict[str, Dict[str, Any]] = layer_infos \
if isinstance(layer_infos, dict) \
else {l_id: {} for l_id in layer_infos}
"""Information about the layers in which to look for the best concept
embedding; the indices are the layer keys in the model's
:py:meth:`torch.nn.Module.named_modules` dict"""
self.cross_val_runs: int = cross_val_runs
"""The number of cross-validation runs to conduct for each layer.
A cross-validation run consists of :py:attr:`num_val_splits` training
runs with distinct validation sets. The resulting embeddings of all
runs of all cross-validation runs are then used to obtain the layer's
best concept embedding."""
self.num_val_splits: int = num_val_splits
"""The number of validation splits per cross-validation run."""
self.emb_reduction: EmbeddingReduction = emb_reduction
"""Aggregation function to reduce a list of embeddings from several
runs to one."""
self.show_train_progress_bars: bool = show_train_progress_bars
"""Whether to show the training progress bars of the models"""
self.train_val_args: Dict[str, Any] = train_val_args \
if train_val_args is not None else {}
"""Any training and evaluation arguments for the concept model
initialization."""
self.concept_model_args: Dict[str, Any] = concept_model_args \
if concept_model_args is not None else {}
"""Any arguments for initializing a new concept model."""
@property
def settings(self) -> Dict[str, Any]:
"""Settings dict to reproduce instance."""
return dict(
concept=self.concept,
model=self.model,
layer_infos=self.layer_infos,
cross_val_runs=self.cross_val_runs,
num_val_splits=self.num_val_splits,
emb_reduction=self.emb_reduction,
show_train_progress_bars=self.show_train_progress_bars,
train_val_args=self.train_val_args,
concept_model_args=self.concept_model_args
)
def __repr__(self):
setts = self.settings
# handle dict attribute representation
for k, val in setts.items():
if isinstance(val, dict) and len(val) > 0:
setts[k] = '{\n' + ',\n'.join(
["\t{!s}:\t{!s}".format(sub_k, sub_v)
for sub_k, sub_v in val.items()]) + '\n}'
return (str(self.__class__.__name__) + '(\n' +
',\n'.join(
["{!s} =\t{!s}".format(k, v) for k, v in setts.items()])
+ '\n)')
def best_embedding(self,
analysis_results: Dict[
str, Dict[int, Tuple[ConceptEmbedding, pd.Series]]]
= None) -> ConceptEmbedding:
"""Conduct an analysis and from results derive the best embedding.
:param analysis_results: optionally the results of a previously run
analysis; defaults to running a new analysis via :py:meth:`analysis`
:return: the determined best embedding of all layers analysed
"""
analysis_results = analysis_results or self.analysis()
best_embs_stds_stats: Dict[
str, Tuple[ConceptEmbedding, Tuple, pd.Series]] = {}
for layer_id, results_per_run in analysis_results.items():
best_embs_stds_stats[layer_id] = \
self.embedding_reduction(results_per_run)
best_layer_id = self.best_layer_from_stats(best_embs_stds_stats)
LOGGER.info("Concept %s final layer: %s", self.concept.name,
best_layer_id)
best_embedding, _, _ = best_embs_stds_stats[best_layer_id]
return best_embedding
def analysis(self) -> Dict[str,
Dict[int, Tuple[ConceptEmbedding, pd.Series]]]:
"""Conduct a concept embedding analysis.
For each layer in :py:attr:`layer_infos`:
- train :py:attr:`cross_val_runs` x :py:attr:`num_val_splits`
concept models,
- collect their evaluation results,
- convert them to embeddings.
:return: a dictionary of
``{layer_id: {run: (embedding,
pandas.Series with {pre_: metric_val})}}``
"""
results_per_layer: Dict[
str, Dict[int, Tuple[ConceptEmbedding, pd.Series]]] = {}
for layer_id in self.layer_infos:
results_per_run: Dict[int, Tuple[ConceptEmbedding, pd.Series]] = \
self.analysis_for_layer(layer_id)
results_per_layer[layer_id] = results_per_run
return results_per_layer
@classmethod
def analysis_results_to_pandas(cls, analysis_results):
"""Provide :py:class:`pandas.DataFrame` multi-indexed by layer and
run w/ info for each run.
The information for each run is the one obtained by
:py:meth:`emb_info_to_pandas`.
:param analysis_results: analysis results in the form produced by
:py:meth:`analysis`
:returns: a :py:class:`pandas.DataFrame` with run result information
multi-indexed by ``(layer, run)``
"""
return pd.DataFrame({(layer_id, run): cls.emb_info_to_pandas(emb, stats)
for layer_id, runs in analysis_results.items()
for run, (emb, stats) in runs.items()
}).transpose()
@classmethod
def best_emb_infos_to_pandas(cls,
results: Dict[str, Tuple[
ConceptEmbedding,
Tuple[np.ndarray, float, float],
pd.Series]]) -> pd.DataFrame:
"""Provide :py:class:`pandas.DataFrame` indexed by layer ID wt/ info
about embeddings.
The format of results must be a dictionary indexed by the layer ID
and with values as provided by :py:meth:`embedding_reduction`
"""
return pd.DataFrame({layer_id: cls.emb_info_to_pandas(emb, stats, var)
for layer_id, (emb, var, stats) in results.items()
}).transpose()
@classmethod
def save_best_emb_results(
cls,
results: Dict[str, Tuple[ConceptEmbedding,
Tuple[np.ndarray, float, float],
pd.Series]],
folder_path: str):
"""Save results of embedding reduction.
The format of results must be a dict with layer IDs as keys and
values as provided by :py:meth:`embedding_reduction`.
"""
info = cls.best_emb_infos_to_pandas(results)
info['embedding'] = None
for layer in info.index:
emb: ConceptEmbedding = results[layer][0]
emb_fn = "{} best.npz".format(layer)
# Save and note in the info frame:
emb.save(os.path.join(folder_path, emb_fn))
info.loc[layer, 'embedding'] = emb_fn
info.to_csv(os.path.join(folder_path, "best_emb_stats.csv"))
@classmethod
def save_analysis_results(cls,
results: Dict[str, Dict[
int, Tuple[ConceptEmbedding, pd.Series]]],
folder_path: str):
"""Save analysis results.
The format is one retrievable by :py:meth:`load_analysis_results`.
The results are saved in the following files within ``folder_path``
- ``<layer> <run>.npz``: npz file with embedding resulting from
``<run>`` on ``<layer>``; can be loaded to an embedding using
:py:meth:`hybrid_learning.concepts.embeddings.ConceptEmbedding.load`
- ``stats.csv``: CSV file holding a :py:class:`pandas.DataFrame` with
each rows holding an embedding statistics;
additional columns are ``'layer'``, ``'run'``, and ``'embedding'``,
where the ``'embedding'`` column holds the path to the npz-saved
embedding corresponding of the row relative to the location of
``stats.csv``
:param results: results dictionary in the format returned by
:py:meth:`analysis`
:param folder_path: the root folder to save files under;
must not yet exist
"""
info = cls.analysis_results_to_pandas(results)
info['embedding'] = None
for layer, run in info.index:
emb: ConceptEmbedding = results[layer][run][0]
emb_fn = "{} {}.npz".format(layer, run)
# Save and note in the info frame:
emb.save(os.path.join(folder_path, emb_fn))
info.loc[(layer, run), 'embedding'] = emb_fn
info.to_csv(os.path.join(folder_path, "stats.csv"))
@staticmethod
def load_analysis_results(folder_path: str
) -> Dict[str, Dict[int, Tuple[ConceptEmbedding,
pd.Series]]]:
"""Load analysis results previously saved.
The saving format is assumed to be that of
:py:meth:`save_analysis_results`."""
if not os.path.isdir(folder_path):
raise ValueError("Folder {} does not exist!".format(folder_path))
stats_frame = pd.read_csv(os.path.join(folder_path, "stats.csv"))
assert all([col in stats_frame.columns
for col in ("layer", "run", "embedding")])
# set_index is not in-place by default; keep the re-indexed frame
stats_frame = stats_frame.set_index(['layer', 'run'])
layers = stats_frame.index.get_level_values('layer').unique()
runs = stats_frame.index.get_level_values('run').unique()
analysis_results = {layer: {run: None for run in runs}
for layer in layers}
for layer in layers:
for run in runs:
row = stats_frame.loc[(layer, run)]
emb = ConceptEmbedding.load(
os.path.join(folder_path, row['embedding']))
stat = row.drop('embedding')  # row is a Series; drop the embedding file entry
analysis_results[layer][run] = (emb, stat)
return analysis_results
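# Round-trip sketch (folder path is a placeholder): results written via
# ``ConceptAnalysis.save_analysis_results(results, "out/analysis")`` can be
# restored later with ``ConceptAnalysis.load_analysis_results("out/analysis")``.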
def analysis_for_layer(self, layer_id: str
) -> Dict[int, Tuple[ConceptEmbedding, pd.Series]]:
"""Get a concept embedding of the given concept in the given layer.
:param layer_id: ID of the layer to find embedding in; key in
:py:attr:`layer_infos`
:return: a dict mapping each run index to a tuple of the embedding
obtained in that run and its evaluation metrics
"""
c_model = self.concept_model_for_layer(layer_id)
c_handle: ConceptDetection2DTrainTestHandle = \
self.concept_model_handle(c_model)
if 'lr' in self.layer_infos[layer_id]:
c_handle.optimizer.lr = self.layer_infos[layer_id]['lr']
stats_per_run = {}
for cross_val_run in range(self.cross_val_runs):
states, _, _ = zip(*c_handle.cross_validate(
num_splits=self.num_val_splits,
run_info_templ=("{}, cv {}/{}, ".format(
layer_id, cross_val_run + 1, self.cross_val_runs) +
"run {run}/{runs}"),
show_progress_bars=self.show_train_progress_bars))
for split, state_dict in enumerate(states):
c_model.load_state_dict(state_dict)
embedding = c_model.to_embedding()
metrics: pd.Series = self.evaluate_embedding(embedding)
# storing & logging
run = split + cross_val_run * self.num_val_splits
stats_per_run[run] = (embedding, metrics)
context = "Concept {}, layer {}, run {}".format(
self.concept.name, layer_id, run)
LOGGER.info("%s:\n%s", context,
self.emb_info_to_string(embedding, metrics))
return stats_per_run
def concept_model_handle(self,
c_model: ConceptDetectionModel2D = None,
emb: ConceptEmbedding = None,
layer_id: str = None
) -> ConceptDetection2DTrainTestHandle:
"""Train and eval handle for the given concept model.
The concept model to handle can either be specified directly or is
created from an embedding or from a given ``layer_id``.
:param c_model: the concept model to provide a handle for
:param emb: if ``c_model`` is not given, it is initialized using
:py:meth:`concept_model_from_embedding` on ``emb``
:param layer_id: if c_model and emb is not given, it is initialized
using :py:meth:`concept_model_for_layer` on ``layer_id``
:return: a handle for the specified or created concept model
"""
if c_model is None:
if emb is not None:
c_model = self.concept_model_from_embedding(emb)
elif layer_id is not None:
c_model = self.concept_model_for_layer(layer_id)
else:
raise ValueError("Either c_model, emb, or layer_id must "
"be given.")
return ConceptDetection2DTrainTestHandle(c_model, **self.train_val_args)
def concept_model_for_layer(self, layer_id):
"""Return a concept model for the given layer ID.
:param layer_id: ID of the layer the concept model should be attached
to; key in :py:attr:`layer_infos`
:returns: concept model for :py:attr:`concept` attached to given
layer in :py:attr:`model`
"""
c_model: ConceptDetectionModel2D = ConceptDetectionModel2D(
concept=SegmentationConcept2D.new(self.concept),
model=self.model, layer_id=layer_id,
**{'kernel_size': self.layer_infos[layer_id].get('kernel_size',
None),
**self.concept_model_args}
)
return c_model
@staticmethod
def concept_model_from_embedding(embedding: ConceptEmbedding
) -> ConceptDetectionModel2D:
"""Get concept model from embedding for training and eval."""
return ConceptDetectionModel2D.from_embedding(embedding)
@staticmethod
def emb_info_to_string(
emb: ConceptEmbedding, stats: pd.Series = None,
std_dev: Tuple[np.ndarray, float, float] = None) -> str:
"""Printable quick info about the given embedding with stats
(and standard deviation)."""
info: pd.Series = ConceptAnalysis.emb_info_to_pandas(emb, stats,
std_dev=std_dev)
# Formatting
float_format: str = "{: < 14.6f}"
exp_format: str = "{: < 14.6e}"
for idx in [i for i in info.index if "std" in i]:
info[idx] = exp_format.format(info[idx])
return info.to_string(float_format=float_format.format)
@staticmethod
def emb_info_to_pandas(emb: ConceptEmbedding, stats: pd.Series = None,
std_dev: Tuple[np.ndarray, float, float] = None
) -> pd.Series:
"""Quick info about embedding with stats (and standard dev)
as :py:class:`pandas.Series`."""
stats_info = stats if stats is not None else {}
emb_info = {"normal vec len": np.linalg.norm(emb.normal_vec),
"support factor": float(emb.support_factor),
"scaling factor": float(emb.scaling_factor)}
std_info = {"std dev normal vec (len)": np.linalg.norm(std_dev[0]),
"std dev support factor": std_dev[1],
"std dev scaling factor": std_dev[2]} \
if std_dev is not None else {}
return | pd.Series({**stats_info, **emb_info, **std_info}) | pandas.Series |
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = pd.DataFrame({"A": ii, "B": ii})
ser = pd.Series([0, 0])
res = df.eq(ser, axis=0)
expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
ser2 = pd.Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{
"A": pd.Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = dict(B="uint64", C=None)
elif op in ["__add__", "__mul__"]:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
def test_flex_add_scalar_fill_value(self):
# GH#12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
df = pd.DataFrame({"foo": dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_frame_equal(res, exp)
class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = pd.DataFrame([[1, 2], [3, 4]])
result = df * ser
expected = pd.DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame(
[[2, 4], [4, 6], [6, 8]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame(
[[1, 2], [5, 6], [9, 10]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [
getattr(df.loc["A"], opname)(rowlike.squeeze()),
getattr(df.loc["B"], opname)(rowlike.squeeze()),
getattr(df.loc["C"], opname)(rowlike.squeeze()),
]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {
True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze()),
}
dtype = None
if opname in ["__rmod__", "__rfloordiv__"]:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = pd.DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
def test_arith_mixed(self):
left = pd.DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})
result = left + left
expected = pd.DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
tm.assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = pd.DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
tm.assert_series_equal(result[col], op(df[col], 1))
_test_op(df, operator.add)
_test_op(df, operator.sub)
_test_op(df, operator.mul)
_test_op(df, operator.truediv)
_test_op(df, operator.floordiv)
_test_op(df, operator.pow)
_test_op(df, lambda x, y: y + x)
_test_op(df, lambda x, y: y - x)
_test_op(df, lambda x, y: y * x)
_test_op(df, lambda x, y: y / x)
_test_op(df, lambda x, y: y ** x)
_test_op(df, lambda x, y: x + y)
_test_op(df, lambda x, y: x - y)
_test_op(df, lambda x, y: x * y)
_test_op(df, lambda x, y: x / y)
_test_op(df, lambda x, y: x ** y)
@pytest.mark.parametrize(
"values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])]
)
def test_arith_alignment_non_pandas_object(self, values):
# GH#17901
df = pd.DataFrame({"A": [1, 1], "B": [1, 1]})
expected = pd.DataFrame({"A": [2, 2], "B": [3, 3]})
result = df + values
tm.assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = pd.DataFrame(
np.arange(1, 10, dtype="f8").reshape(3, 3),
columns=["one", "two", "three"],
index=["a", "b", "c"],
)
val1 = df.xs("a").values
added = pd.DataFrame(df.values + val1, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val1, added)
added = pd.DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df["two"])
added = pd.DataFrame(df.values + val2, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val2, added)
added = pd.DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val2, axis="index"), added)
val3 = np.random.rand(*df.shape)
added = pd.DataFrame(df.values + val3, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val3), added)
def test_operations_with_interval_categories_index(self, all_arithmetic_operators):
# GH#27415
op = all_arithmetic_operators
ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0))
data = [1, 2]
df = pd.DataFrame([data], columns=ind)
num = 10
result = getattr(df, op)(num)
expected = pd.DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
tm.assert_frame_equal(result, expected)
def test_frame_with_frame_reindex(self):
# GH#31623
df = pd.DataFrame(
{
"foo": [pd.Timestamp("2019"), pd.Timestamp("2020")],
"bar": [pd.Timestamp("2018"), pd.Timestamp("2021")],
},
columns=["foo", "bar"],
)
df2 = df[["foo"]]
result = df - df2
expected = pd.DataFrame(
{"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]},
columns=["bar", "foo"],
)
tm.assert_frame_equal(result, expected)
def test_frame_with_zero_len_series_corner_cases():
# GH#28600
# easy all-float case
df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
ser = pd.Series(dtype=np.float64)
result = df + ser
expected = pd.DataFrame(df.values * np.nan, columns=df.columns)
tm.assert_frame_equal(result, expected)
result = df == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# non-float case should not raise on comparison
df2 = pd.DataFrame(df.values.view("M8[ns]"), columns=df.columns)
result = df2 == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_zero_len_frame_with_series_corner_cases():
# GH#28600
df = pd.DataFrame(columns=["A", "B"], dtype=np.float64)
ser = pd.Series([1, 2], index=["A", "B"])
result = df + ser
expected = df
tm.assert_frame_equal(result, expected)
def test_frame_single_columns_object_sum_axis_1():
# GH 13758
data = {
"One": pd.Series(["A", 1.2, np.nan]),
}
df = pd.DataFrame(data)
result = df.sum(axis=1)
expected = pd.Series(["A", 1.2, 0])
tm.assert_series_equal(result, expected)
# -------------------------------------------------------------------
# Unsorted
# These arithmetic tests were previously in other files, eventually
# should be parametrized and put into tests.arithmetic
class TestFrameArithmeticUnsorted:
def test_frame_add_tz_mismatch_converts_to_utc(self):
rng = pd.date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
df = pd.DataFrame(np.random.randn(len(rng)), index=rng, columns=["a"])
df_moscow = df.tz_convert("Europe/Moscow")
result = df + df_moscow
assert result.index.tz is pytz.utc
result = df_moscow + df
assert result.index.tz is pytz.utc
def test_align_frame(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = pd.DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
half = ts[::2]
result = ts + half.take(np.random.permutation(len(half)))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"op", [operator.add, operator.sub, operator.mul, operator.truediv]
)
def test_operators_none_as_na(self, op):
df = DataFrame(
{"col1": [2, 5.0, 123, None], "col2": [1, 2, 3, 4]}, dtype=object
)
# since filling converts dtypes from object, changed expected to be
# object
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
tm.assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize("op,res", [("__eq__", False), ("__ne__", True)])
# TODO: not sure what's correct here.
@pytest.mark.filterwarnings("ignore:elementwise:FutureWarning")
def test_logical_typeerror_with_non_valid(self, op, res, float_frame):
# we are comparing floats vs a string
result = getattr(float_frame, op)("foo")
assert bool(result.all().all()) is res
def test_binary_ops_align(self):
# test aligning binary ops
# GH 6681
index = MultiIndex.from_product(
[list("abc"), ["one", "two", "three"], [1, 2, 3]],
names=["first", "second", "third"],
)
df = DataFrame(
np.arange(27 * 3).reshape(27, 3),
index=index,
columns=["value1", "value2", "value3"],
).sort_index()
idx = pd.IndexSlice
for op in ["add", "sub", "mul", "div", "truediv"]:
opa = getattr(operator, op, None)
if opa is None:
continue
x = Series([1.0, 10.0, 100.0], [1, 2, 3])
result = getattr(df, op)(x, level="third", axis=0)
expected = pd.concat(
[opa(df.loc[idx[:, :, i], :], v) for i, v in x.items()]
).sort_index()
tm.assert_frame_equal(result, expected)
x = Series([1.0, 10.0], ["two", "three"])
result = getattr(df, op)(x, level="second", axis=0)
expected = (
pd.concat([opa(df.loc[idx[:, i], :], v) for i, v in x.items()])
.reindex_like(df)
.sort_index()
)
tm.assert_frame_equal(result, expected)
# GH9463 (alignment level of dataframe with series)
midx = MultiIndex.from_product([["A", "B"], ["a", "b"]])
df = DataFrame(np.ones((2, 4), dtype="int64"), columns=midx)
s = pd.Series({"a": 1, "b": 2})
df2 = df.copy()
df2.columns.names = ["lvl0", "lvl1"]
s2 = s.copy()
s2.index.name = "lvl1"
# different cases of integer/string level names:
res1 = df.mul(s, axis=1, level=1)
res2 = df.mul(s2, axis=1, level=1)
res3 = df2.mul(s, axis=1, level=1)
res4 = df2.mul(s2, axis=1, level=1)
res5 = df2.mul(s, axis=1, level="lvl1")
res6 = df2.mul(s2, axis=1, level="lvl1")
exp = DataFrame(
np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype="int64"), columns=midx
)
for res in [res1, res2]:
tm.assert_frame_equal(res, exp)
exp.columns.names = ["lvl0", "lvl1"]
for res in [res3, res4, res5, res6]:
tm.assert_frame_equal(res, exp)
def test_add_with_dti_mismatched_tzs(self):
base = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC")
idx1 = base.tz_convert("Asia/Tokyo")[:2]
idx2 = base.tz_convert("US/Eastern")[1:]
df1 = DataFrame({"A": [1, 2]}, index=idx1)
df2 = DataFrame({"A": [1, 1]}, index=idx2)
exp = DataFrame({"A": [np.nan, 3, np.nan]}, index=base)
tm.assert_frame_equal(df1 + df2, exp)
def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
frame_copy = float_frame.reindex(float_frame.index[::2])
del frame_copy["D"]
frame_copy["C"][:5] = np.nan
added = float_frame + frame_copy
indexer = added["A"].dropna().index
exp = (float_frame["A"] * 2).copy()
tm.assert_series_equal(added["A"].dropna(), exp.loc[indexer])
exp.loc[~exp.index.isin(indexer)] = np.nan
tm.assert_series_equal(added["A"], exp.loc[added["A"].index])
assert np.isnan(added["C"].reindex(frame_copy.index)[:5]).all()
# assert(False)
assert np.isnan(added["D"]).all()
self_added = float_frame + float_frame
tm.assert_index_equal(self_added.index, float_frame.index)
added_rev = frame_copy + float_frame
assert np.isnan(added["D"]).all()
assert np.isnan(added_rev["D"]).all()
# corner cases
# empty
plus_empty = float_frame + DataFrame()
assert np.isnan(plus_empty.values).all()
empty_plus = DataFrame() + float_frame
assert np.isnan(empty_plus.values).all()
empty_empty = DataFrame() + DataFrame()
assert empty_empty.empty
# out of order
reverse = float_frame.reindex(columns=float_frame.columns[::-1])
tm.assert_frame_equal(reverse + float_frame, float_frame * 2)
# mix vs float64, upcast
added = float_frame + mixed_float_frame
_check_mixed_float(added, dtype="float64")
added = mixed_float_frame + float_frame
_check_mixed_float(added, dtype="float64")
# mix vs mix
added = mixed_float_frame + mixed_float_frame
_check_mixed_float(added, dtype=dict(C=None))
# with int
added = float_frame + mixed_int_frame
_check_mixed_float(added, dtype="float64")
def test_combine_series(
self, float_frame, mixed_float_frame, mixed_int_frame, datetime_frame
):
# Series
series = float_frame.xs(float_frame.index[0])
added = float_frame + series
for key, s in added.items():
tm.assert_series_equal(s, float_frame[key] + series[key])
larger_series = series.to_dict()
larger_series["E"] = 1
larger_series = Series(larger_series)
larger_added = float_frame + larger_series
for key, s in float_frame.items():
tm.assert_series_equal(larger_added[key], s + series[key])
assert "E" in larger_added
assert np.isnan(larger_added["E"]).all()
# no upcast needed
added = mixed_float_frame + series
assert np.all(added.dtypes == series.dtype)
# vs mix (upcast) as needed
added = mixed_float_frame + series.astype("float32")
_check_mixed_float(added, dtype=dict(C=None))
added = mixed_float_frame + series.astype("float16")
_check_mixed_float(added, dtype=dict(C=None))
# FIXME: don't leave commented-out
# these raise with numexpr.....as we are adding an int64 to an
# uint64....weird vs int
# added = mixed_int_frame + (100*series).astype('int64')
# _check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C =
# 'int64', D = 'int64'))
# added = mixed_int_frame + (100*series).astype('int32')
# _check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C =
# 'int32', D = 'int64'))
# TimeSeries
ts = datetime_frame["A"]
# 10890
# we no longer allow auto timeseries broadcasting
# and require explicit broadcasting
added = datetime_frame.add(ts, axis="index")
for key, col in datetime_frame.items():
result = col + ts
tm.assert_series_equal(added[key], result, check_names=False)
assert added[key].name == key
if col.name == ts.name:
assert result.name == "A"
else:
assert result.name is None
smaller_frame = datetime_frame[:-5]
smaller_added = smaller_frame.add(ts, axis="index")
tm.assert_index_equal(smaller_added.index, datetime_frame.index)
smaller_ts = ts[:-5]
smaller_added2 = datetime_frame.add(smaller_ts, axis="index")
tm.assert_frame_equal(smaller_added, smaller_added2)
# length 0, result is all-nan
result = datetime_frame.add(ts[:0], axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# Frame is all-nan
result = datetime_frame[:0].add(ts, axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# empty but with non-empty index
frame = datetime_frame[:1].reindex(columns=[])
result = frame.mul(ts, axis="index")
assert len(result) == len(ts)
def test_combineFunc(self, float_frame, mixed_float_frame):
result = float_frame * 2
tm.assert_numpy_array_equal(result.values, float_frame.values * 2)
# vs mix
result = mixed_float_frame * 2
for c, s in result.items():
tm.assert_numpy_array_equal(s.values, mixed_float_frame[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
result = DataFrame() * 2
assert result.index.equals(DataFrame().index)
assert len(result.columns) == 0
def test_comparisons(self, simple_frame, float_frame):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
row = simple_frame.xs("a")
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
result = func(df1, df2)
tm.assert_numpy_array_equal(result.values, func(df1.values, df2.values))
msg = (
"Unable to coerce to Series/DataFrame, "
"dimension must be <= 2: (30, 4, 1, 1, 1)"
)
with pytest.raises(ValueError, match=re.escape(msg)):
func(df1, ndim_5)
result2 = func(simple_frame, row)
tm.assert_numpy_array_equal(
result2.values, func(simple_frame.values, row.values)
)
result3 = func(float_frame, 0)
tm.assert_numpy_array_equal(result3.values, func(float_frame.values, 0))
msg = "Can only compare identically-labeled DataFrame"
with pytest.raises(ValueError, match=msg):
func(simple_frame, simple_frame[:2])
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_strings_to_numbers_comparisons_raises(self, compare_operators_no_eq_ne):
# GH 11565
df = DataFrame(
{x: {"x": "foo", "y": "bar", "z": "baz"} for x in ["a", "b", "c"]}
)
f = getattr(operator, compare_operators_no_eq_ne)
msg = "'[<>]=?' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
f(df, 0)
def test_comparison_protected_from_errstate(self):
missing_df = tm.makeDataFrame()
missing_df.iloc[0]["A"] = np.nan
with np.errstate(invalid="ignore"):
expected = missing_df.values < 0
with np.errstate(invalid="raise"):
result = (missing_df < 0).values
tm.assert_numpy_array_equal(result, expected)
def test_boolean_comparison(self):
# GH 4576
# boolean comparisons with a tuple/list give unexpected results
df = DataFrame(np.arange(6).reshape((3, 2)))
b = np.array([2, 2])
b_r = np.atleast_2d([2, 2])
b_c = b_r.T
lst = [2, 2, 2]
tup = tuple(lst)
# gt
expected = DataFrame([[False, False], [False, True], [True, True]])
result = df > b
tm.assert_frame_equal(result, expected)
result = df.values > b
tm.assert_numpy_array_equal(result, expected.values)
msg1d = "Unable to coerce to Series, length must be 2: given 3"
msg2d = "Unable to coerce to DataFrame, shape must be"
msg2db = "operands could not be broadcast together with shapes"
with pytest.raises(ValueError, match=msg1d):
# wrong shape
df > lst
with pytest.raises(ValueError, match=msg1d):
# wrong shape
result = df > tup
# broadcasts like ndarray (GH#23000)
result = df > b_r
tm.assert_frame_equal(result, expected)
result = df.values > b_r
tm.assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError, match=msg2d):
df > b_c
with pytest.raises(ValueError, match=msg2db):
df.values > b_c
# ==
expected = DataFrame([[False, False], [True, False], [False, False]])
result = df == b
tm.assert_frame_equal(result, expected)
with pytest.raises(ValueError, match=msg1d):
result = df == lst
with pytest.raises(ValueError, match=msg1d):
result = df == tup
# broadcasts like ndarray (GH#23000)
result = df == b_r
tm.assert_frame_equal(result, expected)
result = df.values == b_r
tm.assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError, match=msg2d):
df == b_c
assert df.values.shape != b_c.shape
# with alignment
df = DataFrame(
np.arange(6).reshape((3, 2)), columns=list("AB"), index=list("abc")
)
expected.index = df.index
expected.columns = df.columns
with pytest.raises(ValueError, match=msg1d):
result = df == lst
with pytest.raises(ValueError, match=msg1d):
result = df == tup
def test_inplace_ops_alignment(self):
# inplace ops / ops alignment
# GH 8511
columns = list("abcdefg")
X_orig = DataFrame(
np.arange(10 * len(columns)).reshape(-1, len(columns)),
columns=columns,
index=range(10),
)
Z = 100 * X_orig.iloc[:, 1:-1].copy()
block1 = list("bedcf")
subs = list("bcdef")
# add
X = X_orig.copy()
result1 = (X[block1] + Z).reindex(columns=subs)
X[block1] += Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] + Z[block1]).reindex(columns=subs)
X[block1] += Z[block1]
result4 = X.reindex(columns=subs)
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result1, result3)
tm.assert_frame_equal(result1, result4)
# sub
X = X_orig.copy()
result1 = (X[block1] - Z).reindex(columns=subs)
X[block1] -= Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] - Z[block1]).reindex(columns=subs)
X[block1] -= Z[block1]
result4 = X.reindex(columns=subs)
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result1, result3)
tm.assert_frame_equal(result1, result4)
def test_inplace_ops_identity(self):
# GH 5104
# make sure that we are actually changing the object
s_orig = Series([1, 2, 3])
df_orig = DataFrame(np.random.randint(0, 5, size=10).reshape(-1, 5))
# no dtype change
s = s_orig.copy()
s2 = s
s += 1
tm.assert_series_equal(s, s2)
tm.assert_series_equal(s_orig + 1, s)
assert s is s2
assert s._mgr is s2._mgr
df = df_orig.copy()
df2 = df
df += 1
tm.assert_frame_equal(df, df2)
| tm.assert_frame_equal(df_orig + 1, df) | pandas._testing.assert_frame_equal |
import os
import pandas as pd
from pandas.api.types import is_string_dtype
import numpy as np
from numpy import log
from scipy.linalg import norm
from scipy.stats import entropy
import nltk
import nltk.data
from nltk.tokenize import sent_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from textblob import TextBlob # Consider changing
from gensim.models import word2vec
from sklearn.manifold import TSNE
from sklearn.preprocessing import normalize
from sklearn.decomposition import PCA
from sklearn.linear_model import Perceptron
from polo2 import PoloDb
from polo2 import PoloFile
class PoloCorpus(PoloDb):
ngram_prefixes = ['no', 'uni', 'bi', 'tri', 'quadri']
def __init__(self, config):
"""Initialize corpus object"""
# Import Configs
self.config = config
self.config.set_config_attributes(self)
if not os.path.isfile(self.cfg_src_file_name):
raise ValueError("Missing source file. Check value of `src_file_name` in INI file.")
self.dbfile = config.generate_corpus_db_file_path()
PoloDb.__init__(self, self.dbfile)
# self.db = PoloDb(self.dbfile) # Why not do this?
if self.cfg_nltk_data_path:
nltk.data.path.append(self.cfg_nltk_data_path)
# For tokenizing into sentences
# fixme: TOKENIZER ASSUMES ENGLISH -- PARAMETRIZE THIS
nltk.download('punkt')
nltk.download('tagsets')
nltk.download('averaged_perceptron_tagger')
self.tokenizer = nltk.data.load('nltk:tokenizers/punkt/english.pickle')
def import_table_doc(self, src_file_name=None, normalize=True):
"""Import source file into doc table"""
# Read in file content
if not src_file_name:
src_file_name = self.cfg_src_file_name
doc = pd.read_csv(src_file_name, header=0, sep=self.cfg_src_file_sep, lineterminator='\n')
doc.index.name = 'doc_id'
# todo: Find a more efficient way of handling this -- such as not duplicating!
# This is a legacy of an older procedure which now has performance implications.
if 'doc_original' not in doc.columns:
doc['doc_original'] = doc.doc_content
# todo: Put this in a separate and configurable function for general text normalization.
# Preliminary normalization of documents
doc['doc_content'] = doc.doc_content.str.replace(r'\n+', ' ', regex=True) # Remove newlines
doc['doc_content'] = doc.doc_content.str.replace(r'<[^>]+>', ' ', regex=True) # Remove tags
doc['doc_content'] = doc.doc_content.str.replace(r'\s+', ' ', regex=True) # Collapse spaces
# Remove empty docs
doc = doc[~doc.doc_content.isnull()]
doc.reindex()
self.put_table(doc, 'doc', index=True)
def import_table_stopword(self, use_nltk=False):
"""Import stopwords"""
swset = set()
# fixme: Cast integers in config object
# fixme: Parameterize language
if int(self.cfg_use_nltk) == 1:
nltk_stopwords = set(stopwords.words('english'))
swset.update(nltk_stopwords)
if self.cfg_extra_stops and os.path.isfile(self.cfg_extra_stops):
src = PoloFile(self.cfg_extra_stops)
swset.update([word for word in src.read_bigline().split()])
swdf = pd.DataFrame({'token_str': list(swset)})
self.put_table(swdf, 'stopword')
# todo: Consider changing table name to DOCTERM or TOKEN
def add_table_doctoken(self):
"""Create doctoken and doctokenbow tables; update doc table"""
docs = self.get_table('doc', set_index=True)
# todo: Consider dividing this in two parts, the first to create a Phrase model with Gensim
# This takes a long time
doctokens = pd.DataFrame([(sentences[0], j, k, token[0], token[1])
for sentences in docs.apply(lambda x: (x.name, sent_tokenize(x.doc_content)), 1)
for j, sentence in enumerate(sentences[1])
for k, token in enumerate(nltk.pos_tag(nltk.word_tokenize(sentence)))],
columns=['doc_id', 'sentence_id', 'token_ord', 'token_str', 'pos'])
doctokens = doctokens.set_index(['doc_id', 'sentence_id', 'token_ord'])
# Normalize
doctokens.token_str = doctokens.token_str.str.lower()
doctokens.token_str = doctokens.token_str.str.replace(r'[^a-z]+', '', regex=True)
doctokens = doctokens[~doctokens.token_str.str.match(r'^\s*$')]
# todo: Instead of removing stopwords, identify with feature
stopwords = self.get_table('stopword').token_str.tolist()
doctokens = doctokens[~doctokens.token_str.isin(stopwords)]
self.put_table(doctokens, 'doctoken', if_exists='replace', index=True)
# Creates a BOW model for the doc, removing words in sequence and only keeping counts
doctokenbow = pd.DataFrame(doctokens.groupby('doc_id').token_str.value_counts())
doctokenbow.columns = ['token_count']
self.put_table(doctokenbow, 'doctokenbow', index=True)
# Add token counts to doc
docs['token_count'] = doctokenbow.groupby('doc_id').token_count.sum()
self.put_table(docs, 'doc', if_exists='replace', index=True)
# fixme: TOKEN should be the TERM table (aka VOCAB)
def add_table_token(self):
"""Get token data from doctoken and doctokenbow"""
doctoken = self.get_table('doctoken')
token = pd.DataFrame(doctoken.token_str.value_counts())
token.sort_index(inplace=True)
token.reset_index(inplace=True)
token.columns = ['token_str', 'token_count']
token.index.name = 'token_id'
# Add pos_max to token
pos_max = doctoken.groupby(['token_str', 'pos']).pos.count().unstack().idxmax(1)
token['pos_max'] = token.token_str.map(pos_max)
# Replace token_str with token_id in doctokenbow
token.reset_index(inplace=True)
doctokenbow = self.get_table('doctokenbow')
doctokenbow = doctokenbow.merge(token[['token_id', 'token_str']], on="token_str")
doctokenbow = doctokenbow[['doc_id', 'token_id', 'token_count']]
doctokenbow.sort_values('doc_id', inplace=True)
doctokenbow.set_index(['doc_id', 'token_id'], inplace=True)
self.put_table(doctokenbow, 'doctokenbow', if_exists='replace', index=True)
# Add doc counts to token
token.set_index('token_id', inplace=True)
token['doc_count'] = doctokenbow.groupby('token_id').count()
self.put_table(token, 'token', index=True)
# fixme: Use a better sentiment detector
def _get_sentiment(self, doc):
doc2 = TextBlob(doc)
return doc2.sentiment
def add_tfidf_to_doctokenbow(self):
"""Add TFIDF data to doctokenbow table"""
doctokenbow = self.get_table('doctokenbow', set_index=True)
tokens = self.get_table('token', set_index=True)
docs = pd.read_sql_query("SELECT doc_id, token_count FROM doc", self.conn, index_col='doc_id')
num_docs = docs.index.size
# Compute local and global token (actually term) significance
self.alpha = .4
doc_max = doctokenbow.groupby('doc_id').token_count.max()
tokens['df'] = doctokenbow.groupby('token_id').token_count.count()
# n_docs = len(doctokenbow.index.levels[0])
tokens['idf'] = np.log2(num_docs/tokens.df)
tokens['dfidf'] = tokens.df * tokens.idf
doctokenbow['tf'] = self.alpha + (1 - self.alpha) * (doctokenbow.token_count / doc_max)
doctokenbow['tfidf'] = doctokenbow.tf * tokens.idf
doctokenbow['tfidf_l2'] = doctokenbow['tfidf'] / doctokenbow.groupby(['doc_id']).apply(lambda x: norm(x.tfidf, 2))
tokens['tfidf_sum'] = doctokenbow.groupby('token_id').tfidf_l2.sum()
tokens['tfidf_avg'] = doctokenbow.groupby('token_id').tfidf_l2.mean()
self.put_table(doctokenbow, 'doctokenbow', if_exists='replace', index=True)
self.put_table(tokens, 'token', if_exists='replace', index=True)
def add_stems_to_token(self):
"""Add stems to token table"""
# We only use one stemmer since stemmers suck anyway :-)
porter_stemmer = PorterStemmer()
tokens = self.get_table('token', set_index=True)
tokens['token_stem_porter'] = tokens.token_str.apply(porter_stemmer.stem)
self.put_table(tokens, 'token', if_exists='replace', index=True)
def add_sentimant_to_doc(self):
"""Add sentiment to doc table"""
doc = self.get_table('doc', set_index=True)
doc['doc_sentiment'] = doc.doc_content.apply(self._get_sentiment)
doc['doc_sentiment_polarity'] = doc.doc_sentiment.apply(lambda x: round(x[0], 1))
doc['doc_sentiment_subjectivity'] = doc.doc_sentiment.apply(lambda x: round(x[1], 2))
del(doc['doc_sentiment'])
self.put_table(doc, 'doc', index=True)
def add_tables_ngram_and_docngram(self, n=2):
"""Create ngram and docngram tables for n (using stems)"""
key = {2:'bi', 3:'tri'}
try:
infix = key[n]
except KeyError as e:
print('Invalid ngram length. Must be 2 or 3')
return False
sql = {}
sql['bi'] = """
SELECT dt_x.doc_id AS doc_id,
t_x.token_stem_porter AS tx,
t_y.token_stem_porter AS ty,
t_x.token_stem_porter || '_' || t_y.token_stem_porter AS ngram,
count() AS tf
FROM doctoken dt_x
JOIN doctoken dt_y ON (dt_x.doc_id = dt_y.doc_id
AND dt_x.sentence_id = dt_y.sentence_id
AND dt_y.rowid = (dt_x.rowid + 1))
JOIN token t_x ON dt_x.token_str = t_x.token_str
JOIN token t_y ON dt_y.token_str = t_y.token_str
GROUP BY dt_x.doc_id, ngram
"""
sql['tri'] = """
SELECT dt_x.doc_id AS doc_id,
t_x.token_stem_porter AS tx,
t_y.token_stem_porter AS ty,
t_z.token_stem_porter AS tz,
t_x.token_stem_porter || '_' || t_y.token_stem_porter || '_' || t_z.token_stem_porter AS ngram,
count() AS tf
FROM doctoken dt_x
JOIN doctoken dt_y ON (dt_x.doc_id = dt_y.doc_id
AND dt_x.sentence_id = dt_y.sentence_id
AND dt_y.rowid = (dt_x.rowid + 1))
JOIN doctoken dt_z ON (dt_x.doc_id = dt_z.doc_id
AND dt_x.sentence_id = dt_z.sentence_id
AND dt_z.rowid = (dt_y.rowid + 1))
JOIN token t_x ON dt_x.token_str = t_x.token_str
JOIN token t_y ON dt_y.token_str = t_y.token_str
JOIN token t_z ON dt_z.token_str = t_z.token_str
GROUP BY dt_x.doc_id, ngram
"""
docngrams = pd.read_sql(sql[infix], self.conn)
self.put_table(docngrams, 'ngram{}doc'.format(infix), index=False)
def add_stats_to_ngrams(self, type='bi'):
"""Create distinct ngram tables with stats"""
sql1 = """
SELECT g.doc_id, d.doc_label, g.ngram, g.tf
FROM ngram{}doc g
JOIN doc d USING(doc_id)
""".format(type)
sql2 = """
SELECT ngram, doc_label, sum(tf) AS tf_sum
FROM (
SELECT g.doc_id, d.doc_label, g.ngram, g.tf
FROM ngram{}doc g
JOIN doc d USING(doc_id)
)
GROUP BY doc_label, ngram
""".format(type)
sql3 = """
WITH stats(n) AS (SELECT COUNT() AS n FROM doc)
SELECT ngram, count() AS c, (SELECT n FROM stats) AS n,
CAST(COUNT() AS REAL) / CAST((SELECT n FROM stats) AS REAL) AS df
FROM ngram{}doc
GROUP BY ngram
ORDER BY c DESC
""".format(type)
docngram = pd.read_sql_query(sql1, self.conn, index_col='doc_id')
labelngram = | pd.read_sql_query(sql2, self.conn, index_col=['ngram','doc_label']) | pandas.read_sql_query |
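# --- Illustrative sketch (not part of PoloCorpus) ---
# Worked example of the weighting used in add_tfidf_to_doctokenbow above: an
# alpha-smoothed term frequency times a log2 inverse document frequency. The numbers
# below are made up purely for illustration.
import numpy as np
alpha = 0.4
token_count, doc_max = 3, 10   # count of the token in one doc, max token count in that doc
num_docs, doc_freq = 100, 20   # corpus size, number of docs containing the token
tf = alpha + (1 - alpha) * (token_count / doc_max)   # 0.4 + 0.6 * 0.3 = 0.58
idf = np.log2(num_docs / doc_freq)                   # log2(5) ~ 2.32
tfidf = tf * idf                                     # ~ 1.35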
import pandas as pd
def preprocess_repo2(df,asset,Trading_env):
df = df.to_frame()
index = df.index.strftime('%Y-%m-%d')
index = [s.replace("-", "") for s in index]
liste = []
for i in index:
liste.append(i)
liste.append(i)
df_context = Trading_env.preprocess_context_data(asset)
df['gold'] = df_context.iloc[:,0]
df['interest'] = df_context.iloc[:,1]
df['index'] = df_context.iloc[:,2]
df['similar'] = df_context.iloc[:,3]
df['vix'] = df_context.iloc[:,4]
columns = df.columns.values.tolist()
columns[0] = 'adjcp'
df.columns = columns
#df = df.rename(columns={'AAPL': 'adjcp'})
df = df.reset_index(drop=True)
df.index = range(0, len(df) * 2, 2)
for i in range(1, len(df) * 2, 2):
line = pd.DataFrame({"gold": 1, "interest": 1, "index": 1, "similar": 1, 'adjcp': 1, 'vix':1}, index=[i])
df = df.append(line, ignore_index=False)
df = df.sort_index().reset_index(drop=True)
df["datadate"] = liste
liste = []
for i in range(0, len(df) // 2):
liste.append(i)
liste.append(i)
df.index = liste
df["datadate"] = pd.to_numeric(df["datadate"])
fold1 = df[(df.datadate > 20100103) & (df.datadate <= 20161231)]
fold2 = df[(df.datadate > 20170101) & (df.datadate <= 20171231)]
fold3 = df[(df.datadate > 20180101) & (df.datadate <= 20181231)]
fold4 = df[(df.datadate > 20190101) & (df.datadate <= 20191231)]
ind1, ind2, ind3 = [], [], []
longerfold = fold1.append(fold2)
for i in range(0, len(fold1) // 2):
ind1.append(i)
ind1.append(i)
for i in range(0, len(fold2) // 2):
ind2.append(i)
ind2.append(i)
for i in range(0, len(longerfold) // 2):
ind3.append(i)
ind3.append(i)
fold1.index = ind1
fold2.index = ind2
try:
fold3.index = ind2[:len(fold3.index)]
fold4.index = ind2[:len(fold4.index)]
except ValueError:
fold3.index = ind2[:len(fold3.index)]+[len(fold2) // 2 +2,len(fold2) // 2+2,len(fold2) // 2 +3,len(fold2) // 2+3]
longerfold.index = ind3
return [[fold1, fold2, fold3], [longerfold, fold3, fold4]]
def merge_folds_test(fold1,fold2):
longerfold = fold1.append(fold2)
ind3 = []
for i in range(0, len(longerfold) // 2):
ind3.append(i)
ind3.append(i)
longerfold.index = ind3
return longerfold
def preprocess_repo2_corona(df,asset,Trading_env):
df = df.to_frame()
index = df.index.strftime('%Y-%m-%d')
index = [s.replace("-", "") for s in index]
liste = []
for i in index:
liste.append(i)
liste.append(i)
df_context = Trading_env.preprocess_context_data(asset, True)
df['gold'] = df_context.iloc[:,0]
df['interest'] = df_context.iloc[:,1]
df['index'] = df_context.iloc[:,2]
df['similar'] = df_context.iloc[:,3]
df['vix'] = df_context.iloc[:,4]
columns = df.columns.values.tolist()
columns[0] = 'adjcp'
df.columns = columns
#df = df.rename(columns={'AAPL': 'adjcp'})
df = df.reset_index(drop=True)
df.index = range(0, len(df) * 2, 2)
for i in range(1, len(df) * 2, 2):
line = | pd.DataFrame({"gold": 1, "interest": 1, "index": 1, "similar": 1, 'adjcp': 1, 'vix':1}, index=[i]) | pandas.DataFrame |
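# --- Illustrative sketch (not part of the preprocessing module above) ---
# The functions above duplicate every date so each trading day occupies two rows. The
# same effect can be obtained more directly with index.repeat(); shown here only as an
# alternative, on a tiny made-up frame.
import pandas as pd
df_toy = pd.DataFrame({"adjcp": [10.0, 11.0], "datadate": [20100104, 20100105]})
doubled = df_toy.loc[df_toy.index.repeat(2)].reset_index(drop=True)
# rows 0-3 now hold dates 20100104, 20100104, 20100105, 20100105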
from datetime import timedelta
import pytest
from pandas import PeriodIndex, Series, Timedelta, date_range, period_range, to_datetime
import pandas._testing as tm
class TestToTimestamp:
def test_to_timestamp(self):
index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
series = Series(1, index=index, name="foo")
exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC")
result = series.to_timestamp(how="end")
exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
assert result.name == "foo"
exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN")
result = series.to_timestamp(how="start")
tm.assert_index_equal(result.index, exp_index)
def _get_with_delta(delta, freq="A-DEC"):
return date_range(
to_datetime("1/1/2001") + delta,
to_datetime("12/31/2009") + delta,
freq=freq,
)
delta = timedelta(hours=23)
result = series.to_timestamp("H", "end")
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
delta = timedelta(hours=23, minutes=59)
result = series.to_timestamp("T", "end")
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
result = series.to_timestamp("S", "end")
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
index = period_range(freq="H", start="1/1/2001", end="1/2/2001")
series = Series(1, index=index, name="foo")
exp_index = date_range("1/1/2001 00:59:59", end="1/2/2001 00:59:59", freq="H")
result = series.to_timestamp(how="end")
exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
assert result.name == "foo"
def test_to_timestamp_raises(self, index):
# https://github.com/pandas-dev/pandas/issues/33327
ser = | Series(index=index, dtype=object) | pandas.Series |
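# --- Illustrative sketch (not part of the test module above) ---
# The tests above rely on the convention that to_timestamp(how="end") maps each period to
# its last instant, i.e. the start of the next period minus one nanosecond (behaviour of
# the pandas version these tests target).
import pandas as pd
pi = pd.period_range("2001", periods=2, freq="A-DEC")
ends = pi.to_timestamp(how="end")
assert ends[0] == pd.Timestamp("2002-01-01") - pd.Timedelta(1, "ns")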
import pytest
import numpy as np
import pandas as pd
from pandas._testing import assert_frame_equal
from wetterdienst.dwd.util import (
coerce_field_types,
build_parameter_set_identifier,
)
from wetterdienst.util.enumeration import parse_enumeration_from_template
from wetterdienst.dwd.observations import (
DWDObservationPeriod,
DWDObservationResolution,
DWDObservationParameterSet,
)
from wetterdienst.exceptions import InvalidEnumeration
def test_parse_enumeration_from_template():
assert (
parse_enumeration_from_template("climate_summary", DWDObservationParameterSet)
== DWDObservationParameterSet.CLIMATE_SUMMARY
)
assert (
parse_enumeration_from_template("kl", DWDObservationParameterSet)
== DWDObservationParameterSet.CLIMATE_SUMMARY
)
with pytest.raises(InvalidEnumeration):
parse_enumeration_from_template("climate", DWDObservationParameterSet)
def test_coerce_field_types():
df = pd.DataFrame(
{
"QN": ["1"],
"RS_IND_01": ["1"],
"DATE": ["1970010100"],
"END_OF_INTERVAL": ["1970010100:00"],
"V_VV_I": ["P"],
}
)
expected_df = pd.DataFrame(
{
"QN": pd.Series([1], dtype=pd.Int64Dtype()),
"RS_IND_01": pd.Series([1], dtype=pd.Int64Dtype()),
"DATE": [pd.Timestamp("1970-01-01")],
"END_OF_INTERVAL": [pd.Timestamp("1970-01-01")],
"V_VV_I": pd.Series(["P"], dtype= | pd.StringDtype() | pandas.StringDtype |
import pytest
from cellrank.tl._colors import _map_names_and_colors, _create_categorical_colors
import numpy as np
import pandas as pd
from pandas.api.types import is_categorical_dtype
from matplotlib.colors import is_color_like
class TestColors:
def test_create_categorical_colors_too_many_colors(self):
with pytest.raises(ValueError):
_create_categorical_colors(1000)
def test_create_categorical_colors_no_categories(self):
c = _create_categorical_colors(0)
assert c == []
def test_create_categorical_colors_neg_categories(self):
with pytest.raises(RuntimeError):
_create_categorical_colors(-1)
def test_create_categorical_colors_normal_run(self):
colors = _create_categorical_colors(62)
assert len(colors) == 62
assert all(map(lambda c: isinstance(c, str), colors))
assert all(map(lambda c: is_color_like(c), colors))
class TestMappingColors:
def test_mapping_colors_not_categorical(self):
query = pd.Series(["foo", "bar", "baz"], dtype="str")
reference = pd.Series(["foo", np.nan, "bar", "baz"], dtype="category")
with pytest.raises(TypeError):
_map_names_and_colors(reference, query)
def test_mapping_colors_invalid_size(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", np.nan, "bar", "baz"], dtype="category")
with pytest.raises(ValueError):
_map_names_and_colors(reference, query)
def test_mapping_colors_different_index(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category", index=[2, 3, 4])
reference = pd.Series(["foo", "bar", "baz"], dtype="category", index=[1, 2, 3])
with pytest.raises(ValueError):
_map_names_and_colors(reference, query)
def test_mapping_colors_invalid_colors(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
with pytest.raises(ValueError):
_map_names_and_colors(
reference, query, colors_reference=["red", "green", "foo"]
)
def test_mapping_colors_too_few_colors(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
with pytest.raises(ValueError):
_map_names_and_colors(reference, query, colors_reference=["red", "green"])
def test_mapping_colors_simple_1(self):
x = pd.Series(["a", "b", np.nan, "b", np.nan]).astype("category")
y = pd.Series(["b", np.nan, np.nan, "d", "a"]).astype("category")
expected = pd.Series(["a_1", "a_2", "b"])
expected_index = pd.Index(["a", "b", "d"])
res = _map_names_and_colors(x, y)
assert isinstance(res, pd.Series)
np.testing.assert_array_equal(res.values, expected.values)
np.testing.assert_array_equal(res.index.values, expected_index.values)
def test_mapping_colors_simple_2(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
res = _map_names_and_colors(reference, query)
assert isinstance(res, pd.Series)
assert len(res) == 3
assert is_categorical_dtype(res)
def test_mapping_colors_simple_colors(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
res, c = _map_names_and_colors(
reference, query, colors_reference=["red", "green", "blue"]
)
assert isinstance(res, pd.Series)
assert len(res) == 3
assert is_categorical_dtype(res)
assert isinstance(c, list)
assert c == ["#ff0000", "#008000", "#0000ff"]
def test_mapping_colors_too_many_colors(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
res, c = _map_names_and_colors(
reference, query, colors_reference=["red", "green", "blue", "black"]
)
assert isinstance(res, pd.Series)
assert len(res) == 3
assert is_categorical_dtype(res)
assert isinstance(c, list)
assert c == ["#ff0000", "#008000", "#0000ff"]
def test_mapping_colors_different_color_representation(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
res, c = _map_names_and_colors(
reference, query, colors_reference=[(1, 0, 0), "green", (0, 0, 1, 0)]
)
assert isinstance(res, pd.Series)
assert len(res) == 3
assert is_categorical_dtype(res)
assert isinstance(c, list)
assert c == ["#ff0000", "#008000", "#0000ff"]
def test_mapping_colors_non_unique_colors(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
res, c = _map_names_and_colors(
reference, query, colors_reference=["red", "red", "red"]
)
assert isinstance(res, pd.Series)
assert len(res) == 3
assert | is_categorical_dtype(res) | pandas.api.types.is_categorical_dtype |
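# --- Illustrative sketch (not part of the tests above) ---
# The color assertions above ("red" -> "#ff0000", etc.) follow matplotlib's conversion
# of named colors and RGB(A) tuples to hex strings:
from matplotlib.colors import to_hex
assert to_hex("red") == "#ff0000"
assert to_hex("green") == "#008000"
assert to_hex((0, 0, 1, 0)) == "#0000ff"  # alpha is dropped unless keep_alpha=True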
# Recorder that records agent states as dataframes and also stores a carla recording, in synchronous mode
#!/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import glob
import os
import sys
import pandas as pd
from tqdm import tqdm
from pathlib import Path
CARLA_VERSION = "0.9.11"
try:
# sys.path.append("./libs/carla-0.9.9-py3.7-linux-x86_64.egg")
if CARLA_VERSION == "0.9.9":
sys.path.append("./libs/carla-0.9.9-py3.7-linux-x86_64.egg")
elif CARLA_VERSION == "0.9.11":
sys.path.append("./libs/carla-0.9.11-py3.7-linux-x86_64.egg")
except IndexError:
pass
import carla
import logging
import pathlib
import click
current_dir = pathlib.Path(__file__).parent.absolute()
def get_metadata(actor, frame_id):
type_id = actor.type_id
def splitCarlaVec(vect):
return vect.x, vect.y, vect.z
id = actor.id
# clsname = ClientSideBoundingBoxes.get_class_name(actor)
tf = actor.get_transform()
roll, pitch, yaw = tf.rotation.roll, tf.rotation.pitch, tf.rotation.yaw
loc = actor.get_location()
pos_x, pos_y, pos_z = splitCarlaVec(loc)
try:
bbox3d = actor.bounding_box
bbox3d_offset_x, bbox3d_offset_y, bbox3d_offset_z = splitCarlaVec(
bbox3d.location
)
bbox3d_extent_x, bbox3d_extent_y, bbox3d_extent_z = splitCarlaVec(bbox3d.extent)
except:
bbox3d_offset_x, bbox3d_offset_y, bbox3d_offset_z = None, None, None
bbox3d_extent_x, bbox3d_extent_y, bbox3d_extent_z = None, None, None
velocity_x, velocity_y, velocity_z = splitCarlaVec(actor.get_velocity())
acc_x, acc_y, acc_z = splitCarlaVec(actor.get_acceleration())
angular_vel_x, angular_vel_y, angular_vel_z = splitCarlaVec(
actor.get_angular_velocity()
)
try:
# need to do this because Carla's Actor object doesnt support getattr
traffic_light_state = actor.state.name
except:
traffic_light_state = None
return (
frame_id,
id,
type_id,
pos_x,
pos_y,
pos_z,
roll,
pitch,
yaw,
velocity_x,
velocity_y,
velocity_z,
acc_x,
acc_y,
acc_z,
angular_vel_x,
angular_vel_y,
angular_vel_z,
bbox3d_offset_x,
bbox3d_offset_y,
bbox3d_offset_z,
bbox3d_extent_x,
bbox3d_extent_y,
bbox3d_extent_z,
traffic_light_state,
)
def run(client, round_name, dest_folder="", session_duration_sec=10):
# num_vehicles = 70
# safe = True # avoid spawning vehicles prone to accidents"
actor_list = []
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
dest_folder = Path(dest_folder)
try:
SESSION_DURATION = session_duration_sec # seconds # TODO is it possible to read this from the carla recording file? or an external config?
FPS = 5
DELTA_T = 1 / FPS
# RECORDING_FILENAME = ""
START_TIME = 0.0
DURATION = 0.0
# client.set_timeout(2.0)
world = client.get_world()
# blueprints = world.get_blueprint_library().filter("vehicle.*")
# traffic_manager = client.get_trafficmanager()
settings = client.get_world().get_settings()
if not settings.synchronous_mode:
# traffic_manager.set_synchronous_mode(True)
synchronous_master = True
settings.synchronous_mode = True
settings.fixed_delta_seconds = DELTA_T
client.get_world().apply_settings(settings)
else:
synchronous_master = False
session_recording = f"{round_name}"
destination_filename = dest_folder / Path(session_recording)
world.tick()
# fmt: off
df_columns = [
"frame_id", "id", "type_id", "pos_x", "pos_y", "pos_z", "roll", "pitch", "yaw",
"velocity_x", "velocity_y", "velocity_z", "acc_x", "acc_y", "acc_z",
"angular_vel_x", "angular_vel_y", "angular_vel_z",
"bbox3d_offset_x", "bbox3d_offset_y", "bbox3d_offset_z",
"bbox3d_extent_x", "bbox3d_extent_y", "bbox3d_extent_z", "traffic_light_color",
]
# fmt: on
# get all non vehicle agents
actors = world.get_actors()
non_vehicles = [
x
for x in actors
if ("vehicle" not in x.type_id and "traffic_light" not in x.type_id)
] # signs, traffic lights etc
frame_id = 0
df_arr = []
non_vehicle_arr = [get_metadata(actor, frame_id) for actor in non_vehicles]
df_arr += non_vehicle_arr
pbar = tqdm(total=FPS * SESSION_DURATION)
while frame_id < (FPS * SESSION_DURATION):
actors = world.get_actors()
vehicles_and_lights = [
x
for x in actors
if "vehicle" in x.type_id or "traffic_light" in x.type_id
]
metadata_arr = [
get_metadata(actor, frame_id) for actor in vehicles_and_lights
]
df_arr += metadata_arr
frame_id += 1
pbar.update(1)
world.tick()
df = | pd.DataFrame(df_arr, columns=df_columns) | pandas.DataFrame |
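# --- Illustrative sketch (not part of the recorder above) ---
# The recorder accumulates one tuple per actor per frame and converts the whole list into
# a DataFrame in a single call at the end; the same pattern on a toy schema:
import pandas as pd
rows = [(0, 17, "vehicle.audi.tt", 1.0, 2.0), (1, 17, "vehicle.audi.tt", 1.5, 2.1)]
toy_df = pd.DataFrame(rows, columns=["frame_id", "id", "type_id", "pos_x", "pos_y"])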
import numpy as np
import pandas as pd
class make_fisher_info_matrix():
def __init__(self, dataDir, testCosts):
self.txset = pd.read_csv(f"{ dataDir }/ratrtpcrabvec.csv")
self.highs = pd.read_csv(f"{ dataDir }/ratrtpcrabhighs.csv")
self.highs = self.highs.drop(columns=['Infection.State']).to_numpy()
self.testCosts = testCosts
self.Spec = [0.975,0.97,0.977] # Spec is the specificity values for 0=RAT,1=RTPCR,2=AB
self.Sen = [0.5,0.95,0.921] # Sen are the sensitivity values for 0=RAT,1=RTPCR,2=AB
self.ttypevalid, self.patternCost = self.getCosting()
self.patternCost = self.patternCost[1:]
self.lenw = len(self.patternCost)
def set_sensitivities(self, senRAT, senRTPCR, senIGG):
self.Sen = [senRAT, senRTPCR, senIGG]
def set_specificities(self, specRAT, specRTPCR, specIGG):
self.Spec = [specRAT, specRTPCR, specIGG]
def get_lenw(self):
return self.lenw
def get_patternCost(self):
return self.patternCost
def get_ttypevalid(self):
return self.ttypevalid
def getCosting(self):
ttypevalid = self.txset.copy()
#null value based values
ttypevalid['RAT'] = 1 - ttypevalid['RAT'].apply(lambda x: 1 if pd.isna(x) else 0)
ttypevalid['RTPCR'] = 1 - ttypevalid['RTPCR'].apply(lambda x: 1 if pd.isna(x) else 0)
ttypevalid['AB'] = 1 - ttypevalid['AB'].apply(lambda x: 1 if | pd.isna(x) | pandas.isna |
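# --- Illustrative sketch (not part of make_fisher_info_matrix) ---
# The "1 - apply(... pd.isna ...)" pattern above builds a took-the-test indicator; the
# same flag can be written directly with notna(), shown here on a toy column:
import pandas as pd
col = pd.Series([0.7, None, 1.0])
flag = col.notna().astype(int)  # 1, 0, 1
assert flag.tolist() == (1 - col.apply(lambda x: 1 if pd.isna(x) else 0)).tolist()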
import shutil
import os
import numpy as np
import pandas as pd
import lib.scoring as scoring
from decimal import Decimal
from pathlib import Path
from Crypto.Cipher import PKCS1_OAEP, AES
from Crypto.PublicKey import RSA
from lib.blockchain import Challenge, Participant
def compute_error(participant: Participant, force=False) -> float:
"""computes the RMSE of a participant's prediction w.r.t. correct values"""
challenge = participant.challenge
competition = challenge.competition
challenge_scores_file = challenge.get_challenge_dir().joinpath("_challenge_scores.csv")
if challenge_scores_file.exists() and not force:
df = pd.read_csv(challenge_scores_file)
return df.error.loc[df.address == participant.address].values[0]
# check if requested challenge is not the last
if challenge.get_phase() != 4:
raise IndexError(f"challenge {challenge.number} is not closed")
if challenge.number == competition.get_latest_challenge_number():
raise IndexError(f"no challenge after challenge {challenge.number}")
next_challenge = competition.get_challenge(challenge.number + 1)
if next_challenge.get_phase() == 0:
raise IndexError(f"dataset not available for challenge {challenge.number}")
# get requested assets and challenge final values
requested_assets = get_requested_assets(challenge, force=force)
values = get_values(next_challenge, force=force)
valued_assets = [asset for asset, value in values]
# if the participant did not submit a prediction return NaN
submitters = challenge.get_all_submitter_addresses() # TODO: a set would be better
if participant.address not in submitters:
return np.nan
# get the predictions, and if it cannot be decrypted of is not valid return NaN
try:
predictions_pairs = get_predictions(participant)
except (ValueError, AssertionError) as e:
print(f"exception: {str(e)}")
return np.nan
if not scoring.validate_prediction(requested_assets, predictions_pairs):
return np.nan
# sort predictions values according to assets list
predictions_by_asset = dict(predictions_pairs)
predictions = [predictions_by_asset[asset] for asset in valued_assets]
# return error computed from predictions and correct values
return scoring.compute_raw_score(challenge.number, predictions, [value for _, value in values])
def compute_challenge_scores(challenge: Challenge, force=False) -> {str: float}:
"""compute the list of all participant challenge scores for a given challenge"""
challenge_scores_file = challenge.get_challenge_dir().joinpath("_challenge_scores.csv")
if challenge_scores_file.exists() and not force:
df = pd.read_csv(challenge_scores_file)
return dict(zip(df.address, df.challenge_score))
# read all submitters to the challenge
participants = challenge.get_all_participants()
# compute all participant errors
errors = [compute_error(participant, force) for participant in participants]
# compute the normalized rank of each submitter from the error
challenge_scores = scoring.compute_challenge_scores(challenge.number, errors)
# save errors and challenge scores to file
df = pd.DataFrame()
df["address"] = [participant.address for participant in participants]
df["error"] = errors
df["challenge_score"] = challenge_scores
df.to_csv(challenge_scores_file, index=False)
# return a dictionary with the challenge scores
return {participant.address: score for participant, score in zip(participants, challenge_scores)}
def compute_challenge_score(participant: Participant, force=False) -> float:
"""computes the challenge score of a participant_address to a challenge"""
challenge_scores = compute_challenge_scores(participant.challenge, force)
return challenge_scores.get(participant.address, np.nan)
def compute_competition_score(participant: Participant, force=False) -> float:
"""computes the competition score of a participant_address to a challenge"""
challenge = participant.challenge
competition = challenge.competition
# gets last challenge scores of a submitter according to challenge window size
# and compute the competition score
window_size = scoring.get_window_size(challenge.number)
first_challenge = max(1, challenge.number - window_size + 1)
challenge_scores = [compute_challenge_score(challenge.get_participant(participant.address), force)
for challenge in [competition.get_challenge(challenge_number)
for challenge_number in range(first_challenge, challenge.number + 1)]]
return scoring.compute_competition_score(challenge.number, challenge_scores)
def compute_challenge_rewards(challenge: Challenge) -> {str, Decimal}:
"""computes the challenge rewards of challenge"""
challenge_pool = challenge.get_challenge_pool()
addresses = [staker.address for staker in challenge.get_all_participants()]
scores_by_address = compute_challenge_scores(challenge)
scores = [scores_by_address.get(address) for address in addresses]
rewards = scoring.compute_challenge_rewards(challenge.number, scores, challenge_pool)
return dict(zip(addresses, rewards))
def compute_competition_rewards(challenge: Challenge) -> {str, Decimal}:
"""computes the competition reward of challenge"""
competition_pool = challenge.get_competition_pool()
addresses = [staker.address for staker in challenge.get_all_participants()]
challenge_scores = [compute_challenge_score(participant) for participant in challenge.get_all_participants()]
competition_scores = [compute_competition_score(participant) for participant in challenge.get_all_participants()]
rewards = scoring.compute_competition_rewards(challenge.number, competition_scores,
challenge_scores, competition_pool)
return dict(zip(addresses, rewards))
def get_stakes(challenge: Challenge) -> {str, Decimal}:
"""returns the stakes of all participants to a challenge"""
return {staker.address: staker.get_stake() for staker in challenge.get_all_participants()}
def compute_stake_rewards(challenge: Challenge) -> {str, Decimal}:
"""computes all stake rewards in a challenge"""
stake_pool = challenge.get_stake_pool()
stakers = challenge.get_all_participants()
addresses = [staker.address for staker in stakers]
stakes = [staker.get_stake() for staker in stakers]
rewards = scoring.compute_stake_rewards(challenge.number, stakes, stake_pool)
return dict(zip(addresses, rewards))
def get_predictions(participant: Participant) -> [(str, Decimal)]:
"""gets all predictions of a submitter to a challenge"""
# get the submission cid and download the prediction file if needed
submission_zip_file = participant.download_submission_file(verbose=True)
# expand the file
submission_dir = submission_zip_file.parent.joinpath(submission_zip_file.stem)
shutil.unpack_archive(submission_zip_file, submission_dir)
# decrypt the file
encrypted_symmetric_key_file = submission_dir.joinpath("encrypted_symmetric_key.pem")
private_key_file = participant.challenge.download_private_key_file(verbose=True)
symmetric_key_file = submission_dir.joinpath("_symmetric_key.bin")
_asymmetric_decrypt_file(encrypted_symmetric_key_file, private_key_file, symmetric_key_file)
# decrypt and read the originator file and check if the originator is the submitter
encrypted_originator_file = submission_dir.joinpath("originator.bin")
originator_file = submission_dir.joinpath("_originator.txt")
_decrypt_file(encrypted_originator_file, symmetric_key_file, originator_file)
with open(originator_file, "r") as fin:
originator_address = fin.read().strip()
assert originator_address[2:].lower() == participant.address.lower(),\
f"originator != participant ({submission_zip_file.name})"
# check and decrypt the submission file
encrypted_prediction_filenames = [filename for filename in os.listdir(submission_dir)
if submission_dir.joinpath(filename).is_file()
and submission_dir.joinpath(filename).match("*.bin")
and filename not in ["originator.bin", "_symmetric_key.bin"]]
assert len(encrypted_prediction_filenames) == 1, f"multiple prediction files ({submission_zip_file.name})"
encrypted_prediction_file = submission_dir.joinpath(encrypted_prediction_filenames[0])
prediction_file = submission_dir.joinpath("_predictions.csv")
_decrypt_file(encrypted_prediction_file, symmetric_key_file, prediction_file)
# load the file
df = pd.read_csv(prediction_file, header=None)
# re-read skipping header if present
has_header = isinstance(df.iloc[0,1], str)
if has_header:
df = pd.read_csv(prediction_file)
assert len(df.columns) == 2, f"too many columns ({submission_zip_file.name})"
return df.to_records(index=False)
def get_requested_assets(challenge: Challenge, force=False) -> [str]:
"""gets assets to be predicted at the beginning of the challenge"""
# download and unzip the dataset
dataset_zip_file = challenge.download_dataset_file(force=force, verbose=True)
dataset_dir = dataset_zip_file.parent.joinpath(dataset_zip_file.stem)
if not dataset_dir.exists() or force:
shutil.unpack_archive(dataset_zip_file, dataset_dir)
# load the test dataset if needed in memory
assets_dataset_file = dataset_dir.joinpath("_assets.csv")
if not assets_dataset_file.exists() or force:
validation_dataset_file = dataset_dir.joinpath("dataset/validation_dataset.csv")
df = | pd.read_csv(validation_dataset_file) | pandas.read_csv |
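# --- Illustrative sketch (not part of the scoring module above) ---
# get_predictions above sniffs for an optional header row by checking whether the second
# column of the first row parses as text; a compact standalone version of the same idea
# (column names here are illustrative only):
import pandas as pd
def read_two_column_predictions(path):
    df = pd.read_csv(path, header=None)
    if isinstance(df.iloc[0, 1], str):  # header present -> re-read and skip it
        df = pd.read_csv(path)
    df.columns = ["asset", "prediction"]
    return df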
import numpy as np
import pandas as pd
from IPython import embed
from keras.models import load_model
from keras import backend as K
from qlknn.models.ffnn import determine_settings, _prescale, clip_to_bounds
def rmse(y_true, y_pred):
return K.sqrt(K.mean(K.square( y_true-y_pred )))
class KerasNDNN():
def __init__(self, model, feature_names, target_names,
feature_prescale_factor, feature_prescale_bias,
target_prescale_factor, target_prescale_bias,
feature_min=None, feature_max=None,
target_min=None, target_max=None,
target_names_mask=None,
):
self.model = model
self._feature_names = feature_names
self._target_names = target_names
self._feature_prescale_factor = feature_prescale_factor
self._feature_prescale_bias = feature_prescale_bias
self._target_prescale_factor = target_prescale_factor
self._target_prescale_bias = target_prescale_bias
if feature_min is None:
feature_min = pd.Series({var: -np.inf for var in self._feature_names})
self._feature_min = feature_min
if feature_max is None:
feature_max = pd.Series({var: np.inf for var in self._feature_names})
self._feature_max = feature_max
if target_min is None:
target_min = pd.Series({var: -np.inf for var in self._target_names})
self._target_min = target_min
if target_max is None:
target_max = pd.Series({var: np.inf for var in self._target_names})
self._target_max = target_max
self._target_names_mask = target_names_mask
def get_output(self, inp, clip_low=False, clip_high=False, low_bound=None, high_bound=None, safe=True, output_pandas=True, shift_output_by=0):
"""
This should accept a pandas dataframe, and should return a pandas dataframe
"""
nn_input, safe, clip_low, clip_high, low_bound, high_bound = \
determine_settings(self, inp, safe, clip_low, clip_high, low_bound, high_bound)
nn_input = _prescale(nn_input,
self._feature_prescale_factor.values,
self._feature_prescale_bias.values)
# Apply all NN layers an re-scale the outputs
branched_in = [nn_input.loc[:, self._branch1_names].values,
nn_input.loc[:, self._branch2_names].values]
nn_out = self.model.predict(branched_in) # Get prediction
output = (nn_out - np.atleast_2d(self._target_prescale_bias)) / np.atleast_2d(self._target_prescale_factor)
output -= shift_output_by
output = clip_to_bounds(output, clip_low, clip_high, low_bound, high_bound)
if output_pandas:
output = pd.DataFrame(output, columns=self._target_names)
if self._target_names_mask is not None:
output.columns = self._target_names_mask
return output
class Daniel7DNN(KerasNDNN):
_branch1_names = ['Ati', 'An', 'q', 'smag', 'x', 'Ti_Te']
_branch2_names = ['Ate']
def __init__(self, model, feature_names, target_names,
feature_prescale_factor, feature_prescale_bias,
target_prescale_factor, target_prescale_bias,
feature_min=None, feature_max=None,
target_min=None, target_max=None,
target_names_mask=None,
):
super().__init__(model, feature_names, target_names,
feature_prescale_factor, feature_prescale_bias,
target_prescale_factor, target_prescale_bias,
feature_min=feature_min, feature_max=feature_max,
target_min=target_min, target_max=target_max,
target_names_mask=target_names_mask,
)
self.shift = self.find_shift()
@classmethod
def from_files(cls, model_file, standardization_file):
model = load_model(model_file, custom_objects={'rmse': rmse})
stds = pd.read_csv(standardization_file)
feature_names = pd.Series(cls._branch1_names + cls._branch2_names)
target_names = | pd.Series(['efeETG_GB']) | pandas.Series |
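# --- Illustrative sketch (not part of the qlknn wrappers above) ---
# get_output above un-scales network outputs with (nn_out - bias) / factor, which pairs
# with a forward scaling of the form factor * x + bias (assumed here; _prescale itself
# lives in qlknn.models.ffnn). Toy round trip:
import numpy as np
factor, bias = np.array([2.0]), np.array([0.5])
x = np.array([[1.0], [3.0]])
scaled = factor * x + bias          # assumed forward transform
recovered = (scaled - bias) / factor
assert np.allclose(recovered, x)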
# coding: utf-8
# In[1]:
from __future__ import division, print_function, absolute_import
from past.builtins import basestring
import os
import gzip
import pandas as pd
from twip.constant import DATA_PATH
from gensim.models import TfidfModel, LsiModel
from gensim.corpora import Dictionary
# In[2]:
import matplotlib
from IPython.display import display, HTML
get_ipython().magic(u'matplotlib inline')
np = pd.np
display(HTML("<style>.container { width:100% !important; }</style>"))
pd.set_option('display.max_rows', 6)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 800)
pd.set_option('precision', 2)
get_ipython().magic(u'precision 4')
get_ipython().magic(u'pprint')
# In[3]:
from sklearn.linear_model import SGDRegressor
from sklearn.svm import SVR
# In[6]:
lsi = LsiModel.load(os.path.join(DATA_PATH, 'lsi100'))
lsi2 = LsiModel.load(os.path.join(DATA_PATH, 'lsi2'))
# In[7]:
with gzip.open(os.path.join(DATA_PATH, 'tweet_topic_vectors.csv.gz'), 'rb') as f:
topics = pd.DataFrame.from_csv(f, encoding='utf8')
topics = topics.fillna(0)
# In[8]:
dates = pd.read_csv(os.path.join(DATA_PATH, 'datetimes.csv.gz'), engine='python')
nums = pd.read_csv(os.path.join(DATA_PATH, 'numbers.csv.gz'), engine='python')
# In[9]:
nums.favorite_count.hist(bins=[0,1,2,3,4,5,7,10,15,25,40,100,1000])
from matplotlib import pyplot as plt
plt.yscale('log', nonposy='clip')
plt.xscale('log', nonposy='clip')
plt.xlabel('Number of Favorites')
plt.ylabel('Number of Tweets')
# When I first ran this, my dataframes weren't "aligned".
# So it's very important to check your datasets after every load.
# The correspondence between dates and topics and numerical features is critical for training!
# In[10]:
print(len(dates))
print(len(topics))
print(len(nums))
print(sum(nums.favorite_count >= 1))
# In[11]:
sum(nums.index == dates.index) == len(dates)
# In[12]:
sum(nums.index == topics.index) == len(dates)
# In[13]:
sgd = SGDRegressor()
sgd
# In[14]:
sgd = SGDRegressor().fit(topics.values, nums.favorite_count)
# Well, that was **much** faster...
# In[15]:
predicted_favorites = sgd.predict(topics.values)
predicted_favorites
# In[16]:
np.sum(predicted_favorites >= 1)
# Well that seems more "balanced" at least.
# And it's nice to have a continuous score.
# In[17]:
np.sum(nums.favorite_count.values >= 1)
# In[18]:
from pug.nlp.stats import Confusion
# In[19]:
results = | pd.DataFrame() | pandas.DataFrame |
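# --- Illustrative sketch (not part of the notebook above) ---
# The notebook compares predicted favorite counts against the >= 1 threshold before
# tabulating results; the binarization step on its own looks like this:
import numpy as np
predicted = np.array([0.2, 1.7, 0.9, 3.1])
actual = np.array([0, 2, 1, 0])
pred_label = predicted >= 1   # [False, True, False, True]
true_label = actual >= 1      # [False, True, True, False]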
import numpy as np
from pandas import DataFrame
import matplotlib.pyplot as plt
class ccor(object):
"""
Docstring for function ecopy.ccor
====================
Conducts canonical correlation analysis for two matrices
Y1 and Y2
Use
----
ccor(Y1, Y2, stand_1=False, stand_2=False, varNames_1=None, varNames_2=None, siteNames=None)
Returns an object of class ccor
Parameters
----------
Y1: A pandas.DataFrame or numpy.ndarray containing one set of response variables
Y2: A pandas.DataFrame or numpy.ndarray containing a second set of response variables
varNames_1: A list of variable names for matrix Y1. If None, inherits from column names of Y1
varNames_2: A list of variable names for matrix Y2. If None, inherits from column names of Y2
stand_1: Whether or not to standardize columns of Y1
stand_2: Whether or not to standardize columns of Y2
siteNames: A list of site/row names. If None, inherits from index of Y1
Attributes
---------
Scores1: Scores of each row of matrix Y1
Scores2: Scores of each row of matrix Y2
loadings1: Variable loadings of Y1
loadings2: Variable loadings of Y2
evals: Eigenvalues associated with each axis
Methods
--------
summary(): A summary table for each canonical axis
biplot(matrix=1, xax=1, yax=1)
matrix: Which matrix should be plotted. matrix=1 plots the scores and loadings
of matrix 1, while matrix=2 plots the scores and loadings of matrix 2
xax: Which canonical axis should be on the x-axis
yax: Which canonical axis should be on the y-axis
Example
--------
import ecopy as ep
import numpy as np
Y1 = np.random.normal(size=20*5).reshape(20, 5)
Y2 = np.random.normal(size=20*3).reshape(20, 3)
cc = ep.ccor(Y1, Y2)
cc.summary()
cc.biplot()
"""
def __init__(self, Y1, Y2, varNames_1=None, varNames_2=None, stand_1=False, stand_2=False, siteNames=None):
if not isinstance(Y1, (DataFrame, np.ndarray)):
msg = 'Matrix Y1 must be a pandas.DataFrame or numpy.ndarray'
raise ValueError(msg)
if not isinstance(Y2, (DataFrame, np.ndarray)):
msg = 'Matrix Y2 must be a pandas.DataFrame or numpy.ndarray'
raise ValueError(msg)
if isinstance(Y2, DataFrame):
if Y2.isnull().any().any():
msg = 'Matrix Y2 contains null values'
raise ValueError(msg)
if isinstance(Y2, np.ndarray):
if Y2.dtype=='object':
msg = 'Matrix Y2 cannot be a numpy.ndarray with object dtype'
raise ValueError(msg)
if np.isnan(Y2).any():
msg = 'Matrix Y2 contains null values'
raise ValueError(msg)
if isinstance(Y1, DataFrame):
if Y1.isnull().any().any():
msg = 'Matrix Y1 contains null values'
raise ValueError(msg)
if (Y1.dtypes == 'object').any():
msg = 'Matrix Y1 can only contain numeric values'
raise ValueError(msg)
if isinstance(Y1, np.ndarray):
if np.isnan(Y1).any():
msg = 'Matrix Y1 contains null values'
raise ValueError(msg)
if varNames_1 is None:
if isinstance(Y1, DataFrame):
varNames_1 = Y1.columns
elif isinstance(Y1, np.ndarray):
varNames_1 = ['Y1 {0}'.format(x) for x in range(1, Y1.shape[1]+1)]
if varNames_2 is None:
if isinstance(Y2, DataFrame):
varNames_2 = Y2.columns
elif isinstance(Y2, np.ndarray):
varNames_2 = ['Y2 {0}'.format(x) for x in range(1, Y2.shape[1]+1)]
if siteNames is None:
if isinstance(Y1, DataFrame):
siteNames = Y1.index.values
elif isinstance(Y1, np.ndarray):
siteNames = ['Site {0}'.format(x) for x in range(1, Y1.shape[0]+1)]
if Y1.shape[0] != Y2.shape[0]:
msg = 'Matrices must have same number of rows'
raise ValueError(msg)
Y1 = np.array(Y1)
Y2 = np.array(Y2)
if stand_1:
Y1 = (Y1 - Y1.mean(axis=0)) / Y1.std(axis=0)
if stand_2:
Y2 = (Y2 - Y2.mean(axis=0)) / Y2.std(axis=0)
df = float(Y1.shape[0] - 1)
D1 = Y1 - Y1.mean(axis=0)
D2 = Y2 - Y2.mean(axis=0)
S1 = D1.T.dot(D1) * 1./df
S2 = D2.T.dot(D2) * 1./df
S12 = D1.T.dot(D2) * 1./df
Chol1 = np.linalg.pinv(np.linalg.cholesky(S1).T)
Chol2 = np.linalg.pinv(np.linalg.cholesky(S2).T)
K = Chol1.T.dot(S12).dot(Chol2)
V, W, U = np.linalg.svd(K)
U = U.T
CoefY1 = Chol1.dot(V)
CoefY2 = Chol2.dot(U)
self.Scores1 = DataFrame(Y1.dot(CoefY1), index=siteNames)
self.Scores2 = DataFrame(Y2.dot(CoefY2), index=siteNames)
self.loadings1 = np.corrcoef(Y1, self.Scores1, rowvar=0)[:Y1.shape[1], Y1.shape[1]:]
self.loadings2 = np.corrcoef(Y2, self.Scores2, rowvar=0)[:Y2.shape[1], Y2.shape[1]:]
axes1 = ['CA Axis {0}'.format(x) for x in range(1, self.loadings1.shape[1]+1)]
self.loadings1 = DataFrame(self.loadings1, index=varNames_1, columns=axes1)
axes2 = ['CA Axis {0}'.format(x) for x in range(1, self.loadings2.shape[1]+1)]
self.loadings2 = | DataFrame(self.loadings2, index=varNames_2, columns=axes2) | pandas.DataFrame |
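# --- Illustrative sketch (not part of ecopy.ccor) ---
# In canonical correlation analysis the singular values W of K are the canonical
# correlations, so the correlation between the first pair of score columns should match
# the first singular value. A quick check with the class above:
import numpy as np
import ecopy as ep
np.random.seed(0)
Y1 = np.random.normal(size=(20, 5))
Y2 = np.random.normal(size=(20, 3))
cc = ep.ccor(Y1, Y2)
r = np.corrcoef(cc.Scores1.iloc[:, 0], cc.Scores2.iloc[:, 0])[0, 1]
# r approximates the first canonical correlation (the first singular value of K)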
from collections import defaultdict
import pandas as pd
import numpy as np
import logging
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import LeaveOneOut
from sklearn.utils import shuffle
from sklearn import metrics
from module import labeling
file = "./data/metadata/questionnaire.csv"
metadf = pd.read_csv(file, sep=";", index_col="name")
def _video_of(name, nrs):
return list(map(lambda nr: metadf.loc[name]["video%d"%nr], nrs))
classifiers = {
"RandomForest": RandomForestClassifier(
n_estimators=100),
"AdaBoost": AdaBoostClassifier(
n_estimators=100),
"SVC": SVC(gamma='auto'),
"MostFrequent": DummyClassifier(strategy="most_frequent"),
"Random": DummyClassifier(strategy="uniform"),
}
labelConfig = labeling.SimpleConfig() #labeling.RankingThresholdConfig() # RankingThresholdConfig() SimpleConfig() OnlineConfig() ExpertConfig()
testlabelConfig = labeling.SimpleConfig() #labeling.RankingThresholdConfig() # RankingThresholdConfig() SimpleConfig() OnlineConfig()
videoCombis = [
[[1,3], [2,4]],
[[2,4], [1,3]],
[[1,4], [2,3]],
[[2,3], [1,4]],
#[[2,4],[2,4]],
#[[1,2,3,4],[1,2,3,4]]
]
def run_clf(clf, train_x, train_y, test_x, test_y, state):
logging.getLogger('distributed.utils_perf').setLevel(logging.CRITICAL)
clf.random_state = state
model = clf.fit(train_x, train_y)
test_yp = model.predict(test_x)
score = metrics.accuracy_score(test_y, test_yp)
cm = metrics.confusion_matrix(test_y, test_yp, labels=["tense", "relax"]) # NOTE: only works with exactly these two labels!
verteilung_klassen_true = pd.Series(test_y).value_counts(normalize=True)
verteilung_klassen_pred = | pd.Series(test_yp) | pandas.Series |
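# --- Illustrative sketch (not part of the experiment script above) ---
# The imports above include LeaveOneOut; a minimal leave-one-out loop around one of the
# classifiers-dict entries could look like this (X and y are placeholder data):
import numpy as np
from sklearn.model_selection import LeaveOneOut
from sklearn.dummy import DummyClassifier
from sklearn import metrics
X = np.random.rand(10, 3)
y = np.array(["tense", "relax"] * 5)
scores = []
for train_idx, test_idx in LeaveOneOut().split(X):
    clf = DummyClassifier(strategy="most_frequent").fit(X[train_idx], y[train_idx])
    scores.append(metrics.accuracy_score(y[test_idx], clf.predict(X[test_idx])))
mean_accuracy = float(np.mean(scores))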
import sys
import os
import warnings
import itertools
import subprocess
import numpy as np
import pandas as pd
import slack
import scipy.stats as st
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
from matplotlib.gridspec import GridSpec
exec(open(os.path.abspath(os.path.join(
os.path.dirname(__file__), os.path.pardir, 'visualisation', 'light_mode.py'))).read())
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from rotvel_correlation.simstats import Simstats
warnings.filterwarnings("ignore")
pathSave = '/cosma6/data/dp004/dc-alta2/C-Eagle-analysis-work/rotvel_correlation'
def bayesian_blocks(t):
"""Bayesian Blocks Implementation
By <NAME>. License: BSD
Based on algorithm outlined in http://adsabs.harvard.edu/abs/2012arXiv1207.5578S
Parameters
----------
t : ndarray, length N
data to be histogrammed
Returns
-------
bins : ndarray
array containing the (N+1) bin edges
Notes
-----
This is an incomplete implementation: it may fail for some
datasets. Alternate fitness functions and prior forms can
be found in the paper listed above.
"""
# copy and sort the array
t = np.sort(t)
N = t.size
# create length-(N + 1) array of cell edges
edges = np.concatenate([t[:1],
0.5 * (t[1:] + t[:-1]),
t[-1:]])
block_length = t[-1] - edges
# arrays needed for the iteration
nn_vec = np.ones(N)
best = np.zeros(N, dtype=float)
last = np.zeros(N, dtype=int)
#-----------------------------------------------------------------
# Start with first data cell; add one cell at each iteration
#-----------------------------------------------------------------
for K in range(N):
# Compute the width and count of the final bin for all possible
# locations of the K^th changepoint
width = block_length[:K + 1] - block_length[K + 1]
count_vec = np.cumsum(nn_vec[:K + 1][::-1])[::-1]
# evaluate fitness function for these possibilities
fit_vec = count_vec * (np.log(count_vec) - np.log(width))
fit_vec -= 4 # 4 comes from the prior on the number of changepoints
fit_vec[1:] += best[:K]
# find the max of the fitness: this is the K^th changepoint
i_max = np.argmax(fit_vec)
last[K] = i_max
best[K] = fit_vec[i_max]
#-----------------------------------------------------------------
# Recover changepoints by iteratively peeling off the last block
#-----------------------------------------------------------------
change_points = np.zeros(N, dtype=int)
i_cp = N
ind = N
while True:
i_cp -= 1
change_points[i_cp] = ind
if ind == 0:
break
ind = last[ind - 1]
change_points = change_points[i_cp:]
return edges[change_points]
def freedman_diaconis(x: np.ndarray) -> np.ndarray:
"""
The binwidth is proportional to the interquartile range (IQR) and inversely proportional to cube root of a.size.
Can be too conservative for small datasets, but is quite good for large datasets. The IQR is very robust to
outliers.
:param x: np.ndarray
The 1-dimensional x-data to bin.
:return: np.ndarray
The bins edges computed using the FD method.
"""
return np.histogram_bin_edges(x, bins='fd')
def equal_number_FD(x: np.ndarray) -> np.ndarray:
"""
Takes the number of bins computed using the FD method, but then selects the bin edges splitting
the dataset in bins with equal number of data-points.
:param x: np.ndarray
The 1-dimensional x-data to bin.
:return: np.ndarray
The bins edges computed using the equal-N method.
"""
nbin = len(np.histogram_bin_edges(x, bins='fd')) - 1
npt = len(x)
return np.interp(np.linspace(0, npt, nbin + 1),
np.arange(npt),
np.sort(x))
# Print some overall stats about the datasets
sys.stdout = open(os.devnull, 'w')
read_apertures = [Simstats(simulation_name='macsis', aperture_id=i).read_simstats() for i in range(20)]
sys.stdout = sys.__stdout__
for apid, stat in enumerate(read_apertures):
print(f"Aperture radius {apid} \t --> \t {stat['R_aperture'][0]/stat['R_200_crit'][0]:1.2f} R_200_crit")
del read_apertures
sys.stdout = open(os.devnull, 'w')
read_redshifts = [Simstats(simulation_name=i, aperture_id=0).read_simstats() for i in ['macsis', 'celr_e']]
sys.stdout = sys.__stdout__
for sim_name, stat in zip(['macsis', 'celr_e'], read_redshifts):
print('\n')
for zid, redshift in enumerate(stat.query('cluster_id == 0')['redshift_float']):
print(f"Simulation: {sim_name:<10s} Redshift {zid:2d} --> {redshift:1.2f}")
del read_redshifts
# Start with one single aperture
aperture_id = 9
simstats = list()
simstats.append(Simstats(simulation_name='macsis', aperture_id=aperture_id))
simstats.append(Simstats(simulation_name='celr_e', aperture_id=aperture_id))
simstats.append(Simstats(simulation_name='celr_b', aperture_id=aperture_id))
stats_out = [sim.read_simstats() for sim in simstats]
attrs = [sim.read_metadata() for sim in simstats]
print(f"\n{' stats_out DATASET INFO ':-^50s}")
print(stats_out[0].info())
# Create SQL query
query_COLLECTIVE = list()
query_COLLECTIVE.append('redshift_float < 0.02')
query_COLLECTIVE.append('M_200_crit > 10**9')
query_COLLECTIVE.append('thermodynamic_merging_index_T < 1')
stats_filtered = [stat.query(' and '.join(query_COLLECTIVE)) for stat in stats_out]
# Generate plots catalog
x_labels = ['redshift_float', 'R_500_crit', 'R_aperture', 'M_2500_crit', 'M_aperture_T',
'peculiar_velocity_T_magnitude', 'angular_momentum_T_magnitude',
'dynamical_merging_index_T', 'thermodynamic_merging_index_T',
'substructure_fraction_T']
y_labels = ['M_200_crit','rotTvelT','rot0rot4','rot1rot4','dynamical_merging_index_T',
'thermodynamic_merging_index_T','substructure_fraction_T']
data_entries = list(itertools.product(x_labels, y_labels))
x_labels = []
y_labels = []
for entry in data_entries:
if entry[0] is not entry[1]:
x_labels.append(entry[0])
y_labels.append(entry[1])
xscale = []
yscale = []
for x in x_labels:
scale = 'log' if 'M' in x or 'velocity' in x else 'linear'
xscale.append(scale)
for y in y_labels:
scale = 'log' if 'M' in y or 'velocity' in y else 'linear'
yscale.append(scale)
data_summary = {
'x' : x_labels,
'y' : y_labels,
'xscale' : xscale,
'yscale' : yscale,
}
summary = pd.DataFrame(data=data_summary, columns=data_summary.keys())
summary = summary[summary['y'].str.contains('rot')]
summary = summary[~summary['x'].str.contains('redshift')]
print(f"\n{' summary DATASET PLOTS INFO ':-^40s}\n", summary)
# Activate the plot factory
print(f"\n{' RUNNING PLOT FACTORY ':-^50s}")
data_entries = summary.to_dict('records')
x_binning = bayesian_blocks
print(f"[+] Binning method for x_data set to `{x_binning.__name__}`.")
for entry_index, data_entry in enumerate(data_entries):
filename = f"{data_entry['x'].replace('_', '')}_{data_entry['y'].replace('_', '')}_aperture{aperture_id}.pdf"
are_files = [os.path.isfile(os.path.join(pathSave, 'scatter', filename)),
os.path.isfile(os.path.join(pathSave, 'kdeplot', filename)),
os.path.isfile(os.path.join(pathSave, 'median', filename))]
#if any(are_files): continue
fig = plt.figure(figsize=(15, 10))
gs = GridSpec(2, 3, figure=fig)
gs.update(wspace=0., hspace=0.)
info_ax0 = fig.add_subplot(gs[0]); info_ax0.axis('off')
ax1 = fig.add_subplot(gs[1])
info_ax1 = fig.add_subplot(gs[2]); info_ax1.axis('off')
ax2 = fig.add_subplot(gs[3], sharex=ax1, sharey=ax1)
ax3 = fig.add_subplot(gs[4], sharex=ax2, sharey=ax2)
ax4 = fig.add_subplot(gs[5], sharex=ax3, sharey=ax3)
ax = [ax1, ax2, ax3, ax4]
plt.setp(ax[0].get_xticklabels(), visible=False)
plt.setp(ax[2].get_yticklabels(), visible=False)
plt.setp(ax[3].get_yticklabels(), visible=False)
xlims = [np.min(pd.concat(stats_filtered)[data_entry['x']]), np.max(pd.concat(stats_filtered)[data_entry['x']])]
ylims = [np.min(pd.concat(stats_filtered)[data_entry['y']]), np.max(pd.concat(stats_filtered)[data_entry['y']])]
# Unresolved issue with the Latex labels
# Some contain an extra `$` at the end of the string, which should not be there.
label_x = attrs[0]['Columns/labels'][data_entry['x']]
label_y = attrs[0]['Columns/labels'][data_entry['y']]
if label_x.endswith('$'): label_x = label_x.rstrip('$')
if label_y.endswith('$'): label_y = label_y.rstrip('$')
ax[0].set_ylabel(label_y)
ax[1].set_ylabel(label_y)
ax[1].set_xlabel(label_x)
ax[2].set_xlabel(label_x)
ax[3].set_xlabel(label_x)
simstats_palette = ['#1B9E77','#D95F02','#7570B3','#E7298A']
z_range = [np.min(pd.concat(stats_filtered)['redshift_float']),
np.max(pd.concat(stats_filtered)['redshift_float'])]
z_range_str = f'{z_range[0]:1.2f} - {z_range[1]:1.2f}' if round(z_range[0]) < round(z_range[1]) else f'{z_range[0]:1.2f}'
items_labels = [
f"{label_x.split(r'quad')[0]} -\\ {label_y.split(r'quad')[0]}",
f"Number of clusters: {np.sum([attr['Number of clusters'] for attr in attrs]):d}",
f"$z$ = {z_range_str:s}",
f"Aperture radius = {stats_filtered[0]['R_aperture'][0] / stats_filtered[0]['R_200_crit'][0]:2.2f} $R_{{200\\ true}}$"
]
info_ax0.text(0.03, 0.97, '\n'.join(items_labels), horizontalalignment='left', verticalalignment='top', size=15, transform=info_ax0.transAxes)
axisinfo_kwargs = dict(
horizontalalignment='right',
verticalalignment='top',
size=15
)
handles = [Patch(facecolor=simstats_palette[i], label=attrs[i]['Simulation'], edgecolor='k', linewidth=1) for i in range(len(attrs))]
leg = info_ax1.legend(handles=handles, loc='lower right', handlelength=1, fontsize=20)
info_ax1.add_artist(leg)
##################################################################################################
# SCATTER PLOTS #
##################################################################################################
plot_type = 'scatterplot'
for ax_idx, axes in enumerate(ax):
axes.set_xscale(data_entry['xscale'])
axes.set_yscale(data_entry['yscale'])
axes.tick_params(direction='in', length=5, top=True, right=True)
if ax_idx == 0:
axes.scatter(
pd.concat(stats_filtered)[data_entry['x']],
pd.concat(stats_filtered)[data_entry['y']],
s=5,
c=simstats_palette[ax_idx-1]
)
axes.text(0.95, 0.95, f'\\textsc{{Total}}', transform=axes.transAxes, **axisinfo_kwargs)
else:
axes.scatter(
stats_filtered[ax_idx-1][data_entry['x']],
stats_filtered[ax_idx-1][data_entry['y']],
s=5,
c=simstats_palette[ax_idx-1]
)
axes.text(0.95, 0.95, f"\\textsc{{{attrs[ax_idx-1]['Simulation']}}}", transform=axes.transAxes, **axisinfo_kwargs)
if not os.path.exists(os.path.join(pathSave, plot_type)):
os.makedirs(os.path.join(pathSave, plot_type))
plt.savefig(os.path.join(pathSave, plot_type, filename))
print(f"[+] Plot {entry_index:3d}/{len(data_entries)} Figure saved: {plot_type:>15s} >> {filename}")
##################################################################################################
# kde PLOTS #
##################################################################################################
plot_type = 'kdeplot'
fig_kde = fig
ax_kde = [fig_kde.axes[i] for i in [1, 3, 4, 5]]
for axes in ax_kde:
for artist in axes.lines + axes.collections:
artist.remove()
x_space = np.linspace(xlims[0], xlims[1], 101)
y_space = np.linspace(ylims[0], ylims[1], 101)
if data_entry['xscale'] == 'log':
x_space = np.linspace(np.log10(xlims[0]), np.log10(xlims[1]), 101)
if data_entry['yscale'] == 'log':
y_space = np.linspace(np.log10(ylims[0]), np.log10(ylims[1]), 101)
xx, yy = np.meshgrid(x_space, y_space)
positions = np.vstack([xx.ravel(), yy.ravel()])
for ax_idx, axes in enumerate(ax_kde):
if ax_idx == 0:
x = pd.concat(stats_filtered)[data_entry['x']]
y = pd.concat(stats_filtered)[data_entry['y']]
values = np.vstack([x if data_entry['xscale'] == 'linear' else np.log10(x), y])
kernel = st.gaussian_kde(values)
f = np.reshape(kernel(positions).T, xx.shape)
#cfset = axes.contourf(xx, yy, f, cmap='Blues')
cset = axes.contour(xx if data_entry['xscale'] == 'linear' else 10**xx, yy, f, colors=simstats_palette[ax_idx-1])
axes.scatter(x, y, s=3, c=simstats_palette[ax_idx-1], alpha=0.2)
axes.text(0.95, 0.95, f'\\textsc{{Total}}', transform=axes.transAxes, **axisinfo_kwargs)
else:
x = stats_filtered[ax_idx-1][data_entry['x']]
y = stats_filtered[ax_idx-1][data_entry['y']]
values = np.vstack([x if data_entry['xscale'] == 'linear' else np.log10(x), y])
kernel = st.gaussian_kde(values)
f = np.reshape(kernel(positions).T, xx.shape)
#cfset = axes.contourf(xx, yy, f, cmap='Blues')
cset = axes.contour(xx if data_entry['xscale'] == 'linear' else 10**xx, yy, f, colors=simstats_palette[ax_idx-1])
axes.scatter(x, y, s=3, c=simstats_palette[ax_idx-1], alpha=0.2)
axes.text(0.95, 0.95, f"\\textsc{{{attrs[ax_idx-1]['Simulation']}}}", transform=axes.transAxes, **axisinfo_kwargs)
if not os.path.exists(os.path.join(pathSave, plot_type)):
os.makedirs(os.path.join(pathSave, plot_type))
plt.savefig(os.path.join(pathSave, plot_type, filename))
print(f"[+] Plot {entry_index:3d}/{len(data_entries)} Figure saved: {plot_type:>15s} >> {filename}")
##################################################################################################
# MEDIAN PLOTS #
##################################################################################################
plot_type = 'median'
fig_median = fig
ax_median = [fig_median.axes[i] for i in [1, 3, 4, 5]]
for axes in ax_median:
for artist in axes.lines + axes.collections:
artist.remove()
perc84 = Line2D([], [], color='k', marker='^', linestyle='-.', markersize=12, label=r'$84^{th}$ percentile')
perc50 = Line2D([], [], color='k', marker='o', linestyle='-', markersize=12, label=r'median')
perc16 = Line2D([], [], color='k', marker='v', linestyle='--', markersize=12, label=r'$16^{th}$ percentile')
leg1 = fig_median.axes[2].legend(handles=[perc84, perc50, perc16], loc='center right', handlelength=2, fontsize=20)
fig_median.axes[2].add_artist(leg1)
xlims = [np.min(pd.concat(stats_filtered)[data_entry['x']]), np.max(pd.concat(stats_filtered)[data_entry['x']])]
ylims = [np.min(pd.concat(stats_filtered)[data_entry['y']]), np.max(pd.concat(stats_filtered)[data_entry['y']])]
x_space = np.linspace(np.log10(xlims[0]), np.log10(xlims[1]), 101)
y_space = np.linspace(ylims[0], ylims[1], 101)
for ax_idx, axes in enumerate(ax_median):
axes.set_xlim([xlims[0] - 0.1 * np.diff(xlims), xlims[1] + 0.1 * np.diff(xlims)])
axes.set_ylim([ylims[0] - 0.1 * np.diff(ylims), ylims[1] + 0.1 * np.diff(ylims)])
axes_to_data = axes.transAxes + axes.transData.inverted()
ax_frame = axes_to_data.transform
if ax_idx == 0:
x = pd.concat(stats_filtered)[data_entry['x']]
y = | pd.concat(stats_filtered) | pandas.concat |
import numpy as np
import pandas as pd
class MinMaxScaler:
"""
MinMaxScaler normalizes an input array or dataframe so that the values fall into the range [0, 1].
Note: This version of MinMaxScaler only accepts a pandas DataFrame or a numpy array as input.
:return: a multidimensional numpy array
"""
def fit(self,X):
if type(X)!=type(pd.DataFrame()) and type(X)!=type(np.array([1,2])): # checks for the datatype
raise TypeError(f"MinMaxScaler accepts either a dataframe or a numpy array as input. It does not accept {type(X)} as input dtype")
if type(X)==type( | pd.DataFrame() | pandas.DataFrame |
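# A minimal sketch (not part of the original snippet) of how fit/transform for
# such a MinMaxScaler is typically completed; the attribute names self.min_ and
# self.max_ are illustrative assumptions rather than the author's code.
class _MinMaxScalerSketch:
    def fit(self, X):
        X = np.asarray(X, dtype=float)
        # remember per-column minima and maxima for later scaling
        self.min_ = X.min(axis=0)
        self.max_ = X.max(axis=0)
        return self
    def transform(self, X):
        X = np.asarray(X, dtype=float)
        # avoid division by zero for constant columns
        span = np.where(self.max_ - self.min_ == 0, 1.0, self.max_ - self.min_)
        return (X - self.min_) / span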
import pandas as pd
import numpy as np
from matplotlib import pylab
from textwrap import fill
from . import univariate
def cross_table(variables, category1, category2, data, use_names=True):
"""
Gives a cross table of category1 and category2
Args:
variables: Variables class
category1: name of category
category2: name of category
data: structured data in pandas Data Frame
use_names: if True, use variable names instead of ids in the returned object
"""
# if category in ["country", "region", "district", "clinic"]:
# return data[category].value_counts()
if category1 not in variables.groups:
raise KeyError("Category1 does not exists")
if category2 not in variables.groups:
raise KeyError("Category1 does not exists")
ids1 = sorted(variables.groups[category1])
ids2 = sorted(variables.groups[category2])
if use_names:
columns = [variables.name(i) for i in ids1]
else:
columns = ids1
results = | pd.DataFrame(columns=columns) | pandas.DataFrame |
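# Illustrative sketch only: one way the empty cross table above could be filled,
# assuming `data` carries one 0/1 indicator column per variable id (that column
# layout is an assumption, not taken from the original module).
def _cross_table_fill_sketch(data, ids1, ids2, columns):
    results = pd.DataFrame(0, index=ids2, columns=columns)
    for i2 in ids2:
        for pos, i1 in enumerate(ids1):
            # count records flagged for both variables
            results.loc[i2, columns[pos]] = int(((data[i1] == 1) & (data[i2] == 1)).sum())
    return results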
import pandas as pd
class NewWave:
def __init__(self, data):
self.df = data
# ->Covid Tracker Shutdown Services
def tracker(self):
self.df["date"] = | pd.to_datetime(self.df["date"], format="%Y-%m-%d") | pandas.to_datetime |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import umap
from sklearn.preprocessing import StandardScaler
# load the dataframes and attach labels to the weller and wu set
ww = pd.read_csv(snakemake.input.ww, index_col=0)
jor = pd.read_csv(snakemake.input.jor, index_col=0)
labs = pd.read_csv(snakemake.input.labs, index_col=0)
labels = labs[['assembly_id', 'spore_forming']]
labels = labels.rename({'assembly_id' : 'genome'}, axis=1)
ww_labels = pd.merge(ww, labels, on='genome')
# Get all of this into array form
y = ww_labels['spore_forming'].values
X_ww = ww_labels.drop(['spore_forming', 'genome'], axis=1).values
ww_names = ww_labels['genome'].values
X_jor = jor.drop('genome', axis=1).values
jor_names = jor['genome'].values
# standardize
ss = StandardScaler()
jor_std = ss.fit_transform(X_jor)
ww_std = ss.transform(X_ww)
# fit UMAP
um = umap.UMAP()
jor_umap = um.fit_transform(jor_std)
ww_umap = um.transform(ww_std)
# save these reduced versions (because the UMAP transform takes a while)
ww_dict_umap = {'genome': ww_names, 'umap_0': ww_umap[:, 0],
'umap_1': ww_umap[:, 1], 'spore_forming': y}
jor_dict_umap = {'genome': jor_names, 'umap_0': jor_umap[:, 0],
'umap_1': jor_umap[:, 1]}
wwdf = pd.DataFrame(ww_dict_umap)
wwdf.to_csv(snakemake.output.ww_umap)
jordf = | pd.DataFrame(jor_dict_umap) | pandas.DataFrame |
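# matplotlib.pyplot is imported above but unused in this fragment; a plausible
# continuation (an assumption, not from the source) is an overlay scatter of the
# two embeddings, colouring the Weller & Wu genomes by spore formation.
fig, ax = plt.subplots(figsize=(6, 5))
ax.scatter(jor_umap[:, 0], jor_umap[:, 1], s=4, c='lightgrey', label='reference set (jor)')
ax.scatter(ww_umap[:, 0], ww_umap[:, 1], s=8, c=pd.factorize(y)[0], cmap='coolwarm',
           label='Weller & Wu (ww)')
ax.set_xlabel('UMAP 1')
ax.set_ylabel('UMAP 2')
ax.legend()
fig.savefig('umap_overlay.png', dpi=150)  # hypothetical output path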
import numpy as np
from scipy import optimize
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from scipy.stats import norm
import qng
class ErlangcEstimator(BaseEstimator):
""" Erlang-C formula for probability of wait in an M/M/c queue.
No parameters are actually fit. This is just an analytical formula implemented
as an sklearn Estimator so that it can be used in pipelines.
Parameters
----------
col_idx_arate : float
Column number in X corresponding to arrival rate
col_idx_meansvctime : float
Column number in X corresponding to mean service time
col_idx_numservers : int
Column number in X corresponding to number of servers (c) in system
"""
def __init__(self, col_idx_arate, col_idx_meansvctime, col_idx_numservers):
self.col_idx_arate = col_idx_arate
self.col_idx_meansvctime = col_idx_meansvctime
self.col_idx_numservers = col_idx_numservers
def fit(self, X, y=None):
"""Empty fit method since no parameters to be fit
Checks shapes of X, y and sets is_fitted_ to True.
Use ``predict`` to get predicted y values.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,) or (n_samples, n_outputs)
The target values (real numbers in
regression).
Returns
-------
self : object
Returns self.
"""
if y is not None:
X, y = check_X_y(X, y, accept_sparse=False)
else:
X = check_array(X, accept_sparse=False)
self.is_fitted_ = True
# `fit` should always return `self`
return self
def predict(self, X):
""" Compute Erlang-C using qng library
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input samples.
Returns
-------
y : ndarray, shape (n_samples,)
Erlang-C probability of wait for each input row.
"""
X = check_array(X, accept_sparse=False)
check_is_fitted(self, 'is_fitted_')
X_df = pd.DataFrame(X)
y = X_df.apply(
lambda x: qng.erlangc(x[self.col_idx_arate] * x[self.col_idx_meansvctime], int(x[self.col_idx_numservers])),
axis=1)
return np.array(y)
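# Usage sketch (not part of the original module; the column order in the toy
# array below is an assumption). Because the class follows the sklearn API it
# can be used stand-alone or as a step in an sklearn Pipeline, e.g.
#   erlang = ErlangcEstimator(col_idx_arate=0, col_idx_meansvctime=1, col_idx_numservers=2)
#   X_demo = np.array([[2.0, 0.5, 3.0], [1.0, 1.0, 2.0]])
#   p_wait = erlang.fit(X_demo).predict(X_demo)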
class LoadEstimator(BaseEstimator):
""" Load as approximation for mean occupancy
No parameters are actually fit. This is just an analytical formula implemented
as an sklearn Estimator so that it can be used in pipelines.
Parameters
----------
col_idx_arate : float
Column number in X corresponding to arrival rate
col_idx_meansvctime : float
Column number in X corresponding to mean service time
"""
def __init__(self, col_idx_arate, col_idx_meansvctime):
self.col_idx_arate = col_idx_arate
self.col_idx_meansvctime = col_idx_meansvctime
def fit(self, X, y=None):
"""Empty fit method since no parameters to be fit
Checks shapes of X, y and sets is_fitted_ to True.
Use ``predict`` to get predicted y values.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,) or (n_samples, n_outputs)
The target values (real numbers in
regression).
Returns
-------
self : object
Returns self.
"""
if y is not None:
X, y = check_X_y(X, y, accept_sparse=False)
else:
X = check_array(X, accept_sparse=False)
self.is_fitted_ = True
# `fit` should always return `self`
return self
def predict(self, X):
""" Compute load as arrival_rate * avg_svc_time
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input samples.
Returns
-------
y : ndarray, shape (n_samples,)
Estimated load (arrival rate times mean service time) for each input row.
"""
X = check_array(X, accept_sparse=False)
check_is_fitted(self, 'is_fitted_')
X_df = | pd.DataFrame(X) | pandas.DataFrame |
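# Sketch (an assumption, not the original code) of how the truncated predict
# above would plausibly finish, following the class docstring: the load is the
# arrival rate multiplied by the mean service time for each row of X.
def _load_predict_sketch(X_df, col_idx_arate, col_idx_meansvctime):
    y = X_df.apply(lambda row: row[col_idx_arate] * row[col_idx_meansvctime], axis=1)
    return np.array(y)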
import pandas as pd
import matplotlib.pyplot as plt
def teamSearch(teamName):
teams = pd.read_html("https://en.wikipedia.org/wiki/Wikipedia:WikiProject_National_Basketball_Association/National_Basketball_Association_team_abbreviations", header=0)
team_names = | pd.DataFrame(columns=["Abbreviation/Acronym", "Franchise"]) | pandas.DataFrame |
#Import necessary package
import requests
import re
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import datetime as dt
import configparser
import os
import json
#Configure parameter
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), 'config.ini'))
mall = config['general']['mall']
shoplistapi = config['api']['shoplistapi']
fnblistapi = config['api']['fnblistapi']
entertainmentlistapi = config['api']['entertainmentlistapi']
shopdetailurl = config['url']['shopdetailurl']
def getShopCategory():
#Create empty DataFrame for shop category
shopcategory = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 29 08:35:09 2019
@author: user
"""
# execute primary input data building script
# import build_input_res_heating
print('####################')
print('BUILDING INPUT DATA FOR INCLUDING DEMAND-SIDE RESPONSE, ENERGY EFFICIENCY AND DHW BOILERS')
print('####################')
import os
import itertools
import hashlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import grimsel.auxiliary.sqlutils.aux_sql_func as aql
import datetime
import seaborn as sns
from grimsel.auxiliary.aux_general import print_full
from grimsel.auxiliary.aux_general import translate_id
import config_local as conf
from grimsel.auxiliary.aux_general import expand_rows
base_dir = conf.BASE_DIR
data_path = conf.PATH_CSV
data_path_prv = conf.PATH_CSV + '_res_heating'
seed = 2
np.random.seed(seed)
db = conf.DATABASE
sc = conf.SCHEMA
#db = 'grimsel_1'
#sc = 'lp_input_ht_ee_dsm'
def append_new_rows(df, tb):
list_col = list(aql.get_sql_cols(tb, sc, db).keys())
aql.write_sql(df[list_col], db=db, sc=sc, tb=tb, if_exists='append')
def del_new_rows(ind, tb, df):
del_list = df[ind].drop_duplicates()
for i in ind:
del_list[i] = '%s = '%i + del_list[i].astype(str)
del_str = ' OR '.join(del_list.apply(lambda x: '(' + ' AND '.join(x) + ')', axis=1))
exec_strg = '''
DELETE FROM {sc}.{tb}
WHERE {del_str}
'''.format(tb=tb, sc=sc, del_str=del_str)
aql.exec_sql(exec_strg, db=db)
def replace_table(df, tb):
print('Replace table %s'%tb)
# list_col = list(aql.get_sql_cols(tb, sc, db).keys())
aql.write_sql(df, db=db, sc=sc, tb=tb, if_exists='replace')
def append_new_cols(df, tb):
#
list_col = list(aql.get_sql_cols(tb, sc, db).keys())
col_new = dict.fromkeys((set(df.columns.tolist()) - set(list_col)))
for key, value in col_new.items():
col_new[key] = 'DOUBLE PRECISION'
# col_new = dict.fromkeys((set(list_col[0].columns.tolist()) - set(list_col)),1)
aql.add_column(df_src=df,tb_tgt=[sc,tb],col_new=col_new,on_cols=list_col, db=db)
# %% DHW loads
dfload_arch_dhw = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dhw_dec.csv')
dfload_arch_dhw['DateTime'] = dfload_arch_dhw['DateTime'].astype('datetime64[ns]')
# dfload_arch_dhw = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dhw_dec')
dferg_arch_dhw = dfload_arch_dhw.groupby('nd_id')['erg_tot'].sum().reset_index()
dferg_arch_dhw['nd_id_new'] = dferg_arch_dhw.nd_id
dfload_arch_dhw_central = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dhw_cen.csv')
# dfload_arch_dhw_central = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dhw_cen')
dferg_arch_dhw_central = dfload_arch_dhw_central.groupby('nd_id')['erg_tot'].sum().reset_index()
dferg_arch_dhw_central['nd_id_new'] = dferg_arch_dhw_central.nd_id
# dfload_dhw_elec = pd.read_csv(os.path.join(base_dir,'../heat_dhw/dhw_el_load_night_charge.csv'),sep=';')
# dfload_dhw_elec['DateTime'] = pd.to_datetime(dfload_dhw_elec.DateTime)
# dfload_dhw_remove = pd.merge(dfload_arch_dhw,dfload_dhw_elec.drop(columns='dhw_mw'), on='DateTime' )
# dfload_dhw_remove = pd.merge(dfload_dhw_remove,dferg_arch_dhw.drop(columns='nd_id_new').rename(columns={'erg_tot':'erg_year'}),on='nd_id'
# ).assign(load_dhw_rem = lambda x: x.dhw_rel_load*x.erg_year)
# %% Central DHW loads
#Bau load
dfload_arch_dhw_central = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dhw_cen.csv')
# dfload_arch_dhw_central = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dhw_cen')
dfload_arch_dhw_central['erg_tot'] = dfload_arch_dhw_central.erg_tot/24 # MWh -> MW
dfload_arch_dhw_central['erg_tot_retr_1pc'] = dfload_arch_dhw_central.erg_tot # here already in MW previous line
dfload_arch_dhw_central['erg_tot_retr_2pc'] = dfload_arch_dhw_central.erg_tot # here already in MW previous line
dfload_arch_dhw_central = dfload_arch_dhw_central.set_index('DateTime')
dfload_arch_dhw_central.index = pd.to_datetime(dfload_arch_dhw_central.index)
#fossil load
dfload_arch_dhw_central_fossil = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dhw_cen_fossil.csv')
# dfload_arch_dhw_central_fossil = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dhw_cen_fossil')
dfload_arch_dhw_central_fossil['erg_tot_fossil'] = dfload_arch_dhw_central_fossil.erg_tot/24 # MWh -> MW
dfload_arch_dhw_central_fossil['erg_tot_retr_1pc_fossil'] = dfload_arch_dhw_central_fossil.erg_tot/24 # MWh -> MW
dfload_arch_dhw_central_fossil['erg_tot_retr_2pc_fossil'] = dfload_arch_dhw_central_fossil.erg_tot/24 # MWh -> MW
dfload_arch_dhw_central_fossil = dfload_arch_dhw_central_fossil.drop(columns='erg_tot')
dfload_arch_dhw_central_fossil = dfload_arch_dhw_central_fossil.set_index('DateTime')
dfload_arch_dhw_central_fossil.index = pd.to_datetime(dfload_arch_dhw_central_fossil.index)
dfload_arch_dhw_central = dfload_arch_dhw_central.reset_index()
dfload_arch_dhw_central = pd.merge(dfload_arch_dhw_central,dfload_arch_dhw_central_fossil,on=['index','doy','nd_id'])
dfload_arch_dhw_central = dfload_arch_dhw_central.set_index('DateTime')
dfload_arch_dhw_central.index = pd.to_datetime(dfload_arch_dhw_central.index)
# %% Separation for aw and ww heat pumps DHW central
dfload_arch_dhw_central_aw = dfload_arch_dhw_central.copy()
dfload_arch_dhw_central_aw[['erg_tot', 'erg_tot_fossil',
'erg_tot_retr_1pc', 'erg_tot_retr_2pc', 'erg_tot_retr_1pc_fossil',
'erg_tot_retr_2pc_fossil']] *= 0.615
dfload_arch_dhw_central_ww = dfload_arch_dhw_central.copy()
dfload_arch_dhw_central_ww[['erg_tot', 'erg_tot_fossil',
'erg_tot_retr_1pc', 'erg_tot_retr_2pc', 'erg_tot_retr_1pc_fossil',
'erg_tot_retr_2pc_fossil']] *= 0.385
# %% DSR loads
dfload_arch_dsr_sfh_1day = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dsr_sfh_1day.csv')
dfload_arch_dsr_mfh_1day = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dsr_mfh_1day.csv')
# dfload_arch_dsr_sfh_1day = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dsr_sfh_1day')
# dfload_arch_dsr_mfh_1day = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dsr_mfh_1day')
dfload_arch_dsr_sfh_1h = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dsr_sfh_1h.csv')
dfload_arch_dsr_sfh_1h['DateTime'] = dfload_arch_dsr_sfh_1h['DateTime'].astype('datetime64[ns]')
dfload_arch_dsr_mfh_1h = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dsr_mfh_1h.csv')
dfload_arch_dsr_mfh_1h['DateTime'] = dfload_arch_dsr_mfh_1h['DateTime'].astype('datetime64[ns]')
# dfload_arch_dsr_sfh_1h = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dsr_sfh_1h')
# dfload_arch_dsr_mfh_1h = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dsr_mfh_1h')
dfload_arch_dsr_1day = pd.concat([dfload_arch_dsr_sfh_1day,dfload_arch_dsr_mfh_1day])
dfload_arch_dsr_1day['erg_dsr_1day_MW'] = dfload_arch_dsr_1day.erg_dsr_1day/24 # MWh -> MW
dfload_arch_dsr_1h = pd.concat([dfload_arch_dsr_sfh_1h,dfload_arch_dsr_mfh_1h])
dfload_arch_dsr_1h_2015 = dfload_arch_dsr_1h.loc[dfload_arch_dsr_1h.nd_id.str.contains('2015')]
dfload_arch_dsr_1h_2015 = dfload_arch_dsr_1h_2015.reset_index(drop=True)
dferg_arch_dsr = dfload_arch_dsr_1day.groupby('nd_id')['erg_dsr_1day'].sum().reset_index()
# dferg_arch_dsr_1h = dfload_arch_dsr_1h.groupby('nd_id')['erg_dsr_1h'].sum().reset_index()
dferg_arch_dsr_1day = dfload_arch_dsr_1day.groupby('nd_id')['erg_dsr_1day'].sum().reset_index()
dferg_arch_dsr['nd_id_new'] = dferg_arch_dsr.nd_id.str[0:13]
dferg_arch_dsr_1day['nd_id_new'] = dferg_arch_dsr.nd_id.str[0:13]
dferg_arch_dsr_1day['erg_dsr_2015'] = dferg_arch_dsr_1day.loc[dferg_arch_dsr_1day.nd_id.str.contains('DSR_2015')].erg_dsr_1day
dferg_arch_dsr_1day['erg_dsr_2035'] = dferg_arch_dsr_1day.loc[dferg_arch_dsr_1day.nd_id.str.contains('DSR_2035')].erg_dsr_1day
dferg_arch_dsr_1day['erg_dsr_2050'] = dferg_arch_dsr_1day.loc[dferg_arch_dsr_1day.nd_id.str.contains('DSR_2050')].erg_dsr_1day
dferg_arch_dsr_1day['erg_dsr_best_2035'] = dferg_arch_dsr_1day.loc[dferg_arch_dsr_1day.nd_id.str.contains('DSR_best_2035')].erg_dsr_1day
# dferg_arch_dsr_1day = dferg_arch_dsr_1day.fillna(method='ffill').loc[dferg_arch_dsr_1day.nd_id.str.contains('2050')].reset_index(drop=True)
dferg_arch_dsr_1day = dferg_arch_dsr_1day.fillna(method='ffill').loc[dferg_arch_dsr_1day.nd_id.str.contains('DSR_best_2035')].reset_index(drop=True)
# %% EE loads just others (without DSR hourly demand)
dfload_arch_ee_sfh = | pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_ee_sfh_diff_wo_dsr.csv') | pandas.read_csv |
import glob
import logging
import os
import numpy as np
import pandas as pd
import ssbio.protein.sequence.utils.alignment
from ssbio.utils import percentage_to_float
log = logging.getLogger(__name__)
def sequence_checker(reference_seq_aln, structure_seq_aln,
seq_ident_cutoff=0.5, allow_missing_on_termini=0.2,
allow_mutants=False, allow_deletions=False,
allow_insertions=False, allow_unresolved=False):
"""Report if a structure's sequence meets coverage checks to a reference sequence.
First aligns a sequence from a chain of a PDB structure to "reference" sequence.
Then creates a DataFrame of results to check for everything.
Args:
reference_seq_aln (str, Seq, SeqRecord): Reference sequence, alignment form
structure_seq_aln (str, Seq, SeqRecord): Structure sequence, alignment form
seq_ident_cutoff (float): Percent sequence identity cutoff, in decimal form
allow_missing_on_termini (float): Percentage of the total length of the reference sequence which will be ignored
when checking for modifications. Example: if 0.1, and reference sequence is 100 AA, then only residues
5 to 95 will be checked for modifications.
allow_mutants (bool): If mutations should be allowed or checked for
allow_deletions (bool): If deletions should be allowed or checked for
allow_insertions (bool): If insertions should be allowed or checked for
allow_unresolved (bool): If unresolved residues should be allowed or checked for
Returns:
bool: If the structure's sequence meets the quality checks.
"""
reference_seq_aln = ssbio.protein.sequence.utils.cast_to_str(reference_seq_aln)
structure_seq_aln = ssbio.protein.sequence.utils.cast_to_str(structure_seq_aln)
results = ssbio.protein.sequence.utils.alignment.pairwise_alignment_stats(reference_seq_aln=reference_seq_aln,
other_seq_aln=structure_seq_aln)
# Check percent identity cutoff
stats_percent_ident = results['percent_identity']
log.debug('{}: percent identity'.format(stats_percent_ident))
if stats_percent_ident < seq_ident_cutoff:
log.debug('Alignment does not meet percent identity cutoff')
return False
else:
log.debug('Alignment meets percent identity cutoff')
# Get cutoff stuff ready
ref_seq_len = len(reference_seq_aln.replace('-', ''))
allow_missing_on_termini /= 2
# If any differences appear before start, they are ignored
start = ref_seq_len - (ref_seq_len * (1 - allow_missing_on_termini))
# If any differences appear after end, they are ignored
end = ref_seq_len - (ref_seq_len * allow_missing_on_termini)
no_deletions_in_pdb = False
no_insertions_in_pdb = False
no_mutants_in_pdb = False
no_unresolved_in_pdb = False
# Check everything
if not allow_deletions:
# Get indices of the deletions
deletions = results['deletions']
# If there are no deletions, that's great
if len(deletions) == 0:
log.debug('No deletion regions')
no_deletions_in_pdb = True
else:
log.debug('{} deletion region(s)'.format(len(deletions)))
# If the deletion appears before or after the cutoff, that's also great
for deletion in deletions:
if deletion[0][1] < start or deletion[0][0] > end:
no_deletions_in_pdb = True
log.debug('Deletion region(s) are not within structure core')
else:
no_deletions_in_pdb = False
log.debug('Deletions within structure')
log.debug('{} > {} or {} < {}'.format(deletion[0][1], start, deletion[0][0], end))
break
else:
no_deletions_in_pdb = True
if not allow_insertions:
# Get indices of the insertions
insertions = results['insertions']
# If there are no insertions, that's great
if len(insertions) == 0:
log.debug('No insertion regions')
no_insertions_in_pdb = True
else:
log.debug('{} insertion region(s)'.format(len(insertions)))
# If the insertion appears before or after the cutoff, that's also great
for insertion in insertions:
if insertion[0][1] < start or insertion[0][0] > end:
no_insertions_in_pdb = True
log.debug('Insertion region(s) are not within structure core')
else:
no_insertions_in_pdb = False
log.debug('Insertion regions within structure')
break
else:
no_insertions_in_pdb = True
if not allow_mutants:
# Get indices of the mutants
mutations_full = results['mutations']
mutations = [x[1] for x in mutations_full]
# If there are no mutants, that's great
if len(mutations) == 0:
log.debug('No point mutations')
no_mutants_in_pdb = True
else:
log.debug('{} point mutation(s)'.format(len(mutations)))
# If the mutant appears before or after the cutoff, that's also great
for mutation in mutations:
if mutation < start or mutation > end:
no_mutants_in_pdb = True
log.debug('Mutation region(s) are not within structure core')
else:
no_mutants_in_pdb = False
log.debug('Mutation regions within structure')
break
else:
no_mutants_in_pdb = True
if not allow_unresolved:
# Get indices of the unresolved residues
unresolved = results['unresolved']
# If there are no unresolved, that's great
if len(unresolved) == 0:
log.debug('No unresolved residues')
no_unresolved_in_pdb = True
else:
log.debug('{} unresolved residue(s)'.format(len(unresolved)))
# If the unresolved residue appears before or after the cutoff, that's also great
for unr in unresolved:
if unr < start or unr > end:
no_unresolved_in_pdb = True
log.debug('Unresolved region(s) are not within structure core')
else:
no_unresolved_in_pdb = False
log.debug('Unresolved residues within structure')
break
else:
no_unresolved_in_pdb = True
if no_deletions_in_pdb and no_insertions_in_pdb and no_mutants_in_pdb and no_unresolved_in_pdb:
return True
else:
return False
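# Example of intended use (illustrative only; the aligned strings below are made
# up): the call returns a bool saying whether the aligned structure chain passes
# the identity cutoff and the requested modification checks.
#   passes = sequence_checker(reference_seq_aln='MKT-AILV', structure_seq_aln='MKTQAILV',
#                             seq_ident_cutoff=0.5, allow_insertions=True)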
def parse_procheck(quality_directory):
"""Parses all PROCHECK files in a directory and returns a Pandas DataFrame of the results
Args:
quality_directory: path to directory with PROCHECK output (.sum files)
Returns:
Pandas DataFrame: Summary of PROCHECK results
"""
# TODO: save as dict instead, offer df as option
# TODO: parse for one file instead
procheck_summaries = glob.glob(os.path.join(quality_directory, '*.sum'))
if len(procheck_summaries) == 0:
return pd.DataFrame()
all_procheck = {}
for summ in procheck_summaries:
structure_id = os.path.basename(summ).split('.sum')[0]
procheck_dict = {}
with open(summ) as f_in:
lines = (line.rstrip() for line in f_in) # All lines including the blank ones
lines = (line for line in lines if line) # Non-blank lines
for line in lines:
if len(line.split()) > 1:
if line.split()[1] == 'Ramachandran':
procheck_dict['procheck_rama_favored'] = percentage_to_float(line.split()[3])
procheck_dict['procheck_rama_allowed'] = percentage_to_float(line.split()[5])
procheck_dict['procheck_rama_allowed_plus'] = percentage_to_float(line.split()[7])
procheck_dict['procheck_rama_disallowed'] = percentage_to_float(line.split()[9])
if line.split()[1] == 'G-factors':
procheck_dict['procheck_gfac_dihedrals'] = line.split()[3]
procheck_dict['procheck_gfac_covalent'] = line.split()[5]
procheck_dict['procheck_gfac_overall'] = line.split()[7]
all_procheck[structure_id] = procheck_dict
DF_PROCHECK = | pd.DataFrame.from_dict(all_procheck, orient='index') | pandas.DataFrame.from_dict |
import numpy as np
import pandas as pd
#import sys
#sys.path.append("../")
def read_round(filename):
mask = [False, False, True, True, False, False, False, True, True]
header = ["epic_number", "mix_mean", "mix_sd", "mix_neff", "mix_rhat", "logdeltaQ_mean", "logdeltaQ_sd", "logdeltaQ_neff", "logdeltaQ_rhat",
"logQ0_mean", "logQ0_sd", "logQ0_neff", "logQ0_rhat", "logperiod_mean", "logperiod_sd", "logperiod_neff", "logperiod_rhat",
"logamp_mean", "logamp_sd", "logamp_neff", "logamp_rhat", "logs2_mean", "logs2_sd", "logs2_neff", "logs2_rhat", "acfpeak"]
with open(filename, "r") as file:
lines = file.readlines()
nstars = (np.int((len(lines)/7)))
data = np.zeros((nstars, 26))
for i in range(nstars):
data[i, 0] = lines[7*i].split()[0]
for j in range(6):
data[i, 4*j+1:4*(j+1)+1] = np.array(lines[7*i+j].split())[mask]
acfpeak = lines[7*i+6].split()[2]
if "None" in acfpeak:
data[i, 25] = np.nan
else:
data[i, 25] = acfpeak
return | pd.DataFrame(data=data, columns=header) | pandas.DataFrame |
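# Usage sketch (the file name below is a placeholder): each star occupies seven
# lines of the round file, so the returned frame holds one row per star with the
# sampler summaries and the ACF peak.
#   df_round = read_round('round_01.txt')
#   print(df_round[['epic_number', 'logperiod_mean', 'acfpeak']].head())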
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period("2011-03", freq="M")
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period("2011-03", freq="M")
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
"B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = pd.Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame(
{
"A": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
"B": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
}
)
assert df2["A"].dtype == "Period[M]"
assert df2["B"].dtype == "Period[M]"
exp = pd.DataFrame(
{
"A": np.array([4 * off, 4 * off], dtype=object),
"B": np.array([16 * off, 16 * off], dtype=object),
}
)
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
# PeriodIndex - other is defined for integers, timedelta-like others,
# and PeriodIndex (with matching freq)
def test_parr_add_iadd_parr_raises(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
# An earlier implementation of PeriodIndex addition performed
# a set operation (union). This has since been changed to
# raise a TypeError. See GH#14164 and GH#13077 for historical
# reference.
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
def test_pi_sub_isub_pi(self):
# GH#20049
# For historical reference see GH#14164, GH#13077.
# PeriodIndex subtraction originally performed set difference,
# then changed to raise TypeError before being implemented in GH#20049
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
off = rng.freq
expected = pd.Index([-5 * off] * 5)
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_sub_pi_with_nat(self):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = rng[1:].insert(0, pd.NaT)
assert other[1:].equals(rng[1:])
result = rng - other
off = rng.freq
expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
tm.assert_index_equal(result, expected)
def test_parr_sub_pi_mismatched_freq(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="H", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(IncompatibleFrequency):
rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1_d = "19910905"
p2_d = "19920406"
p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))
p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))
expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(
[p1_d], freq=p1.freq.base
)
tm.assert_index_equal((p2 - p1), expected)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(pd.offsets.YearEnd, "month"),
(pd.offsets.QuarterEnd, "startingMonth"),
(pd.offsets.MonthEnd, None),
(pd.offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
freq = offset(n, normalize=False, **kwds)
p1 = pd.PeriodIndex([p1_d], freq=freq)
p2 = pd.PeriodIndex([p2_d], freq=freq)
result = p2 - p1
expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(
[p1_d], freq=freq.base
)
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
with pytest.raises(TypeError):
op(pi, other)
@pytest.mark.parametrize(
"other",
[
# datetime scalars
pd.Timestamp.now(),
pd.Timestamp.now().to_pydatetime(),
pd.Timestamp.now().to_datetime64(),
# datetime-like arrays
pd.date_range("2016-01-01", periods=3, freq="H"),
pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
pd.date_range("2016-01-01", periods=3, freq="S")._data,
pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
# Miscellaneous invalid types
],
)
def test_parr_add_sub_invalid(self, other, box_with_array):
# GH#23215
rng = pd.period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
other + rng
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
other - rng
# -----------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
def test_pi_add_sub_td64_array_non_tick_raises(self):
rng = pd.period_range("1/1/2000", freq="Q", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
with pytest.raises(IncompatibleFrequency):
rng + tdarr
with pytest.raises(IncompatibleFrequency):
tdarr + rng
with pytest.raises(IncompatibleFrequency):
rng - tdarr
with pytest.raises(TypeError):
tdarr - rng
def test_pi_add_sub_td64_array_tick(self):
# PeriodIndex + Timedelta-like is allowed only with
# tick-like frequencies
rng = pd.period_range("1/1/2000", freq="90D", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = pd.period_range("12/31/1999", freq="90D", periods=3)
result = rng + tdi
tm.assert_index_equal(result, expected)
result = rng + tdarr
tm.assert_index_equal(result, expected)
result = tdi + rng
tm.assert_index_equal(result, expected)
result = tdarr + rng
tm.assert_index_equal(result, expected)
expected = pd.period_range("1/2/2000", freq="90D", periods=3)
result = rng - tdi
tm.assert_index_equal(result, expected)
result = rng - tdarr
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
tdarr - rng
with pytest.raises(TypeError):
tdi - rng
# -----------------------------------------------------------------
# operations with array/Index of DateOffset objects
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_add_offset_array(self, box):
# GH#18849
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
offs = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = pd.PeriodIndex([pd.Period("2015Q2"), pd.Period("2015Q4")])
with tm.assert_produces_warning(PerformanceWarning):
res = pi + offs
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = offs + pi
tm.assert_index_equal(res2, expected)
unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
# addition/subtraction ops with incompatible offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi + unanchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
unanchored + pi
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_sub_offset_array(self, box):
# GH#18824
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
other = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])
with tm.assert_produces_warning(PerformanceWarning):
res = pi - other
tm.assert_index_equal(res, expected)
anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi - anchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
anchored - pi
def test_pi_add_iadd_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng + one
expected = pd.period_range("2000-01-01 10:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_int(self, one):
"""
PeriodIndex.__sub__ and __isub__ with several representations of
the integer 1, e.g. int, np.int64, np.uint8, ...
"""
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng - one
expected = pd.period_range("2000-01-01 08:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng -= one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)])
def test_pi_sub_intlike(self, five):
rng = period_range("2007-01", periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_pi_sub_isub_offset(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range("2009", "2019", freq="A")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
rng = pd.period_range("2014-01", "2016-12", freq="M")
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range("2013-08", "2016-07", freq="M")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_offset_n_gt1(self, box_transpose_fail):
# GH#23215
# add offset to PeriodIndex with freq.n > 1
box, transpose = box_transpose_fail
per = pd.Period("2016-01", freq="2M")
pi = pd.PeriodIndex([per])
expected = pd.PeriodIndex(["2016-03"], freq="2M")
pi = tm.box_expected(pi, box, transpose=transpose)
expected = tm.box_expected(expected, box, transpose=transpose)
result = pi + per.freq
tm.assert_equal(result, expected)
result = per.freq + pi
tm.assert_equal(result, expected)
def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array):
# GH#23215
# PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0
pi = pd.PeriodIndex(["2016-01"], freq="2M")
expected = pd.PeriodIndex(["2016-04"], freq="2M")
# FIXME: with transposing these tests fail
pi = tm.box_expected(pi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = pi + to_offset("3M")
tm.assert_equal(result, expected)
result = to_offset("3M") + pi
tm.assert_equal(result, expected)
# ---------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_pi_add_intarray(self, int_holder, op):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = op(pi, other)
expected = pd.PeriodIndex([pd.Period("2016Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_pi_sub_intarray(self, int_holder):
# GH#19959
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
other = int_holder([4, -1])
result = pi - other
expected = pd.PeriodIndex([pd.Period("2014Q1"), pd.Period("NaT")])
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - pi
# ---------------------------------------------------------------
# Timedelta-like (timedelta, timedelta64, Timedelta, Tick)
# TODO: Some of these are misnomers because of non-Tick DateOffsets
def test_pi_add_timedeltalike_minute_gt1(self, three_days):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# minute frequency with n != 1. A more general case is tested below
# in test_pi_add_timedeltalike_tick_gt1, but here we write out the
# expected result more explicitly.
other = three_days
rng = pd.period_range("2014-05-01", periods=3, freq="2D")
expected = pd.PeriodIndex(["2014-05-04", "2014-05-06", "2014-05-08"], freq="2D")
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = pd.PeriodIndex(["2014-04-28", "2014-04-30", "2014-05-02"], freq="2D")
result = rng - other
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - rng
@pytest.mark.parametrize("freqstr", ["5ns", "5us", "5ms", "5s", "5T", "5h", "5d"])
def test_pi_add_timedeltalike_tick_gt1(self, three_days, freqstr):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# tick-like frequency with n != 1
other = three_days
rng = pd.period_range("2014-05-01", periods=6, freq=freqstr)
expected = pd.period_range(rng[0] + other, periods=6, freq=freqstr)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = pd.period_range(rng[0] - other, periods=6, freq=freqstr)
result = rng - other
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
other - rng
def test_pi_add_iadd_timedeltalike_daily(self, three_days):
# Tick
other = three_days
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
expected = pd.period_range("2014-05-04", "2014-05-18", freq="D")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_timedeltalike_daily(self, three_days):
# Tick-like 3 Days
other = three_days
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
expected = pd.period_range("2014-04-28", "2014-05-12", freq="D")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_daily(self, not_daily):
other = not_daily
rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=D\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_pi_add_iadd_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = pd.period_range("2014-01-01 12:00", "2014-01-05 12:00", freq="H")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_add_timedeltalike_mismatched_freq_hourly(self, not_hourly):
other = not_hourly
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=H\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
def test_pi_sub_isub_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = pd.period_range("2014-01-01 08:00", "2014-01-05 08:00", freq="H")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_add_iadd_timedeltalike_annual(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range("2019", "2029", freq="A")
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_annual(self, mismatched_freq):
other = mismatched_freq
rng = pd.period_range("2014", "2024", freq="A")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_pi_add_iadd_timedeltalike_M(self):
rng = pd.period_range("2014-01", "2016-12", freq="M")
expected = pd.period_range("2014-06", "2017-05", freq="M")
result = rng + pd.offsets.MonthEnd(5)
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_sub_timedeltalike_freq_mismatch_monthly(self, mismatched_freq):
other = mismatched_freq
rng = pd.period_range("2014-01", "2016-12", freq="M")
msg = "Input has different freq(=.+)? from Period.*?\\(freq=M\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
rng + other
with pytest.raises(IncompatibleFrequency, match=msg):
rng += other
with pytest.raises(IncompatibleFrequency, match=msg):
rng - other
with pytest.raises(IncompatibleFrequency, match=msg):
rng -= other
def test_parr_add_sub_td64_nat(self, box_transpose_fail):
# GH#23320 special handling for timedelta64("NaT")
box, transpose = box_transpose_fail
pi = pd.period_range("1994-04-01", periods=9, freq="19D")
other = np.timedelta64("NaT")
expected = pd.PeriodIndex(["NaT"] * 9, freq="19D")
obj = tm.box_expected(pi, box, transpose=transpose)
expected = tm.box_expected(expected, box, transpose=transpose)
result = obj + other
tm.assert_equal(result, expected)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import json
import pickle
import datetime
from pathlib import Path
from dataclasses import dataclass, field, asdict, fields
from typing import List, Union, Iterable
from seir.argparser import DataClassArgumentParser
from seir.cli import MetaCLI, LockdownCLI, OdeParamCLI, FittingCLI, BaseDistributionCLI, BaseCLI
from seir.parameters import FittingParams
from seir.ode import CovidSeirODE
from seir.solvers import ScipyOdeIntSolver
from seir.data import DsfsiData, CovidData, TimestampData, extend_data_samples, append_data_time
from seir.fitting import BayesSIRFitter
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
@dataclass
class DataCLI(BaseCLI):
data_source: str = field(
default='dsfsi/total',
metadata={
"help": "Selects the data source for the fitting procedure. If a csv file, it will look for a deaths, "
"hospitalised, critical, infected, and recovered columns and fit the output of the model to those "
"(if selected for fitting). Can also point to dsfsi/<province> to load the DSFSI data for a "
"particular province Defaults to dsfsi/total."
}
)
population_source: str = field(
default=None,
metadata={
"help": "A csv file containing a column labeled 'ageband' and 'population' that yields the number of "
"people in each ten year age group, from 0-9, 10-19, ..., 80+. This should correspond to the data "
"selected for fitting. NOTE: This is not needed when DSFSI data are selected as the data source."
}
)
lockdown_date: str = field(
default=None,
metadata={
"help": "The day of the start of a lockdown period. The model internally computes time values (such as "
"the seeding time t0) relative to this date. Should be in YYYY/mm/dd format. This is not needed when "
"DSFSI data is used, as it is set to 2020/03/27)."
}
)
min_date: str = field(
default='2020/04/05',
metadata={
"help": "Minimum date from which to fit. Can help reduce noise due to early reporting faults."
}
)
max_date: str = field(
default=None,
metadata={
"help": "Maximum date from which to fit. Can help reduce noise at the end of reported data, due to lag."
}
)
lockdown_date_dt: datetime.datetime = field(init=False)
min_date_dt: datetime.datetime = field(init=False)
max_date_dt: datetime.datetime = field(init=False)
dsfsi_province: str = field(default=None, init=False)
mode: str = field(default=None, init=False)
def __post_init__(self):
if self.data_source.split('/')[0] == 'dsfsi':
self.mode = 'dsfsi'
self.lockdown_date = '2020/03/27'
if len(self.data_source.split('/')) == 1:
self.dsfsi_province = 'total'
else:
province = self.data_source.split('/')[1]
self.dsfsi_province = province.upper() if not province.lower() == 'total' else province.lower()
elif Path(self.data_source).is_file():
self.mode = 'file'
if not Path(self.population_source).is_file():
raise ValueError('--population_source not correctly specified for given data source.')
else:
raise ValueError('--data_source flag does not point to a dsfsi dataset or a local file.')
self.lockdown_date = '2020/03/27' if self.lockdown_date is None else self.lockdown_date
self.lockdown_date_dt = pd.to_datetime(self.lockdown_date,
format='%Y/%m/%d') if self.lockdown_date is not None else None
self.min_date_dt = pd.to_datetime(self.min_date, format='%Y/%m/%d') if self.min_date is not None else None
self.max_date_dt = pd.to_datetime(self.max_date, format='%Y/%m/%d') if self.max_date is not None else None
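# Illustrative sketch, not part of the original module: how DataCLI resolves its mode and
# province from the --data_source flag. 'dsfsi/total' is the documented default; for DSFSI
# data the lockdown date is hard-coded to 2020/03/27.
def _demo_data_cli():
    cli = DataCLI(data_source='dsfsi/total')
    # mode == 'dsfsi', dsfsi_province == 'total', lockdown_date_dt == Timestamp('2020-03-27')
    return cli.mode, cli.dsfsi_province, cli.lockdown_date_dt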
@dataclass
class InitialCLI(BaseDistributionCLI):
_defaults_dict = {
't0': -50,
'prop_e0': [0, 1e-5],
'prop_immune': [0],
}
t0: int = field(
default=-50,
metadata={
"help": "Initial time at which to process y0"
}
)
prop_e0: List[float] = field(
default_factory=lambda: None,
metadata={
"help": "Proportion of exposed individuals at t0. Used to seed the SEIR model."
}
)
prop_immune: List[float] = field(
default_factory=lambda: None,
metadata={
"help": "Proportion of initial population who are immune to the disease and will never contract it."
}
)
@dataclass
class InputOutputCLI:
from_config: str = field(
default=None,
metadata={
"help": "Load parameter values from a config file instead of the command line"
}
)
output_dir: str = field(
default=None,
metadata={
"help": "Location to place output files"
}
)
overwrite: bool = field(
default=False,
metadata={
"help": "Whether to overwrite the contents of the output directory"
}
)
output_path: Path = field(init=False)
run_path: Path = field(init=False)
def __post_init__(self):
if self.output_dir is None:
self.output_dir = './results'
self.output_path = Path(self.output_dir)
if not self.output_path.is_dir():
self.output_path.mkdir()
if (
self.output_path.is_dir()
and any(self.output_path.iterdir())
and not self.from_config
and not self.overwrite
):
raise ValueError('Detected files in output directory. Define a new output directory or use --overwrite to '
'overcome.')
self.run_path = self.output_path.joinpath('runs/')
if not self.run_path.is_dir():
self.run_path.mkdir()
def save_all_cli_to_config(clis: Union[BaseCLI, Iterable[BaseCLI]], directory: Union[str, Path], exclude: list = None):
if exclude is None:
exclude = []
if isinstance(clis, BaseCLI):
clis = [clis]
json_data = {}
for cli in clis:
x = asdict(cli)
f = [f.name for f in fields(cli) if f.init and f.name not in exclude]
xx = {k: v for k, v in x.items() if k in f}
json_data.update(xx)
if isinstance(directory, str):
directory = Path(directory)
if not directory.is_dir():
directory.mkdir()
json.dump(json_data, directory.joinpath('config.json').open('w'), indent=4)
def plot_priors_posterior(prior_dict: dict, posterior_dict: dict, params_to_plot: Iterable):
for param in params_to_plot:
assert param in prior_dict, \
f"Parameter {param} not found in given prior dictionary"
assert param in posterior_dict, \
f"Parameter {param} not found in given posterior dictionary"
if not param == 'rel_beta_lockdown':
assert isinstance(prior_dict[param], np.ndarray), \
f"Parameter in prior dict {param} is not a numpy array"
assert prior_dict[param].ndim == posterior_dict[param].ndim and 2 >= prior_dict[param].ndim > 0, \
f"Mismatch of dimensions for parameter {param}."
nb_plots = 0
for param in params_to_plot:
if param == 'rel_beta_lockdown':
for x in prior_dict[param]:
if x.ndim > 0:
nb_plots += x.shape[0]
elif prior_dict[param].ndim == 1:
nb_plots += 1
else:
nb_plots += prior_dict[param].shape[0]
# plot params on a square grid
n = int(np.ceil(np.sqrt(nb_plots)))
fig, axes = plt.subplots(n, n, figsize=(3 * n, 3 * n))
axes = axes.flat
i = 0
for param in params_to_plot:
if param == 'rel_beta_lockdown':
for x in range(len(prior_dict[param])):
if prior_dict[param][x].ndim == 2:
for nb_group in range(prior_dict[param][x].shape[0]):
sns.distplot(prior_dict[param][x][nb_group], color='C0', ax=axes[i])
sns.distplot(posterior_dict[param][x][nb_group], color='C1', ax=axes[i])
axes[i].set_title(f"{param}_{x}_{nb_group}")
i += 1
elif prior_dict[param].ndim == 1:
sns.distplot(prior_dict[param], color='C0', ax=axes[i])
sns.distplot(posterior_dict[param], color='C1', ax=axes[i])
axes[i].set_title(param)
i += 1
else:
for nb_group in range(prior_dict[param].shape[0]):
sns.distplot(prior_dict[param][nb_group], color='C0', ax=axes[i])
sns.distplot(posterior_dict[param][nb_group], color='C1', ax=axes[i])
axes[i].set_title(f"{param}_{nb_group}")
i += 1
return fig, axes
def append_samples(a: np.ndarray, b: np.ndarray):
if isinstance(a, np.ndarray):
assert isinstance(b, np.ndarray)
if a.ndim > 0 and b.ndim > 0 and a.shape[-1] > 1 and b.shape[-1] > 1:
return np.concatenate([a, b], axis=-1)
return a
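# Illustrative sketch, not part of the original module: append_samples concatenates two
# posterior sample arrays along the last (sample) axis when both carry more than one
# sample, and otherwise returns the first array unchanged (e.g. for fixed scalar parameters).
def _demo_append_samples():
    a = np.ones((3, 100))   # 3 age groups x 100 samples
    b = np.zeros((3, 50))   # 3 age groups x 50 samples
    return append_samples(a, b).shape  # -> (3, 150)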
def process_runs(run_path: Path, nb_runs: int) -> dict:
all_priors = None
for run in range(nb_runs):
prior_dict = pickle.load(run_path.joinpath(f'run{run:02}_prior_dict.pkl').open('rb'))
if all_priors is None:
all_priors = prior_dict
else:
for k, v in all_priors.items():
if isinstance(v, list):
for i in range(len(v)):
all_priors[k][i] = append_samples(all_priors[k][i], prior_dict[k][i])
else:
all_priors[k] = append_samples(all_priors[k], prior_dict[k])
return all_priors
def main():
sns.set(style='darkgrid')
argparser = DataClassArgumentParser(
[MetaCLI, LockdownCLI, OdeParamCLI, FittingCLI, InitialCLI, InputOutputCLI, DataCLI])
meta_cli, lockdown_cli, ode_cli, fitting_cli, initial_cli, output_cli, data_cli = argparser.parse_args_into_dataclasses()
if output_cli.from_config:
meta_cli, lockdown_cli, ode_cli, fitting_cli, initial_cli, output_cli, data_cli = argparser.parse_json_file(
output_cli.from_config)
save_all_cli_to_config([meta_cli, lockdown_cli, ode_cli, fitting_cli, initial_cli, output_cli, data_cli],
directory=output_cli.output_path, exclude=['from_config'])
if data_cli.mode == 'dsfsi':
df_pop = pd.read_csv('data/sa_age_band_population.csv')
population_band = df_pop[data_cli.dsfsi_province].values
if not meta_cli.age_heterogeneity:
population_band = np.sum(population_band)
population_band = np.expand_dims(population_band, axis=1)
data = DsfsiData(province=data_cli.dsfsi_province,
filter_kwargs={'min_date': data_cli.min_date_dt,
'max_date': data_cli.max_date_dt})
elif data_cli.mode == 'file':
data_fp = Path(data_cli.data_source)
pop_fp = Path(data_cli.population_source)
if data_fp.suffix != '.csv':
raise ValueError('Only csv files are supported as data sources')
if pop_fp.suffix != '.csv':
raise ValueError('Only csv files are supported as population sources')
df_pop = pd.read_csv(pop_fp, index_col='ageband')
import pandas as pd
import numpy as np
from dateutil.relativedelta import relativedelta
import re
import os
import xgboost as xgb
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import RandomForestClassifier
PATH = 'data/'
submissions_path = 'submissions/'
train = pd.read_csv(f"{PATH}train.csv")
test = pd.read_csv(f"{PATH}test.csv")
def replace_lower_freq(df, col, replace_val, threshold):
value_counts = df[col].value_counts()
to_remove = value_counts[value_counts <= threshold].index
return df[col].replace(to_remove, replace_val)
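# Illustrative sketch, not part of the original script: replace_lower_freq collapses rare
# categories into a single placeholder value before further encoding. The toy frame,
# placeholder 'C00000' and threshold below are for demonstration only.
def _demo_replace_lower_freq():
    toy = pd.DataFrame({'City_Code': ['C1', 'C1', 'C1', 'C2', 'C3']})
    # 'C2' and 'C3' occur once each (<= threshold), so both map to 'C00000'
    return replace_lower_freq(toy, 'City_Code', 'C00000', threshold=1)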
def add_datepart(df, fldname, drop=True):
fld = df[fldname]
if not np.issubdtype(fld.dtype, np.datetime64):
df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True)
targ_pre = re.sub('[Dd]ate$', '', fldname)
for n in ('Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start'):
df[targ_pre+n] = getattr(fld.dt, n.lower())
df[targ_pre+'Elapsed'] = fld.astype(np.int64) // 10**9
if drop:
df.drop(fldname, axis=1, inplace=True)
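# Illustrative sketch, not part of the original script, and assuming the pandas version the
# script was written for (where Series.dt.week is still available): add_datepart expands a
# date column in place into Year/Month/Week/... features and drops the original column.
def _demo_add_datepart():
    toy = pd.DataFrame({'DOBDate': ['2019-12-31', '2020-01-01']})
    add_datepart(toy, 'DOBDate')
    # columns now include 'DOBYear', 'DOBMonth', ..., 'DOBIs_year_start', 'DOBElapsed'
    return toy.columns.tolist()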
def preprocess(df, id_col, target_col):
df['Gender'] = df['Gender'].map({'Male': 1, 'Female': 0})
df['City_Code'] = replace_lower_freq(df, 'City_Code', "C00000", 5)
df['City_Code'] = df['City_Code'].fillna("C00000")
df['City_Code'] = df['City_Code'].str.lstrip('C').astype(int)
df['City_Category'] = df['City_Category'].fillna("M")
df['City_Category'] = df['City_Category'].map({'A': 1, 'B': 2, 'C': 3, 'M': -1})
df['Employer_Code'] = replace_lower_freq(df, 'Employer_Code', "COM0000000", 5)
df['Employer_Code'] = df['Employer_Code'].fillna("COM0000000")
df['Employer_Code'] = df['Employer_Code'].str.lstrip('COM').astype(int)
df['Employer_Category1'] = df['Employer_Category1'].fillna("M")
df['Employer_Category1'] = df['Employer_Category1'].map({'A': 1, 'B': 2, 'C': 3, 'M': -1})
df['Employer_Category2'] = df['Employer_Category2'].fillna(-1)
df['Employer_Category2'] = df['Employer_Category2'].astype(int)
df['DOB'] = df['DOB'].fillna('11/01/82')
df['DOB'] = pd.to_datetime(df['DOB'])
df.loc[df['DOB'].dt.year > 2000, 'DOB'] = df.loc[df['DOB'].dt.year > 2000, 'DOB'].apply(lambda x: x-relativedelta(years=100))
df['Lead_Creation_Date'] = pd.to_datetime(df['Lead_Creation_Date'])
"""
execution environment: cdips, + pipe-trex .pth file in
/home/lbouma/miniconda3/envs/cdips/lib/python3.7/site-packages
python -u paper_plot_all_figures.py &> logs/paper_plot_all.log &
"""
from glob import glob
import datetime, os, pickle, shutil, subprocess
import numpy as np, pandas as pd
import matplotlib.pyplot as plt
from numpy import array as nparr
from datetime import datetime
from astropy.io import fits
from astropy.io.votable import from_table, writeto, parse
from astropy.coordinates import SkyCoord
from astropy import units as u
from astrobase import lcmath
from astrobase.lcmath import phase_magseries
from lcstatistics import compute_lc_statistics_fits
import lcstatistics as lcs
from cdips.utils import tess_noise_model as tnm
from cdips.utils import collect_cdips_lightcurves as ccl
from cdips.plotting import plot_star_catalog as psc
from cdips.plotting import plot_catalog_to_gaia_match_statistics as xms
from cdips.plotting import plot_wcsqa as wcsqa
from cdips.plotting import plot_quilt_PCs as pqp
from cdips.plotting import plot_quilt_s6_s7 as pqps
from cdips.plotting import savefig
import imageutils as iu
import matplotlib.colors as colors
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.patches as patches
import matplotlib.patheffects as path_effects
from skim_cream import plot_initial_period_finding_results
from collections import Counter
OUTDIR = '/nfs/phtess2/ar0/TESS/PROJ/lbouma/cdips/results/paper_V_figures/'
if not os.path.exists(OUTDIR):
os.mkdir(OUTDIR)
CLUSTERDATADIR = '/home/lbouma/proj/cdips/data/cluster_data'
LCDIR = '/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/'
OC_MG_CAT_ver=0.5
def main():
# fig N: T magnitude CDF for all CDIPS target stars.
plot_target_star_cumulative_counts(OC_MG_CAT_ver=OC_MG_CAT_ver, overwrite=1)
# fig N: HRD for CDIPS TARGET (not LC) stars.
sectors = None
plot_hrd_scat(sectors, overwrite=1, closest_subset=1)
plot_hrd_scat(sectors, overwrite=1, close_subset=1)
plot_hrd_scat(sectors, overwrite=1)
# fig N: histogram of CDIPS target star age.
plot_target_star_hist_logt(OC_MG_CAT_ver=OC_MG_CAT_ver, overwrite=1)
assert 0
# fig N: pmRA and pmDEC scatter for CDIPS LC stars.
plot_pm_scat(sectors, overwrite=1, close_subset=1)
plot_pm_scat(sectors, overwrite=1, close_subset=0)
# fig N: histogram of ages of LC stars
plot_hist_logt(sectors, overwrite=1)
# fig N: histogram (or CDF) of T magnitude for LC stars
plot_cdf_T_mag(sectors, overwrite=1)
sectors = [6,7,8,9,10,11,12,13]
# fig N: RMS vs catalog T mag for LC stars, with TFA LCs
plot_rms_vs_mag(sectors, overwrite=1)
# fig N: positions of field and cluster LC stars (currently all cams)
plot_cluster_and_field_star_scatter(sectors=sectors, overwrite=1,
galacticcoords=True)
plot_cluster_and_field_star_scatter(sectors=sectors, overwrite=1)
# plot_singleccd_rms_vs_mag(sectors, overwrite=0)
# fig N: average autocorrelation fn of LCs
plot_avg_acf(sectors, size=10000, overwrite=1, cleanprevacf=False)
# fig N: stages of image processing.
plot_stages_of_image_processing(niceimage=1, overwrite=1)
plot_stages_of_image_processing(niceimage=0, overwrite=1)
# fig N: catalog_to_gaia_match_statistics for CDIPS target stars
plot_catalog_to_gaia_match_statistics(overwrite=1)
# fig N: quilt of interesting light curves, phase-folded
pqps.plot_quilt_s6_s7(overwrite=1)
# fig N: 3x2 quilt of phased PC
pqp.plot_quilt_PCs(overwrite=1, paper_aspect_ratio=0)
pqp.plot_quilt_PCs(overwrite=1, paper_aspect_ratio=1)
# timeseries figures
for sector in range(6,8):
for cam in range(1,5):
for ccd in range(1,5):
try:
plot_detrended_light_curves(
sector=sector, cam=cam, ccd=ccd, overwrite=0, seed=42
)
except Exception as e:
print('{}-{}-{} failed, because {}'.
format(sector,cam,ccd,repr(e)))
pass
plot_external_parameters_vs_time(
sector=6, cam=1, ccd=1, overwrite=1, seed=43)
plot_raw_light_curve_systematics(
sector=6, cam=1, ccd=2, overwrite=1, seed=43)
plot_raw_light_curve_systematics(
sector=7, cam=2, ccd=4, overwrite=1, seed=42)
# fig N: wcs quality verification for one photometric reference
plot_wcs_verification(overwrite=1)
# fig N: target star provenance
plot_target_star_reference_pie_chart(OC_MG_CAT_ver=OC_MG_CAT_ver, overwrite=1)
# fig N: tls_sde_vs_period_scatter
plot_tls_sde_vs_period_scatter(sectors, overwrite=1)
# fig N: LS period vs color evolution in time
plot_LS_period_vs_color_and_age(sectors, overwrite=1,
OC_MG_CAT_ver=OC_MG_CAT_ver)
# fig N: histogram (or CDF) of TICCONT. unfortunately this is only
# calculated for CTL stars, so by definition it has limited use
plot_cdf_cont(sectors, overwrite=0)
def get_Tmag(fitspath):
with fits.open(fitspath) as hdulist:
mag = hdulist[0].header['TESSMAG']
return mag
def get_mag(fitspath, ap='IRM2'):
with fits.open(fitspath) as hdulist:
mag = hdulist[1].data[ap]
return mag
def plot_external_parameters_vs_time(sector=6, cam=1, ccd=2, overwrite=1,
seed=42):
outpath = os.path.join(
OUTDIR,
'external_parameters_vs_time_sec{}cam{}ccd{}.png'.
format(sector, cam, ccd)
)
if os.path.exists(outpath) and not overwrite:
print('found {} and not overwrite; return'.format(outpath))
return
#
# data dir with what lcs exist, and what their T mags are.
#
dfpath = os.path.join(
OUTDIR,
'detrended_light_curves_sec{}cam{}ccd{}.csv'.
format(sector, cam, ccd)
)
if not os.path.exists(dfpath):
lcdir = (
'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{}/cam{}_ccd{}'.
format(sector, cam, ccd)
)
lcpaths = glob(os.path.join(lcdir, '*_llc.fits'))
Tmags = np.array([get_Tmag(l) for l in lcpaths])
df = pd.DataFrame({'lcpaths':lcpaths,'Tmags':Tmags})
df.to_csv(dfpath, index=False, sep=',')
else:
df = pd.read_csv(dfpath, sep=',')
sel = (df['Tmags'] > 13) & (df['Tmags'] < 14)
lcpaths = nparr(df['lcpaths'][sel])
np.random.seed(seed)
spath = np.random.choice(lcpaths, size=1, replace=False)[0]
#
# define some keys. open the chosen lc. and get the times of momentum dumps.
#
lc = fits.open(spath)[1].data
magtype = 'IRM2'
keys = [magtype, 'XIC', 'YIC', 'FSV', 'FDV', 'FKV', 'CCDTEMP', 'BGV']
labels = [magtype, 'x', 'y', 's', 'd', 'k', 'T [$^\circ$C]', 'bkgd [ADU]']
time = lc['TMID_BJD']
baddir = (
'/nfs/phtess2/ar0/TESS/FFI/RED_IMGSUB/FULL/'+
's{}/'.format(str(sector).zfill(4))+
'RED_{}-{}-15??_ISP/badframes'.format(cam, ccd)
)
badframes = glob(os.path.join(baddir, '*.fits'))
mom_dump_times = []
qualitys = []
for badframe in badframes:
quality = iu.get_header_keyword(badframe, 'DQUALITY')
if not quality > 0 :
continue
tstart = iu.get_header_keyword(badframe, 'TSTART')
telapse = iu.get_header_keyword(badframe, 'TELAPSE')
bjdrefi = iu.get_header_keyword(badframe, 'BJDREFI')
tmid = bjdrefi + tstart + telapse / 2
mom_dump_times.append(tmid)
qualitys.append(quality)
#
# now make the plot
#
plt.close('all')
fig,axs = plt.subplots(nrows=len(keys), ncols=1,
figsize=(4, 1.2*len(keys)), sharex=True)
axs = axs.flatten()
for ax, key, label in zip(axs, keys, labels):
xval = time
yval = lc[key]
if label in ['x','y']:
yoffset = int(np.mean(yval))
yval -= yoffset
label += '- {:d} [px]'.format(yoffset)
elif label in [magtype]:
yoffset = np.round(np.median(yval), decimals=1)
yval -= yoffset
yval *= 1e3
label += '- {:.1f} [mmag]'.format(yoffset)
ax.scatter(xval, yval, rasterized=True, alpha=0.8, zorder=3, c='k',
lw=0, s=3)
ax.set_ylabel(label, fontsize='small')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_ticks_position('both')
ax.get_yaxis().set_tick_params(which='both', direction='in')
ax.get_xaxis().set_tick_params(which='both', direction='in')
ax.xaxis.set_tick_params(labelsize='small')
ax.yaxis.set_tick_params(labelsize='small')
if label in [magtype]:
ylim = ax.get_ylim()
ax.set_ylim((max(ylim), min(ylim)))
ylim = ax.get_ylim()
ax.vlines(mom_dump_times, min(ylim), max(ylim), color='orangered',
linestyle='--', zorder=0, lw=1, alpha=0.3)
ax.set_ylim((min(ylim), max(ylim)))
ax.set_xlabel('Time $\mathrm{{BJD}}_{{\mathrm{{TDB}}}}$ [days]',
fontsize='small')
fig.tight_layout(h_pad=-0.5, pad=0.2)
savefig(fig, outpath)
def plot_raw_light_curve_systematics(sector=None, cam=None, ccd=None,
overwrite=False, N_to_plot=20, seed=42):
"""
get a random sample of IRM2 light curves from the same camera & ccd. plot
them all together to show that the raw magnitudes are dominated by shared systematics.
"""
outpath = os.path.join(
OUTDIR,
'raw_light_curve_systematics_sec{}cam{}ccd{}.png'.
format(sector, cam, ccd)
)
if os.path.exists(outpath) and not overwrite:
print('found {} and not overwrite; return'.format(outpath))
return
dfpath = os.path.join(
OUTDIR,
'raw_light_curve_systematics_sec{}cam{}ccd{}.csv'.
format(sector, cam, ccd)
)
if not os.path.exists(dfpath):
lcdir = (
'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{}/cam{}_ccd{}'.
format(sector, cam, ccd)
)
lcpaths = glob(os.path.join(lcdir, '*_llc.fits'))
Tmags = np.array([get_Tmag(l) for l in lcpaths])
df = pd.DataFrame({'lcpaths':lcpaths,'Tmags':Tmags})
df.to_csv(dfpath, index=False, sep=',')
else:
df = pd.read_csv(dfpath, sep=',')
sel = (df['Tmags'] > 13) & (df['Tmags'] < 14)
lcpaths = nparr(df['lcpaths'][sel])
Tmags = nparr(df['Tmags'][sel])
np.random.seed(seed)
spaths = np.random.choice(lcpaths, size=2*N_to_plot, replace=False)
# shape: (N_to_plot x N_observations)
rawmags = nparr([get_mag(s, ap='IRM2') for s in spaths])
pcamags = nparr([get_mag(s, ap='PCA2') for s in spaths])
tfmags = nparr([get_mag(s, ap='TFA2') for s in spaths])
time = fits.open(spaths[0])[1].data['TMID_BJD']
assert time.shape[0] == rawmags.shape[1]
#
# make the stacked plot of raw mags.
#
f, ax = plt.subplots(figsize=(4,8))
colors = plt.cm.tab20b( list(range(N_to_plot)) )
ind = 0
for i in range(N_to_plot):
ind += 1
mag = rawmags[ind,:]
if np.all(pd.isnull(mag)):
continue
mag -= np.nanmean(mag)
offset = i*0.15
expected_norbits = 2
orbitgap = 0.5
norbits, groups = lcmath.find_lc_timegroups(time, mingap=orbitgap)
if norbits != expected_norbits:
errmsg = 'got {} orbits, expected {}. groups are {}'.format(
norbits, expected_norbits, repr(groups))
raise AssertionError(errmsg)
for group in groups:
tg_time = time[group]
tg_mag = mag[group]
ax.plot(tg_time, tg_mag+offset, c=colors[i], lw=0.5,
rasterized=True)
ax.set_xlabel('Time $\mathrm{{BJD}}_{{\mathrm{{TDB}}}}$ [days]')
ax.set_ylabel('Magnitude [arbitrary offset]')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_ticks_position('both')
ax.get_yaxis().set_tick_params(which='both', direction='in')
ax.get_xaxis().set_tick_params(which='both', direction='in')
f.tight_layout(pad=0.2)
savefig(f, outpath)
def plot_detrended_light_curves(sector=None, cam=None, ccd=None,
overwrite=False, N_to_plot=20, seed=42):
"""
use the sample of light curves from plot_raw_light_curve_systematics to
show how super-awesome the detrending is.
"""
outpath = os.path.join(
OUTDIR,
'detrended_light_curves_sec{}cam{}ccd{}.png'.
format(sector, cam, ccd)
)
if os.path.exists(outpath) and not overwrite:
print('found {} and not overwrite; return'.format(outpath))
return
dfpath = os.path.join(
OUTDIR,
'detrended_light_curves_sec{}cam{}ccd{}.csv'.
format(sector, cam, ccd)
)
if not os.path.exists(dfpath):
lcdir = (
'/nfs/phtess2/ar0/TESS/PROJ/lbouma/CDIPS_LCS/sector-{}/cam{}_ccd{}'.
format(sector, cam, ccd)
)
lcpaths = glob(os.path.join(lcdir, '*_llc.fits'))
Tmags = np.array([get_Tmag(l) for l in lcpaths])
df = pd.DataFrame({'lcpaths':lcpaths,'Tmags':Tmags})
df.to_csv(dfpath, index=False, sep=',')
else:
df = pd.read_csv(dfpath, sep=',')
sel = (df['Tmags'] > 13) & (df['Tmags'] < 14)
lcpaths = nparr(df['lcpaths'][sel])
Tmags = nparr(df['Tmags'][sel])
np.random.seed(seed)
spaths = np.random.choice(lcpaths, size=2*N_to_plot, replace=False)
# shape: (N_to_plot x N_observations)
rawmags = nparr([get_mag(s, ap='IRM2') for s in spaths])
pcamags = nparr([get_mag(s, ap='PCA2') for s in spaths])
tfamags = nparr([get_mag(s, ap='TFA2') for s in spaths])
time = fits.open(spaths[0])[1].data['TMID_BJD']
assert time.shape[0] == rawmags.shape[1]
#
# make the stacked plot of raw mags.
#
f, ax = plt.subplots(figsize=(4,8))
colors = plt.cm.tab20b( list(range(N_to_plot)) )
ind = 0
i = 0
while i < N_to_plot-1:
ind += 1
rawmag = rawmags[ind,:]
pcamag = pcamags[ind,:]
tfamag = tfamags[ind,:]
if (
np.any(np.isnan(rawmag)) or
np.any(np.isnan(pcamag)) or
np.any(np.isnan(tfamag))
):
continue
rawmag -= np.nanmean(rawmag)
pcamag -= np.nanmean(pcamag)
tfamag -= np.nanmean(tfamag)
offset = i*0.45
i += 1
expected_norbits = 2
orbitgap = 0.5
norbits, groups = lcmath.find_lc_timegroups(time, mingap=orbitgap)
if norbits != expected_norbits:
errmsg = 'got {} orbits, expected {}. groups are {}'.format(
norbits, expected_norbits, repr(groups))
raise AssertionError(errmsg)
for group in groups:
tg_time = time[group]
tg_rawmag = rawmag[group]
tg_pcamag = pcamag[group]
tg_tfamag = tfamag[group]
ax.plot(tg_time, tg_rawmag+offset, c=colors[i], lw=0.5,
rasterized=True)
ax.plot(tg_time, tg_pcamag+offset-0.07, c=colors[i], lw=0.5,
rasterized=True)
ax.plot(tg_time, tg_tfamag+offset-0.14, c=colors[i], lw=0.5,
rasterized=True)
ax.set_xlabel('Time $\mathrm{{BJD}}_{{\mathrm{{TDB}}}}$ [days]')
ax.set_ylabel('Magnitude [arbitrary offset]')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_ticks_position('both')
ax.get_yaxis().set_tick_params(which='both', direction='in')
ax.get_xaxis().set_tick_params(which='both', direction='in')
f.tight_layout(pad=0.2)
savefig(f, outpath)
def plot_stages_of_image_processing(niceimage=1, overwrite=0):
if niceimage:
outpath = os.path.join(
OUTDIR, 'stages_of_image_processing_good.png')
else:
outpath = os.path.join(
OUTDIR, 'stages_of_image_processing_bad.png')
if os.path.exists(outpath) and not overwrite:
print('found {} and not overwrite; return'.format(outpath))
return
if niceimage:
# mid orbit, good registration
imgid = 'tess2018363152939'
else:
# near periapse, during scattered light, bad registration
imgid = 'tess2018358075939'
datadir = '/nfs/phtess2/ar0/TESS/FFI/RED/sector-6/cam1_ccd2/'
bkgdfile = os.path.join(
datadir,imgid+'-s0006-1-2-0126_cal_img_bkgd.fits')
calfile = os.path.join(
datadir,imgid+'-s0006-1-2-0126_cal_img.fits')
diffdir = '/nfs/phtess2/ar0/TESS/FFI/RED_IMGSUB/FULL/s0006/RED_1-2-1501_ISP'
difffile = os.path.join(
diffdir,
'rsub-d2f9343c-{}-s0006-1-2-0126_cal_img_bkgdsub-xtrns.fits'.
format(imgid)
)
##########################################
vmin, vmax = 10, int(1e3)
bkgd_img, _ = iu.read_fits(bkgdfile)
cal_img, _ = iu.read_fits(calfile)
diff_img, _ = iu.read_fits(difffile)
plt.close('all')
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
fig, axs = plt.subplots(ncols=2, nrows=3)
# top right: log of calibrated image
lognorm = colors.LogNorm(vmin=vmin, vmax=vmax)
cset1 = axs[0,1].imshow(cal_img, cmap='binary_r', vmin=vmin, vmax=vmax,
norm=lognorm)
#txt = axs[0,1].text(0.02, 0.96, 'image', ha='left', va='top',
# fontsize='small', transform=axs[0,1].transAxes,
# color='black')
#txt.set_path_effects([path_effects.Stroke(linewidth=1, foreground='white'),
# path_effects.Normal()])
diff_vmin, diff_vmax = -1000, 1000
diffnorm = colors.SymLogNorm(linthresh=0.03, linscale=0.03, vmin=diff_vmin,
vmax=diff_vmax)
# top left: background map
axs[0,0].imshow(bkgd_img - np.median(cal_img), cmap='RdBu_r',
vmin=diff_vmin, vmax=diff_vmax, norm=diffnorm)
#txt = axs[0,0].text(0.02, 0.96, 'background', ha='left', va='top',
# fontsize='small', transform=axs[0,0].transAxes,
# color='black')
#txt.set_path_effects([path_effects.Stroke(linewidth=1, foreground='white'),
# path_effects.Normal()])
# middle left: calibrated - background
cset2 = axs[1,0].imshow(cal_img - bkgd_img, cmap='RdBu_r', vmin=diff_vmin,
vmax=diff_vmax, norm=diffnorm)
#txt = axs[1,0].text(0.02, 0.96, 'image - background', ha='left', va='top',
# fontsize='small', transform=axs[1,0].transAxes,
# color='black')
#txt.set_path_effects([path_effects.Stroke(linewidth=1, foreground='white'),
# path_effects.Normal()])
# middle right: calibrated - median
axs[1,1].imshow(cal_img - np.median(cal_img), cmap='RdBu_r',
vmin=diff_vmin, vmax=diff_vmax, norm=diffnorm)
#txt = axs[1,1].text(0.02, 0.96, 'image - median(image)', ha='left', va='top',
# fontsize='small', transform=axs[1,1].transAxes,
# color='black')
#txt.set_path_effects([path_effects.Stroke(linewidth=1, foreground='white'),
# path_effects.Normal()])
# lower left: difference image (full)
toplen = 57
top = cm.get_cmap('Oranges_r', toplen)
bottom = cm.get_cmap('Blues', toplen)
newcolors = np.vstack((top(np.linspace(0, 1, toplen)),
np.zeros(((256-2*toplen),4)),
bottom(np.linspace(0, 1, toplen))))
newcmp = ListedColormap(newcolors, name='lgb_cmap')
cset3 = axs[2,0].imshow(diff_img, cmap='RdBu_r', vmin=diff_vmin,
vmax=diff_vmax, norm=diffnorm)
#txt = axs[2,0].text(0.02, 0.96, 'difference', ha='left', va='top',
# fontsize='small', transform=axs[2,0].transAxes,
# color='black')
#txt.set_path_effects([path_effects.Stroke(linewidth=1, foreground='white'),
# path_effects.Normal()])
rect = patches.Rectangle((300, 300), 512, 512, linewidth=0.6,
edgecolor='black', facecolor='none',
linestyle='--')
axs[2,0].add_patch(rect)
# lower right: difference image (zoom)
sel = (slice(300, 812), slice(300, 812))
axs[2,1].imshow(diff_img[sel], cmap='RdBu_r',
vmin=diff_vmin, vmax=diff_vmax, norm=diffnorm)
#txt = axs[2,1].text(0.02, 0.96, 'difference (zoom)', ha='left', va='top',
# fontsize='small', transform=axs[2,1].transAxes,
# color='black')
#txt.set_path_effects([path_effects.Stroke(linewidth=1, foreground='white'),
# path_effects.Normal()])
for ax in axs.flatten():
ax.set_xticklabels('')
ax.set_yticklabels('')
ax.get_xaxis().set_tick_params(which='both', direction='in')
ax.get_yaxis().set_tick_params(which='both', direction='in')
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
divider0 = make_axes_locatable(axs[0,1])
divider1 = make_axes_locatable(axs[1,1])
divider2 = make_axes_locatable(axs[2,1])
cax0 = divider0.append_axes('right', size='5%', pad=0.05)
cax1 = divider1.append_axes('right', size='5%', pad=0.05)
cax2 = divider2.append_axes('right', size='5%', pad=0.05)
cb1 = fig.colorbar(cset1, ax=axs[0,1], cax=cax0, extend='both')
cb2 = fig.colorbar(cset2, ax=axs[1,1], cax=cax1, extend='both')
cb3 = fig.colorbar(cset3, ax=axs[2,1], cax=cax2, extend='both')
cb2.set_ticks([-1e3,-1e2,-1e1,0,1e1,1e2,1e3])
cb2.set_ticklabels(['-$10^3$','-$10^2$','-$10^1$','0',
'$10^1$','$10^2$','$10^3$'])
cb3.set_ticks([-1e3,-1e2,-1e1,0,1e1,1e2,1e3])
cb3.set_ticklabels(['-$10^3$','-$10^2$','-$10^1$','0',
'$10^1$','$10^2$','$10^3$'])
fig.tight_layout(h_pad=0, w_pad=-14, pad=0)
fig.savefig(outpath, bbox_inches='tight', dpi=400)
print('{}: made {}'.format(datetime.utcnow().isoformat(), outpath))
def plot_tls_sde_vs_period_scatter(sectors, overwrite=1):
outpath = os.path.join(
OUTDIR, 'tls_sde_vs_period_scatter.png')
if os.path.exists(outpath) and not overwrite:
print('found {} and not overwrite; return'.format(outpath))
return
if len(sectors) != 2:
raise AssertionError
f,axs = plt.subplots(nrows=2, sharex=True, figsize=(4,6))
for sector, ax in zip(sectors, axs):
pfdir = ('/nfs/phtess2/ar0/TESS/PROJ/lbouma/cdips/'
'results/cdips_lc_periodfinding/sector-{}'.format(sector))
pfpath = os.path.join(
pfdir, 'initial_period_finding_results_with_limit.csv')
df = pd.read_csv(pfpath, sep=',')
ax.scatter(df['tls_period'], df['tls_sde'], c='k', alpha=1, s=0.2,
rasterized=True, linewidths=0)
ax.scatter(df['tls_period'], df['limit'], c='C1', alpha=1, rasterized=True,
linewidths=0, zorder=2, s=1)
#ax.scatter(df['tls_period'], df['limit'], c='C1', alpha=1, rasterized=True,
# linewidths=0, zorder=2, s=0.2)
txt = ('$N_{{\mathrm{{above}}}}$: '+
'{}'.format(len(df[df['tls_sde']>df['limit']]))+
'\n$N_{{\mathrm{{below}}}}$: '+
'{}'.format(len(df[df['tls_sde']<df['limit']])) )
ax.text(0.96, 0.96, txt, ha='right', va='top', fontsize='medium',
transform=ax.transAxes)
ax.set_xscale('log')
ax.set_ylim([0,40])
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_ticks_position('both')
ax.get_yaxis().set_tick_params(which='both', direction='in')
ax.get_xaxis().set_tick_params(which='both', direction='in')
f.text(0.5,-0.01, 'TLS peak period [days]', ha='center')
f.text(-0.03,0.5, 'TLS SDE', va='center', rotation=90)
f.tight_layout(h_pad=0.2, pad=0.2)
savefig(f, outpath)
def plot_avg_acf(sectors, size=10000, overwrite=0, percentiles=[25,50,75],
cleanprevacf=True):
outpath = os.path.join(OUTDIR, 'avg_acf.png')
if os.path.exists(outpath) and not overwrite:
print('found {} and not overwrite; return'.format(outpath))
return
#
# collect acfs for however many random LCs passed
#
np.random.seed(42)
lcpaths = []
for sector in sectors:
lcpaths.append(
np.random.choice(
glob(os.path.join(LCDIR, 'sector-{}'.format(sector),
'cam?_ccd?', 'hlsp_*llc.fits')),
size=size,
replace=False
)
)
lcpaths = nparr(lcpaths).flatten()
acfdir = os.path.join(OUTDIR, 'avg_acf_data')
if cleanprevacf:
# annoyingly fails. maybe nfs problem?
pass
#cmd = 'rm -rf {}'.format(acfdir)
#subprocess.call(cmd)
if not os.path.exists(acfdir):
os.mkdir(acfdir)
acfstatfiles = glob(os.path.join(acfdir,'*_acf_stats.csv'))
if len(acfstatfiles)<10:
lcs.parallel_compute_acf_statistics(
lcpaths, acfdir, nworkers=40,
eval_times_hr=np.arange(1,301,1),
dtrtypes=['IRM','PCA','TFA']
)
acfstatfiles = glob(os.path.join(acfdir,'*_acf_stats.csv'))
df = lcs.read_acf_stat_files(acfstatfiles)
plt.close('all')
fig, axs = plt.subplots(ncols=3, figsize=(2.5*3,3))
linestyles = ['--','-',':']
apstrs = ['IRM2','PCA2','TFA2']
for ax, apstr in zip(axs, apstrs):
timelags = np.sort(np.unique(df['LAG_TIME_HR']))
percentile_dict = {}
for timelag in timelags:
percentile_dict[timelag] = {}
sel = df['LAG_TIME_HR']==timelag
for percentile in percentiles:
val = np.nanpercentile(df[sel][apstr+'_ACF'], percentile)
percentile_dict[timelag][percentile] = np.round(val,7)
pctile_df = pd.DataFrame(percentile_dict)
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
Series,
concat,
date_range,
)
import pandas._testing as tm
from pandas.api.indexers import (
BaseIndexer,
FixedForwardWindowIndexer,
)
from pandas.core.window.indexers import (
ExpandingIndexer,
FixedWindowIndexer,
VariableOffsetWindowIndexer,
)
from pandas.tseries.offsets import BusinessDay
def test_bad_get_window_bounds_signature():
class BadIndexer(BaseIndexer):
def get_window_bounds(self):
return None
indexer = BadIndexer()
with pytest.raises(ValueError, match="BadIndexer does not implement"):
Series(range(5)).rolling(indexer)
def test_expanding_indexer():
s = Series(range(10))
indexer = ExpandingIndexer()
result = s.rolling(indexer).mean()
expected = s.expanding().mean()
tm.assert_series_equal(result, expected)
def test_indexer_constructor_arg():
# Example found in computation.rst
use_expanding = [True, False, True, False, True]
df = DataFrame({"values": range(5)})
class CustomIndexer(BaseIndexer):
def get_window_bounds(self, num_values, min_periods, center, closed):
start = np.empty(num_values, dtype=np.int64)
end = np.empty(num_values, dtype=np.int64)
for i in range(num_values):
if self.use_expanding[i]:
start[i] = 0
end[i] = i + 1
else:
start[i] = i
end[i] = i + self.window_size
return start, end
indexer = CustomIndexer(window_size=1, use_expanding=use_expanding)
result = df.rolling(indexer).sum()
expected = DataFrame({"values": [0.0, 1.0, 3.0, 3.0, 10.0]})
tm.assert_frame_equal(result, expected)
def test_indexer_accepts_rolling_args():
df = DataFrame({"values": range(5)})
class CustomIndexer(BaseIndexer):
def get_window_bounds(self, num_values, min_periods, center, closed):
start = np.empty(num_values, dtype=np.int64)
end = np.empty(num_values, dtype=np.int64)
for i in range(num_values):
if center and min_periods == 1 and closed == "both" and i == 2:
start[i] = 0
end[i] = num_values
else:
start[i] = i
end[i] = i + self.window_size
return start, end
indexer = CustomIndexer(window_size=1)
result = df.rolling(indexer, center=True, min_periods=1, closed="both").sum()
expected = DataFrame({"values": [0.0, 1.0, 10.0, 3.0, 4.0]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("constructor", [Series, DataFrame])
@pytest.mark.parametrize(
"func,np_func,expected,np_kwargs",
[
("count", len, [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, np.nan], {}),
("min", np.min, [0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 6.0, 7.0, 8.0, np.nan], {}),
(
"max",
np.max,
[2.0, 3.0, 4.0, 100.0, 100.0, 100.0, 8.0, 9.0, 9.0, np.nan],
{},
),
(
"std",
np.std,
[
1.0,
1.0,
1.0,
55.71654452,
54.85739087,
53.9845657,
1.0,
1.0,
0.70710678,
np.nan,
],
{"ddof": 1},
),
(
"var",
np.var,
[
1.0,
1.0,
1.0,
3104.333333,
3009.333333,
2914.333333,
1.0,
1.0,
0.500000,
np.nan,
],
{"ddof": 1},
),
(
"median",
np.median,
[1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 7.0, 8.0, 8.5, np.nan],
{},
),
],
)
@pytest.mark.filterwarnings("ignore:min_periods:FutureWarning")
def test_rolling_forward_window(constructor, func, np_func, expected, np_kwargs):
# GH 32865
values = np.arange(10.0)
values[5] = 100.0
indexer = FixedForwardWindowIndexer(window_size=3)
match = "Forward-looking windows can't have center=True"
with pytest.raises(ValueError, match=match):
rolling = constructor(values).rolling(window=indexer, center=True)
getattr(rolling, func)()
match = "Forward-looking windows don't support setting the closed argument"
with pytest.raises(ValueError, match=match):
rolling = constructor(values).rolling(window=indexer, closed="right")
getattr(rolling, func)()
rolling = constructor(values).rolling(window=indexer, min_periods=2)
result = getattr(rolling, func)()
# Check that the function output matches the explicitly provided array
expected = constructor(expected)
tm.assert_equal(result, expected)
# Check that the rolling function output matches applying an alternative
# function to the rolling window object
expected2 = constructor(rolling.apply(lambda x: np_func(x, **np_kwargs)))
tm.assert_equal(result, expected2)
# Check that the function output matches applying an alternative function
# if min_periods isn't specified
# GH 39604: After count-min_periods deprecation, apply(lambda x: len(x))
# is equivalent to count after setting min_periods=0
min_periods = 0 if func == "count" else None
rolling3 = constructor(values).rolling(window=indexer, min_periods=min_periods)
result3 = getattr(rolling3, func)()
expected3 = constructor(rolling3.apply(lambda x: np_func(x, **np_kwargs)))
tm.assert_equal(result3, expected3)
@pytest.mark.parametrize("constructor", [Series, DataFrame])
def test_rolling_forward_skewness(constructor):
values = np.arange(10.0)
values[5] = 100.0
indexer = FixedForwardWindowIndexer(window_size=5)
rolling = constructor(values).rolling(window=indexer, min_periods=3)
result = rolling.skew()
expected = constructor(
[
0.0,
2.232396,
2.229508,
2.228340,
2.229091,
2.231989,
0.0,
0.0,
np.nan,
np.nan,
]
)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"func,expected",
[
("cov", [2.0, 2.0, 2.0, 97.0, 2.0, -93.0, 2.0, 2.0, np.nan, np.nan]),
(
"corr",
[
1.0,
1.0,
1.0,
0.8704775290207161,
0.018229084250926637,
-0.861357304646493,
1.0,
1.0,
np.nan,
np.nan,
],
),
],
)
def test_rolling_forward_cov_corr(func, expected):
values1 = np.arange(10).reshape(-1, 1)
values2 = values1 * 2
values1[5, 0] = 100
values = np.concatenate([values1, values2], axis=1)
indexer = FixedForwardWindowIndexer(window_size=3)
rolling = DataFrame(values).rolling(window=indexer, min_periods=3)
# We are interested in checking only pairwise covariance / correlation
result = getattr(rolling, func)().loc[(slice(None), 1), 0]
result = result.reset_index(drop=True)
expected = Series(expected)
expected.name = result.name
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"closed,expected_data",
[
["right", [0.0, 1.0, 2.0, 3.0, 7.0, 12.0, 6.0, 7.0, 8.0, 9.0]],
["left", [0.0, 0.0, 1.0, 2.0, 5.0, 9.0, 5.0, 6.0, 7.0, 8.0]],
],
)
def test_non_fixed_variable_window_indexer(closed, expected_data):
index = date_range("2020", periods=10)
df = DataFrame(range(10), index=index)
offset = BusinessDay(1)
from collections import defaultdict
from sklearn import preprocessing
import pandas as pd
from math import *
import datetime
#from .pygradu import portcalls
from .gridify import *
import math
EARTH_RADIUS_KM = 6371.0
class Graph():
def __init__(self):
"""
self.edges is a dict of all possible next nodes
e.g. {'X': ['A', 'B', 'C', 'E'], ...}
self.weights has all the weights between two nodes,
with the two nodes as a tuple as the key
e.g. {('X', 'A'): 7, ('X', 'B'): 2, ...}
"""
self.edges = defaultdict(list)
self.costs = {}
self.type_weights = {}
self.positions = {}
self.use_dirways = True
self.use_turn_penalty = False
self.use_shallow_penalty = False
def add_edge(self, from_node, to_node, cost):
# Note: assumes edges are bi-directional
if to_node not in self.edges[from_node]:
self.edges[from_node].append(to_node)
self.costs[(from_node, to_node)] = cost
def cost(self, current_node, next_node, current_course, next_course, dirways_graph, shallow_graph):
cost = self.costs[(current_node, next_node)]
if self.use_dirways and next_node in dirways_graph:
return 0.05
shallow_penalty = 0
if self.use_shallow_penalty and next_node in shallow_graph:
shallow_penalty = 0.2
turn_penalty = 0
if self.use_turn_penalty:
phi = abs(current_course - next_course) % 360
if phi > 180:
change = 360 - phi
else:
change = phi
turn_penalty = change/180 * 0.05
return cost + turn_penalty + shallow_penalty
def print_parameters(self):
print('use_dirways=', self.use_dirways)
print('use_turn_penalty=', self.use_turn_penalty)
print('use_shallow_penalty=', self.use_shallow_penalty)
def df_to_graph(complete_graph):
edges = complete_graph.values
graph = Graph()
# Convert df to format that A* understands
for e in edges:
edge = (e[0], e[1], e[2])
graph.add_edge(*edge)
return graph
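# Illustrative sketch, not part of the original module: the node ids and costs below are
# invented. It shows how a Graph is assembled from a three-column edge DataFrame and how a
# single step cost is queried when no dirway/shallow nodes apply and turn penalties are
# disabled (cost() then simply returns the stored edge cost).
def _demo_df_to_graph():
    edges = pd.DataFrame({'from': [1, 1, 2], 'to': [2, 3, 3], 'cost': [1.0, 1.4, 1.0]})
    graph = df_to_graph(edges)
    return graph.cost(1, 2, None, None, dirways_graph=set(), shallow_graph=set())  # -> 1.0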
class Node():
"""A node class for A* Pathfinding"""
def __init__(self, parent=None, position=None, coords=None, speed=None, course=None, transitions=None):
self.parent = parent
self.position = position
self.coords = coords
self.g = 0
self.h = 0
self.f = 0
self.speed = speed
self.course = course
self.transitions = transitions
def __eq__(self, other):
return self.position == other.position
def __hash__(self):
return hash(self.position)
def deg2rad(deg):
return deg * (math.pi / 180)
def rad2deg(rad):
return rad * (180 / math.pi)
def normalize(value, min, max):
if value < min:
return value + (max - min)
if value > max:
return value - (max - min)
return value
def angleFromCoordinatesInDeg(coordinate1, coordinate2):
lat1 = deg2rad(coordinate1[0])
lat2 = deg2rad(coordinate2[0])
long1 = deg2rad(coordinate1[1])
long2 = deg2rad(coordinate2[1])
dLon = (long2 - long1)
y = math.sin(dLon) * math.cos(lat2)
x = math.cos(lat1) * math.sin(lat2) - math.sin(lat1) * math.cos(lat2) * math.cos(dLon)
bearing = math.atan2(y, x)
return normalize(rad2deg(bearing), 0.0, 360.0)
def distance_from_coords_in_km(coordinate1, coordinate2):
lat1 = deg2rad(coordinate1[0])
lat2 = deg2rad(coordinate2[0])
long1 = deg2rad(coordinate1[1])
long2 = deg2rad(coordinate2[1])
dLat = (lat2 - lat1)
dLon = (long2 - long1)
a = math.sin(dLat / 2) * math.sin(dLat / 2) + math.cos((lat1)) * math.cos((lat2)) * math.sin(dLon / 2) * math.sin(dLon / 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
return EARTH_RADIUS_KM * c
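# Illustrative sketch, not part of the original module: the coordinates are made-up waypoints
# (roughly Helsinki and Tallinn), used only to show the units returned by the two great-circle
# helpers above: an initial bearing in degrees and a haversine distance in kilometres.
def _demo_great_circle_helpers():
    helsinki = [60.17, 24.94]
    tallinn = [59.44, 24.75]
    bearing_deg = angleFromCoordinatesInDeg(helsinki, tallinn)   # ~187 deg (just west of south)
    distance_km = distance_from_coords_in_km(helsinki, tallinn)  # ~82 km
    return bearing_deg, distance_km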
def calculate_time(coords1, coords2, speed, start_time):
distance = distance_from_coords_in_km(coords1, coords2) * 1000
return start_time + datetime.timedelta(seconds=distance / speed)
def interpolate_to_time(latlon1, latlon2, speed, start_time, end_time):
bearing = deg2rad(angleFromCoordinatesInDeg(latlon1, latlon2))
lat1 = deg2rad(latlon1[0])
long1 = deg2rad(latlon1[1])
distance = ((end_time-start_time).total_seconds() * speed) / 1000
lat2 = math.asin(math.sin(lat1) * math.cos(distance / EARTH_RADIUS_KM) + math.cos(lat1) * math.sin(distance / EARTH_RADIUS_KM) * math.cos(bearing))
long2 = long1 + math.atan2(math.sin(bearing) * math.sin(distance / EARTH_RADIUS_KM) * math.cos(lat1), math.cos(distance / EARTH_RADIUS_KM) - math.sin(lat1) * math.sin(lat2))
return [rad2deg(lat2), rad2deg(long2)]
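# Illustrative sketch, not part of the original module: interpolate_to_time dead-reckons a
# position along the bearing from the first waypoint toward the second, at the given speed
# (m/s) for the elapsed wall-clock time. Ten minutes at 5 m/s moves roughly 3 km.
def _demo_interpolate_to_time():
    t0 = datetime.datetime(2019, 1, 1, 12, 0, 0)
    t1 = t0 + datetime.timedelta(minutes=10)
    return interpolate_to_time([60.17, 24.94], [59.44, 24.75], 5.0, t0, t1)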
def get_speed(avg_speeds, vessel_type, prev_speed, node_pos, transitions):
max_transitions = 3
if transitions > max_transitions:
transitions = max_transitions
multiplier = max_transitions - transitions
# Speed as moving average
try:
speeds_by_type = avg_speeds[node_pos]
pred_speed = speeds_by_type[vessel_type]
if isnan(speeds_by_type[vessel_type]):
pred_speed = 7
except KeyError:
pred_speed = 7
speed = ((prev_speed * multiplier) + (pred_speed * (max_transitions - multiplier))) / max_transitions
if speed > 5:
speed += 0.28
return speed
def pythagoras(x,y):
return math.sqrt(((x.coords[0] - y.coords[0]) ** 2) + ((x.coords[1] - y.coords[1]) ** 2))
# TODO: Calculate timestamps for each node instead of adding the start time into them
def retrace_route(grid, current_node, start_latlon, end_latlon, mmsi, voyage, start_time):
path = []
current = current_node
row = []
while current is not None:
if len(path) == 0:
row = end_latlon
elif current.parent is None:
row = start_latlon
else:
row = grid.extract_coords_lat_lon(current.position)
row.extend([current.position, current.speed, mmsi, voyage, start_time, current.transitions])
path.append(row)
current = current.parent
return path[::-1] # Return reversed path
def retrace_search_area(grid, closed_list, voyage):
area = []
for node in closed_list:
row = grid.extract_coords_lat_lon(node.position)
row.extend([voyage, node.g, node.h, node.f])
area.append(row)
return area
def distance_to_dest(next_node, end_node, speed):
return pythagoras(next_node, end_node)
def manhattan_distance(x,y, speed):
return sum(abs(a-b) for a, b in zip(x.coords, y.coords))
def diagonal_distance(next_node, end_node, speed):
d = 1
d2 = sqrt(2)
dx = abs(next_node.coords[0] - end_node.coords[0])
dy = abs(next_node.coords[1] - end_node.coords[1])
return (d * (dx + dy) + (d2 - 2 * d) * min(dx, dy))
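# Illustrative sketch, not part of the original module: toy grid coordinates comparing the
# three candidate A* heuristics for a displacement of (3, 4) cells. Euclidean distance gives
# 5.0, Manhattan distance 7, and diagonal (octile) distance about 5.24.
def _demo_heuristics():
    a = Node(position=0, coords=[0, 0])
    b = Node(position=1, coords=[3, 4])
    return (distance_to_dest(a, b, speed=None),
            manhattan_distance(a, b, speed=None),
            diagonal_distance(a, b, speed=None))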
def a_star(graph, start_latlon, end_latlon, avg_speeds, speed, course, vessel_type, grid, dirways_graph, shallow_graph, mmsi, voyage, start_time):
# open_list is a list of nodes which have been visited, but whose neighbors
# haven't all been inspected; it starts off with the start node
# closed_list is a list of nodes which have been visited
# and whose neighbors have been inspected
open_list = set()
closed_list = set()
start_pos = get_node(grid, start_latlon[0], start_latlon[1])
end_pos = get_node(grid, end_latlon[0], end_latlon[1])
start_node = Node(None, start_pos, grid.extract_coords(start_pos), speed, course, 0)
start_node.g = start_node.h = start_node.f = 0
end_node = Node(None, end_pos, grid.extract_coords(end_pos))
end_node.g = end_node.h = end_node.f = 0
# parents contains an adjacency map of all nodes
parents = dict()
parents[start_node.position] = start_node
open_list.add(start_node)
while open_list:
current_node = None
# find a node with the lowest value of f() - evaluation function
for tmp_node in open_list:
if current_node is None or tmp_node.f < current_node.f:
current_node = tmp_node
if current_node is None:
print('Failed to predict route')
return None
# Found the goal
if current_node == end_node:
route = retrace_route(grid, current_node, start_latlon, end_latlon, mmsi, voyage, start_time)
search_area = retrace_search_area(grid, closed_list, voyage)
return [route, search_area]
neighbours = graph.edges[current_node.position]
# for all neighbors of the current node do
for next_node in neighbours:
speed = get_speed(avg_speeds, vessel_type, current_node.speed, current_node.position, current_node.transitions + 1)
course = None
if graph.use_turn_penalty:
current_latlon = grid.extract_coords_lat_lon(current_node.position)
next_latlon = grid.extract_coords_lat_lon(next_node)
course = angleFromCoordinatesInDeg(current_latlon, next_latlon)
next_node = Node(current_node, next_node, grid.extract_coords(next_node), speed, course, current_node.transitions + 1)
if next_node in closed_list:
continue
# G is the sum of all costs from the beginning
next_node.g = current_node.g + graph.cost(current_node.position, next_node.position, current_node.course, next_node.course, dirways_graph, shallow_graph)
next_node.h = diagonal_distance(next_node, end_node, speed)
next_node.f = next_node.g + next_node.h
# if the current node isn't in both open_list and closed_list
# add it to open_list and note n as it's parent
if next_node not in open_list:
open_list.add(next_node)
else:
# otherwise, check if it's quicker to first visit n, then m
# and if it is, update parent data and g data
# and if the node was in the closed_list, move it to open_list
for open_neighbor in open_list:
if open_neighbor == next_node and next_node.g < open_neighbor.g:
open_neighbor.g = next_node.g
open_neighbor.h = next_node.h
open_neighbor.f = next_node.f
open_neighbor.parent = next_node.parent
open_neighbor.transitions = next_node.transitions
open_neighbor.speed = next_node.speed
open_neighbor.course = next_node.course
if next_node in closed_list:
closed_list.remove(next_node)
open_list.add(next_node)
# remove n from the open_list, and add it to closed_list
# because all of his neighbors were inspected
open_list.remove(current_node)
closed_list.add(current_node)
print('Path does not exist!')
print('Voyage=', str(voyage))
return None
def measure_accuracy(grid, real_pos, pred_pos, pred_speed, actual_speed):
nm_multiplier = 0.539956803
distance_nm = distance_from_coords_in_km(real_pos, pred_pos) * nm_multiplier
error_rate_lat = abs(real_pos[0] - pred_pos[0]) / real_pos[0] * 100
error_rate_lon = abs(real_pos[1] - pred_pos[1]) / real_pos[1] * 100
error_rate_speed = None
if actual_speed > 0:
error_rate_speed = abs((actual_speed - pred_speed)) / actual_speed * 100
real_node = get_node(grid, real_pos[0], real_pos[1])
pred_node = get_node(grid, pred_pos[0], pred_pos[1])
return [distance_nm, error_rate_lat, error_rate_lon, error_rate_speed, int(real_node == pred_node)]
def extract_test_voyage_ids(voyages, port_id, n):
voyage_sizes = voyages.loc[voyages['port_id'] == port_id].loc[voyages['speed'] > 2].groupby([ 'voyage']).size().sort_values(ascending=False)
return voyage_sizes.head(n).index.values.tolist()
def get_test_voyage(voyages, voyage_id, minutes_forward):
test_voyage = voyages[voyages.voyage == voyage_id]
ata = test_voyage.head(1).iloc[0].ata
start_time = pd.to_datetime(test_voyage.head(1).iloc[0].timestamp)
if test_voyage.head(1).iloc[0].speed < 1:
return None
test_voyage = test_voyage.loc[test_voyage['timestamp'] <= ata]
test_voyage = test_voyage.loc[test_voyage['timestamp'] <= pd.Timestamp(start_time + datetime.timedelta(minutes=minutes_forward))]
return test_voyage
def get_node(grid, lat, lon):
start = dict()
start['grid_point'] = grid.get_grid_point(lat, lon)
return grid.get_grid_position(start)
def interpolate_by_distance(row, next_row, distanceKm):
brng = deg2rad(angleFromCoordinatesInDeg([row.lat, row.lon], [next_row.lat, next_row.lon]))
lat1 = deg2rad(row.lat)
long1 = deg2rad(row.lon)
lat2 = math.asin(math.sin(lat1) * math.cos(distanceKm / EARTH_RADIUS_KM) + math.cos(lat1) * math.sin(distanceKm / EARTH_RADIUS_KM) * math.cos(brng))
long2 = long1 + math.atan2(math.sin(brng) * math.sin(distanceKm / EARTH_RADIUS_KM) * math.cos(lat1), math.cos(distanceKm / EARTH_RADIUS_KM) - math.sin(lat1) * math.sin(lat2))
row.lat = rad2deg(lat2)
row.lon = rad2deg(long2)
return row
def create_dirways_graph(dirways, grid):
dirways.sort_values(by=['id', 'number'], inplace=True)
dirways.reset_index(inplace=True)
dirways = dirways.groupby('id')
interpolated_dirways = []
distance_km = 1
dirway_nodes = set()
for id, dw_points in dirways:
order_number = 0
interpolated = []
dw_points.reset_index(inplace=True)
for i, current in dw_points.iterrows():
interpolated.append(current)
if i+1 == len(dw_points.lat):
break
next = dw_points.loc[(i+1)]
current.number = order_number
coords = dict()
coords['grid_point'] = grid.get_grid_point(current.lat, current.lon)
node = grid.get_grid_position(coords)
if node not in dirway_nodes:
dirway_nodes.add(node)
while distance_km < distance_from_coords_in_km([current.lat, current.lon], [next.lat, next.lon]):
current = interpolate_by_distance(current, next, distance_km)
order_number += 1
current.number = order_number
interpolated.append(current)
coords = dict()
coords['grid_point'] = grid.get_grid_point(current.lat, current.lon)
node = grid.get_grid_position(coords)
if node not in dirway_nodes:
dirway_nodes.add(node)
interpolated_dirways.append(interpolated)
return dirway_nodes
def get_observations_at_time(voyages, timestamp):
start_time = pd.to_datetime(timestamp)
voyages['course'] = -1
columns = voyages.columns
voyages = voyages.loc[(voyages['timestamp'] >= timestamp) & (voyages['timestamp'] < pd.Timestamp(start_time + datetime.timedelta(minutes=60))) & (voyages['ata'] > timestamp) & (voyages['speed'] > 1)]
voyages = voyages.groupby('voyage')
test_voyages = []
for voyage, observations in voyages:
if len(observations) == 1:
continue
course = angleFromCoordinatesInDeg([observations.iloc[0].lat, observations.iloc[0].lon], [observations.iloc[1].lat, observations.iloc[1].lon])
row = observations.iloc[1]
row.course = course
test_voyages.append(row)
return pd.DataFrame(data=test_voyages, columns=columns)
def predict_routes(observations, grid, graph, avg_speeds, dirways, shallow_graph, print_params=True):
if print_params:
graph.print_parameters()
grid.print_parameters()
routes = []
errors = []
search_areas = []
for i, observation in observations.iterrows():
dirway_graph = None
if graph.use_dirways:
active_dirways = dirways.loc[(dirways.publishtime < observation.ata) & (observation.ata <= dirways.deletetime)]
dirway_graph = create_dirways_graph(active_dirways, grid)
start_coords = [observation.lat, observation.lon]
start_time = pd.to_datetime(observation.timestamp)
end_coords = [observation.end_lat, observation.end_lon]
route = a_star(graph, start_coords, end_coords, avg_speeds, observation.speed, observation.course, observation.vessel_type, grid
, dirway_graph, shallow_graph, observation.mmsi, observation.voyage, start_time)
if route is None:
errors.append([start_coords, end_coords])
else:
routes.extend(route[0])
search_areas.extend(route[1])
# return routes
if print_params:
print('Error count=',len(errors))
print(errors)
return [routes, search_areas]
# def calculate_time(coords1, coords2, speed, start_time):
def calculate_timestamps(routes):
routes.sort_values(by=['voyage', 'number'], inplace=True)
routes['timestamp'] = -1
routes = routes.groupby('voyage')
test = []
for voyage, route in routes:
route = route.reset_index(drop=True)
for i, current in route.iterrows():
timestamp = None
if i == 0:
timestamp = current.start_time
else:
current.speed = (prev.speed+current.speed) / 2
timestamp = calculate_time([prev.lat, prev.lon], [current.lat, current.lon], current.speed, prev.timestamp)
current.timestamp = timestamp
prev = current
test.append(current.values)
return pd.DataFrame(data=test, columns=['lat', 'lon', 'node', 'speed', 'mmsi', 'voyage', 'start_time', 'number', 'timestamp'])
def test_accuracy(grid, predicted, voyages, minutes_forward=None):
predicted.sort_values(by=['voyage', 'number'], inplace=True)
predicted = predicted.groupby('voyage')
results = []
errors = []
for voyage, pred_route in predicted:
start_row = pred_route.head(1).iloc[0]
end_row = pred_route.tail(1).iloc[0]
actual_route = voyages.loc[(voyages['voyage'] == voyage) & (voyages['timestamp'] >= start_row.start_time) & (voyages['timestamp'] <= end_row.timestamp)]
pred_route.sort_values(by=['timestamp'], inplace=True)
pred_route = pred_route.reset_index(drop=True)
for i, obs in actual_route.iterrows():
try:
if minutes_forward is not None and pd.to_datetime(obs.timestamp) > (pd.to_datetime(start_row.start_time) + datetime.timedelta(minutes=minutes_forward)):
break
next = pred_route.loc[pred_route['timestamp'] > obs.timestamp].head(1).iloc[0]
next_index = next.name
pred_pos = None
pred_speed = None
if int(next_index) == 0:
pred_pos = [pred_route.head(1).iloc[0].lat, pred_route.head(1).iloc[0].lon]
pred_speed = pred_route.head(1).iloc[0].speed
else:
prev = pred_route.loc[next_index-1]
pred_speed = (prev.speed + next.speed) / 2
pred_pos = interpolate_to_time([prev.lat, prev.lon], [next.lat, next.lon], pred_speed, prev.timestamp, obs.timestamp)
mins_to_future = (pd.to_datetime(obs.timestamp) - pd.to_datetime(start_row.start_time)).total_seconds() / 60.0
acc_measures = measure_accuracy(grid, [obs.lat, obs.lon], pred_pos, pred_speed, obs.speed)
result = [voyage, obs.vessel_type, obs.end_port, obs.end_port_sea_area, start_row.start_time, obs.timestamp, mins_to_future, obs.lat, obs.lon, pred_pos[0], pred_pos[1], obs.speed, pred_speed]
result.extend(acc_measures)
results.append(result)
except IndexError:
errors.append(obs)
continue
print('error count=', len(errors))
columns = ['voyage', 'vessel_type', 'end_port', 'end_port_sea_area', 'start_time', 'pred_time', 'mins_to_future', 'actual_lat', 'actual_lon', 'pred_lat',
'pred_lon', 'actual_speed', 'pred_speed', 'acc_distance_nm', 'error_rate_lat', 'error_rate_lon', 'error_rate_speed', 'correct_node']
return pd.DataFrame(data=results, columns=columns)
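# `interpolate_to_time` and `measure_accuracy` are defined elsewhere in this module.  The helper
# below is a hypothetical sketch of the assumed behaviour of interpolate_to_time(): linearly
# interpolate the position between two consecutive predicted waypoints according to how much of
# the segment's travel time has elapsed at the observation time.
def _interpolate_to_time_sketch(coords1, coords2, speed_knots, start_time, obs_time):
    from math import radians, sin, cos, asin, sqrt
    lat1, lon1 = map(radians, coords1)
    lat2, lon2 = map(radians, coords2)
    a = sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2
    distance_nm = 2 * 3440.1 * asin(sqrt(a))          # haversine distance in nautical miles
    segment_hours = distance_nm / max(speed_knots, 0.1)
    elapsed_hours = (pd.to_datetime(obs_time) - pd.to_datetime(start_time)).total_seconds() / 3600.0
    frac = 0.0 if segment_hours == 0 else min(max(elapsed_hours / segment_hours, 0.0), 1.0)
    return [coords1[0] + frac * (coords2[0] - coords1[0]),
            coords1[1] + frac * (coords2[1] - coords1[1])]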
def test_accuracy_to_end(grid, predicted, voyages, minutes_forward=None):
predicted.sort_values(by=['voyage', 'number'], inplace=True)
predicted = predicted.groupby('voyage')
results = []
errors = []
for voyage, pred_route in predicted:
start_row = pred_route.head(1).iloc[0]
end_row = pred_route.tail(1).iloc[0]
actual_route = voyages.loc[(voyages['voyage'] == voyage) & (voyages['timestamp'] >= start_row.start_time) & (voyages['timestamp'] <= end_row.timestamp)]
actual_end = actual_route.tail(1).iloc[0]
pred_route.sort_values(by=['timestamp'], inplace=True)
pred_route = pred_route.reset_index(drop=True)
prev = None
last = None
for i, next in pred_route.iterrows():
last = next
if prev is None:
prev = next
continue
observations_between_preds = actual_route.loc[(actual_route['timestamp'] >= prev.timestamp) & (actual_route['timestamp'] < next.timestamp)]
pred_speed = (prev.speed + next.speed) / 2
if actual_end.ata < next.timestamp:
mins_to_future = (pd.to_datetime(next.timestamp) - pd.to_datetime(start_row.start_time)).total_seconds() / 60.0
acc_measures = measure_accuracy(grid, [actual_end.lat, actual_end.lon], [next.lat, next.lon], pred_speed, actual_end.speed)
result = [voyage, actual_end.vessel_type, actual_end.end_port, actual_end.end_port_sea_area, start_row.start_time, next.timestamp, mins_to_future,
actual_end.lat, actual_end.lon, next.lat, next.lon, actual_end.speed, pred_speed]
result.extend(acc_measures)
results.append(result)
prev = next
continue
for j, obs in observations_between_preds.iterrows():
if minutes_forward is not None and pd.to_datetime(obs.timestamp) > (pd.to_datetime(start_row.start_time) + datetime.timedelta(minutes=minutes_forward)):
break
pred_pos = interpolate_to_time([prev.lat, prev.lon], [next.lat, next.lon], pred_speed, prev.timestamp, obs.timestamp)
mins_to_future = ( | pd.to_datetime(obs.timestamp) | pandas.to_datetime |
from __future__ import print_function, division
import logging
import os, os.path
import re
import math
import copy
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if not on_rtd:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
from scipy.stats import gaussian_kde
from scipy.integrate import quad
else:
np, pd, plt, cm = (None, None, None, None)
gaussian_kde, quad = (None, None)
try:
from sklearn.neighbors import KernelDensity
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import normalize
from sklearn.model_selection import LeaveOneOut
except ImportError:
logging.warning('sklearn not available')
KernelDensity = None
GridSearchCV = None
if not on_rtd:
from isochrones import StarModel, get_ichrone
else:
class StarModel(object):
pass
#from transit import Central, System, Body
from .transit_basic import occultquad, ldcoeffs, minimum_inclination
from .transit_basic import MAInterpolationFunction
from .transit_basic import eclipse_pars
from .transit_basic import eclipse, eclipse_tt, NoEclipseError, NoFitError
from .transit_basic import MAXSLOPE
from .fitebs import fitebs
from .plotutils import setfig, plot2dhist
from .hashutils import hashcombine
from .stars.populations import StarPopulation, MultipleStarPopulation
from .stars.populations import BGStarPopulation, BGStarPopulation_TRILEGAL
from .stars.populations import Observed_BinaryPopulation, Observed_TriplePopulation
# from .stars.populations import DARTMOUTH
from .stars.utils import draw_eccs, semimajor, withinroche
from .stars.utils import mult_masses, randpos_in_circle
from .stars.utils import fluxfrac, addmags
from .stars.utils import RAGHAVAN_LOGPERKDE
from .stars.constraints import UpperLimit
try:
import simpledist.distributions as dists
except ImportError:
logging.warning('simpledist not available')
dists = None
try:
from progressbar import Percentage,Bar,RotatingMarker,ETA,ProgressBar
pbar_ok = True
except ImportError:
pbar_ok = False
from .orbits.populations import OrbitPopulation, TripleOrbitPopulation
SHORT_MODELNAMES = {'Planets':'pl',
'EBs':'eb',
'HEBs':'heb',
'BEBs':'beb',
'EBs (Double Period)':'eb_Px2',
'HEBs (Double Period)':'heb_Px2',
'BEBs (Double Period)':'beb_Px2',
'Blended Planets':'bpl',
'Specific BEB':'sbeb',
'Specific HEB':'sheb'}
INV_SHORT_MODELNAMES = {v:k for k,v in SHORT_MODELNAMES.items()}
DEFAULT_MODELS = ['beb','heb','eb',
'beb_Px2', 'heb_Px2','eb_Px2',
'pl']
if not on_rtd:
from astropy.units import Quantity
import astropy.units as u
import astropy.constants as const
AU = const.au.cgs.value
RSUN = const.R_sun.cgs.value
MSUN = const.M_sun.cgs.value
G = const.G.cgs.value
REARTH = const.R_earth.cgs.value
MEARTH = const.M_earth.cgs.value
else:
Quantity = None
u = None
const = None
AU, RSUN, MSUN, G, REARTH, MEARTH = (None, None, None, None, None, None)
class EclipsePopulation(StarPopulation):
"""Base class for populations of eclipsing things.
This is the base class for populations of various scenarios
that could explain a tranist signal; that is,
astrophysical false positives or transiting planets.
Once set up properly, :func:`EclipsePopulation.fit_trapezoids`
can be used to fit the trapezoidal shape parameters, after
which the likelihood of a transit signal under the model
may be calculated.
Subclasses :class:`vespa.stars.StarPopulation`, which enables
all the functionality of observational constraints.
    If ``prob`` is not passed, it should be possible to calculate it from the
    given star/orbit properties.
As with :class:`vespa.stars.StarPopulation`, any subclass must be able
to be initialized with no arguments passed, in order for
:func:`vespa.stars.StarPopulation.load_hdf` to work properly.
:param stars:
``DataFrame`` with star properties. Must contain
``M_1, M_2, R_1, R_2, u1_1, u1_2, u2_1, u2_2``.
Also, either the ``period`` keyword argument must be provided
or a ``period`` column should be in ``stars``.
``stars`` must also have the eclipse parameters:
`'inc, ecc, w, dpri, dsec, b_sec, b_pri, fluxfrac_1, fluxfrac_2``.
:param period: (optional)
Orbital period. If not provided, then ``stars`` must
have period column.
:param model: (optional)
Name of the model.
:param priorfactors: (optional)
Multiplicative factors that quantify the model prior
for this particular model; e.g. ``f_binary``, etc.
:param lhoodcachefile: (optional)
File where likelihood calculation cache is written.
:param orbpop: (optional)
Orbit population.
:type orbpop:
:class:`orbits.OrbitPopulation` or
:class:`orbits.TripleOrbitPopulation`
:param prob: (optional)
Averaged eclipse probability of scenario instances.
If not provided, this should be calculated,
though this is not implemented yet.
:param cadence: (optional)
Observing cadence, in days. Defaults to *Kepler* value.
:param **kwargs:
Additional keyword arguments passed to
:class:`vespa.stars.StarPopulation`.
"""
def __init__(self, stars=None, period=None, model='',
priorfactors=None, lhoodcachefile=None,
orbpop=None, prob=None,
cadence=1626./86400, #Kepler observing cadence, in days
**kwargs):
self.period = period
self.model = model
if priorfactors is None:
priorfactors = {}
self.priorfactors = priorfactors
self.prob = prob #calculate this if not provided?
self.cadence = cadence
self.lhoodcachefile = lhoodcachefile
self.is_specific = False
StarPopulation.__init__(self, stars=stars, orbpop=orbpop,
name=model, **kwargs)
if stars is not None:
if len(self.stars)==0:
raise EmptyPopulationError('Zero elements in {} population'.format(model))
if 'slope' in self.stars:
self._make_kde()
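    # Typical workflow implied by the docstring above (illustrative only):
    #   pop = EclipsePopulation(stars=stars_df, period=P, model='EBs', prob=prob)
    #   pop.fit_trapezoids()        # fits trapezoid shape params and builds the KDE
    #   L = pop.lhood(trsig)        # likelihood of a TransitSignal under this scenario
    #   pri = pop.prior             # eclipse prob * selectfrac * priorfactors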
def fit_trapezoids(self, MAfn=None, msg=None, use_pbar=True, **kwargs):
"""
Fit trapezoid shape to each eclipse in population
For each instance in the population, first the correct,
physical Mandel-Agol transit shape is simulated,
and then this curve is fit with a trapezoid model
:param MAfn:
:class:`transit_basic.MAInterpolationFunction` object.
If not passed, then one with default parameters will
be created.
:param msg:
Message to be displayed for progressbar output.
:param **kwargs:
Additional keyword arguments passed to :func:`fitebs.fitebs`.
"""
logging.info('Fitting trapezoid models for {}...'.format(self.model))
if msg is None:
msg = '{}: '.format(self.model)
n = len(self.stars)
deps, durs, slopes = (np.zeros(n), np.zeros(n), np.zeros(n))
secs = np.zeros(n, dtype=bool)
dsec = np.zeros(n)
if use_pbar and pbar_ok:
widgets = [msg+'fitting shape parameters for %i systems: ' % n,Percentage(),
' ',Bar(marker=RotatingMarker()),' ',ETA()]
pbar = ProgressBar(widgets=widgets,maxval=n)
pbar.start()
for i in range(n):
logging.debug('Fitting star {}'.format(i))
pri = (self.stars['dpri'][i] > self.stars['dsec'][i] or
np.isnan(self.stars['dsec'][i]))
sec = not pri
secs[i] = sec
if sec:
dsec[i] = self.stars['dpri'][i]
else:
dsec[i] = self.stars['dsec'][i]
try:
trap_pars = self.eclipse_trapfit(i, secondary=sec, **kwargs)
except NoEclipseError:
logging.error('No eclipse registered for star {}'.format(i))
trap_pars = (np.nan, np.nan, np.nan)
except NoFitError:
logging.error('Fit did not converge for star {}'.format(i))
trap_pars = (np.nan, np.nan, np.nan)
except KeyboardInterrupt:
raise
except:
logging.error('Unknown error for star {}'.format(i))
trap_pars = (np.nan, np.nan, np.nan)
if use_pbar and pbar_ok:
pbar.update(i)
durs[i], deps[i], slopes[i] = trap_pars
logging.info('Done.')
self.stars['depth'] = deps
self.stars['duration'] = durs
self.stars['slope'] = slopes
self.stars['secdepth'] = dsec
self.stars['secondary'] = secs
self._make_kde()
@property
def eclipse_features(self):
stars = self.stars
ok = (stars.depth > 0).values
stars = stars[ok]
texp = self.cadence
# Define features
sec = stars.secondary
pri = ~sec
P = stars.P
T14 = sec*stars.T14_sec + pri*stars.T14_pri
T23 = sec*stars.T23_sec + pri*stars.T23_pri
T14 += texp
T23 = np.clip(T23 - texp, 0, T14)
tau = (T14 - T23)/2.
k = (sec*(stars.radius_A/stars.radius_B) +
~sec*(stars.radius_B/stars.radius_A))
b = sec*(stars.b_sec/k) + pri*stars.b_pri
logd = np.log10(sec*stars.dsec + pri*stars.dpri)
u1 = sec*stars.u1_2 + pri*stars.u1_1
u2 = sec*stars.u2_2 + pri*stars.u2_1
#fluxfrac = sec*stars.fluxfrac_2 + pri*stars.fluxfrac_1
dilution = self.dilution_factor[ok]
X = np.array([P,T14,tau,k,b,logd,u1,u2,dilution,sec]).T
return X
@property
def eclipse_targets(self):
ok = (self.stars.depth > 0).values
stars = self.stars[ok]
duration = np.array(stars.duration)
logdepth = np.array(np.log10(stars.depth))
slope = np.array(stars.slope)
return duration, logdepth, slope
def apply_multicolor_transit(self, band, depth):
raise NotImplementedError('multicolor transit not yet implemented')
@property
def eclipseprob(self):
"""
Array of eclipse probabilities.
"""
#TODO: incorporate eccentricity/omega for exact calculation?
s = self.stars
return ((s['radius_1'] + s['radius_2'])*RSUN /
(semimajor(s['P'],s['mass_1'] + s['mass_2'])*AU))
@property
def mean_eclipseprob(self):
"""Mean eclipse probability for population
"""
return self.eclipseprob.mean()
@property
def modelshort(self):
"""
Short version of model name
Dictionary defined in ``populations.py``::
SHORT_MODELNAMES = {'Planets':'pl',
'EBs':'eb',
'HEBs':'heb',
'BEBs':'beb',
'Blended Planets':'bpl',
'Specific BEB':'sbeb',
'Specific HEB':'sheb'}
"""
try:
name = SHORT_MODELNAMES[self.model]
#add index if specific model is indexed
if hasattr(self,'index'):
name += '-{}'.format(self.index)
return name
except KeyError:
raise KeyError('No short name for model: %s' % self.model)
@property
def dilution_factor(self):
"""
Multiplicative factor (<1) that converts true depth to diluted depth.
"""
return np.ones(len(self.stars))
@property
def depth(self):
"""
Observed primary depth (fitted undiluted depth * dilution factor)
"""
return self.dilution_factor * self.stars['depth']
@property
def secondary_depth(self):
"""
Observed secondary depth (fitted undiluted sec. depth * dilution factor)
"""
return self.dilution_factor * self.stars['secdepth']
def constrain_secdepth(self, thresh):
"""
Constrain the observed secondary depth to be less than a given value
:param thresh:
Maximum allowed fractional depth for diluted secondary
eclipse depth
"""
self.apply_constraint(UpperLimit(self.secondary_depth, thresh, name='secondary depth'))
def apply_secthresh(self, *args, **kwargs):
"""Another name for constrain_secdepth
"""
return self.constrain_secdepth(*args, **kwargs)
def fluxfrac_eclipsing(self, band=None):
"""Stub for future multicolor transit implementation
"""
pass
def depth_in_band(self, band):
"""Stub for future multicolor transit implementation
"""
pass
@property
def prior(self):
"""
Model prior for particular model.
Product of eclipse probability (``self.prob``),
the fraction of scenario that is allowed by the various
constraints (``self.selectfrac``), and all additional
factors in ``self.priorfactors``.
"""
prior = self.prob * self.selectfrac
for f in self.priorfactors:
prior *= self.priorfactors[f]
return prior
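    # Illustrative example (hypothetical numbers): with prob=0.1, selectfrac=0.5 and
    # priorfactors={'f_binary': 0.4}, the model prior is 0.1 * 0.5 * 0.4 = 0.02.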
def add_priorfactor(self,**kwargs):
"""Adds given values to priorfactors
If given keyword exists already, error will be raised
to use :func:`EclipsePopulation.change_prior` instead.
"""
for kw in kwargs:
if kw in self.priorfactors:
logging.error('%s already in prior factors for %s. use change_prior function instead.' % (kw,self.model))
continue
else:
self.priorfactors[kw] = kwargs[kw]
logging.info('%s added to prior factors for %s' % (kw,self.model))
def change_prior(self, **kwargs):
"""
Changes existing priorfactors.
If given keyword isn't already in priorfactors,
then will be ignored.
"""
for kw in kwargs:
if kw in self.priorfactors:
self.priorfactors[kw] = kwargs[kw]
logging.info('{0} changed to {1} for {2} model'.format(kw,kwargs[kw],
self.model))
def _make_kde(self, use_sklearn=False, bandwidth=None, rtol=1e-6,
sig_clip=50, no_sig_clip=False, cov_all=True,
**kwargs):
"""Creates KDE objects for 3-d shape parameter distribution
KDE represents likelihood as function of trapezoidal
shape parameters (log(delta), T, T/tau).
Uses :class:`scipy.stats.gaussian_kde`` KDE by default;
Scikit-learn KDE implementation tested a bit, but not
fully implemented.
:param use_sklearn:
Whether to use scikit-learn implementation of KDE.
Not yet fully implemented, so this should stay ``False``.
:param bandwidth, rtol:
Parameters for sklearn KDE.
:param **kwargs:
Additional keyword arguments passed to
:class:`scipy.stats.gaussian_kde``.
"""
try:
#define points that are ok to use
first_ok = ((self.stars['slope'] > 0) &
(self.stars['duration'] > 0) &
(self.stars['duration'] < self.period) &
(self.depth > 0))
except KeyError:
logging.warning('Must do trapezoid fits before making KDE.')
return
self.empty = False
if first_ok.sum() < 4:
logging.warning('Empty population ({}): < 4 valid systems! Cannot calculate lhood.'.format(self.model))
self.is_empty = True #will cause is_ruled_out to be true as well.
return
#raise EmptyPopulationError('< 4 valid systems in population')
logdeps = np.log10(np.ma.array(self.depth, mask=~first_ok))
durs = np.ma.array(self.stars['duration'], mask=~first_ok)
slopes = np.ma.array(self.stars['slope'], mask=~first_ok)
#Now sigma-clip those points that passed first cuts
ok = np.ones(len(logdeps), dtype=bool)
for x in [logdeps, durs, slopes]:
med = np.ma.median(x)
mad = np.ma.median((x - med).__abs__())
after_clip = np.ma.masked_where((x - med).__abs__() / mad > sig_clip, x)
ok &= ~after_clip.mask
second_ok = ok & first_ok
assert np.allclose(second_ok, ok)
# Before making KDE for real, first calculate
# covariance and inv_cov of uncut data, to use
# when it's cut, too.
points = np.ma.array([logdeps,
durs,
slopes], mask=np.row_stack((~second_ok, ~second_ok, ~second_ok)))
points = points.compress(~points.mask[0],axis=1).data
#from numpy.linalg import LinAlgError
try:
from scipy import linalg
kde = gaussian_kde(points) #backward compatibility?
inv = linalg.inv(kde._data_covariance)
#print(np.vstack(points), np.shape(np.vstack(points)))
except np.linalg.linalg.LinAlgError:
print(points, np.shape(points))
cov_all = kde._data_covariance
icov_all = kde._data_inv_cov
factor = kde.factor
# OK, now cut the data for constraints & proceed
ok = second_ok & self.distok
points = np.ma.array([durs,
logdeps,
slopes], mask=np.row_stack((~ok, ~ok, ~ok)))
points = points.compress(~points.mask[0],axis=1)
logdeps = points.data[1]
durs = points.data[0]
slopes = points.data[2]
if ok.sum() < 4 and not self.empty:
logging.warning('Empty population ({}): < 4 valid systems! Cannot calculate lhood.'.format(self.model))
self.is_empty = True
return
#raise EmptyPopulationError('< 4 valid systems in population')
if use_sklearn:
self.sklearn_kde = True
logdeps_normed = (logdeps - logdeps.mean())/logdeps.std()
durs_normed = (durs - durs.mean())/durs.std()
slopes_normed = (slopes - slopes.mean())/slopes.std()
#TODO: use sklearn preprocessing to replace below
self.mean_logdepth = logdeps.mean()
self.std_logdepth = logdeps.std()
self.mean_dur = durs.mean()
self.std_dur = durs.std()
self.mean_slope = slopes.mean()
self.std_slope = slopes.std()
points = np.array([logdeps_normed, durs_normed, slopes_normed])
try:
points_skl = normalize(np.transpose([durs, logdeps, slopes]))
except ValueError:
from nose.tools import set_trace; set_trace()
set_trace()
#assert np.allclose(points_pre, points_skl)
#find best bandwidth. For some reason this doesn't work?
if bandwidth is None:
bandwidths = np.linspace(0.05,1,100)
grid = GridSearchCV(KernelDensity(kernel='gaussian'),\
{'bandwidth': bandwidths},\
cv=3)
grid.fit(points_skl)
self._best_bandwidth = grid.best_params_
self.kde = grid.best_estimator_
else:
self.kde = KernelDensity(rtol=rtol, bandwidth=bandwidth).fit(points_skl)
else:
self.sklearn_kde = False
#Yangyang: method 1
points = (points+1e-07*np.random.uniform(-1.0, 1.0, np.shape(points))).data
self.kde = gaussian_kde(points, **kwargs) #backward compatibility?
# Reset covariance based on uncut data
self.kde._data_covariance = cov_all
self.kde._data_inv_cov = icov_all
self.kde._compute_covariance()
def _density(self, dataset):
"""
Evaluate KDE at given points.
Prepares data according to whether sklearn or scipy
KDE in use.
:param log, dur, slope:
Trapezoidal shape parameters.
"""
if self.sklearn_kde:
#TODO: fix preprocessing
#Yangyang's modification(method2):
#pts = np.array([(logd - self.mean_logdepth)/self.std_logdepth,
# (dur - self.mean_dur)/self.std_dur,
# (slope - self.mean_slope)/self.std_slope])
pts = normalize(dataset.T)#(#sample, #features)to make consistent with scipy method, besides their density is in log, then...
return np.exp(self.kde.score_samples(pts))
else:
return self.kde(dataset)
def lhood(self, trsig, recalc=False, cachefile=None):
"""Returns likelihood of transit signal
Returns sum of ``trsig`` MCMC samples evaluated
at ``self.kde``.
:param trsig:
:class:`vespa.TransitSignal` object.
:param recalc: (optional)
Whether to recalculate likelihood (if calculation
is cached).
:param cachefile: (optional)
File that holds likelihood calculation cache.
"""
if not hasattr(self,'kde'):
self._make_kde()
if cachefile is None:
cachefile = self.lhoodcachefile
if cachefile is None:
cachefile = 'lhoodcache.dat'
lhoodcache = _loadcache(cachefile)
key = hashcombine(self, trsig)
if key in lhoodcache and not recalc:
return lhoodcache[key]
if self.is_ruled_out:
return 0
N = trsig.kde.dataset.shape[1]
lh = np.sum(self._density(trsig.kde.dataset)) / N
with open(cachefile, 'a') as fout:
fout.write('%i %g\n' % (key, lh))
return lh
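    # The cache file written above is plain text with one "<hash-key> <likelihood>" pair per
    # line, and the likelihood itself is the mean KDE density evaluated over the N MCMC
    # samples stored in trsig.kde.dataset.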
def lhoodplot(self, trsig=None, fig=None,
piechart=True, figsize=None, logscale=True,
constraints='all', suptitle=None, Ltot=None,
maxdur=None, maxslope=None, inverse=False,
colordict=None, cachefile=None, nbins=20,
dur_range=None, slope_range=None, depth_range=None,
recalc=False,**kwargs):
"""
Makes plot of likelihood density function, optionally with transit signal
        If ``trsig`` is not passed, then just a density plot of the likelihood
        will be made; if it is passed, then it will be plotted
        over the density plot.
:param trsig: (optional)
:class:`vespa.TransitSignal` object.
:param fig: (optional)
Argument for :func:`plotutils.setfig`.
:param piechart: (optional)
Whether to include a plot of the piechart that describes
the effect of the constraints on the population.
:param figsize: (optional)
Passed to :func:`plotutils.setfig`.
:param logscale: (optional)
If ``True``, then shading will be based on the log-histogram
(thus showing more detail at low density). Passed to
:func:`vespa.stars.StarPopulation.prophist2d`.
:param constraints: (``'all', 'none'`` or ``list``; optional)
Which constraints to apply in making plot. Picking
specific constraints allows you to visualize in more
detail what the effect of a constraint is.
:param suptitle: (optional)
Title for the figure.
:param Ltot: (optional)
Total of ``prior * likelihood`` for all models. If this is
passed, then "Probability of scenario" gets a text box
in the middle.
:param inverse: (optional)
Intended to allow showing only the instances that are
ruled out, rather than those that remain. Not sure if this
works anymore.
:param colordict: (optional)
Dictionary to define colors of constraints to be used
in pie chart. Intended to unify constraint colors among
different models.
:param cachefile: (optional)
Likelihood calculation cache file.
:param nbins: (optional)
Number of bins with which to make the 2D histogram plot;
passed to :func:`vespa.stars.StarPopulation.prophist2d`.
:param dur_range, slope_range, depth_range: (optional)
Define ranges of plots.
:param **kwargs:
Additional keyword arguments passed to
:func:`vespa.stars.StarPopulation.prophist2d`.
"""
setfig(fig, figsize=figsize)
if trsig is not None:
dep,ddep = trsig.logdepthfit
dur,ddur = trsig.durfit
slope,dslope = trsig.slopefit
ddep = ddep.reshape((2,1))
ddur = ddur.reshape((2,1))
dslope = dslope.reshape((2,1))
if dur_range is None:
dur_range = (0,dur*2)
if slope_range is None:
slope_range = (2,slope*2)
if constraints == 'all':
mask = self.distok
elif constraints == 'none':
mask = np.ones(len(self.stars)).astype(bool)
else:
mask = np.ones(len(self.stars)).astype(bool)
for c in constraints:
if c not in self.distribution_skip:
mask &= self.constraints[c].ok
if inverse:
mask = ~mask
if dur_range is None:
dur_range = (self.stars[mask]['duration'].min(),
self.stars[mask]['duration'].max())
if slope_range is None:
slope_range = (2,self.stars[mask]['slope'].max())
if depth_range is None:
depth_range = (-5,-0.1)
#This may mess with intended "inverse" behavior, probably?
        mask &= ((self.stars['duration'] > dur_range[0]) &
                 (self.stars['duration'] < dur_range[1]))
        mask &= ((self.stars['slope'] > slope_range[0]) &
                 (self.stars['slope'] < slope_range[1]))
        mask &= ((np.log10(self.depth) > depth_range[0]) &
                 (np.log10(self.depth) < depth_range[1]))
if piechart:
a_pie = plt.axes([0.07, 0.5, 0.4, 0.5])
self.constraint_piechart(fig=0, colordict=colordict)
ax1 = plt.subplot(222)
if not self.is_ruled_out:
self.prophist2d('duration', 'depth', logy=True, fig=0,
mask=mask, interpolation='bicubic',
logscale=logscale, nbins=nbins, **kwargs)
if trsig is not None:
plt.errorbar(dur,dep,xerr=ddur,yerr=ddep,color='w',marker='x',
ms=12,mew=3,lw=3,capsize=3,mec='w')
plt.errorbar(dur,dep,xerr=ddur,yerr=ddep,color='r',marker='x',
ms=10,mew=1.5)
plt.ylabel(r'log($\delta$)')
plt.xlabel('')
plt.xlim(dur_range)
plt.ylim(depth_range)
yt = ax1.get_yticks()
plt.yticks(yt[1:])
xt = ax1.get_xticks()
plt.xticks(xt[2:-1:2])
ax3 = plt.subplot(223)
if not self.is_ruled_out:
self.prophist2d('depth', 'slope', logx=True, fig=0,
mask=mask, interpolation='bicubic',
logscale=logscale, nbins=nbins, **kwargs)
if trsig is not None:
plt.errorbar(dep,slope,xerr=ddep,yerr=dslope,color='w',marker='x',
ms=12,mew=3,lw=3,capsize=3,mec='w')
plt.errorbar(dep,slope,xerr=ddep,yerr=dslope,color='r',marker='x',
ms=10,mew=1.5)
plt.ylabel(r'$T/\tau$')
plt.xlabel(r'log($\delta$)')
plt.ylim(slope_range)
plt.xlim(depth_range)
yt = ax3.get_yticks()
plt.yticks(yt[1:])
ax4 = plt.subplot(224)
if not self.is_ruled_out:
self.prophist2d('duration', 'slope', fig=0,
mask=mask, interpolation='bicubic',
logscale=logscale, nbins=nbins, **kwargs)
if trsig is not None:
plt.errorbar(dur,slope,xerr=ddur,yerr=dslope,color='w',marker='x',
ms=12,mew=3,lw=3,capsize=3,mec='w')
plt.errorbar(dur,slope,xerr=ddur,yerr=dslope,color='r',marker='x',
ms=10,mew=1.5)
plt.ylabel('')
plt.xlabel(r'$T$ [days]')
plt.ylim(slope_range)
plt.xlim(dur_range)
plt.xticks(xt[2:-1:2])
plt.yticks(ax3.get_yticks())
ticklabels = ax1.get_xticklabels() + ax4.get_yticklabels()
plt.setp(ticklabels,visible=False)
plt.subplots_adjust(hspace=0.001,wspace=0.001)
if suptitle is None:
suptitle = self.model
plt.suptitle(suptitle,fontsize=20)
if Ltot is not None:
lhood = self.lhood(trsig, recalc=recalc)
plt.annotate('%s:\nProbability\nof scenario: %.3f' % (trsig.name,
self.prior*lhood/Ltot),
xy=(0.5,0.5),ha='center',va='center',
bbox=dict(boxstyle='round',fc='w'),
xycoords='figure fraction',fontsize=15)
def eclipse_pars(self, i, secondary=False):
s = self.stars.iloc[i]
P = s['P']
#p0, b, aR = eclipse_pars(P, s['mass_1'], s['mass_2'],
# s['radius_1'], s['radius_2'],
# ecc=s['ecc'], inc=s['inc'],
# w=s['w'])
p0 = s['radius_2']/s['radius_1']
aR = semimajor(P, s['mass_1']+s['mass_2'])*AU/(s['radius_1']*RSUN)
if secondary:
mu1, mu2 = s[['u1_2', 'u2_2']]
b = s['b_sec']
frac = s['fluxfrac_2']
else:
mu1, mu2 = s[['u1_1', 'u2_1']]
b = s['b_pri']
frac = s['fluxfrac_1']
return dict(P=P, p0=p0, b=b, aR=aR, frac=frac, u1=mu1, u2=mu2,
ecc=s['ecc'], w=s['w'])
def eclipse(self, i, secondary=False, **kwargs):
pars = self.eclipse_pars(i, secondary=secondary)
for k,v in pars.items():
kwargs[k] = v
return eclipse(sec=secondary, **kwargs)
def eclipse_trapfit(self, i, secondary=False, **kwargs):
pars = self.eclipse_pars(i, secondary=secondary)
for k,v in pars.items():
kwargs[k] = v
kwargs['cadence'] = self.cadence
return eclipse_tt(sec=secondary, **kwargs)
def eclipse_new(self, i, secondary=False, npoints=200, width=3,
texp=None):
"""
Returns times and fluxes of eclipse i (centered at t=0)
"""
texp = self.cadence
s = self.stars.iloc[i]
e = s['ecc']
P = s['P']
if secondary:
mu1, mu2 = s[['u1_2', 'u2_2']]
w = np.mod(np.deg2rad(s['w']) + np.pi, 2*np.pi)
mass_central, radius_central = s[['mass_2','radius_2']]
mass_body, radius_body = s[['mass_1','radius_1']]
b = s['b_sec'] * s['radius_1']/s['radius_2']
frac = s['fluxfrac_2']
else:
mu1, mu2 = s[['u1_1', 'u2_1']]
w = np.deg2rad(s['w'])
mass_central, radius_central = s[['mass_1','radius_1']]
mass_body, radius_body = s[['mass_2','radius_2']]
b = s['b_pri']
frac = s['fluxfrac_1']
central_kwargs = dict(mass=mass_central, radius=radius_central,
mu1=mu1, mu2=mu2)
central = Central(**central_kwargs)
body_kwargs = dict(radius=radius_body, mass=mass_body, b=b,
period=P, e=e, omega=w)
body = Body(**body_kwargs)
logging.debug('central: {}'.format(central_kwargs))
logging.debug('body: {}'.format(body_kwargs))
s = System(central)
s.add_body(body)
# As of now, body.duration returns strictly circular duration
dur = body.duration
logging.debug('duration: {}'.format(dur))
ts = np.linspace(-width/2*dur, width/2*dur, npoints)
fs = s.light_curve(ts, texp=texp)
fs = 1 - frac*(1-fs)
return ts, fs
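    # Note: the `fs = 1 - frac*(1-fs)` step above dilutes the eclipse by the eclipsed star's
    # fractional flux, so the returned curve is normalized to the total system flux.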
@property
def _properties(self):
return ['period','model','priorfactors','prob','lhoodcachefile',
'is_specific', 'cadence'] + \
super(EclipsePopulation,self)._properties
@classmethod
def load_hdf(cls, filename, path=''): #perhaps this doesn't need to be written?
"""
Loads EclipsePopulation from HDF file
Also runs :func:`EclipsePopulation._make_kde` if it can.
:param filename:
HDF file
:param path: (optional)
Path within HDF file
"""
new = StarPopulation.load_hdf(filename, path=path)
#setup lazy loading of starmodel if present
try:
with pd.HDFStore(filename) as store:
if '{}/starmodel'.format(path) in store:
new._starmodel = None
new._starmodel_file = filename
new._starmodel_path = '{}/starmodel'.format(path)
except:
pass
try:
new._make_kde()
except NoTrapfitError:
logging.warning('Trapezoid fit not done.')
return new
@property
def starmodel(self):
if not hasattr(self, '_starmodel'):
raise AttributeError('{} does not have starmodel.'.format(self))
if (hasattr(self, '_starmodel_file') and hasattr(self, '_starmodel_path')):
self._starmodel = StarModel.load_hdf(self._starmodel_file,
path=self._starmodel_path)
return self._starmodel
def resample(self):
"""
Returns a copy of population with stars resampled (with replacement).
Used in bootstrap estimate of FPP uncertainty.
TODO: check to make sure constraints properly copied!
"""
new = copy.deepcopy(self)
N = len(new.stars)
inds = np.random.randint(N, size=N)
# Resample stars
new.stars = new.stars.iloc[inds].reset_index()
# Resample constraints
if hasattr(new, '_constraints'):
for c in new._constraints:
new._constraints[c] = new._constraints[c].resample(inds)
new._make_kde()
return new
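# Hypothetical usage sketch (not part of the original module) of how resample() could feed a
# bootstrap estimate of the likelihood uncertainty for one population:
def _bootstrap_lhoods_sketch(pop, trsig, n_boot=100):
    lhoods = []
    for _ in range(n_boot):
        boot = pop.resample()                      # stars drawn with replacement
        lhoods.append(boot.lhood(trsig, recalc=True))
    return np.array(lhoods)                        # e.g. np.std(lhoods) gives a spread estimate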
class EclipsePopulation_Px2(EclipsePopulation):
def apply_secthresh(self, *args, **kwargs):
logging.warning('Secondary depth cut should not be used on a double-period scenario!')
@property
def depth_difference(self):
return np.absolute(self.depth - self.secondary_depth)
def constrain_oddeven(self, diff):
self.apply_constraint(UpperLimit(self.depth_difference, diff, name='odd-even'))
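# Rationale implied by the two methods above: in a double-period scenario the "odd" and "even"
# events at the detected period are really the primary and secondary eclipses at twice that
# period, so an upper limit on |depth - secondary_depth| (the odd-even depth difference) is the
# appropriate constraint, while an ordinary secondary-depth threshold is not meaningful here
# (hence the warning in apply_secthresh).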
class PlanetPopulation(EclipsePopulation):
"""Population of Transiting Planets
Subclass of :class:`EclipsePopulation`. This is mostly
a copy of :class:`EBPopulation`, with small modifications.
Star properties may be defined either with either a
:class:`isochrones.StarModel` or by defining just its
``mass`` and ``radius`` (and ``Teff`` and ``logg`` if
desired to set limb darkening coefficients appropriately).
:param period:
Period of signal.
:param rprs:
Point-estimate of Rp/Rs radius ratio.
:param mass, radius: (optional)
Mass and radius of host star. If defined, must be
either tuples of form ``(value, error)`` or
:class:`simpledist.Distribution` objects.
:param Teff, logg: (optional)
Teff and logg point estimates for host star.
These are used only for calculating limb darkening
coefficients.
:param starmodel: (optional)
The preferred way to define the properties of the
host star. If MCMC has been run on this model,
then samples are just read off; if it hasn't,
then it will run it.
:type starmodel:
:class:`isochrones.StarModel`
:param band: (optional)
Photometric band in which eclipse is detected.
:param model: (optional)
Name of the model.
:param n: (optional)
Number of instances to simulate. Default = ``2e4``.
:param fp_specific: (optional)
"Specific occurrence rate" for this type of planets;
that is, the planet occurrence rate integrated
from ``(1-rbin_width)x`` to ``(1+rbin_width)x`` this planet radius. This
goes into the ``priorfactor`` for this model.
:param u1, u2: (optional)
Limb darkening parameters. If not provided, then
calculated based on ``Teff, logg`` or just
defaulted to solar values.
:param rbin_width: (optional)
Fractional width of rbin for ``fp_specific``.
:param MAfn: (optional)
:class:`transit_basic.MAInterpolationFunction` object.
If not passed, then one with default parameters will
be created.
:param lhoodcachefile: (optional)
Likelihood calculation cache file.
"""
def __init__(self, period=None,
cadence=1626./86400, #Kepler observing cadence, in days
rprs=None,
mass=None, radius=None, Teff=None, logg=None,
starmodel=None,
band='Kepler', model='Planets', n=2e4,
fp_specific=None, u1=None, u2=None,
rbin_width=0.3,
MAfn=None, lhoodcachefile=None):
self.period = period
self.cadence = cadence
self.n = n
self.model = model
self.band = band
self.rprs = rprs
self.Teff = Teff
self.logg = logg
self._starmodel = starmodel
        if (radius is not None and mass is not None) or starmodel is not None:
# calculates eclipses
logging.debug('generating planet population...')
self.generate(rprs=rprs, mass=mass, radius=radius,
n=n, fp_specific=fp_specific,
starmodel=starmodel,
rbin_width=rbin_width,
u1=u1, u2=u2, Teff=Teff, logg=logg,
MAfn=MAfn,lhoodcachefile=lhoodcachefile)
def generate(self,rprs=None, mass=None, radius=None,
n=2e4, fp_specific=0.01, u1=None, u2=None,
starmodel=None,
Teff=None, logg=None, rbin_width=0.3,
MAfn=None, lhoodcachefile=None):
"""Generates Population
All arguments defined in ``__init__``.
"""
n = int(n)
if starmodel is None:
if type(mass) is type((1,)):
mass = dists.Gaussian_Distribution(*mass)
if isinstance(mass, dists.Distribution):
mdist = mass
mass = mdist.rvs(1e5)
if type(radius) is type((1,)):
radius = dists.Gaussian_Distribution(*radius)
if isinstance(radius, dists.Distribution):
rdist = radius
radius = rdist.rvs(1e5)
else:
samples = starmodel.random_samples(1e5)
mass = samples['mass_0_0'].values
radius = samples['radius_0_0'].values
Teff = samples['Teff_0_0'].mean()
logg = samples['logg_0_0'].mean()
logging.debug('star mass: {}'.format(mass))
logging.debug('star radius: {}'.format(radius))
logging.debug('Teff: {}'.format(Teff))
logging.debug('logg: {}'.format(logg))
if u1 is None or u2 is None:
if Teff is None or logg is None:
logging.warning('Teff, logg not provided; using solar limb darkening')
u1 = 0.394; u2=0.296
else:
u1,u2 = ldcoeffs(Teff, logg)
#use point estimate of rprs to construct planets in radius bin
#rp = self.rprs*np.median(radius)
#rbin_min = (1-rbin_width)*rp
#rbin_max = (1+rbin_width)*rp
rprs_bin_min = (1-rbin_width)*self.rprs
rprs_bin_max = (1+rbin_width)*self.rprs
radius_p = radius * (np.random.random(int(1e5))*(rprs_bin_max - rprs_bin_min) + rprs_bin_min)
mass_p = (radius_p*RSUN/REARTH)**2.06 * MEARTH/MSUN #hokey, but doesn't matter
logging.debug('planet radius: {}'.format(radius_p))
stars = pd.DataFrame()
#df_orbpop = pd.DataFrame() #for orbit population
tot_prob = None; tot_dprob = None; prob_norm = None
n_adapt = n
while len(stars) < n:
n_adapt = int(n_adapt)
inds = np.random.randint(len(mass), size=n_adapt)
#calculate eclipses.
ecl_inds, df, (prob,dprob) = calculate_eclipses(mass[inds], mass_p[inds],
radius[inds], radius_p[inds],
15, np.inf, #arbitrary
u11s=u1, u21s=u2,
band=self.band,
period=self.period,
calc_mininc=True,
return_indices=True,
MAfn=MAfn)
df['mass_A'] = mass[inds][ecl_inds]
df['mass_B'] = mass_p[inds][ecl_inds]
df['radius_A'] = radius[inds][ecl_inds]
df['radius_B'] = radius_p[inds][ecl_inds]
df['u1'] = u1 * np.ones_like(df['mass_A'])
df['u2'] = u2 * np.ones_like(df['mass_A'])
df['P'] = self.period * np.ones_like(df['mass_A'])
ok = (df['dpri']>0) & (df['T14_pri'] > 0)
stars = pd.concat((stars, df[ok]))
logging.info('{} Transiting planet systems generated (target {})'.format(len(stars),n))
logging.debug('{} nans in stars[dpri]'.format(np.isnan(stars['dpri']).sum()))
if tot_prob is None:
prob_norm = (1/dprob**2)
tot_prob = prob
tot_dprob = dprob
else:
prob_norm = (1/tot_dprob**2 + 1/dprob**2)
tot_prob = (tot_prob/tot_dprob**2 + prob/dprob**2)/prob_norm
tot_dprob = 1/np.sqrt(prob_norm)
n_adapt = min(int(1.2*(n-len(stars)) * n_adapt//len(df)), 5e4)
n_adapt = max(n_adapt, 100)
stars = stars.reset_index()
stars.drop('index', axis=1, inplace=True)
stars = stars.iloc[:n]
stars['mass_1'] = stars['mass_A']
stars['radius_1'] = stars['radius_A']
stars['mass_2'] = stars['mass_B']
stars['radius_2'] = stars['radius_B']
#make OrbitPopulation?
#finish below.
if fp_specific is None:
rp = stars['radius_2'].mean() * RSUN/REARTH
fp_specific = fp_fressin(rp)
priorfactors = {'fp_specific':fp_specific}
self._starmodel = starmodel
EclipsePopulation.__init__(self, stars=stars,
period=self.period, cadence=self.cadence,
model=self.model,
priorfactors=priorfactors, prob=tot_prob,
lhoodcachefile=lhoodcachefile)
@property
def _properties(self):
return ['rprs', 'Teff', 'logg'] + \
super(PlanetPopulation, self)._properties
def save_hdf(self, filename, path='', **kwargs):
super(PlanetPopulation, self).save_hdf(filename, path=path, **kwargs)
self.starmodel.save_hdf(filename, path='{}/starmodel'.format(path), append=True)
#@classmethod
#def load_hdf(cls, filename, path=''):
# pop = super(PlanetPopulation, cls).load_hdf(filename, path=path)
# pop.starmodel = StarModel.load_hdf(filename,
# path='{}/starmodel'.format(path))
# return pop
class EBPopulation(EclipsePopulation, Observed_BinaryPopulation):
"""Population of Eclipsing Binaries (undiluted)
Eclipsing Binary (EB) population is generated by fitting
a two-star model to the observed properties of the system
(photometric and/or spectroscopic), using
:class:`isochrones.starmodel.BinaryStarModel`.
Inherits from :class:`EclipsePopulation` and
:class:`stars.Observed_BinaryPopulation`.
:param period:
Orbital period
:param mags:
Observed apparent magnitudes. Won't work if this is
``None``, which is the default.
:type mags:
``dict``
:param Teff,logg,feh:
Spectroscopic properties of primary, if measured, in ``(value, err)`` format.
:param starmodel: (optional)
Must be a BinaryStarModel.
If MCMC has been run on this model,
then samples are just read off; if it hasn't,
then it will run it.
:type starmodel:
:class:`isochrones.BinaryStarModel`
:param band: (optional)
Photometric bandpass in which transit signal is observed.
:param model: (optional)
Name of model.
:param f_binary: (optional)
Binary fraction to be assumed. Will be one of the ``priorfactors``.
:param n: (optional)
Number of instances to simulate. Default = 2e4.
:param MAfn: (optional)
:class:`transit_basic.MAInterpolationFunction` object.
If not passed, then one with default parameters will
be created.
:param lhoodcachefile: (optional)
Likelihood calculation cache file.
"""
def __init__(self, period=None,
cadence=1626./86400, #Kepler observing cadence, in days
mags=None, mag_errs=None,
Teff=None, logg=None, feh=None,
starmodel=None,
band='Kepler', model='EBs', f_binary=0.4, n=2e4,
MAfn=None, lhoodcachefile=None, **kwargs):
self.period = period
self.cadence = cadence
self.n = n
self.model = model
self.band = band
self.lhoodcachefile = lhoodcachefile
if mags is not None or starmodel is not None:
self.generate(mags=mags, n=n, MAfn=MAfn, mag_errs=mag_errs,
f_binary=f_binary, starmodel=starmodel,
**kwargs)
def generate(self, mags, n=2e4, mag_errs=None,
Teff=None, logg=None, feh=None,
MAfn=None, f_binary=0.4, starmodel=None,
**kwargs):
"""Generates stars and eclipses
All arguments previously defined.
"""
n = int(n)
#create master population from which to create eclipses
pop = Observed_BinaryPopulation(mags=mags, mag_errs=mag_errs,
Teff=Teff,
logg=logg, feh=feh,
starmodel=starmodel,
period=self.period,
n=2*n)
all_stars = pop.stars
#start with empty; will concatenate onto
stars = pd.DataFrame()
df_orbpop = pd.DataFrame()
#calculate eclipses
if MAfn is None:
MAfn = MAInterpolationFunction(pmin=0.007, pmax=1/0.007, nzs=200, nps=400)
tot_prob = None; tot_dprob = None; prob_norm = None
n_adapt = n
while len(stars) < n:
n_adapt = int(n_adapt)
inds = np.random.randint(len(all_stars), size=n_adapt)
s = all_stars.iloc[inds]
#calculate limb-darkening coefficients
u1A, u2A = ldcoeffs(s['Teff_A'], s['logg_A'])
u1B, u2B = ldcoeffs(s['Teff_B'], s['logg_B'])
cur_orbpop_df = pop.orbpop.dataframe.iloc[inds].copy()
#calculate eclipses.
inds, df, (prob,dprob) = calculate_eclipses(s['mass_A'], s['mass_B'],
s['radius_A'], s['radius_B'],
s['{}_mag_A'.format(self.band)],
s['{}_mag_B'.format(self.band)],
u11s=u1A, u21s=u2A,
u12s=u1B, u22s=u2B,
band=self.band,
period=self.period,
calc_mininc=True,
return_indices=True,
MAfn=MAfn)
s = s.iloc[inds].copy()
s.reset_index(inplace=True)
for col in df.columns:
s[col] = df[col]
stars = pd.concat((stars, s))
new_df_orbpop = cur_orbpop_df.iloc[inds].copy()
new_df_orbpop.reset_index(inplace=True)
df_orbpop = pd.concat((df_orbpop, new_df_orbpop))
logging.info('{} Eclipsing EB systems generated (target {})'.format(len(stars),n))
logging.debug('{} nans in stars[dpri]'.format(np.isnan(stars['dpri']).sum()))
logging.debug('{} nans in df[dpri]'.format(np.isnan(df['dpri']).sum()))
if tot_prob is None:
prob_norm = (1/dprob**2)
tot_prob = prob
tot_dprob = dprob
else:
prob_norm = (1/tot_dprob**2 + 1/dprob**2)
tot_prob = (tot_prob/tot_dprob**2 + prob/dprob**2)/prob_norm
tot_dprob = 1/np.sqrt(prob_norm)
n_adapt = min(int(1.2*(n-len(stars)) * n_adapt//len(s)), 5e4)
n_adapt = max(n_adapt, 100)
stars = stars.iloc[:n]
df_orbpop = df_orbpop.iloc[:n]
orbpop = OrbitPopulation.from_df(df_orbpop)
stars = stars.reset_index()
stars.drop('index', axis=1, inplace=True)
stars['mass_1'] = stars['mass_A']
stars['radius_1'] = stars['radius_A']
stars['mass_2'] = stars['mass_B']
stars['radius_2'] = stars['radius_B']
        ## Why does this cause an infinite loop??
#Observed_BinaryPopulation.__init__(self, stars=stars, orbpop=orbpop,
# mags=mags, mag_errs=mag_errs,
# Teff=Teff, logg=logg, feh=feh,
# starmodel=starmodel)
###########
self.mags = mags
self.mag_errs = mag_errs
self.Teff = Teff
self.logg = logg
self.feh = feh
self._starmodel = pop.starmodel
priorfactors = {'f_binary':f_binary}
EclipsePopulation.__init__(self, stars=stars, orbpop=orbpop,
period=self.period, cadence=self.cadence,
model=self.model,
priorfactors=priorfactors, prob=tot_prob,
lhoodcachefile=self.lhoodcachefile)
class EBPopulation_Px2(EclipsePopulation_Px2, EBPopulation):
def __init__(self, period=None, model='EBs (Double Period)',
**kwargs):
try:
period *= 2
except:
pass
EBPopulation.__init__(self, period=period, model=model,
**kwargs)
class HEBPopulation(EclipsePopulation, Observed_TriplePopulation):
"""Population of Hierarchical Eclipsing Binaries
Hierarchical Eclipsing Binary (HEB) population is generated
by fitting
a two-star model to the observed properties of the system
(photometric and/or spectroscopic), using
:class:`isochrones.starmodel.BinaryStarModel`.
Inherits from :class:`EclipsePopulation` and
:class:`stars.Observed_TriplePopulation`.
:param period:
Orbital period
:param mags,mag_errs:
Observed apparent magnitudes; uncertainties optional. If
uncertainties not provided, :class:`Observed_TriplePopulation`
will default to uncertainties in all bands of 0.05 mag.
:type mags:
``dict``
:param Teff,logg,feh:
Spectroscopic properties of primary, if measured, in ``(value, err)`` format.
:param starmodel: (optional)
Must be a BinaryStarModel.
If MCMC has been run on this model,
then samples are just read off; if it hasn't,
then it will run it.
:type starmodel:
:class:`isochrones.BinaryStarModel`
:param band: (optional)
Photometric bandpass in which transit signal is observed.
:param model: (optional)
Name of model.
    :param f_triple: (optional)
        Triple fraction to be assumed. Will be one of the ``priorfactors``.
:param n: (optional)
Number of instances to simulate. Default = 2e4.
:param MAfn: (optional)
:class:`transit_basic.MAInterpolationFunction` object.
If not passed, then one with default parameters will
be created.
:param lhoodcachefile: (optional)
Likelihood calculation cache file.
"""
def __init__(self, period=None,
cadence=1626./86400, #Kepler observing cadence, in days
mags=None, mag_errs=None,
Teff=None, logg=None, feh=None,
starmodel=None,
band='Kepler', model='HEBs', f_triple=0.12, n=2e4,
MAfn=None, lhoodcachefile=None, **kwargs):
self.period = period
self.cadence = cadence
self.n = n
self.model = model
self.band = band
self.lhoodcachefile = lhoodcachefile
if mags is not None or starmodel is not None:
self.generate(mags=mags, n=n, MAfn=MAfn, mag_errs=mag_errs,
f_triple=f_triple, starmodel=starmodel,
**kwargs)
def generate(self, mags, n=2e4, mag_errs=None,
Teff=None, logg=None, feh=None,
MAfn=None, f_triple=0.12, starmodel=None,
**kwargs):
"""Generates stars and eclipses
All arguments previously defined.
"""
n = int(n)
#create master population from which to create eclipses
pop = Observed_TriplePopulation(mags=mags, mag_errs=mag_errs,
Teff=Teff,
logg=logg, feh=feh,
starmodel=starmodel,
period=self.period,
n=2*n)
all_stars = pop.stars
#start with empty; will concatenate onto
stars = pd.DataFrame()
df_orbpop_short = pd.DataFrame()
df_orbpop_long = pd.DataFrame()
#calculate eclipses
if MAfn is None:
MAfn = MAInterpolationFunction(pmin=0.007, pmax=1/0.007, nzs=200, nps=400)
tot_prob = None; tot_dprob = None; prob_norm = None
n_adapt = n
while len(stars) < n:
n_adapt = int(n_adapt)
inds = np.random.randint(len(all_stars), size=n_adapt)
s = all_stars.iloc[inds]
#calculate limb-darkening coefficients
u1A, u2A = ldcoeffs(s['Teff_A'], s['logg_A'])
u1B, u2B = ldcoeffs(s['Teff_B'], s['logg_B'])
u1C, u2C = ldcoeffs(s['Teff_C'], s['logg_C'])
cur_orbpop_short_df = pop.orbpop.orbpop_short.dataframe.iloc[inds].copy()
cur_orbpop_long_df = pop.orbpop.orbpop_long.dataframe.iloc[inds].copy()
#calculate eclipses.
inds, df, (prob,dprob) = calculate_eclipses(s['mass_B'], s['mass_C'],
s['radius_B'], s['radius_C'],
s['{}_mag_B'.format(self.band)],
s['{}_mag_C'.format(self.band)],
u11s=u1A, u21s=u2A,
u12s=u1B, u22s=u2B,
band=self.band,
period=self.period,
calc_mininc=True,
return_indices=True,
MAfn=MAfn)
s = s.iloc[inds].copy()
s.reset_index(inplace=True)
for col in df.columns:
s[col] = df[col]
stars = pd.concat((stars, s))
new_df_orbpop_short = cur_orbpop_short_df.iloc[inds].copy()
new_df_orbpop_short.reset_index(inplace=True)
new_df_orbpop_long = cur_orbpop_long_df.iloc[inds].copy()
new_df_orbpop_long.reset_index(inplace=True)
df_orbpop_short = | pd.concat((df_orbpop_short, new_df_orbpop_short)) | pandas.concat |
import numpy as np
import pandas as pd
import pytest
from dppd import dppd
import plotnine as p9
import dppd_plotnine # noqa: F401
from plotnine.data import mtcars
dp, X = dppd()
def test_simple():
actual = dp(mtcars).p9().geom_point({"x": "cyl", "y": "hp"}).pd
assert actual == "test_simple"
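# Note: comparing the plot object against a short string (here and in the tests below) appears
# to rely on a comparison hook defined in the project's conftest, presumably rendering the plot
# and checking it against a reference image keyed by that name; the mechanism is not shown in
# this file.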
def test_scale():
actual = (
dp(mtcars)
.p9()
.geom_point({"x": "cyl", "y": "hp"})
.scale_y_continuous(trans="log10")
.pd
)
assert actual == "test_scale"
def test_simple_add():
actual = dp(mtcars).p9().add_point(x="cyl", y="hp").pd
assert actual == "test_simple_add"
def test_more_than_the_number_of_required_aes_raises():
with pytest.raises(ValueError):
dp(mtcars).p9().add_point("mpg", "hp", "cyl").scale_color_brewer().pd
def test_unmapped():
actual = dp(mtcars).p9().add_point(x="mpg", y="hp", _color="blue").pd
assert actual == "test_unmapped"
def test_hline():
actual = (
dp(mtcars)
.p9()
.add_point(x="mpg", y="hp", _color="blue")
.add_hline(200, _color="red")
.pd
)
assert actual == "test_hline"
def test_spec_by_position_and_kwarg_raises():
with pytest.raises(ValueError):
(
dp(pd.DataFrame({"x": [1, 2], "y": [2, 2]}))
.p9()
.add_crossbar("x", "y", "y", "y", ymin="y")
)
def test_broken_data_mapping_raises_pandas_error():
with pytest.raises(ValueError):
(
dp(pd.DataFrame({"x": [1, 2], "y": [2, 1.5]}))
.p9()
.add_point(x={"1": "shu"}, y=["4"], data=None)
.pd
)
def test_default_order():
actual = (
dp(pd.DataFrame({"x": [1, 2], "y": [2, 1.5]}))
.p9()
.add_crossbar("x", "y", "y-1", "y+.5")
.pd
)
assert actual == "test_default_order"
def test_passing_in_lists():
actual = dp(pd.DataFrame({"y": [2, 1.5]})).p9().add_point(["a", "b"], "y").pd
assert actual == "test_passing_in_lists"
def test_passing_in_lists_unmapped():
actual = (
dp(pd.DataFrame({"x": ["a", "b"], "y": [2, 1.5]}))
.p9()
.add_point(x="x", y="y")
.add_point(_x=[0.5, 0.8], y="y")
.pd
)
assert actual == "test_unmapped_list"
def test_passing_in_scalar():
actual = dp(pd.DataFrame({"y": [2, 1.5]})).p9().add_point('"a"', "y").pd
assert actual == "test_passing_in_scalar"
def test_expression_vs_column():
actual = (
dp(pd.DataFrame({"x": [1, 2], "x*5": [0, 1], "y": [2, 1.5]}))
.p9()
.add_point("x*5", "y")
.pd
)
assert actual == "test_expression_vs_column"
def test_expression_outside_variables():
def times_two(x):
return x * 2
actual = (
dp( | pd.DataFrame({"x": [1, 2], "y": [2, 1.5]}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import os
import sys
import tensorflow as tf
import json
import joblib
import time
from tensorflow import keras
from keras import optimizers
from datetime import datetime,timedelta
from sklearn.preprocessing import MinMaxScaler
from datetime import datetime
pd.set_option('display.max_columns', None)
#---------------------------------------
# variables
#---------------------------------------
start = time.time()
DATASET_NUM = 7
MODEL_NUM = 10
# path
PATH_BASE = './'
PATH_MODEL = PATH_BASE + '/model/'
PATH_RESULT = PATH_BASE + '/result/'
# power capacity
power_nm_list = ['onm1_h','onm2_h','onm3_h','onm4_h']
capacity_list = [89.7, 96.6, 90, 46.2]
RSRS_ID = 0
POWER_NM = power_nm_list[RSRS_ID]
CAPACITY = capacity_list[RSRS_ID]
print("POWER_NM:{}, CAPACITY:{}".format(POWER_NM,CAPACITY))
# timesteps
SHIFT_DAYS = 7
PRED_STEPS = 24
dataX_STEPS = SHIFT_DAYS*PRED_STEPS
#---------------------------------------
# functions
#---------------------------------------
# replace out-of-range (anomalous) values with NaN
def power_anomal(x) :
if x > CAPACITY :
return np.nan
return x
def sensor_anomal(x) :
if x < -900 :
return np.nan
return x
# load sol omn
def load_power(POWER_NM):
df_power = pd.read_csv(PATH_BASE + '/df_power.csv',index_col=0)
    df_power['POWER'] = df_power['POWER'].apply(power_anomal)
    df_power = df_power.sort_values(by=['DATE'], axis=0)
df_power = df_power.set_index(pd.DatetimeIndex(df_power['DATE']))
df_power.drop(['_id','DATE'], axis=1, inplace=True)
df_power = df_power.interpolate(method='linear',limit_direction='forward')
return df_power
# load sensor
def load_sensor():
df_sensor= pd.read_csv(PATH_BASE + '/df_sensor.csv',index_col=0)
    df_sensor = df_sensor.sort_values(by=['DATE'], axis=0)
df_sensor = df_sensor.set_index(pd.DatetimeIndex(df_sensor['DATE']))
df_sensor.drop(['_id','DATE'], axis=1, inplace=True)
    for col in ['uv', 'solarradiation', 'humidity', 'windspeed', 'windgust', 'temp', 'winddir']:
        df_sensor[col] = df_sensor[col].apply(sensor_anomal)
df_sensor = df_sensor.interpolate(method='linear',limit_direction='forward')
return df_sensor
def get_df(df_power, df_sensor, POWER_NM):
# load the scaler
power_scaler = joblib.load(open('{}scaler/power_{}.pkl'.format(PATH_MODEL,POWER_NM[:-2]), 'rb'))
weather_scaler = joblib.load(open('{}scaler/weather.pkl'.format(PATH_MODEL), 'rb'))
# power
scaledpower = power_scaler.fit_transform(df_power.values)
scaledpower_df = pd.DataFrame(scaledpower, columns=df_power.columns, index=list(df_power.index.values))
# weather
df_weather = df_sensor.copy()
df_weather.drop(['dailyrainin','weeklyrainin','monthlyrainin','yearlyrainin'], axis=1, inplace=True)
scaledweather = weather_scaler.fit_transform(df_weather.values)
scaledweather_df = pd.DataFrame(scaledweather, columns=df_weather.columns, index=list(df_weather.index.values))
# JOIN (index merge)
df = pd.merge(scaledpower_df,scaledweather_df, how='outer',left_index=True, right_index=True)
df = df[[ 'POWER', 'solarradiation', 'humidity', 'windspeed', 'windgust', 'temp', 'winddir' ]]
df = df.interpolate(method='linear')
return power_scaler, df
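# The merged frame returned above is hourly and holds one power column plus six weather
# features (POWER, solarradiation, humidity, windspeed, windgust, temp, winddir).  Each
# prediction below therefore feeds the model a 7-day window of SHIFT_DAYS*PRED_STEPS = 168
# hourly rows with 7 features and asks for the next 24 hours of POWER.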
#---------------------------------------
# MODEL_TYPE iteration
#---------------------------------------
total_accRate = 0
total_accRate_list = []
result_pred = pd.DataFrame()
result_acc = pd.DataFrame()
result_target= pd.DataFrame()
for m in range(0,MODEL_NUM):
# for m in range(0,1):
model = tf.keras.models.load_model(PATH_MODEL+'model'+str(m)+'.h5')
print("\n\n MODEL", m, "-"*100)
accRate_sum = 0
#---------------------------------------
# dataset iteration
#---------------------------------------
for T in range(0,DATASET_NUM):
PRED_DAY = datetime(2021, 8, 25, 0,0,0)+timedelta(T)
PRED_DAY = datetime(PRED_DAY.year, PRED_DAY.month, PRED_DAY.day, 0,0,0)
X_START = PRED_DAY - timedelta(7)
X_END = PRED_DAY - timedelta(1)
X_END = datetime(X_END.year, X_END.month, X_END.day, 23,0,0)
# print("X DATA: {} ~ {} => PRED: {} ".format(str(X_START)[:10], str(X_END)[:10], str(PRED_DAY)[:10]))
# get data
df_power = load_power(POWER_NM)
df_sensor = load_sensor()
power_scaler, df = get_df(df_power, df_sensor,POWER_NM)
# create x,y arr
x_arr = []
X_df = df.loc[str(X_START):str(X_END)]
x_arr.append(X_df.iloc[:].values.tolist())
x_arr=np.asarray(x_arr).astype(np.float64)
y_arr = []
Y_df = df.loc[str(PRED_DAY):str(PRED_DAY + timedelta(1))]
y_arr.append(Y_df.iloc[:,[0]].values.tolist())
y_arr=np.asarray(y_arr).astype(np.float64)
#---------------------------------------
# predict
#---------------------------------------
n_dataset= x_arr.shape[0]
predList=[]
accRate=[]
yList=[]
pred = model.predict([x_arr])
pred[pred<0] = 0
pred = pred[:,:,0]
pred = power_scaler.inverse_transform(pred)
predList = pred.reshape(-1,1)
#---------------------------------------
# calculate predictaccRate
#---------------------------------------
if(str(PRED_DAY.strftime("%Y-%m-%d")) > str(df.index[-1])[:10]):
for hr in range(0, PRED_STEPS):
accRate.append(0)
else:
y = power_scaler.inverse_transform(y_arr[:,:,0])
yList = y.reshape(-1,1)
for hr in range(0, PRED_STEPS):
pred = predList[hr]
target = yList[hr]
difference = np.abs(target-pred)
accRate.append(100-np.round(difference/CAPACITY*100, 2))
accRate_df = pd.DataFrame(np.array(accRate).reshape(1,-1))
accRate_df.insert(0,'PRED_DATE',PRED_DAY, allow_duplicates=False)
accRate_df.insert(0,'MODEL',m, allow_duplicates=False)
pred_df = pd.DataFrame(np.array(predList).reshape(1,-1))
pred_df.insert(0,'PRED_DATE',PRED_DAY, allow_duplicates=False)
pred_df.insert(0,'MODEL',m, allow_duplicates=False)
y_df = pd.DataFrame(np.array(yList).reshape(1,-1))
y_df.insert(0,'PRED_DATE',PRED_DAY, allow_duplicates=False)
y_df.insert(0,'MODEL',m, allow_duplicates=False)
mean_accRate = np.round(accRate_df.mean(axis = 1,numeric_only = True)[0],2)
accRate_sum = accRate_sum + mean_accRate
print("dataset {} : {}".format(T+1,mean_accRate))
if result_pred.shape[0] == 0:
result_pred = pred_df
result_acc = accRate_df
result_target= y_df
else:
result_pred = pd.concat([result_pred, pred_df])
result_acc = | pd.concat([result_acc, accRate_df]) | pandas.concat |
# Copyright 2019 Toyota Research Institute. All rights reserved.
"""Unit tests related to batch validation"""
import json
import os
import unittest
import pandas as pd
import numpy as np
import boto3
from botocore.exceptions import NoRegionError, NoCredentialsError
from monty.tempfile import ScratchDir
from beep.validate import ValidatorBeep, validate_file_list_from_json, \
SimpleValidator
from beep import S3_CACHE, VALIDATION_SCHEMA_DIR
TEST_DIR = os.path.dirname(__file__)
TEST_FILE_DIR = os.path.join(TEST_DIR, "test_files")
@unittest.skip
class ValidationArbinTest(unittest.TestCase):
def setUp(self):
# Setup events for testing
try:
kinesis = boto3.client('kinesis')
response = kinesis.list_streams()
self.events_mode = "test"
        except (NoRegionError, NoCredentialsError) as e:
self.events_mode = "events_off"
def test_validation_arbin_bad_index(self):
path = "2017-05-09_test-TC-contact_CH33.csv"
path = os.path.join(TEST_FILE_DIR, path)
v = ValidatorBeep()
v.allow_unknown = True
df = pd.read_csv(path, index_col=0)
self.assertFalse(v.validate_arbin_dataframe(df))
self.assertEqual(v.errors['cycle_index'][0][0][0], 'must be of number type')
# Test bigger file
path = "2017-08-14_8C-5per_3_47C_CH44.csv"
path = os.path.join(TEST_FILE_DIR, path)
v = ValidatorBeep()
v.allow_unknown = True
df = pd.read_csv(path, index_col=0)
self.assertFalse(v.validate_arbin_dataframe(df))
self.assertEqual(v.errors['cycle_index'][0][0][0], 'must be of number type')
def test_validation_arbin_bad_data(self):
path = "2017-12-04_4_65C-69per_6C_CH29.csv"
path = os.path.join(TEST_FILE_DIR, path)
v = ValidatorBeep()
v.allow_unknown = True
df = pd.read_csv(path, index_col=0)
self.assertTrue(v.validate_arbin_dataframe(df))
# Alter the schema on-the-fly to induce error
v.schema['discharge_capacity']['schema']['max'] = 1.8
self.assertFalse(v.validate_arbin_dataframe(df, schema=v.schema))
self.assertEqual(v.errors['discharge_capacity'][0][11264][0], 'max value is 1.8')
# Alter the schema on-the-fly to move on to the next errors
v.schema['discharge_capacity']['schema']['max'] = 2.1
v.schema['step_time'] = {"schema": {"min": 0.0, "type": "float"},
"type": "list"}
self.assertFalse(v.validate_arbin_dataframe(df, schema=None))
self.assertEqual(v.errors['step_time'][0][206][0], 'min value is 0.0')
# Alter schema once more to recover validation
del v.schema['step_time']['schema']['min']
self.assertTrue(v.validate_arbin_dataframe(df, schema=None))
def test_validation_many_from_paths(self):
paths = ["2017-05-09_test-TC-contact_CH33.csv",
"2017-12-04_4_65C-69per_6C_CH29.csv"]
paths = [os.path.join(TEST_FILE_DIR, path) for path in paths]
v = ValidatorBeep()
temp_records = os.path.join(TEST_FILE_DIR, 'temp_records.json')
with open(temp_records, 'w') as f:
f.write("{}")
results = v.validate_from_paths(paths, record_results=False)
self.assertFalse(results["2017-05-09_test-TC-contact_CH33.csv"]["validated"])
errmsg = results["2017-05-09_test-TC-contact_CH33.csv"]["errors"]['cycle_index'][0][0][0]
self.assertEqual(errmsg, 'must be of number type')
self.assertTrue(results["2017-12-04_4_65C-69per_6C_CH29.csv"]["validated"])
v.validate_from_paths(paths, record_results=True, record_path=temp_records)
with open(temp_records, 'r') as f:
results_form_rec = json.load(f)
self.assertFalse(results_form_rec["2017-05-09_test-TC-contact_CH33.csv"]["validated"])
results = v.validate_from_paths(paths, record_results=True, skip_existing=True,
record_path=temp_records)
self.assertEqual(results, {})
@unittest.skip
def test_bad_file(self):
paths = ["2017-08-14_8C-5per_3_47C_CH44.csv"]
paths = [os.path.join(TEST_FILE_DIR, path) for path in paths]
v = ValidatorBeep()
results = v.validate_from_paths(paths, record_results=False)
def test_validation_from_json(self):
with ScratchDir('.'):
os.environ['BEEP_ROOT'] = os.getcwd()
os.mkdir("data-share")
os.mkdir(os.path.join("data-share", "validation"))
paths = ["2017-05-09_test-TC-contact_CH33.csv",
"2017-12-04_4_65C-69per_6C_CH29.csv"]
paths = [os.path.join(TEST_FILE_DIR, path) for path in paths]
# Create dummy json obj
json_obj = {
"mode": self.events_mode,
"file_list": paths,
'run_list': list(range(len(paths)))
}
json_string = json.dumps(json_obj)
json_output = validate_file_list_from_json(json_string)
loaded = json.loads(json_output)
self.assertEqual(loaded['validity'][0], 'invalid')
self.assertEqual(loaded['validity'][1], 'valid')
class ValidationMaccorTest(unittest.TestCase):
# To further develop as Maccor data / schema becomes available
def setUp(self):
# Setup events for testing
try:
kinesis = boto3.client('kinesis')
response = kinesis.list_streams()
self.events_mode = "test"
        except (NoRegionError, NoCredentialsError) as e:
self.events_mode = "events_off"
def test_validation_maccor(self):
path = "xTESLADIAG_000019_CH70.070"
path = os.path.join(TEST_FILE_DIR, path)
v = SimpleValidator(schema_filename=os.path.join(VALIDATION_SCHEMA_DIR, "schema-maccor-2170.yaml"))
v.allow_unknown = True
        header = pd.read_csv(path, delimiter='\t', nrows=0)
import tensorflow as tf
from tensorflow.keras.datasets import cifar10
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import time
import pathlib
import timeit
import seaborn as sns
(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()
# Normalize pixel values to be between 0 and 1
test_images = test_images / 255.0
## No_quant
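# Baseline: latency of loading and running the unquantized Keras SavedModel (no TFLite conversion).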
testcode = '''
def test():
    model = tf.keras.models.load_model('/home/anjir29/Desktop/greedyhpo-main/Main_test_case/CIFAR10/greedy_approch_Cifar10_ResNet50')
    model.predict(np.expand_dims(test_images[0], axis=0))
'''
# stmt='test()' executes one load + prediction per repeat; globals() exposes tf, np and test_images to timeit
time = timeit.repeat(stmt='test()', setup=testcode, repeat=100, number=1, globals=globals())
#time = np.array(time)
time = np.reshape(time, (100, 1))
#print(time)
pd.DataFrame(time).to_csv("NQ_greedy_approch_Cifar10_ResNet50.csv")
print("Latency saved...")
testcode = '''
def test():
    model = tf.keras.models.load_model('/home/anjir29/Desktop/greedyhpo-main/Main_test_case/CIFAR10/Bayesian_Search_Cifar10_ResNet50')
    model.predict(np.expand_dims(test_images[0], axis=0))
'''
time = timeit.repeat(stmt='test()', setup=testcode, repeat=100, number=1, globals=globals())
#time = np.array(time)
time = np.reshape(time, (100, 1))
#print(time)
pd.DataFrame(time).to_csv("NQ_Bayesian_Search_Cifar10_ResNet50.csv")
print("Latency saved...")
testcode = '''
def test():
    model = tf.keras.models.load_model('/home/anjir29/Desktop/greedyhpo-main/Main_test_case/CIFAR10/Random_Search_Cifar10_ResNet50')
    model.predict(np.expand_dims(test_images[0], axis=0))
'''
time = timeit.repeat(stmt='test()', setup=testcode, repeat=100, number=1, globals=globals())
#time = np.array(time)
time = np.reshape(time, (100, 1))
#print(time)
pd.DataFrame(time).to_csv("NQ_Random_Search_Cifar10_ResNet50.csv")
print("Latency saved...")
#Q1
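# Q1: post-training dynamic range quantization (Optimize.DEFAULT with no target_spec).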
converter = tf.lite.TFLiteConverter.from_saved_model('/home/anjir29/Desktop/greedyhpo-main/Main_test_case/CIFAR10/greedy_approch_Cifar10_ResNet50')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_quant_model = converter.convert()
tflite_models_dir = pathlib.Path("Quant_Models")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_file = tflite_models_dir/"Q1_greedy_approch_Cifar10_ResNet50.tflite"
tflite_model_file.write_bytes(tflite_quant_model)
interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter.allocate_tensors()
interpreter_quant = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter_quant.allocate_tensors()
testcode = '''
def test():
test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
interpreter.set_tensor(input_index, test_image)
interpreter.invoke()
predictions = interpreter.get_tensor(output_index)
'''
# Call test() once per repeat so the interpreter invocation is actually timed
time = timeit.repeat(stmt='test()', setup=testcode, repeat=100, number=1, globals=globals())
#time = np.array(time)
time = np.reshape(time, (100, 1))
#print(time)
pd.DataFrame(time).to_csv("Q1_greedy_approch_Cifar10_ResNet50.csv")
print("Latency saved...")
converter = tf.lite.TFLiteConverter.from_saved_model('/home/anjir29/Desktop/greedyhpo-main/Main_test_case/CIFAR10/Bayesian_Search_Cifar10_ResNet50')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_quant_model = converter.convert()
tflite_models_dir = pathlib.Path("Quant_Models")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_file = tflite_models_dir/"Q1_Bayesian_Search_Cifar10_ResNet50.tflite"
tflite_model_file.write_bytes(tflite_quant_model)
interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter.allocate_tensors()
interpreter_quant = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter_quant.allocate_tensors()
testcode = '''
def test():
test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
interpreter.set_tensor(input_index, test_image)
interpreter.invoke()
predictions = interpreter.get_tensor(output_index)
'''
time = timeit.repeat(stmt='test()', setup=testcode, repeat=100, number=1, globals=globals())
#time = np.array(time)
time = np.reshape(time, (100, 1))
#print(time)
pd.DataFrame(time).to_csv("Q1_Bayesian_Search_Cifar10_ResNet50.csv")
print("Latency saved...")
converter = tf.lite.TFLiteConverter.from_saved_model('/home/anjir29/Desktop/greedyhpo-main/Main_test_case/CIFAR10/Random_Search_Cifar10_ResNet50')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_quant_model = converter.convert()
tflite_models_dir = pathlib.Path("Quant_Models")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_file = tflite_models_dir/"Q1_Random_Search_Cifar10_ResNet50.tflite"
tflite_model_file.write_bytes(tflite_quant_model)
interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter.allocate_tensors()
interpreter_quant = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter_quant.allocate_tensors()
testcode = '''
def test():
test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
interpreter.set_tensor(input_index, test_image)
interpreter.invoke()
predictions = interpreter.get_tensor(output_index)
'''
time = timeit.repeat(stmt='test()', setup=testcode, repeat=100, number=1, globals=globals())
#time = np.array(time)
time = np.reshape(time, (100, 1))
#print(time)
pd.DataFrame(time).to_csv("Q1_Random_Search_Cifar10_ResNet50.csv")
print("Latency saved...")
#Q2
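# Q2: post-training float16 quantization (weights stored as float16 via supported_types).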
converter = tf.lite.TFLiteConverter.from_saved_model('/home/anjir29/Desktop/greedyhpo-main/Main_test_case/CIFAR10/greedy_approch_Cifar10_ResNet50')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_quant_model = converter.convert()
tflite_models_dir = pathlib.Path("Quant_Models")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_file = tflite_models_dir/"Q2_greedy_approch_Cifar10_ResNet50.tflite"
tflite_model_file.write_bytes(tflite_quant_model)
interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter.allocate_tensors()
interpreter_quant = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter_quant.allocate_tensors()
testcode = '''
def test():
test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
interpreter.set_tensor(input_index, test_image)
interpreter.invoke()
predictions = interpreter.get_tensor(output_index)
'''
time = timeit.repeat(stmt='test()', setup=testcode, repeat=100, number=1, globals=globals())
#time = np.array(time)
time = np.reshape(time, (100, 1))
#print(time)
pd.DataFrame(time).to_csv("Q2_greedy_approch_Cifar10_ResNet50.csv")
print("Latency saved...")
converter = tf.lite.TFLiteConverter.from_saved_model('/home/anjir29/Desktop/greedyhpo-main/Main_test_case/CIFAR10/Bayesian_Search_Cifar10_ResNet50')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_quant_model = converter.convert()
tflite_models_dir = pathlib.Path("Quant_Models")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_file = tflite_models_dir/"Q2_Bayesian_Search_Cifar10_ResNet50.tflite"
tflite_model_file.write_bytes(tflite_quant_model)
interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter.allocate_tensors()
interpreter_quant = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter_quant.allocate_tensors()
testcode = '''
def test():
test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
interpreter.set_tensor(input_index, test_image)
interpreter.invoke()
predictions = interpreter.get_tensor(output_index)
'''
time = timeit.repeat(stmt='test()', setup=testcode, repeat=100, number=1, globals=globals())
#time = np.array(time)
time = np.reshape(time, (100, 1))
#print(time)
pd.DataFrame(time)
# -*- coding: utf-8 -*-
from __future__ import division, print_function
from six import StringIO
import os.path as op
import numpy as np
import pandas as pd
import cooler
import pytest
from cooler.create import (
sanitize_records,
sanitize_pixels,
validate_pixels,
aggregate_records,
BadInputError,
)
testdir = op.dirname(op.realpath(__file__))
datadir = op.join(testdir, "data")
columns = [
"chrom1",
"pos1",
"strand1",
"chrom2",
"pos2",
"strand2",
"name",
"pair_type",
"triu",
]
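# Synthetic pairs records; the 'triu' flag marks whether the record is already upper-triangular (chrom1/pos1 ordered before chrom2/pos2).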
valid_data = """chr1\t1\t+\tchr2\t100\t-\t.\tLL\t1
chr2\t99\t+\tchr1\t13\t-\t.\tLL\t0
chr2\t13\t+\tchr2\t60\t-\t.\tLL\t1
chr1\t200\t+\tchr2\t50\t-\t.\tLL\t1
chr3\t11\t+\tchr3\t40\t-\t.\tLL\t1
chr1\t234\t+\tchr3\t30\t-\t.\tLL\t1
chr3\t3\t+\tchr2\t20\t-\t.\tLL\t0
chr2\t23\t+\tchr3\t11\t-\t.\tLL\t1
chr1\t123\t+\tchr1\t200\t-\t.\tLL\t1
"""
nuisance_chroms = """chr1\t222\t+\tchr9\t200\t-\t.\tLL\t1
chr9\t222\t+\tchr9\t200\t-\t.\tLL\t1"""
oob_lower = """chr1\t-1\t+\tchr1\t10\t+\t.\tLL\t1"""
oob_upper = """chr1\t123\t+\tchr1\t301\t+\t.\tLL\t1"""
binsize = 10
chromsizes = pd.Series(index=["chr1", "chr2", "chr3"], data=[300, 300, 300])
# %%
import os
import pandas as pd
import numpy as np
import threading
import time
base_dir = os.getcwd()
# %%
# Initialize the header (feature column names)
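# Naming convention: op_*/trans_* features come from the operation/transaction tables;
# *_0..*_9 are the encoding of the most frequent category, *_perc = share of the most frequent value,
# *_std = std of the value counts, *_n = number of distinct values, *_nan_perc = share of missing
# placeholder values, and *_48h_n = maximum within any 48-hour window.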
header = ['user', 'n_op', 'n_trans', 'op_type_0', 'op_type_1', 'op_type_2', 'op_type_3', 'op_type_4', 'op_type_5',
'op_type_6', 'op_type_7', 'op_type_8', 'op_type_9', 'op_type_perc', 'op_type_std', 'op_type_n', 'op_mode_0',
'op_mode_1', 'op_mode_2', 'op_mode_3', 'op_mode_4', 'op_mode_5', 'op_mode_6', 'op_mode_7', 'op_mode_8',
'op_mode_9', 'op_mode_perc', 'op_mode_std', 'op_mode_n', 'op_device_perc', 'op_device_std',
'op_device_nan_perc', 'op_device_n', 'op_ip_perc', 'op_ip_std', 'op_ip_nan_perc', 'op_ip_n', 'op_net_type_0',
'op_net_type_1', 'op_net_type_2', 'op_net_type_3', 'op_net_type_perc', 'op_net_type_std',
'op_net_type_nan_perc', 'op_channel_0', 'op_channel_1', 'op_channel_2', 'op_channel_3', 'op_channel_4',
'op_channel_perc', 'op_channel_std', 'op_channel_n', 'op_ip_3_perc', 'op_ip_3_std', 'op_ip_3_nan_perc',
'op_ip_3_n', 'op_ip_3_ch_freq', 'op_ip_48h_n', 'op_device_48h_n',
'op_48h_n', 'trans_platform_0', 'trans_platform_1', 'trans_platform_2', 'trans_platform_3',
'trans_platform_4', 'trans_platform_5', 'trans_platform_perc', 'trans_platform_std', 'trans_platform_n',
'trans_tunnel_in_0', 'trans_tunnel_in_1', 'trans_tunnel_in_2', 'trans_tunnel_in_3', 'trans_tunnel_in_4',
'trans_tunnel_in_5', 'trans_tunnel_in_perc', 'trans_tunnel_in_std', 'trans_tunnel_in_n',
'trans_tunnel_in_nan_perc', 'trans_tunnel_out_0', 'trans_tunnel_out_1', 'trans_tunnel_out_2',
'trans_tunnel_out_3', 'trans_tunnel_out_perc', 'trans_tunnel_out_std', 'trans_tunnel_n', 'trans_amount_max',
'trans_amount_avg', 'trans_amount_std', 'trans_type1_0', 'trans_type1_1', 'trans_type1_2', 'trans_type1_3',
'trans_type1_4', 'trans_type1_perc', 'trans_type1_std', 'trans_ip_perc', 'trans_ip_std', 'trans_ip_nan_perc',
'trans_ip_n', 'trans_type2_0', 'trans_type2_1', 'trans_type2_2', 'trans_type2_3', 'trans_type2_4',
'trans_type2_perc', 'trans_type2_std', 'trans_ip_3_perc', 'trans_ip_3_std', 'trans_ip_3_nan_perc',
'trans_ip_3_n', 'trans_ip_3_ch_freq',
'trans_amount_48h_n', 'trans_48h_n', 'trans_platform_48h_n', 'trans_ip_48h_n']
print(len(header))
# %%
feature_train = pd.DataFrame(columns=header)
feature_test_a = pd.DataFrame(columns=header)
feature_test_b = pd.DataFrame(columns=header)
train_base_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_base.csv')
train_op_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_op.csv')
train_trans_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_trans.csv')
test_a_base_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_base.csv')
test_a_op_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_op.csv')
test_a_trans_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_trans.csv')
test_b_base_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_b_base.csv')
test_b_op_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_b_op.csv')
test_b_trans_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_b_trans.csv')
n_train = len(train_base_df)
n_test_a = len(test_a_base_df)
n_test_b = len(test_b_base_df)
# %%
# load encoder
op_type = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_op_type.csv')
mp_op_type = {}
for col in op_type.columns.values:
mp_op_type[col] = op_type[col].values
op_mode = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_op_mode.csv')
mp_op_mode = {}
for col in op_mode.columns.values:
mp_op_mode[col] = op_mode[col].values
net_type = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_op_net_type.csv')
mp_net_type = {}
for col in net_type.columns.values:
mp_net_type[col] = net_type[col].values
channel = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_op_channel.csv')
mp_channel = {}
for col in channel.columns.values:
mp_channel[col] = channel[col].values
platform = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_platform.csv')
mp_platform = {}
for col in platform.columns.values:
mp_platform[col] = platform[col].values
tunnel_in = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_tunnel_in.csv')
mp_tunnel_in = {}
for col in tunnel_in.columns.values:
mp_tunnel_in[col] = tunnel_in[col].values
tunnel_out = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_tunnel_out.csv')
mp_tunnel_out = {}
for col in tunnel_out.columns.values:
mp_tunnel_out[col] = tunnel_out[col].values
type1 = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_type1.csv')
mp_type1 = {}
for col in type1.columns.values:
mp_type1[col] = type1[col].values
type2 = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_type2.csv')
mp_type2 = {}
for col in type2.columns.values:
mp_type2[col] = type2[col].values
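# Each mp_* dict maps a categorical value to its pre-computed encoding vector (read from the enc_*.csv files);
# the vectors are appended to a feature row via line.extend(code) inside process().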
# %%
def process(n, isTrain=True, isA=False):
for i in range(n):
if i % 1000 == 0:
print("train - " if isTrain else "test_a - " if isA else "test_b - ", end='')
print(i)
if isTrain:
cur_user = train_base_df['user'].loc[i]
            tr_trans_user = train_trans_df[train_trans_df['user'] == cur_user]  # this user's trans records
            tr_op_user = train_op_df[train_op_df['user'] == cur_user]  # this user's op records
elif isA:
cur_user = test_a_base_df['user'].loc[i]
            tr_trans_user = test_a_trans_df[test_a_trans_df['user'] == cur_user]  # this user's trans records
            tr_op_user = test_a_op_df[test_a_op_df['user'] == cur_user]  # this user's op records
else:
cur_user = test_b_base_df['user'].loc[i]
            tr_trans_user = test_b_trans_df[test_b_trans_df['user'] == cur_user]  # this user's trans records
            tr_op_user = test_b_op_df[test_b_op_df['user'] == cur_user]  # this user's op records
        n_tr_trans_user = len(tr_trans_user)  # number of trans records for this user
        n_tr_op_user = len(tr_op_user)  # number of op records for this user
        line = [cur_user, n_tr_op_user, n_tr_trans_user]  # one row: all derived features for the current user
if n_tr_op_user > 0:
### op_type
mode_op_type = tr_op_user['op_type'].mode()[0]
code = mp_op_type[mode_op_type]
line.extend(code)
line.append(sum(tr_op_user['op_type'].apply(lambda x: 1 if x == mode_op_type else 0)) / n_tr_op_user)
s = tr_op_user['op_type'].value_counts()
line.append(np.std(s.values))
line.append(len(s))
### op_mode
mode_op_mode = tr_op_user['op_mode'].mode()[0]
code = mp_op_mode[mode_op_mode]
line.extend(code)
line.append(sum(tr_op_user['op_mode'].apply(lambda x: 1 if x == mode_op_mode else 0)) / n_tr_op_user)
s = tr_op_user['op_mode'].value_counts()
line.append(np.std(s.values))
line.append(len(s))
### op_device
mode_op_device = tr_op_user['op_device'].mode()[0]
line.append(sum(tr_op_user['op_device'].apply(lambda x: 1 if x == mode_op_device else 0)) / n_tr_op_user)
s = tr_op_user['op_device'].value_counts()
line.append(np.std(s.values))
# line.append(tr_op_user['op_device'].isnull().sum() / n_tr_op_user)
line.append(sum(tr_op_user['op_device'].apply(lambda x: 1 if x == 'op_device_nan' else 0)) / n_tr_op_user)
line.append(len(s))
### op_ip
mode_op_ip = tr_op_user['ip'].mode()[0]
line.append(sum(tr_op_user['ip'].apply(lambda x: 1 if x == mode_op_ip else 0)) / n_tr_op_user)
s = tr_op_user['ip'].value_counts()
line.append(np.std(s.values))
# line.append(tr_op_user['ip'].isnull().sum() / n_tr_op_user)
line.append(sum(tr_op_user['ip'].apply(lambda x: 1 if x == 'ip_nan' else 0)) / n_tr_op_user)
line.append(len(s))
### op_net_type
mode_op_net_type = tr_op_user['net_type'].mode()[0]
code = mp_net_type[mode_op_net_type]
line.extend(code)
line.append(sum(tr_op_user['net_type'].apply(lambda x: 1 if x == mode_op_net_type else 0)) / n_tr_op_user)
s = tr_op_user['net_type'].value_counts()
line.append(np.std(s.values))
# line.append(tr_op_user['net_type'].isnull().sum() / n_tr_op_user)
line.append(sum(tr_op_user['net_type'].apply(lambda x: 1 if x == 'net_type_nan' else 0)) / n_tr_op_user)
### channel
mode_op_channel = tr_op_user['channel'].mode()[0]
code = mp_channel[mode_op_channel]
line.extend(code)
line.append(sum(tr_op_user['channel'].apply(lambda x: 1 if x == mode_op_channel else 0)) / n_tr_op_user)
s = tr_op_user['channel'].value_counts()
line.append(np.std(s.values))
line.append(len(s))
### ip_3
mode_op_ip_3 = tr_op_user['ip_3'].mode()[0]
line.append(sum(tr_op_user['ip_3'].apply(lambda x: 1 if x == mode_op_ip_3 else 0)) / n_tr_op_user)
s = tr_op_user['ip_3'].value_counts()
line.append(np.std(s.values))
# line.append(tr_op_user['ip_3'].isnull().sum() / n_tr_op_user)
line.append(sum(tr_op_user['ip_3'].apply(lambda x: 1 if x == 'ip_3_nan' else 0)) / n_tr_op_user)
line.append(len(s))
            ### sort by tm_diff
tr_op_user.sort_values('tm_diff', inplace=True)
cnt = 0
l = tr_op_user['ip_3'].values
pre = l[0]
for j in range(1, n_tr_op_user):
if l[j] != pre:
pre = l[j]
cnt += 1
line.append(cnt)
            ### 48h window maxima: number of distinct ips, number of distinct op_devices, and number of op records
tr_op_tm_max = tr_op_user['tm_diff'].values.max()
tr_op_tm_min = tr_op_user['tm_diff'].values.min()
gap = 48 * 3600
start = tr_op_tm_min
end = start + gap
max_48h_ip_n = 0
max_48h_op_device_n = 0
max_48h_op_n = 0
while start <= tr_op_tm_max:
gap_df = tr_op_user[(start <= tr_op_user['tm_diff']) & (tr_op_user['tm_diff'] < end)]
max_48h_ip_n = max(max_48h_ip_n, gap_df['ip'].nunique())
max_48h_op_device_n = max(max_48h_op_device_n, gap_df['op_device'].nunique())
max_48h_op_n = max(max_48h_op_n, len(gap_df))
start = end
end += gap
line.extend([max_48h_ip_n, max_48h_op_device_n, max_48h_op_n])
else:
line.extend([-1] * 57)
if n_tr_trans_user > 0:
### platform
mode_trans_platform = tr_trans_user['platform'].mode()[0]
code = mp_platform[mode_trans_platform]
line.extend(code)
line.append(
sum(tr_trans_user['platform'].apply(lambda x: 1 if x == mode_trans_platform else 0)) / n_tr_trans_user)
s = tr_trans_user['platform'].value_counts()
line.append(np.std(s.values))
line.append(len(s))
### tunnel_in
mode_trans_tunnel_in = tr_trans_user['tunnel_in'].mode()[0]
code = mp_tunnel_in[mode_trans_tunnel_in]
line.extend(code)
line.append(sum(
tr_trans_user['tunnel_in'].apply(lambda x: 1 if x == mode_trans_tunnel_in else 0)) / n_tr_trans_user)
s = tr_trans_user['tunnel_in'].value_counts()
line.append(np.std(s.values))
line.append(len(s))
# line.append(tr_trans_user['tunnel_in'].isnull().sum() / n_tr_trans_user)
line.append(
sum(tr_trans_user['tunnel_in'].apply(lambda x: 1 if x == 'tunnel_in_nan' else 0)) / n_tr_trans_user)
### tunnel_out
mode_trans_tunnel_out = tr_trans_user['tunnel_out'].mode()[0]
code = mp_tunnel_out[mode_trans_tunnel_out]
line.extend(code)
line.append(sum(
tr_trans_user['tunnel_out'].apply(lambda x: 1 if x == mode_trans_tunnel_out else 0)) / n_tr_trans_user)
s = tr_trans_user['tunnel_out'].value_counts()
line.append(np.std(s.values))
line.append(len(s))
### amount
s = tr_trans_user['amount']
line.append(s.values.max())
line.append(s.values.mean())
line.append(s.values.std())
### type1
mode_trans_type1 = tr_trans_user['type1'].mode()[0]
code = mp_type1[mode_trans_type1]
line.extend(code)
line.append(
sum(tr_trans_user['type1'].apply(lambda x: 1 if x == mode_trans_type1 else 0)) / n_tr_trans_user)
s = tr_trans_user['type1'].value_counts()
line.append(np.std(s.values))
### trans_ip
mode_trans_ip = tr_trans_user['ip'].mode()[0]
line.append(sum(tr_trans_user['ip'].apply(lambda x: 1 if x == mode_trans_ip else 0)) / n_tr_trans_user)
s = tr_trans_user['ip'].value_counts()
line.append(np.std(s.values))
# line.append(tr_trans_user['ip'].isnull().sum() / n_tr_trans_user)
line.append(sum(tr_trans_user['ip'].apply(lambda x: 1 if x == 'ip_nan' else 0)) / n_tr_trans_user)
line.append(len(s))
### type2
mode_trans_type2 = tr_trans_user['type2'].mode()[0]
code = mp_type2[mode_trans_type2]
line.extend(code)
line.append(
sum(tr_trans_user['type2'].apply(lambda x: 1 if x == mode_trans_type2 else 0)) / n_tr_trans_user)
s = tr_trans_user['type2'].value_counts()
line.append(np.std(s.values))
### trans_ip_3
mode_trans_ip_3 = tr_trans_user['ip_3'].mode()[0]
line.append(sum(tr_trans_user['ip_3'].apply(lambda x: 1 if x == mode_trans_ip_3 else 0)) / n_tr_trans_user)
s = tr_trans_user['ip'].value_counts()
line.append(np.std(s.values))
line.append(sum(tr_trans_user['ip_3'].apply(lambda x: 1 if x == 'ip_3_nan' else 0)) / n_tr_trans_user)
line.append(len(s))
            ### sort by tm_diff
tr_trans_user.sort_values('tm_diff', inplace=True)
cnt = 0
l = tr_trans_user['ip_3'].values
pre = l[0]
for j in range(1, n_tr_trans_user):
if l[j] != pre:
pre = l[j]
cnt += 1
line.append(cnt)
            ### 48h window maxima: total amount, number of trans, number of distinct platforms, and number of distinct ips
tr_trans_tm_max = tr_trans_user['tm_diff'].values.max()
tr_trans_tm_min = tr_trans_user['tm_diff'].values.min()
gap = 48 * 3600
start = tr_trans_tm_min
end = start + gap
max_48h_sum_amount = 0
max_48h_trans_n = 0
max_48h_platform_n = 0
max_48h_ip_n = 0
while start <= tr_trans_tm_max:
gap_df = tr_trans_user[(start <= tr_trans_user['tm_diff']) & (tr_trans_user['tm_diff'] < end)]
max_48h_sum_amount = max(max_48h_sum_amount, gap_df['amount'].values.sum())
max_48h_trans_n = max(max_48h_trans_n, len(gap_df))
max_48h_platform_n = max(max_48h_platform_n, gap_df['platform'].nunique())
max_48h_ip_n = max(max_48h_ip_n, gap_df['ip'].nunique())
start = end
end += gap
line.extend([max_48h_sum_amount, max_48h_trans_n, max_48h_platform_n, max_48h_ip_n])
else:
line.extend([-1] * 56)
# print(len(line))
        ### write the row into the feature matrix
if isTrain:
feature_train.loc[len(feature_train)] = line
elif isA:
feature_test_a.loc[len(feature_test_a)] = line
else:
feature_test_b.loc[len(feature_test_b)] = line
    # save to csv
if isTrain:
feature_train.to_csv(base_dir + '/dataset/dataset2/trainset/feature_train.csv', index=False)
elif isA:
feature_test_a.to_csv(base_dir + '/dataset/dataset2/testset/feature_test_a.csv', index=False)
else:
feature_test_b.to_csv(base_dir + '/dataset/dataset2/testset/feature_test_b.csv', index=False)
# %%
process(n_train, isTrain=True)
process(n_test_a, isTrain=False, isA=True)
process(n_test_b, isTrain=False, isA=False)
# %%
# Multithreading
def process_threaded(n_train, n_test_a, n_test_b):
def process1():
process(n_train, isTrain=True)
def process2():
process(n_test_a, isTrain=False, isA=True)
def process3():
process(n_test_b, isTrain=False, isA=False)
t1 = threading.Thread(target=process1)
t1.start()
t2 = threading.Thread(target=process2)
t2.start()
t3 = threading.Thread(target=process3)
t3.start()
# %%
process_threaded(n_train, n_test_a, n_test_b)
# %%
# Merge into the main matrix
### The following six lines do not need to be run
feature_train = pd.read_csv(base_dir + '/dataset/dataset2/trainset/feature_train.csv')
import sys
import pandas as pd
import numpy as np
import math
def topsis():
try :
n = len(sys.argv)
if n != 5:
raise ValueError("Incorrect number of arguments.")
if not sys.argv[1].lower().endswith('.csv') or not sys.argv[4].lower().endswith('.csv'):
raise ValueError("File format of input and result file should be .csv")
try:
df = pd.read_csv(sys.argv[1])
except:
print("Find not found.")
quit()
if(df.shape[1] <3):
raise ValueError("Number of columns should be >= 3")
if(len(sys.argv[2].split(',')) != len(sys.argv[3].split(','))):
raise ValueError("Number of weights and number of impacts are unequal.")
i=0
while i<len(sys.argv[3]):
if sys.argv[3][i] not in ["+","-"]:
raise ValueError("The impacts should be either '+'/'-'")
i+=2
data=df.copy(deep=True)
#Check for all numeric values
li=[]
for i in range(1,len(data.columns)):
            li.append(pd.to_numeric(data.iloc[:,i], errors='coerce'))
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
from bs4.element import Comment
import copy
import logging
class KeywordAnalysis():
# (chrome | firefox)
BROWSER_SETTING = "chrome"
driver = None
targets_response = None
keywords_counts_url1, keywords_counts_url2 = None, None
log = None
def __init__(self):
super(KeywordAnalysis, self).__init__()
# Parse Input
self.input_keyword_string = input\
("Please Enter Keywords to Search, split multiple keyword with semicolon (for Ex. Narendra Modi;one ): ")
self.input_url_string = input("Please Enter the URLs, split 2 URLs with semicolon (with http/https):")
self.target_urls = copy.deepcopy(str(self.input_url_string).split(";"))
self.keywords_list = copy.deepcopy(str(self.input_keyword_string).split(";"))
# Check Input
if not len(self.target_urls) == 2:
logging.error("Please make sure that you have exactly 2 urls in your url string")
quit()
else:
print("=" * 30)
print("This is URL1:")
print(self.target_urls[0])
print("=" * 30)
print("This is URL2:")
print(self.target_urls[1])
print("=" * 30)
if len(self.keywords_list) <= 0:
logging.error("Please make sure that you have entered at least 1 keywords")
else:
print("Here are your keywords:")
print("=" * 30)
[print(keyword) for keyword in self.keywords_list]
print("Input Verified, Begin Scraping")
def run(self):
# Start Scrapping
self.driver = self.start_selenium()
self.targets_response = self.scrape_sites()
self.keywords_counts_url1, self.keywords_counts_url2 = self.result_analysis()
self.log = self.result_to_pandas()
self.plot_graphs()
def start_selenium(self):
chrome_options = Options()
chrome_options.add_argument("--headless")
if self.BROWSER_SETTING == "firefox":
return webdriver.Firefox(executable_path=r'geckodriver.exe')
elif self.BROWSER_SETTING == "chrome":
return webdriver.Chrome(executable_path=r'chromedriver.exe', chrome_options=chrome_options)
else:
logging.error("Please check your BROWSER_SETTING variable")
@staticmethod
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
def scrape_sites(self):
targets_response = []
for target in self.target_urls:
self.driver.get(target)
            WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.TAG_NAME, "body")))
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight*4);")
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight*4);")
bs_obj = BeautifulSoup(self.driver.page_source, 'html.parser')
texts = bs_obj.find_all(text=True)
visible_texts = filter(KeywordAnalysis.tag_visible, texts)
visible_texts_string = u" ".join(t.strip() for t in visible_texts)
targets_response.append(copy.deepcopy(visible_texts_string))
self.driver.close()
return targets_response
def result_analysis(self):
keywords_counts_url1 = []
for keyword in self.keywords_list:
keywords_counts_url1.append(self.targets_response[0].count(keyword))
keywords_counts_url2 = []
for keyword in self.keywords_list:
keywords_counts_url2.append(self.targets_response[1].count(keyword))
return keywords_counts_url1, keywords_counts_url2
def result_to_pandas(self):
log_cols = ["Keywords", "Keyword Counts in URL1", "Keyword Counts in URL2"]
log = pd.DataFrame(columns=log_cols)
for index in range(0, len(self.keywords_list)):
keyword = copy.deepcopy(self.keywords_list[index])
keyword_count_url1 = copy.deepcopy(self.keywords_counts_url1[index])
keyword_count_url2 = copy.deepcopy(self.keywords_counts_url2[index])
            log_entry = pd.DataFrame([[keyword, keyword_count_url1, keyword_count_url2]], columns=log_cols)
import numpy as np
import pandas as pd
INVALID_VALUE = -10
def get_price_features(sales_df, X_df):
return _get_price_features(sales_df, X_df, 'item_price')
def get_dollar_value_features(sales_df, X_df):
sales_df['dollar_value'] = (sales_df['item_price'] * sales_df['item_cnt_day']).astype(np.float32)
output_df = _get_price_features(sales_df, X_df, 'dollar_value')
sales_df.drop('dollar_value', axis=1, inplace=True)
return output_df
def _get_price_features(sales_df, X_df, price_col):
"""
sales_df is monthly
"""
# use original sales data.
msg = 'X_df has >1 recent months data. To speed up the process, we are just handling 1 month into the future case'
assert X_df.date_block_num.max() <= sales_df.date_block_num.max() + 1, msg
sales_df = sales_df[sales_df.item_cnt_day > 0].copy()
sales_df.loc[sales_df[price_col] < 0, price_col] = 0
grp = sales_df.groupby(['item_id', 'date_block_num'])[price_col]
# std for 1 entry should be 0. std for 0 entry should be -10
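    # The monthly stats below are shifted one month forward (shift(1, axis=1)) so that features for month t
    # only use information from earlier months, avoiding target leakage.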
std = grp.std().fillna(0).unstack()
std[sales_df.date_block_num.max() + 1] = 0
std = std.sort_index(axis=1).fillna(method='ffill', axis=1).shift(1, axis=1).fillna(INVALID_VALUE)
std_col = 'std_{}'.format(price_col)
std = std.stack().to_frame(std_col).reset_index()
avg_price = grp.mean().unstack()
avg_price[sales_df.date_block_num.max() + 1] = 0
avg_price = avg_price.sort_index(axis=1).fillna(method='ffill', axis=1).shift(1, axis=1).fillna(INVALID_VALUE)
avg_col = 'avg_{}'.format(price_col)
avg_price = avg_price.stack().to_frame(avg_col).reset_index()
last_price_df = sales_df[['item_id', 'shop_id', 'date_block_num', price_col]].copy()
last_price_df['date_block_num'] += 1
last_pr_col = 'last_{}'.format(price_col)
last_price_df.rename({price_col: last_pr_col}, inplace=True, axis=1)
# index
X_df = X_df.reset_index()
# item_id price
X_df = pd.merge(X_df, avg_price, on=['item_id', 'date_block_num'], how='left')
X_df[avg_col] = X_df[avg_col].fillna(INVALID_VALUE)
# shop_id item_id coupled price
    X_df = pd.merge(X_df, last_price_df, on=['item_id', 'shop_id', 'date_block_num'], how='left')
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'Weight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
                               'and classification tasks. PCA can be used to improve the performance of an ML algorithm, '
'reduce overfitting and reduce noise in data.',
width=50)
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tools run PCA for the user and '
                                 'populate a Scree plot. This plot allows the user to determine if PCA is suitable '
                                 'for '
                                 'their dataset and if they can accept an X% drop in explained variance to '
                                 'have fewer dimensions.', width=50)
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
'the importance of a component for a given observation i.e. measures '
'how much a variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab will provide the output of the data tables."
" The user can download the scores, eigenvalues, explained variance, "
"cumulative explained variance, loadings, "
"cos2 and contributions from the populated data tables. "
"Note: Wait for user inputs to be"
" computed (faded tab app will return to the original colour) before downloading the"
" data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
html.H3("Upload .txt, .csv or .xls files to starting exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("There are usually two ways multicollinearity, "
"which is when there are a number of variables "
"that are highly correlated, is dealt with:"),
html.P("1) Use PCA to obtain a set of orthogonal ("
"not correlated) variables to analyse."),
html.P("2) Use correlation of determination (R²) to "
"determine which variables are highly "
"correlated and use only 1 in analysis. "
"Cut off for highly correlated variables "
"is ~0.7."),
html.P(
"In any case, it depends on the machine learning algorithm you may apply later. For correlation robust algorithms,"
" such as Random Forest, correlation of features will not be a concern. For non-correlation robust algorithms such as Linear Discriminant Analysis, "
"all high correlation variables should be removed.")
], style={'padding-left': '1%'}
),
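                                           # A minimal, hypothetical sketch of option 2 above (not wired into the app),
                                           # assuming `dff` is the numeric feature dataframe used in the callbacks:
                                           # drop one of each pair of features whose absolute correlation exceeds ~0.7.
                                           #   corr = dff.corr().abs()
                                           #   upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(bool))
                                           #   to_drop = [c for c in upper.columns if (upper[c] > 0.7).any()]
                                           #   dff_reduced = dff.drop(columns=to_drop)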
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
html.P("For custom variables input variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
html.Label([
"Graph Update to show either loadings (Loading Plot) or "
"scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Would you like to introduce a second target variable"
" into your data visualisation??"
" (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
html.P(
"2) If they meet each other at 90°, they are not likely to be correlated. "),
html.P(
"3) When they diverge and form a large angle (close to 180°), they are negative correlated."),
html.P(
"The Score Plot involves the projection of the data onto the PCs in two dimensions."
"The plot contains the original data but in the rotated (PC) coordinate system"),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
# READ FILE
def parse_contents(contents, filename):
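    # Dash upload contents arrive as a base64 data-URI string ("data:<mime type>;base64,<payload>"),
    # so the header is split off and the payload decoded before reading it into a DataFrame.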
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
if 'csv' in filename:
# Assume that the user uploaded a CSV file
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
            df = df.fillna(0)
elif 'xls' in filename:
# Assume that the user uploaded an excel file
df = pd.read_excel(io.BytesIO(decoded))
            df = df.fillna(0)
        elif 'txt' in filename or 'tsv' in filename:
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
            df = df.fillna(0)
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
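    # Serialise the uploaded DataFrame to JSON and store it in the 'csv-data' component,
    # so every downstream callback can rebuild it with pd.read_json(data, orient='split').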
if not filename:
return dash.no_update
df = parse_contents(contents, filename)
    df = df.fillna(0)
return df.to_json(date_format='iso', orient='split')
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
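        # Loadings = eigenvectors scaled by sqrt(eigenvalues); for standardised data these are
        # the correlations between the original features and the principal components.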
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
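        # Outlier handling: keep only the rows whose |z-score| is below 3 in every numeric column.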
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
data = Var_dff_outlier
elif outlier == 'No' and matrix_type == 'Covariance':
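        # Covariance-matrix PCA: the features are intentionally not standardised here, so
        # variables with larger variances contribute more to the principal components.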
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
data = Var_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
data = Var_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Cumulative Proportion of Explained Variance'],
mode='lines', line=dict(color='Red')))
return {'data': traces,
'layout': go.Layout(title='<b>Cumulative Scree Plot Proportion of Explained Variance</b>',
titlefont=dict(family='Helvetica', size=16),
xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True
}, yaxis={'title': 'Cumulative Explained Variance',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True,
'range': [0, 100]},
hovermode='closest', font=dict(family="Helvetica"), template="simple_white")
}
@app.callback(
Output('var-output-container-filter', 'children'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_output(outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int)
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_covar)
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
@app.callback(Output('PC-Eigen-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
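        # pca.explained_variance_ holds the eigenvalues, i.e. the variance captured by each
        # principal component, plotted below as a classic scree plot.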
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
data = Eigen_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
data = Eigen_dff_outlier
elif outlier == 'No' and matrix_type == "Covariance":
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
data = Eigen_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
data = Eigen_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Eigenvalues'], mode='lines'))
return {'data': traces,
'layout': go.Layout(title='<b>Scree Plot Eigenvalues</b>', xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True},
titlefont=dict(family='Helvetica', size=16),
yaxis={'title': 'Eigenvalues', 'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True}, hovermode='closest',
font=dict(family="Helvetica"), template="simple_white", )
}
def round_up(n, decimals=0):
multiplier = 10 ** decimals
return math.ceil(n * multiplier) / multiplier
def round_down(n, decimals=0):
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
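# round_up / round_down clip a value to the given number of decimals and are used to report
# the displayed loading (color-bar) range of the PC-feature heatmap.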
@app.callback([Output('PC-feature-heatmap', 'figure'),
Output('color-range-container', 'children')],
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input("matrix-type-heatmap", "value"),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, colorscale, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
# INCLUDING OUTLIERS
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
    # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
    # explained variance of the principal components
    # print(pca.explained_variance_ratio_)
    # Explained variance tells us how much information (variance) can be attributed to each of the principal components
    # loading of each feature on the principal components
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
# OUTLIERS REMOVED
z_scores_hm = scipy.stats.zscore(dff)
abs_z_scores_hm = np.abs(z_scores_hm)
filtered_entries_hm = (abs_z_scores_hm < 3).all(axis=1)
outlier_dff_hm = dff[filtered_entries_hm]
features1_outlier_hm = outlier_dff_hm.columns
features_outlier2 = list(features1_outlier_hm)
outlier_names1_hm = df[filtered_entries_hm]
outlier_names_hm = outlier_names1_hm.iloc[:, 0]
x_outlier_hm = outlier_dff_hm.loc[:, features_outlier2].values
# Separating out the target (if any)
# Standardizing the features
x_outlier_hm = StandardScaler().fit_transform(x_outlier_hm)
pca_outlier_hm = PCA(n_components=len(features_outlier2))
principalComponents_outlier_hm = pca_outlier_hm.fit_transform(x_outlier_hm)
principalDf_outlier_hm = pd.DataFrame(data=principalComponents_outlier_hm
, columns=['PC' + str(i + 1) for i in range(len(features_outlier2))])
    # combining principal components and target
finalDf_outlier_hm = pd.concat([outlier_names_hm, principalDf_outlier_hm], axis=1)
dfff_outlier_hm = finalDf_outlier_hm
# calculating loading
loading_outlier_hm = pca_outlier_hm.components_.T * np.sqrt(pca_outlier_hm.explained_variance_)
loading_df_outlier_hm = pd.DataFrame(data=loading_outlier_hm[0:, 0:], index=features_outlier2,
columns=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])])
loading_dff_outlier_hm = loading_df_outlier_hm.T
# COVAR MATRIX
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
# COVAR MATRIX OUTLIERS REMOVED
if outlier == 'No' and matrix_type == "Correlation":
data = loading_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_dff_outlier_hm
elif outlier == 'No' and matrix_type == "Covariance":
data = loading_dff_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
data = loading_dff_outlier_covar
    size_range = [round_up(data.values.min(), 2), round_down(data.values.max(), 2)]
    traces.append(go.Heatmap(
        z=data, x=data.columns, y=data.index,
        colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell is the loading of a feature on a principal component
colorbar={"title": "Loading",
# 'tickvals': [round_up(data.values.min(), 2),
# round_up((data.values.min() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down((data.values.max() + data.values.min())/2,2),
# round_down((data.values.max() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down(data.values.max(),2), ]
}
))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>PC and Feature Correlation Analysis</b>'),
xaxis=dict(title_text='Features', title_standoff=50),
titlefont=dict(family='Helvetica', size=16),
hovermode='closest', margin={'b': 110, 't': 50, 'l': 75},
font=dict(family="Helvetica", size=11),
annotations=[
dict(x=-0.16, y=0.5, showarrow=False, text="Principal Components",
xref='paper', yref='paper', textangle=-90,
font=dict(size=12))]
),
}, '{}'.format(size_range)
@app.callback(Output('feature-heatmap', 'figure'),
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input('csv-data', 'data')])
def update_graph_stat(outlier, colorscale, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
r2_dff = correlation_dff * correlation_dff
data = r2_dff
feat = features
elif outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
data = r2_dff_outlier
feat = features_outlier
traces.append(go.Heatmap(
z=data, x=feat, y=feat, colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell is the coefficient of determination (R²) between a pair of features
colorbar={"title": "R²", 'tickvals': [0, 0.2, 0.4, 0.6, 0.8, 1]}))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>Feature Correlation Analysis</b>', y=0.97, x=0.6),
xaxis={},
titlefont=dict(family='Helvetica', size=16),
yaxis={},
hovermode='closest', margin={'b': 110, 't': 50, 'l': 180, 'r': 50},
font=dict(family="Helvetica", size=11)),
}
@app.callback(Output('feature-input', 'options'),
[Input('all-custom-choice', 'value'),
Input('csv-data', 'data')])
def activate_input(all_custom, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
options = []
elif all_custom == 'Custom':
options = [{'label': i, 'value': i} for i in dff.columns]
return options
@app.callback(Output('color-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')], )
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
@app.callback(Output('size-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item-second', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')])
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
# resume covar matrix...
@app.callback(Output('biplot', 'figure'),
[
Input('outlier-value-biplot', 'value'),
Input('feature-input', 'value'),
Input('customvar-graph-update', 'value'),
Input('color-scale-scores', 'value'),
Input('radio-target-item', 'value'),
Input('size-scale-scores', 'value'),
Input('radio-target-item-second', 'value'),
Input('all-custom-choice', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_graph_custom(outlier, input, graph_update, color, target, size, target2, all_custom, matrix_type, data):
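    # Pre-compute PCA scores and loadings for all four variants (correlation vs covariance
    # matrix, with vs without outliers) and pick the one matching the current selections
    # to build either the biplot (scores + loading vectors) or the loadings-only plot.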
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
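        # Each loading vector is drawn as a segment from the origin: a (0, 0) row is stacked
        # under every feature's (PC1, PC2) loading and the pairs are grouped via 'line_group'.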
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_dff = pd.concat([zero_scale_df, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# uses covariance matrix
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# calculating loading
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, line_group_scale_df_covar], axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
# COVARIANCE MATRIX OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
trace2_all = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, showscale=False, size=12,
line=dict(width=0.5, color='DarkSlateGrey'),
),
)
####################################################################################################
# INCLUDE THIS
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_covar
variance = Var_outlier_scale_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], line=dict(color="#4f4f4f"),
name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text',
textposition='bottom right', textfont=dict(size=12)
)
lists[counter] = trace1_all
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2_all)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_dff = pd.concat([zero_scale_input_df, line_group_scale_input_df], axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, line_group_scale_input_df_covar], axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
# COVARIANCE MATRIX OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
variance = Var_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
variance = Var_scale_input_outlier_covar
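        # Scores scatter: marker color and size encode the selected target columns when enabled;
        # sizeref scales marker areas relative to the largest value of the chosen size column.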
trace2 = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
marker_color=dat[color] if target == 'Yes' else None,
marker_size=dat[size] if target2 == 'Yes' else 12,
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, colorscale='Plasma',
sizeref=max(dat[size]) / (15 ** 2) if target2 == 'Yes' else None,
sizemode='area',
showscale=True if target == 'Yes' else False,
line=dict(width=0.5, color='DarkSlateGrey'),
colorbar=dict(title=dict(text=color if target == 'Yes' else None,
font=dict(family='Helvetica'),
side='right'), ypad=0),
),
)
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_scale_input_outlier_line_graph
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'],
line=dict(color="#666666" if target == 'Yes' else '#4f4f4f'), name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
)
lists[counter] = trace1
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(
Output('size-second-target-container', 'children'),
[Input('size-scale-scores', 'value'),
Input('outlier-value-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_output(size, outlier, data):
if not data:
return dash.no_update
if size is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
z_scores_dff_size = scipy.stats.zscore(dff)
abs_z_scores_dff_size = np.abs(z_scores_dff_size)
filtered_entries_dff_size = (abs_z_scores_dff_size < 3).all(axis=1)
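    # rows with any |z-score| >= 3 in a numeric column are treated as outliers and removed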
dff_target_outlier_size = dff[filtered_entries_dff_size]
if outlier == 'Yes':
size_range = [round(dff_target_outlier_size[size].min(), 2), round(dff_target_outlier_size[size].max(), 2)]
elif outlier == 'No':
size_range = [round(dff[size].min(), 2), round(dff[size].max(), 2)]
return '{}'.format(size_range)
@app.callback(Output('cos2-plot', 'figure'),
[
Input('outlier-value-cos2', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-cos2", "value"),
Input('csv-data', 'data')
])
def update_cos2_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
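        # loadings = eigenvectors * sqrt(eigenvalues); with standardised inputs each entry is
        # the correlation between a feature and the corresponding principal component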
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df['cos2'] = (loading_scale_df["PC1"] ** 2) + (loading_scale_df["PC2"] ** 2)
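        # cos2 = PC1**2 + PC2**2 measures how well each feature is represented in the first two components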
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_df.iloc[:, 2], columns=['cos2'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
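        # each feature now has two rows (its loading and the origin), so grouping by 'line_group'
        # later draws a segment from (0, 0) to the loading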
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
        # PCA on standardised data (equivalent to a correlation-matrix PCA)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["cos2"] = (loading_outlier_scale_df["PC1"] ** 2) + (
loading_outlier_scale_df["PC2"] ** 2)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_df.iloc[:, 2], columns=['cos2'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='cos2')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='cos2')
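        # sort by cos2 so the colourscale runs from light (poorly represented) to dark
        # (well represented) and the best-represented features are drawn last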
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar['cos2'] = (loading_scale_df_covar["PC1"] ** 2) + (loading_scale_df_covar["PC2"] ** 2)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["cos2"] = (loading_outlier_scale_df_covar["PC1"] ** 2) + (
loading_outlier_scale_df_covar["PC2"] ** 2)
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_df_color_covar = pd.DataFrame(data=loading_outlier_scale_df_covar.iloc[:, 2],
columns=['cos2'])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, zero_outlier_scale_df_color_covar,
line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
loading_outlier_scale_line_graph_sort_covar = loading_outlier_scale_line_graph_covar.sort_values(by='cos2')
        # select the dataset and explained-variance vector according to the outlier and matrix-type options
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph_sort
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph_sort
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_sort_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_sort_covar
variance = Var_outlier_scale_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
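        # discrete blue colourscale with one shade per distinct cos2 value; this assumes the number of
        # distinct values matches the number of line groups indexed by counter_color below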
counter = 0
counter_color = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], mode='lines+text',
name=i, line=dict(color=colorscale[counter_color]),
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
textposition='bottom right', textfont=dict(size=12)
)
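            # trace2_all is an invisible two-point marker trace (opacity 0, hover disabled); its only
            # purpose is to carry the colourbar for the cos2 scale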
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers',
hoverinfo='skip',
marker=dict(showscale=True, opacity=0,
color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
lists[counter] = trace1_all
counter = counter + 1
counter_color = counter_color + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)), mirror=True,
ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)), mirror=True,
ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == "Custom":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# # x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df["cos2"] = (loading_scale_input_df["PC1"] ** 2) + (loading_scale_input_df["PC2"] ** 2)
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_df_color = pd.DataFrame(data=loading_scale_input_df.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff = pd.concat([zero_scale_input_df, zero_scale_input_df_color, line_group_scale_input_df],
axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# # x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
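        # any NaNs introduced by index misalignment after outlier filtering are replaced with 0 for plotting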
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df["cos2"] = (loading_scale_input_outlier_df["PC1"] ** 2) + \
(loading_scale_input_outlier_df["PC2"] ** 2)
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color = pd.DataFrame(data=loading_scale_input_outlier_df.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, zero_scale_input_outlier_df_color,
line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
loading_scale_input_line_graph_sort = loading_scale_input_line_graph.sort_values(by='cos2')
loading_scale_input_outlier_line_graph_sort = loading_scale_input_outlier_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df_covar["cos2"] = (loading_scale_input_df_covar["PC1"] ** 2) + (
loading_scale_input_df_covar["PC2"] ** 2)
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_df_color_covar = pd.DataFrame(data=loading_scale_input_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, zero_scale_input_df_color_covar,
line_group_scale_input_df_covar],
axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
loading_scale_input_line_graph_sort_covar = loading_scale_input_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df_covar["cos2"] = (loading_scale_input_outlier_df_covar["PC1"] ** 2) + \
(loading_scale_input_outlier_df_covar["PC2"] ** 2)
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color_covar = pd.DataFrame(data=loading_scale_input_outlier_df_covar.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
zero_scale_input_outlier_df_color_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
loading_scale_input_outlier_line_graph_sort_covar = loading_scale_input_outlier_line_graph_covar.sort_values(
by='cos2')
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph_sort
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
variance = Var_scale_input_outlier
data = loading_scale_input_outlier_line_graph_sort
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_sort_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
variance = Var_scale_input_outlier_covar
data = loading_scale_input_outlier_line_graph_sort_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter_color = 0
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'], name=i, line=dict(color=colorscale[counter_color]),
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale, opacity=0,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
lists[counter] = trace1
counter_color = counter_color + 1
counter = counter + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(Output('contrib-plot', 'figure'),
[
Input('outlier-value-contrib', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-contrib", "value"),
Input('csv-data', 'data')
])
def update_contrib_plot(outlier, input, all_custom, matrix_type, data):
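    # builds the variable-contribution plot: a variable's contribution to a PC is its cos2 on that PC
    # divided by the sum of cos2 over all variables, expressed as a percentage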
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df["PC1_cos2"] = loading_scale_df["PC1"] ** 2
loading_scale_df["PC2_cos2"] = loading_scale_df["PC2"] ** 2
loading_scale_df["PC1_contrib"] = \
(loading_scale_df["PC1_cos2"] * 100) / (loading_scale_df["PC1_cos2"].sum(axis=0))
loading_scale_df["PC2_contrib"] = \
(loading_scale_df["PC2_cos2"] * 100) / (loading_scale_df["PC2_cos2"].sum(axis=0))
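        # PC1_contrib / PC2_contrib: percentage share of each component's total cos2 attributable to the feature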
loading_scale_df["contrib"] = loading_scale_df["PC1_contrib"] + loading_scale_df["PC2_contrib"]
        # keep PC1, PC2 and the summed contribution (used for the colourscale) in a separate dataframe
loading_scale_dataf = pd.concat([loading_scale_df.iloc[:, 0:2], loading_scale_df.iloc[:, 6]], axis=1)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_dataf, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["PC1_cos2"] = loading_outlier_scale_df["PC1"] ** 2
loading_outlier_scale_df["PC2_cos2"] = loading_outlier_scale_df["PC2"] ** 2
loading_outlier_scale_df["PC1_contrib"] = \
(loading_outlier_scale_df["PC1_cos2"] * 100) / (loading_outlier_scale_df["PC1_cos2"].sum(axis=0))
loading_outlier_scale_df["PC2_contrib"] = \
(loading_outlier_scale_df["PC2_cos2"] * 100) / (loading_outlier_scale_df["PC2_cos2"].sum(axis=0))
loading_outlier_scale_df["contrib"] = loading_outlier_scale_df["PC1_contrib"] + loading_outlier_scale_df[
"PC2_contrib"]
        # keep PC1, PC2 and the summed contribution (used for the colourscale) in a separate dataframe
loading_outlier_scale_dataf = pd.concat(
[loading_outlier_scale_df.iloc[:, 0:2], loading_outlier_scale_df.iloc[:, 6]], axis=1)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_dataf, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='contrib')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='contrib')
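        # sort by total contribution so colour intensity and drawing order follow the contribution ranking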
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar["PC1_cos2"] = loading_scale_df_covar["PC1"] ** 2
loading_scale_df_covar["PC2_cos2"] = loading_scale_df_covar["PC2"] ** 2
loading_scale_df_covar["PC1_contrib"] = \
(loading_scale_df_covar["PC1_cos2"] * 100) / (loading_scale_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_df_covar["PC2_contrib"] = \
(loading_scale_df_covar["PC2_cos2"] * 100) / (loading_scale_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_df_covar["contrib"] = loading_scale_df_covar["PC1_contrib"] + loading_scale_df_covar[
"PC2_contrib"]
loading_scale_dataf_covar = pd.concat([loading_scale_df_covar.iloc[:, 0:2], loading_scale_df_covar.iloc[:, 6]],
axis=1)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_dataf_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_dataf_covar.iloc[:, 2], columns=['contrib'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='contrib')
# COVARIANCE MATRIX OUTLIERS REMOVED
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["PC1_cos2"] = loading_outlier_scale_df_covar["PC1"] ** 2
loading_outlier_scale_df_covar["PC2_cos2"] = loading_outlier_scale_df_covar["PC2"] ** 2
loading_outlier_scale_df_covar["PC1_contrib"] = \
(loading_outlier_scale_df_covar["PC1_cos2"] * 100) / (
loading_outlier_scale_df_covar["PC1_cos2"].sum(axis=0))
loading_outlier_scale_df_covar["PC2_contrib"] = \
(loading_outlier_scale_df_covar["PC2_cos2"] * 100) / (
loading_outlier_scale_df_covar["PC2_cos2"].sum(axis=0))
loading_outlier_scale_df_covar["contrib"] = loading_outlier_scale_df_covar["PC1_contrib"] + \
loading_outlier_scale_df_covar[
"PC2_contrib"]
        # keep PC1, PC2 and the summed contribution (used for the colourscale) in a separate dataframe
loading_outlier_scale_dataf_covar = pd.concat(
[loading_outlier_scale_df_covar.iloc[:, 0:2], loading_outlier_scale_df_covar.iloc[:, 6]], axis=1)
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_dataf_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_df_color_covar = pd.DataFrame(data=loading_outlier_scale_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_outlier_scale_dff_covar = pd.concat(
[zero_outlier_scale_df_covar, zero_outlier_scale_df_color_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
loading_outlier_scale_line_graph_sort_covar = loading_outlier_scale_line_graph_covar.sort_values(by='contrib')
        # select the dataset and explained-variance vector according to the outlier and matrix-type options
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph_sort
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph_sort
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_sort_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_sort_covar
variance = Var_outlier_scale_covar
N = len(data['contrib'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter = 0
counter_color = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], mode='lines+text',
name=i, line=dict(color=colorscale[counter_color]),
textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, opacity=0,
color=[data["contrib"].min(), data["contrib"].max()],
colorscale=colorscale,
colorbar=dict(title=dict(text="Contribution",
side='right'), ypad=0),
), )
lists[counter] = trace1_all
counter = counter + 1
counter_color = counter_color + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == "Custom":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df["PC1_cos2"] = loading_scale_input_df["PC1"] ** 2
loading_scale_input_df["PC2_cos2"] = loading_scale_input_df["PC2"] ** 2
loading_scale_input_df["PC1_contrib"] = \
(loading_scale_input_df["PC1_cos2"] * 100) / (loading_scale_input_df["PC1_cos2"].sum(axis=0))
loading_scale_input_df["PC2_contrib"] = \
(loading_scale_input_df["PC2_cos2"] * 100) / (loading_scale_input_df["PC2_cos2"].sum(axis=0))
loading_scale_input_df["contrib"] = loading_scale_input_df["PC1_contrib"] + loading_scale_input_df[
"PC2_contrib"]
loading_scale_input_dataf = pd.concat(
[loading_scale_input_df.iloc[:, 0:2], loading_scale_input_df.iloc[:, 6]], axis=1)
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_dataf, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_df_color = pd.DataFrame(data=loading_scale_input_dataf.iloc[:, 2], columns=['contrib'])
zero_scale_input_dff = pd.concat([zero_scale_input_df, zero_scale_input_df_color, line_group_scale_input_df],
axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df["PC1_cos2"] = loading_scale_input_outlier_df["PC1"] ** 2
loading_scale_input_outlier_df["PC2_cos2"] = loading_scale_input_outlier_df["PC2"] ** 2
loading_scale_input_outlier_df["PC1_contrib"] = \
(loading_scale_input_outlier_df["PC1_cos2"] * 100) / (
loading_scale_input_outlier_df["PC1_cos2"].sum(axis=0))
loading_scale_input_outlier_df["PC2_contrib"] = \
(loading_scale_input_outlier_df["PC2_cos2"] * 100) / (
loading_scale_input_outlier_df["PC2_cos2"].sum(axis=0))
loading_scale_input_outlier_df["contrib"] = loading_scale_input_outlier_df["PC1_contrib"] + \
loading_scale_input_outlier_df[
"PC2_contrib"]
loading_scale_input_outlier_dataf = pd.concat(
[loading_scale_input_outlier_df.iloc[:, 0:2], loading_scale_input_outlier_df.iloc[:, 6]], axis=1)
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat(
[loading_scale_input_outlier_dataf, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color = pd.DataFrame(data=loading_scale_input_outlier_dataf.iloc[:, 2],
columns=['contrib'])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, zero_scale_input_outlier_df_color,
line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
loading_scale_input_line_graph_sort = loading_scale_input_line_graph.sort_values(by='contrib')
loading_scale_input_outlier_line_graph_sort = loading_scale_input_outlier_line_graph.sort_values(by='contrib')
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df_covar["PC1_cos2"] = loading_scale_input_df_covar["PC1"] ** 2
loading_scale_input_df_covar["PC2_cos2"] = loading_scale_input_df_covar["PC2"] ** 2
loading_scale_input_df_covar["PC1_contrib"] = \
(loading_scale_input_df_covar["PC1_cos2"] * 100) / (loading_scale_input_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_input_df_covar["PC2_contrib"] = \
(loading_scale_input_df_covar["PC2_cos2"] * 100) / (loading_scale_input_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_input_df_covar["contrib"] = loading_scale_input_df_covar["PC1_contrib"] + \
loading_scale_input_df_covar[
"PC2_contrib"]
loading_scale_input_dataf_covar = pd.concat(
[loading_scale_input_df_covar.iloc[:, 0:2], loading_scale_input_df_covar.iloc[:, 6]], axis=1)
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_dataf_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_df_color_covar = pd.DataFrame(data=loading_scale_input_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, zero_scale_input_df_color_covar,
line_group_scale_input_df_covar],
axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
loading_scale_input_line_graph_sort_covar = loading_scale_input_line_graph_covar.sort_values(by='contrib')
# COVARIANCE MATRIX WITH OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df_covar["PC1_cos2"] = loading_scale_input_outlier_df_covar["PC1"] ** 2
loading_scale_input_outlier_df_covar["PC2_cos2"] = loading_scale_input_outlier_df_covar["PC2"] ** 2
loading_scale_input_outlier_df_covar["PC1_contrib"] = \
(loading_scale_input_outlier_df_covar["PC1_cos2"] * 100) / (
loading_scale_input_outlier_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_input_outlier_df_covar["PC2_contrib"] = \
(loading_scale_input_outlier_df_covar["PC2_cos2"] * 100) / (
loading_scale_input_outlier_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_input_outlier_df_covar["contrib"] = loading_scale_input_outlier_df_covar["PC1_contrib"] + \
loading_scale_input_outlier_df_covar[
"PC2_contrib"]
loading_scale_input_outlier_dataf_covar = pd.concat(
[loading_scale_input_outlier_df_covar.iloc[:, 0:2], loading_scale_input_outlier_df_covar.iloc[:, 6]],
axis=1)
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat(
[loading_scale_input_outlier_dataf_covar, line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color_covar = pd.DataFrame(data=loading_scale_input_outlier_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_scale_input_outlier_dff_covar = pd.concat(
[zero_scale_input_outlier_df_covar, zero_scale_input_outlier_df_color_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
loading_scale_input_outlier_line_graph_sort_covar = loading_scale_input_outlier_line_graph_covar.sort_values(
by='contrib')
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph_sort
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
variance = Var_scale_input_outlier
data = loading_scale_input_outlier_line_graph_sort
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_sort_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_sort_covar
variance = Var_scale_input_outlier_covar
N = len(data['contrib'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter_color = 0
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'], name=i, line=dict(color=colorscale[counter_color]),
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, color=[data["contrib"].min(), data["contrib"].max()],
colorscale=colorscale, opacity=0,
colorbar=dict(title=dict(text="Contribution",
side='right'), ypad=0)
))
lists[counter] = trace1
counter_color = counter_color + 1
counter = counter + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(Output('download-link', 'download'),
[Input('all-custom-choice', 'value'),
Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value')])
def update_filename(all_custom, outlier, matrix_type):
if all_custom == 'All' and outlier == 'Yes' and matrix_type == "Correlation":
download = 'all_variables_correlation_matrix_outliers_removed_data.csv'
elif all_custom == 'All' and outlier == 'Yes' and matrix_type == "Covariance":
download = 'all_variables_covariance_matrix_outliers_removed_data.csv'
elif all_custom == 'All' and outlier == 'No' and matrix_type == "Correlation":
download = 'all_variables_correlation_matrix_data.csv'
elif all_custom == 'All' and outlier == 'No' and matrix_type == "Covariance":
download = 'all_variables_covariance_matrix_data.csv'
elif all_custom == 'Custom' and outlier == 'Yes' and matrix_type == "Correlation":
download = 'custom_variables_correlation_matrix_outliers_removed_data.csv'
elif all_custom == 'Custom' and outlier == 'Yes' and matrix_type == "Covariance":
download = 'custom_variables_covariance_matrix_outliers_removed_data.csv'
elif all_custom == 'Custom' and outlier == 'No' and matrix_type == "Correlation":
download = 'custom_variables_correlation_matrix_data.csv'
elif all_custom == 'Custom' and outlier == 'No' and matrix_type == "Covariance":
download = 'custom_variables_covariance_matrix_data.csv'
return download
@app.callback(Output('download-link', 'href'),
[Input('all-custom-choice', 'value'),
Input('feature-input', 'value'),
Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value'),
Input('csv-data', 'data')])
def update_link(all_custom, input, outlier, matrix_type, data):
if not data:
        return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
# COVARIANCE MATRIX REMOVING OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
# COVARIANCE MATRIX OUTLIERS REMOVED
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
csv_string = dat.to_csv(index=False, encoding='utf-8')
csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
return csv_string
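# Note: the href returned above embeds the CSV directly as a data URI
# ("data:text/csv;charset=utf-8,..."), so the browser downloads the table
# straight from the link without an extra request to the server.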
@app.callback(Output('download-link-correlation', 'download'),
[Input('eigenA-outlier', 'value'),
])
def update_filename(outlier):
if outlier == 'Yes':
download = 'feature_correlation_removed_outliers_data.csv'
elif outlier == 'No':
download = 'feature_correlation_data.csv'
return download
@app.callback([Output('data-table-correlation', 'data'),
Output('data-table-correlation', 'columns'),
Output('download-link-correlation', 'href')],
[Input("eigenA-outlier", 'value'),
Input('csv-data', 'data')], )
def update_output(outlier, data):
if not data:
        return dash.no_update, dash.no_update, dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
r2_dff_table = correlation_dff * correlation_dff
r2_dff_table.insert(0, 'Features', features)
data_frame = r2_dff_table
if outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier_table = correlation_dff_outlier * correlation_dff_outlier
r2_dff_outlier_table.insert(0, 'Features', features_outlier)
data_frame = r2_dff_outlier_table
data = data_frame.to_dict('records')
columns = [{"name": i, "id": i, "deletable": True, "selectable": True, 'type': 'numeric',
'format': Format(precision=3, scheme=Scheme.fixed)} for i in data_frame.columns]
csv_string = data_frame.to_csv(index=False, encoding='utf-8')
csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
return data, columns, csv_string
@app.callback(Output('download-link-eigenA', 'download'),
[Input("matrix-type-data-table", 'value'),
Input('eigenA-outlier', 'value')])
def update_filename(matrix_type, outlier):
if outlier == 'Yes' and matrix_type == "Correlation":
download = 'Eigen_Analysis_correlation_matrix_removed_outliers_data.csv'
elif outlier == 'Yes' and matrix_type == "Covariance":
download = 'Eigen_Analysis_covariance_matrix_removed_outliers_data.csv'
elif outlier == 'No' and matrix_type == "Correlation":
download = 'Eigen_Analysis_correlation_matrix_data.csv'
elif outlier == "No" and matrix_type == "Covariance":
download = 'Eigen_Analysis_covariance_matrix_data.csv'
return download
@app.callback([Output('data-table-eigenA', 'data'),
Output('data-table-eigenA', 'columns'),
Output('download-link-eigenA', 'href')],
[Input('all-custom-choice', 'value'),
Input("eigenA-outlier", 'value'),
Input('feature-input', 'value'),
Input("matrix-type-data-table", 'value'),
Input('csv-data', 'data')], )
def update_output(all_custom, outlier, input, matrix_type, data):
if not data:
        return dash.no_update, dash.no_update, dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
            # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))],
columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
Var_dfff = pd.concat([(Var_cumsum * 100)], axis=1)
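            # Assemble a wide summary table: one column per principal component
            # and three rows (eigenvalues, proportion of explained variance,
            # and cumulative proportion of explained variance in percent).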
Eigen_Analysis = pd.concat([PC_df.T, Eigen_df.T, Var_df.T, Var_dfff.T], axis=0)
Eigen_Analysis = Eigen_Analysis.rename(columns=Eigen_Analysis.iloc[0])
Eigen_Analysis = Eigen_Analysis.drop(Eigen_Analysis.index[0])
Eigen_Analysis.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
            # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
Var_dfff_outlier = pd.concat([Var_cumsum_outlier * 100], axis=1)
Eigen_Analysis_Outlier = pd.concat(
[PC_df_outlier.T, Eigen_df_outlier.T, Var_df_outlier.T, Var_dfff_outlier.T],
axis=0)
Eigen_Analysis_Outlier = Eigen_Analysis_Outlier.rename(columns=Eigen_Analysis_Outlier.iloc[0])
Eigen_Analysis_Outlier = Eigen_Analysis_Outlier.drop(Eigen_Analysis_Outlier.index[0])
Eigen_Analysis_Outlier.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_Outlier
elif outlier == "No" and matrix_type == "Covariance":
features1 = dff.columns
features = list(features1)
x_covar = dff.loc[:, features].values
pca_covar = PCA(n_components=len(features))
principalComponents_covar = pca_covar.fit_transform(x_covar)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
            # combining principal components and target
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
dfff_covar = finalDf_covar
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
            Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
import subprocess
import datetime
import os
import pandas as pd
from absl import logging
def exec_bash(cmd: str):
def gen_exec(cmd):
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
for stdout_line in iter(popen.stdout.readline, ""):
yield stdout_line
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmd)
for path in gen_exec(cmd.split()):
print(path, end="")
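# Example usage of exec_bash (illustrative; the command shown is an assumption):
#   exec_bash("python train.py --output_dir ./results")
# stdout is streamed line by line and CalledProcessError is raised on a
# non-zero exit code.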
def datetime_str(sep=""):
str_format = f'{sep.join([f"%{t}" for t in "ymd"])}_{sep.join([f"%{t}" for t in "HMS"])}'
return datetime.datetime.now().strftime(str_format)
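# Example outputs (for a call made on 2023-05-17 at 14:23:05):
#   datetime_str()    -> "230517_142305"      (%y%m%d_%H%M%S)
#   datetime_str("-") -> "23-05-17_14-23-05"  (%y-%m-%d_%H-%M-%S)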
def res_to_csv(flags_dict, total_results, hash=None):
file_path = f"{flags_dict['output_dir']}/{flags_dict['results_file']}"
if hash is not None:
file_path = "".join(file_path.split(".csv")[:-1]+["_",hash, ".csv"])
file_path = fix_path(file_path, False)
logging.info(f"Saving results to {file_path}")
res_dict = {k: [flags_dict[k]] for k in
["model", "seed", "dataset", "loss_fn", "ensemble_size", "train_epochs", "base_learning_rate",
"mlp_hidden_dim", "divide_l2_loss", "random_init"]}
for k, v in total_results.items():
res_dict[k] = [total_results[k]]
res_df = pd.DataFrame.from_dict(res_dict)
if os.path.exists(file_path):
        res_df = pd.concat([pd.read_csv(file_path, index_col=0), res_df])
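    # The function appears to be truncated here in the source; presumably the
    # merged results are then written back out, e.g. (assumption):
    #   res_df.to_csv(file_path)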
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 9 09:58:22 2020
@author: lenakilian
"""
import tabula
import pandas as pd
import numpy as np
use_for_io = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.2', '1.1.3.1', '1.1.3.2', '1.1.4',
'1.1.5', '1.1.6', '1.1.7', '1.1.8', '1.1.9', '1.1.10.1', '1.1.10.2',
'1.1.10.3', '1.1.10.4', '1.1.11.1', '1.1.11.2', '1.1.11.3', '1.1.12.1',
'1.1.12.2', '1.1.12.3', '1.1.13', '1.1.14', '1.1.15.1', '1.1.15.2', '1.1.16',
'1.1.17', '1.1.18.1', '1.1.18.2', '1.1.19.1', '1.1.19.2', '1.1.19.3',
'1.1.19.4', '1.1.19.5', '1.1.19.6', '1.1.20', '1.1.21', '1.1.22', '1.1.23.1',
'1.1.23.2', '1.1.23.3', '1.1.23.4', '1.1.24', '1.1.25', '1.1.26', '1.1.27',
'1.1.28.1', '1.1.28.2', '1.1.29', '1.1.30', '1.1.31', '1.1.32', '1.1.33.1',
'1.1.33.2', '1.1.33.3', '1.2.1', '1.2.2', '1.2.3', '1.2.4', '1.2.5', '1.2.6',
'2.1.1', '2.1.2.1', '2.1.2.2', '2.1.2.3', '2.1.3.1', '2.1.3.2', '2.1.4',
'2.2.1', '2.2.2.1', '2.2.2.2',
'3.1.1', '3.1.2', '3.1.3', '3.1.4', '3.1.5', '3.1.6', '3.1.7', '3.1.8',
'3.1.9.1', '3.1.9.2', '3.1.9.3', '3.1.9.4', '3.1.10', '3.1.11.1', '3.1.11.2',
'3.2.1', '3.2.2', '3.2.3', '3.2.4',
'4.1.1', '4.1.2', '4.2.1', '4.2.2', '4.2.3', '4.2.4', '4.3.1', '4.3.2',
'4.3.3', '4.4.1', '4.4.2', '4.4.3.1', '4.4.3.2', '4.4.3.3',
'5.1.1.1', '5.1.1.2', '5.1.1.3', '5.1.2.1', '5.1.2.2', '5.2.1', '5.2.2',
'5.3.1', '5.3.2', '5.3.3', '5.3.4', '5.3.5', '5.3.6', '5.3.7', '5.3.8',
'5.3.9', '5.4.1', '5.4.2', '5.4.3', '5.4.4', '5.5.1', '5.5.2', '5.5.3',
'5.5.4', '5.5.5', '5.6.1.1', '5.6.1.2', '5.6.2.1', '5.6.2.2', '5.6.2.3',
'5.6.2.4', '5.6.3.1', '5.6.3.2', '5.6.3.3',
'6.1.1.1', '6.1.1.2', '6.1.1.3', '6.1.1.4', '6.1.2.1', '6.1.2.2', '6.2.1.1',
'6.2.1.2', '6.2.1.3', '6.2.2',
'7.1.1.1', '7.1.1.2', '7.1.2.1', '7.1.2.2', '7.1.3.1', '7.1.3.2', '7.1.3.3',
'7.2.1.1', '7.2.1.2', '7.2.1.3', '7.2.1.4', '7.2.2.1', '7.2.2.2', '7.2.2.3',
'7.2.3.1', '7.2.3.2', '7.2.4.1', '7.2.4.2', '7.2.4.3', '7.2.4.4', '7.2.4.5',
'7.3.1.1', '7.3.1.2', '7.3.2.1', '7.3.2.2', '7.3.3.1', '7.3.3.2', '7.3.4.1',
'7.3.4.2', '7.3.4.3', '7.3.4.4', '7.3.4.5', '7.3.4.6', '7.3.4.7', '7.3.4.8',
'8.1', '8.2.1', '8.2.2', '8.2.3', '8.3.1', '8.3.2', '8.3.3', '8.3.4', '8.4',
'9.1.1.1', '9.1.1.2', '9.1.2.1', '9.1.2.2', '9.1.2.3', '9.1.2.4', '9.1.2.5',
'9.1.2.6', '9.1.2.7', '9.1.2.8', '9.1.2.9', '9.1.3.1', '9.1.3.2', '9.1.3.3',
'9.2.1', '9.2.2', '9.2.3', '9.2.4', '9.2.5', '9.2.6', '9.2.7', '9.2.8',
'9.3.1', '9.3.2.1', '9.3.2.2', '9.3.3', '9.3.4.1', '9.3.4.2', '9.3.4.3',
'9.3.4.4', '9.3.5.1', '9.3.5.2', '9.3.5.3', '9.4.1.1', '9.4.1.2', '9.4.1.3',
'9.4.1.4', '9.4.1.5', '9.4.2.1', '9.4.2.2', '9.4.2.3', '9.4.3.1', '9.4.3.2',
'9.4.3.3', '9.4.3.4', '9.4.3.5', '9.4.3.6', '9.4.4.1', '9.4.4.2', '9.4.4.3',
'9.4.5', '9.4.6.1', '9.4.6.2', '9.4.6.3', '9.4.6.4', '9.5.1', '9.5.2',
'9.5.3', '9.5.4', '9.5.5',
'10.1', '10.2',
'11.1.1', '11.1.2', '11.1.3', '192.168.127.12', '172.16.31.10', '192.168.3.11', '172.16.17.32',
'11.1.5', '172.16.17.32', '172.16.17.32', '11.2.1', '11.2.2', '11.2.3',
'12.1.1', '12.1.2', '192.168.3.11', '172.16.17.32', '172.16.17.32', '12.1.4', '172.16.17.32',
'172.16.31.10', '172.16.17.32', '172.16.58.3', '172.16.58.3', '172.16.17.32', '172.16.17.32',
'192.168.127.12', '192.168.3.11', '192.168.3.11', '192.168.3.11', '192.168.127.12', '192.168.3.11',
'192.168.127.12', '172.16.31.10', '172.16.17.32', '12.4.2', '192.168.3.11', '172.16.58.3',
'12.4.4', '172.16.58.3', '172.16.31.10', '192.168.3.11', '192.168.3.11', '192.168.127.12',
'172.16.31.10', '172.16.31.10', '172.16.17.32', '172.16.17.32', '192.168.3.11', '192.168.3.11',
'172.16.17.32', '172.16.58.3',
'13.1.1', '13.1.2', '13.1.3', '13.2.1', '13.2.2', '13.2.3', '13.3.1',
'13.3.2', '192.168.3.11', '172.16.17.32', '172.16.31.10', '172.16.17.32', '172.16.31.10',
'172.16.31.10', '172.16.58.3', '172.16.31.10',
'14.1.1', '14.1.2', '14.1.3', '14.2', '14.3.1', '14.3.2', '14.3.3', '14.3.4',
'14.3.5', '14.3.6', '14.3.7', '14.4.1', '14.4.2', '14.5.1', '14.5.2',
'14.5.3', '14.5.4', '14.5.5', '14.5.6', '14.5.7', '14.5.8', '14.6.1',
'14.6.2', '14.6.3', '14.7', '14.8']
# import specs documentation
years = ['2001-2002', '2002-2003', '2003-2004', '2004-2005', '2005-2006', '2006', '2007', '2009', '2010',
'2013', '2014', '2015-2016', '2016-2017']
# save first 2 as excel --> come in PDF
pages = ['261-277', '203-219']; names = ['4697userguide1.pdf', '5003userguide3.pdf']
for j in range(2):
file = 'LCFS/'+ years[j] + '/mrdoc/pdf/' + names[j]
temp = tabula.io.read_pdf(file, pages = pages[j], multiple_tables = True, pandas_options={'header':None})
writer = pd.ExcelWriter('LCFS/'+ years[j] + '/mrdoc/excel/specs.xlsx')
for i in range(len(temp)):
temp[i].to_excel(writer, 'Sheet ' + str(i))
writer.save()
names = ['specs.xlsx', 'specs.xlsx', '5210spec2003-04.xls', '5375tablea1spec2004-05.xls', '5688_specification_family_spending_EFS_2005-06.xls',
'5986_spec2006_userguide.xls', '6118_spec2007_userguide.xls', '6655spec2009_v2.xls', '6945spec2010.xls', '7702_2013_specification.xls',
'7992_spec2014.xls', '8210_spec_2015-16.xls', '8351_spec2016-17.xls']
specs = {}
for j in range(len(years)):
specs[int(years[j][:4])] = pd.read_excel('LCFS/'+ years[j] + '/mrdoc/excel/' + names[j], sheet_name=None, header=None)
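# The loop below normalises each year's specification sheets: leading "FS code"
# columns, header rows ("FS code"/"Variable"), empty rows/columns and surplus
# trailing columns are dropped so every sheet ends up with the six columns
# renamed to LCFS_1/COIPLUS_1/Desc_1/LCFS_2/COIPLUS_2/Desc_2 further down.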
cleaned_specs = {}
for year in list(specs.keys())[2:]:
cleaned_specs[year] = {}
i = 0
for item in list(specs[year].keys()):
cleaned_specs[year][item] = specs[year][item]
if 'Family Spending' in item:
pass
elif 'changes' in item:
pass
else:
if 'FS code' in cleaned_specs[year][item].iloc[:, 1].tolist():
cleaned_specs[year][item] = cleaned_specs[year][item].iloc[:, 1:]
if 'FS code' in cleaned_specs[year][item].iloc[:, 0].tolist():
cleaned_specs[year][item] = cleaned_specs[year][item].iloc[:, 1:]
if 'FS codes' in cleaned_specs[year][item].iloc[:, 0].tolist():
cleaned_specs[year][item] = cleaned_specs[year][item].iloc[:, 1:]
cleaned_specs[year][item] = cleaned_specs[year][item].loc[cleaned_specs[year][item].iloc[:, 0] != 'FS code']\
.dropna(axis=0, how='all').dropna(axis=1, how='all')
cleaned_specs[year][item] = cleaned_specs[year][item].loc[cleaned_specs[year][item].iloc[:, 0] != 'Variable']
if 'Alcohol' in item or 'Clothing' in item:
if len(cleaned_specs[year][item].columns) > 6:
cleaned_specs[year][item] = cleaned_specs[year][item].dropna(axis=1, how='all')
if len(cleaned_specs[year][item].columns) > 6:
cleaned_specs[year][item] = cleaned_specs[year][item].iloc[:, :-1]
else:
if len(cleaned_specs[year][item].columns) > 6:
cleaned_specs[year][item] = cleaned_specs[year][item].iloc[:, :-1]
if len(cleaned_specs[year][item].columns) > 6:
cleaned_specs[year][item] = cleaned_specs[year][item].dropna(axis=1, how='all')
cleaned_specs[year][item].columns = ['LCFS_1', 'COIPLUS_1', 'Desc_1', 'LCFS_2', 'COIPLUS_2', 'Desc_2']
cleaned_specs[year][item].loc[cleaned_specs[year][item]['LCFS_1'].str.len() > 90, 'LCFS_1'] = np.nan
cleaned_specs[year][item] = cleaned_specs[year][item].dropna(how='all')
for j in range(1, 3):
cleaned_specs[year][item].loc[
cleaned_specs[year][item]['COIPLUS_' + str(j)].str[-1] == '.',
'COIPLUS_' + str(j)] = cleaned_specs[year][item]['COIPLUS_' + str(j)].str[:-1]
if i == 0:
cleaned_specs[year]['all'] = cleaned_specs[year][item]
i += 1
else:
cleaned_specs[year]['all'] = cleaned_specs[year]['all'].append(cleaned_specs[year][item])
writer = pd.ExcelWriter('LCFS/lcfs_coiplus_lookup.xlsx')
check_specs = all_specs = {year:cleaned_specs[year]['all'].dropna(how='all') for year in list(specs.keys())[2:]}
new_specs = {}
no_rooms = ['A114', 'a114', 'a114', 'a114', 'a114p', 'a114p', 'a114p', 'a114p', 'a114p',
'a114p', 'a114p', 'a114p', 'a114p', 'a114p', 'a114p', 'a114p', 'a114p']
room_dict = dict(zip([int(x[:4]) for x in years], no_rooms))
for year in list(check_specs.keys()):
check_specs[year].index = list(range(len(check_specs[year])))
check_specs[year].loc[check_specs[year]['COIPLUS_2'].isnull() == True,
'COIPLUS_2'] = check_specs[year]['COIPLUS_1']
for i in range(1, 3):
if i == 1:
temp = check_specs[year][['LCFS_1', 'LCFS_2', 'COIPLUS_1', 'Desc_1']]
temp.loc[temp['LCFS_1'].isnull() == True, 'LCFS_1'] = temp['LCFS_2']
else:
temp = all_specs[year][['LCFS_2', 'COIPLUS_2', 'Desc_2']]
temp.index = list(range(len(temp)))
temp2 = temp['COIPLUS_' + str(i)].tolist(); temp3 = temp['Desc_' + str(i)].tolist()
for j in range(1, len(temp2)):
if pd.isnull(temp2[j]) == True:
temp2[j] = temp2[j-1]; temp3[j] = temp3[j-1]
temp['COIPLUS_all'] = temp2; temp['Desc_all'] = temp3
temp = temp[['LCFS_' + str(i), 'COIPLUS_all', 'Desc_all']].apply(lambda x: x.astype(str))
temp = temp.set_index(['COIPLUS_all', 'Desc_all']).groupby('COIPLUS_all')['LCFS_' + str(i)].transform(lambda x: '+'.join(x)).drop_duplicates()
temp.columns = 'LCFS'
if i == 1:
new_specs[year] = temp
else:
new_specs[year] = new_specs[year].append(temp).reset_index()
new_specs[year].columns = ['COIPLUS', 'Description', 'LCFS_Code']
new_specs[year]['LCFS_Code'] = [x.replace(' ', '').replace('nan+', '')\
.replace('+nan', '').replace('nan', '')\
.replace('++', '+').replace('+-', '-')\
.replace('-', '-1*')
for x in new_specs[year]['LCFS_Code'].tolist()]
new_specs[year]['COIPLUS'] = [x.split(' ')[0] for x in new_specs[year]['COIPLUS'].tolist()]
new_specs[year] = new_specs[year].loc[new_specs[year]['COIPLUS'] != 'nan']
new_specs[year]['Level_1'] = [pd.to_numeric(x.split('.')[0], errors='coerce') for x in new_specs[year]['COIPLUS'].tolist()]
for i in range(2, 5):
temp = []
for x in new_specs[year]['COIPLUS'].tolist():
if len(x.split('.')) > i-1:
temp.append(pd.to_numeric(x.split('.')[i-1], errors='coerce'))
else:
temp.append(0)
new_specs[year]['Level_' + str(i)] = temp
new_specs[year].loc[new_specs[year]['LCFS_Code'].str[-1] == '+',
'LCFS_Code'] = new_specs[year]['LCFS_Code'].str[:-1]
new_specs[year] = new_specs[year].set_index(['Level_1', 'Level_2', 'Level_3', 'Level_4']).sort_index().drop_duplicates()
new_specs[year].loc[new_specs[year]['COIPLUS'] == '4.1.2', 'Description'] = 'Imputed Rent'
new_specs[year].loc[new_specs[year]['COIPLUS'] == '4.1.2', 'LCFS_Code'] = 'owned_prop*' + room_dict[year]
new_specs[year] = new_specs[year].loc[new_specs[year]['Description'] != 'nan']
new_specs[year] = new_specs[year].loc[new_specs[year]['LCFS_Code'] != '']
new_specs[year]['IO_use'] = False
new_specs[year].loc[new_specs[year]['COIPLUS'].isin( use_for_io) == True, 'IO_use'] = True
    new_specs[year] = new_specs[year].loc[new_specs[year][
        'Description'] != 'Stationery, diaries, address books, art materials']
new_specs[year]['Description'] = new_specs[year]['Description'].str.replace(' and ', ' & ')
new_specs[year] = new_specs[year].drop_duplicates()
new_specs[year].to_excel(writer, str(year))
writer.save()
check = new_specs[2003].loc[new_specs[2003]['IO_use'] == True]
# missing
# 8.4 - Internet Subscription Fees - 9.4.3.7 in coiplus
desc_anne_john = pd.read_excel('LCFS/lcfs_desc_anne&john.xlsx', header=None)
desc_anne_john['COICOP'] = [x.split(' ')[0] for x in desc_anne_john[0]]
desc_anne_john['Description_AJ'] = [' '.join(x.split(' ')[1:]) for x in desc_anne_john[0]]
coicop_anne_john = pd.read_excel('LCFS/lcfs_coicop_lookup_anne&john.xlsx', sheet_name=None)
writer = pd.ExcelWriter('LCFS/lcfs_coicop_full_lookup.xlsx')
from datetime import date, datetime, timedelta
import pandas as pd
import numpy as np
import math
import os
from pathlib import Path
import scipy.interpolate
import sys
import PySimpleGUI as sg
files = os.listdir('.')
calibrationFilename = "timeDelayCalibration.csv"
calibrationFile = ""
for file in files:
if file.endswith(calibrationFilename):
calibrationFile=file
layout = [[sg.T("Please load the files below:")], [sg.Text("Impulse logfile: "), sg.Input(), sg.FileBrowse(key="-DATA-")], [sg.Text("Calibration file: "), sg.Input(calibrationFile), sg.FileBrowse(key="-CAL-")],[sg.Button("Process")]]
###Building Window
window = sg.Window('Load files', layout, size=(600,150))
while True:
event, values = window.read()
if event == sg.WIN_CLOSED or event=="Exit":
sys.exit("No files loaded.")
elif event == "Process":
window.close()
break
if values['-DATA-']=="":
sys.exit("No Impulse file loaded.")
else:
impulseLogfilePath = Path(values['-DATA-'])
if values['-CAL-'] != "":
timeDelayCalibrationPath = Path(values['-CAL-'])
else:
timeDelayCalibrationPath = calibrationFile
#############################################
MSLogfilePath = ""
beforeTemParameters = ["TimeStamp", "Experiment time", "MFC1 Measured", "MFC1 Setpoint","MFC2 Measured", "MFC2 Setpoint","MFC3 Measured", "MFC3 Setpoint", "MixValve", "% Gas1 Measured", "% Gas2 Measured", "% Gas3 Measured", "% Gas1 Setpoint", "% Gas2 Setpoint", "PumpRotation", "ActiveProgram"]
inTemParameters = ["TimeStamp", "Experiment time", "Fnr", "Fnr Setpoint", "Temperature Setpoint","Temperature Measured", "Pin Measured", "Pin Setpoint", "Pout Measured", "Pout Setpoint", "Pnr (Calculated from Pin Pout)", "Pnr Setpoint","Measured power", "Pvac", "Relative power reference", "Relative power"]
afterTemParameters = ["TimeStamp", "Experiment time", "Channel#1", "Channel#2", "Channel#3", "Channel#4", "Channel#5", "Channel#6", "Channel#7", "Channel#8", "Channel#9", "Channel#10"]
if timeDelayCalibrationPath!="":
print("Loaded curve parameters used.")
    curveParameters = pd.read_csv(timeDelayCalibrationPath)
"""
Library of standardized plotting functions for basic plot formats
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
import xarray as xr
from scipy.interpolate import interp1d
from scipy.signal import welch
# Standard field labels
# - default: e.g., "Km/s"
# - all superscript: e.g., "K m s^{-1}"
fieldlabels_default_units = {
'wspd': r'Wind speed [m/s]',
'wdir': r'Wind direction [$^\circ$]',
'u': r'u [m/s]',
'v': r'v [m/s]',
'w': r'Vertical wind speed [m/s]',
'theta': r'$\theta$ [K]',
'thetav': r'$\theta_v$ [K]',
'uu': r'$\langle u^\prime u^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'vv': r'$\langle v^\prime v^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'ww': r'$\langle w^\prime w^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'uv': r'$\langle u^\prime v^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'uw': r'$\langle u^\prime w^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'vw': r'$\langle v^\prime w^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'tw': r'$\langle w^\prime \theta^\prime \rangle \;[\mathrm{Km/s}]$',
'TI': r'TI $[-]$',
'TKE': r'TKE $[\mathrm{m^2/s^2}]$',
}
fieldlabels_superscript_units = {
'wspd': r'Wind speed [m s$^{-1}$]',
'wdir': r'Wind direction [$^\circ$]',
'u': r'u [m s$^{-1}$]',
'v': r'v [m s$^{-1}$]',
'w': r'Vertical wind speed [m s$^{-1}$]',
'theta': r'$\theta$ [K]',
'thetav': r'$\theta_v$ [K]',
'uu': r'$\langle u^\prime u^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'vv': r'$\langle v^\prime v^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'ww': r'$\langle w^\prime w^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'uv': r'$\langle u^\prime v^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'uw': r'$\langle u^\prime w^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'vw': r'$\langle v^\prime w^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'tw': r'$\langle w^\prime \theta^\prime \rangle \;[\mathrm{K m s^{-1}}]$',
'TI': r'TI $[-]$',
'TKE': r'TKE $[\mathrm{m^2 s^{-2}}]$',
}
# Standard field labels for frequency spectra
spectrumlabels_default_units = {
'u': r'$E_{uu}\;[\mathrm{m^2/s}]$',
'v': r'$E_{vv}\;[\mathrm{m^2/s}]$',
'w': r'$E_{ww}\;[\mathrm{m^2/s}]$',
'theta': r'$E_{\theta\theta}\;[\mathrm{K^2 s}]$',
'thetav': r'$E_{\theta\theta}\;[\mathrm{K^2 s}]$',
'wspd': r'$E_{UU}\;[\mathrm{m^2/s}]$',
}
spectrumlabels_superscript_units = {
'u': r'$E_{uu}\;[\mathrm{m^2\;s^{-1}}]$',
'v': r'$E_{vv}\;[\mathrm{m^2\;s^{-1}}]$',
'w': r'$E_{ww}\;[\mathrm{m^2\;s^{-1}}]$',
'theta': r'$E_{\theta\theta}\;[\mathrm{K^2\;s}]$',
'thetav': r'$E_{\theta\theta}\;[\mathrm{K^2\;s}]$',
'wspd': r'$E_{UU}\;[\mathrm{m^2\;s^{-1}}]$',
}
# Default settings
default_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
standard_fieldlabels = fieldlabels_default_units
standard_spectrumlabels = spectrumlabels_default_units
# Supported dimensions and associated names
dimension_names = {
'time': ['datetime','time','Time'],
'height': ['height','heights','z'],
'frequency': ['frequency','f',]
}
# Show debug information
debug = False
def plot_timeheight(datasets,
fields=None,
fig=None,ax=None,
colorschemes={},
fieldlimits=None,
heightlimits=None,
timelimits=None,
fieldlabels={},
labelsubplots=False,
showcolorbars=True,
fieldorder='C',
ncols=1,
subfigsize=(12,4),
plot_local_time=False,
local_time_offset=0,
datasetkwargs={},
**kwargs
):
"""
Plot time-height contours for different datasets and fields
Usage
=====
datasets : pandas.DataFrame or dict
Dataset(s). If more than one set, datasets should
be a dictionary with entries <dataset_name>: dataset
fields : str, list, 'all' (or None)
Fieldname(s) corresponding to particular column(s) of
the datasets. fields can be None if input are MultiIndex Series.
'all' means all fields will be plotted (in this case all
datasets should have the same fields)
fig : figure handle
Custom figure handle. Should be specified together with ax
ax : axes handle, or list or numpy ndarray with axes handles
        Custom axes handle(s).
Size of ax should equal ndatasets*nfields
colorschemes : str or dict
Name of colorschemes. If only one field is plotted, colorschemes
can be a string. Otherwise, it should be a dictionary with
entries <fieldname>: name_of_colorschemes
Missing colorschemess are set to 'viridis'
fieldlimits : list or tuple, or dict
Value range for the various fields. If only one field is
plotted, fieldlimits can be a list or tuple. Otherwise, it
should be a dictionary with entries <fieldname>: fieldlimit.
Missing fieldlimits are set automatically
heightlimits : list or tuple
Height axis limits
timelimits : list or tuple
Time axis limits
fieldlabels : str or dict
Custom field labels. If only one field is plotted, fieldlabels
can be a string. Otherwise it should be a dictionary with
entries <fieldname>: fieldlabel
labelsubplots : bool, list or tuple
Label subplots as (a), (b), (c), ... If a list or tuple is given
their values should be the horizontal and vertical position
relative to each subaxis.
showcolorbars : bool
Show colorbar per subplot
fieldorder : 'C' or 'F'
Index ordering for assigning fields and datasets to axes grid
(row by row). Fields is considered the first axis, so 'C' means
fields change slowest, 'F' means fields change fastest.
ncols : int
Number of columns in axes grid, must be a true divisor of total
number of axes.
subfigsize : list or tuple
Standard size of subfigures
plot_local_time : bool or str
Plot dual x axes with both UTC time and local time. If a str is
provided, then plot_local_time is assumed to be True and the str
is used as the datetime format.
local_time_offset : float
Local time offset from UTC
datasetkwargs : dict
Dataset-specific options that are passed on to the actual
plotting function. These options overwrite general options
specified through **kwargs. The argument should be a dictionary
with entries <dataset_name>: {**kwargs}
**kwargs : other keyword arguments
Options that are passed on to the actual plotting function.
Note that these options should be the same for all datasets and
fields and can not be used to set dataset or field specific
limits, colorschemess, norms, etc.
Example uses include setting shading, rasterized, etc.
"""
args = PlottingInput(
datasets=datasets,
fields=fields,
fieldlimits=fieldlimits,
fieldlabels=fieldlabels,
colorschemes=colorschemes,
fieldorder=fieldorder
)
args.set_missing_fieldlimits()
nfields = len(args.fields)
ndatasets = len(args.datasets)
ntotal = nfields * ndatasets
# Concatenate custom and standard field labels
# (custom field labels overwrite standard fields labels if existent)
args.fieldlabels = {**standard_fieldlabels, **args.fieldlabels}
fig, ax, nrows, ncols = _create_subplots_if_needed(
ntotal,
ncols,
sharex=True,
sharey=True,
subfigsize=subfigsize,
hspace=0.2,
fig=fig,
ax=ax
)
# Create flattened view of axes
axv = np.asarray(ax).reshape(-1)
# Initialise list of colorbars
cbars = []
# Loop over datasets, fields and times
for i, dfname in enumerate(args.datasets):
df = args.datasets[dfname]
heightvalues = _get_dim_values(df,'height')
timevalues = _get_dim_values(df,'time')
assert(heightvalues is not None), 'timeheight plot needs a height axis'
assert(timevalues is not None), 'timeheight plot needs a time axis'
if isinstance(timevalues, pd.DatetimeIndex):
# If plot local time, shift timevalues
if plot_local_time is not False:
timevalues = timevalues + pd.to_timedelta(local_time_offset,'h')
# Convert to days since 0001-01-01 00:00 UTC, plus one
numerical_timevalues = mdates.date2num(timevalues.values)
else:
if isinstance(timevalues, pd.TimedeltaIndex):
timevalues = timevalues.total_seconds()
# Timevalues is already a numerical array
numerical_timevalues = timevalues
# Create time-height mesh grid
tst = _get_staggered_grid(numerical_timevalues)
zst = _get_staggered_grid(heightvalues)
Ts,Zs = np.meshgrid(tst,zst,indexing='xy')
# Create list with available fields only
available_fields = _get_available_fieldnames(df,args.fields)
# Pivot all fields in a dataset at once
df_pivot = _get_pivot_table(df,'height',available_fields)
for j, field in enumerate(args.fields):
# If available_fields is [None,], fieldname is unimportant
if available_fields == [None]:
pass
# Else, check if field is available
elif not field in available_fields:
print('Warning: field "'+field+'" not available in dataset '+dfname)
continue
# Store plotting options in dictionary
plotting_properties = {
'vmin': args.fieldlimits[field][0],
'vmax': args.fieldlimits[field][1],
'cmap': args.cmap[field]
}
# Index of axis corresponding to dataset i and field j
if args.fieldorder=='C':
axi = i*nfields + j
else:
axi = j*ndatasets + i
# Extract data from dataframe
fieldvalues = _get_pivoted_field(df_pivot,field)
# Gather label, color, general options and dataset-specific options
# (highest priority to dataset-specific options, then general options)
try:
plotting_properties = {**plotting_properties,**kwargs,**datasetkwargs[dfname]}
except KeyError:
plotting_properties = {**plotting_properties,**kwargs}
# Plot data
im = axv[axi].pcolormesh(Ts,Zs,fieldvalues.T,**plotting_properties)
# Colorbar mark up
if showcolorbars:
cbar = fig.colorbar(im,ax=axv[axi],shrink=1.0)
# Set field label if known
try:
cbar.set_label(args.fieldlabels[field])
except KeyError:
pass
# Save colorbar
cbars.append(cbar)
# Set title if more than one dataset
if ndatasets>1:
axv[axi].set_title(dfname,fontsize=16)
# Format time axis
if isinstance(timevalues, (pd.DatetimeIndex, pd.TimedeltaIndex)):
ax2 = _format_time_axis(fig,axv[(nrows-1)*ncols:],plot_local_time,local_time_offset,timelimits)
else:
ax2 = None
# Set time limits if specified
if not timelimits is None:
axv[-1].set_xlim(timelimits)
# Set time label
for axi in axv[(nrows-1)*ncols:]:
axi.set_xlabel('time [s]')
if not heightlimits is None:
axv[-1].set_ylim(heightlimits)
# Add y labels
for r in range(nrows):
axv[r*ncols].set_ylabel(r'Height [m]')
# Align time, height and color labels
_align_labels(fig,axv,nrows,ncols)
if showcolorbars:
_align_labels(fig,[cb.ax for cb in cbars],nrows,ncols)
# Number sub figures as a, b, c, ...
if labelsubplots is not False:
try:
hoffset, voffset = labelsubplots
except (TypeError, ValueError):
hoffset, voffset = -0.14, 1.0
for i,axi in enumerate(axv):
axi.text(hoffset,voffset,'('+chr(i+97)+')',transform=axi.transAxes,size=16)
# Return cbar instead of array if ntotal==1
if len(cbars)==1:
cbars=cbars[0]
if (plot_local_time is not False) and ax2 is not None:
return fig, ax, ax2, cbars
else:
return fig, ax, cbars
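# Example usage of plot_timeheight (illustrative; `df` is assumed to be a
# DataFrame indexed by (time, height) with a 'wspd' column):
#   fig, ax, cbars = plot_timeheight({'WRF': df}, fields='wspd',
#                                    colorschemes={'wspd': 'viridis'},
#                                    heightlimits=(0, 2000))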
def plot_timehistory_at_height(datasets,
fields=None,
heights=None,
fig=None,ax=None,
fieldlimits=None,
timelimits=None,
fieldlabels={},
cmap=None,
stack_by_datasets=None,
labelsubplots=False,
showlegend=None,
ncols=1,
subfigsize=(12,3),
plot_local_time=False,
local_time_offset=0,
datasetkwargs={},
**kwargs
):
"""
Plot time history at specified height(s) for various dataset(s)
and/or field(s).
By default, data for multiple datasets or multiple heights are
stacked in a single subplot. When multiple datasets and multiple
heights are specified together, heights are stacked in a subplot
per field and per dataset.
Usage
=====
datasets : pandas.DataFrame or dict
Dataset(s). If more than one set, datasets should
be a dictionary with entries <dataset_name>: dataset
fields : str, list, 'all' (or None)
Fieldname(s) corresponding to particular column(s) of
the datasets. fields can be None if input are Series.
'all' means all fields will be plotted (in this case all
datasets should have the same fields)
heights : float, list, 'all' (or None)
Height(s) for which time history is plotted. heights can be
None if all datasets combined have no more than one height
value. 'all' means the time history for all heights in the
datasets will be plotted (in this case all datasets should
have the same heights)
fig : figure handle
Custom figure handle. Should be specified together with ax
ax : axes handle, or list or numpy ndarray with axes handles
        Custom axes handle(s).
Size of ax should equal nfields * (ndatasets or nheights)
fieldlimits : list or tuple, or dict
Value range for the various fields. If only one field is
plotted, fieldlimits can be a list or tuple. Otherwise, it
should be a dictionary with entries <fieldname>: fieldlimit.
Missing fieldlimits are set automatically
timelimits : list or tuple
Time axis limits
fieldlabels : str or dict
Custom field labels. If only one field is plotted, fieldlabels
can be a string. Otherwise it should be a dictionary with
entries <fieldname>: fieldlabel
cmap : str
Colormap used when stacking heights
stack_by_datasets : bool (or None)
Flag to specify what is plotted ("stacked") together per subfigure.
If True, stack datasets together, otherwise stack by heights. If
None, stack_by_datasets will be set based on the number of heights
and datasets.
labelsubplots : bool, list or tuple
Label subplots as (a), (b), (c), ... If a list or tuple is given
their values should be the horizontal and vertical position
relative to each subaxis.
showlegend : bool (or None)
Label different plots and show legend. If None, showlegend is set
to True if legend will have more than one entry, otherwise it is
set to False.
ncols : int
Number of columns in axes grid, must be a true divisor of total
number of axes.
subfigsize : list or tuple
Standard size of subfigures
plot_local_time : bool or str
Plot dual x axes with both UTC time and local time. If a str is
provided, then plot_local_time is assumed to be True and the str
is used as the datetime format.
local_time_offset : float
Local time offset from UTC
datasetkwargs : dict
Dataset-specific options that are passed on to the actual
plotting function. These options overwrite general options
specified through **kwargs. The argument should be a dictionary
with entries <dataset_name>: {**kwargs}
**kwargs : other keyword arguments
Options that are passed on to the actual plotting function.
Note that these options should be the same for all datasets,
fields and heights, and they can not be used to set dataset,
field or height specific colors, limits, etc.
Example uses include setting linestyle/width, marker, etc.
"""
# Avoid FutureWarning concerning the use of an implicitly registered
# datetime converter for a matplotlib plotting method. The converter
# was registered by pandas on import. Future versions of pandas will
# require explicit registration of matplotlib converters, as done here.
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
args = PlottingInput(
datasets=datasets,
fields=fields,
heights=heights,
fieldlimits=fieldlimits,
fieldlabels=fieldlabels,
)
nfields = len(args.fields)
nheights = len(args.heights)
ndatasets = len(args.datasets)
# Concatenate custom and standard field labels
# (custom field labels overwrite standard fields labels if existent)
args.fieldlabels = {**standard_fieldlabels, **args.fieldlabels}
# Set up subplot grid
if stack_by_datasets is None:
if nheights>1:
stack_by_datasets = False
else:
stack_by_datasets = True
if stack_by_datasets:
ntotal = nfields*nheights
else:
ntotal = nfields*ndatasets
fig, ax, nrows, ncols = _create_subplots_if_needed(
ntotal,
ncols,
sharex=True,
subfigsize=subfigsize,
hspace=0.2,
fig=fig,
ax=ax
)
# Create flattened view of axes
axv = np.asarray(ax).reshape(-1)
# Set showlegend if not specified
if showlegend is None:
if (stack_by_datasets and ndatasets>1) or (not stack_by_datasets and nheights>1):
showlegend = True
else:
showlegend = False
# Loop over datasets and fields
for i,dfname in enumerate(args.datasets):
df = args.datasets[dfname]
timevalues = _get_dim_values(df,'time',default_idx=True)
assert(timevalues is not None), 'timehistory plot needs a time axis'
heightvalues = _get_dim_values(df,'height')
if isinstance(timevalues, pd.TimedeltaIndex):
timevalues = timevalues.total_seconds()
# If plot local time, shift timevalues
if (plot_local_time is not False) and \
isinstance(timevalues, (pd.DatetimeIndex, pd.TimedeltaIndex)):
timevalues = timevalues + pd.to_timedelta(local_time_offset,'h')
# Create list with available fields only
available_fields = _get_available_fieldnames(df,args.fields)
# If any of the requested heights is not available,
# pivot the dataframe to allow interpolation.
# Pivot all fields in a dataset at once to reduce computation time
if (not heightvalues is None) and (not all([h in heightvalues for h in args.heights])):
df_pivot = _get_pivot_table(df,'height',available_fields)
pivoted = True
if debug: print('Pivoting '+dfname)
else:
pivoted = False
for j, field in enumerate(args.fields):
# If available_fields is [None,], fieldname is unimportant
if available_fields == [None]:
pass
# Else, check if field is available
elif not field in available_fields:
print('Warning: field "'+field+'" not available in dataset '+dfname)
continue
for k, height in enumerate(args.heights):
# Store plotting options in dictionary
# Set default linestyle to '-' and no markers
plotting_properties = {
'linestyle':'-',
'marker':None,
}
# Axis order, label and title depend on value of stack_by_datasets
if stack_by_datasets:
# Index of axis corresponding to field j and height k
axi = k*nfields + j
# Use datasetname as label
if showlegend:
plotting_properties['label'] = dfname
# Set title if multiple heights are compared
if nheights>1:
axv[axi].set_title('z = {:.1f} m'.format(height),fontsize=16)
# Set colors
plotting_properties['color'] = default_colors[i % len(default_colors)]
else:
# Index of axis corresponding to field j and dataset i
axi = i*nfields + j
# Use height as label
if showlegend:
plotting_properties['label'] = 'z = {:.1f} m'.format(height)
# Set title if multiple datasets are compared
if ndatasets>1:
axv[axi].set_title(dfname,fontsize=16)
# Set colors
if cmap is not None:
cmap = mpl.cm.get_cmap(cmap)
plotting_properties['color'] = cmap(k/(nheights-1))
else:
plotting_properties['color'] = default_colors[k % len(default_colors)]
# Extract data from dataframe
if pivoted:
signal = interp1d(heightvalues,_get_pivoted_field(df_pivot,field).values,axis=-1,fill_value="extrapolate")(height)
else:
slice_z = _get_slice(df,height,'height')
signal = _get_field(slice_z,field).values
# Gather label, color, general options and dataset-specific options
# (highest priority to dataset-specific options, then general options)
try:
plotting_properties = {**plotting_properties,**kwargs,**datasetkwargs[dfname]}
except KeyError:
plotting_properties = {**plotting_properties,**kwargs}
# Plot data
axv[axi].plot(timevalues,signal,**plotting_properties)
# Set field label if known
try:
axv[axi].set_ylabel(args.fieldlabels[field])
except KeyError:
pass
# Set field limits if specified
try:
axv[axi].set_ylim(args.fieldlimits[field])
except KeyError:
pass
# Set axis grid
for axi in axv:
axi.xaxis.grid(True,which='both')
axi.yaxis.grid(True)
# Format time axis
if isinstance(timevalues, (pd.DatetimeIndex, pd.TimedeltaIndex)):
ax2 = _format_time_axis(fig,axv[(nrows-1)*ncols:],plot_local_time,local_time_offset,timelimits)
else:
ax2 = None
# Set time limits if specified
if not timelimits is None:
axv[-1].set_xlim(timelimits)
# Set time label
for axi in axv[(nrows-1)*ncols:]:
axi.set_xlabel('time [s]')
# Number sub figures as a, b, c, ...
if labelsubplots is not False:
try:
hoffset, voffset = labelsubplots
except (TypeError, ValueError):
hoffset, voffset = -0.14, 1.0
for i,axi in enumerate(axv):
axi.text(hoffset,voffset,'('+chr(i+97)+')',transform=axi.transAxes,size=16)
# Add legend
if showlegend:
leg = _format_legend(axv,index=ncols-1)
# Align labels
_align_labels(fig,axv,nrows,ncols)
if (plot_local_time is not False) and ax2 is not None:
return fig, ax, ax2
else:
return fig, ax
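# Example usage of plot_timehistory_at_height (illustrative; same assumed
# (time, height) layout for `df` as above):
#   fig, ax = plot_timehistory_at_height({'WRF': df}, fields=['wspd', 'wdir'],
#                                        heights=[40., 80.])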
def plot_profile(datasets,
fields=None,
times=None,
timerange=None,
fig=None,ax=None,
fieldlimits=None,
heightlimits=None,
fieldlabels={},
cmap=None,
stack_by_datasets=None,
labelsubplots=False,
showlegend=None,
fieldorder='C',
ncols=None,
subfigsize=(4,5),
plot_local_time=False,
local_time_offset=0,
datasetkwargs={},
**kwargs
):
"""
Plot vertical profile at specified time(s) for various dataset(s)
and/or field(s).
By default, data for multiple datasets or multiple times are
stacked in a single subplot. When multiple datasets and multiple
times are specified together, times are stacked in a subplot
per field and per dataset.
Usage
=====
datasets : pandas.DataFrame or dict
Dataset(s). If more than one set, datasets should
be a dictionary with entries <dataset_name>: dataset
fields : str, list, 'all' (or None)
Fieldname(s) corresponding to particular column(s) of
the datasets. fields can be None if input are Series.
'all' means all fields will be plotted (in this case all
datasets should have the same fields)
times : str, int, float, list (or None)
Time(s) for which vertical profiles are plotted, specified as
either datetime strings or numerical values (seconds, e.g.,
simulation time). times can be None if all datasets combined
have no more than one time value, or if timerange is specified.
timerange : tuple or list
Start and end times (inclusive) between which all times are
plotted. If cmap is None, then it will automatically be set to
viridis by default. This overrides times when specified.
fig : figure handle
Custom figure handle. Should be specified together with ax
ax : axes handle, or list or numpy ndarray with axes handles
        Custom axes handle(s).
Size of ax should equal nfields * (ndatasets or ntimes)
fieldlimits : list or tuple, or dict
Value range for the various fields. If only one field is
plotted, fieldlimits can be a list or tuple. Otherwise, it
should be a dictionary with entries <fieldname>: fieldlimit.
Missing fieldlimits are set automatically
heightlimits : list or tuple
Height axis limits
fieldlabels : str or dict
Custom field labels. If only one field is plotted, fieldlabels
can be a string. Otherwise it should be a dictionary with
entries <fieldname>: fieldlabel
cmap : str
Colormap used when stacking times
stack_by_datasets : bool (or None)
Flag to specify what is plotted ("stacked") together per subfigure.
If True, stack datasets together, otherwise stack by times. If
None, stack_by_datasets will be set based on the number of times
and datasets.
labelsubplots : bool, list or tuple
Label subplots as (a), (b), (c), ... If a list or tuple is given
their values should be the horizontal and vertical position
relative to each subaxis.
showlegend : bool (or None)
Label different plots and show legend. If None, showlegend is set
to True if legend will have more than one entry, otherwise it is
set to False.
fieldorder : 'C' or 'F'
Index ordering for assigning fields and datasets/times (depending
on stack_by_datasets) to axes grid (row by row). Fields is considered the
first axis, so 'C' means fields change slowest, 'F' means fields
change fastest.
ncols : int
Number of columns in axes grid, must be a true divisor of total
number of axes.
subfigsize : list or tuple
Standard size of subfigures
plot_local_time : bool or str
Plot dual x axes with both UTC time and local time. If a str is
provided, then plot_local_time is assumed to be True and the str
is used as the datetime format.
local_time_offset : float
Local time offset from UTC
datasetkwargs : dict
Dataset-specific options that are passed on to the actual
plotting function. These options overwrite general options
specified through **kwargs. The argument should be a dictionary
with entries <dataset_name>: {**kwargs}
**kwargs : other keyword arguments
Options that are passed on to the actual plotting function.
Note that these options should be the same for all datasets,
fields and times, and they can not be used to set dataset,
field or time specific colors, limits, etc.
Example uses include setting linestyle/width, marker, etc.
"""
args = PlottingInput(
datasets=datasets,
fields=fields,
times=times,
timerange=timerange,
fieldlimits=fieldlimits,
fieldlabels=fieldlabels,
fieldorder=fieldorder,
)
nfields = len(args.fields)
ntimes = len(args.times)
ndatasets = len(args.datasets)
# Concatenate custom and standard field labels
# (custom field labels overwrite standard fields labels if existent)
args.fieldlabels = {**standard_fieldlabels, **args.fieldlabels}
# Set up subplot grid
if stack_by_datasets is None:
if ntimes>1:
stack_by_datasets = False
else:
stack_by_datasets = True
if stack_by_datasets:
ntotal = nfields * ntimes
else:
ntotal = nfields * ndatasets
fig, ax, nrows, ncols = _create_subplots_if_needed(
ntotal,
ncols,
default_ncols=int(ntotal/nfields),
fieldorder=args.fieldorder,
avoid_single_column=True,
sharey=True,
subfigsize=subfigsize,
hspace=0.4,
fig=fig,
ax=ax,
)
# Create flattened view of axes
axv = np.asarray(ax).reshape(-1)
# Set showlegend if not specified
if showlegend is None:
if (stack_by_datasets and ndatasets>1) or (not stack_by_datasets and ntimes>1):
showlegend = True
else:
showlegend = False
# Set default sequential colormap if timerange was specified
if (timerange is not None) and (cmap is None):
cmap = 'viridis'
# Loop over datasets, fields and times
for i, dfname in enumerate(args.datasets):
df = args.datasets[dfname]
heightvalues = _get_dim_values(df,'height',default_idx=True)
assert(heightvalues is not None), 'profile plot needs a height axis'
timevalues = _get_dim_values(df,'time')
# If plot local time, shift timevalues
timedelta_to_local = None
if plot_local_time is not False:
timedelta_to_local = pd.to_timedelta(local_time_offset,'h')
timevalues = timevalues + timedelta_to_local
# Create list with available fields only
available_fields = _get_available_fieldnames(df,args.fields)
# Pivot all fields in a dataset at once
if timevalues is not None:
df_pivot = _get_pivot_table(df,'height',available_fields)
for j, field in enumerate(args.fields):
# If available_fields is [None,], fieldname is unimportant
if available_fields == [None]:
pass
# Else, check if field is available
elif not field in available_fields:
print('Warning: field "'+field+'" not available in dataset '+dfname)
continue
for k, time in enumerate(args.times):
plotting_properties = {}
# Axis order, label and title depend on value of stack_by_datasets
if stack_by_datasets:
# Index of axis corresponding to field j and time k
if args.fieldorder == 'C':
axi = j*ntimes + k
else:
axi = k*nfields + j
# Use datasetname as label
if showlegend:
plotting_properties['label'] = dfname
# Set title if multiple times are compared
if ntimes>1:
if isinstance(time, (int,float,np.number)):
tstr = '{:g} s'.format(time)
else:
if plot_local_time is False:
tstr = pd.to_datetime(time).strftime('%Y-%m-%d %H%M UTC')
elif plot_local_time is True:
tstr = pd.to_datetime(time).strftime('%Y-%m-%d %H:%M')
else:
assert isinstance(plot_local_time,str), 'Unexpected plot_local_time format'
tstr = pd.to_datetime(time).strftime(plot_local_time)
axv[axi].set_title(tstr, fontsize=16)
# Set color
plotting_properties['color'] = default_colors[i % len(default_colors)]
else:
# Index of axis corresponding to field j and dataset i
if args.fieldorder == 'C':
axi = j*ndatasets + i
else:
axi = i*nfields + j
# Use time as label
if showlegend:
if isinstance(time, (int,float,np.number)):
plotting_properties['label'] = '{:g} s'.format(time)
else:
if plot_local_time is False:
plotting_properties['label'] = pd.to_datetime(time).strftime('%Y-%m-%d %H%M UTC')
elif plot_local_time is True:
plotting_properties['label'] = pd.to_datetime(time).strftime('%Y-%m-%d %H:%M')
else:
assert isinstance(plot_local_time,str), 'Unexpected plot_local_time format'
plotting_properties['label'] = pd.to_datetime(time).strftime(plot_local_time)
# Set title if multiple datasets are compared
if ndatasets>1:
axv[axi].set_title(dfname,fontsize=16)
# Set colors
if cmap is not None:
cmap = mpl.cm.get_cmap(cmap)
plotting_properties['color'] = cmap(k/(ntimes-1))
else:
plotting_properties['color'] = default_colors[k % len(default_colors)]
# Extract data from dataframe
if timevalues is None:
# Dataset will not be pivoted
fieldvalues = _get_field(df,field).values
else:
if plot_local_time is not False:
# specified times are in local time, convert back to UTC
slice_t = _get_slice(df_pivot,time-timedelta_to_local,'time')
else:
slice_t = _get_slice(df_pivot,time,'time')
fieldvalues = _get_pivoted_field(slice_t,field).values.squeeze()
# Gather label, color, general options and dataset-specific options
# (highest priority to dataset-specific options, then general options)
try:
plotting_properties = {**plotting_properties,**kwargs,**datasetkwargs[dfname]}
except KeyError:
plotting_properties = {**plotting_properties,**kwargs}
# Plot data
try:
axv[axi].plot(fieldvalues,heightvalues,**plotting_properties)
except ValueError as e:
print(e,'--', time, 'not found in index?')
# Set field label if known
try:
axv[axi].set_xlabel(args.fieldlabels[field])
except KeyError:
pass
# Set field limits if specified
try:
axv[axi].set_xlim(args.fieldlimits[field])
except KeyError:
pass
for axi in axv:
axi.grid(True,which='both')
# Set height limits if specified
if not heightlimits is None:
axv[0].set_ylim(heightlimits)
# Add y labels
for r in range(nrows):
axv[r*ncols].set_ylabel(r'Height [m]')
# Align labels
_align_labels(fig,axv,nrows,ncols)
# Number sub figures as a, b, c, ...
if labelsubplots is not False:
try:
hoffset, voffset = labelsubplots
except (TypeError, ValueError):
hoffset, voffset = -0.14, -0.18
for i,axi in enumerate(axv):
axi.text(hoffset,voffset,'('+chr(i+97)+')',transform=axi.transAxes,size=16)
# Add legend
if showlegend:
leg = _format_legend(axv,index=ncols-1)
return fig,ax
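# Example usage of plot_profile (illustrative):
#   fig, ax = plot_profile({'WRF': df}, fields='theta',
#                          times=['2020-01-01 12:00', '2020-01-01 18:00'])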
def plot_spectrum(datasets,
fields=None,
height=None,
times=None,
fig=None,ax=None,
fieldlimits=None,
freqlimits=None,
fieldlabels={},
labelsubplots=False,
showlegend=None,
ncols=None,
subfigsize=(4,5),
datasetkwargs={},
**kwargs
):
"""
Plot frequency spectrum at a given height for different datasets,
time(s) and field(s), using a subplot per time and per field.
Note that this function does not interpolate to the requested height,
i.e., if height is not None, the specified value should be available
in all datasets.
Usage
=====
datasets : pandas.DataFrame or dict
Dataset(s) with spectrum data. If more than one set,
datasets should be a dictionary with entries
<dataset_name>: dataset
fields : str, list, 'all' (or None)
Fieldname(s) corresponding to particular column(s) of
the datasets. fields can be None if input are Series.
'all' means all fields will be plotted (in this case all
datasets should have the same fields)
height : float (or None)
Height for which frequency spectra is plotted. If datasets
have no height dimension, height does not need to be specified.
times : str, int, float, list (or None)
Time(s) for which frequency spectra are plotted, specified as
either datetime strings or numerical values (seconds, e.g.,
simulation time). times can be None if all datasets combined
have no more than one time value.
fig : figure handle
Custom figure handle. Should be specified together with ax
ax : axes handle, or list or numpy ndarray with axes handles
        Custom axes handle(s).
Size of ax should equal nfields * ntimes
fieldlimits : list or tuple, or dict
Value range for the various fields. If only one field is
plotted, fieldlimits can be a list or tuple. Otherwise, it
should be a dictionary with entries <fieldname>: fieldlimit.
Missing fieldlimits are set automatically
freqlimits : list or tuple
Frequency axis limits
fieldlabels : str or dict
Custom field labels. If only one field is plotted, fieldlabels
can be a string. Otherwise it should be a dictionary with
entries <fieldname>: fieldlabel
labelsubplots : bool, list or tuple
Label subplots as (a), (b), (c), ... If a list or tuple is given
their values should be the horizontal and vertical position
relative to each subaxis.
showlegend : bool (or None)
Label different plots and show legend. If None, showlegend is set
to True if legend will have more than one entry, otherwise it is
set to False.
ncols : int
Number of columns in axes grid, must be a true divisor of total
number of axes.
subfigsize : list or tuple
Standard size of subfigures
datasetkwargs : dict
Dataset-specific options that are passed on to the actual
plotting function. These options overwrite general options
specified through **kwargs. The argument should be a dictionary
with entries <dataset_name>: {**kwargs}
**kwargs : other keyword arguments
Options that are passed on to the actual plotting function.
Note that these options should be the same for all datasets,
fields and times, and they can not be used to set dataset,
field or time specific colors, limits, etc.
Example uses include setting linestyle/width, marker, etc.
"""
args = PlottingInput(
datasets=datasets,
fields=fields,
times=times,
fieldlimits=fieldlimits,
fieldlabels=fieldlabels,
)
nfields = len(args.fields)
ntimes = len(args.times)
ndatasets = len(args.datasets)
ntotal = nfields * ntimes
# Concatenate custom and standard field labels
# (custom field labels overwrite standard fields labels if existent)
args.fieldlabels = {**standard_spectrumlabels, **args.fieldlabels}
fig, ax, nrows, ncols = _create_subplots_if_needed(
ntotal,
ncols,
default_ncols=ntimes,
avoid_single_column=True,
sharex=True,
subfigsize=subfigsize,
wspace=0.3,
fig=fig,
ax=ax,
)
# Create flattened view of axes
axv = np.asarray(ax).reshape(-1)
# Set showlegend if not specified
if showlegend is None:
if ndatasets>1:
showlegend = True
else:
showlegend = False
# Loop over datasets, fields and times
for i, dfname in enumerate(args.datasets):
df = args.datasets[dfname]
frequencyvalues = _get_dim_values(df,'frequency',default_idx=True)
assert(frequencyvalues is not None), 'spectrum plot needs a frequency axis'
timevalues = _get_dim_values(df,'time')
# Create list with available fields only
available_fields = _get_available_fieldnames(df,args.fields)
for j, field in enumerate(args.fields):
# If available_fields is [None,], fieldname is unimportant
if available_fields == [None]:
pass
# Else, check if field is available
elif not field in available_fields:
print('Warning: field "'+field+'" not available in dataset '+dfname)
continue
for k, time in enumerate(args.times):
plotting_properties = {}
if showlegend:
plotting_properties['label'] = dfname
# Index of axis corresponding to field j and time k
axi = j*ntimes + k
# Axes mark up
if i==0 and ntimes>1:
axv[axi].set_title(pd.to_datetime(time).strftime('%Y-%m-%d %H%M UTC'),fontsize=16)
# Gather label, general options and dataset-specific options
# (highest priority to dataset-specific options, then general options)
try:
plotting_properties = {**plotting_properties,**kwargs,**datasetkwargs[dfname]}
except KeyError:
plotting_properties = {**plotting_properties,**kwargs}
# Get field spectrum
slice_t = _get_slice(df,time,'time')
slice_tz = _get_slice(slice_t,height,'height')
spectrum = _get_field(slice_tz,field).values
# Plot data
axv[axi].loglog(frequencyvalues[1:],spectrum[1:],**plotting_properties)
                # Set field limits if specified
try:
axv[axi].set_ylim(args.fieldlimits[field])
except KeyError:
pass
# Set frequency label
for c in range(ncols):
axv[ncols*(nrows-1)+c].set_xlabel('$f$ [Hz]')
    # Set field labels if known
for r in range(nrows):
try:
axv[r*ncols].set_ylabel(args.fieldlabels[args.fields[r]])
except KeyError:
pass
# Align labels
_align_labels(fig,axv,nrows,ncols)
# Set frequency limits if specified
if not freqlimits is None:
axv[0].set_xlim(freqlimits)
# Number sub figures as a, b, c, ...
if labelsubplots is not False:
try:
hoffset, voffset = labelsubplots
except (TypeError, ValueError):
hoffset, voffset = -0.14, -0.18
for i,axi in enumerate(axv):
axi.text(hoffset,voffset,'('+chr(i+97)+')',transform=axi.transAxes,size=16)
# Add legend
if showlegend:
leg = _format_legend(axv,index=ncols-1)
return fig, ax
# ---------------------------------------------
#
# DEFINITION OF AUXILIARY CLASSES AND FUNCTIONS
#
# ---------------------------------------------
class InputError(Exception):
"""Exception raised for errors in the input.
Attributes:
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
class PlottingInput(object):
"""
Auxiliary class to collect input data and options for plotting
functions, and to check if the inputs are consistent
"""
supported_datatypes = (
pd.Series,
pd.DataFrame,
xr.DataArray,
xr.Dataset,
)
def __init__(self, datasets, fields, **argd):
# Add all arguments as class attributes
self.__dict__.update({'datasets':datasets,
'fields':fields,
**argd})
# Check consistency of all attributes
self._check_consistency()
def _check_consistency(self):
"""
Check consistency of all input data
"""
# ----------------------
# Check dataset argument
# ----------------------
# If a single dataset is provided, convert to a dictionary
# under a generic key 'Dataset'
if isinstance(self.datasets, self.supported_datatypes):
self.datasets = {'Dataset': self.datasets}
for dfname,df in self.datasets.items():
# convert dataset types here
if isinstance(df, (xr.Dataset,xr.DataArray)):
# handle xarray datatypes
self.datasets[dfname] = df.to_dataframe()
columns = self.datasets[dfname].columns
if len(columns) == 1:
# convert to pd.Series
self.datasets[dfname] = self.datasets[dfname][columns[0]]
else:
assert(isinstance(df, self.supported_datatypes)), \
"Dataset {:s} of type {:s} not supported".format(dfname,str(type(df)))
# ----------------------
# Check fields argument
# ----------------------
# If no fields are specified, check that
# - all datasets are series
# - the name of every series is either None or matches other series names
if self.fields is None:
assert(all([isinstance(self.datasets[dfname],pd.Series) for dfname in self.datasets])), \
"'fields' argument must be specified unless all datasets are pandas Series"
series_names = set()
for dfname in self.datasets:
series_names.add(self.datasets[dfname].name)
if len(series_names)==1:
self.fields = list(series_names)
else:
raise InputError('attempting to plot multiple series with different field names')
elif isinstance(self.fields,str):
# If fields='all', retrieve fields from dataset
if self.fields=='all':
self.fields = _get_fieldnames(list(self.datasets.values())[0])
assert(all([_get_fieldnames(df)==self.fields for df in self.datasets.values()])), \
"The option fields = 'all' only works when all datasets have the same fields"
# If fields is a single instance, convert to a list
else:
self.fields = [self.fields,]
# ----------------------------------
# Check match of fields and datasets
# ----------------------------------
# Check if all datasets have at least one of the requested fields
for dfname in self.datasets:
df = self.datasets[dfname]
if isinstance(df,pd.DataFrame):
assert(any([field in df.columns for field in self.fields])), \
'DataFrame '+dfname+' does not contain any of the requested fields'
elif isinstance(df,pd.Series):
if df.name is None:
assert(len(self.fields)==1), \
                        'Series must have a name if more than one field is specified'
else:
assert(df.name in self.fields), \
'Series '+dfname+' does not match any of the requested fields'
# ---------------------------------
# Check heights argument (optional)
# ---------------------------------
try:
# If no heights are specified, check that all datasets combined have
# no more than one height value
if self.heights is None:
av_heights = set()
for df in self.datasets.values():
heightvalues = _get_dim_values(df,'height')
try:
for height in heightvalues:
av_heights.add(height)
except TypeError:
# heightvalues is None
pass
if len(av_heights)==0:
# None of the datasets have height values
self.heights = [None,]
elif len(av_heights)==1:
self.heights = list(av_heights)
else:
raise InputError("found more than one height value so 'heights' argument must be specified")
# If heights='all', retrieve heights from dataset
elif isinstance(self.heights,str) and self.heights=='all':
self.heights = _get_dim_values(list(self.datasets.values())[0],'height')
assert(all([np.allclose(_get_dim_values(df,'height'),self.heights) for df in self.datasets.values()])), \
"The option heights = 'all' only works when all datasets have the same vertical levels"
# If heights is single instance, convert to list
elif isinstance(self.heights,(int,float)):
self.heights = [self.heights,]
except AttributeError:
pass
# -----------------------------------
# Check timerange argument (optional)
# -----------------------------------
try:
if self.timerange is not None:
if self.times is not None:
print('Using specified time range',self.timerange,
'and ignoring',self.times)
assert isinstance(self.timerange,(tuple,list)), \
'Need to specify timerange as (starttime,endtime)'
assert (len(self.timerange) == 2)
try:
starttime = pd.to_datetime(self.timerange[0])
endtime = pd.to_datetime(self.timerange[1])
except ValueError:
print('Unable to convert timerange to timestamps')
else:
# get unique times from all datasets
alltimes = []
for df in self.datasets.values():
alltimes += list(_get_dim_values(df,'time'))
alltimes = pd.DatetimeIndex(np.unique(alltimes))
inrange = (alltimes >= starttime) & (alltimes <= endtime)
self.times = alltimes[inrange]
except AttributeError:
pass
# ---------------------------------
# Check times argument (optional)
# ---------------------------------
# If times is single instance, convert to list
try:
# If no times are specified, check that all datasets combined have
# no more than one time value
if self.times is None:
av_times = set()
for df in self.datasets.values():
timevalues = _get_dim_values(df,'time')
try:
for time in timevalues.values:
av_times.add(time)
except AttributeError:
pass
if len(av_times)==0:
# None of the datasets have time values
self.times = [None,]
elif len(av_times)==1:
self.times = list(av_times)
else:
raise InputError("found more than one time value so 'times' argument must be specified")
elif isinstance(self.times,(str,int,float,np.number,pd.Timestamp)):
self.times = [self.times,]
except AttributeError:
pass
# -------------------------------------
# Check fieldlimits argument (optional)
# -------------------------------------
# If one set of fieldlimits is specified, check number of fields
# and convert to dictionary
try:
if self.fieldlimits is None:
self.fieldlimits = {}
elif isinstance(self.fieldlimits, (list, tuple)):
assert(len(self.fields)==1), 'Unclear to what field fieldlimits corresponds'
self.fieldlimits = {self.fields[0]:self.fieldlimits}
except AttributeError:
self.fieldlimits = {}
# -------------------------------------
# Check fieldlabels argument (optional)
# -------------------------------------
# If one fieldlabel is specified, check number of fields
try:
if isinstance(self.fieldlabels, str):
assert(len(self.fields)==1), 'Unclear to what field fieldlabels corresponds'
self.fieldlabels = {self.fields[0]: self.fieldlabels}
except AttributeError:
self.fieldlabels = {}
# -------------------------------------
# Check colorscheme argument (optional)
# -------------------------------------
# If one colorscheme is specified, check number of fields
try:
self.cmap = {}
if isinstance(self.colorschemes, str):
assert(len(self.fields)==1), 'Unclear to what field colorschemes corresponds'
self.cmap[self.fields[0]] = mpl.cm.get_cmap(self.colorschemes)
else:
# Set missing colorschemes to viridis
for field in self.fields:
if field not in self.colorschemes.keys():
if field == 'wdir':
self.colorschemes[field] = 'twilight'
else:
self.colorschemes[field] = 'viridis'
self.cmap[field] = mpl.cm.get_cmap(self.colorschemes[field])
except AttributeError:
pass
# -------------------------------------
# Check fieldorder argument (optional)
# -------------------------------------
# Make sure fieldorder is recognized
try:
assert(self.fieldorder in ['C','F']), "Error: fieldorder '"\
+self.fieldorder+"' not recognized, must be either 'C' or 'F'"
except AttributeError:
pass
def set_missing_fieldlimits(self):
"""
Set missing fieldlimits to min and max over all datasets
"""
for field in self.fields:
if field not in self.fieldlimits.keys():
try:
self.fieldlimits[field] = [
min([_get_field(df,field).min() for df in self.datasets.values() if _contains_field(df,field)]),
max([_get_field(df,field).max() for df in self.datasets.values() if _contains_field(df,field)])
]
except ValueError:
self.fieldlimits[field] = [None,None]
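# --- Editor's hedged illustration (not part of the original module), guarded so it only runs
# when this file is executed directly: PlottingInput wraps a bare pandas Series into the
# {'Dataset': series} dictionary form and infers the field name from the series name.
if __name__ == "__main__":
    _demo_series = pd.Series([1.0, 2.0, 3.0], name='wspd')
    _demo_input = PlottingInput(datasets=_demo_series, fields=None)
    print(list(_demo_input.datasets.keys()), _demo_input.fields)  # ['Dataset'] ['wspd']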
def _get_dim(df,dim,default_idx=False):
"""
Search for specified dimension in dataset and return
level (referred to by either label or position) and
axis {0 or ‘index’, 1 or ‘columns’}
If default_idx is True, return a single unnamed index
if present
"""
assert(dim in dimension_names.keys()), \
"Dimension '"+dim+"' not supported"
# 1. Try to find dim based on name
for name in dimension_names[dim]:
if name in df.index.names:
if debug: print("Found "+dim+" dimension in index with name '{}'".format(name))
return name, 0
else:
try:
if name in df.columns:
if debug: print("Found "+dim+" dimension in column with name '{}'".format(name))
return name, 1
except AttributeError:
# pandas Series has no columns
pass
# 2. Look for Datetime or Timedelta index
if dim=='time':
for idx in range(len(df.index.names)):
if isinstance(df.index.get_level_values(idx),(pd.DatetimeIndex,pd.TimedeltaIndex,pd.PeriodIndex)):
if debug: print("Found "+dim+" dimension in index with level {} without a name ".format(idx))
return idx, 0
# 3. If default index is True, assume that a
# single nameless index corresponds to the
# requested dimension
if (not isinstance(df.index,(pd.MultiIndex,pd.DatetimeIndex,pd.TimedeltaIndex,pd.PeriodIndex))
and default_idx and (df.index.name is None) ):
if debug: print("Assuming nameless index corresponds to '{}' dimension".format(dim))
return 0,0
    # 4. Did not find requested dimension
if debug: print("Found no "+dim+" dimension")
return None, None
def _get_available_fieldnames(df,fieldnames):
"""
Return subset of fields available in df
"""
available_fieldnames = []
if isinstance(df,pd.DataFrame):
for field in fieldnames:
if field in df.columns:
available_fieldnames.append(field)
# A Series only has one field, so return that field name
# (if that field is not in fields, an error would have been raised)
elif isinstance(df,pd.Series):
available_fieldnames.append(df.name)
return available_fieldnames
def _get_fieldnames(df):
"""
Return list of fieldnames in df
"""
if isinstance(df,pd.DataFrame):
fieldnames = list(df.columns)
# Remove any column corresponding to
# a dimension (time, height or frequency)
for dim in dimension_names.keys():
name, axis = _get_dim(df,dim)
if axis==1:
fieldnames.remove(name)
return fieldnames
elif isinstance(df,pd.Series):
return [df.name,]
def _contains_field(df,fieldname):
if isinstance(df,pd.DataFrame):
return fieldname in df.columns
elif isinstance(df,pd.Series):
return (df.name is None) or (df.name==fieldname)
def _get_dim_values(df,dim,default_idx=False):
"""
Return values for a given dimension
"""
level, axis = _get_dim(df,dim,default_idx)
# Requested dimension is an index
if axis==0:
return df.index.get_level_values(level).unique()
# Requested dimension is a column
elif axis==1:
return df[level].unique()
# Requested dimension not available
else:
return None
def _get_pivot_table(df,dim,fieldnames):
"""
Return pivot table with given fieldnames as columns
"""
level, axis = _get_dim(df,dim)
# Unstack an index
if axis==0:
return df.unstack(level=level)
# Pivot about a column
elif axis==1:
return df.pivot(columns=level,values=fieldnames)
# Dimension not found, return dataframe
else:
return df
def _get_slice(df,key,dim):
"""
Return cross-section of dataset
"""
if key is None:
return df
# Get dimension level and axis
level, axis = _get_dim(df,dim)
# Requested dimension is an index
if axis==0:
if isinstance(df.index,pd.MultiIndex):
return df.xs(key,level=level)
else:
return df.loc[df.index==key]
# Requested dimension is a column
elif axis==1:
return df.loc[df[level]==key]
# Requested dimension not available, return dataframe
else:
return df
def _get_field(df,fieldname):
"""
Return field from dataset
"""
if isinstance(df,pd.DataFrame):
return df[fieldname]
elif isinstance(df,pd.Series):
if df.name is None or df.name==fieldname:
return df
else:
return None
def _get_pivoted_field(df,fieldname):
"""
Return field from pivoted dataset
"""
if isinstance(df.columns,pd.MultiIndex):
return df[fieldname]
else:
return df
def _create_subplots_if_needed(ntotal,
ncols=None,
default_ncols=1,
fieldorder='C',
avoid_single_column=False,
sharex=False,
sharey=False,
subfigsize=(12,3),
wspace=0.2,
hspace=0.2,
fig=None,
ax=None
):
"""
Auxiliary function to create fig and ax
If fig and ax are None:
- Set nrows and ncols based on ntotal and specified ncols,
accounting for fieldorder and avoid_single_column
- Create fig and ax with nrows and ncols, taking into account
sharex, sharey, subfigsize, wspace, hspace
If fig and ax are not None:
- Try to determine nrows and ncols from ax
- Check whether size of ax corresponds to ntotal
"""
if ax is None:
if not ncols is None:
# Use ncols if specified and appropriate
assert(ntotal%ncols==0), 'Error: Specified number of columns is not a true divisor of total number of subplots'
nrows = int(ntotal/ncols)
else:
            # Default number of columns
ncols = default_ncols
nrows = int(ntotal/ncols)
if fieldorder=='F':
# Swap number of rows and columns
nrows, ncols = ncols, nrows
if avoid_single_column and ncols==1:
# Swap number of rows and columns
nrows, ncols = ncols, nrows
# Create fig and ax with nrows and ncols
fig,ax = plt.subplots(nrows=nrows,ncols=ncols,sharex=sharex,sharey=sharey,figsize=(subfigsize[0]*ncols,subfigsize[1]*nrows))
# Adjust subplot spacing
fig.subplots_adjust(wspace=wspace,hspace=hspace)
else:
# Make sure user-specified axes has appropriate size
assert(np.asarray(ax).size==ntotal), 'Specified axes does not have the right size'
# Determine nrows and ncols in specified axes
if isinstance(ax,mpl.axes.Axes):
nrows, ncols = (1,1)
else:
try:
nrows,ncols = np.asarray(ax).shape
except ValueError:
# ax array has only one dimension
# Determine whether ax is single row or single column based
# on individual ax positions x0 and y0
x0s = [axi.get_position().x0 for axi in ax]
y0s = [axi.get_position().y0 for axi in ax]
if all(x0==x0s[0] for x0 in x0s):
# All axis have same relative x0 position
nrows = np.asarray(ax).size
ncols = 1
elif all(y0==y0s[0] for y0 in y0s):
# All axis have same relative y0 position
nrows = 1
ncols = np.asarray(ax).size
else:
# More complex axes configuration,
# currently not supported
raise InputError('could not determine nrows and ncols in specified axes, complex axes configuration currently not supported')
return fig, ax, nrows, ncols
def _format_legend(axv,index):
"""
Auxiliary function to format legend
Usage
=====
axv : numpy 1d array
Flattened array of axes
index : int
Index of the axis where to place the legend
"""
all_handles = []
all_labels = []
# Check each axes and add new handle
for axi in axv:
handles, labels = axi.get_legend_handles_labels()
for handle,label in zip(handles,labels):
if not label in all_labels:
all_labels.append(label)
all_handles.append(handle)
leg = axv[index].legend(all_handles,all_labels,loc='upper left',bbox_to_anchor=(1.05,1.0),fontsize=16)
return leg
def _format_time_axis(fig,ax,
plot_local_time,
local_time_offset,
timelimits
):
"""
Auxiliary function to format time axis
"""
ax[-1].xaxis_date()
if timelimits is not None:
timelimits = [ | pd.to_datetime(tlim) | pandas.to_datetime |
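# --- Editor's hedged usage sketch for plot_spectrum() above (not part of the original source).
# It assumes the function and its module-level helpers (pandas/numpy/matplotlib imports,
# dimension_names, standard_spectrumlabels) are in scope; the synthetic -5/3-slope data and the
# field name 'TKE' are purely illustrative.
import numpy as np
import pandas as pd
demo_freq = np.logspace(-3, 0, 50)
demo_times = pd.date_range('2020-01-01', periods=2, freq='10min')
demo_index = pd.MultiIndex.from_product([demo_times, demo_freq], names=['time', 'frequency'])
demo_df = pd.DataFrame({'TKE': np.tile(demo_freq**(-5.0/3.0), len(demo_times))}, index=demo_index)
fig, ax = plot_spectrum({'demo': demo_df}, fields='TKE', times=list(demo_times))
fig.savefig('spectrum_demo.png')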
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor,RandomForestClassifier,IsolationForest
# from sklearn.feature_selection import SelectKBest
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import data
# first column of cols is the target (y); the remaining columns are the features (x)
def randomForesetSetValue(df,cols , regression=True):
data = df[cols]
target = cols[0]
train_data = data[data[target].notnull()]
test_data = data[data[target].isnull()]
x,y =train_data.values[:,1:], train_data.values[:,0]
if regression == True:
model = RandomForestRegressor(n_estimators=200)
else:
model = RandomForestClassifier(n_estimators=200)
model.fit(x,y)
target_predict = model.predict(test_data.values[:,1:])
df.loc[data[target].isnull(),target] = target_predict
def drop_col(df):
return df.drop(['PassengerId','Cabin','Name','Ticket'],axis=1)
def fill_missing_data(df , is_train=True ,sex_cat=False, embarked_one_hot=False):
print("preprocess data")
# df = drop_col(df)
if sex_cat:
df['Sex'] = df.Sex.map({'female':0,'male':1}).astype(int)
    # Fare should be > 0; fill missing Fare per Pclass using that class's median fare
if len(df.Fare[df.Fare.isnull()]) > 0:
fare = np.zeros(3)
for f in range(0, 3):
fare[f] = df[df.Pclass == f + 1]['Fare'].dropna().median()
for f in range(0, 3): # loop 0 to 2
df.loc[(df.Fare.isnull()) & (df.Pclass == f + 1), 'Fare'] = fare[f]
    # Default missing Embarked values to 'S'
df.loc[(df.Embarked.isnull()), 'Embarked'] = 'S'
if embarked_one_hot:
df['Embarked'] = df['Embarked'].map({'S': 0, 'C': 1, 'Q': 2, 'U': 0}).astype(int)
embarked_data = | pd.get_dummies(df.Embarked) | pandas.get_dummies |
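# --- Editor's hedged usage sketch for the Titanic helpers above (not part of the original
# script), assuming they are in scope: the file path 'train.csv' and the column list are
# assumptions based on the standard Kaggle Titanic columns (Survived, Pclass, Sex, Age, Fare, Embarked).
import pandas as pd
train = pd.read_csv('train.csv')
fill_missing_data(train, sex_cat=True)                                            # fills Fare/Embarked in place, encodes Sex
randomForesetSetValue(train, ['Age', 'Fare', 'Pclass', 'Sex'], regression=True)   # imputes missing Age values
print(train[['Age', 'Fare', 'Embarked']].isnull().sum())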
import regex as re
import emoji
import datetime
import pandas as pd
def deEmojify(txt):
"""
    Given an input string, remove all emojis, lowercase the remaining
    characters, and return the cleaned string
"""
converted=""
for each in txt:
if each not in emoji.UNICODE_EMOJI:
converted = converted + each.lower()
return converted
def remove_unwanted_texts(txt):
"""
    Messages removed are of the following types:
'<media omitted>': Used by whatsapp to tell where a file was uploaded
"(file attached)": Also Used by whatsapp to tell where a file was uploaded
"this message was deleted": Used by whatsapp to tell where other users deleted a message
"you message was deleted": Used by whatsapp to tell where user deleted a message
"""
if (txt != '<media omitted>') & (txt.find("(file attached)")==-1) & (txt !="this message was deleted") & (txt!="you deleted this message"):
return 1
else:
return 0
def correct_dateformat(data):
"""
    The function takes in a dataframe and converts its Date column from
    string format to a DateTime format.
"""
for i in range(len(data.Date.iloc[:])):
if (len(data.Date.iloc[i])==10):
data.Date.iloc[i] = pd.to_datetime(data.Date.iloc[i], format="%d/%m/%Y")
else:
data.Date.iloc[i] = | pd.to_datetime(data.Date.iloc[i], format="%m/%d/%y") | pandas.to_datetime |
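# --- Editor's hedged usage sketch for the chat-cleaning helpers above (not part of the
# original script), assuming they are in scope, that the export has already been parsed into
# 'Date'/'Message' columns, and that the installed emoji/pandas versions match those the
# original code targets (emoji.UNICODE_EMOJI, chained .iloc assignment).
import pandas as pd
chat = pd.DataFrame({
    'Date': ['31/12/2021', '1/1/22', '1/1/22'],
    'Message': ['Happy new year 🎉', 'Thanks!', '<Media omitted>'],
})
chat['Message'] = chat['Message'].apply(deEmojify)
chat = chat[chat['Message'].apply(remove_unwanted_texts) == 1].reset_index(drop=True)
correct_dateformat(chat)
print(chat)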
import pandas as pd
import numpy as np
import io
from AutoPreProcessing import FeatureType
from AutoPreProcessing import WOE_IV
from jinja2 import Template
from jinja2 import Environment, FileSystemLoader
import random
from scipy.stats import chi2_contingency
import seaborn as sns
import matplotlib.pyplot as plt
import scipy.stats as stats
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.multicomp import MultiComparison
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.base import BaseEstimator, TransformerMixin
#from sklearn.preprocessing import Imputer
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.preprocessing import MinMaxScaler
import time
import os
class EDA:
def __init__(self,df,CategoricalFeatures=[],filename="index.html",VIF_threshold=5,debug='YES'):
''' Constructor for this class. '''
self.df = df
self.df.columns = [col.replace(" ", "_") for col in df.columns]
self.df.columns = [col.replace("(", "_") for col in df.columns]
self.df.columns = [col.replace(")", "_") for col in df.columns]
self.SelectedColors = ["#5D535E", "#9A9EAB","#DFE166","#D9B44A","#4F6457","#4B7447","#8EBA43","#73605B","#D09683","#6E6702","#C05805","#DB9501","#50312F","#E4EA8C","#3F6C45","#B38867","#F69454","#A1BE95","#92AAC7","#FA6E59","#805A3B","#7F152E"]
self.AllColors = ["#f2e1df","#ead6d5","#e3ccca","#dbc2c0","#d4b8b5","#ccaeaa","#c5a3a0","#bd9995","#b68f8b","#ae8480","#a77a75","#a0706b","#986660","#915c56","#89514b","#824740",
"#7a3d36","#73322b","#6b2821","#641e16","#fdedec","#f5e3e1","#eed8d7","#e6cecc","#dec4c1","#d7b9b6","#cfafac","#c7a5a1","#c09a96","#b8908c","#b08681","#a97b76",
"#a1716c","#9a6661","#925c56","#8a524c","#834741","#7b3d36","#73332b","#6c2821","#641e16","#ebe2ef","#e3d8e7","#dacedf","#d2c4d8","#cabad0","#c1b0c8","#b8a6c0",
"#b09cb8","#a892b0","#9f88a8","#967da1","#8e7399","#866991","#7d5f89","#745581","#6c4b79","#644172","#5b376a","#522d62","#4a235a","#dfe9f0","#d5e0e9","#cad8e1",
"#bfcfda","#b5c6d2","#aabdca","#9fb5c3","#95acbb","#8aa3b4","#809aac","#7592a4","#6a899d","#608095","#55788e","#4a6f86","#40667e","#355d77","#2a546f","#204c68",
"#154360","#e1edf4","#d6e4ed","#ccdce6","#c1d4e0","#b7ccd9","#adc3d2","#a2bbcb","#98b3c4","#8daabd","#83a2b6","#799ab0","#6e91a9","#6489a2","#59819b","#4f7894",
"#45708d","#3a6887","#306080","#255779","#1b4f72","#ddf0ed","#d2e9e5","#c7e1dc","#bcdad4","#b2d2cc","#a7cbc4","#9cc4bc","#91bcb3","#86b4ab","#7bada3","#70a69b",
"#659e93","#5a968a","#4f8f82","#44887a","#3a8072","#2f786a","#247161","#196a59","#0e6251","#ddeeea","#d2e6e2","#c7ded9","#bcd5d0","#b1cdc8","#a6c5bf","#9bbdb6",
"#90b5ad","#85ada5","#7aa49c","#6e9c93","#63948b","#588c82","#4d8479","#427c70","#377468","#2c6b5f","#216356","#165b4e","#0b5345","#deefe6","#d4e7dc","#c9dfd3",
"#bed8c9","#b4d0c0","#a9c8b6","#9ec0ad","#94b8a3","#89b09a","#7ea890","#74a187","#69997e","#5f9174","#54896b","#498161","#3f7958","#34724e","#296a45","#1f623b",
"#145a32","#dff3e8","#d5ecdf","#cae4d6","#c0ddcd","#b6d6c4","#abcfba","#a0c8b1","#96c0a8","#8cb99f","#81b296","#76ab8d","#6ca484","#629c7b","#579572","#4c8e68",
"#42875f","#388056","#2d784d","#237144","#186a3b","#f9f3dc","#f4edd1","#efe6c6","#eae0bb","#e5dab0","#e0d4a5","#dbce9a","#d6c78f","#d1c184","#ccbb78","#c7b56d",
"#c2af62","#bda857","#b8a24c","#b39c41","#ae9636","#a9902b","#a48920","#9f8315","#9a7d0a","#7D6608","#f9eedc","#f4e6d1","#efdfc6","#ead8bb","#e6d1b0","#e1caa5",
"#dcc29a","#d7bb8f","#d2b484","#cdac7a","#c8a56f","#c39e64","#be9759","#b9904e","#b48843","#b08138","#ab7a2d","#a67222","#a16b17","#9c640c","#f6e9de","#f0e0d4",
"#e9d8c9","#e2cfbe","#dcc6b3","#d5bda8","#ceb49e","#c8ac93","#c1a388","#ba9a7e","#b49173","#ad8868","#a7805d","#a07752","#996e48","#93653d","#8c5c32","#855427",
"#7f4b1d","#784212","#f4e4da","#eddbcf","#e6d1c4","#dfc7b8","#d8beac","#d1b4a1","#caaa96","#c3a08a","#bc977f","#b48d73","#ad8367","#a67a5c","#9f7050","#986645",
"#915c3a","#8a532e","#834923","#7c3f17","#75360c","#6e2c00","#e1e3e5","#d6d9dc","#cccfd2","#c1c5c9","#b7bbc0","#adb1b6","#a2a7ac","#989da3","#8d939a","#838a90",
"#798086","#6e767d","#646c74","#59626a","#4f5860","#454e57","#3a444e","#303a44","#25303b","#1b2631","#dfe2e4","#d5d8da","#cacdd1","#c0c3c7","#b5b9bd","#abafb3",
"#a0a5a9","#969aa0","#8b9096","#80868c","#767c82","#6b7278","#61676f","#565d65","#4c535b","#414951","#373f47","#2c343e","#222a34","#17202a"]
featureType = FeatureType.FeatureType(df,CategoricalFeatures)
self.CategoricalFeatures = featureType.CategoricalFeatures()
self.NonCategoricalFeatures = featureType.NonCategoricalFeatures()
self.ContinuousFeatures = featureType.ContinuousFeatures()
self.OtherFeatures = featureType.OtherFeatures()
self.BinaryCategoricalFeatures = featureType.BinaryCategoricalFeatures()
self.NonBinaryCategoricalFeatures = featureType.NonBinaryCategoricalFeatures()
self.filename = filename
self.VIF_threshold = VIF_threshold
self.debug = debug
def EDAToHTML(self,title='EDA',out=None):
filename = 'HTMLTemplate\\dist\\HTMLTemplate_V2.html'
this_dir, this_filename = os.path.split(__file__)
Template_PATH = os.path.join(this_dir, filename)
# print(DATA_PATH)
# templateLoader = FileSystemLoader(searchpath="./")
# templateEnv = Environment(loader=templateLoader)
# template = templateEnv.get_template(filename)
with open(Template_PATH) as file:
template = Template(file.read())
# #print(self.std_variance())
CorrList, ColumnNames = self.CorrList()
# #transformer = VIF(VIF_threshold = self.VIF_threshold)
# #print(transformer.fit_transform(self.df[self.ContinuousFeatures]))
# out_filename = 'HTMLTemplate/dist/'+self.filename
if(out):
out_filename = out
else:
# out_filename = './HTMLTemplate/dist/result.html'
out_filename = os.path.join(this_dir, 'HTMLTemplate\\dist\\result.html')
html = template.render(title = title
,ListOfFields = self.ListOfFields()
,CategoricalFeatures = self.CategoricalFeatures
,OtherFeatures = self.OtherFeatures
,ContinuousFeatures = self.ContinuousFeatures
,BinaryCategoricalFeatures = self.BinaryCategoricalFeatures
,NonBinaryCategoricalFeatures = self.NonBinaryCategoricalFeatures
,FeatureTypes = self.CategoricalVsContinuous()
,CategoriesCount = self.CategoriesCount()
,WOEList = self.WOEList()
,ContinuousSummary = self.ContinuousSummary()
,CorrList = CorrList
,ColumnNames = ColumnNames
,AnovaList = self.Anova()
#,VIF_columns = transformer.fit_transform(self.df[self.ContinuousFeatures])
,VIF_columns = self.VIF()
#,VIF_threshold = self.VIF_threshold
,Variance = self.std_variance()
,NullValue = pd.DataFrame(round(self.df.isnull().sum()/self.df.shape[0],3)).reset_index().rename(columns={'index': 'Feature',0:'NullPercentage'})
)
with io.open(out_filename, mode='w', encoding='utf-8') as f:
f.write(html)
import webbrowser
url = 'file://'+out_filename
webbrowser.open(url, new=2)
return out_filename
def ListOfFields(self):
start = time.time()
NameTypeDict = []
for name in list(self.df.columns.values):
item = dict(name = name, type=self.df[name].dtype)
NameTypeDict.append(item)
end = time.time()
if self.debug == 'YES':
print("ListOfFields",end - start)
return NameTypeDict
def CategoricalVsContinuous(self):
start = time.time()
# Choose 3 random colors from Selected Colors
indices = random.sample(range(len(self.SelectedColors)), 3)
colors=[self.SelectedColors[i] for i in sorted(indices)]
FeatureTypes = []
FeatureTypes.append(dict(Name = 'Categorical', Value = len(self.CategoricalFeatures), Color=colors[0]))
FeatureTypes.append(dict(Name = 'Continuous', Value = len(self.ContinuousFeatures), Color=colors[1]))
FeatureTypes.append(dict(Name = 'Others', Value = len(self.OtherFeatures), Color=colors[2]))
end = time.time()
if self.debug == 'YES':
print("CategoricalVsContinuous",end - start)
return (FeatureTypes)
def getRandomColors(self,no_of_colors):
start = time.time()
colors = []
for i in range(0,no_of_colors):
color = (random.randint(0,255),random.randint(0,255),random.randint(0,255))
colors.append('#%02x%02x%02x' % color)
end = time.time()
if self.debug == 'YES':
print('CategoricalVsContinuous',end-start)
return colors
def CategoriesCount(self):
start = time.time()
CategoricalFeatures = self.CategoricalFeatures
CategoriesCount = []
for var in CategoricalFeatures:
df = self.df[var].groupby(self.df[var]).agg(['count'])
df.index.names = ['Name']
df.columns = ['Value']
if df.shape[0] > len(self.SelectedColors):
if df.shape[0] > len(self.AllColors):
colors = self.getRandomColors(df.shape[0])
else:
indices = random.sample(range(len(self.AllColors)), (df.shape[0]))
colors=[self.AllColors[i] for i in sorted(indices)]
else:
indices = random.sample(range(len(self.SelectedColors)), (df.shape[0]))
colors=[self.SelectedColors[i] for i in sorted(indices)]
df['Color'] = colors
CategoriesCount.append(dict(Variable = var, Count = df))
end = time.time()
if self.debug == 'YES':
print('CategoriesCount',end-start)
return CategoriesCount
def WOEList (self):
start = time.time()
woe = WOE_IV.WOE()
WOEList = []
InsightStat = "The variable \"{0}\" is {1} of the variable \"{2}\"."
ChiSqInsight = "With the confidence limit of 0.05, the variable \"{0}\" is statistically {1} the variable \"{2}\""
for DependentVar in self.CategoricalFeatures:
for IndependentVar in self.CategoricalFeatures:
if DependentVar != IndependentVar:
# Update Weight Of Evidence(WOE) and Information Value (IV)
if DependentVar in self.BinaryCategoricalFeatures:
WOE,IV = woe.woe_single_x(self.df[IndependentVar],self.df[DependentVar],event=self.df[DependentVar].unique()[0])
if IV >= 0.3:
IVInsight = InsightStat.format(IndependentVar,"strong predictor",DependentVar)
elif IV >= 0.1:
IVInsight = InsightStat.format(IndependentVar,"medium predictor",DependentVar)
elif IV >= 0.02:
IVInsight = InsightStat.format(IndependentVar,"weak predictor",DependentVar)
else:
IVInsight = InsightStat.format(IndependentVar,"very poor predictor",DependentVar)
EntryPresent = False
for entry in WOEList:
if entry['DependentVar'] == DependentVar and entry['IndependentVar'] == IndependentVar:
entry['WOE'] = WOE
entry['IV'] = IV
entry['IVInsight'] = IVInsight
EntryPresent = True
if EntryPresent == False:
item = dict(DependentVar = DependentVar, IndependentVar = IndependentVar, WOE = WOE, IV = round(IV,2), IVInsight=IVInsight, ChiSq = 0, PValue = 0)
WOEList.append(item)
else:
WOE = dict()
IV = 0
# Update ChiSq and PValue
EntryPresent = False
for entry in WOEList:
if (entry['DependentVar'] == DependentVar and entry['IndependentVar'] == IndependentVar) or (entry['DependentVar'] == IndependentVar and entry['IndependentVar'] == DependentVar ):
EntryPresent = True
if entry['ChiSq'] == 0:
ChiSq,PValue = self.ChiSquareOfDFCols(DependentVar,IndependentVar)
ChiSqInsight = ChiSqInsight.format(DependentVar, "dependent on", IndependentVar) if PValue <= 0.05 else ChiSqInsight.format(DependentVar, "independent from", IndependentVar)
WOEList = self.UpdateChiSq(WOEList,DependentVar, IndependentVar, ChiSq,PValue,ChiSqInsight)
if EntryPresent == False:
ChiSq,PValue = self.ChiSquareOfDFCols(DependentVar,IndependentVar)
ChiSqInsight = ChiSqInsight.format(DependentVar, "dependent on", IndependentVar) if PValue <= 0.05 else ChiSqInsight.format(DependentVar, "independent from", IndependentVar)
item = dict(DependentVar = DependentVar, IndependentVar = IndependentVar, WOE = dict(), IV = 0, IVInsight = "", ChiSq = round(ChiSq,2), PValue = PValue, ChiSqInsight = ChiSqInsight)
WOEList.append(item)
item = dict(DependentVar = IndependentVar, IndependentVar = DependentVar, WOE = dict(), IV = 0, IVInsight = "", ChiSq = round(ChiSq,2), PValue = PValue, ChiSqInsight = ChiSqInsight)
WOEList.append(item)
end = time.time()
if self.debug == 'YES':
print('WOEList',end-start)
return WOEList
def UpdateChiSq(self,WOEList,DependentVar, IndependentVar, ChiSq, PValue, ChiSqInsight):
start = time.time()
for entry in WOEList:
if entry['DependentVar'] == DependentVar and entry['IndependentVar'] == IndependentVar and entry['ChiSq'] == 0:
entry['ChiSq'] = ChiSq
entry['PValue'] = PValue
entry['ChiSqInsight'] = ChiSqInsight
if entry['DependentVar'] == IndependentVar and entry['IndependentVar'] == DependentVar and entry['ChiSq'] == 0:
entry['ChiSq'] = ChiSq
entry['PValue'] = PValue
entry['ChiSqInsight'] = ChiSqInsight
end = time.time()
if self.debug == 'YES':
print('UpdateChiSq',end-start)
return WOEList
def ChiSquareOfDFCols(self, c1, c2):
start = time.time()
groupsizes = self.df.groupby([c1, c2]).size()
ctsum = groupsizes.unstack(c1)
end = time.time()
if self.debug == 'YES':
print('ChiSquareOfDFCols',end-start)
return(list(chi2_contingency(ctsum.fillna(0)))[0:2])
def ContinuousSummary(self):
start = time.time()
df = self.df[self.ContinuousFeatures]
df = df.describe().transpose()
VariableDetails = []
for key,value in df.iterrows():
Edges, Hist, HistValues, PDF, Color1, Color2 = self.HistChart(key)
VariableDetails.append(dict(Name = key
,Count = value['count']
,Mean = value['mean']
,STD = value['std']
,Min = value['min']
,TwentyFive = value['25%']
,Fifty = value['50%']
,SeventyFive = value['75%']
,Max = value['max']
,Median = self.df[key].median()
,ImageFileName = self.BoxPlot(key)
,Hist = Hist
,HistValues = HistValues
,Edges = Edges
,PDF = PDF
,Color1 = Color1
,Color2 = Color2
,Variance = np.var(self.df[key])
))
end = time.time()
if self.debug == 'YES':
print('ContinuousSummary',end-start)
return VariableDetails
def BoxPlot(self,var):
start = time.time()
fig, ax = plt.subplots()
ax = sns.boxplot(y=self.df[var], ax=ax)
box = ax.artists[0]
indices = random.sample(range(len(self.SelectedColors)), 2)
colors=[self.SelectedColors[i] for i in sorted(indices)]
box.set_facecolor(colors[0])
box.set_edgecolor(colors[1])
sns.despine(offset=10, trim=True)
this_dir, this_filename = os.path.split(__file__)
OutFileName = os.path.join(this_dir, 'HTMLTemplate/dist/output/'+var + '.png')
plt.savefig(OutFileName)
end = time.time()
if self.debug == 'YES':
print('BoxPlot',end-start)
return OutFileName
def HistChart (self, var):
start = time.time()
h = list(self.df[var].dropna())
hist, edges = np.histogram(h, density=True, bins=50)
histValues, edgesValues = np.histogram(h, density=False, bins=50)
h.sort()
hmean = np.mean(h)
hstd = np.std(h)
pdf = stats.norm.pdf(edges, hmean, hstd)
hist = ','.join([str(round(x,5)) for x in hist])
histValues = ','.join([str(x) for x in histValues])
edges = ','.join([str(x) for x in edges])
pdf = ','.join([str(round(x,5)) for x in pdf])
indices = random.sample(range(len(self.SelectedColors)), 2)
colors=[self.SelectedColors[i] for i in sorted(indices)]
end = time.time()
if self.debug == 'YES':
print('HistChart',end-start)
return edges, hist, histValues, pdf, colors[0], colors[1]
def CorrList (self):
start = time.time()
df = self.df[self.ContinuousFeatures]
CorrDf = df.corr()
CorrList = []
MasterList = []
for col in CorrDf.columns:
for index,row in CorrDf.iterrows():
CorrList.append(row[col])
MasterList.append(','.join([str(round(x,4)) for x in CorrList]))
CorrList = []
end = time.time()
if self.debug == 'YES':
print('CorrList',end-start)
return MasterList, ','.join("'{0}'".format(x) for x in CorrDf.columns)
def Anova(self):
"""
Calculate the F-Score (One Way Anova) for each of Categorical Variables with all the Continuous Variables
"""
start = time.time()
AnovaList = []
Insight1 = "With Confidence interval of 0.05, the variable - \"{0}\" is influenced by the categorical variable - \"{1}\". "
Insight2 = "As the Categorical variable - \"{0}\" is binary, Tukey's HSD test is not necessary. "
Insight3 = "As the p-Value is higher than the Confidence Interval 0.05, the variable - \"{0}\" is not influenced by the categorical variable - \"{1}\". "
for CategoricalVar in self.CategoricalFeatures:
Binary = 'Yes' if CategoricalVar in self.BinaryCategoricalFeatures else 'No'
for ContinuousVar in self.ContinuousFeatures:
TukeyResult = None
#f,p = stats.f_oneway(*[list(self.df[self.df[CategoricalVar]==name][ContinuousVar]) for name in set(self.df[CategoricalVar])])
f,p = stats.f_oneway(*[list(self.df[self.df[CategoricalVar]==name][ContinuousVar]) for name in set(self.df[CategoricalVar])])
if (p<0.05 and CategoricalVar in self.BinaryCategoricalFeatures):
Insight = Insight1.format(ContinuousVar, CategoricalVar) + Insight2.format(CategoricalVar)
elif p<0.05:
TukeyResult = self.Tukey(CategoricalVar, ContinuousVar)
Insight = Insight1.format(ContinuousVar, CategoricalVar)
else:
Insight = Insight3.format(ContinuousVar, CategoricalVar)
AnovaList.append(dict(Categorical = CategoricalVar, Continuous = ContinuousVar, f = f, p = p, Binary = Binary, Insight = Insight,
TukeyResult = TukeyResult))
end = time.time()
if self.debug == 'YES':
print('Anova',end-start)
return AnovaList
def Tukey(self,Categorical, Continuous):
"""
Calculate Tukey Honest Significance Difference (HSD) Test, to identify the groups whose
distributions are significantly different
"""
start = time.time()
mc = MultiComparison(self.df[Continuous], self.df[Categorical])
        result = mc.tukeyhsd()
        UniqueGroup = mc.groupsunique
        group1 = [UniqueGroup[index] for index in mc.pairindices[0]]
        group2 = [UniqueGroup[index] for index in mc.pairindices[1]]
        reject = result.reject
        meandiffs = [round(float(meandiff), 3) for meandiff in result.meandiffs]
columns = ['Group 1', "Group 2", "Mean Difference", "Reject"]
TukeyResult = pd.DataFrame(np.column_stack((group1, group2, meandiffs, reject)), columns=columns)
end = time.time()
if self.debug == 'YES':
print('Tukey',end-start)
return TukeyResult
def std_variance(self):
"""
Scale the Continuous features with MinMaxScaler and then calculate variance
"""
start = time.time()
scaler = MinMaxScaler()
scaled = scaler.fit_transform(self.df[self.ContinuousFeatures].dropna())
var_list = []
i=0
for column in self.ContinuousFeatures:
var_list.append(dict(column=column,variance=np.var(scaled[:,i])))
i=i+1
end = time.time()
if self.debug == 'YES':
print('std_variance',end-start)
return pd.DataFrame(var_list)
def VIF(self):
"""
Drop the NaN's and calculate the VIF
"""
start = time.time()
vif_list = []
X = self.df[self.ContinuousFeatures].dropna()
for var in X.columns:
vif = variance_inflation_factor(X[X.columns].values,X.columns.get_loc(var))
vif_list.append(dict(column=var,vif=vif))
end = time.time()
if self.debug == 'YES':
print('VIF',end-start)
return | pd.DataFrame(vif_list) | pandas.DataFrame |
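# --- Editor's hedged usage sketch for the EDA class above (not part of the original module),
# assuming the class and its FeatureType/WOE_IV dependencies are importable: the CSV path and
# the list of categorical columns are assumptions; EDAToHTML() renders the packaged report
# template and returns the path of the written HTML file.
import pandas as pd
df = pd.read_csv('titanic.csv')
eda = EDA(df, CategoricalFeatures=['Survived', 'Pclass', 'Sex', 'Embarked'], debug='NO')
report_path = eda.EDAToHTML(title='Titanic EDA')
print('report written to', report_path)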
'''
Class wrapper for label generation, transforming an input data-frame with endpoints and
an input data-frame with imputed data to a Pandas data-frame
with labels
'''
import os
import sys
import os.path
import ipdb
import numpy as np
import scipy as sp
import pandas as pd
import circews.functions.labels as bern_labels
class AllLabel:
'''
Annotate all labels jointly, including full WorseState and WorseStateSoft labels, multi-class
classification labels, regression-time-to-event labels, and smaller component labels
that refer to conditions on MAP, Lactate and the medications.
'''
def __init__(self, lhours, rhours, dataset=None):
self.abs_datetime_key="AbsDatetime"
self.rel_datetime_key="RelDatetime"
self.patient_id_key="PatientID"
self.lhours=lhours
self.rhours=rhours
self.label_key="AllLabels{}To{}Hours".format(self.lhours, self.rhours)
self.grid_step_seconds=300.0
self.dataset=dataset
def transform(self, df_pat, df_endpoint, pid=None):
abs_time_col=df_pat[self.abs_datetime_key]
rel_time_col=df_pat[self.rel_datetime_key]
patient_col=df_pat[self.patient_id_key]
if df_pat.shape[0]==0 or df_endpoint.shape[0]==0:
print("WARNING: Patient {} has no impute data, skipping...".format(pid), flush=True)
return None
df_endpoint.set_index(keys="Datetime", inplace=True, verify_integrity=True)
try:
if self.dataset=="bern":
df_endpoint=df_endpoint.reindex(index=df_pat.AbsDatetime,method="nearest")
elif self.dataset=="mimic":
df_endpoint=df_endpoint.reindex(index=df_pat.AbsDatetime,method="ffill")
except:
print("WARNING: Issue when re-indexing frame of patient: {}".format(pid), flush=True)
return None
endpoint_status_arr=np.array(df_endpoint.endpoint_status)
unique_status=np.unique(endpoint_status_arr)
for status in unique_status:
assert(status in ["unknown","event 0","event 1", "event 2", "event 3",
"maybe 1","maybe 2", "maybe 3","probably not 1", "probably not 2", "probably not 3"])
lactate_above_ts=np.array(df_endpoint.lactate_above_threshold,dtype=np.float)
map_below_ts=np.array(df_endpoint.MAP_below_threshold,dtype=np.float)
l1_present=np.array(df_endpoint.level1_drugs_present,dtype=np.float)
l2_present=np.array(df_endpoint.level2_drugs_present,dtype=np.float)
l3_present=np.array(df_endpoint.level3_drugs_present,dtype=np.float)
worse_state_arr=bern_labels.future_worse_state(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds) # Joint (A|D|E)
worse_state_soft_arr=bern_labels.future_worse_state_soft(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds) # Joint (B|C|D|E)
from_0_arr=bern_labels.future_worse_state_from_0(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds) # Separate A
from_0_soft_arr=bern_labels.future_worse_state_soft_from_0(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds) # Separate B
from_probably_not_arr=bern_labels.future_worse_state_from_pn(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds) # Separate C
from_1_arr=bern_labels.future_worse_state_from_1(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds) # Separate D
from_2_arr=bern_labels.future_worse_state_from_2(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds) # Separate E;
from_1_or_2_arr=bern_labels.future_worse_state_from_1_or_2(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds) # Join(D|E)
lactate_any_arr=bern_labels.any_positive_transition(lactate_above_ts, self.lhours, self.rhours, self.grid_step_seconds)
map_any_arr=bern_labels.any_positive_transition(map_below_ts, self.lhours, self.rhours, self.grid_step_seconds)
l1_drugs_any_arr=bern_labels.any_positive_transition(l1_present, self.lhours, self.rhours, self.grid_step_seconds)
l2_drugs_any_arr=bern_labels.any_positive_transition(l2_present, self.lhours, self.rhours, self.grid_step_seconds)
l3_drugs_any_arr=bern_labels.any_positive_transition(l3_present, self.lhours, self.rhours, self.grid_step_seconds)
time_to_worse_state_binned_arr=bern_labels.time_to_worse_state_binned(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds)
time_to_worse_state_arr=bern_labels.time_to_worse_state(endpoint_status_arr, self.lhours, self.rhours, self.grid_step_seconds)
output_df_dict={}
output_df_dict[self.abs_datetime_key]=abs_time_col
output_df_dict[self.rel_datetime_key]=rel_time_col
output_df_dict[self.patient_id_key]=patient_col
output_df_dict["WorseState{}To{}Hours".format(self.lhours, self.rhours)]=worse_state_arr
output_df_dict["WorseStateSoft{}To{}Hours".format(self.lhours, self.rhours)]=worse_state_soft_arr
output_df_dict["WorseStateFromZero{}To{}Hours".format(self.lhours, self.rhours)]=from_0_arr
output_df_dict["WorseStateSoftFromZero{}To{}Hours".format(self.lhours, self.rhours)]=from_0_soft_arr
output_df_dict["WorseStateFromPn{}To{}Hours".format(self.lhours, self.rhours)]=from_probably_not_arr
output_df_dict["WorseStateFromOne{}To{}Hours".format(self.lhours, self.rhours)]=from_1_arr
output_df_dict["WorseStateFromTwo{}To{}Hours".format(self.lhours, self.rhours)]=from_2_arr
output_df_dict["WorseStateFromOneOrTwo{}To{}Hours".format(self.lhours, self.rhours)]=from_1_or_2_arr
output_df_dict["LactateAboveTs{}To{}Hours".format(self.lhours, self.rhours)]=lactate_any_arr
output_df_dict["MAPBelowTs{}To{}Hours".format(self.lhours, self.rhours)]=map_any_arr
output_df_dict["L1Drugs{}To{}Hours".format(self.lhours, self.rhours)]=l1_drugs_any_arr
output_df_dict["L2Drugs{}To{}Hours".format(self.lhours, self.rhours)]=l2_drugs_any_arr
output_df_dict["L3Drugs{}To{}Hours".format(self.lhours, self.rhours)]=l3_drugs_any_arr
output_df_dict["TimeToWorseState{}To{}Hours".format(self.lhours, self.rhours)]=time_to_worse_state_arr
output_df_dict["TimeToWorseStateBinned{}To{}Hours".format(self.lhours, self.rhours)]=time_to_worse_state_binned_arr
output_df=pd.DataFrame(output_df_dict)
return output_df
class DeteriorationLabel:
def __init__(self,lhours,rhours):
self.abs_datetime_key="AbsDatetime"
self.rel_datetime_key="RelDatetime"
self.patient_id_key="PatientID"
self.lhours=lhours
self.rhours=rhours
self.label_key="Deterioration_{}To{}Hours".format(self.lhours,self.rhours)
self.grid_step_seconds=300.0
def transform(self, df_pat, df_endpoint, pid=None):
abs_time_col=df_pat[self.abs_datetime_key]
rel_time_col=df_pat[self.rel_datetime_key]
patient_col=df_pat[self.patient_id_key]
## Patient has no information in the imputed table or the endpoints (SHOULD NOT HAPPEN)
if df_pat.shape[0]==0 or df_endpoint.shape[0]==0:
print("WARNING: Patient {} has no impute data, skipping...".format(pid),flush=True)
return None
df_endpoint.set_index(keys="Datetime",inplace=True,verify_integrity=True)
# Re-index the endpoint to the grid of the imputed data.
try:
df_endpoint=df_endpoint.reindex(index=df_pat.AbsDatetime,method="nearest")
except :
print("WARNING: Issue when re-indexing frame of patient {}".format(pid),flush=True)
return None
event1_arr=np.array(df_endpoint.event1)
event2_arr=np.array(df_endpoint.event2)
event3_arr=np.array(df_endpoint.event3)
maybe1_arr=np.array(df_endpoint.maybe_event1)
maybe2_arr=np.array(df_endpoint.maybe_event2)
maybe3_arr=np.array(df_endpoint.maybe_event3)
not1_arr=np.array(df_endpoint.probably_not_event1)
not2_arr=np.array(df_endpoint.probably_not_event2)
not3_arr=np.array(df_endpoint.probably_not_event3)
        # Any deterioration: does not require that the exact downward transition takes place in the
        # forward horizon, only that some more severe endpoint occurs somewhere in that period
if self.lhours==0:
label_arr=bern_labels.future_worse_state(event1_arr, event2_arr, event3_arr,maybe1_arr, maybe2_arr, maybe3_arr, self.lhours, self.rhours, self.grid_step_seconds)
else:
label_arr=bern_labels.future_deterioration(event1_arr, event2_arr, event3_arr,maybe1_arr, maybe2_arr, maybe3_arr, self.lhours, self.rhours, self.grid_step_seconds)
output_df_dict={}
output_df_dict[self.abs_datetime_key]=abs_time_col
output_df_dict[self.rel_datetime_key]=rel_time_col
output_df_dict[self.patient_id_key]=patient_col
output_df_dict[self.label_key]=label_arr
output_df= | pd.DataFrame(output_df_dict) | pandas.DataFrame |
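# --- Editor's hedged usage sketch for the label classes above (not part of the original
# module), assuming AllLabel and the circews.functions.labels module it wraps are in scope:
# the HDF5 paths and the patient id are placeholders; df_imputed and df_endpoint stand for one
# patient's imputed grid and endpoint frames as produced elsewhere in the pipeline.
import pandas as pd
df_imputed = pd.read_hdf('imputed_patient.h5')
df_endpoint = pd.read_hdf('endpoints_patient.h5')
labeler = AllLabel(lhours=0, rhours=8, dataset='bern')
df_labels = labeler.transform(df_imputed, df_endpoint, pid=1234)
if df_labels is not None:
    print(df_labels.filter(like='WorseState').sum())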
import requests
import base64
import datetime
from urllib.parse import urlencode
import pandas as pd
from IPython.display import Image
from IPython.core.display import HTML
def main(client_id, client_secret, artist, lookup_id=None, playlist_id=None, fields="", query=None, search_type='artist'):
access_token = auth(client_id, client_secret)
search = search_spotify(at=access_token , query=query, search_type=search_type)
artist_id = get_artist_id(access_token, artist=artist)
albums_ids = get_list_of_albums(lookup_id=lookup_id, artist=artist, at=access_token)
album_info_list, albums_json = album_information(list_of_albums=albums_ids, at=access_token)
artists_in_ablums_= get_multiple_artists_from_albums(albums_json= albums_json)
list_of_songs_, list_of_songs_tolist= songs_information(albums_json= albums_json)
artists_in_albums_, songs_json, artist_id_, songs_id_= artists_from_songs(list_of_songs_ids=list_of_songs_tolist ,at=access_token)
artist_list_df= multiple_artists_songs(list_of_artists_ids= artist_id_, at=access_token)
    song_features_df, songs_features_json = song_features(list_of_songs_ids=songs_id_, at=access_token)  # renamed result to avoid shadowing song_features() (UnboundLocalError)
play_list_json_V2, empty_list_one_V2= playlist_data(at=access_token, playlist_id=playlist_id, fields=fields)
def auth(client_id=None, client_secret=None):
    # Accept credentials as arguments (as main() passes them) or prompt for them interactively
    base_url = "https://accounts.spotify.com/api/token"
    if client_id is None:
        client_id = input("client_id: ")
    if client_secret is None:
        client_secret = input("client_secret: ")
    credentials = f"{client_id}:{client_secret}"
b64_credentials = base64.b64encode(credentials.encode())
data_for_token = {"grant_type": "client_credentials"}
headers_for_token = {"Authorization": f"Basic {b64_credentials.decode()}"}
access_token = requests.post(base_url, data=data_for_token, headers=headers_for_token).json()
return access_token
def search_spotify(at, query=None, search_type='artist'):
"""
Search_Type Options: album , artist, playlist, track, show and episode
"""
endpoint = "https://api.spotify.com/v1/search"
headers = { "Authorization": f"Bearer {at['access_token']}" }
data = urlencode({"q": query, "type": search_type.lower()})
lookup_url = f"{endpoint}?{data}"
search = requests.get(lookup_url, headers=headers).json()
return search
def get_artist_id(access_token, artist):
endpoint = "https://api.spotify.com/v1/search"
headers = { "Authorization": f"Bearer {access_token['access_token']}" }
data = urlencode({"q": artist, "type": "artist"})
lookup_url = f"{endpoint}?{data}"
artist_id = requests.get(lookup_url, headers=headers).json()["artists"]["items"][0]["id"]
return artist_id
def get_list_of_albums(lookup_id, at, artist=None, resource_type='albums', versions='v1', market="US"):
if lookup_id == None:
lookup_id = get_artist_id(at, artist=artist)
dataV1 = urlencode({"market": market})
endpoint = f"https://api.spotify.com/{versions}/artists/{lookup_id}/{resource_type}?{dataV1}"
headers = { "Authorization": f"Bearer {at['access_token']}" }
album_json = requests.get(endpoint, headers=headers).json()
album_df=[]
for albums in range(len(album_json["items"])):
album_df.append({
"album_id":album_json["items"][albums]["id"],
"artist_id":album_json["items"][0]["artists"][0]["id"]
})
albums_ids = pd.DataFrame(album_df)["album_id"].tolist()
return albums_ids
def album_information(list_of_albums, at, market="US"):
counter = len(list_of_albums)
info=[]
while counter > 0:
var1 = counter-20
var2= counter
headers = { "Authorization": f"Bearer {at['access_token']}" }
if var1 < 0:
var1 = 0
joined_list = ",".join(list_of_albums[var1:var2])
data = urlencode({"market": market,"ids":joined_list})
endpoint = f"https://api.spotify.com/v1/albums?{data}"
albums_json = requests.get(endpoint, headers=headers).json()
for index in range(len(list_of_albums[var1:var2])):
albums= albums_json["albums"][index]
info.append({
"name_of_album":albums["name"],
"album_id":albums["id"],
#"artist_name":albums["artists"][0]["name"],
#"artist_id":albums["artists"][0]["id"],
"album_url":albums["external_urls"]["spotify"],
"album_genres":albums["genres"],
"album_cover": albums["images"][1]["url"],
"album_popularity":albums["popularity"],
"release_date":albums["release_date"]
})
counter -= 20
album_info_list= pd.DataFrame.from_dict(info)
return album_info_list, albums_json
def get_multiple_artists_from_albums(albums_json):
artists_in_albums = []
for si in range(len(albums_json["albums"])):
for i in range(len(albums_json["albums"][si]["artists"])):
artists = albums_json["albums"][si]["artists"][i]
album_info= albums_json["albums"][si]
artists_in_albums.append({
"album_id":album_info["id"],
"album_name":album_info["name"],
f"album_artist{i+1}":artists["name"],
f"album_artist{i+1}_id": artists["id"]
})
artists_in_ablums_=pd.DataFrame.from_dict(artists_in_albums).groupby('album_id').first().reset_index()
return artists_in_ablums_
def songs_information(albums_json):
albums = albums_json["albums"]
number_of_albums = len(albums)
list_of_songs = []
for i in range(number_of_albums):
songs_key = albums[i]["tracks"]["items"]
number_of_songs_in_album = len(songs_key)
album_id = albums[i]["id"]
for si in range(number_of_songs_in_album):
songs_subkey= songs_key[si]
list_of_songs.append({
"album_id":album_id,
"song_id":songs_subkey["id"],
"name_of_song":songs_subkey["name"],
"duration":songs_subkey["duration_ms"],
"song_url": songs_subkey["external_urls"]["spotify"],
"song_preview": songs_subkey["preview_url"]
})
list_of_songs_= pd.DataFrame.from_dict(list_of_songs)
list_of_songs_tolist = list_of_songs_["song_id"].tolist()
return list_of_songs_, list_of_songs_tolist
def artists_from_songs(list_of_songs_ids,at):
counter = len(list_of_songs_ids)
artists_in_albums=[]
artists_id = []
while counter > 0:
var1 = counter-50
var2= counter
headers = { "Authorization": f"Bearer {at['access_token']}" }
if var1 < 0:
var1 = 0
joined_list = ",".join(list_of_songs_ids[var1:var2])
endpoint = f"https://api.spotify.com/v1/tracks?ids={joined_list}"
songs_json = requests.get(endpoint, headers=headers).json()
songs_in_list = len(songs_json["tracks"])
for i in range(songs_in_list):
tracks_in_list = songs_json["tracks"][i]
artists_in_track = len(tracks_in_list["artists"])
for si in range(artists_in_track):
count_artist = tracks_in_list["artists"][si]
artists_in_albums.append({
"song_id":tracks_in_list["id"],
"song_popularity":tracks_in_list["popularity"],
"song_image":tracks_in_list["album"]["images"][1]["url"],
f"name_artist_{si+1}":count_artist["name"],
f"id_artist_{si+1}": count_artist["id"]
})
artists_id.append(count_artist["id"])
counter -= 50
artists_in_albums_= pd.DataFrame.from_dict(artists_in_albums).groupby('song_id').first().reset_index()
artist_id_ = list(set(artists_id))
songs_id_ = artists_in_albums_["song_id"].tolist()
return artists_in_albums_, songs_json, artist_id_, songs_id_
def multiple_artists_songs(list_of_artists_ids,at):
counter = len(list_of_artists_ids)
empty_list_one = []
while counter > 0:
var1 = counter-50
var2= counter
headers = { "Authorization": f"Bearer {at['access_token']}" }
if var1 < 0:
var1 = 0
joined_list = ",".join(list_of_artists_ids[var1:var2])
endpoint = f"https://api.spotify.com/v1/artists?ids={joined_list}"
artists_json = requests.get(endpoint, headers=headers).json()
artist_count = len(artists_json["artists"])
for i in range(artist_count):
working_with_artist= artists_json["artists"][i]
count_genres = len(working_with_artist["genres"])
for si in range(count_genres):
empty_list_one.append({
"id_artist":working_with_artist["id"],
"name_artist":working_with_artist["name"],
"url": working_with_artist["external_urls"]["spotify"],
"followers":working_with_artist["followers"]["total"],
"image":working_with_artist["images"][1]["url"],
"artist_popluarity":working_with_artist["popularity"],
f"genre_{si}":working_with_artist["genres"][si]
})
counter -= 50
artist_list_df = pd.DataFrame.from_dict(empty_list_one).groupby('id_artist').first().reset_index()
return artist_list_df
def song_features(list_of_songs_ids,at):
counter = len(list_of_songs_ids)
empty_list_one = []
while counter > 0:
var1 = counter-100
var2 = counter
headers = { "Authorization": f"Bearer {at['access_token']}" }
if var1 < 0:
var1 = 0
joined_list = ",".join(list_of_songs_ids[var1:var2])
print(joined_list)
endpoint = f"https://api.spotify.com/v1/audio-features?ids={joined_list}"
songs_features_json = requests.get(endpoint, headers=headers).json()
count_features = len(songs_features_json["audio_features"])
for i in range(count_features):
sf = songs_features_json["audio_features"][i]
            if sf is not None:
empty_list_one.append({
'song_id': sf["id"],'danceability': sf["danceability"],
'energy': sf["energy"],'key': sf["key"],
'loudness': sf["loudness"],'mode': sf["mode"],
'speechiness': sf["speechiness"],'acousticness': sf["acousticness"],
'instrumentalness': sf["instrumentalness"],'liveness': sf["liveness"],
'valence': sf["valence"],'tempo': sf["tempo"],
})
else:
empty_list_one.append({
'song_id': 0,'danceability': 0,
'energy': 0,'key': 0,
'loudness': 0,'mode': 0,
'speechiness': 0,'acousticness': 0,
'instrumentalness': 0,'liveness': 0,
'valence': 0,'tempo': 0,
})
counter -= 100
song_features = pd.DataFrame.from_dict(empty_list_one)
return song_features
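# Usage sketch (not part of the original script; `albums_json` and `at` are assumed to
# come from the album-search and token helpers defined earlier in this module):
#
#   songs_df, song_ids = songs_information(albums_json)
#   songs_artists_df, _, artist_ids, _ = artists_from_songs(song_ids, at)
#   features_df = song_features(song_ids, at)
#   enriched = songs_df.merge(songs_artists_df, on="song_id").merge(features_df, on="song_id")
#
# The "song_id" merge keys are assumptions based on the columns built by the functions above.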
def playlist_data(at, playlist_id, market="US", fields=""):
endpoint = f"https://api.spotify.com/v1/playlists/{playlist_id}"
headers = { "Authorization": f"Bearer {at['access_token']}" }
fields = fields
data = urlencode({"market": market,"fields":fields})
lookup_url = f"{endpoint}?{data}"
play_list_json_V2 = requests.get(lookup_url, headers=headers).json()
songs_in_playlist = len(play_list_json_V2["tracks"]["items"])
empty_list_one=[]
for i in range(songs_in_playlist):
songs_data = play_list_json_V2["tracks"]["items"][i]["track"]
count_artists = len(songs_data["artists"])
for si in range(count_artists):
songs_data_artist = songs_data["artists"]
empty_list_one.append({
"playlist_id":play_list_json_V2["id"],
"playlist_name":play_list_json_V2["name"],
"playlist_owner":play_list_json_V2["owner"]["display_name"],
"owner_url":play_list_json_V2["owner"]["external_urls"]["spotify"],
"playlist_url":play_list_json_V2["external_urls"]["spotify"],
"playlist_followers": play_list_json_V2["followers"]["total"],
"playlist_cover_art": play_list_json_V2["images"][0]["url"],
"song_id":songs_data["id"],
"song_added_at": play_list_json_V2["tracks"]["items"][i]["added_at"],
"song_name":songs_data["name"],
"song_duration":songs_data["duration_ms"],
"song_popularity":songs_data["popularity"],
"song_url":songs_data["external_urls"]["spotify"],
f"name_artist_{si+1}":songs_data_artist[si]["name"],
f"id_artist_{si+1}":songs_data_artist[si]["id"]
})
empty_list_one_V2 = pd.DataFrame.from_dict(empty_list_one).groupby("song_id").first().reset_index()
return play_list_json_V2, empty_list_one_V2
def podcast_info(at, show_id, market="MX"):
endpoint = f"https://api.spotify.com/v1/shows/{show_id}"
headers = { "Authorization": f"Bearer {at['access_token']}" }
data = urlencode({"market": market})
lookup_url = f"{endpoint}?{data}"
podcast_json = requests.get(lookup_url, headers=headers).json()
empty_list =[]
count_episodes = len(podcast_json["episodes"]["items"])
empty_list.append({
"name" : podcast_json["name"],
"publisher":podcast_json["publisher"],
"total_episodes":podcast_json["total_episodes"],
"show_id":show_id,
"show_cover": podcast_json["episodes"]["items"][0]["images"][1]["url"]
})
podcast_basic_info = pd.DataFrame.from_dict(empty_list)
return podcast_basic_info, podcast_json
def browse_new_playlists(at,limit,offset, country="MX", asset_type="categories"):
"""
    Valid asset_type values: featured-playlists | new-releases | categories | categories/{category_id} | categories/{category_id}/playlists
"""
base_url =f"https://api.spotify.com/v1/browse/{asset_type}"
headers = { "Authorization": f"Bearer {at['access_token']}" }
data=urlencode({"country":country, "limit":limit, "offset":offset })
lookup_url = f"{base_url}?{data}"
featured_playlist_json = requests.get(lookup_url, headers=headers).json()
return featured_playlist_json
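# Example calls (sketch; assumes `at` holds a valid token dict and `category_id` was
# taken from a previous "categories" response):
#
#   new_releases = browse_new_playlists(at, limit=20, offset=0,
#                                       country="MX", asset_type="new-releases")
#   category_playlists = browse_new_playlists(at, limit=20, offset=0, country="MX",
#                                             asset_type=f"categories/{category_id}/playlists")
#
# `asset_type` is interpolated directly into the /v1/browse/ URL, so any of the paths
# listed in the docstring above can be passed.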
def top_playlists(at, country):
empty_list=[]
for countries in range(len(country)):
categories_toplists = browse_new_playlists(at, limit=50, offset=0, asset_type=f"featured-playlists", country=country[countries])
len_playlists= range(len(categories_toplists["playlists"]["items"]))
for playlist_id in len_playlists:
first_key = categories_toplists["playlists"]["items"][playlist_id]
empty_list.append({
"name_of_playlist":first_key["name"],
"playlist_id":first_key["id"],
"owner":first_key["owner"]["display_name"],
"playlist_cover":first_key["images"][0]["url"],
"country": country[countries]
})
    top_playlists = pd.DataFrame.from_dict(empty_list)
    return top_playlists
from collections import defaultdict
from datetime import datetime
from itertools import product
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
Series,
array,
concat,
merge,
)
import pandas._testing as tm
from pandas.core.algorithms import safe_sort
import pandas.core.common as com
from pandas.core.sorting import (
decons_group_index,
get_group_index,
is_int64_overflow_possible,
lexsort_indexer,
nargsort,
)
class TestSorting:
@pytest.mark.slow
def test_int64_overflow(self):
B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500)))
A = np.arange(2500)
df = DataFrame(
{
"A": A,
"B": B,
"C": A,
"D": B,
"E": A,
"F": B,
"G": A,
"H": B,
"values": np.random.randn(2500),
}
)
lg = df.groupby(["A", "B", "C", "D", "E", "F", "G", "H"])
rg = df.groupby(["H", "G", "F", "E", "D", "C", "B", "A"])
left = lg.sum()["values"]
right = rg.sum()["values"]
exp_index, _ = left.index.sortlevel()
tm.assert_index_equal(left.index, exp_index)
exp_index, _ = right.index.sortlevel(0)
tm.assert_index_equal(right.index, exp_index)
tups = list(map(tuple, df[["A", "B", "C", "D", "E", "F", "G", "H"]].values))
tups = com.asarray_tuplesafe(tups)
expected = df.groupby(tups).sum()["values"]
for k, v in expected.items():
assert left[k] == right[k[::-1]]
assert left[k] == v
assert len(left) == len(right)
def test_int64_overflow_moar(self):
# GH9096
values = range(55109)
data = DataFrame.from_dict({"a": values, "b": values, "c": values, "d": values})
grouped = data.groupby(["a", "b", "c", "d"])
assert len(grouped) == len(values)
arr = np.random.randint(-1 << 12, 1 << 12, (1 << 15, 5))
i = np.random.choice(len(arr), len(arr) * 4)
        arr = np.vstack((arr, arr[i]))  # add some duplicate rows
i = np.random.permutation(len(arr))
arr = arr[i] # shuffle rows
df = DataFrame(arr, columns=list("abcde"))
df["jim"], df["joe"] = np.random.randn(2, len(df)) * 10
gr = df.groupby(list("abcde"))
# verify this is testing what it is supposed to test!
assert is_int64_overflow_possible(gr.grouper.shape)
# manually compute groupings
jim, joe = defaultdict(list), defaultdict(list)
for key, a, b in zip(map(tuple, arr), df["jim"], df["joe"]):
jim[key].append(a)
joe[key].append(b)
assert len(gr) == len(jim)
mi = MultiIndex.from_tuples(jim.keys(), names=list("abcde"))
def aggr(func):
f = lambda a: np.fromiter(map(func, a), dtype="f8")
arr = np.vstack((f(jim.values()), f(joe.values()))).T
res = DataFrame(arr, columns=["jim", "joe"], index=mi)
return res.sort_index()
tm.assert_frame_equal(gr.mean(), aggr(np.mean))
tm.assert_frame_equal(gr.median(), aggr(np.median))
def test_lexsort_indexer(self):
keys = [[np.nan] * 5 + list(range(100)) + [np.nan] * 5]
# orders=True, na_position='last'
result = lexsort_indexer(keys, orders=True, na_position="last")
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=True, na_position='first'
result = lexsort_indexer(keys, orders=True, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=False, na_position='last'
result = lexsort_indexer(keys, orders=False, na_position="last")
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=False, na_position='first'
result = lexsort_indexer(keys, orders=False, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
def test_nargsort(self):
# np.argsort(items) places NaNs last
items = [np.nan] * 5 + list(range(100)) + [np.nan] * 5
# np.argsort(items2) may not place NaNs first
items2 = np.array(items, dtype="O")
# mergesort is the most difficult to get right because we want it to be
# stable.
# According to numpy/core/tests/test_multiarray, """The number of
# sorted items must be greater than ~50 to check the actual algorithm
# because quick and merge sort fall over to insertion sort for small
# arrays."""
# mergesort, ascending=True, na_position='last'
result = nargsort(items, kind="mergesort", ascending=True, na_position="last")
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='first'
result = nargsort(items, kind="mergesort", ascending=True, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='last'
result = nargsort(items, kind="mergesort", ascending=False, na_position="last")
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='first'
result = nargsort(items, kind="mergesort", ascending=False, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='last'
result = nargsort(items2, kind="mergesort", ascending=True, na_position="last")
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='first'
result = nargsort(items2, kind="mergesort", ascending=True, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='last'
result = nargsort(items2, kind="mergesort", ascending=False, na_position="last")
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='first'
result = nargsort(
items2, kind="mergesort", ascending=False, na_position="first"
)
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
class TestMerge:
@pytest.mark.slow
def test_int64_overflow_issues(self):
# #2690, combinatorial explosion
df1 = DataFrame(np.random.randn(1000, 7), columns=list("ABCDEF") + ["G1"])
df2 = DataFrame(np.random.randn(1000, 7), columns=list("ABCDEF") + ["G2"])
# it works!
result = merge(df1, df2, how="outer")
assert len(result) == 2000
low, high, n = -1 << 10, 1 << 10, 1 << 20
left = DataFrame(np.random.randint(low, high, (n, 7)), columns=list("ABCDEFG"))
left["left"] = left.sum(axis=1)
# one-2-one match
i = np.random.permutation(len(left))
right = left.iloc[i].copy()
right.columns = right.columns[:-1].tolist() + ["right"]
right.index = np.arange(len(right))
right["right"] *= -1
out = merge(left, right, how="outer")
assert len(out) == len(left)
tm.assert_series_equal(out["left"], -out["right"], check_names=False)
result = out.iloc[:, :-2].sum(axis=1)
tm.assert_series_equal(out["left"], result, check_names=False)
assert result.name is None
out.sort_values(out.columns.tolist(), inplace=True)
out.index = np.arange(len(out))
for how in ["left", "right", "outer", "inner"]:
tm.assert_frame_equal(out, merge(left, right, how=how, sort=True))
# check that left merge w/ sort=False maintains left frame order
out = merge(left, right, how="left", sort=False)
tm.assert_frame_equal(left, out[left.columns.tolist()])
out = merge(right, left, how="left", sort=False)
tm.assert_frame_equal(right, out[right.columns.tolist()])
# one-2-many/none match
n = 1 << 11
left = DataFrame(
np.random.randint(low, high, (n, 7)).astype("int64"),
columns=list("ABCDEFG"),
)
# confirm that this is checking what it is supposed to check
shape = left.apply(Series.nunique).values
assert is_int64_overflow_possible(shape)
# add duplicates to left frame
left = concat([left, left], ignore_index=True)
right = DataFrame(
np.random.randint(low, high, (n // 2, 7)).astype("int64"),
columns=list("ABCDEFG"),
)
# add duplicates & overlap with left to the right frame
i = np.random.choice(len(left), n)
right = concat([right, right, left.iloc[i]], ignore_index=True)
left["left"] = np.random.randn(len(left))
right["right"] = np.random.randn(len(right))
# shuffle left & right frames
i = np.random.permutation(len(left))
left = left.iloc[i].copy()
left.index = np.arange(len(left))
i = np.random.permutation(len(right))
right = right.iloc[i].copy()
right.index = np.arange(len(right))
# manually compute outer merge
ldict, rdict = defaultdict(list), defaultdict(list)
for idx, row in left.set_index(list("ABCDEFG")).iterrows():
ldict[idx].append(row["left"])
for idx, row in right.set_index(list("ABCDEFG")).iterrows():
rdict[idx].append(row["right"])
vals = []
for k, lval in ldict.items():
rval = rdict.get(k, [np.nan])
for lv, rv in product(lval, rval):
vals.append(
k
+ (
lv,
rv,
)
)
for k, rval in rdict.items():
if k not in ldict:
for rv in rval:
vals.append(
k
+ (
np.nan,
rv,
)
)
def align(df):
df = df.sort_values(df.columns.tolist())
df.index = np.arange(len(df))
return df
def verify_order(df):
kcols = list("ABCDEFG")
tm.assert_frame_equal(
df[kcols].copy(), df[kcols].sort_values(kcols, kind="mergesort")
)
out = DataFrame(vals, columns=list("ABCDEFG") + ["left", "right"])
out = align(out)
jmask = {
"left": out["left"].notna(),
"right": out["right"].notna(),
"inner": out["left"].notna() & out["right"].notna(),
"outer": np.ones(len(out), dtype="bool"),
}
for how in ["left", "right", "outer", "inner"]:
mask = jmask[how]
frame = align(out[mask].copy())
assert mask.all() ^ mask.any() or how == "outer"
for sort in [False, True]:
res = merge(left, right, how=how, sort=sort)
if sort:
verify_order(res)
# as in GH9092 dtypes break with outer/right join
tm.assert_frame_equal(
frame, align(res), check_dtype=how not in ("right", "outer")
)
def test_decons():
def testit(codes_list, shape):
group_index = get_group_index(codes_list, shape, sort=True, xnull=True)
codes_list2 = decons_group_index(group_index, shape)
for a, b in zip(codes_list, codes_list2):
tm.assert_numpy_array_equal(a, b)
shape = (4, 5, 6)
codes_list = [
np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100).astype(np.int64),
np.tile([0, 2, 4, 3, 0, 1, 2, 3], 100).astype(np.int64),
np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100).astype(np.int64),
]
testit(codes_list, shape)
shape = (10000, 10000)
codes_list = [
np.tile(np.arange(10000, dtype=np.int64), 5),
np.tile(np.arange(10000, dtype=np.int64), 5),
]
testit(codes_list, shape)
class TestSafeSort:
def test_basic_sort(self):
values = [3, 1, 2, 0, 4]
result = safe_sort(values)
expected = np.array([0, 1, 2, 3, 4])
tm.assert_numpy_array_equal(result, expected)
values = list("baaacb")
result = safe_sort(values)
expected = np.array(list("aaabbc"), dtype="object")
tm.assert_numpy_array_equal(result, expected)
values = []
result = safe_sort(values)
expected = np.array([])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("verify", [True, False])
def test_codes(self, verify):
values = [3, 1, 2, 0, 4]
expected = np.array([0, 1, 2, 3, 4])
codes = [0, 1, 1, 2, 3, 0, -1, 4]
result, result_codes = safe_sort(values, codes, verify=verify)
expected_codes = np.array([3, 1, 1, 2, 0, 3, -1, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_codes, expected_codes)
# na_sentinel
codes = [0, 1, 1, 2, 3, 0, 99, 4]
result, result_codes = safe_sort(values, codes, na_sentinel=99, verify=verify)
expected_codes = np.array([3, 1, 1, 2, 0, 3, 99, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_codes, expected_codes)
codes = []
result, result_codes = safe_sort(values, codes, verify=verify)
expected_codes = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_codes, expected_codes)
@pytest.mark.parametrize("na_sentinel", [-1, 99])
def test_codes_out_of_bound(self, na_sentinel):
values = [3, 1, 2, 0, 4]
expected = np.array([0, 1, 2, 3, 4])
# out of bound indices
codes = [0, 101, 102, 2, 3, 0, 99, 4]
result, result_codes = safe_sort(values, codes, na_sentinel=na_sentinel)
expected_codes = np.array(
[3, na_sentinel, na_sentinel, 2, 0, 3, na_sentinel, 4], dtype=np.intp
)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_codes, expected_codes)
def test_mixed_integer(self):
values = np.array(["b", 1, 0, "a", 0, "b"], dtype=object)
result = safe_sort(values)
expected = np.array([0, 0, 1, "a", "b", "b"], dtype=object)
tm.assert_numpy_array_equal(result, expected)
values = np.array(["b", 1, 0, "a"], dtype=object)
codes = [0, 1, 2, 3, 0, -1, 1]
result, result_codes = safe_sort(values, codes)
expected = np.array([0, 1, "a", "b"], dtype=object)
expected_codes = np.array([3, 1, 0, 2, 3, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_codes, expected_codes)
def test_mixed_integer_from_list(self):
values = ["b", 1, 0, "a", 0, "b"]
result = safe_sort(values)
expected = np.array([0, 0, 1, "a", "b", "b"], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_unsortable(self):
# GH 13714
arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object)
msg = "'[<>]' not supported between instances of .*"
with pytest.raises(TypeError, match=msg):
safe_sort(arr)
def test_exceptions(self):
with pytest.raises(TypeError, match="Only list-like objects are allowed"):
safe_sort(values=1)
with pytest.raises(TypeError, match="Only list-like objects or None"):
safe_sort(values=[0, 1, 2], codes=1)
with pytest.raises(ValueError, match="values should be unique"):
safe_sort(values=[0, 1, 2, 1], codes=[0, 1])
def test_extension_array(self):
# a = array([1, 3, np.nan, 2], dtype='Int64')
a = array([1, 3, 2], dtype="Int64")
result = safe_sort(a)
# expected = array([1, 2, 3, np.nan], dtype='Int64')
expected = array([1, 2, 3], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("verify", [True, False])
@pytest.mark.parametrize("na_sentinel", [-1, 99])
def test_extension_array_codes(self, verify, na_sentinel):
        a = array([1, 3, 2], dtype="Int64")
"""
from https://www.kaggle.com/bminixhofer/aggregated-features-lightgbm
"""
import gc
import pandas as pd
import numpy as np
def run():
used_cols = ["item_id", "user_id"]
train = pd.read_csv("../input/train.csv", usecols=used_cols)
    train_active = pd.read_csv("../input/train_active.csv", usecols=used_cols)
import torch
import numpy as np
import os, sys
os.environ['TORCH_MODEL_ZOO'] = '/mnt/projects/counting/pretrained/resnet'
import models, datasets, metrics
import utils as ut
import tqdm, time
import pandas as pd
def main():
exp_dict = {"model":"MRCNN",
"max_epochs":10,
"batch_size":1}
model = models.mrcnn.MRCNN(exp_dict).cuda()
path_base = "checkpoints"
# model_state_dict = torch.load(path_base + "/model_state_dict.pth")
# model.load_state_dict(model_state_dict)
train_set = datasets.pascal2012.Pascal2012(split="train", exp_dict=exp_dict,
root="/mnt/datasets/public/issam/VOCdevkit/VOC2012/")
train_loader = torch.utils.data.DataLoader(
train_set,
collate_fn=train_set.collate_fn,
batch_size=1,
shuffle=True,
num_workers=1,
pin_memory=True)
# Main loop
model.history = {"score_list": []}
for e in range(exp_dict["max_epochs"]):
# Train for one epoch
score_dict = train_epoch(model, train_loader)
score_dict["epoch"] = e
# Update history
model.history["score_list"] += [score_dict]
# Report
        results_df = pd.DataFrame(model.history["score_list"])
import pandas as pd
import io
import lithops
from .utils import derived_from, is_series_like, M
no_default = "__no_default__"
class DataFrame:
def __init__(self, df, filepath, npartitions):
self.filepath = filepath
self.df = df
self.npartitions = npartitions
def reduction(
self,
chunk,
aggregate=None,
combine=None,
meta=no_default,
token=None,
split_every=None,
chunk_kwargs=None,
aggregate_kwargs=None,
combine_kwargs=None,
**kwargs,
):
"""Generic row-wise reductions.
Parameters
----------
chunk : callable
Function to operate on each partition. Should return a
``pandas.DataFrame``, ``pandas.Series``, or a scalar.
aggregate : callable, optional
Function to operate on the concatenated result of ``chunk``. If not
specified, defaults to ``chunk``. Used to do the final aggregation
in a tree reduction.
The input to ``aggregate`` depends on the output of ``chunk``.
If the output of ``chunk`` is a:
- scalar: Input is a Series, with one row per partition.
- Series: Input is a DataFrame, with one row per partition. Columns
are the rows in the output series.
- DataFrame: Input is a DataFrame, with one row per partition.
Columns are the columns in the output dataframes.
Should return a ``pandas.DataFrame``, ``pandas.Series``, or a
scalar.
combine : callable, optional
Function to operate on intermediate concatenated results of
``chunk`` in a tree-reduction. If not provided, defaults to
``aggregate``. The input/output requirements should match that of
``aggregate`` described above.
$META
token : str, optional
The name to use for the output keys.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used,
and all intermediates will be concatenated and passed to
``aggregate``. Default is 8.
chunk_kwargs : dict, optional
Keyword arguments to pass on to ``chunk`` only.
aggregate_kwargs : dict, optional
Keyword arguments to pass on to ``aggregate`` only.
combine_kwargs : dict, optional
Keyword arguments to pass on to ``combine`` only.
kwargs :
All remaining keywords will be passed to ``chunk``, ``combine``,
and ``aggregate``.
Examples
--------
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
>>> ddf = dd.from_pandas(df, npartitions=4)
Count the number of rows in a DataFrame. To do this, count the number
of rows in each partition, then sum the results:
>>> res = ddf.reduction(lambda x: x.count(),
... aggregate=lambda x: x.sum())
>>> res.compute()
x 50
y 50
dtype: int64
Count the number of rows in a Series with elements greater than or
equal to a value (provided via a keyword).
>>> def count_greater(x, value=0):
... return (x >= value).sum()
>>> res = ddf.x.reduction(count_greater, aggregate=lambda x: x.sum(),
... chunk_kwargs={'value': 25})
>>> res.compute()
25
Aggregate both the sum and count of a Series at the same time:
>>> def sum_and_count(x):
... return pd.Series({'count': x.count(), 'sum': x.sum()},
... index=['count', 'sum'])
>>> res = ddf.x.reduction(sum_and_count, aggregate=lambda x: x.sum())
>>> res.compute()
count 50
sum 1225
dtype: int64
Doing the same, but for a DataFrame. Here ``chunk`` returns a
DataFrame, meaning the input to ``aggregate`` is a DataFrame with an
index with non-unique entries for both 'x' and 'y'. We groupby the
index, and sum each group to get the final result.
>>> def sum_and_count(x):
... return pd.DataFrame({'count': x.count(), 'sum': x.sum()},
... columns=['count', 'sum'])
>>> res = ddf.reduction(sum_and_count,
... aggregate=lambda x: x.groupby(level=0).sum())
>>> res.compute()
count sum
x 50 1225
y 50 3725
"""
if aggregate is None:
aggregate = chunk
if combine is None:
if combine_kwargs:
raise ValueError("`combine_kwargs` provided with no `combine`")
combine = aggregate
combine_kwargs = aggregate_kwargs
chunk_kwargs = chunk_kwargs.copy() if chunk_kwargs else {}
chunk_kwargs["aca_chunk"] = chunk
combine_kwargs = combine_kwargs.copy() if combine_kwargs else {}
combine_kwargs["aca_combine"] = combine
aggregate_kwargs = aggregate_kwargs.copy() if aggregate_kwargs else {}
aggregate_kwargs["aca_aggregate"] = aggregate
return aca(
self,
chunk=_reduction_chunk,
aggregate=_reduction_aggregate,
combine=_reduction_combine,
meta=meta,
token=token,
split_every=split_every,
chunk_kwargs=chunk_kwargs,
aggregate_kwargs=aggregate_kwargs,
combine_kwargs=combine_kwargs,
**kwargs,
)
def _reduction_agg(self, name, axis=None, skipna=True, split_every=False, out=None):
axis = self._validate_axis(axis)
meta = getattr(self._meta_nonempty, name)(axis=axis, skipna=skipna)
token = self._token_prefix + name
method = getattr(M, name)
if axis == 1:
result = self.map_partitions(
method, meta=meta, token=token, skipna=skipna, axis=axis
)
return handle_out(out, result)
else:
result = self.reduction(
method,
meta=meta,
token=token,
skipna=skipna,
axis=axis,
split_every=split_every,
)
if isinstance(self, DataFrame):
result.divisions = (self.columns.min(), self.columns.max())
return handle_out(out, result)
def apply(
self,
func,
axis=0,
broadcast=None,
raw=False,
reduce=None,
args=(),
meta=no_default,
result_type=None,
**kwds,
):
"""Parallel version of pandas.DataFrame.apply
This mimics the pandas version except for the following:
1. Only ``axis=1`` is supported (and must be specified explicitly).
2. The user should provide output metadata via the `meta` keyword.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index': apply function to each column (NOT SUPPORTED)
- 1 or 'columns': apply function to each row
$META
args : tuple
Positional arguments to pass to function in addition to the array/series
Additional keyword arguments will be passed as keywords to the function
Returns
-------
applied : Series or DataFrame
Examples
--------
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
Apply a function to row-wise passing in extra arguments in ``args`` and
``kwargs``:
>>> def myadd(row, a, b=1):
... return row.sum() + a + b
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5) # doctest: +SKIP
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with name ``'x'``, and dtype
``float64``:
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.apply(lambda row: row + 1, axis=1, meta=ddf)
See Also
--------
dask.DataFrame.map_partitions
"""
pandas_kwargs = {"axis": axis, "raw": raw, "result_type": result_type}
if axis == 0:
msg = (
"lithops.DataFrame.apply only supports axis=1\n"
" Try: df.apply(func, axis=1)"
)
raise NotImplementedError(msg)
def pandas_apply_function(obj):
buf = io.BytesIO(obj.data_stream.read())
df = pd.read_csv(buf)
            return df.apply(func, args=args, **kwds, **pandas_kwargs)
fexec = lithops.FunctionExecutor()
fexec.map(pandas_apply_function, [self.filepath], chunk_n=self.npartitions)
fexec.wait()
return self
@derived_from(pd.DataFrame)
def all(self, axis=None, skipna=True, split_every=False, out=None):
def pandas_all_function(obj):
buf = io.BytesIO(obj.data_stream.read())
df = pd.read_csv(buf)
            return df.all(axis=axis, skipna=skipna)
fexec = lithops.FunctionExecutor()
fexec.map(pandas_all_function, [self.filepath], chunk_n=self.npartitions)
fexec.wait()
return self
@derived_from(pd.DataFrame)
def any(self, axis=None, skipna=True, split_every=False, out=None):
def pandas_any_function(obj):
buf = io.BytesIO(obj.data_stream.read())
df = pd.read_csv(buf)
            return df.any(axis=axis, skipna=skipna)
fexec = lithops.FunctionExecutor()
fexec.map(pandas_any_function, [self.filepath], chunk_n=self.npartitions)
fexec.wait()
return self
@derived_from(pd.DataFrame)
def sum(
self,
axis=None,
skipna=True,
split_every=False,
dtype=None,
out=None,
min_count=None,
):
# use self._reduction_agg()
def pandas_sum_function(obj):
buf = io.BytesIO(obj.data_stream.read())
df = pd.read_csv(buf)
            return df.sum(axis=axis, skipna=skipna, min_count=min_count)
fexec = lithops.FunctionExecutor()
fexec.map(pandas_sum_function, [self.filepath], chunk_n=self.npartitions)
fexec.wait()
return self
@derived_from(pd.DataFrame)
def prod(
self,
axis=None,
skipna=True,
split_every=False,
dtype=None,
out=None,
min_count=None,
):
# use self._reduction_agg()
def pandas_prod_function(obj):
buf = io.BytesIO(obj.data_stream.read())
df = pd.read_csv(buf)
            return df.prod(axis=axis, skipna=skipna)
fexec = lithops.FunctionExecutor()
fexec.map(pandas_prod_function, [self.filepath], chunk_n=self.npartitions)
fexec.wait()
return self
@derived_from(pd.DataFrame)
def max(self, axis=None, skipna=True, split_every=False, out=None):
# use self._reduction_agg()
def pandas_max_function(obj):
buf = io.BytesIO(obj.data_stream.read())
df = pd.read_csv(buf)
            return df.max(axis=axis, skipna=skipna)
fexec = lithops.FunctionExecutor()
fexec.map(pandas_max_function, [self.filepath], chunk_n=self.npartitions)
fexec.wait()
return self
@derived_from(pd.DataFrame)
def min(self, axis=None, skipna=True, split_every=False, out=None):
# use self._reduction_agg()
def pandas_min_function(obj):
buf = io.BytesIO(obj.data_stream.read())
df = pd.read_csv(buf)
            return df.min(axis=axis, skipna=skipna)
fexec = lithops.FunctionExecutor()
fexec.map(pandas_min_function, [self.filepath], chunk_n=self.npartitions)
fexec.wait()
return self
@derived_from(pd.DataFrame)
def count(self, axis=None, split_every=False):
        # use self.map_partitions when axis == 1, self.reduction when axis == 0
def pandas_count_function(obj):
buf = io.BytesIO(obj.data_stream.read())
            df = pd.read_csv(buf)
            return df.count(axis=axis)
        fexec = lithops.FunctionExecutor()
        fexec.map(pandas_count_function, [self.filepath], chunk_n=self.npartitions)
        fexec.wait()
        return self
import json
import pandas as pd
import os
import re
def create_entry(raw_entry,hashfunction,encoding):
return_dict = {}
app_metadata = {'is_god':raw_entry['is_admin']}
if not pd.isna(raw_entry['organisation_id']):
app_metadata['organisation_id'] = round(raw_entry['organisation_id'])
if not pd.isna(raw_entry['base_ids']):
app_metadata['base_ids']=str(raw_entry['base_ids']).split(',')
return_dict['user_id']=str(raw_entry['id'])
return_dict['name']=raw_entry['naam']
if not pd.isna(raw_entry['deleted']) and raw_entry['deleted'] != '0000-00-00 00:00:00':
return_dict['email']=re.sub(r'\.deleted\.\d+', '',raw_entry['email'])
app_metadata['last_blocked_date']=raw_entry['deleted']
return_dict['blocked']=True
else:
return_dict['email']=raw_entry['email']
return_dict['email_verified']=False
return_dict['custom_password_hash']= {
"algorithm":hashfunction,
"hash":{
"value":raw_entry['pass'],
"encoding":encoding
}
}
    if not pd.isna(raw_entry['cms_usergroups_id']):
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas.compat as compat
from pandas.compat import range
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, NaT, Series, bdate_range, date_range, isna)
from pandas.core import ops
import pandas.core.nanops as nanops
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
from .common import TestData
class TestSeriesLogicalOps(object):
@pytest.mark.parametrize('bool_op', [operator.and_,
operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range('1/1/2000', periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_operators_bitwise(self):
# GH#9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list('abc'))
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list('abc'))
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
with pytest.raises(TypeError):
s_1111 & 'a'
with pytest.raises(TypeError):
s_1111 & ['a', 'b', 'c', 'd']
with pytest.raises(TypeError):
s_0123 & np.NaN
with pytest.raises(TypeError):
s_0123 & 3.14
with pytest.raises(TypeError):
s_0123 & [0.1, 4, 3.14, 2]
# s_0123 will be all false now because of reindexing like s_tft
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_tft & s_0123, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_tft & s_0123, exp)
# s_tft will be all false now because of reindexing like s_0123
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_0123 & s_tft, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_0123 & s_tft, exp)
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
s_abNd = Series(['a', 'b', np.NaN, 'd'])
res = s_0123 & s_abNd
expected = s_ftft
assert_series_equal(res, expected)
def test_scalar_na_logical_ops_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
with pytest.raises(TypeError):
s & datetime(2005, 1, 1)
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
result = s & list(s)
assert_series_equal(result, expected)
d = DataFrame({'A': s})
# TODO: Fix this exception - needs to be fixed! (see GH5035)
# (previously this was a TypeError because series returned
# NotImplemented
# this is an alignment issue; these are equivalent
# https://github.com/pandas-dev/pandas/issues/5284
with pytest.raises(TypeError):
d.__and__(s, axis='columns')
with pytest.raises(TypeError):
s & d
# this is wrong as its not a boolean result
# result = d.__and__(s,axis='index')
@pytest.mark.parametrize('op', [
operator.and_,
operator.or_,
operator.xor,
])
def test_logical_ops_with_index(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Series([op(ser[n], idx1[n]) for n in range(len(ser))])
result = op(ser, idx1)
assert_series_equal(result, expected)
expected = Series([op(ser[n], idx2[n]) for n in range(len(ser))],
dtype=bool)
result = op(ser, idx2)
assert_series_equal(result, expected)
@pytest.mark.parametrize("op, expected", [
(ops.rand_, pd.Index([False, True])),
(ops.ror_, pd.Index([False, True])),
(ops.rxor, pd.Index([])),
])
def test_reverse_ops_with_index(self, op, expected):
# https://github.com/pandas-dev/pandas/pull/23628
# multi-set Index ops are buggy, so let's avoid duplicates...
ser = Series([True, False])
idx = Index([False, True])
result = op(ser, idx)
tm.assert_index_equal(result, expected)
def test_logical_ops_label_based(self):
# GH#4947
# logical ops should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False], list('abc'))
result = a | b
assert_series_equal(result, expected)
expected = Series([True, False, False], list('abc'))
result = a ^ b
assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([False, True, False, False], list('abcd'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False, False], list('abcd'))
result = a | b
assert_series_equal(result, expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result, expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ['z'])
expected = Series([False, False, False, False], list('abcz'))
assert_series_equal(result, expected)
result = a | Series([1], ['z'])
expected = Series([True, True, False, False], list('abcz'))
assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]), Series([1], ['z']),
Series(np.nan, b.index), Series(np.nan, a.index)]:
result = a[a | e]
assert_series_equal(result, a[a])
for e in [Series(['z'])]:
if compat.PY3:
with tm.assert_produces_warning(RuntimeWarning):
result = a[a | e]
else:
result = a[a | e]
assert_series_equal(result, a[a])
# vs scalars
index = list('bca')
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
assert_series_equal(result, expected)
for v in [np.nan, 'foo']:
with pytest.raises(TypeError):
t | v
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [False, 0]:
result = Series([True, False, True], index=index) & v
expected = Series([False, False, False], index=index)
assert_series_equal(result, expected)
for v in [np.nan]:
with pytest.raises(TypeError):
t & v
def test_logical_ops_df_compat(self):
# GH#1134
s1 = pd.Series([True, False, True], index=list('ABC'), name='x')
s2 = pd.Series([True, True, False], index=list('ABD'), name='x')
exp = pd.Series([True, False, False, False],
index=list('ABCD'), name='x')
assert_series_equal(s1 & s2, exp)
assert_series_equal(s2 & s1, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
assert_series_equal(s1 | s2, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, False, False],
index=list('ABCD'), name='x')
assert_series_equal(s2 | s1, exp)
# DataFrame doesn't fill nan with False
exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]},
index=list('ABCD'))
assert_frame_equal(s1.to_frame() & s2.to_frame(), exp)
assert_frame_equal(s2.to_frame() & s1.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]},
index=list('ABCD'))
assert_frame_equal(s1.to_frame() | s2.to_frame(), exp)
assert_frame_equal(s2.to_frame() | s1.to_frame(), exp)
# different length
s3 = pd.Series([True, False, True], index=list('ABC'), name='x')
s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x')
exp = pd.Series([True, False, True, False],
index=list('ABCD'), name='x')
assert_series_equal(s3 & s4, exp)
assert_series_equal(s4 & s3, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
assert_series_equal(s3 | s4, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, True],
index=list('ABCD'), name='x')
assert_series_equal(s4 | s3, exp)
exp = pd.DataFrame({'x': [True, False, True, np.nan]},
index=list('ABCD'))
assert_frame_equal(s3.to_frame() & s4.to_frame(), exp)
assert_frame_equal(s4.to_frame() & s3.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, True, np.nan]},
index=list('ABCD'))
assert_frame_equal(s3.to_frame() | s4.to_frame(), exp)
assert_frame_equal(s4.to_frame() | s3.to_frame(), exp)
class TestSeriesComparisons(object):
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
assert_series_equal(s == s2, exp)
assert_series_equal(s2 == s, exp)
def test_categorical_comparisons(self):
# GH 8938
# allow equality comparisons
a = Series(list('abc'), dtype="category")
b = Series(list('abc'), dtype="object")
c = Series(['a', 'b', 'cc'], dtype="object")
d = Series(list('acb'), dtype="object")
e = Categorical(list('abc'))
f = Categorical(list('acb'))
# vs scalar
assert not (a == 'a').all()
assert ((a != 'a') == ~(a == 'a')).all()
assert not ('a' == a).all()
assert (a == 'a')[0]
assert ('a' == a)[0]
assert not ('a' != a)[0]
# vs list-like
assert (a == a).all()
assert not (a != a).all()
assert (a == list(a)).all()
assert (a == b).all()
assert (b == a).all()
assert ((~(a == b)) == (a != b)).all()
assert ((~(b == a)) == (b != a)).all()
assert not (a == c).all()
assert not (c == a).all()
assert not (a == d).all()
assert not (d == a).all()
# vs a cat-like
assert (a == e).all()
assert (e == a).all()
assert not (a == f).all()
assert not (f == a).all()
assert ((~(a == e) == (a != e)).all())
assert ((~(e == a) == (e != a)).all())
assert ((~(a == f) == (a != f)).all())
assert ((~(f == a) == (f != a)).all())
# non-equality is not comparable
with pytest.raises(TypeError):
a < b
with pytest.raises(TypeError):
b < a
with pytest.raises(TypeError):
a > b
with pytest.raises(TypeError):
b > a
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
ser = Series(bdate_range('1/1/2000', periods=10), dtype=object)
ser[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = ser[5]
f = getattr(operator, op)
result = f(ser, val)
expected = f(ser.dropna(), val).reindex(ser.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
# fffffffuuuuuuuuuuuu
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
def test_unequal_categorical_comparison_raises_type_error(self):
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
with pytest.raises(TypeError):
cat > "b"
cat = Series(Categorical(list("abc"), ordered=False))
with pytest.raises(TypeError):
cat > "b"
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
with pytest.raises(TypeError):
cat < "d"
with pytest.raises(TypeError):
cat > "d"
with pytest.raises(TypeError):
"d" < cat
with pytest.raises(TypeError):
"d" > cat
tm.assert_series_equal(cat == "d", Series([False, False, False]))
tm.assert_series_equal(cat != "d", Series([True, True, True]))
@pytest.mark.parametrize('pair', [
([pd.Timestamp('2011-01-01'), NaT, pd.Timestamp('2011-01-03')],
[NaT, NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), NaT, pd.Timedelta('3 days')],
[NaT, NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), NaT,
pd.Period('2011-03', freq='M')],
[NaT, NaT, pd.Period('2011-03', freq='M')]),
])
@pytest.mark.parametrize('reverse', [True, False])
@pytest.mark.parametrize('box', [Series, Index])
@pytest.mark.parametrize('dtype', [None, object])
def test_nat_comparisons(self, dtype, box, reverse, pair):
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
# Series, Index
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
        expected = Series([False, False, False])
# -*- coding: utf-8 -*-
from __future__ import print_function
from distutils.version import LooseVersion
from numpy import nan, random
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, Timestamp,
date_range)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData, _check_mixed_float
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
class TestDataFrameMissingData(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_dropEmptyRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
original = Series(mat, index=self.frame.index, name='foo')
expected = original.dropna()
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna(how='all')
# check that original was preserved
assert_series_equal(frame['foo'], original)
inplace_frame1.dropna(how='all', inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame1['foo'], expected)
smaller_frame = frame.dropna(how='all', subset=['foo'])
inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame2['foo'], expected)
def test_dropIncompleteRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
frame['bar'] = 5
original = Series(mat, index=self.frame.index, name='foo')
inp_frame1, inp_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna()
assert_series_equal(frame['foo'], original)
inp_frame1.dropna(inplace=True)
exp = Series(mat[5:], index=self.frame.index[5:], name='foo')
tm.assert_series_equal(smaller_frame['foo'], exp)
tm.assert_series_equal(inp_frame1['foo'], exp)
samesize_frame = frame.dropna(subset=['bar'])
assert_series_equal(frame['foo'], original)
self.assertTrue((frame['bar'] == 5).all())
inp_frame2.dropna(subset=['bar'], inplace=True)
self.assert_index_equal(samesize_frame.index, self.frame.index)
self.assert_index_equal(inp_frame2.index, self.frame.index)
def test_dropna(self):
df = DataFrame(np.random.randn(6, 4))
df[2][:2] = nan
dropped = df.dropna(axis=1)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
# threshold
dropped = df.dropna(axis=1, thresh=5)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, thresh=5, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0, thresh=4)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, thresh=4, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=1, thresh=4)
assert_frame_equal(dropped, df)
dropped = df.dropna(axis=1, thresh=3)
assert_frame_equal(dropped, df)
# subset
dropped = df.dropna(axis=0, subset=[0, 1, 3])
inp = df.copy()
inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)
assert_frame_equal(dropped, df)
assert_frame_equal(inp, df)
# all
dropped = df.dropna(axis=1, how='all')
assert_frame_equal(dropped, df)
df[2] = nan
dropped = df.dropna(axis=1, how='all')
expected = df.ix[:, [0, 1, 3]]
assert_frame_equal(dropped, expected)
# bad input
self.assertRaises(ValueError, df.dropna, axis=3)
def test_drop_and_dropna_caching(self):
# tst that cacher updates
original = Series([1, 2, np.nan], name='A')
expected = Series([1, 2], dtype=original.dtype, name='A')
df = pd.DataFrame({'A': original.values.copy()})
df2 = df.copy()
df['A'].dropna()
assert_series_equal(df['A'], original)
df['A'].dropna(inplace=True)
assert_series_equal(df['A'], expected)
df2['A'].drop([1])
        assert_series_equal(df2['A'], original)
"""
Tests for Timestamp timezone-related methods
"""
from datetime import (
date,
datetime,
timedelta,
)
import dateutil
from dateutil.tz import (
gettz,
tzoffset,
)
import pytest
import pytz
from pytz.exceptions import (
AmbiguousTimeError,
NonExistentTimeError,
)
from pandas._libs.tslibs import timezones
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
from pandas import (
NaT,
Timestamp,
)
class TestTimestampTZOperations:
# --------------------------------------------------------------
# Timestamp.tz_localize
def test_tz_localize_pushes_out_of_bounds(self):
# GH#12677
# tz_localize that pushes away from the boundary is OK
msg = (
f"Converting {Timestamp.min.strftime('%Y-%m-%d %H:%M:%S')} "
f"underflows past {Timestamp.min}"
)
pac = Timestamp.min.tz_localize("US/Pacific")
assert pac.value > Timestamp.min.value
pac.tz_convert("Asia/Tokyo") # tz_convert doesn't change value
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp.min.tz_localize("Asia/Tokyo")
# tz_localize that pushes away from the boundary is OK
msg = (
f"Converting {Timestamp.max.strftime('%Y-%m-%d %H:%M:%S')} "
f"overflows past {Timestamp.max}"
)
tokyo = Timestamp.max.tz_localize("Asia/Tokyo")
assert tokyo.value < Timestamp.max.value
tokyo.tz_convert("US/Pacific") # tz_convert doesn't change value
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp.max.tz_localize("US/Pacific")
def test_tz_localize_ambiguous_bool(self):
# make sure that we are correctly accepting bool values as ambiguous
# GH#14402
ts = Timestamp("2015-11-01 01:00:03")
        expected0 = Timestamp("2015-11-01 01:00:03-0500", tz="US/Central")
""" Metrics for seismic objects: cubes and horizons. """
from copy import copy
from textwrap import dedent
from itertools import zip_longest
from tqdm.auto import tqdm
import numpy as np
try:
import cupy as cp
CUPY_AVAILABLE = True
except ImportError:
cp = np
CUPY_AVAILABLE = False
import cv2
import pandas as pd
from ..batchflow.notifier import Notifier
from .labels import Horizon
from .utils import Accumulator, to_list
from .functional import to_device, from_device
from .functional import correlation, crosscorrelation, btch, kl, js, hellinger, tv, hilbert
from .functional import smooth_out, digitize, gridify, perturb, histo_reduce
from .plotters import plot_image
class BaseMetrics:
""" Base class for seismic metrics.
Child classes have to implement access to `data`, `probs`, `bad_traces` attributes.
"""
# pylint: disable=attribute-defined-outside-init, blacklisted-name
PLOT_DEFAULTS = {
'cmap': 'Metric',
'fill_color': 'black'
}
LOCAL_DEFAULTS = {
'kernel_size': 3,
'agg': 'nanmean',
'device': 'gpu',
'amortize': True,
}
SUPPORT_DEFAULTS = {
'supports': 100,
'safe_strip': 50,
'agg': 'nanmean',
'device': 'gpu',
'amortize': True,
}
SMOOTHING_DEFAULTS = {
'kernel_size': 21,
'sigma': 10.0,
}
EPS = 0.00001
def evaluate(self, metric, plot=False, plot_supports=False, enlarge=True, width=5, **kwargs):
""" Calculate desired metric, apply aggregation, then plot resulting metric-map.
To plot the results, set `plot` argument to True.
Parameters
----------
metric : str
Name of metric to evaluate.
enlarge : bool
Whether to apply `:meth:.Horizon.matrix_enlarge` to the result.
width : int
Widening for the metric. Works only if `enlarge` set to True.
plot : bool
Whether to use `:func:.plot_image` to show the result.
plot_supports : bool
Whether to show support traces on resulting image. Works only if `plot` set to True.
kwargs : dict
Arguments to be passed in metric-calculation methods
(see `:meth:.compute_local` and `:meth:.compute_support`),
as well as plotting arguments (see `:func:.plot_image`).
"""
if 'support' in metric:
kwargs = {**self.SUPPORT_DEFAULTS, **kwargs}
elif 'local' in metric:
kwargs = {**self.LOCAL_DEFAULTS, **kwargs}
self._last_evaluation = {**kwargs}
metric_fn = getattr(self, metric)
metric_val, plot_dict = metric_fn(**kwargs)
if cp is not np and cp.cuda.is_available():
# pylint: disable=protected-access
cp._default_memory_pool.free_all_blocks()
if hasattr(self, 'horizon') and self.horizon.is_carcass and enlarge:
metric_val = self.horizon.matrix_enlarge(metric_val, width)
if plot:
plot_dict = {**self.PLOT_DEFAULTS, **plot_dict}
figure = plot_image(metric_val, **plot_dict, return_figure=True)
if 'support' in metric and plot_supports:
support_coords = self._last_evaluation['support_coords']
figure.axes[0].scatter(support_coords[:, 0],
support_coords[:, 1], s=33, marker='.', c='blue')
# Store for debug / introspection purposes
self._last_evaluation['plot_dict'] = plot_dict
self._last_evaluation['figure'] = figure
return metric_val
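    # Typical call (sketch; the concrete metric name depends on what the subclass
    # defines, e.g. a 'support_corrs' method):
    #
    #   metrics = SomeMetrics(...)
    #   corrs_map = metrics.evaluate('support_corrs', supports=50, agg='nanmean',
    #                                plot=True, plot_supports=True)
    #
    # Keyword arguments are merged with SUPPORT_DEFAULTS / LOCAL_DEFAULTS depending on
    # whether 'support' or 'local' occurs in the metric name, and are then forwarded to
    # both the metric method and plot_image.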
def compute_local(self, function, data, bad_traces, kernel_size=3,
normalize=True, agg='mean', amortize=False, axis=0, device='cpu', pbar=None):
""" Compute metric in a local fashion, using `function` to compare nearest traces.
Under the hood, each trace is compared against its nearest neighbours in a square window
of `kernel_size` size. Results of comparisons are aggregated via `agg` function.
Works on both `cpu` (via standard `NumPy`) and GPU (with the help of `cupy` library).
The returned array is always on CPU.
Parameters
----------
function : callable
Function to compare two arrays. Must have the following signature:
`(array1, array2, std1, std2)`, where `std1` and `std2` are pre-computed standard deviations.
In order to work properly on GPU, must be device-agnostic.
data : ndarray
3D array of data to evaluate on.
bad_traces : ndarray
2D matrix of traces where the metric should not be computed.
kernel_size : int
Window size for comparison traces.
normalize : bool
Whether the data should be zero-meaned before computing metric.
agg : str
Function to aggregate values for each trace. See :class:`.Accumulator` for details.
amortize : bool
Whether the aggregation should be sequential or by stacking all the matrices.
See :class:`.Accumulator` for details.
axis : int
Axis to stack arrays on. See :class:`.Accumulator` for details.
device : str
Device specificator. Can be either string (`cpu`, `gpu:4`) or integer (`4`).
pbar : type or None
Progress bar to use.
"""
i_range, x_range = data.shape[:2]
k = kernel_size // 2 + 1
# Transfer to GPU, if needed
data = to_device(data, device)
bad_traces = to_device(bad_traces, device)
xp = cp.get_array_module(data) if CUPY_AVAILABLE else np
# Compute data statistics
data_stds = data.std(axis=-1)
bad_traces[data_stds == 0.0] = 1
if normalize:
data_n = data - data.mean(axis=-1, keepdims=True)
else:
data_n = data
# Pad everything
padded_data = xp.pad(data_n, ((0, k), (k, k), (0, 0)), constant_values=xp.nan)
padded_stds = xp.pad(data_stds, ((0, k), (k, k)), constant_values=0.0)
padded_bad_traces = xp.pad(bad_traces, k, constant_values=1)
# Compute metric by shifting arrays
total = kernel_size * kernel_size - 1
pbar = Notifier(pbar, total=total)
accumulator = Accumulator(agg=agg, amortize=amortize, axis=axis, total=total)
for i in range(k):
for j in range(-k+1, k):
# Comparison between (x, y) and (x+i, y+j) vectors is the same as comparison between (x+i, y+j)
# and (x, y). So, we can compare (x, y) with (x+i, y+j) and save computed result twice:
# matrix associated with vector (x, y) and matrix associated with (x+i, y+j) vector.
if (i == 0) and (j <= 0):
continue
shifted_data = padded_data[i:i+i_range, k+j:k+j+x_range]
shifted_stds = padded_stds[i:i+i_range, k+j:k+j+x_range]
shifted_bad_traces = padded_bad_traces[k+i:k+i+i_range, k+j:k+j+x_range]
computed = function(data, shifted_data, data_stds, shifted_stds)
# Using symmetry property:
symmetric_bad_traces = padded_bad_traces[k-i:k-i+i_range, k-j:k-j+x_range]
symmetric_computed = computed[:i_range-i, max(0, -j):min(x_range, x_range-j)]
symmetric_computed = xp.pad(symmetric_computed,
((i, 0), (max(0, j), -min(0, j))),
constant_values=xp.nan)
computed[shifted_bad_traces == 1] = xp.nan
symmetric_computed[symmetric_bad_traces == 1] = xp.nan
accumulator.update(computed)
accumulator.update(symmetric_computed)
pbar.update(2)
pbar.close()
result = accumulator.get(final=True)
return from_device(result)
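# A minimal sketch of calling `compute_local` directly (names are assumptions:
# `metrics` is an instance of a BaseMetrics subclass, `cube` is a
# (n_ilines, n_xlines, depth) array, `bad` is its bad-traces mask, and
# `correlation` is the trace-comparison function used elsewhere in this module):
#
#     local_map = metrics.compute_local(function=correlation, data=cube, bad_traces=bad,
#                                       kernel_size=5, agg='nanmean', device='cpu')
#
# Each trace is compared with the other 24 traces of its 5x5 window, and the
# comparison values are reduced to one number per trace by the `nanmean` aggregation.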
def compute_support(self, function, data, bad_traces, supports, safe_strip=0,
normalize=True, agg='mean', amortize=False, axis=0, device='cpu', pbar=None):
""" Compute metric in a support fashion, using `function` to compare all the traces
against a set of (randomly chosen or supplied) reference ones.
Results of comparisons are aggregated via `agg` function.
Works on both `cpu` (via standard `NumPy`) and GPU (with the help of `cupy` library).
The returned array is always on CPU.
Parameters
----------
function : callable
Function to compare two arrays. Must have the following signature:
`(array1, array2, std1, std2)`, where `std1` and `std2` are pre-computed standard deviations.
In order to work properly on GPU, must be device-agnostic.
data : ndarray
3D array of data to evaluate on.
bad_traces : ndarray
2D matrix of traces where the metric should not be computed.
supports : int or ndarray
If int, then number of supports to generate randomly from non-bad traces.
If ndarray, then should be of (N, 2) shape and contain coordinates of reference traces.
normalize : bool
Whether the data should be zero-meaned before computing metric.
agg : str
Function to aggregate values for each trace. See :class:`.Accumulator` for details.
amortize : bool
Whether the aggregation should be sequential or by stacking all the matrices.
See :class:`.Accumulator` for details.
axis : int
Axis to stack arrays on. See :class:`.Accumulator` for details.
device : str
Device specificator. Can be either string (`cpu`, `gpu:4`) or integer (`4`).
pbar : type or None
Progress bar to use.
"""
# Transfer to GPU, if needed
data = to_device(data, device)
bad_traces = to_device(bad_traces, device)
xp = cp.get_array_module(data) if CUPY_AVAILABLE else np
# Compute data statistics
data_stds = data.std(axis=-1)
bad_traces[data_stds == 0.0] = 1
if normalize:
data_n = data - data.mean(axis=-1, keepdims=True)
else:
data_n = data
# Generate support coordinates
if isinstance(supports, int):
if safe_strip:
bad_traces_ = bad_traces.copy()
bad_traces_[:, :safe_strip], bad_traces_[:, -safe_strip:] = 1, 1
bad_traces_[:safe_strip, :], bad_traces_[-safe_strip:, :] = 1, 1
else:
bad_traces_ = bad_traces
valid_traces = xp.where(bad_traces_ == 0)
indices = xp.random.choice(len(valid_traces[0]), supports)
support_coords = xp.asarray([valid_traces[0][indices], valid_traces[1][indices]]).T
elif isinstance(supports, (tuple, list, np.ndarray)):
support_coords = xp.asarray(supports)
# Save for plot and introspection
self._last_evaluation['support_coords'] = from_device(support_coords)
# Generate support traces
support_traces = data_n[support_coords[:, 0], support_coords[:, 1]]
support_stds = data_stds[support_coords[:, 0], support_coords[:, 1]]
# Compute metric
pbar = Notifier(pbar, total=len(support_traces))
accumulator = Accumulator(agg=agg, amortize=amortize, axis=axis, total=len(support_traces))
for i, _ in enumerate(support_traces):
computed = function(data_n, support_traces[i], data_stds, support_stds[i])
computed[bad_traces == 1] = xp.nan
accumulator.update(computed)
pbar.update()
pbar.close()
result = accumulator.get(final=True)
return from_device(result)
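# A hedged sketch of `compute_support` with explicitly supplied reference traces:
# `coords` is assumed to be an (N, 2) integer array of (iline, xline) positions,
# other names are the same assumptions as in the `compute_local` sketch above.
#
#     coords = np.array([[10, 20], [100, 200], [250, 40]])
#     support_map = metrics.compute_support(function=correlation, data=cube,
#                                           bad_traces=bad, supports=coords,
#                                           agg='nanmean', device='cpu')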
def local_corrs(self, kernel_size=3, normalize=True, agg='mean', amortize=False,
device='cpu', pbar=None, **kwargs):
""" Compute correlation in a local fashion. """
metric = self.compute_local(function=correlation, data=self.data, bad_traces=self.bad_traces,
kernel_size=kernel_size, normalize=normalize, agg=agg, amortize=amortize,
device=device, pbar=pbar)
title, plot_defaults = self.get_plot_defaults()
title = f'Local correlation, k={kernel_size}, with `{agg}` aggregation\nfor {title}'
plot_dict = {
**plot_defaults,
'title_label': title,
'zmin': -1.0, 'zmax': 1.0,
**kwargs
}
return metric, plot_dict
def support_corrs(self, supports=100, safe_strip=0, normalize=True, agg='mean', amortize=False,
device='cpu', pbar=None, **kwargs):
""" Compute correlation against reference traces. """
metric = self.compute_support(function=correlation, data=self.data, bad_traces=self.bad_traces,
supports=supports, safe_strip=safe_strip,
normalize=normalize, agg=agg, device=device, amortize=amortize,
pbar=pbar)
title, plot_defaults = self.get_plot_defaults()
n_supports = supports if isinstance(supports, int) else len(supports)
title = f'Support correlation with {n_supports} supports with `{agg}` aggregation\nfor {title}'
plot_dict = {
**plot_defaults,
'title_label': title,
'zmin': -1.0, 'zmax': 1.0,
'colorbar': True,
'bad_color': 'k',
**kwargs
}
return metric, plot_dict
def local_crosscorrs(self, kernel_size=3, normalize=False, agg='mean', amortize=False,
device='cpu', pbar=None, **kwargs):
""" Compute cross-correlation in a local fashion. """
metric = self.compute_local(function=crosscorrelation, data=self.data, bad_traces=self.bad_traces,
kernel_size=kernel_size, normalize=normalize, agg=agg, amortize=amortize,
device=device, pbar=pbar)
zvalue = np.nanquantile(np.abs(metric), 0.98).astype(np.int32)
title, plot_defaults = self.get_plot_defaults()
title = f'Local cross-correlation, k={kernel_size}, with `{agg}` aggregation\nfor {title}'
plot_dict = {
**plot_defaults,
'title_label': title,
'cmap': 'seismic_r',
'zmin': -zvalue, 'zmax': zvalue,
**kwargs
}
return metric, plot_dict
def support_crosscorrs(self, supports=100, safe_strip=0, normalize=False, agg='mean', amortize=False,
device='cpu', pbar=None, **kwargs):
""" Compute cross-correlation against reference traces. """
metric = self.compute_support(function=crosscorrelation, data=self.data, bad_traces=self.bad_traces,
supports=supports, safe_strip=safe_strip,
normalize=normalize, agg=agg, amortize=amortize, device=device, pbar=pbar)
zvalue = np.nanquantile(np.abs(metric), 0.98).astype(np.int32)
title, plot_defaults = self.get_plot_defaults()
n_supports = supports if isinstance(supports, int) else len(supports)
title = f'Support cross-correlation with {n_supports} supports with `{agg}` aggregation\nfor {title}'
plot_dict = {
**plot_defaults,
'title_label': title,
'cmap': 'seismic_r',
'zmin': -zvalue, 'zmax': zvalue,
**kwargs
}
return metric, plot_dict
def local_btch(self, kernel_size=3, normalize=False, agg='mean', amortize=False,
device='cpu', pbar=None, **kwargs):
""" Compute Bhattacharyya divergence in a local fashion. """
metric = self.compute_local(function=btch, data=self.probs, bad_traces=self.bad_traces,
kernel_size=kernel_size, normalize=normalize, agg=agg, amortize=amortize,
device=device, pbar=pbar)
title, plot_defaults = self.get_plot_defaults()
title = f'Local Bhattacharyya divergence, k={kernel_size}, with `{agg}` aggregation\nfor {title}'
plot_dict = {
**plot_defaults,
'title_label': title,
'zmin': 0.0, 'zmax': 1.0,
**kwargs
}
return metric, plot_dict
def support_btch(self, supports=100, safe_strip=0, normalize=False, agg='mean', amortize=False,
device='cpu', pbar=None, **kwargs):
""" Compute Bhattacharyya divergence against reference traces. """
metric = self.compute_support(function=btch, data=self.probs, bad_traces=self.bad_traces,
supports=supports, safe_strip=safe_strip,
normalize=normalize, agg=agg, amortize=amortize, device=device, pbar=pbar)
title, plot_defaults = self.get_plot_defaults()
n_supports = supports if isinstance(supports, int) else len(supports)
title = f'Support Bhattacharyya divergence with {n_supports} supports with `{agg}` aggregation\nfor {title}'
plot_dict = {
**plot_defaults,
'title_label': title,
'zmin': 0.0, 'zmax': 1.0,
**kwargs
}
return metric, plot_dict
def local_kl(self, kernel_size=3, normalize=False, agg='mean', amortize=False,
device='cpu', pbar=None, **kwargs):
""" Compute Kullback-Leibler divergence in a local fashion. """
metric = self.compute_local(function=kl, data=self.probs, bad_traces=self.bad_traces,
kernel_size=kernel_size, normalize=normalize, agg=agg, amortize=amortize,
device=device, pbar=pbar)
title, plot_defaults = self.get_plot_defaults()
title = f'Local KL divergence, k={kernel_size}, with `{agg}` aggregation\nfor {title}'
plot_dict = {
**plot_defaults,
'title_label': title,
'zmin': None, 'zmax': None,
**kwargs
}
return metric, plot_dict
def support_kl(self, supports=100, safe_strip=0, normalize=False, agg='mean', amortize=False,
device='cpu', pbar=None, **kwargs):
""" Compute Kullback-Leibler divergence against reference traces. """
metric = self.compute_support(function=kl, data=self.probs, bad_traces=self.bad_traces,
supports=supports, safe_strip=safe_strip,
normalize=normalize, agg=agg, amortize=amortize,
device=device, pbar=pbar)
title, plot_defaults = self.get_plot_defaults()
n_supports = supports if isinstance(supports, int) else len(supports)
title = f'Support KL divergence with {n_supports} supports with `{agg}` aggregation\nfor {title}'
plot_dict = {
**plot_defaults,
'title_label': title,
'zmin': None, 'zmax': None,
**kwargs
}
return metric, plot_dict
def local_js(self, kernel_size=3, normalize=False, agg='mean', amortize=False, device='cpu', pbar=None, **kwargs):
""" Compute Jensen-Shannon divergence in a local fashion. """
metric = self.compute_local(function=js, data=self.probs, bad_traces=self.bad_traces,
kernel_size=kernel_size, normalize=normalize, agg=agg, amortize=amortize,
device=device, pbar=pbar)
title, plot_defaults = self.get_plot_defaults()
title = f'Local JS divergence, k={kernel_size}, with `{agg}` aggregation\nfor {title}'
plot_dict = {
**plot_defaults,
'title_label': title,
'zmin': None, 'zmax': None,
**kwargs
}
return metric, plot_dict
def support_js(self, supports=100, safe_strip=0, normalize=False, agg='mean', amortize=False,
device='cpu', pbar=None, **kwargs):
""" Compute Jensen-Shannon divergence against reference traces. """
metric = self.compute_support(function=js, data=self.probs, bad_traces=self.bad_traces,
supports=supports, safe_strip=safe_strip,
normalize=normalize, agg=agg, amortize=amortize,
device=device, pbar=pbar)
title, plot_defaults = self.get_plot_defaults()
n_supports = supports if isinstance(supports, int) else len(supports)
title = f'Support JS divergence with {n_supports} supports with `{agg}` aggregation\nfor {title}'
plot_dict = {
**plot_defaults,
'title_label': title,
'zmin': None, 'zmax': None,
**kwargs
}
return metric, plot_dict
def local_hellinger(self, kernel_size=3, normalize=False, agg='mean', amortize=False,
device='cpu', pbar=None, **kwargs):
""" Compute Hellinger distance in a local fashion. """
metric = self.compute_local(function=hellinger, data=self.probs, bad_traces=self.bad_traces,
kernel_size=kernel_size, normalize=normalize, agg=agg, amortize=amortize,
device=device, pbar=pbar)
title, plot_defaults = self.get_plot_defaults()
title = f'Local Hellinger distance, k={kernel_size}, with `{agg}` aggregation\nfor {title}'
plot_dict = {
**plot_defaults,
'title_label': title,
'zmin': None, 'zmax': None,
**kwargs
}
return metric, plot_dict
def support_hellinger(self, supports=100, safe_strip=0, normalize=False, agg='mean', amortize=False,
device='cpu', pbar=None, **kwargs):
""" Compute Hellinger distance against reference traces. """
metric = self.compute_support(function=hellinger, data=self.probs, bad_traces=self.bad_traces,
supports=supports, safe_strip=safe_strip,
normalize=normalize, agg=agg, amortize=amortize,
device=device, pbar=pbar)
title, plot_defaults = self.get_plot_defaults()
n_supports = supports if isinstance(supports, int) else len(supports)
title = f'Support Hellinger distance with {n_supports} supports with `{agg}` aggregation\nfor {title}'
plot_dict = {
**plot_defaults,
'title_label': title,
'zmin': None, 'zmax': None,
**kwargs
}
return metric, plot_dict
def local_tv(self, kernel_size=3, normalize=False, agg='mean', amortize=False, device='cpu', pbar=None, **kwargs):
""" Compute total variation in a local fashion. """
metric = self.compute_local(function=tv, data=self.probs, bad_traces=self.bad_traces,
kernel_size=kernel_size, normalize=normalize, agg=agg, amortize=amortize,
device=device, pbar=pbar)
title, plot_defaults = self.get_plot_defaults()
title = f'Local total variation, k={kernel_size}, with `{agg}` aggregation\nfor {title}'
plot_dict = {
**plot_defaults,
'title_label': title,
'zmin': None, 'zmax': None,
**kwargs
}
return metric, plot_dict
def support_tv(self, supports=100, safe_strip=0, normalize=False, agg='mean', amortize=False,
device='cpu', pbar=None, **kwargs):
""" Compute total variation against reference traces. """
metric = self.compute_support(function=tv, data=self.probs, bad_traces=self.bad_traces,
supports=supports, safe_strip=safe_strip,
normalize=normalize, agg=agg, amortize=amortize,
device=device, pbar=pbar)
title, plot_defaults = self.get_plot_defaults()
n_supports = supports if isinstance(supports, int) else len(supports)
title = f'Support total variation with {n_supports} supports with `{agg}` aggregation\nfor {title}'
plot_dict = {
**plot_defaults,
'title_label': title,
'zmin': None, 'zmax': None,
**kwargs
}
return metric, plot_dict
class HorizonMetrics(BaseMetrics):
""" Evaluate metric(s) on horizon(s).
During initialization, data along the horizon is cut with the desired parameters.
To get the value of a particular metric, use :meth:`.evaluate`::
HorizonMetrics(horizon).evaluate('support_corrs', supports=20, agg='mean')
To plot the results, set `plot` argument of :meth:`.evaluate` to True.
Parameters
----------
horizons : :class:`.Horizon` or sequence of :class:`.Horizon`
Horizon(s) to evaluate.
Can be either one horizon, then this horizon is evaluated on its own,
or sequence of two horizons, then they are compared against each other,
or nested sequence of horizon and list of horizons, then the first horizon is compared against the
best match from the list.
other parameters
Passed directly to :meth:`.Horizon.get_cube_values` or :meth:`.Horizon.get_cube_values_line`.
"""
AVAILABLE_METRICS = [
'local_corrs', 'support_corrs',
'local_btch', 'support_btch',
'local_kl', 'support_kl',
'local_js', 'support_js',
'local_hellinger', 'support_hellinger',
'local_tv', 'support_tv',
'instantaneous_phase',
]
def __init__(self, horizons, window=23, offset=0, normalize=False, chunk_size=256):
super().__init__()
horizons = list(horizons) if isinstance(horizons, tuple) else horizons
horizons = horizons if isinstance(horizons, list) else [horizons]
self.horizons = horizons
# Save parameters for later evaluation
self.window, self.offset, self.normalize, self.chunk_size = window, offset, normalize, chunk_size
# The first horizon is used to evaluate metrics
self.horizon = horizons[0]
self.name = self.horizon.short_name
# Properties
self._data = None
self._probs = None
self._bad_traces = None
def get_plot_defaults(self):
""" Axis labels and horizon/cube names in the title. """
title = f'horizon `{self.name}` on cube `{self.horizon.field.displayed_name}`'
return title, {
'xlabel': self.horizon.field.axis_names[0],
'ylabel': self.horizon.field.axis_names[1],
}
@property
def data(self):
""" Create `data` attribute at the first time of evaluation. """
if self._data is None:
self._data = self.horizon.get_cube_values(window=self.window, offset=self.offset,
chunk_size=self.chunk_size)
self._data[self._data == Horizon.FILL_VALUE] = np.nan
return self._data
@property
def probs(self):
""" Probabilistic interpretation of `data`. """
if self._probs is None:
hist_matrix = histo_reduce(self.data, self.horizon.field.bins)
self._probs = hist_matrix / np.sum(hist_matrix, axis=-1, keepdims=True) + self.EPS
return self._probs
@property
def bad_traces(self):
""" Traces to fill with `nan` values. """
if self._bad_traces is None:
self._bad_traces = self.horizon.field.zero_traces.copy()
self._bad_traces[self.horizon.full_matrix == Horizon.FILL_VALUE] = 1
return self._bad_traces
def perturbed(self, n=5, scale=2.0, clip=3, window=None, kernel_size=3, agg='nanmean', device='cpu', **kwargs):
""" Evaluate horizon by:
- compute the `local_corrs` metric
- perturb the horizon `n` times by random shifts, generated from normal
distribution of `scale` std and clipping of `clip` size
- compute the `local_corrs` metric for each of the perturbed horizons
- get a mean and max value of those metrics: they correspond to the `averagely shifted` and
`best generated shifts` horizons
- use the difference between the horizon metric and the mean/max metrics of the perturbed horizons as the final assessment maps
Parameters
----------
n : int
Number of perturbed horizons to generate.
scale : number
Standard deviation (spread or “width”) of the distribution. Must be non-negative.
clip : number
Maximum size of allowed shifts
window : int or None
Size of the data along the height axis to evaluate perturbed horizons.
Note that due to shifts, it must be smaller than the original data by at least 2 * `clip` units.
kernel_size, agg, device
Parameters of individual metric evaluation
"""
w = self.data.shape[2]
window = window or w - 2 * clip - 1
# Compute metrics for multiple perturbed horizons: generate shifts, apply them to data,
# evaluate metric on the produced array
acc_mean = Accumulator('nanmean')
acc_max = Accumulator('nanmax')
for _ in range(n):
shifts = np.random.normal(scale=scale, size=self.data.shape[:2])
shifts = np.rint(shifts).astype(np.int32)
shifts = np.clip(shifts, -clip, clip)
shifts[self.horizon.full_matrix == self.horizon.FILL_VALUE] = 0
pb = perturb(self.data, shifts, window)
pb_metric = self.compute_local(function=correlation, data=pb, bad_traces=self.bad_traces,
kernel_size=kernel_size, normalize=True, agg=agg, device=device)
acc_mean.update(pb_metric)
acc_max.update(pb_metric)
pb_mean = acc_mean.get(final=True)
pb_max = acc_max.get(final=True)
# Subtract mean/max maps from the horizon metric
horizon_metric = self.compute_local(function=correlation, data=self.data, bad_traces=self.bad_traces,
kernel_size=kernel_size, normalize=True, agg=agg, device=device)
diff_mean = horizon_metric - pb_mean
diff_max = horizon_metric - pb_max
title, plot_defaults = self.get_plot_defaults()
title = f'Perturbed metrics\nfor {title}'
plot_dict = {
**plot_defaults,
'figsize': (20, 7),
'separate': True,
'suptitle_label': title,
'title_label': ['mean', 'max'],
'cmap': 'Reds_r',
'zmin': [0.0, -0.5], 'zmax': 0.5,
**kwargs
}
return (diff_mean, diff_max), plot_dict
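# Hedged example of the perturbation-based assessment (variable names are assumptions):
#
#     (diff_mean, diff_max), plot_dict = HorizonMetrics(horizon).perturbed(
#         n=10, scale=1.5, clip=3, kernel_size=3, agg='nanmean')
#
# Values close to zero mean that random shifts barely degrade the local correlation,
# i.e. the horizon is not noticeably better than a slightly moved copy of itself.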
def instantaneous_phase(self, device='cpu', **kwargs):
""" Compute instantaneous phase via Hilbert transform. """
#pylint: disable=unexpected-keyword-arg
# Transfer to GPU, if needed
data = to_device(self.data, device)
xp = cp.get_array_module(data) if CUPY_AVAILABLE else np
# Compute hilbert transform and scale to 2pi range
analytic = hilbert(data, axis=2)
phase = xp.angle(analytic)
phase_slice = phase[:, :, phase.shape[-1] // 2]
phase_slice[np.isnan(xp.std(data, axis=-1))] = xp.nan
phase_slice[self.horizon.full_matrix == self.horizon.FILL_VALUE] = xp.nan
# Evaluate mode value
values = phase_slice[~xp.isnan(phase_slice)].round(2)
uniques, counts = xp.unique(values, return_counts=True)
# 3rd most frequent value is chosen to skip the first two (they are highly likely -pi/2 and pi/2)
mode = uniques[xp.argpartition(counts, -3)[-3]]
shifted_slice = phase_slice - mode
shifted_slice[shifted_slice >= xp.pi] -= 2 * xp.pi
# Re-norm so that mode value is at zero point
if xp.nanmin(shifted_slice) < -xp.pi:
shifted_slice[shifted_slice < -xp.pi] += 2 * xp.pi
if xp.nanmax(shifted_slice) > xp.pi:
shifted_slice[shifted_slice > xp.pi] -= 2 * xp.pi
title, plot_defaults = self.get_plot_defaults()
title = f'Instantaneous phase\nfor {title}'
plot_dict = {
**plot_defaults,
'title_label': title,
'cmap': 'seismic',
'zmin': -np.pi, 'zmax': np.pi,
'colorbar': True,
'bad_color': 'k',
**kwargs
}
return from_device(shifted_slice), plot_dict
def find_best_match(self, offset=0, **kwargs):
""" Find the closest horizon to the first one in the list of passed at initialization. """
_ = kwargs
if isinstance(self.horizons[1], Horizon):
self.horizons[1] = [self.horizons[1]]
lst = []
for horizon in self.horizons[1]:
if horizon.field.name == self.horizon.field.name:
overlap_info = Horizon.check_proximity(self.horizon, horizon, offset=offset)
lst.append((horizon, overlap_info))
lst.sort(key=lambda x: abs(x[1].get('mean', 999999)))
other, overlap_info = lst[0]
return (other, overlap_info), {} # actual return + fake plot dict
def compare(self, offset=0, absolute=True, hist=True, printer=print, **kwargs):
""" Compare horizons on against the best match from the list of horizons.
Parameters
----------
offset : number
Value to shift horizon down. Can be used to take into account different counting bases.
absolute : bool
Whether to use absolute values for differences.
hist : bool
Whether to plot histogram of differences.
printer : callable
Function to print results, for example `print` or any other callable that can log data.
"""
if len(self.horizons) != 2:
raise ValueError('Can compare two horizons exactly or one to the best match from list of horizons. ')
_ = kwargs
(other, oinfo), _ = self.find_best_match(offset=offset)
self_full_matrix = self.horizon.full_matrix
other_full_matrix = other.full_matrix
metric = np.where((self_full_matrix != other.FILL_VALUE) & (other_full_matrix != other.FILL_VALUE),
offset + self_full_matrix - other_full_matrix, np.nan)
if absolute:
metric = np.abs(metric)
at_1 = len(np.asarray((self_full_matrix != other.FILL_VALUE) &
(other_full_matrix == other.FILL_VALUE)).nonzero()[0])
at_2 = len(np.asarray((self_full_matrix == other.FILL_VALUE) &
(other_full_matrix != other.FILL_VALUE)).nonzero()[0])
if printer is not None:
msg = f"""
Comparing horizons:
{self.horizon.name.rjust(45)}
{other.name.rjust(45)}
{'—'*45}
Rate in 5ms: {oinfo['window_rate']:8.3f}
Mean/std of errors: {oinfo['mean']:4.2f} / {oinfo['std']:4.2f}
Mean/std of abs errors: {oinfo['abs_mean']:4.2f} / {oinfo['abs_std']:4.2f}
Max error/abs error: {oinfo['max']:4} / {oinfo['abs_max']:4}
{'—'*45}
Lengths of horizons: {len(self.horizon):8}
{len(other):8}
{'—'*45}
Average heights of horizons: {(offset + self.horizon.h_mean):8.2f}
{other.h_mean:8.2f}
{'—'*45}
Coverage of horizons: {self.horizon.coverage:8.4f}
{other.coverage:8.4f}
{'—'*45}
Solidity of horizons: {self.horizon.solidity:8.4f}
{other.solidity:8.4f}
{'—'*45}
Number of holes in horizons: {self.horizon.number_of_holes:8}
{other.number_of_holes:8}
{'—'*45}
Additional traces labeled: {at_1:8}
(present in one, absent in other) {at_2:8}
{'—'*45}
"""
printer(dedent(msg))
if hist:
hist_dict = {
'bins': 100,
'xlabel': 'l1-values',
'ylabel': 'N',
'title_label': 'Histogram of l1 differences',
}
plot_image(metric, mode='hist', **hist_dict)
title = f'Height differences between {self.horizon.name} and {other.name}'
plot_dict = {
'spatial': True,
'title_label': f'{title} on cube {self.horizon.field.displayed_name}',
'cmap': 'Reds',
'zmin': 0, 'zmax': np.nanmax(metric),
'ignore_value': np.nan,
'xlabel': 'INLINE_3D', 'ylabel': 'CROSSLINE_3D',
'bad_color': 'black',
'colorbar': True,
**kwargs
}
return metric, plot_dict
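# Hedged usage sketch: comparing a horizon against a list of candidates
# (`horizon` and `candidates` are assumed to be already loaded Horizon objects
# belonging to the same cube):
#
#     diff_map, plot_dict = HorizonMetrics((horizon, candidates)).compare(
#         offset=0, absolute=True, hist=True, printer=print)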
class GeometryMetrics(BaseMetrics):
""" Metrics to asses cube quality. """
AVAILABLE_METRICS = [
'local_corrs', 'support_corrs',
'local_btch', 'support_btch',
'local_kl', 'support_kl',
'local_js', 'support_js',
'local_hellinger', 'support_hellinger',
'local_tv', 'support_tv',
]
def __init__(self, geometries):
super().__init__()
geometries = list(geometries) if isinstance(geometries, tuple) else geometries
geometries = geometries if isinstance(geometries, list) else [geometries]
self.geometries = geometries
self.geometry = geometries[0]
self._data = None
self._probs = None
self._bad_traces = None
self.name = 'hist_matrix'
def get_plot_defaults(self):
""" Axis labels and horizon/cube names in the title. """
title = f'`{self.name}` on cube `{self.geometry.displayed_name}`'
return title, {
'xlabel': self.geometry.axis_names[0],
'ylabel': self.geometry.axis_names[1],
}
@property
def data(self):
""" Histogram of values for every trace in the cube. """
if self._data is None:
self._data = self.geometry.hist_matrix
return self._data
@property
def bad_traces(self):
""" Traces to exclude from metric evaluations: bad traces are marked with `1`s. """
if self._bad_traces is None:
self._bad_traces = self.geometry.zero_traces
self._bad_traces[self.data.max(axis=-1) == self.data.sum(axis=-1)] = 1
return self._bad_traces
@property
def probs(self):
""" Probabilistic interpretation of `data`. """
if self._probs is None:
self._probs = self.data / np.sum(self.data, axis=-1, keepdims=True) + self.EPS
return self._probs
def quality_map(self, quantiles, metric_names=None, computed_metrics=None,
agg='mean', amortize=False, axis=0, apply_smoothing=False,
smoothing_params=None, local_params=None, support_params=None, **kwargs):
""" Create a quality map based on number of metrics.
Parameters
----------
quantiles : sequence of floats
Quantiles for computing hardness thresholds. Must be in the (0, 1) range.
metric_names : sequence of str
Which metrics to use to assess the hardness of data.
agg : str
Function to reduce multiple metrics into one spatial map.
smoothing_params, local_params, support_params : dicts
Additional parameters for smoothing, local metrics and support metrics.
"""
_ = kwargs
computed_metrics = computed_metrics or []
smoothing_params = smoothing_params or self.SMOOTHING_DEFAULTS
local_params = local_params or self.LOCAL_DEFAULTS
support_params = support_params or self.SUPPORT_DEFAULTS
smoothing_params = {**self.SMOOTHING_DEFAULTS, **smoothing_params, **kwargs}
local_params = {**self.LOCAL_DEFAULTS, **local_params, **kwargs}
support_params = {**self.SUPPORT_DEFAULTS, **support_params, **kwargs}
if metric_names:
for metric_name in metric_names:
if 'local' in metric_name:
kwds = copy(local_params)
elif 'support' in metric_name:
kwds = copy(support_params)
metric = self.evaluate(metric_name, plot=False, **kwds)
computed_metrics.append(metric)
accumulator = Accumulator(agg=agg, amortize=amortize, axis=axis)
for metric_matrix in computed_metrics:
if apply_smoothing:
metric_matrix = smooth_out(metric_matrix, **smoothing_params)
digitized = digitize(metric_matrix, quantiles)
accumulator.update(digitized)
quality_map = accumulator.get(final=True)
if apply_smoothing:
quality_map = smooth_out(quality_map, **smoothing_params)
title, plot_defaults = self.get_plot_defaults()
plot_dict = {
**plot_defaults,
'title_label': f'Quality map for {title}',
'cmap': 'Reds',
'zmin': 0.0, 'zmax': np.nanmax(quality_map),
**kwargs
}
return quality_map, plot_dict
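# Hedged sketch of building a quality map for a cube geometry (the variable
# `geometry` and the particular metric choice are assumptions for illustration only):
#
#     gm = GeometryMetrics(geometry)
#     qmap, plot_dict = gm.quality_map(quantiles=[0.1, 0.15, 0.25, 0.35, 0.5],
#                                      metric_names=['support_js', 'support_hellinger'],
#                                      apply_smoothing=True)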
def make_grid(self, quality_map, frequencies, iline=True, xline=True, full_lines=True, margin=0, **kwargs):
""" Create grid with various frequencies based on quality map. """
_ = kwargs
if margin:
bad_traces = np.copy(self.geometry.zero_traces)
bad_traces[:, 0] = 1
bad_traces[:, -1] = 1
bad_traces[0, :] = 1
bad_traces[-1, :] = 1
kernel = np.ones((2 + 2*margin, 2 + 2*margin), dtype=np.uint8)
bad_traces = cv2.dilate(bad_traces.astype(np.uint8), kernel, iterations=1).astype(bad_traces.dtype)
quality_map[(bad_traces - self.geometry.zero_traces) == 1] = 0.0
pre_grid = np.rint(quality_map)
grid = gridify(pre_grid, frequencies, iline, xline, full_lines)
if margin:
grid[(bad_traces - self.geometry.zero_traces) == 1] = 0
return grid
def tracewise(self, func, l=3, pbar=True, **kwargs):
""" Apply `func` to compare two cubes tracewise. """
pbar = tqdm if pbar else lambda iterator, *args, **kwargs: iterator
metric = np.full((*self.geometry.spatial_shape, l), np.nan)
indices = [geometry.dataframe['trace_index'] for geometry in self.geometries]
for idx, _ in pbar(indices[0].iteritems(), total=len(indices[0])):
trace_indices = [ind[idx] for ind in indices]
header = self.geometries[0].segyfile.header[trace_indices[0]]
keys = [header.get(field) for field in self.geometries[0].byte_no]
store_key = [self.geometries[0].uniques_inversed[i][item] for i, item in enumerate(keys)]
store_key = tuple(store_key)
traces = [geometry.load_trace(trace_index) for
geometry, trace_index in zip(self.geometries, trace_indices)]
metric[store_key] = func(*traces, **kwargs)
title = f"tracewise {func}"
plot_dict = {
'title_label': f'{title} for `{self.name}` on cube `{self.geometry.displayed_name}`',
'cmap': 'seismic',
'zmin': None, 'zmax': None,
'ignore_value': np.nan,
'xlabel': 'INLINE_3D', 'ylabel': 'CROSSLINE_3D',
**kwargs
}
return metric, plot_dict
def tracewise_unsafe(self, func, l=3, pbar=True, **kwargs):
""" Apply `func` to compare two cubes tracewise in an unsafe way:
structure of cubes is assumed to be identical.
"""
pbar = tqdm if pbar else lambda iterator, *args, **kwargs: iterator
metric = np.full((*self.geometry.spatial_shape, l), np.nan)
for idx in pbar(range(len(self.geometries[0].dataframe))):
header = self.geometries[0].segyfile.header[idx]
keys = [header.get(field) for field in self.geometries[0].byte_no]
store_key = [self.geometries[0].uniques_inversed[i][item] for i, item in enumerate(keys)]
store_key = tuple(store_key)
traces = [geometry.load_trace(idx) for geometry in self.geometries]
metric[store_key] = func(*traces, **kwargs)
title = f"tracewise unsafe {func}"
plot_dict = {
'title_label': f'{title} for {self.name} on cube {self.geometry.displayed_name}',
'cmap': 'seismic',
'zmin': None, 'zmax': None,
'ignore_value': np.nan,
'xlabel': 'INLINE_3D', 'ylabel': 'CROSSLINE_3D',
**kwargs
}
return metric, plot_dict
def blockwise(self, func, l=3, pbar=True, kernel=(5, 5), block_size=(1000, 1000),
heights=None, prep_func=None, **kwargs):
""" Apply function to all traces in lateral window """
window = np.array(kernel)
low = window // 2
high = window - low
total = np.prod(self.geometries[0].lens - window)
prep_func = prep_func if prep_func else lambda x: x
pbar = tqdm if pbar else lambda iterator, *args, **kwargs: iterator
metric = np.full((*self.geometries[0].lens, l), np.nan)
heights = slice(0, self.geometries[0].depth) if heights is None else slice(*heights)
with pbar(total=total) as prog_bar:
for il_block in np.arange(0, self.geometries[0].cube_shape[0], block_size[0]-window[0]):
for xl_block in np.arange(0, self.geometries[0].cube_shape[1], block_size[1]-window[1]):
block_len = np.min((np.array(self.geometries[0].lens) - (il_block, xl_block),
block_size), axis=0)
locations = [slice(il_block, il_block + block_len[0]),
slice(xl_block, xl_block + block_len[1]),
heights]
blocks = [prep_func(geometry.load_crop(locations)) for geometry in self.geometries]
for il_kernel in range(low[0], blocks[0].shape[0] - high[0]):
for xl_kernel in range(low[1], blocks[0].shape[1] - high[1]):
il_from, il_to = il_kernel - low[0], il_kernel + high[0]
xl_from, xl_to = xl_kernel - low[1], xl_kernel + high[1]
subsets = [b[il_from:il_to, xl_from:xl_to, :].reshape((-1, b.shape[-1])) for b in blocks]
metric[il_block + il_kernel, xl_block + xl_kernel, :] = func(*subsets, **kwargs)
prog_bar.update(1)
title = f"Blockwise {func}"
plot_dict = {
'title_label': f'{title} for {self.name} on cube {self.geometry.displayed_name}',
'cmap': 'seismic',
'zmin': None, 'zmax': None,
'ignore_value': np.nan,
**kwargs
}
return metric, plot_dict
class FaultsMetrics:
""" Faults metric class. """
SHIFTS = [-20, -15, -5, 5, 15, 20]
def similarity_metric(self, semblance, masks, threshold=None):
""" Compute similarity metric for faults mask. """
if threshold:
masks = masks > threshold
if semblance.ndim == 2:
semblance = np.expand_dims(semblance, axis=0)
if semblance.ndim == 3:
semblance = np.expand_dims(semblance, axis=0)
if masks.ndim == 2:
masks = np.expand_dims(masks, axis=0)
if masks.ndim == 3:
masks = np.expand_dims(masks, axis=0)
res = []
m = self.sum_with_axes(masks * (1 - semblance), axes=[1,2,3])
weights = np.ones((len(self.SHIFTS), 1))
weights = weights / weights.sum()
for i in self.SHIFTS:
random_mask = self.make_shift(masks, shift=i)
rm = self.sum_with_axes(random_mask * (1 - semblance), axes=[1,2,3])
ratio = m/rm
res += [np.log(ratio)]
res = np.stack(res, axis=0)
res = (res * weights).sum(axis=0)
res = np.clip(res, -2, 2)
return res
def sum_with_axes(self, array, axes=None):
""" Sum for several axes. """
if axes is None:
return array.sum()
if isinstance(axes, int):
axes = [axes]
res = array
axes = sorted(axes)
for i, axis in enumerate(axes):
res = res.sum(axis=axis-i)
return res
def make_shift(self, array, shift=20):
""" Make shifts for mask. """
result = np.zeros_like(array)
for i, _array in enumerate(array):
if shift > 0:
result[i][:, shift:] = _array[:, :-shift]
elif shift < 0:
result[i][:, :shift] = _array[:, -shift:]
else:
result[i] = _array
return result
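# Hedged sketch of the faults similarity metric: `semblance` and `masks` are
# assumed to be arrays of matching spatial shape with values in [0, 1].
#
#     fm = FaultsMetrics()
#     score = fm.similarity_metric(semblance, masks, threshold=0.5)
#
# The score is the log-ratio between the mask's overlap with low-semblance regions
# and the same overlap for laterally shifted copies of the mask, averaged over
# SHIFTS and clipped to [-2, 2].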
class FaciesMetrics():
""" Evaluate facies metrics.
To get the value of a particular metric, use :meth:`.evaluate`::
FaciesMetrics(horizon, true_label, pred_label).evaluate('dice')
Parameters
----------
horizons : :class:`.Horizon` or sequence of :class:`.Horizon`
Horizon(s) to use as base labels that contain facies.
true_labels : :class:`.Horizon` or sequence of :class:`.Horizon`
Facies to use as ground-truth labels.
pred_labels : :class:`.Horizon` or sequence of :class:`.Horizon`
Horizon(s) to use as predictions labels.
"""
def __init__(self, horizons, true_labels=None, pred_labels=None):
self.horizons = to_list(horizons)
self.true_labels = to_list(true_labels or [])
self.pred_labels = to_list(pred_labels or [])
@staticmethod
def true_positive(true, pred):
""" Calculate correctly classified facies pixels. """
return np.sum(true * pred)
@staticmethod
def true_negative(true, pred):
""" Calculate correctly classified non-facies pixels. """
return np.sum((1 - true) * (1 - pred))
@staticmethod
def false_positive(true, pred):
""" Calculate misclassified facies pixels. """
return np.sum((1 - true) * pred)
@staticmethod
def false_negative(true, pred):
""" Calculate misclassified non-facies pixels. """
return np.sum(true * (1 - pred))
def sensitivity(self, true, pred):
""" Calculate ratio of correctly classified facies points to ground-truth facies points. """
tp = self.true_positive(true, pred)
fn = self.false_negative(true, pred)
return tp / (tp + fn)
def specificity(self, true, pred):
""" Calculate ratio of correctly classified non-facies points to ground-truth non-facies points. """
tn = self.true_negative(true, pred)
fp = self.false_positive(true, pred)
return tn / (tn + fp)
def dice(self, true, pred):
""" Calculate the similarity of ground-truth facies mask and preditcted facies mask. """
tp = self.true_positive(true, pred)
fp = self.false_positive(true, pred)
fn = self.false_negative(true, pred)
return 2 * tp / (2 * tp + fp + fn)
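# Worked toy example of the confusion-matrix metrics above (values are illustrative only):
# for true = [1, 1, 0, 0] and pred = [1, 0, 1, 0] we get tp = 1, tn = 1, fp = 1, fn = 1,
# hence sensitivity = 1 / (1 + 1) = 0.5, specificity = 1 / (1 + 1) = 0.5 and
# dice = 2 * 1 / (2 * 1 + 1 + 1) = 0.5.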
def evaluate(self, metrics):
""" Calculate desired metric and return a dataframe of results.
Parameters
----------
metrics : str or list of str
Name of metric(s) to evaluate.
"""
metrics = [getattr(self, fn) for fn in to_list(metrics)]
names = [fn.__name__ for fn in metrics]
rows = []
for horizon, true_label, pred_label in zip_longest(self.horizons, self.true_labels, self.pred_labels):
kwargs = {}
if true_label is not None:
true = true_label.load_attribute('masks', fill_value=0)
true = true[horizon.presence_matrix]
kwargs['true'] = true
if pred_label is not None:
pred = pred_label.load_attribute('masks', fill_value=0)
pred = pred[horizon.presence_matrix]
kwargs['pred'] = pred
values = [fn(**kwargs) for fn in metrics]
index = pd.MultiIndex.from_arrays([[horizon.field.displayed_name], [horizon.short_name]],
names=['field_name', 'horizon_name'])
data = dict(zip(names, values))
row = pd.DataFrame(index=index, data=data)
rows.append(row)
df = pd.concat(rows)
return df
#TODO:
import tensorflow as tf
import os
import argparse
import sys
import random
import math
import logging
import operator
import itertools
import datetime
import numpy as np
import pandas as pd
from csv import reader
from random import randrange
FLAGS = None
#FORMAT = '%(asctime)s %(levelname)s %(message)s'
#logging.basicConfig(format=FORMAT)
#logger = logging.getLogger('tensorflow')
logger = logging.getLogger('tensorflow')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.removeHandler(logger.handlers[0])
logger.propagate = False
def sales_example(sales):
record = {
'sales': tf.train.Feature(float_list=tf.train.FloatList(value=sales))
}
return tf.train.Example(features=tf.train.Features(feature=record))
def capacity_example(capacity):
record = {
'capacity': tf.train.Feature(float_list=tf.train.FloatList(value=capacity))
}
return tf.train.Example(features=tf.train.Features(feature=record))
def stock_example(stock):
record = {
'stock': tf.train.Feature(float_list=tf.train.FloatList(value=stock))
}
return tf.train.Example(features=tf.train.Features(feature=record))
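# Hedged sketch of reading one of these records back: the feature length must match
# what was written (here, `number_of_products` values), and the variable names are
# assumptions for illustration only.
#
#     feature_spec = {'stock': tf.io.FixedLenFeature([number_of_products], tf.float32)}
#     dataset = tf.data.TFRecordDataset(stock_tfrecords_file)
#     parsed = dataset.map(lambda x: tf.io.parse_single_example(x, feature_spec))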
#https://stackoverflow.com/questions/553303/generate-a-random-date-between-two-other-dates
def random_date(start, end):
return start + datetime.timedelta(
seconds=random.randint(0, int((end - start).total_seconds())),
)
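# Illustrative example (an assumption, not used below): a random datetime between
# Jan 1 and Dec 31, 2019.
#
#     d = random_date(datetime.datetime(2019, 1, 1), datetime.datetime(2019, 12, 31))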
def create_records(number_of_products, start_date, end_date, start_time_period, middle_time_period, end_time_period, orders_file, products_file, departments_file, order_products_prior_file, order_products_train_file, train_tfrecords_file, test_tfrecords_file, capacity_tfrecords_file, stock_tfrecords_file):
stock = np.random.uniform(low=0.0, high=1.0, size=(FLAGS.number_of_products))
with tf.io.TFRecordWriter(stock_tfrecords_file) as writer:
logger.debug ("stock: {}".format(stock))
tf_example = stock_example(stock)
writer.write(tf_example.SerializeToString())
with open(orders_file, 'r') as f:
csv_reader = reader(f)
next(csv_reader)
orders_list = list(map(tuple, csv_reader))
sorted_orders = sorted(orders_list, key = lambda x: (int(x[1]), int(x[3])))
dated_orders = []
i = 0
for k, g in itertools.groupby(sorted_orders, lambda x : int(x[1])):
item = next(g)
order_date = random_date(start_date, end_date)
while order_date.weekday() != int(item[4]):
order_date = order_date + datetime.timedelta(days=1)
start_date = datetime.datetime.combine(start_date, datetime.datetime.min.time())
end_date = datetime.datetime.combine(end_date, datetime.datetime.min.time())
order_date = datetime.datetime(order_date.year, order_date.month, order_date.day, int(item[5]), 0, 0)
time_period = int((order_date - start_date).total_seconds() / (60*60*6))
dated_orders.append((int(item[0]), int(item[1]), int(item[4]), order_date, time_period))
for item in g:
order_date = order_date + datetime.timedelta(days=int(float(item[6])))
order_date = datetime.datetime(order_date.year, order_date.month, order_date.day, int(item[5]), 0, 0)
time_period = int((order_date - start_date).total_seconds() / (60*60*6))
dated_orders.append((int(item[0]), int(item[1]), int(item[4]), order_date, time_period))
orders = pd.DataFrame(dated_orders, columns =['order_id', 'user_id', 'order_dow', 'order_date', 'time_period'])
products = pd.read_csv(products_file)
departments = pd.read_csv(departments_file)
prior_order = pd.read_csv(order_products_prior_file)
train_order = pd.read_csv(order_products_train_file)
#aisles = pd.read_csv("data/aisles.csv")
ntop = int(FLAGS.top_products*products['product_id'].count())
all_ordered_products = pd.concat([prior_order, train_order], axis=0)[["order_id", "product_id"]]
largest = all_ordered_products[['product_id']].groupby(['product_id']).size().nlargest(ntop).to_frame()
largest.reset_index(inplace=True)
products_largest = pd.merge(largest, products, how="left", on="product_id")[['product_id', 'product_name', 'aisle_id', 'department_id']]
products_departments = pd.merge(products_largest, departments, how="left", on="department_id")
products_departments = products_departments[products_departments["department"].isin(["frozen", "bakery", "produce", "beverages", "dry goods pasta", "meat seafood", "pantry", "breakfast", "canned goods", "dairy eggs", "snacks", "deli"])]
products_departments_list = products_departments.values.tolist()
products_subset=set()
while len(products_subset) < number_of_products:
products_subset.add(random.randint(0, len(products_departments_list) - 1))
selected_products_departments_list = [products_departments_list[i] for i in products_subset]
selected_products_list = [products_departments_list[i][0] for i in products_subset]
for p, product_id in enumerate(selected_products_list):
logger.info ("{} {}".format(p, product_id))
selected_products_departments = pd.DataFrame(selected_products_departments_list, columns =['product_id', 'product_name', 'aisle_id', 'department_id', 'department'])
all_ordered_products_quantity_list = []
for item in all_ordered_products.itertuples():
all_ordered_products_quantity_list.append((item[1], item[2], 1))
#all_ordered_products_quantity_list.append((item[1], item[2], random.randint(1, 6)))
all_ordered_products_quantity = pd.DataFrame(all_ordered_products_quantity_list, columns =["order_id", "product_id", 'quantity'])
order_product_departments = pd.merge(selected_products_departments, all_ordered_products_quantity, how="left", on="product_id")
order_product_departments_dates = pd.merge(order_product_departments, orders, how="left", on="order_id")
from collections import OrderedDict
import numpy as np
from numpy import nan, array
import pandas as pd
import pytest
from .conftest import (
assert_series_equal, assert_frame_equal, fail_on_pvlib_version)
from numpy.testing import assert_allclose
import unittest.mock as mock
from pvlib import inverter, pvsystem
from pvlib import atmosphere
from pvlib import iam as _iam
from pvlib import irradiance
from pvlib.location import Location
from pvlib import temperature
from pvlib._deprecation import pvlibDeprecationWarning
@pytest.mark.parametrize('iam_model,model_params', [
('ashrae', {'b': 0.05}),
('physical', {'K': 4, 'L': 0.002, 'n': 1.526}),
('martin_ruiz', {'a_r': 0.16}),
])
def test_PVSystem_get_iam(mocker, iam_model, model_params):
m = mocker.spy(_iam, iam_model)
system = pvsystem.PVSystem(module_parameters=model_params)
thetas = 1
iam = system.get_iam(thetas, iam_model=iam_model)
m.assert_called_with(thetas, **model_params)
assert iam < 1.
def test_PVSystem_multi_array_get_iam():
model_params = {'b': 0.05}
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=model_params),
pvsystem.Array(module_parameters=model_params)]
)
iam = system.get_iam((1, 5), iam_model='ashrae')
assert len(iam) == 2
assert iam[0] != iam[1]
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_iam((1,), iam_model='ashrae')
def test_PVSystem_get_iam_sapm(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
mocker.spy(_iam, 'sapm')
aoi = 0
out = system.get_iam(aoi, 'sapm')
_iam.sapm.assert_called_once_with(aoi, sapm_module_params)
assert_allclose(out, 1.0, atol=0.01)
def test_PVSystem_get_iam_interp(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
with pytest.raises(ValueError):
system.get_iam(45, iam_model='interp')
def test__normalize_sam_product_names():
BAD_NAMES = [' -.()[]:+/",', 'Module[1]']
NORM_NAMES = ['____________', 'Module_1_']
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
BAD_NAMES = ['Module[1]', 'Module(1)']
NORM_NAMES = ['Module_1_', 'Module_1_']
with pytest.warns(UserWarning):
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
BAD_NAMES = ['Module[1]', 'Module[1]']
NORM_NAMES = ['Module_1_', 'Module_1_']
with pytest.warns(UserWarning):
norm_names = pvsystem._normalize_sam_product_names(BAD_NAMES)
assert list(norm_names) == NORM_NAMES
def test_PVSystem_get_iam_invalid(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
with pytest.raises(ValueError):
system.get_iam(45, iam_model='not_a_model')
def test_retrieve_sam_raise_no_parameters():
"""
Raise an exception if no parameters are provided to `retrieve_sam()`.
"""
with pytest.raises(ValueError) as error:
pvsystem.retrieve_sam()
assert 'A name or path must be provided!' == str(error.value)
def test_retrieve_sam_cecmod():
"""
Test the expected data is retrieved from the CEC module database. In
particular, check for a known module in the database and check for the
expected keys for that module.
"""
data = pvsystem.retrieve_sam('cecmod')
keys = [
'BIPV',
'Date',
'T_NOCT',
'A_c',
'N_s',
'I_sc_ref',
'V_oc_ref',
'I_mp_ref',
'V_mp_ref',
'alpha_sc',
'beta_oc',
'a_ref',
'I_L_ref',
'I_o_ref',
'R_s',
'R_sh_ref',
'Adjust',
'gamma_r',
'Version',
'STC',
'PTC',
'Technology',
'Bifacial',
'Length',
'Width',
]
module = 'Itek_Energy_LLC_iT_300_HE'
assert module in data
assert set(data[module].keys()) == set(keys)
def test_retrieve_sam_cecinverter():
"""
Test the expected data is retrieved from the CEC inverter database. In
particular, check for a known inverter in the database and check for the
expected keys for that inverter.
"""
data = pvsystem.retrieve_sam('cecinverter')
keys = [
'Vac',
'Paco',
'Pdco',
'Vdco',
'Pso',
'C0',
'C1',
'C2',
'C3',
'Pnt',
'Vdcmax',
'Idcmax',
'Mppt_low',
'Mppt_high',
'CEC_Date',
'CEC_Type',
]
inverter = 'Yaskawa_Solectria_Solar__PVI_5300_208__208V_'
assert inverter in data
assert set(data[inverter].keys()) == set(keys)
def test_sapm(sapm_module_params):
times = pd.date_range(start='2015-01-01', periods=5, freq='12H')
effective_irradiance = pd.Series([-1000, 500, 1100, np.nan, 1000],
index=times)
temp_cell = pd.Series([10, 25, 50, 25, np.nan], index=times)
out = pvsystem.sapm(effective_irradiance, temp_cell, sapm_module_params)
expected = pd.DataFrame(np.array(
[[ -5.0608322 , -4.65037767, nan, nan,
nan, -4.91119927, -4.15367716],
[ 2.545575 , 2.28773882, 56.86182059, 47.21121608,
108.00693168, 2.48357383, 1.71782772],
[ 5.65584763, 5.01709903, 54.1943277 , 42.51861718,
213.32011294, 5.52987899, 3.48660728],
[ nan, nan, nan, nan,
nan, nan, nan],
[ nan, nan, nan, nan,
nan, nan, nan]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=times)
assert_frame_equal(out, expected, check_less_precise=4)
out = pvsystem.sapm(1000, 25, sapm_module_params)
expected = OrderedDict()
expected['i_sc'] = 5.09115
expected['i_mp'] = 4.5462909092579995
expected['v_oc'] = 59.260800000000003
expected['v_mp'] = 48.315600000000003
expected['p_mp'] = 219.65677305534581
expected['i_x'] = 4.9759899999999995
expected['i_xx'] = 3.1880204359100004
for k, v in expected.items():
assert_allclose(out[k], v, atol=1e-4)
# just make sure it works with Series input
pvsystem.sapm(effective_irradiance, temp_cell,
pd.Series(sapm_module_params))
def test_PVSystem_sapm(sapm_module_params, mocker):
mocker.spy(pvsystem, 'sapm')
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
effective_irradiance = 500
temp_cell = 25
out = system.sapm(effective_irradiance, temp_cell)
pvsystem.sapm.assert_called_once_with(effective_irradiance, temp_cell,
sapm_module_params)
assert_allclose(out['p_mp'], 100, atol=100)
def test_PVSystem_multi_array_sapm(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
effective_irradiance = (100, 500)
temp_cell = (15, 25)
sapm_one, sapm_two = system.sapm(effective_irradiance, temp_cell)
assert sapm_one['p_mp'] != sapm_two['p_mp']
sapm_one_flip, sapm_two_flip = system.sapm(
(effective_irradiance[1], effective_irradiance[0]),
(temp_cell[1], temp_cell[0])
)
assert sapm_one_flip['p_mp'] == sapm_two['p_mp']
assert sapm_two_flip['p_mp'] == sapm_one['p_mp']
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.sapm(effective_irradiance, 10)
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.sapm(500, temp_cell)
@pytest.mark.parametrize('airmass,expected', [
(1.5, 1.00028714375),
(np.array([[10, np.nan]]), np.array([[0.999535, 0]])),
(pd.Series([5]), pd.Series([1.0387675]))
])
def test_sapm_spectral_loss(sapm_module_params, airmass, expected):
out = pvsystem.sapm_spectral_loss(airmass, sapm_module_params)
if isinstance(airmass, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-4)
def test_PVSystem_sapm_spectral_loss(sapm_module_params, mocker):
mocker.spy(pvsystem, 'sapm_spectral_loss')
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
airmass = 2
out = system.sapm_spectral_loss(airmass)
pvsystem.sapm_spectral_loss.assert_called_once_with(airmass,
sapm_module_params)
assert_allclose(out, 1, atol=0.5)
def test_PVSystem_multi_array_sapm_spectral_loss(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
loss_one, loss_two = system.sapm_spectral_loss(2)
assert loss_one == loss_two
# this test could be improved to cover all cell types.
# could remove the need for specifying spectral coefficients if we don't
# care about the return value at all
@pytest.mark.parametrize('module_parameters,module_type,coefficients', [
({'Technology': 'mc-Si'}, 'multisi', None),
({'Material': 'Multi-c-Si'}, 'multisi', None),
({'first_solar_spectral_coefficients': (
0.84, -0.03, -0.008, 0.14, 0.04, -0.002)},
None,
(0.84, -0.03, -0.008, 0.14, 0.04, -0.002))
])
def test_PVSystem_first_solar_spectral_loss(module_parameters, module_type,
coefficients, mocker):
mocker.spy(atmosphere, 'first_solar_spectral_correction')
system = pvsystem.PVSystem(module_parameters=module_parameters)
pw = 3
airmass_absolute = 3
out = system.first_solar_spectral_loss(pw, airmass_absolute)
atmosphere.first_solar_spectral_correction.assert_called_once_with(
pw, airmass_absolute, module_type, coefficients)
assert_allclose(out, 1, atol=0.5)
def test_PVSystem_multi_array_first_solar_spectral_loss():
system = pvsystem.PVSystem(
arrays=[
pvsystem.Array(
module_parameters={'Technology': 'mc-Si'},
module_type='multisi'
),
pvsystem.Array(
module_parameters={'Technology': 'mc-Si'},
module_type='multisi'
)
]
)
loss_one, loss_two = system.first_solar_spectral_loss(1, 3)
assert loss_one == loss_two
@pytest.mark.parametrize('test_input,expected', [
([1000, 100, 5, 45], 1140.0510967821877),
([np.array([np.nan, 1000, 1000]),
np.array([100, np.nan, 100]),
np.array([1.1, 1.1, 1.1]),
np.array([10, 10, 10])],
np.array([np.nan, np.nan, 1081.1574])),
([pd.Series([1000]), pd.Series([100]), pd.Series([1.1]),
pd.Series([10])],
pd.Series([1081.1574]))
])
def test_sapm_effective_irradiance(sapm_module_params, test_input, expected):
test_input.append(sapm_module_params)
out = pvsystem.sapm_effective_irradiance(*test_input)
if isinstance(test_input, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-1)
def test_PVSystem_sapm_effective_irradiance(sapm_module_params, mocker):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
mocker.spy(pvsystem, 'sapm_effective_irradiance')
poa_direct = 900
poa_diffuse = 100
airmass_absolute = 1.5
aoi = 0
p = (sapm_module_params['A4'], sapm_module_params['A3'],
sapm_module_params['A2'], sapm_module_params['A1'],
sapm_module_params['A0'])
f1 = np.polyval(p, airmass_absolute)
expected = f1 * (poa_direct + sapm_module_params['FD'] * poa_diffuse)
out = system.sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi)
pvsystem.sapm_effective_irradiance.assert_called_once_with(
poa_direct, poa_diffuse, airmass_absolute, aoi, sapm_module_params)
assert_allclose(out, expected, atol=0.1)
def test_PVSystem_multi_array_sapm_effective_irradiance(sapm_module_params):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(module_parameters=sapm_module_params),
pvsystem.Array(module_parameters=sapm_module_params)]
)
poa_direct = (500, 900)
poa_diffuse = (50, 100)
aoi = (0, 10)
airmass_absolute = 1.5
irrad_one, irrad_two = system.sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi
)
assert irrad_one != irrad_two
@pytest.fixture
def two_array_system(pvsyst_module_params, cec_module_params):
"""Two-array PVSystem.
Both arrays are identical.
"""
temperature_model = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass'
]
# Need u_v to be non-zero so wind-speed changes cell temperature
# under the pvsyst model.
temperature_model['u_v'] = 1.0
# parameter for fuentes temperature model
temperature_model['noct_installed'] = 45
# parameters for noct_sam temperature model
temperature_model['noct'] = 45.
temperature_model['module_efficiency'] = 0.2
module_params = {**pvsyst_module_params, **cec_module_params}
return pvsystem.PVSystem(
arrays=[
pvsystem.Array(
temperature_model_parameters=temperature_model,
module_parameters=module_params
),
pvsystem.Array(
temperature_model_parameters=temperature_model,
module_parameters=module_params
)
]
)
@pytest.mark.parametrize("poa_direct, poa_diffuse, aoi",
[(20, (10, 10), (20, 20)),
((20, 20), (10,), (20, 20)),
((20, 20), (10, 10), 20)])
def test_PVSystem_sapm_effective_irradiance_value_error(
poa_direct, poa_diffuse, aoi, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
two_array_system.sapm_effective_irradiance(
poa_direct, poa_diffuse, 10, aoi
)
def test_PVSystem_sapm_celltemp(mocker):
a, b, deltaT = (-3.47, -0.0594, 3) # open_rack_glass_glass
temp_model_params = {'a': a, 'b': b, 'deltaT': deltaT}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'sapm_cell')
temps = 25
irrads = 1000
winds = 1
out = system.sapm_celltemp(irrads, temps, winds)
temperature.sapm_cell.assert_called_once_with(irrads, temps, winds, a, b,
deltaT)
assert_allclose(out, 57, atol=1)
def test_PVSystem_sapm_celltemp_kwargs(mocker):
temp_model_params = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'sapm_cell')
temps = 25
irrads = 1000
winds = 1
out = system.sapm_celltemp(irrads, temps, winds)
temperature.sapm_cell.assert_called_once_with(irrads, temps, winds,
temp_model_params['a'],
temp_model_params['b'],
temp_model_params['deltaT'])
assert_allclose(out, 57, atol=1)
def test_PVSystem_multi_array_sapm_celltemp_different_arrays():
temp_model_one = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
temp_model_two = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'close_mount_glass_glass']
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(temperature_model_parameters=temp_model_one),
pvsystem.Array(temperature_model_parameters=temp_model_two)]
)
temp_one, temp_two = system.sapm_celltemp(
(1000, 1000), 25, 1
)
assert temp_one != temp_two
def test_PVSystem_pvsyst_celltemp(mocker):
parameter_set = 'insulated'
temp_model_params = temperature.TEMPERATURE_MODEL_PARAMETERS['pvsyst'][
parameter_set]
alpha_absorption = 0.85
module_efficiency = 0.17
module_parameters = {'alpha_absorption': alpha_absorption,
'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(module_parameters=module_parameters,
temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'pvsyst_cell')
irrad = 800
temp = 45
wind = 0.5
out = system.pvsyst_celltemp(irrad, temp, wind_speed=wind)
temperature.pvsyst_cell.assert_called_once_with(
irrad, temp, wind_speed=wind, u_c=temp_model_params['u_c'],
u_v=temp_model_params['u_v'], module_efficiency=module_efficiency,
alpha_absorption=alpha_absorption)
assert (out < 90) and (out > 70)
def test_PVSystem_faiman_celltemp(mocker):
u0, u1 = 25.0, 6.84 # default values
temp_model_params = {'u0': u0, 'u1': u1}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'faiman')
temps = 25
irrads = 1000
winds = 1
out = system.faiman_celltemp(irrads, temps, winds)
temperature.faiman.assert_called_once_with(irrads, temps, winds, u0, u1)
assert_allclose(out, 56.4, atol=1)
def test_PVSystem_noct_celltemp(mocker):
poa_global, temp_air, wind_speed, noct, module_efficiency = (
1000., 25., 1., 45., 0.2)
expected = 55.230790492
temp_model_params = {'noct': noct, 'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
mocker.spy(temperature, 'noct_sam')
out = system.noct_sam_celltemp(poa_global, temp_air, wind_speed)
temperature.noct_sam.assert_called_once_with(
poa_global, temp_air, wind_speed, effective_irradiance=None, noct=noct,
module_efficiency=module_efficiency)
assert_allclose(out, expected)
    # different input types
out = system.noct_sam_celltemp(np.array(poa_global), np.array(temp_air),
np.array(wind_speed))
assert_allclose(out, expected)
dr = pd.date_range(start='2020-01-01 12:00:00', end='2020-01-01 13:00:00',
freq='1H')
out = system.noct_sam_celltemp(pd.Series(index=dr, data=poa_global),
pd.Series(index=dr, data=temp_air),
pd.Series(index=dr, data=wind_speed))
assert_series_equal(out, pd.Series(index=dr, data=expected))
# now use optional arguments
temp_model_params.update({'transmittance_absorptance': 0.8,
'array_height': 2,
'mount_standoff': 2.0})
expected = 60.477703576
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
out = system.noct_sam_celltemp(poa_global, temp_air, wind_speed,
effective_irradiance=1100.)
assert_allclose(out, expected)
def test_PVSystem_noct_celltemp_error():
poa_global, temp_air, wind_speed, module_efficiency = (1000., 25., 1., 0.2)
temp_model_params = {'module_efficiency': module_efficiency}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
with pytest.raises(KeyError):
system.noct_sam_celltemp(poa_global, temp_air, wind_speed)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_functions(celltemp, two_array_system):
times = pd.date_range(start='2020-08-25 11:00', freq='H', periods=3)
irrad_one = pd.Series(1000, index=times)
irrad_two = pd.Series(500, index=times)
temp_air = pd.Series(25, index=times)
wind_speed = pd.Series(1, index=times)
temp_one, temp_two = celltemp(
two_array_system, (irrad_one, irrad_two), temp_air, wind_speed)
assert (temp_one != temp_two).all()
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_multi_temp(celltemp, two_array_system):
times = pd.date_range(start='2020-08-25 11:00', freq='H', periods=3)
irrad = pd.Series(1000, index=times)
temp_air_one = pd.Series(25, index=times)
temp_air_two = pd.Series(5, index=times)
wind_speed = pd.Series(1, index=times)
temp_one, temp_two = celltemp(
two_array_system,
(irrad, irrad),
(temp_air_one, temp_air_two),
wind_speed
)
assert (temp_one != temp_two).all()
    temp_one_switch, temp_two_switch = celltemp(
two_array_system,
(irrad, irrad),
(temp_air_two, temp_air_one),
wind_speed
)
assert_series_equal(temp_one, temp_two_switch)
    assert_series_equal(temp_two, temp_one_switch)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_multi_wind(celltemp, two_array_system):
times = pd.date_range(start='2020-08-25 11:00', freq='H', periods=3)
irrad = pd.Series(1000, index=times)
temp_air = pd.Series(25, index=times)
wind_speed_one = pd.Series(1, index=times)
wind_speed_two = pd.Series(5, index=times)
temp_one, temp_two = celltemp(
two_array_system,
(irrad, irrad),
temp_air,
(wind_speed_one, wind_speed_two)
)
assert (temp_one != temp_two).all()
    temp_one_switch, temp_two_switch = celltemp(
two_array_system,
(irrad, irrad),
temp_air,
(wind_speed_two, wind_speed_one)
)
assert_series_equal(temp_one, temp_two_switch)
    assert_series_equal(temp_two, temp_one_switch)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_temp_too_short(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), (1,), 1)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_temp_too_long(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), (1, 1, 1), 1)
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_wind_too_short(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), 25, (1,))
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_wind_too_long(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, (1000, 1000), 25, (1, 1, 1))
@pytest.mark.parametrize("celltemp",
[pvsystem.PVSystem.faiman_celltemp,
pvsystem.PVSystem.pvsyst_celltemp,
pvsystem.PVSystem.sapm_celltemp,
pvsystem.PVSystem.fuentes_celltemp,
pvsystem.PVSystem.noct_sam_celltemp])
def test_PVSystem_multi_array_celltemp_poa_length_mismatch(
celltemp, two_array_system):
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
celltemp(two_array_system, 1000, 25, 1)
def test_PVSystem_fuentes_celltemp(mocker):
noct_installed = 45
temp_model_params = {'noct_installed': noct_installed}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params)
spy = mocker.spy(temperature, 'fuentes')
index = pd.date_range('2019-01-01 11:00', freq='h', periods=3)
temps = pd.Series(25, index)
irrads = pd.Series(1000, index)
winds = pd.Series(1, index)
out = system.fuentes_celltemp(irrads, temps, winds)
assert_series_equal(spy.call_args[0][0], irrads)
assert_series_equal(spy.call_args[0][1], temps)
assert_series_equal(spy.call_args[0][2], winds)
assert spy.call_args[1]['noct_installed'] == noct_installed
assert_series_equal(out, pd.Series([52.85, 55.85, 55.85], index,
name='tmod'))
def test_PVSystem_fuentes_celltemp_override(mocker):
# test that the surface_tilt value in the cell temp calculation can be
# overridden but defaults to the surface_tilt attribute of the PVSystem
spy = mocker.spy(temperature, 'fuentes')
noct_installed = 45
index = pd.date_range('2019-01-01 11:00', freq='h', periods=3)
temps = pd.Series(25, index)
irrads = pd.Series(1000, index)
winds = pd.Series(1, index)
# uses default value
temp_model_params = {'noct_installed': noct_installed}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params,
surface_tilt=20)
system.fuentes_celltemp(irrads, temps, winds)
assert spy.call_args[1]['surface_tilt'] == 20
# can be overridden
temp_model_params = {'noct_installed': noct_installed, 'surface_tilt': 30}
system = pvsystem.PVSystem(temperature_model_parameters=temp_model_params,
surface_tilt=20)
system.fuentes_celltemp(irrads, temps, winds)
assert spy.call_args[1]['surface_tilt'] == 30
def test_Array__infer_temperature_model_params():
array = pvsystem.Array(module_parameters={},
racking_model='open_rack',
module_type='glass_polymer')
expected = temperature.TEMPERATURE_MODEL_PARAMETERS[
'sapm']['open_rack_glass_polymer']
assert expected == array._infer_temperature_model_params()
array = pvsystem.Array(module_parameters={},
racking_model='freestanding',
module_type='glass_polymer')
expected = temperature.TEMPERATURE_MODEL_PARAMETERS[
'pvsyst']['freestanding']
assert expected == array._infer_temperature_model_params()
array = pvsystem.Array(module_parameters={},
racking_model='insulated',
module_type=None)
expected = temperature.TEMPERATURE_MODEL_PARAMETERS[
'pvsyst']['insulated']
assert expected == array._infer_temperature_model_params()
def test_Array__infer_cell_type():
array = pvsystem.Array(module_parameters={})
assert array._infer_cell_type() is None
def test_calcparams_desoto(cec_module_params):
times = pd.date_range(start='2015-01-01', periods=3, freq='12H')
effective_irradiance = pd.Series([0.0, 800.0, 800.0], index=times)
temp_cell = pd.Series([25, 25, 50], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_desoto(
effective_irradiance,
temp_cell,
alpha_sc=cec_module_params['alpha_sc'],
a_ref=cec_module_params['a_ref'],
I_L_ref=cec_module_params['I_L_ref'],
I_o_ref=cec_module_params['I_o_ref'],
R_sh_ref=cec_module_params['R_sh_ref'],
R_s=cec_module_params['R_s'],
EgRef=1.121,
dEgdT=-0.0002677)
assert_series_equal(IL, pd.Series([0.0, 6.036, 6.096], index=times),
check_less_precise=3)
assert_series_equal(I0, pd.Series([0.0, 1.94e-9, 7.419e-8], index=times),
check_less_precise=3)
assert_allclose(Rs, 0.094)
assert_series_equal(Rsh, pd.Series([np.inf, 19.65, 19.65], index=times),
check_less_precise=3)
assert_series_equal(nNsVth, pd.Series([0.473, 0.473, 0.5127], index=times),
check_less_precise=3)
def test_calcparams_cec(cec_module_params):
times = pd.date_range(start='2015-01-01', periods=3, freq='12H')
effective_irradiance = pd.Series([0.0, 800.0, 800.0], index=times)
temp_cell = pd.Series([25, 25, 50], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_cec(
effective_irradiance,
temp_cell,
alpha_sc=cec_module_params['alpha_sc'],
a_ref=cec_module_params['a_ref'],
I_L_ref=cec_module_params['I_L_ref'],
I_o_ref=cec_module_params['I_o_ref'],
R_sh_ref=cec_module_params['R_sh_ref'],
R_s=cec_module_params['R_s'],
Adjust=cec_module_params['Adjust'],
EgRef=1.121,
dEgdT=-0.0002677)
assert_series_equal(IL, pd.Series([0.0, 6.036, 6.0896], index=times),
check_less_precise=3)
assert_series_equal(I0, pd.Series([0.0, 1.94e-9, 7.419e-8], index=times),
check_less_precise=3)
assert_allclose(Rs, 0.094)
assert_series_equal(Rsh, pd.Series([np.inf, 19.65, 19.65], index=times),
check_less_precise=3)
assert_series_equal(nNsVth, pd.Series([0.473, 0.473, 0.5127], index=times),
check_less_precise=3)
def test_calcparams_pvsyst(pvsyst_module_params):
times = pd.date_range(start='2015-01-01', periods=2, freq='12H')
effective_irradiance = pd.Series([0.0, 800.0], index=times)
temp_cell = pd.Series([25, 50], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_pvsyst(
effective_irradiance,
temp_cell,
alpha_sc=pvsyst_module_params['alpha_sc'],
gamma_ref=pvsyst_module_params['gamma_ref'],
mu_gamma=pvsyst_module_params['mu_gamma'],
I_L_ref=pvsyst_module_params['I_L_ref'],
I_o_ref=pvsyst_module_params['I_o_ref'],
R_sh_ref=pvsyst_module_params['R_sh_ref'],
R_sh_0=pvsyst_module_params['R_sh_0'],
R_s=pvsyst_module_params['R_s'],
cells_in_series=pvsyst_module_params['cells_in_series'],
EgRef=pvsyst_module_params['EgRef'])
assert_series_equal(
IL.round(decimals=3), pd.Series([0.0, 4.8200], index=times))
assert_series_equal(
I0.round(decimals=3), pd.Series([0.0, 1.47e-7], index=times))
assert_allclose(Rs, 0.500)
assert_series_equal(
Rsh.round(decimals=3), pd.Series([1000.0, 305.757], index=times))
assert_series_equal(
nNsVth.round(decimals=4), pd.Series([1.6186, 1.7961], index=times))
def test_PVSystem_calcparams_desoto(cec_module_params, mocker):
mocker.spy(pvsystem, 'calcparams_desoto')
module_parameters = cec_module_params.copy()
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
system = pvsystem.PVSystem(module_parameters=module_parameters)
effective_irradiance = np.array([0, 800])
temp_cell = 25
IL, I0, Rs, Rsh, nNsVth = system.calcparams_desoto(effective_irradiance,
temp_cell)
pvsystem.calcparams_desoto.assert_called_once_with(
effective_irradiance,
temp_cell,
alpha_sc=cec_module_params['alpha_sc'],
a_ref=cec_module_params['a_ref'],
I_L_ref=cec_module_params['I_L_ref'],
I_o_ref=cec_module_params['I_o_ref'],
R_sh_ref=cec_module_params['R_sh_ref'],
R_s=cec_module_params['R_s'],
EgRef=module_parameters['EgRef'],
dEgdT=module_parameters['dEgdT'])
assert_allclose(IL, np.array([0.0, 6.036]), atol=1)
assert_allclose(I0, 2.0e-9, atol=1.0e-9)
assert_allclose(Rs, 0.1, atol=0.1)
assert_allclose(Rsh, np.array([np.inf, 20]), atol=1)
assert_allclose(nNsVth, 0.5, atol=0.1)
def test_PVSystem_calcparams_pvsyst(pvsyst_module_params, mocker):
mocker.spy(pvsystem, 'calcparams_pvsyst')
module_parameters = pvsyst_module_params.copy()
system = pvsystem.PVSystem(module_parameters=module_parameters)
effective_irradiance = np.array([0, 800])
temp_cell = np.array([25, 50])
IL, I0, Rs, Rsh, nNsVth = system.calcparams_pvsyst(effective_irradiance,
temp_cell)
pvsystem.calcparams_pvsyst.assert_called_once_with(
effective_irradiance,
temp_cell,
alpha_sc=pvsyst_module_params['alpha_sc'],
gamma_ref=pvsyst_module_params['gamma_ref'],
mu_gamma=pvsyst_module_params['mu_gamma'],
I_L_ref=pvsyst_module_params['I_L_ref'],
I_o_ref=pvsyst_module_params['I_o_ref'],
R_sh_ref=pvsyst_module_params['R_sh_ref'],
R_sh_0=pvsyst_module_params['R_sh_0'],
R_s=pvsyst_module_params['R_s'],
cells_in_series=pvsyst_module_params['cells_in_series'],
EgRef=pvsyst_module_params['EgRef'],
R_sh_exp=pvsyst_module_params['R_sh_exp'])
assert_allclose(IL, np.array([0.0, 4.8200]), atol=1)
assert_allclose(I0, np.array([0.0, 1.47e-7]), atol=1.0e-5)
assert_allclose(Rs, 0.5, atol=0.1)
assert_allclose(Rsh, np.array([1000, 305.757]), atol=50)
assert_allclose(nNsVth, np.array([1.6186, 1.7961]), atol=0.1)
@pytest.mark.parametrize('calcparams', [pvsystem.PVSystem.calcparams_pvsyst,
pvsystem.PVSystem.calcparams_desoto,
pvsystem.PVSystem.calcparams_cec])
def test_PVSystem_multi_array_calcparams(calcparams, two_array_system):
params_one, params_two = calcparams(
two_array_system, (1000, 500), (30, 20)
)
assert params_one != params_two
@pytest.mark.parametrize('calcparams, irrad, celltemp',
[ (f, irrad, celltemp)
for f in (pvsystem.PVSystem.calcparams_desoto,
pvsystem.PVSystem.calcparams_cec,
pvsystem.PVSystem.calcparams_pvsyst)
for irrad, celltemp in [(1, (1, 1)), ((1, 1), 1)]])
def test_PVSystem_multi_array_calcparams_value_error(
calcparams, irrad, celltemp, two_array_system):
with pytest.raises(ValueError,
match='Length mismatch for per-array parameter'):
calcparams(two_array_system, irrad, celltemp)
@pytest.fixture(params=[
{ # Can handle all python scalar inputs
'Rsh': 20.,
'Rs': 0.1,
'nNsVth': 0.5,
'I': 3.,
'I0': 6.e-7,
'IL': 7.,
'V_expected': 7.5049875193450521
},
{ # Can handle all rank-0 array inputs
'Rsh': np.array(20.),
'Rs': np.array(0.1),
'nNsVth': np.array(0.5),
'I': np.array(3.),
'I0': np.array(6.e-7),
'IL': np.array(7.),
'V_expected': np.array(7.5049875193450521)
},
{ # Can handle all rank-1 singleton array inputs
'Rsh': np.array([20.]),
'Rs': np.array([0.1]),
'nNsVth': np.array([0.5]),
'I': np.array([3.]),
'I0': np.array([6.e-7]),
'IL': np.array([7.]),
'V_expected': np.array([7.5049875193450521])
},
{ # Can handle all rank-1 non-singleton array inputs with infinite shunt
# resistance, Rsh=inf gives V=Voc=nNsVth*(np.log(IL + I0) - np.log(I0)
# at I=0
'Rsh': np.array([np.inf, 20.]),
'Rs': np.array([0.1, 0.1]),
'nNsVth': np.array([0.5, 0.5]),
'I': np.array([0., 3.]),
'I0': np.array([6.e-7, 6.e-7]),
'IL': np.array([7., 7.]),
'V_expected': np.array([0.5*(np.log(7. + 6.e-7) - np.log(6.e-7)),
7.5049875193450521])
},
{ # Can handle mixed inputs with a rank-2 array with infinite shunt
# resistance, Rsh=inf gives V=Voc=nNsVth*(np.log(IL + I0) - np.log(I0)
# at I=0
'Rsh': np.array([[np.inf, np.inf], [np.inf, np.inf]]),
'Rs': np.array([0.1]),
'nNsVth': np.array(0.5),
'I': 0.,
'I0': np.array([6.e-7]),
'IL': np.array([7.]),
'V_expected': 0.5*(np.log(7. + 6.e-7) - np.log(6.e-7))*np.ones((2, 2))
},
{ # Can handle ideal series and shunt, Rsh=inf and Rs=0 give
# V = nNsVth*(np.log(IL - I + I0) - np.log(I0))
'Rsh': np.inf,
'Rs': 0.,
'nNsVth': 0.5,
'I': np.array([7., 7./2., 0.]),
'I0': 6.e-7,
'IL': 7.,
'V_expected': np.array([0., 0.5*(np.log(7. - 7./2. + 6.e-7) -
np.log(6.e-7)), 0.5*(np.log(7. + 6.e-7) -
np.log(6.e-7))])
},
{ # Can handle only ideal series resistance, no closed form solution
'Rsh': 20.,
'Rs': 0.,
'nNsVth': 0.5,
'I': 3.,
'I0': 6.e-7,
'IL': 7.,
'V_expected': 7.804987519345062
},
{ # Can handle all python scalar inputs with big LambertW arg
'Rsh': 500.,
'Rs': 10.,
'nNsVth': 4.06,
'I': 0.,
'I0': 6.e-10,
'IL': 1.2,
'V_expected': 86.320000493521079
},
{ # Can handle all python scalar inputs with bigger LambertW arg
# 1000 W/m^2 on a Canadian Solar 220M with 20 C ambient temp
# github issue 225 (this appears to be from PR 226 not issue 225)
'Rsh': 190.,
'Rs': 1.065,
'nNsVth': 2.89,
'I': 0.,
'I0': 7.05196029e-08,
'IL': 10.491262,
'V_expected': 54.303958833791455
},
{ # Can handle all python scalar inputs with bigger LambertW arg
# 1000 W/m^2 on a Canadian Solar 220M with 20 C ambient temp
# github issue 225
'Rsh': 381.68,
'Rs': 1.065,
'nNsVth': 2.681527737715915,
'I': 0.,
'I0': 1.8739027472625636e-09,
'IL': 5.1366949999999996,
'V_expected': 58.19323124611128
},
{ # Verify mixed solution type indexing logic
'Rsh': np.array([np.inf, 190., 381.68]),
'Rs': 1.065,
'nNsVth': np.array([2.89, 2.89, 2.681527737715915]),
'I': 0.,
'I0': np.array([7.05196029e-08, 7.05196029e-08, 1.8739027472625636e-09]),
'IL': np.array([10.491262, 10.491262, 5.1366949999999996]),
'V_expected': np.array([2.89*np.log1p(10.491262/7.05196029e-08),
54.303958833791455, 58.19323124611128])
}])
def fixture_v_from_i(request):
return request.param
@pytest.mark.parametrize(
'method, atol', [('lambertw', 1e-11), ('brentq', 1e-11), ('newton', 1e-8)]
)
def test_v_from_i(fixture_v_from_i, method, atol):
# Solution set loaded from fixture
Rsh = fixture_v_from_i['Rsh']
Rs = fixture_v_from_i['Rs']
nNsVth = fixture_v_from_i['nNsVth']
I = fixture_v_from_i['I']
I0 = fixture_v_from_i['I0']
IL = fixture_v_from_i['IL']
V_expected = fixture_v_from_i['V_expected']
V = pvsystem.v_from_i(Rsh, Rs, nNsVth, I, I0, IL, method=method)
assert(isinstance(V, type(V_expected)))
    if isinstance(V, np.ndarray):
assert(isinstance(V.dtype, type(V_expected.dtype)))
assert(V.shape == V_expected.shape)
assert_allclose(V, V_expected, atol=atol)
def test_i_from_v_from_i(fixture_v_from_i):
# Solution set loaded from fixture
Rsh = fixture_v_from_i['Rsh']
Rs = fixture_v_from_i['Rs']
nNsVth = fixture_v_from_i['nNsVth']
I = fixture_v_from_i['I']
I0 = fixture_v_from_i['I0']
IL = fixture_v_from_i['IL']
V = fixture_v_from_i['V_expected']
# Convergence criteria
atol = 1.e-11
I_expected = pvsystem.i_from_v(Rsh, Rs, nNsVth, V, I0, IL,
method='lambertw')
assert_allclose(I, I_expected, atol=atol)
I = pvsystem.i_from_v(Rsh, Rs, nNsVth, V, I0, IL)
assert(isinstance(I, type(I_expected)))
    if isinstance(I, np.ndarray):
assert(isinstance(I.dtype, type(I_expected.dtype)))
assert(I.shape == I_expected.shape)
assert_allclose(I, I_expected, atol=atol)
@pytest.fixture(params=[
{ # Can handle all python scalar inputs
'Rsh': 20.,
'Rs': 0.1,
'nNsVth': 0.5,
'V': 7.5049875193450521,
'I0': 6.e-7,
'IL': 7.,
'I_expected': 3.
},
{ # Can handle all rank-0 array inputs
'Rsh': np.array(20.),
'Rs': np.array(0.1),
'nNsVth': np.array(0.5),
'V': np.array(7.5049875193450521),
'I0': np.array(6.e-7),
'IL': np.array(7.),
'I_expected': np.array(3.)
},
{ # Can handle all rank-1 singleton array inputs
'Rsh': np.array([20.]),
'Rs': np.array([0.1]),
'nNsVth': np.array([0.5]),
'V': np.array([7.5049875193450521]),
'I0': np.array([6.e-7]),
'IL': np.array([7.]),
'I_expected': np.array([3.])
},
{ # Can handle all rank-1 non-singleton array inputs with a zero
# series resistance, Rs=0 gives I=IL=Isc at V=0
'Rsh': np.array([20., 20.]),
'Rs': np.array([0., 0.1]),
'nNsVth': np.array([0.5, 0.5]),
'V': np.array([0., 7.5049875193450521]),
'I0': np.array([6.e-7, 6.e-7]),
'IL': np.array([7., 7.]),
'I_expected': np.array([7., 3.])
},
{ # Can handle mixed inputs with a rank-2 array with zero series
# resistance, Rs=0 gives I=IL=Isc at V=0
'Rsh': np.array([20.]),
'Rs': np.array([[0., 0.], [0., 0.]]),
'nNsVth': np.array(0.5),
'V': 0.,
'I0': np.array([6.e-7]),
'IL': np.array([7.]),
'I_expected': np.array([[7., 7.], [7., 7.]])
},
{ # Can handle ideal series and shunt, Rsh=inf and Rs=0 give
# V_oc = nNsVth*(np.log(IL + I0) - np.log(I0))
'Rsh': np.inf,
'Rs': 0.,
'nNsVth': 0.5,
'V': np.array([0., 0.5*(np.log(7. + 6.e-7) - np.log(6.e-7))/2.,
0.5*(np.log(7. + 6.e-7) - np.log(6.e-7))]),
'I0': 6.e-7,
'IL': 7.,
'I_expected': np.array([7., 7. - 6.e-7*np.expm1((np.log(7. + 6.e-7) -
np.log(6.e-7))/2.), 0.])
},
{ # Can handle only ideal shunt resistance, no closed form solution
'Rsh': np.inf,
'Rs': 0.1,
'nNsVth': 0.5,
'V': 7.5049875193450521,
'I0': 6.e-7,
'IL': 7.,
'I_expected': 3.2244873645510923
}])
def fixture_i_from_v(request):
return request.param
@pytest.mark.parametrize(
'method, atol', [('lambertw', 1e-11), ('brentq', 1e-11), ('newton', 1e-11)]
)
def test_i_from_v(fixture_i_from_v, method, atol):
# Solution set loaded from fixture
Rsh = fixture_i_from_v['Rsh']
Rs = fixture_i_from_v['Rs']
nNsVth = fixture_i_from_v['nNsVth']
V = fixture_i_from_v['V']
I0 = fixture_i_from_v['I0']
IL = fixture_i_from_v['IL']
I_expected = fixture_i_from_v['I_expected']
I = pvsystem.i_from_v(Rsh, Rs, nNsVth, V, I0, IL, method=method)
assert(isinstance(I, type(I_expected)))
    if isinstance(I, np.ndarray):
assert(isinstance(I.dtype, type(I_expected.dtype)))
assert(I.shape == I_expected.shape)
assert_allclose(I, I_expected, atol=atol)
def test_PVSystem_i_from_v(mocker):
system = pvsystem.PVSystem()
m = mocker.patch('pvlib.pvsystem.i_from_v', autospec=True)
args = (20, 0.1, 0.5, 7.5049875193450521, 6e-7, 7)
system.i_from_v(*args)
m.assert_called_once_with(*args)
def test_i_from_v_size():
with pytest.raises(ValueError):
pvsystem.i_from_v(20, [0.1] * 2, 0.5, [7.5] * 3, 6.0e-7, 7.0)
with pytest.raises(ValueError):
pvsystem.i_from_v(20, [0.1] * 2, 0.5, [7.5] * 3, 6.0e-7, 7.0,
method='brentq')
with pytest.raises(ValueError):
pvsystem.i_from_v(20, 0.1, 0.5, [7.5] * 3, 6.0e-7, np.array([7., 7.]),
method='newton')
def test_v_from_i_size():
with pytest.raises(ValueError):
pvsystem.v_from_i(20, [0.1] * 2, 0.5, [3.0] * 3, 6.0e-7, 7.0)
with pytest.raises(ValueError):
pvsystem.v_from_i(20, [0.1] * 2, 0.5, [3.0] * 3, 6.0e-7, 7.0,
method='brentq')
with pytest.raises(ValueError):
pvsystem.v_from_i(20, [0.1], 0.5, [3.0] * 3, 6.0e-7, np.array([7., 7.]),
method='newton')
def test_mpp_floats():
"""test max_power_point"""
IL, I0, Rs, Rsh, nNsVth = (7, 6e-7, .1, 20, .5)
out = pvsystem.max_power_point(IL, I0, Rs, Rsh, nNsVth, method='brentq')
expected = {'i_mp': 6.1362673597376753, # 6.1390251797935704, lambertw
'v_mp': 6.2243393757884284, # 6.221535886625464, lambertw
'p_mp': 38.194210547580511} # 38.194165464983037} lambertw
assert isinstance(out, dict)
for k, v in out.items():
assert np.isclose(v, expected[k])
out = pvsystem.max_power_point(IL, I0, Rs, Rsh, nNsVth, method='newton')
for k, v in out.items():
assert np.isclose(v, expected[k])
def test_mpp_array():
"""test max_power_point"""
IL, I0, Rs, Rsh, nNsVth = (np.array([7, 7]), 6e-7, .1, 20, .5)
out = pvsystem.max_power_point(IL, I0, Rs, Rsh, nNsVth, method='brentq')
expected = {'i_mp': [6.1362673597376753] * 2,
'v_mp': [6.2243393757884284] * 2,
'p_mp': [38.194210547580511] * 2}
assert isinstance(out, dict)
for k, v in out.items():
assert np.allclose(v, expected[k])
out = pvsystem.max_power_point(IL, I0, Rs, Rsh, nNsVth, method='newton')
for k, v in out.items():
assert np.allclose(v, expected[k])
def test_mpp_series():
"""test max_power_point"""
idx = ['2008-02-17T11:30:00-0800', '2008-02-17T12:30:00-0800']
IL, I0, Rs, Rsh, nNsVth = (np.array([7, 7]), 6e-7, .1, 20, .5)
IL = pd.Series(IL, index=idx)
out = pvsystem.max_power_point(IL, I0, Rs, Rsh, nNsVth, method='brentq')
expected = pd.DataFrame({'i_mp': [6.1362673597376753] * 2,
'v_mp': [6.2243393757884284] * 2,
'p_mp': [38.194210547580511] * 2},
index=idx)
assert isinstance(out, pd.DataFrame)
for k, v in out.items():
assert np.allclose(v, expected[k])
out = pvsystem.max_power_point(IL, I0, Rs, Rsh, nNsVth, method='newton')
for k, v in out.items():
assert np.allclose(v, expected[k])
def test_singlediode_series(cec_module_params):
times = pd.date_range(start='2015-01-01', periods=2, freq='12H')
effective_irradiance = pd.Series([0.0, 800.0], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_desoto(
effective_irradiance,
temp_cell=25,
alpha_sc=cec_module_params['alpha_sc'],
a_ref=cec_module_params['a_ref'],
I_L_ref=cec_module_params['I_L_ref'],
I_o_ref=cec_module_params['I_o_ref'],
R_sh_ref=cec_module_params['R_sh_ref'],
R_s=cec_module_params['R_s'],
EgRef=1.121,
dEgdT=-0.0002677
)
out = pvsystem.singlediode(IL, I0, Rs, Rsh, nNsVth)
assert isinstance(out, pd.DataFrame)
def test_singlediode_array():
# github issue 221
photocurrent = np.linspace(0, 10, 11)
resistance_shunt = 16
resistance_series = 0.094
nNsVth = 0.473
saturation_current = 1.943e-09
sd = pvsystem.singlediode(photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth,
method='lambertw')
expected = np.array([
0. , 0.54538398, 1.43273966, 2.36328163, 3.29255606,
4.23101358, 5.16177031, 6.09368251, 7.02197553, 7.96846051,
8.88220557])
assert_allclose(sd['i_mp'], expected, atol=0.01)
sd = pvsystem.singlediode(photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth)
expected = pvsystem.i_from_v(resistance_shunt, resistance_series, nNsVth,
sd['v_mp'], saturation_current, photocurrent,
method='lambertw')
assert_allclose(sd['i_mp'], expected, atol=0.01)
def test_singlediode_floats():
out = pvsystem.singlediode(7, 6e-7, .1, 20, .5, method='lambertw')
expected = {'i_xx': 4.2498,
'i_mp': 6.1275,
'v_oc': 8.1063,
'p_mp': 38.1937,
'i_x': 6.7558,
'i_sc': 6.9651,
'v_mp': 6.2331,
'i': None,
'v': None}
assert isinstance(out, dict)
for k, v in out.items():
if k in ['i', 'v']:
assert v is None
else:
assert_allclose(v, expected[k], atol=1e-3)
def test_singlediode_floats_ivcurve():
out = pvsystem.singlediode(7, 6e-7, .1, 20, .5, ivcurve_pnts=3, method='lambertw')
expected = {'i_xx': 4.2498,
'i_mp': 6.1275,
'v_oc': 8.1063,
'p_mp': 38.1937,
'i_x': 6.7558,
'i_sc': 6.9651,
'v_mp': 6.2331,
'i': np.array([6.965172e+00, 6.755882e+00, 2.575717e-14]),
'v': np.array([0., 4.05315, 8.1063])}
assert isinstance(out, dict)
for k, v in out.items():
assert_allclose(v, expected[k], atol=1e-3)
def test_singlediode_series_ivcurve(cec_module_params):
times = pd.date_range(start='2015-06-01', periods=3, freq='6H')
effective_irradiance = pd.Series([0.0, 400.0, 800.0], index=times)
IL, I0, Rs, Rsh, nNsVth = pvsystem.calcparams_desoto(
effective_irradiance,
temp_cell=25,
alpha_sc=cec_module_params['alpha_sc'],
a_ref=cec_module_params['a_ref'],
I_L_ref=cec_module_params['I_L_ref'],
I_o_ref=cec_module_params['I_o_ref'],
R_sh_ref=cec_module_params['R_sh_ref'],
R_s=cec_module_params['R_s'],
EgRef=1.121,
dEgdT=-0.0002677)
out = pvsystem.singlediode(IL, I0, Rs, Rsh, nNsVth, ivcurve_pnts=3,
method='lambertw')
expected = OrderedDict([('i_sc', array([0., 3.01054475, 6.00675648])),
('v_oc', array([0., 9.96886962, 10.29530483])),
('i_mp', array([0., 2.65191983, 5.28594672])),
('v_mp', array([0., 8.33392491, 8.4159707])),
('p_mp', array([0., 22.10090078, 44.48637274])),
('i_x', array([0., 2.88414114, 5.74622046])),
('i_xx', array([0., 2.04340914, 3.90007956])),
('v', array([[0., 0., 0.],
[0., 4.98443481, 9.96886962],
[0., 5.14765242, 10.29530483]])),
('i', array([[0., 0., 0.],
[3.01079860e+00, 2.88414114e+00,
3.10862447e-14],
[6.00726296e+00, 5.74622046e+00,
0.00000000e+00]]))])
for k, v in out.items():
assert_allclose(v, expected[k], atol=1e-2)
out = pvsystem.singlediode(IL, I0, Rs, Rsh, nNsVth, ivcurve_pnts=3)
expected['i_mp'] = pvsystem.i_from_v(Rsh, Rs, nNsVth, out['v_mp'], I0, IL,
method='lambertw')
expected['v_mp'] = pvsystem.v_from_i(Rsh, Rs, nNsVth, out['i_mp'], I0, IL,
method='lambertw')
expected['i'] = pvsystem.i_from_v(Rsh, Rs, nNsVth, out['v'].T, I0, IL,
method='lambertw').T
expected['v'] = pvsystem.v_from_i(Rsh, Rs, nNsVth, out['i'].T, I0, IL,
method='lambertw').T
for k, v in out.items():
assert_allclose(v, expected[k], atol=1e-2)
def test_scale_voltage_current_power():
data = pd.DataFrame(
np.array([[2, 1.5, 10, 8, 12, 0.5, 1.5]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=[0])
expected = pd.DataFrame(
np.array([[6, 4.5, 20, 16, 72, 1.5, 4.5]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=[0])
out = pvsystem.scale_voltage_current_power(data, voltage=2, current=3)
assert_frame_equal(out, expected, check_less_precise=5)
def test_PVSystem_scale_voltage_current_power(mocker):
data = None
system = pvsystem.PVSystem(modules_per_string=2, strings_per_inverter=3)
m = mocker.patch(
'pvlib.pvsystem.scale_voltage_current_power', autospec=True)
system.scale_voltage_current_power(data)
m.assert_called_once_with(data, voltage=2, current=3)
def test_PVSystem_multi_scale_voltage_current_power(mocker):
data = (1, 2)
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(modules_per_string=2, strings=3),
pvsystem.Array(modules_per_string=3, strings=5)]
)
m = mocker.patch(
'pvlib.pvsystem.scale_voltage_current_power', autospec=True
)
system.scale_voltage_current_power(data)
m.assert_has_calls(
[mock.call(1, voltage=2, current=3),
mock.call(2, voltage=3, current=5)],
any_order=True
)
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.scale_voltage_current_power(None)
def test_PVSystem_get_ac_sandia(cec_inverter_parameters, mocker):
inv_fun = mocker.spy(inverter, 'sandia')
system = pvsystem.PVSystem(
inverter=cec_inverter_parameters['Name'],
inverter_parameters=cec_inverter_parameters,
)
vdcs = pd.Series(np.linspace(0, 50, 3))
idcs = pd.Series(np.linspace(0, 11, 3))
pdcs = idcs * vdcs
pacs = system.get_ac('sandia', pdcs, v_dc=vdcs)
inv_fun.assert_called_once()
assert_series_equal(pacs, pd.Series([-0.020000, 132.004308, 250.000000]))
@fail_on_pvlib_version('0.10')
def test_PVSystem_snlinverter(cec_inverter_parameters):
system = pvsystem.PVSystem(
inverter=cec_inverter_parameters['Name'],
inverter_parameters=cec_inverter_parameters,
)
vdcs = pd.Series(np.linspace(0,50,3))
idcs = pd.Series(np.linspace(0,11,3))
pdcs = idcs * vdcs
with pytest.warns(pvlibDeprecationWarning):
pacs = system.snlinverter(vdcs, pdcs)
assert_series_equal(pacs, pd.Series([-0.020000, 132.004308, 250.000000]))
def test_PVSystem_get_ac_sandia_multi(cec_inverter_parameters, mocker):
inv_fun = mocker.spy(inverter, 'sandia_multi')
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(), pvsystem.Array()],
inverter=cec_inverter_parameters['Name'],
inverter_parameters=cec_inverter_parameters,
)
vdcs = pd.Series(np.linspace(0, 50, 3))
idcs = pd.Series(np.linspace(0, 11, 3)) / 2
pdcs = idcs * vdcs
pacs = system.get_ac('sandia', (pdcs, pdcs), v_dc=(vdcs, vdcs))
inv_fun.assert_called_once()
assert_series_equal(pacs, pd.Series([-0.020000, 132.004308, 250.000000]))
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_ac('sandia', vdcs, (pdcs, pdcs))
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_ac('sandia', vdcs, (pdcs,))
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_ac('sandia', (vdcs, vdcs), (pdcs, pdcs, pdcs))
def test_PVSystem_get_ac_pvwatts(pvwatts_system_defaults, mocker):
mocker.spy(inverter, 'pvwatts')
pdc = 50
out = pvwatts_system_defaults.get_ac('pvwatts', pdc)
inverter.pvwatts.assert_called_once_with(
pdc, **pvwatts_system_defaults.inverter_parameters)
assert out < pdc
def test_PVSystem_get_ac_pvwatts_kwargs(pvwatts_system_kwargs, mocker):
mocker.spy(inverter, 'pvwatts')
pdc = 50
out = pvwatts_system_kwargs.get_ac('pvwatts', pdc)
inverter.pvwatts.assert_called_once_with(
pdc, **pvwatts_system_kwargs.inverter_parameters)
assert out < pdc
def test_PVSystem_get_ac_pvwatts_multi(
pvwatts_system_defaults, pvwatts_system_kwargs, mocker):
mocker.spy(inverter, 'pvwatts_multi')
expected = [pd.Series([0.0, 48.123524, 86.400000]),
pd.Series([0.0, 45.893550, 85.500000])]
systems = [pvwatts_system_defaults, pvwatts_system_kwargs]
for base_sys, exp in zip(systems, expected):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(), pvsystem.Array()],
inverter_parameters=base_sys.inverter_parameters,
)
pdcs = pd.Series([0., 25., 50.])
pacs = system.get_ac('pvwatts', (pdcs, pdcs))
assert_series_equal(pacs, exp)
assert inverter.pvwatts_multi.call_count == 2
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_ac('pvwatts', (pdcs,))
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_ac('pvwatts', pdcs)
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_ac('pvwatts', (pdcs, pdcs, pdcs))
@pytest.mark.parametrize('model', ['sandia', 'adr', 'pvwatts'])
def test_PVSystem_get_ac_single_array_tuple_input(
model,
pvwatts_system_defaults,
cec_inverter_parameters,
adr_inverter_parameters):
vdcs = {
'sandia': pd.Series(np.linspace(0, 50, 3)),
'pvwatts': None,
'adr': pd.Series([135, 154, 390, 420, 551])
}
pdcs = {'adr': pd.Series([135, 1232, 1170, 420, 551]),
'sandia': pd.Series(np.linspace(0, 11, 3)) * vdcs['sandia'],
'pvwatts': 50}
inverter_parameters = {
'sandia': cec_inverter_parameters,
'adr': adr_inverter_parameters,
'pvwatts': pvwatts_system_defaults.inverter_parameters
}
expected = {
'adr': pd.Series([np.nan, 1161.5745, 1116.4459, 382.6679, np.nan]),
'sandia': pd.Series([-0.020000, 132.004308, 250.000000])
}
system = pvsystem.PVSystem(
arrays=[pvsystem.Array()],
inverter_parameters=inverter_parameters[model]
)
ac = system.get_ac(p_dc=(pdcs[model],), v_dc=(vdcs[model],), model=model)
if model == 'pvwatts':
assert ac < pdcs['pvwatts']
else:
assert_series_equal(ac, expected[model])
def test_PVSystem_get_ac_adr(adr_inverter_parameters, mocker):
mocker.spy(inverter, 'adr')
system = pvsystem.PVSystem(
inverter_parameters=adr_inverter_parameters,
)
vdcs = pd.Series([135, 154, 390, 420, 551])
pdcs = pd.Series([135, 1232, 1170, 420, 551])
pacs = system.get_ac('adr', pdcs, vdcs)
assert_series_equal(pacs, pd.Series([np.nan, 1161.5745, 1116.4459,
382.6679, np.nan]))
inverter.adr.assert_called_once_with(vdcs, pdcs,
system.inverter_parameters)
def test_PVSystem_get_ac_adr_multi(adr_inverter_parameters):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(), pvsystem.Array()],
inverter_parameters=adr_inverter_parameters,
)
pdcs = pd.Series([135, 1232, 1170, 420, 551])
with pytest.raises(ValueError,
match="The adr inverter function cannot be used"):
system.get_ac(model='adr', p_dc=pdcs)
def test_PVSystem_get_ac_invalid(cec_inverter_parameters):
system = pvsystem.PVSystem(
inverter_parameters=cec_inverter_parameters,
)
pdcs = pd.Series(np.linspace(0, 50, 3))
with pytest.raises(ValueError, match="is not a valid AC power model"):
system.get_ac(model='not_a_model', p_dc=pdcs)
def test_PVSystem_creation():
pv_system = pvsystem.PVSystem(module='blah', inverter='blarg')
# ensure that parameter attributes are dict-like. GH 294
pv_system.module_parameters['pdc0'] = 1
pv_system.inverter_parameters['Paco'] = 1
def test_PVSystem_multiple_array_creation():
array_one = pvsystem.Array(surface_tilt=32)
array_two = pvsystem.Array(surface_tilt=15, module_parameters={'pdc0': 1})
pv_system = pvsystem.PVSystem(arrays=[array_one, array_two])
assert pv_system.surface_tilt == (32, 15)
assert pv_system.surface_azimuth == (180, 180)
assert pv_system.module_parameters == ({}, {'pdc0': 1})
assert pv_system.arrays == (array_one, array_two)
with pytest.raises(TypeError):
pvsystem.PVSystem(arrays=array_one)
def test_PVSystem_get_aoi():
system = pvsystem.PVSystem(surface_tilt=32, surface_azimuth=135)
aoi = system.get_aoi(30, 225)
assert np.round(aoi, 4) == 42.7408
def test_PVSystem_multiple_array_get_aoi():
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(surface_tilt=15, surface_azimuth=135),
pvsystem.Array(surface_tilt=32, surface_azimuth=135)]
)
aoi_one, aoi_two = system.get_aoi(30, 225)
assert np.round(aoi_two, 4) == 42.7408
assert aoi_two != aoi_one
assert aoi_one > 0
def test_PVSystem_get_irradiance():
system = pvsystem.PVSystem(surface_tilt=32, surface_azimuth=135)
times = pd.date_range(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
location = Location(latitude=32, longitude=-111)
solar_position = location.get_solarposition(times)
irrads = pd.DataFrame({'dni':[900,0], 'ghi':[600,0], 'dhi':[100,0]},
index=times)
irradiance = system.get_irradiance(solar_position['apparent_zenith'],
solar_position['azimuth'],
irrads['dni'],
irrads['ghi'],
irrads['dhi'])
expected = pd.DataFrame(data=np.array(
[[ 883.65494055, 745.86141676, 137.79352379, 126.397131 ,
11.39639279],
[ 0. , -0. , 0. , 0. , 0. ]]),
columns=['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse'],
index=times)
assert_frame_equal(irradiance, expected, check_less_precise=2)
def test_PVSystem_get_irradiance_model(mocker):
spy_perez = mocker.spy(irradiance, 'perez')
spy_haydavies = mocker.spy(irradiance, 'haydavies')
system = pvsystem.PVSystem(surface_tilt=32, surface_azimuth=135)
times = pd.date_range(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
location = Location(latitude=32, longitude=-111)
solar_position = location.get_solarposition(times)
irrads = pd.DataFrame({'dni': [900, 0], 'ghi': [600, 0], 'dhi': [100, 0]},
index=times)
system.get_irradiance(solar_position['apparent_zenith'],
solar_position['azimuth'],
irrads['dni'],
irrads['ghi'],
irrads['dhi'])
spy_haydavies.assert_called_once()
system.get_irradiance(solar_position['apparent_zenith'],
solar_position['azimuth'],
irrads['dni'],
irrads['ghi'],
irrads['dhi'],
model='perez')
spy_perez.assert_called_once()
def test_PVSystem_multi_array_get_irradiance():
array_one = pvsystem.Array(surface_tilt=32, surface_azimuth=135)
array_two = pvsystem.Array(surface_tilt=5, surface_azimuth=150)
system = pvsystem.PVSystem(arrays=[array_one, array_two])
location = Location(latitude=32, longitude=-111)
times = pd.date_range(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
solar_position = location.get_solarposition(times)
irrads = pd.DataFrame({'dni': [900, 0], 'ghi': [600, 0], 'dhi': [100, 0]},
index=times)
array_one_expected = array_one.get_irradiance(
solar_position['apparent_zenith'],
solar_position['azimuth'],
irrads['dni'], irrads['ghi'], irrads['dhi']
)
array_two_expected = array_two.get_irradiance(
solar_position['apparent_zenith'],
solar_position['azimuth'],
irrads['dni'], irrads['ghi'], irrads['dhi']
)
array_one_irrad, array_two_irrad = system.get_irradiance(
solar_position['apparent_zenith'],
solar_position['azimuth'],
irrads['dni'], irrads['ghi'], irrads['dhi']
)
assert_frame_equal(
array_one_irrad, array_one_expected, check_less_precise=2
)
assert_frame_equal(
array_two_irrad, array_two_expected, check_less_precise=2
)
def test_PVSystem_multi_array_get_irradiance_multi_irrad():
"""Test a system with two identical arrays but different irradiance.
Because only the irradiance is different we expect the same output
when only one GHI/DHI/DNI input is given, but different output
    for each array when different GHI/DHI/DNI input is given. For the latter
case we verify that the correct irradiance data is passed to each array.
"""
array_one = pvsystem.Array()
array_two = pvsystem.Array()
system = pvsystem.PVSystem(arrays=[array_one, array_two])
location = Location(latitude=32, longitude=-111)
times = pd.date_range(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
solar_position = location.get_solarposition(times)
irrads = pd.DataFrame({'dni': [900, 0], 'ghi': [600, 0], 'dhi': [100, 0]},
index=times)
irrads_two = pd.DataFrame(
{'dni': [0, 900], 'ghi': [0, 600], 'dhi': [0, 100]},
index=times
)
array_irrad = system.get_irradiance(
solar_position['apparent_zenith'],
solar_position['azimuth'],
(irrads['dhi'], irrads['dhi']),
(irrads['ghi'], irrads['ghi']),
(irrads['dni'], irrads['dni'])
)
assert_frame_equal(array_irrad[0], array_irrad[1])
array_irrad = system.get_irradiance(
solar_position['apparent_zenith'],
solar_position['azimuth'],
(irrads['dhi'], irrads_two['dhi']),
(irrads['ghi'], irrads_two['ghi']),
(irrads['dni'], irrads_two['dni'])
)
array_one_expected = array_one.get_irradiance(
solar_position['apparent_zenith'],
solar_position['azimuth'],
irrads['dhi'], irrads['ghi'], irrads['dni']
)
array_two_expected = array_two.get_irradiance(
solar_position['apparent_zenith'],
solar_position['azimuth'],
irrads_two['dhi'], irrads_two['ghi'], irrads_two['dni']
)
assert not array_irrad[0].equals(array_irrad[1])
assert_frame_equal(array_irrad[0], array_one_expected)
assert_frame_equal(array_irrad[1], array_two_expected)
with pytest.raises(ValueError,
match="Length mismatch for per-array parameter"):
system.get_irradiance(
solar_position['apparent_zenith'],
solar_position['azimuth'],
(irrads['dhi'], irrads_two['dhi'], irrads['dhi']),
(irrads['ghi'], irrads_two['ghi']),
irrads['dni']
)
array_irrad = system.get_irradiance(
solar_position['apparent_zenith'],
solar_position['azimuth'],
(irrads['dhi'], irrads_two['dhi']),
irrads['ghi'],
irrads['dni']
)
assert_frame_equal(array_irrad[0], array_one_expected)
assert not array_irrad[0].equals(array_irrad[1])
def test_PVSystem_change_surface_azimuth():
system = pvsystem.PVSystem(surface_azimuth=180)
assert system.surface_azimuth == 180
system.surface_azimuth = 90
assert system.surface_azimuth == 90
def test_PVSystem_get_albedo(two_array_system):
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(albedo=0.5)]
)
assert system.albedo == 0.5
assert two_array_system.albedo == (0.25, 0.25)
def test_PVSystem_modules_per_string():
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(modules_per_string=1),
pvsystem.Array(modules_per_string=2)]
)
assert system.modules_per_string == (1, 2)
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(modules_per_string=5)]
)
assert system.modules_per_string == 5
def test_PVSystem_strings_per_inverter():
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(strings=2),
pvsystem.Array(strings=1)]
)
assert system.strings_per_inverter == (2, 1)
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(strings=5)]
)
assert system.strings_per_inverter == 5
def test_PVSystem___repr__():
system = pvsystem.PVSystem(
module='blah', inverter='blarg', name='pv ftw',
temperature_model_parameters={'a': -3.56})
expected = """PVSystem:
name: pv ftw
Array:
name: None
surface_tilt: 0
surface_azimuth: 180
module: blah
albedo: 0.25
racking_model: None
module_type: None
temperature_model_parameters: {'a': -3.56}
strings: 1
modules_per_string: 1
inverter: blarg"""
assert system.__repr__() == expected
def test_PVSystem_multi_array___repr__():
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(surface_tilt=30, surface_azimuth=100),
pvsystem.Array(surface_tilt=20, surface_azimuth=220,
name='foo')],
inverter='blarg',
)
expected = """PVSystem:
name: None
Array:
name: None
surface_tilt: 30
surface_azimuth: 100
module: None
albedo: 0.25
racking_model: None
module_type: None
temperature_model_parameters: {}
strings: 1
modules_per_string: 1
Array:
name: foo
surface_tilt: 20
surface_azimuth: 220
module: None
albedo: 0.25
racking_model: None
module_type: None
temperature_model_parameters: {}
strings: 1
modules_per_string: 1
inverter: blarg"""
assert expected == system.__repr__()
def test_Array___repr__():
array = pvsystem.Array(
surface_tilt=10, surface_azimuth=100,
albedo=0.15, module_type='glass_glass',
temperature_model_parameters={'a': -3.56},
racking_model='close_mount',
module_parameters={'foo': 'bar'},
modules_per_string=100,
strings=10, module='baz',
name='biz'
)
expected = """Array:
name: biz
surface_tilt: 10
surface_azimuth: 100
module: baz
albedo: 0.15
racking_model: close_mount
module_type: glass_glass
temperature_model_parameters: {'a': -3.56}
strings: 10
modules_per_string: 100"""
assert array.__repr__() == expected
def test_pvwatts_dc_scalars():
expected = 88.65
out = pvsystem.pvwatts_dc(900, 30, 100, -0.003)
assert_allclose(out, expected)
def test_pvwatts_dc_arrays():
irrad_trans = np.array([np.nan, 900, 900])
temp_cell = np.array([30, np.nan, 30])
irrad_trans, temp_cell = np.meshgrid(irrad_trans, temp_cell)
expected = np.array([[nan, 88.65, 88.65],
[nan, nan, nan],
[nan, 88.65, 88.65]])
out = pvsystem.pvwatts_dc(irrad_trans, temp_cell, 100, -0.003)
assert_allclose(out, expected, equal_nan=True)
def test_pvwatts_dc_series():
irrad_trans = pd.Series([np.nan, 900, 900])
temp_cell = pd.Series([30, np.nan, 30])
expected = pd.Series(np.array([ nan, nan, 88.65]))
out = pvsystem.pvwatts_dc(irrad_trans, temp_cell, 100, -0.003)
assert_series_equal(expected, out)
def test_pvwatts_losses_default():
expected = 14.075660688264469
out = pvsystem.pvwatts_losses()
assert_allclose(out, expected)
def test_pvwatts_losses_arrays():
expected = np.array([nan, 14.934904])
age = np.array([nan, 1])
out = pvsystem.pvwatts_losses(age=age)
assert_allclose(out, expected)
def test_pvwatts_losses_series():
expected = pd.Series([nan, 14.934904])
age = pd.Series([nan, 1])
out = pvsystem.pvwatts_losses(age=age)
assert_series_equal(expected, out)
@pytest.fixture
def pvwatts_system_defaults():
module_parameters = {'pdc0': 100, 'gamma_pdc': -0.003}
inverter_parameters = {'pdc0': 90}
system = pvsystem.PVSystem(module_parameters=module_parameters,
inverter_parameters=inverter_parameters)
return system
@pytest.fixture
def pvwatts_system_kwargs():
module_parameters = {'pdc0': 100, 'gamma_pdc': -0.003, 'temp_ref': 20}
inverter_parameters = {'pdc0': 90, 'eta_inv_nom': 0.95, 'eta_inv_ref': 1.0}
system = pvsystem.PVSystem(module_parameters=module_parameters,
inverter_parameters=inverter_parameters)
return system
def test_PVSystem_pvwatts_dc(pvwatts_system_defaults, mocker):
mocker.spy(pvsystem, 'pvwatts_dc')
irrad = 900
temp_cell = 30
expected = 90
out = pvwatts_system_defaults.pvwatts_dc(irrad, temp_cell)
pvsystem.pvwatts_dc.assert_called_once_with(
irrad, temp_cell, **pvwatts_system_defaults.module_parameters)
assert_allclose(expected, out, atol=10)
def test_PVSystem_pvwatts_dc_kwargs(pvwatts_system_kwargs, mocker):
mocker.spy(pvsystem, 'pvwatts_dc')
irrad = 900
temp_cell = 30
expected = 90
out = pvwatts_system_kwargs.pvwatts_dc(irrad, temp_cell)
pvsystem.pvwatts_dc.assert_called_once_with(
irrad, temp_cell, **pvwatts_system_kwargs.module_parameters)
assert_allclose(expected, out, atol=10)
def test_PVSystem_multiple_array_pvwatts_dc():
array_one_module_parameters = {
'pdc0': 100, 'gamma_pdc': -0.003, 'temp_ref': 20
}
array_one = pvsystem.Array(
module_parameters=array_one_module_parameters
)
array_two_module_parameters = {
'pdc0': 150, 'gamma_pdc': -0.002, 'temp_ref': 25
}
array_two = pvsystem.Array(
module_parameters=array_two_module_parameters
)
system = pvsystem.PVSystem(arrays=[array_one, array_two])
irrad_one = 900
irrad_two = 500
temp_cell_one = 30
temp_cell_two = 20
expected_one = pvsystem.pvwatts_dc(irrad_one, temp_cell_one,
**array_one_module_parameters)
expected_two = pvsystem.pvwatts_dc(irrad_two, temp_cell_two,
**array_two_module_parameters)
dc_one, dc_two = system.pvwatts_dc((irrad_one, irrad_two),
(temp_cell_one, temp_cell_two))
assert dc_one == expected_one
assert dc_two == expected_two
def test_PVSystem_multiple_array_pvwatts_dc_value_error():
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(), pvsystem.Array(), pvsystem.Array()]
)
error_message = 'Length mismatch for per-array parameter'
with pytest.raises(ValueError, match=error_message):
system.pvwatts_dc(10, (1, 1, 1))
with pytest.raises(ValueError, match=error_message):
system.pvwatts_dc((10, 10), (1, 1, 1))
with pytest.raises(ValueError, match=error_message):
system.pvwatts_dc((10, 10, 10, 10), (1, 1, 1))
with pytest.raises(ValueError, match=error_message):
system.pvwatts_dc((1, 1, 1), 1)
with pytest.raises(ValueError, match=error_message):
system.pvwatts_dc((1, 1, 1), (1,))
with pytest.raises(ValueError, match=error_message):
system.pvwatts_dc((1,), 1)
with pytest.raises(ValueError, match=error_message):
system.pvwatts_dc((1, 1, 1, 1), (1, 1))
with pytest.raises(ValueError, match=error_message):
system.pvwatts_dc(2, 3)
with pytest.raises(ValueError, match=error_message):
# ValueError is raised for non-tuple iterable with correct length
system.pvwatts_dc((1, 1, 1), pd.Series([1, 2, 3]))
def test_PVSystem_pvwatts_losses(pvwatts_system_defaults, mocker):
mocker.spy(pvsystem, 'pvwatts_losses')
age = 1
pvwatts_system_defaults.losses_parameters = dict(age=age)
expected = 15
out = pvwatts_system_defaults.pvwatts_losses()
pvsystem.pvwatts_losses.assert_called_once_with(age=age)
assert out < expected
@fail_on_pvlib_version('0.10')
def test_PVSystem_pvwatts_ac(pvwatts_system_defaults, mocker):
mocker.spy(inverter, 'pvwatts')
pdc = 50
with pytest.warns(pvlibDeprecationWarning):
out = pvwatts_system_defaults.pvwatts_ac(pdc)
inverter.pvwatts.assert_called_once_with(
pdc, **pvwatts_system_defaults.inverter_parameters)
assert out < pdc
@fail_on_pvlib_version('0.10')
def test_PVSystem_pvwatts_ac_kwargs(pvwatts_system_kwargs, mocker):
mocker.spy(inverter, 'pvwatts')
pdc = 50
with pytest.warns(pvlibDeprecationWarning):
out = pvwatts_system_kwargs.pvwatts_ac(pdc)
inverter.pvwatts.assert_called_once_with(
pdc, **pvwatts_system_kwargs.inverter_parameters)
assert out < pdc
def test_PVSystem_num_arrays():
system_one = pvsystem.PVSystem()
system_two = pvsystem.PVSystem(arrays=[pvsystem.Array(), pvsystem.Array()])
assert system_one.num_arrays == 1
assert system_two.num_arrays == 2
def test_combine_loss_factors():
    test_index = pd.date_range(start='1990/01/01T12:00', periods=365, freq='D')
import pandas as pd
import numpy as np
from datetime import datetime
###############
# SELECT DATA #
###############
print("Selecting attributes...")
# GIT_COMMITS
gitCommits = pd.read_csv("../../data/raw/GIT_COMMITS.csv")
attributes = ['projectID', 'commitHash', 'author', 'committer', 'committerDate']
gitCommits = gitCommits[attributes]
gitCommits.to_csv('../../data/interim/DataPreparation/SelectData/GIT_COMMITS_select.csv', header=True)
# GIT_COMMITS_CHANGES
gitCommitsChanges = pd.read_csv("../../data/raw/GIT_COMMITS_CHANGES.csv")
attributes = ['projectID', 'commitHash', 'changeType', 'linesAdded', 'linesRemoved']
gitCommitsChanges = gitCommitsChanges[attributes]
gitCommitsChanges.to_csv('../../data/interim/DataPreparation/SelectData/GIT_COMMITS_CHANGES_select.csv', header=True)
# JIRA_ISSUES
jiraIssues = pd.read_csv("../../data/raw/JIRA_ISSUES.csv")
attributes = ['projectID', 'key', 'creationDate', 'resolutionDate', 'type', 'priority', 'assignee', 'reporter']
jiraIssues = jiraIssues[attributes]
jiraIssues.to_csv('../../data/interim/DataPreparation/SelectData/JIRA_ISSUES_select.csv', header=True)
# REFACTORING_MINER
refactoringMiner = pd.read_csv("../../data/raw/REFACTORING_MINER.csv")
attributes = ['projectID', 'commitHash', 'refactoringType']
refactoringMiner = refactoringMiner[attributes]
refactoringMiner.to_csv('../../data/interim/DataPreparation/SelectData/REFACTORING_MINER_select.csv', header=True)
# SONAR_ISSUES
sonarIssues = pd.read_csv("../../data/raw/SONAR_ISSUES.csv")
attributes = ['projectID', 'creationDate', 'closeDate', 'creationCommitHash', 'closeCommitHash', 'type', 'severity',
'debt', 'author']
sonarIssues = sonarIssues[attributes]
sonarIssues.to_csv('../../data/interim/DataPreparation/SelectData/SONAR_ISSUES_select.csv', header=True)
# SONAR_MEASURES
sonarMeasures = pd.read_csv("../../data/raw/SONAR_MEASURES.csv")
attributes = ['commitHash', 'projectID', 'functions', 'commentLinesDensity', 'complexity', 'functionComplexity', 'duplicatedLinesDensity',
'violations', 'blockerViolations', 'criticalViolations', 'infoViolations', 'majorViolations', 'minorViolations', 'codeSmells',
'bugs', 'vulnerabilities', 'cognitiveComplexity', 'ncloc', 'sqaleIndex', 'sqaleDebtRatio', 'reliabilityRemediationEffort', 'securityRemediationEffort']
sonarMeasures = sonarMeasures[attributes]
sonarMeasures.to_csv('../../data/interim/DataPreparation/SelectData/SONAR_MEASURES_select.csv', header=True)
# SZZ_FAULT_INDUCING_COMMITS
szzFaultInducingCommits = pd.read_csv("../../data/raw/SZZ_FAULT_INDUCING_COMMITS.csv")
attributes = ['projectID', 'faultFixingCommitHash', 'faultInducingCommitHash', 'key']
szzFaultInducingCommits = szzFaultInducingCommits[attributes]
szzFaultInducingCommits.to_csv('../../data/interim/DataPreparation/SelectData/SZZ_FAULT_INDUCING_COMMITS_select.csv', header=True)
print("Attributes selected.")
##############
# CLEAN DATA #
##############
print("Cleaning data...")
def intersection(l1, l2):
temp = set(l2)
l3 = [value for value in l1 if value in temp]
return l3
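# Helper: symmetric difference of two lists (elements in exactly one of them).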
def difference(li1, li2):
return (list(list(set(li1)-set(li2)) + list(set(li2)-set(li1))))
# GIT_COMMITS
gitCommits = pd.read_csv("../../data/interim/DataPreparation/SelectData/GIT_COMMITS_select.csv")
authorNan = list(np.where(gitCommits.author.isna()))[0]
committerNan = list(np.where(gitCommits.committer.isna()))[0]
inters = intersection(authorNan, committerNan)
gitCommits = gitCommits.drop(inters)
gitCommits.to_csv('../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv', header=True)
# GIT_COMMITS_CHANGES
gitCommitsChanges = pd.read_csv("../../data/interim/DataPreparation/SelectData/GIT_COMMITS_CHANGES_select.csv").iloc[:,1:]
gitCommitsChanges.to_csv('../../data/interim/DataPreparation/CleanData/GIT_COMMITS_CHANGES_clean.csv', header=True)
# JIRA_ISSUES
jiraIssues = pd.read_csv("../../data/interim/DataPreparation/SelectData/JIRA_ISSUES_select.csv").iloc[:,1:]
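# Unresolved issues have no resolutionDate: impute it with the last commit
# timestamp of the corresponding project.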
resolutionDate_nan = list(np.where(jiraIssues.resolutionDate.isna()))[0]
jiraIssues_notresolved = jiraIssues.iloc[resolutionDate_nan,:]
gitCommits = pd.read_csv("../../data/interim/DataPreparation/SelectData/GIT_COMMITS_select.csv").iloc[:,[1,-1]]
lastTimestamp = gitCommits.groupby(['projectID']).max()
jiraIssues_notresolved = pd.merge(jiraIssues_notresolved, lastTimestamp, how='left', on='projectID')
jiraIssues_notresolved = jiraIssues_notresolved.iloc[:,[0,1,2,4,5,6,7,8]].rename(columns={'committerDate': 'resolutionDate'})
jiraIssues_resolved = jiraIssues.drop(resolutionDate_nan)
jiraIssues = pd.concat([jiraIssues_resolved, jiraIssues_notresolved], sort=False).sort_index().reset_index().iloc[:,1:]
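# Drop issues with no priority and mark missing assignees as 'not-assigned'.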
priority_nan = list(np.where(jiraIssues.priority.isna()))[0]
jiraIssues = jiraIssues.drop(priority_nan)
assignee_nan = list(np.where(jiraIssues.assignee.isna()))[0]
jiraIssues.assignee = jiraIssues.assignee.fillna('not-assigned')
jiraIssues.to_csv('../../data/interim/DataPreparation/CleanData/JIRA_ISSUES_clean.csv', header=True)
# REFACTORING_MINER
refactoringMiner = pd.read_csv("../../data/interim/DataPreparation/SelectData/REFACTORING_MINER_select.csv")
commitHashNan = list(np.where(refactoringMiner.commitHash.isna()))[0]
refactoringTypeNan = list(np.where(refactoringMiner.refactoringType.isna()))[0]
inters = intersection(commitHashNan, refactoringTypeNan)
refactoringMiner = refactoringMiner.drop(inters)
refactoringMiner.to_csv('../../data/interim/DataPreparation/CleanData/REFACTORING_MINER_clean.csv', header=True)
# SONAR_ISSUES
sonarIssues = pd.read_csv("../../data/interim/DataPreparation/SelectData/SONAR_ISSUES_select.csv").iloc[:,1:]
closeDateNan = list(np.where(sonarIssues.closeDate.isna()))[0]
closeCommitHashNan = list(np.where(sonarIssues.closeCommitHash.isna()))[0]
debtNan = list(np.where(sonarIssues.debt.isna()))[0]
authorNan = list(np.where(sonarIssues.author.isna()))[0]
inter = intersection(closeDateNan, closeCommitHashNan)
diff = difference(closeCommitHashNan, closeDateNan)
debtNan = list(np.where(sonarIssues.debt.isna())[0])
sonarIssues = sonarIssues.drop(debtNan).reset_index()
sonarIssues = sonarIssues.fillna({'closeCommitHash': 'not-resolved'})
gitCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv").iloc[:,1:]
lastTimestamp = gitCommits.loc[:,['projectID', 'committerDate']].groupby(['projectID']).max()
closeDateNan = list(np.where(sonarIssues.closeDate.isna()))[0]
sonarIssues_notresolved = sonarIssues.iloc[closeDateNan,:]
sonarIssues_notresolved = pd.merge(sonarIssues_notresolved, lastTimestamp, how='left', on='projectID')
sonarIssues_notresolved = sonarIssues_notresolved.loc[:,['projectID', 'creationDate', 'creationCommitHash', 'closeCommitHash', 'type', 'severity', 'debt', 'author', 'committerDate']].rename(columns={'committerDate': 'closeDate'})
sonarIssues_resolved = sonarIssues.drop(closeDateNan)
sonarIssues = pd.concat([sonarIssues_resolved, sonarIssues_notresolved], sort=False).sort_index().reset_index().iloc[:,1:]
sonarIssues.groupby(['author']).size().reset_index().rename(columns={0:'count'})
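# Impute missing Sonar issue authors: pair committers with issue authors via the
# creation commit hash, keep only the one-to-one pairs, and use that mapping to
# fill NaN 'author' values from 'creationCommitHash'.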
df1 = gitCommits[['commitHash', 'committer']]
df2 = (sonarIssues[['creationCommitHash', 'author']]).rename(columns={'creationCommitHash': 'commitHash'})
merge = pd.merge(df1, df2, on='commitHash', how='inner').drop_duplicates()
pairs = merge.groupby(['committer', 'author']).size().reset_index().rename(columns={0:'count'})
index1 = list(np.where(pairs.committer.value_counts()==1))[0]
committer_1 = (pairs.committer.value_counts())[index1].index
index2 = list(np.where(pairs.author.value_counts()==1))[0]
author_1 = (pairs.author.value_counts())[index2].index
index_author_1 = pairs.loc[pairs['author'].isin(author_1)].index
index_committer_1 = pairs.loc[pairs['committer'].isin(committer_1)].index
inter_pairs = intersection(index_author_1, index_committer_1)
pairs_unique = pairs.loc[inter_pairs]
commiters = list(pairs_unique.committer)
authors = list(pairs_unique.author)
merge2 = pd.merge(merge, pairs_unique, on='committer', how='inner')
merge2 = merge2[['commitHash', 'committer', 'author_y']].rename(columns={'author_y': 'author', 'commitHash': 'creationCommitHash'})
merge2 = merge2.drop_duplicates()
prova2 = merge2[['creationCommitHash', 'author']]
dictionary = prova2.set_index('creationCommitHash').T.to_dict('records')[0]
sonarIssues.author = sonarIssues.author.fillna(sonarIssues.creationCommitHash.map(dictionary))
sonarIssues = sonarIssues.dropna(subset=['author'])
sonarIssues = sonarIssues.iloc[:,1:]
sonarIssues.to_csv('../../data/interim/DataPreparation/CleanData/SONAR_ISSUES_clean.csv', header=True)
# SONAR_MEASURES
sonarMeasures = pd.read_csv("../../data/interim/DataPreparation/SelectData/SONAR_MEASURES_select.csv")
sonarMeasures.to_csv('../../data/interim/DataPreparation/CleanData/SONAR_MEASURES_clean.csv', header=True)
# SZZ_FAULT_INDUCING_COMMITS
szzFaultInducingCommits = pd.read_csv("../../data/interim/DataPreparation/SelectData/SZZ_FAULT_INDUCING_COMMITS_select.csv").iloc[:,1:]
szzFaultInducingCommits.to_csv('../../data/interim/DataPreparation/CleanData/SZZ_FAULT_INDUCING_COMMITS_clean.csv', header=True)
print("Data cleaned.")
##################
# CONSTRUCT DATA #
##################
print("Constructing data...")
def produce_bug(x):
if pd.isna(x.faultFixingCommitHash):
return False
return True
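# produce_bug(): after the left join with the SZZ table, a non-NaN
# faultFixingCommitHash means the commit was flagged as fault-inducing (bug=True).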
# COMMITS_FREQUENCY
gitCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv")
gitCommits = gitCommits[['committer', 'committerDate']]
newDFsorted = gitCommits.sort_values(by=['committer', 'committerDate']).reset_index()[['committer', 'committerDate']]
newDFsortedCopy = []
committer = newDFsorted.iloc[0,0]
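# Collect (committer, time delta between consecutive commits) pairs, then average
# the deltas per committer to obtain the mean time between commits.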
for index, row in newDFsorted.iterrows():
if index != 0:
if committer == newDFsorted.iloc[index,0]:
r = (pd.to_datetime(newDFsorted.iloc[index,1])-pd.to_datetime(newDFsorted.iloc[index-1,1]))
newDFsortedCopy.append([committer, r])
else:
committer = newDFsorted.iloc[index,0]
time_between_commits = pd.DataFrame(newDFsortedCopy)
time_between_commits[1] = time_between_commits[1].dt.total_seconds()
time_between_commits_commiter = time_between_commits.groupby([0]).mean()
time_between_commits_commiter = pd.DataFrame(time_between_commits_commiter).rename(columns={0:'committer', 1:'time_between_commits'})
time_between_commits_commiter.to_csv('../../data/interim/DataPreparation/ConstructData/COMMITS_FREQUENCY.csv', header=True)
# FIXED_ISSUES
gitCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv")
SZZcommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/SZZ_FAULT_INDUCING_COMMITS_clean.csv")
sonarIssues = pd.read_csv("../../data/interim/DataPreparation/CleanData/SONAR_ISSUES_clean.csv")
jiraIssues = pd.read_csv("../../data/interim/DataPreparation/CleanData/JIRA_ISSUES_clean.csv")
SZZcommits = SZZcommits['faultFixingCommitHash']
gitCommits = gitCommits[['commitHash', 'committer']]
sonarIssues = sonarIssues['closeCommitHash']
jiraIssues = jiraIssues['assignee']
SZZ_issues = (pd.merge(SZZcommits, gitCommits, how='inner', left_on='faultFixingCommitHash', right_on='commitHash').drop_duplicates())[['commitHash', 'committer']]
SSZ_issue_committer = SZZ_issues.committer.value_counts().rename_axis('committer').reset_index(name='SZZIssues')
Sonar_issues = pd.merge(sonarIssues, gitCommits, how='inner', left_on='closeCommitHash', right_on='commitHash').drop_duplicates()[['commitHash', 'committer']]
Sonar_issues_committer = Sonar_issues.committer.value_counts().rename_axis('committer').reset_index(name='SonarIssues')
Jira_issues_committer = jiraIssues[jiraIssues != 'not-assigned'].value_counts().rename_axis('committer').reset_index(name='JiraIssues')
issues = pd.merge(SSZ_issue_committer, Sonar_issues_committer, on='committer', how='outer')
issues = pd.merge(issues, Jira_issues_committer, on='committer', how='outer')
issues = issues.fillna(0)
issues.to_csv('../../data/interim/DataPreparation/ConstructData/FIXED_ISSUES.csv', header=True)
# INDUCED_ISSUES
gitCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv")
SZZcommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/SZZ_FAULT_INDUCING_COMMITS_clean.csv")
sonarIssues = pd.read_csv("../../data/interim/DataPreparation/CleanData/SONAR_ISSUES_clean.csv")
SZZcommits = SZZcommits['faultInducingCommitHash']
gitCommits = gitCommits[['commitHash', 'committer']]
sonarIssues = sonarIssues['creationCommitHash']
SZZ_issues = (pd.merge(SZZcommits, gitCommits, how='inner', left_on='faultInducingCommitHash', right_on='commitHash').drop_duplicates())[['commitHash', 'committer']]
SSZ_issue_committer = SZZ_issues.committer.value_counts().rename_axis('committer').reset_index(name='SZZIssues')
Sonar_issues = pd.merge(sonarIssues, gitCommits, how='inner', left_on='creationCommitHash', right_on='commitHash').drop_duplicates()[['commitHash', 'committer']]
Sonar_issues_committer = Sonar_issues.committer.value_counts().rename_axis('committer').reset_index(name='SonarIssues')
issues = pd.merge(SSZ_issue_committer, Sonar_issues_committer, on='committer', how='outer')
issues = issues.fillna(0)
issues.to_csv('../../data/interim/DataPreparation/ConstructData/INDUCED_ISSUES.csv', header=True)
# JIRA_ISSUES_time
jiraIssues = pd.read_csv("../../data/interim/DataPreparation/CleanData/JIRA_ISSUES_clean.csv").iloc[:,1:]
jiraIssues['creationDate'] = pd.to_datetime(jiraIssues['creationDate'], format="%Y-%m-%dT%H:%M:%S.%f")
jiraIssues['resolutionDate'] = pd.to_datetime(jiraIssues['resolutionDate'], format="%Y-%m-%dT%H:%M:%S.%f")
jiraIssues["resolutionTime"] = jiraIssues["resolutionDate"]
seconds = (jiraIssues.loc[:,"resolutionDate"] - jiraIssues.loc[:,"creationDate"]).dt.total_seconds()
jiraIssues.loc[:,"resolutionTime"] = seconds/3600
jiraIssues.to_csv('../../data/interim/DataPreparation/ConstructData/JIRA_ISSUES_time.csv', header=True)
# NUMBER_COMMITS
gitCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv").iloc[:,2:]
number_commits = gitCommits.groupby(['committer']).count().iloc[1:,1]
number_commits = pd.DataFrame(number_commits).rename(columns={'commitHash': 'numberCommits'})
number_commits.to_csv('../../data/interim/DataPreparation/ConstructData/NUMBER_COMMITS.csv', header=True)
# REFACTORING_MINER_bug
refactoringMiner = pd.read_csv("../../data/interim/DataPreparation/CleanData/REFACTORING_MINER_clean.csv")[['projectID', 'commitHash', 'refactoringType']]
szzFaultInducingCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/SZZ_FAULT_INDUCING_COMMITS_clean.csv")[['projectID', 'faultFixingCommitHash', 'faultInducingCommitHash']]
induced_bug = pd.merge(refactoringMiner, szzFaultInducingCommits, how='left', left_on='commitHash', right_on='faultInducingCommitHash').drop_duplicates().reset_index()
induced_bug['bug'] = induced_bug.apply(lambda x: produce_bug(x), axis=1)
induced_bug = induced_bug[['projectID_x', 'commitHash', 'refactoringType', 'bug']].rename(columns={'projectID_x': 'projectID'})
induced_bug.to_csv('../../data/interim/DataPreparation/ConstructData/REFACTORING_MINER_bug.csv', header=True)
# SONAR_ISSUES_time
sonarIssues = pd.read_csv("../../data/interim/DataPreparation/CleanData/SONAR_ISSUES_clean.csv").iloc[:,1:]
sonarIssues['creationDate'] = pd.to_datetime(sonarIssues['creationDate'], format='%Y-%m-%dT%H:%M:%SZ')
sonarIssues["closeDate"] = pd.to_datetime(sonarIssues["closeDate"], format="%Y-%m-%dT%H:%M:%SZ")
sonarIssues["closeTime"] = sonarIssues["closeDate"]
seconds = (sonarIssues.loc[:,"closeDate"] - sonarIssues.loc[:,"creationDate"]).dt.total_seconds()
sonarIssues.loc[:,"closeTime"] = seconds/3600
sonarIssues.to_csv('../../data/interim/DataPreparation/ConstructData/SONAR_ISSUES_time.csv', header=True)
# SONAR_MEASURES_difference
sonarMeasures = pd.read_csv("../../data/interim/DataPreparation/CleanData/SONAR_MEASURES_clean.csv").iloc[:, 2:]
gitCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv").iloc[:, 2:]
gitCommits['committerDate'] = pd.to_datetime(gitCommits['committerDate'], format='%Y-%m-%dT%H:%M:%SZ')
newDF = pd.merge(sonarMeasures, gitCommits, how='left', left_on=['commitHash','projectID'], right_on = ['commitHash','projectID'])
newDFNaN = list(np.where(newDF.committerDate.isna()))[0]
newDF = newDF.drop(newDFNaN)
projectID = newDF.projectID.unique()
newDFsorted = newDF.sort_values(by=['projectID', 'committerDate'])
newDFsortedCopy = newDFsorted.copy()
project = newDFsorted.iloc[0,1]
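# Replace each row's measure columns with the difference from the previous commit of
# the same project; the hard-coded 55625 bound appears to be this dataset's row count.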
for index, row in newDFsorted.iterrows():
if index < 55625:
if project == newDFsorted.iloc[index,1]:
r = newDFsortedCopy.iloc[index-1:index+1,2:22].diff().iloc[1,:]
newDFsorted.iloc[index:index+1,2:22] = np.array(r)
else:
project = newDFsorted.iloc[index,1]
sonarMeasuresDifference = newDFsorted.iloc[:,:22]
sonarMeasuresDifference.to_csv('../../data/interim/DataPreparation/ConstructData/SONAR_MEASURES_difference.csv', header=True)
# TIME_IN_PROJECT
gitCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv")
time_in_project = gitCommits.groupby(['projectID', 'committer'])['committerDate'].agg(['min', 'max']).reset_index()
time = (pd.to_datetime(time_in_project['max'])-pd.to_datetime(time_in_project['min']))
time_in_project['time'] = time.dt.total_seconds()
time_in_project = time_in_project[['projectID', 'committer', 'time']]
time_in_project.to_csv('../../data/interim/DataPreparation/ConstructData/TIME_IN_PROJECT.csv', header=True)
print("Data constructed.")
##################
# INTEGRATE DATA #
##################
print("Integarting data...")
numberCommits = pd.read_csv("../../data/interim/DataPreparation/ConstructData/NUMBER_COMMITS.csv")
fixedIssues = pd.read_csv("../../data/interim/DataPreparation/ConstructData/FIXED_ISSUES.csv").iloc[:,1:]
fixedIssues = fixedIssues.rename(columns={'SZZIssues':'fixedSZZIssues','SonarIssues':'fixedSonarIssues','JiraIssues':'fixedJiraIssues'})
inducedIssues = pd.read_csv("../../data/interim/DataPreparation/ConstructData/INDUCED_ISSUES.csv").iloc[:,1:]
inducedIssues = inducedIssues.rename(columns={'SZZIssues':'inducedSZZIssues','SonarIssues':'inducedSonarIssues'})
dataFrame = pd.merge(numberCommits, fixedIssues, how='outer', left_on=['committer'], right_on = ['committer'])
dataFrame = dataFrame.fillna(0.0)
dataFrame = pd.merge(dataFrame, inducedIssues, how='outer', left_on=['committer'], right_on = ['committer'])
dataFrame = dataFrame.fillna(0)
timeInProject = pd.read_csv("../../data/interim/DataPreparation/ConstructData/TIME_IN_PROJECT.csv").iloc[:,1:]
timeInProject = timeInProject.rename(columns={'time':'timeInProject'})
timeInProject = timeInProject.groupby(['committer']).mean().iloc[1:,:]
dataFrame = pd.merge(dataFrame, timeInProject, how='outer', left_on=['committer'], right_on = ['committer'])
dataFrame = dataFrame.fillna(0)
jiraIssues = pd.read_csv("../../data/interim/DataPreparation/ConstructData/JIRA_ISSUES_time.csv").iloc[:,1:]
dum = pd.get_dummies(jiraIssues[["type", 'priority']], prefix=['type', 'priority'])
TypePriority = jiraIssues[['assignee']].join(dum)
TypePriority = TypePriority[TypePriority.assignee!='not-assigned'].reset_index().iloc[:,1:]
TypePriority = TypePriority.groupby(["assignee"]).sum()
resolutionTime = jiraIssues.loc[:,['assignee','resolutionTime']]
resolutionTime = resolutionTime.groupby(["assignee"]).mean()
jiraIssues = resolutionTime.join(TypePriority)
jiraIssues = jiraIssues.reset_index().rename(columns={'assignee':'committer'})
dataFrame = pd.merge(dataFrame, jiraIssues, how='left', left_on=['committer'], right_on = ['committer'])
dataFrame = dataFrame.fillna(0.0)
gitCommitsChanges = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_CHANGES_clean.csv").iloc[:,2:]
dum = pd.get_dummies(gitCommitsChanges[["changeType"]])
dum = dum.rename(columns={'changeType_ModificationType.ADD':'ADD', 'changeType_ModificationType.DELETE':'DELETE', 'changeType_ModificationType.MODIFY':'MODIFY', 'changeType_ModificationType.RENAME':'RENAME', 'changeType_ModificationType.UNKNOWN':'UNKNOWN'})
Lines = gitCommitsChanges[["commitHash",'linesAdded','linesRemoved']]
gitCommitsChanges = pd.concat([Lines,dum], axis=1)
gitCommitsChanges = gitCommitsChanges.groupby(['commitHash']).agg({'ADD':'sum', 'DELETE':'sum', 'MODIFY':'sum', 'RENAME':'sum', 'UNKNOWN':'sum', 'linesAdded':'mean', 'linesRemoved':'mean'})
gitCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv")
gitCommitsChanges = pd.merge(gitCommits, gitCommitsChanges, how='left', left_on=['commitHash'], right_on = ['commitHash'])
gitCommitsChanges = gitCommitsChanges[['committer','ADD','DELETE','MODIFY','RENAME','UNKNOWN','linesAdded','linesRemoved']]
gitCommitsChanges = gitCommitsChanges.groupby(['committer']).mean()
dataFrame = pd.merge(dataFrame, gitCommitsChanges, how='left', left_on=['committer'], right_on = ['committer'])
dataFrame = dataFrame.fillna(0.0)
refactoringMinerBug = pd.read_csv("../../data/interim/DataPreparation/ConstructData/REFACTORING_MINER_bug.csv").iloc[:,1:]
dum = pd.get_dummies(refactoringMinerBug[['refactoringType', 'bug']])
commitHash = refactoringMinerBug[["commitHash"]]
refactoringMinerBug = pd.concat([commitHash,dum], axis=1)
refactoringMinerBug = refactoringMinerBug.groupby(['commitHash']).sum()
refactoringMinerBug = pd.merge(refactoringMinerBug, gitCommits, how='left', left_on=['commitHash'], right_on = ['commitHash'])
refactoringMinerBug = pd.concat([refactoringMinerBug[['committer']], refactoringMinerBug.iloc[:,:-4]], axis=1)
refactoringMinerBug = refactoringMinerBug.groupby(['committer']).sum()
dataFrame = pd.merge(dataFrame, refactoringMinerBug, how='left', left_on=['committer'], right_on = ['committer'])
dataFrame = dataFrame.fillna(0.0)
sonarMeasures = pd.read_csv("../../data/interim/DataPreparation/ConstructData/SONAR_MEASURES_difference.csv").iloc[:,1:]
gitCommits = pd.read_csv("../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv")[['commitHash', 'committer']]
sonarMeasures = pd.merge(sonarMeasures, gitCommits, how='left', on='commitHash').iloc[:,2:]
sonarMeasures_committer = sonarMeasures.groupby(['committer']).agg({'functions':'sum', 'commentLinesDensity':'mean',
'complexity':'sum', 'functionComplexity':'sum', 'duplicatedLinesDensity':'mean', 'violations':'sum', 'blockerViolations':'sum',
'criticalViolations':'sum','infoViolations':'sum','majorViolations':'sum','minorViolations':'sum','codeSmells':'sum',
'bugs':'sum','vulnerabilities':'sum','cognitiveComplexity':'sum','ncloc':'sum','sqaleIndex':'sum',
'sqaleDebtRatio':'sum','reliabilityRemediationEffort':'sum','securityRemediationEffort':'sum'}).reset_index()
dataFrame = pd.merge(dataFrame, sonarMeasures_committer, how='left', on='committer')
dataFrame = dataFrame.fillna(0)
sonarIssues = pd.read_csv("../../data/interim/DataPreparation/ConstructData/SONAR_ISSUES_time.csv").iloc[:,4:]
sonarIssues = pd.concat([sonarIssues[['creationCommitHash']], sonarIssues.iloc[:,2:-2], sonarIssues[['closeTime']]], axis=1)
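# Convert SonarQube debt values to hours (days * 24 + seconds / 3600) via pd.Timedelta.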
debtSec = sonarIssues.debt.apply(pd.Timedelta)
debtHour = debtSec.apply(lambda x: x.seconds/3600 + x.days*24)
sonarIssues[['debt']] = debtHour.to_frame()
dum = pd.get_dummies(sonarIssues[['type','severity']])
sonarIssues = pd.concat([sonarIssues[['creationCommitHash','debt','closeTime']], dum], axis=1)
closeTime = sonarIssues.loc[:,['creationCommitHash','closeTime']]
closeTime = closeTime.groupby(["creationCommitHash"]).mean()
Debt = sonarIssues[['creationCommitHash','debt']]
Debt = Debt.groupby(['creationCommitHash']).sum()
TypesSeverities = pd.concat([sonarIssues[['creationCommitHash']], sonarIssues.iloc[:,3:]], axis=1)
TypesSeverities = TypesSeverities.groupby(['creationCommitHash']).sum()
sonarIssues = pd.concat([Debt, closeTime, TypesSeverities], axis=1)
sonarIssues2 = pd.merge(sonarIssues, gitCommits, how='left', left_on=['creationCommitHash'], right_on=['commitHash'])
closeTime = sonarIssues2.loc[:,['committer','closeTime']]
closeTime = closeTime.groupby(["committer"]).mean()
Debt = sonarIssues2[['committer','debt']]
Debt = Debt.groupby(['committer']).sum()
TypesSeverities = | pd.concat([sonarIssues2[['committer']], sonarIssues2.iloc[:,2:-3]], axis=1) | pandas.concat |
import mysql.connector
import pandas as pd
mydb = mysql.connector.connect(
host="172.16.17.32",
user="admin",
password="<PASSWORD>",
database="rod_input",
port=3306,
)
mycursor1 = mydb.cursor()
#mycursor.execute("SHOW GLOBAL VARIABLES LIKE 'innodb_rollback_on_timeout';")
liste1=[]
mycursor1.execute("SELECT * FROM `data_input` WHERE 1")
liste1=[]
myresult1 = mycursor1.fetchall()
#mydb.commit()
for x in myresult1:
liste1.append(x)
df1=pd.DataFrame(liste1,columns=['Nom','Prénom','Telephone','Adresse','Ville','Code postal','Statut','agent','Campagne','Liste d\'appel','Date d\'appel','forme_juridique','nace_code','nace_description','contact_position','numero_entreprise','province','website','sexe','mail_direct','mail_general','gsm','tel_direct','commentaire_appel','First Name','Last Name'])
#-----------------------------------------------------------------
import mysql.connector
import pandas as pd
mydb = mysql.connector.connect(
host="172.16.17.32",
user="admin",
password="<PASSWORD>",
database="rod_input",
port=3306,
)
liste2=[]
mycursor2 = mydb.cursor()
mycursor2.execute("SELECT * FROM `code_postaux` WHERE 1")
myresult2 = mycursor2.fetchall()
for x in myresult2:
liste2.append(x)
df2=pd.DataFrame(liste2,columns=['PAYS','CONCATENATED','CODE POSTAL','LOCALITE','COMMUNE','CODE POSTAL2','LANGUE Officielle','PROVINCE','REGION','PROVINCE EN','Commune riche'])
#------------------------------------------------------------------------------
import mysql.connector
import pandas as pd
mydb = mysql.connector.connect(
host="172.16.17.32",
user="admin",
password="<PASSWORD>",
database="rod_input",
port=3306,
)
liste3=[]
mycursor3 = mydb.cursor()
mycursor3.execute("SELECT * FROM `preferred_language` WHERE 1")
myresult3 = mycursor3.fetchall()
mydb.commit()
for x in myresult3:
liste3.append(x)
df3=pd.DataFrame(liste3,columns=['NomAgent','Prefered Language'])
#-------------------------------------------------------------------------
#pd.merge(df1,df2,on=df1['Code postal'],how=df2['CODE POSTAL2'])
df4=pd.merge(df1,df2,left_on='Code postal',right_on='CODE POSTAL2')
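# df4: call records (df1) enriched with postal-code metadata (df2); the next merge
# adds each agent's preferred language (df3) to build df5.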
df5= | pd.merge(df4,df3,left_on='agent',right_on='NomAgent') | pandas.merge |
import numpy as np
import pandas as pd
def autocorr_single_tp(a: np.array, t: int) -> float:
"""Do autocorrelation for a single time point.
Parameters
----------
a : np.array
The array to correlate (complex or real number)
t : int
The distance (in the index)
Returns
-------
float
The autocorrelation as a real number.
"""
return np.real(np.sum(a[0] * np.conj(a[t])))
def autocorr(df: pd.DataFrame) -> pd.DataFrame:
"""Do autocorrelation for all possible time steps over all columns.
Parameters
----------
df : pd.DataFrame
The data frame to correlate
Returns
-------
pd.DataFrame
The resulting dataframe with timestep as index and one column named autocorr
"""
df_result = | pd.DataFrame() | pandas.DataFrame |
import requests
import pandas as pd
import numpy as np
import os
import unidecode as udc
def get_points(place):
"""
    Converts a given place string to points according to the following rules:
1st place = 200p
2nd place = 190p
3rd place = 182p
4th place = 176p
5th place = 172p
6th place = 170p
7th place = 169p
8th place = 168p
...
175th place = 1p
"""
if place == "1.":
return 200
if place == "2.":
return 190
if place == "3.":
return 182
if place == "4.":
return 176
if place == "5.":
return 172
if place == 'DISK' or place == 'MS' or int(place[:-1]) > 175:
return 0
else:
return 176 - int(place[:-1])
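# Illustrative calls (values follow the rule table in the docstring):
#   get_points("1.")   -> 200
#   get_points("8.")   -> 168
#   get_points("175.") -> 1
#   get_points("DISK") -> 0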
def clean_race_dataframe(df):
"""
Replaces all empty strings, strings containing only whitespaces and None values by NaN.
    Replaces empty places with 'DISK'.
Replaces NaN registrations with 'nereg.'.
Replaces empty UserIDs (ORIS) with NaN.
"""
df.replace(r'^\s*$', np.nan, regex=True, inplace=True)
df.fillna(value=np.nan, inplace=True)
df['Place'] = df['Place'].fillna(value="DISK")
df['RegNo'] = df['RegNo'].fillna(value="nereg.")
df['UserID'] = df['UserID'].fillna(value=np.nan)
return df
def assign_points(df):
"""
Iterates through given dataframe and assigns points to every runner based on his or her place in category.
"""
for i, runner in df.iterrows():
df.at[i, 'Points'] = get_points(runner['Place'])
df = df.astype({'Points': 'int32'})
return df
def export_race_to_csv(df, race_id):
"""
Exports race dataframe with assigned points to a .csv file with name in format: 'points_<race_id>.csv'.
"""
if 'Points' in df.columns:
df.to_csv("points_{}.csv".format(race_id), sep=',', index=False)
def export_class_overall_to_csv(df, class_desc):
"""
Exports overall results dataframe to a .csv file with name in format: 'overall_<class_desc>.csv'.
"""
df.to_csv("overall_{}.csv".format(class_desc), sep=',')
def race_mode(race_id):
"""
Single race mode.
    Takes a race's ORIS id, loads the race from ORIS, assigns points and exports the result into a .csv file.
"""
# Select race by it's ORIS-id
try:
race_id = int(race_id)
except ValueError:
print("'{}' není celé číslo.".format(race_id))
return
# First, load name and date of selected race. Then load its results.
url = "https://oris.orientacnisporty.cz/API/?format=json&method=getEvent&id={}".format(race_id)
try:
response = requests.get(url)
data = response.json()
if data['Status'] == 'OK':
name = data['Data']['Name']
date = data['Data']['Date']
print("Jméno závodu:", name)
print("Datum závodu:", date)
# Load results of selected race
url = "https://oris.orientacnisporty.cz/API/?format=json&method=getEventResults&eventid={}".format(race_id)
response = requests.get(url)
data = response.json()
columns_to_keep = ['ClassDesc', 'Place', 'Name', 'RegNo', 'UserID', 'Time']
# Clean dataset
try:
results = clean_race_dataframe(
pd.DataFrame.from_dict(data['Data'], orient='index').set_index('ID')[columns_to_keep]
)
except KeyError:
print("CHYBA: Závod je ve špatném formátu (chybí mu ID výsledku). Určitě jsi zadal správné id závodu?")
return
# Assign points
results_with_points = assign_points(results)
# Export to .csv
export_race_to_csv(results_with_points, race_id)
print("Závod úspěšně vyhodnocen a uložen do: points_{}.csv".format(race_id))
else:
print("Nepodařilo se stáhnout závod z ORISu. (ORIS status: {})".format(data['Status']))
except requests.exceptions.ConnectionError:
print("Nepodařilo se připojit se k ORISU. Zkontroluj prosím své připojení k internetu.")
def list_races():
"""
    Lists all races in the current folder that already have assigned points, together with their names and dates.
"""
filenames = sorted([f for f in os.listdir("./") if os.path.isfile(os.path.join("./", f))])
race_filenames = [f for f in filenames if f[:7] == "points_" and f[-4:] == ".csv"]
race_ids = [int(f[7:-4]) for f in race_filenames]
for filename, race_id in zip(race_filenames, race_ids):
url = "https://oris.orientacnisporty.cz/API/?format=json&method=getEvent&id={}".format(race_id)
try:
response = requests.get(url)
data = response.json()
if data['Status'] == 'OK':
name = data['Data']['Name']
date = data['Data']['Date']
print("'{}' - '{}' - {}".format(filename, name, date))
else:
print("Nepodařilo se stáhnout závod z ORISu. (ORIS status: {})".format(data['Status']))
except requests.exceptions.ConnectionError:
print("Nepodařilo se připojit se k ORISU. Zkontroluj prosím své připojení k internetu.")
def get_overall_results():
"""
Goes through all 'points_<id>.csv' files in current directory and creates overall results from points.
"""
# Get filenames and ids of races with assigned points
filenames = sorted([f for f in os.listdir("./") if os.path.isfile(os.path.join("./", f))])
race_filenames = [f for f in filenames if f[:7] == "points_" and f[-4:] == ".csv"]
race_ids = [int(f[7:-4]) for f in race_filenames]
if len(race_filenames) > 0:
races = {}
columns_list = ['Name', 'RegNo']
# For each race add <id>-Place and <id>-Points column
for r_id, r_filename in zip(race_ids, race_filenames):
races[r_id] = pd.read_csv(r_filename, index_col=False)
columns_list.append("{}-Place".format(r_id))
columns_list.append("{}-Points".format(r_id))
# Create overall results - dataframe for every category
ovr_results = {'H': pd.DataFrame(columns=columns_list),
'D': pd.DataFrame(columns=columns_list),
'ZV': pd.DataFrame(columns=columns_list),
'HDD': pd.DataFrame(columns=columns_list)}
# Iterate through races and runners and add them to overall results
for r_id in race_ids:
race = races[r_id]
            # Create a data structure for new runners (not yet seen in previously processed races)
new_runners = {}
for class_desc in ['H', 'D', 'ZV', 'HDD']:
new_runners[class_desc] = {'Name': [],
'RegNo': [],
'{}-Place'.format(r_id): [],
'{}-Points'.format(r_id): []}
# Iterate through runners
for _, race_result in race.iterrows():
reg_no = race_result['RegNo']
class_desc = race_result['ClassDesc']
# Registered runners
if len(reg_no) == 7 and 64 < ord(reg_no[0]) < 91:
# Runner with this RegNo already has some results in this category in overall results
if reg_no in ovr_results[class_desc]['RegNo'].values:
reg_no_mask = ovr_results[class_desc]['RegNo'] == reg_no
ovr_results[class_desc].loc[reg_no_mask, '{}-Place'.format(r_id)] = race_result['Place']
ovr_results[class_desc].loc[reg_no_mask, '{}-Points'.format(r_id)] = race_result['Points']
# Runner with this RegNo has no results in this category in overall results so far
else:
new_runners[class_desc]['Name'].append(race_result['Name'])
new_runners[class_desc]['RegNo'].append(reg_no)
new_runners[class_desc]['{}-Place'.format(r_id)].append(race_result['Place'])
new_runners[class_desc]['{}-Points'.format(r_id)].append(race_result['Points'])
# Not registered runners ('nereg.')
else:
name = race_result['Name']
# Runner with this Name already has some results in this category in overall results
if name in ovr_results[class_desc]['Name'].values:
                        # Runner with this Name was already added into `new_runners`, i.e. two
                        # unregistered runners with the same name in one race and the same class.
if name in new_runners[class_desc]['Name']:
print("POZOR: Závodník bez registračky jménem '{}' již v závodě '{}' v kategorii '{}' existuje."
.format(race_result['Name'], r_id, class_desc))
else:
name_mask = ovr_results[class_desc]['Name'] == name
ovr_results[class_desc].loc[name_mask, '{}-Place'.format(r_id)] = race_result['Place']
ovr_results[class_desc].loc[name_mask, '{}-Points'.format(r_id)] = race_result['Points']
# Runner with this Name has no results in this category in overall results so far
else:
                            # Runner with this Name was already added into `new_runners`, i.e. two
                            # unregistered runners with the same name in one race and the same class.
if name in new_runners[class_desc]['Name']:
print("POZOR: Závodník bez registračky jménem '{}' již v závodě '{}' v kategorii '{}' existuje."
.format(race_result['Name'], r_id, class_desc))
else:
new_runners[class_desc]['Name'].append(name)
new_runners[class_desc]['RegNo'].append(reg_no)
new_runners[class_desc]['{}-Place'.format(r_id)].append(race_result['Place'])
new_runners[class_desc]['{}-Points'.format(r_id)].append(race_result['Points'])
# Add all new runners to overall results of particular category
for class_desc in ['H', 'D', 'ZV', 'HDD']:
ovr_results[class_desc] = pd.concat(
[ovr_results[class_desc], pd.DataFrame.from_dict(new_runners[class_desc])],
ignore_index=True,
sort=False)
return ovr_results
else:
print("Žádné závody ve složce nenalezeny.")
return None
def solve_duplicities(input_results):
output_results = {}
for class_desc in ['H', 'D', 'ZV', 'HDD']:
columns = input_results[class_desc].columns
output_results[class_desc] = {}
for column in columns:
output_results[class_desc][column] = []
# Iterate through all categories and try to merge probable duplicities
deleted_ids = [] # list of all runners that were merged into another ones
for class_desc in ['H', 'D', 'ZV', 'HDD']:
for i_actual, runner in input_results[class_desc].iterrows():
# i_actual runner wasn't merged with a previous one yet
if i_actual not in deleted_ids:
duplicity_solved = False
                # Iterate through all other runners that could possibly be duplicates of this one
for i_other in range(i_actual+1, input_results[class_desc].shape[0]):
                    # Lowercase names without diacritics match => duplicate to resolve
if udc.unidecode(runner['Name']).lower() == udc.unidecode(input_results[class_desc].loc[i_other, 'Name']).lower():
print(70*"=")
print("DUPLICITY\t|\tLeft:\t\t\t|\tRight:")
print(70*"-")
print("Name:\t\t|\t{}\t|\t{}".format(runner['Name'], input_results[class_desc].loc[i_other, 'Name']))
print("RegNo:\t\t|\t{}\t\t\t|\t{}".format(runner['RegNo'], input_results[class_desc].loc[i_other, 'RegNo']))
for descriptor in runner.index[2:]:
print("{}:\t|\t{}\t\t\t|\t{}".format(descriptor, runner[descriptor],
input_results[class_desc].loc[i_other, descriptor]))
merge = input("Merge and keep left (l) / Merge and keep right (r) / Keep separate (s)? ")
while merge not in ['l', 'r', 's']:
merge = input("Merge and keep left (l) / Merge and keep right (r) / Keep separate (s)? ")
# Merge and keep left values primarily
if merge == 'l':
for descriptor in columns:
if runner[descriptor] is not np.nan:
output_results[class_desc][descriptor].append(runner[descriptor])
elif input_results[class_desc].loc[i_other, descriptor] is not np.nan:
output_results[class_desc][descriptor].append(input_results[class_desc].loc[i_other, descriptor])
else:
output_results[class_desc][descriptor].append(np.nan)
deleted_ids.append(i_other)
duplicity_solved = True
# Merge and keep right values primarily
elif merge == 'r':
for descriptor in columns:
if input_results[class_desc].loc[i_other, descriptor] is not np.nan:
output_results[class_desc][descriptor].append(input_results[class_desc].loc[i_other, descriptor])
elif runner[descriptor] is not np.nan:
output_results[class_desc][descriptor].append(runner[descriptor])
else:
output_results[class_desc][descriptor].append(np.nan)
deleted_ids.append(i_other)
duplicity_solved = True
# Keep both runners separately
elif merge == 's':
pass
# This runner was not written to the output_results during duplicity solving (writing standard cases)
if not duplicity_solved:
for descriptor in columns:
output_results[class_desc][descriptor].append(runner[descriptor])
for class_desc in ['H', 'D', 'ZV', 'HDD']:
output_results[class_desc] = pd.DataFrame.from_dict(output_results[class_desc])
return output_results
def best_n_races(results):
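    # Sum only each runner's best (num_of_all_races // 2 + 1) race scores and store
    # the result in a new 'Best<n>-Points' column.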
for class_desc in ['H', 'D', 'ZV', 'HDD']:
num_of_all_races = len(results[class_desc].columns[2:]) // 2
num_of_races_to_count = (num_of_all_races // 2) + 1
total_points = []
columns = results[class_desc].columns[2:]
for _, runner in results[class_desc].iterrows():
points = []
total_points.append(0)
for descriptor in columns:
if 'Points' in descriptor:
if not np.isnan(runner[descriptor]):
points.append(int(runner[descriptor]))
else:
points.append(0)
for race_points in sorted(points, reverse=True)[:num_of_races_to_count]:
total_points[-1] += race_points
results[class_desc]['Best{}-Points'.format(num_of_races_to_count)] = | pd.Series(total_points) | pandas.Series |
# Authors: <EMAIL>
"""
This module provide a databse data pull test for all tables
"""
import os
import pandas as pd
import psycopg2
from user_similarity_model.config.core import SQL_DIR, config
def test_course_tag_pull(sample_local_data):
"""Test if the data that is pulled from database is consistent with
local version"""
# establish a connection to the Azure PostgreSql
conn = psycopg2.connect(**config.app_config.database_specs)
cur = conn.cursor()
# Open the test sql file and load the query
with open(os.path.join(SQL_DIR, "test-sql-fetch.sql")) as file:
query = file.read().split(";")
# run the query and fetch the results in a pandas DF
cur.execute(query[0])
rows = cur.fetchall()
courses_remote = pd.DataFrame(rows, columns=["count"]).loc[0, "count"]
local_df_course_tag = sample_local_data["course_tags"]
courses_local = local_df_course_tag[
local_df_course_tag.course_id == "2d-racing-games-unity-volume-2-1286"
].shape[0]
# are local and remote consistent?
assert courses_remote == courses_local
    # compare an assessment score between the remote database and the local sample data
cur.execute(query[1])
rows = cur.fetchall()
assessments_remote = | pd.DataFrame(rows, columns=["score"]) | pandas.DataFrame |
#........................................................................................................
# Title: Wikidata claims (statements) to natural language (a part of Triple2Text/Ontology2Text task)
# Author: <NAME>
# Email: <EMAIL>
# Lab: https://www.cic.ipn.mx
# Date: 12/2019
#........................................................................................................
from base import *
#from cluster_methods import *
from wiki_core import *
from read_write_file import *
from word2vec import *
from wiki2vec import *
from itertools import cycle
from collections import Counter
from sklearn.cluster import DBSCAN, OPTICS, MeanShift, AffinityPropagation, AgglomerativeClustering, SpectralClustering, Birch
from sklearn.mixture import GaussianMixture
from sklearn.decomposition import TruncatedSVD, PCA, NMF, SparsePCA, FastICA
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import LocalOutlierFactor
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.manifold import TSNE
from sklearn.neighbors import NearestNeighbors
import numpy as np
import math
import re
import time
import matplotlib.pyplot as plt
from collections import Counter
import pandas as pd
from nltk import ngrams
import string
import spacy
from spacy.tokenizer import Tokenizer  # used by treat_labeled_items2() below
from spacy.lang.en.stop_words import STOP_WORDS
nlp = spacy.load("en_core_web_md")
definition_properties = {
'P26': ['spouse', 'wife', 'married', 'marry', 'marriage', 'partner', 'wedded', 'wed', 'wives', 'husbands', 'spouses', 'husband'],
'P39': ['position', 'political', 'object', 'seat', 'public', 'office', 'subject', 'formerly', 'holds', 'currently', 'held', 'occupied'],
'P54': ['sports', 'teams', 'clubs', 'member', 'team', 'played', 'plays', 'club', 'player'],
'P69': ['educated', 'educational', 'institution', 'attended', 'subject', 'alma', 'mater', 'education',
'alumni', 'alumnus', 'alumna', 'college', 'university', 'school', 'studied', 'graduate', 'graduated', 'faculty'],
'P108': ['employer', 'person', 'organization', 'subject', 'works', 'worked', 'workplace', 'employed', 'working', 'place'],
'P166': ['work', 'awarded', 'won', 'medals', 'creative', 'person', 'awards', 'win', 'winner', 'honors', 'received', 'award',
'prize', 'title', 'recognition', 'honorary', 'honours', 'organisation'],
'P6': ['first', 'chancellor', 'prime', 'minister', 'government', 'mayor', 'state', 'executive', 'town', 'national', 'other', 'power', 'head', 'municipality', 'country', 'premier', 'body', 'governor', 'heading', 'city', 'headed', 'governmental', 'president'],
'P17': ['human', 'host', 'used', 'beings', 'item', 'sovereign', 'country', 'nation', 'land', 'state'],
'P22': ['daughter', 'daddy', 'subject', 'dad', 'male', 'stepfather', 'stepparent', 'son', 'father', 'child', 'parent'],
'P27': ['national', 'subject', 'nationality', 'country', 'citizenship', 'object', 'citizen', 'recognizes'],
'P31': ['example', 'main', 'member', 'class', 'individual', 'unsubclassable', 'subject', 'instance', 'occurrence', 'unsubtypable', 'distinct', 'uninstantiable', 'non', 'specific', 'unique', 'rdf', 'unsubclassifiable', 'element', 'unitary', 'type', 'particular'],
'P35': ['highest', 'king', 'authority', 'governor', 'queen', 'chief', 'monarch', 'head', 'official', 'country', 'headed', 'emperor', 'leader', 'formal', 'state', 'president'],
'P101': ['specialism', 'specialization', 'speciality', 'studies', 'FOW', 'organization', 'field', 'researcher', 'area', 'work', 'fields', 'person', 'academic', 'research', 'occupation', 'activity', 'subject', 'domain', 'scientific', 'discipline', 'responsible', 'conduct', 'see', 'study'],
'P103': ['languages', 'mother', 'person', 'language', 'native', 'learned', 'first', 'L1', 'speaker', 'tongue', 'birth'],
'P106': ['work', 'position', 'held', 'person', 'employment', 'craft', 'occupation', 'profession', 'career', 'field', 'job'],
'P108': ['person', 'employed', 'workplace', 'employer', 'working', 'works', 'subject', 'worked', 'place', 'organization'],
'P131': ['district', 'administrative', 'arrondissement', 'rural', 'territorial', 'entity', 'happens', 'village', 'region', 'following', 'territory', 'item', 'Indian', 'local', 'shire', 'government', 'area', 'based', 'borough', 'department', 'state', 'reservation', 'town', 'commune', 'unit', 'places', 'province', 'reserve', 'municipality', 'settlement', 'ward', 'county', 'prefecture', 'non', 'locations', 'parish', 'items', 'principal', 'location', 'voivodeship', 'locality', 'specifying', 'city', 'events', 'located'],
'P155': ['comes', 'offices', 'prequel', 'preceding', 'prior', 'replaces', 'split', 'sequel', 'item', 'successor', 'immediately', 'follows', 'before', 'series', 'subject', 'replaced', 'political', 'use', 'preceded', 'part', 'succeeds', 'previous', 'predecessor'],
'P156': ['comes', 'offices', 'prequel', 'part', 'sequel', 'following', 'item', 'successor', 'succeeded', 'immediately', 'followed', 'before', 'preceeds', 'series', 'subject', 'precedes', 'replaced', 'political', 'next', 'use', 'succeded'],
'P184': ['PhD', 'supervisor', 'doctorate', 'thesis', 'promotor', 'advisor', 'subject', 'doctoral', 'supervised'],
'P276': ['administrative', 'case', 'entity', 'region', 'physical', 'venue', 'event', 'item', 'place', 'area', 'based', 'object', 'held', 'feature', 'neighborhood', 'distinct', 'origin', 'terrain', 'location', 'use', 'located', 'moveable'],
'P407': ['broadcasting', 'audio', 'signed', 'URL', 'written', 'languages', 'associated', 'used', 'such', 'name', 'language', 'native', 'available', 'text', 'website', 'work', 'creative', 'named', 'reference', 'spoken', 'websites', 'songs', 'persons', 'use', 'shows', 'books'],
'P413': ['specialism', 'position', 'played', 'player', 'speciality', 'team', 'fielding'],
'P453': ['filled', 'specific', 'played', 'cast', 'subject', 'role', 'use', 'plays', 'acting', 'qualifier', 'character', 'member', 'actor', 'only', 'voice'],
'P512': ['person', 'academic', 'degree', 'holds', 'diploma'],
'P570': ['date', 'died', 'dead', 'subject', 'deathdate', 'year', 'death', 'end', 'DOD'],
'P571': ['introduced', 'commenced', 'defined', 'commencement', 'existence', 'point', 'came', 'time', 'creation', 'formation', 'first', 'inception', 'founded', 'written', 'founding', 'built', 'created', 'constructed', 'foundation', 'when', 'inititated', 'date', 'dedication', 'subject', 'establishment', 'issue', 'start', 'inaugurated', 'launch', 'introduction', 'launched', 'formed', 'construction', 'year', 'incorporated', 'incorporation', 'completed', 'established'],
'P577': ['work', 'date', 'airdate', 'dop', 'released', 'point', 'air', 'initial', 'pubdate', 'time', 'publication', 'first', 'year', 'published', 'release', 'when'],
'P580': ['start', 'starting', 'began', 'statement', 'date', 'introduced', 'introduction', 'begins', 'item', 'started', 'beginning', 'exist', 'time', 'valid', 'starttime', 'starts', 'building'],
'P582': ['ending', 'ceases', 'indicates', 'divorced', 'cease', 'left', 'time', 'closed', 'end', 'endtime', 'operation', 'item', 'date', 'stop', 'statement', 'office', 'dissolved', 'ends', 'stops', 'valid', 'being', 'exist', 'fall', 'completed'],
'P585': ['date', 'statement', 'event', 'existed', 'point', 'something', 'place', 'true', 'time', 'year', 'took', 'when'],
'P642': ['stating', 'statement', 'item', 'scope', 'qualifier', 'applies', 'particular'],
'P669': ['road', 'add', 'street', 'square', 'item', 'number', 'where', 'use', 'address', 'qualifier', 'there', 'property', 'located'],
'P708': ['church', 'types', 'archdiocese', 'division', 'administrative', 'other', 'diocese', 'ecclesiastical', 'use', 'entities', 'bishopric', 'belongs', 'element', 'territorial', 'archbishopric'],
'P735': ['forename', 'family', 'Christian', 'person', 'used', 'names', 'middle', 'values', 'name', 'should', 'link', 'first', 'disambiguations', 'property', 'given', 'personal'],
'P748': ['person', 'appointed', 'used', 'can', 'office', 'qualifier'],
'P768': ['district', 'seat', 'electoral', 'area', 'candidacy', 'representing', 'held', 'Use', 'election', 'riding', 'person', 'office', 'ward', 'position', 'being', 'contested', 'qualifier', 'constituency', 'electorate'],
'P805': ['dedicated', 'identified', 'statement', 'qualifying', 'item', 'describes', 'subject', 'artfor', 'article', 'relation', 'claim'],
'P811': ['college', 'someone', 'studied', 'academic', 'minor', 'university'],
'P812': ['college', 'someone', 'studied', 'academic', 'major', 'subject', 'university', 'field', 'study'],
'P828': ['due', 'causes', 'has', 'result', 'ultimate', 'had', 'why', 'ultimately', 'implied', 'thing', 'reason', 'effect', 'underlying', 'outcome', 'resulted', 'originated', 'caused', 'cause', 'initial'],
'P937': ['work', 'workplace', 'persons', 'working', 'activity', 'where', 'location', 'place', 'active'],
'P1001': ['value', 'institution', 'has', 'territorial', 'jurisdiction', 'item', 'linked', 'law', 'applied', 'state', 'statement', 'office', 'power', 'country', 'municipality', 'valid', 'belongs', 'applies', 'public'],
'P1013': ['respect', 'basis', 'used', 'according', 'made', 'criterion', 'reference', 'criteria', 'respectively', 'property', 'distinction', 'based', 'classification', 'by'],
'P1066': ['pupil', 'master', 'person', 'academic', 'disciple', 'supervisor', 'teacher', 'professor', 'studied', 'has', 'mentor', 'advisor', 'taught', 'student', 'tutor'],
'P1264': ['applicability', 'statement', 'validity', 'period', 'time', 'valid', 'applies', 'when'],
'P1268': ['represents', 'entity', 'organization', 'organisation', 'individual'],
'P1350': ['pitched', 'number', 'played', 'races', 'games', 'matches', 'team', 'appearances', 'caps', 'starts', 'gp', 'sports', 'mp'],
'P1351': ['scored', 'used', 'event', 'number', 'league', 'participant', 'points', 'goals', 'qualifier', 'GF', 'score', 'set', 'match', 'use'],
'P1365': ['replaces', 'structures', 'identical', 'item', 'successor', 'continues', 'forefather', 'follows', 'holder', 'person', 'job', 'replaced', 'structure', 'preceded', 'supersedes', 'succeeds', 'previous', 'predecessor'],
'P1366': ['adds', 'role', 'identical', 'item', 'heir', 'successor', 'succeeded', 'continues', 'superseded', 'followed', 'dropping', 'holder', 'person', 'other', 'series', 'job', 'replaced', 'next', 'replacing', 'continued', 'mediatised', 'books'],
'P1534': ['date', 'ending', 'specify', 'together', 'use', 'qualifier', 'cause', 'ended', 'end', 'reason'],
'P1642': ['status', 'transaction', 'player', 'acquisition', 'acquired', 'team', 'how', 'qualifier', 'member', 'loan', 'contract', 'sports'],
'P1686': ['work', 'awarded', 'nominated', 'received', 'award', 'qualifier', 'citation', 'creator', 'given'],
'P1706': ['item', 'together', 'award', 'tied', 'feat', 'qualifier', 'featuring', 'property', 'shared', 'accompanied', 'specify'],
'P2389': ['leads', 'person', 'directed', 'office', 'head', 'heads', 'directs', 'leader', 'organization', 'runs', 'organisation', 'led'],
'P2578': ['learning', 'research', 'academic', 'item', 'working', 'study', 'subject', 'studies', 'researches', 'property', 'object', 'field', 'studying', 'scholarly'],
'P2715': ['election', 'position', 'reelection', 'confirmed', 'person', 'statements', 'gained', 'qualifier', 'link', 'elected', 'held'],
'P2842': ['wedding', 'location', 'where', 'place', 'spouse', 'celebrated', 'marriage', 'property', 'married'],
'P2868': ['value', 'duty', 'function', 'context', 'has', 'role', 'title', 'purpose', 'generic', 'item', 'acting', 'identity', 'character', 'object', 'statement', 'subject', 'roles', 'job', 'use'],
'P3831': ['value', 'generic', 'statement', 'context', 'specifically', 'circumstances', 'item', 'employment', 'subject', 'role', 'identity', 'use', 'qualifier', 'object'],
'P4100': ['parliament', 'group', 'faction', 'belongs', 'parliamentary', 'member', 'party'],
'P1319': ['date', 'earliest']
}
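# definition_properties: hand-picked seed words describing each Wikidata property,
# used as the reference vocabulary for the global word-vector similarity measures.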
# load corpus from property name
def load_corpus(file_name, word2vec_file_name, property_name, delimiter='#', dtype=dtypes, trained=False, idf_dict_status=False):
    df = pd.read_csv(file_name, delimiter=delimiter, dtype=dtype, usecols=list(dtype))
best_sentences, best_rows = get_best_sentences(df, show=False)
labeled_sen_list = df['labeled_sentence_2']
counter = create_ngram(labeled_sen_list, 1) # unigram
idf_dict = {}
if (idf_dict_status == True):
idf_dict = create_idf_dict(labeled_sen_list)
word_corpus = create_true_distribution_corpus2(labeled_sen_list, 0)
if (trained == True):
word2vec_train(word2vec_file_name, property_name, word_corpus)
# load models
local_model = load_word2vec(word2vec_file_name)
    global_model = load_wiki2vec(r'D:\wiki-news-300d-1M.vec', 200000)
result_dict = {}
result_dict['file_name'] = file_name
result_dict['sen_list'] = df
result_dict['best_sentences'] = best_sentences
result_dict['labeled_sen_list'] = labeled_sen_list
result_dict['counter'] = counter
result_dict['idf_dict'] = idf_dict
result_dict['word_corpus'] = word_corpus
result_dict['local_model'] = local_model
result_dict['global_model'] = global_model
print('Loading corpus was done!!!')
return result_dict
# some basic statistics
def basic_statistics(file_name, delimiter='#', dtype=dtypes, best_sentence = False):
print('file_name: ', file_name)
#sen_list = read_from_csv_file(file_name, '#', 'all')[1:] # remove headers
df = pd.read_csv(file_name, delimiter=delimiter, dtype=dtype, usecols=list(dtype))
average_sentence_length(df)
average_word(df)
average_token(df)
average_token_labeled_sentence(df)
ratio_token_per_quad(df)
ratio_token_per_quad_item(df)
if (best_sentence == True):
print('++ Best sentences statistics')
labeled_list, df2 = get_best_sentences(df)
#print(len(labeled_list))
average_sentence_length(df2)
average_word(df2)
average_token(df2)
average_token_labeled_sentence(df2)
ratio_token_per_quad(df2)
ratio_token_per_quad_item(df2)
print('.............................')
print('.............................')
# cumulative rate by property
def cumulative_rate_by_property(property_name, df):
length_list = []
for index, row in df.iterrows():
#print(property_name, row['length'])
if (row['predicate'].lower() == property_name.lower()):
length_list.append(int(row['length']))
elif (property_name == 'common'): # count all properties
length_list.append(int(row['length']))
#file_name, sen_list, best_sentences, labeled_sen_list, counter, idf_dict, word_corpus, local_model, global_model = load_corpus(property_name)
#sentences, number_redundant_word_list, redundant_word_list = get_corpus_redundant_words(sen_list)
#print('length_list: ', length_list)
rank_list = rank_sentence_by_redundant_words(length_list)
cumulative_list = cumulative_rate(rank_list)
#print('rank_list: ', rank_list)
return cumulative_list
def treat_labeled_items2():
prefixes = list(nlp.Defaults.prefixes)
prefixes.remove('\\[')
prefix_regex = spacy.util.compile_prefix_regex(prefixes)
nlp.tokenizer.prefix_search = prefix_regex.search
suffixes = list(nlp.Defaults.suffixes)
suffixes.remove('\\]')
suffix_regex = spacy.util.compile_suffix_regex(suffixes)
nlp.tokenizer.suffix_search = suffix_regex.search
infixes = list(nlp.Defaults.prefixes)
infixes.remove('\\[')
infixes.remove('\\]')
try:
infixes.remove('\\-')
except Exception as e:
pass
try:
infixes.remove(':')
except Exception as e:
pass
try:
infixes.remove('_')
except Exception as e:
pass
infix_regex = spacy.util.compile_infix_regex(infixes)
nlp.tokenizer = Tokenizer(nlp.vocab, infix_finditer=infix_regex.finditer)
# create n-gram from text
def ngram(text, n):
# make n-gram and also count the frequency of each item by Counter
treat_labeled_items2()
doc = nlp(text)
temp = [token.text for token in doc if token.text != '']
return list(ngrams(temp, n))
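# Example: with the custom tokenizer, labeled items such as [start] stay single tokens,
# e.g. ngram("[start] John works [end]", 2)
#      -> [('[start]', 'John'), ('John', 'works'), ('works', '[end]')]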
# create n-gram from list
def create_ngram(sentence_list, n):
temp = []
for sentence in sentence_list:
sentence = "[start] " + sentence + " [end]"
temp += (ngram(sentence, n))
return Counter(temp)
# filter by property name
def filter_by_property(property_name, sen_list):
#property_list = ['P26','P39','P54','P69','P108','P166']
result_list = []
for p in sen_list[1:]: # start with data in line 1 (not headers)
if (p[2] == property_name):
result_list.append(p)
result_list = sorted(result_list, key = lambda x: (int(x[2][1:]), x[4])) # sort by qualifier
return result_list
# write file from list
def write_file_from_list(file_name, sen_list):
with open(file_name,'w', newline='', encoding='utf-8') as f:
wr = csv.writer(f, delimiter='#', quoting=csv.QUOTE_MINIMAL)
for p in sen_list:
print(p)
wr.writerow(p)
# average length per raw sentence
def average_sentence_length(df):
al = 0
for index, row in df.iterrows():
#print('row: ', row)
al += len(row['raw_sentence'])
print('average_sentence_length: ', al/len(df))
return al/len(df)
# average word per raw sentence
def average_word(df):
al = 0
for index, row in df.iterrows():
doc = nlp(row['raw_sentence'])
# words = [token.text for token in doc if token.is_punct != True]
al += len(row['raw_sentence'].split())
print('average_word: ', al/len(df))
return al/len(df)
# average token per raw sentence
def average_token(df):
al = 0
for index, row in df.iterrows():
doc = nlp(row['raw_sentence'])
al += doc.__len__()
print('average_token: ', al/len(df))
return al/len(df)
# average token per labeled sentence
def average_token_labeled_sentence(df):
al = 0
treat_labeled_items() # treat a labeled item as a token
for index, row in df.iterrows():
doc = nlp(row['labeled_sentence_2'])
al += doc.__len__()
print('average_token_labeled_sentence: ', al/len(df))
return al/len(df)
# ratio of token per quad (labeled sentence)
def ratio_token_per_quad(df):
treat_labeled_items() # treat a labeled item as a token
tokens = 0
quads = len(df) # 1 quad in 1 sentence
for index, row in df.iterrows():
doc = nlp(row['labeled_sentence_2'])
tokens += doc.__len__()
print('ratio_token_per_quad: ', tokens/quads)
return tokens/quads
# ratio of token per quad item (labeled sentence)
def ratio_token_per_quad_item(df):
treat_labeled_items() # treat a labeled item as a token
tokens = 0
quad_items = 0
for index, row in df.iterrows():
doc = nlp(row['labeled_sentence_2'])
temp_quads = len(row['order_2'].split(','))
tokens += doc.__len__() - temp_quads
quad_items += temp_quads
print('ratio_token_per_quad_item: ', tokens/quad_items)
return tokens/quad_items
# get the best sentences: no redundant words (except stop words & a verb as ROOT)
def get_best_sentences(df, show=False):
treat_labeled_items2() # treat a labeled item as a token
best_sentence_list = []
best_row_list = []
columns = []
if (len(df) != 0):
columns = [index for index, val in df.iloc[0].iteritems()]
for index, row in df.iterrows():
doc = nlp(row['labeled_sentence_2'])
redudant_list = []
temp_quads = [x.strip() for x in row['order_2'].split(',')]
for token in doc:
if (token.pos_ == "X"):
continue
if (token.pos_ == "PUNCT"):
continue
if (token.pos_ == "CCONJ"):
continue
if (token.pos_ == "ADP"):
continue
if (token.pos_ == "PRON"):
continue
if (token.pos_ == "PART"):
continue
if (token.pos_ == "DET"):
continue
if (token.dep_ == "punct"):
continue
if (token.text not in temp_quads):
redudant_list.append([token.text, token.pos_, token.dep_])
#print(token.text, token.pos_, token.dep_)
if (len(redudant_list) == 1):
if (redudant_list[0][2] == "ROOT"): # token.pos_
if (row['labeled_sentence_2'] not in best_sentence_list):
best_sentence_list.append(row['labeled_sentence_2']) # add the labeled sentence only
best_row_list.append([val for index, val in row.iteritems()]) # add a whole row
if (show != False):
print('..............................')
print('..............................')
print('Best sentences:')
for s in best_sentence_list:
print(s)
print('-----------')
print('..............................')
print('..............................')
# convert to dataframe
df = pd.DataFrame(best_row_list, columns=columns)
#print('df: ', df)
return best_sentence_list, df
# get redundant words in labeled sentences
def get_redundant_words(sen_row):
redudant_list = []
treat_labeled_items2()
doc = nlp(sen_row['labeled_sentence_2'])
quad_items = get_quad_items(sen_row)
for token in doc:
if (token.pos_ == "X"):
continue
if (token.pos_ == "PUNCT"):
continue
if (token.pos_ == "CCONJ"):
continue
if (token.pos_ == "ADP"):
continue
if (token.pos_ == "PRON"):
continue
if (token.pos_ == "PART"):
continue
if (token.pos_ == "DET"):
continue
if (token.dep_ == "punct"):
continue
if (token.text not in quad_items and token.text.strip() != ''):
#redudant_list.append([token.text, token.pos_, token.dep_])
redudant_list.append(token.text)
return redudant_list
# train corpus using CBOW
def word2vec_train(word2vec_file, property_name, corpus):
# save_word2vec(corpus, min_count, size, window, sorted_vocab, sg, workers, iters, file_name)
if (property_name == 'p26'):
save_word2vec(corpus, 0, 150, 2, 1, 0, 8, 10, word2vec_file)
if (property_name == 'p108'):
save_word2vec(corpus, 0, 150, 2, 1, 0, 8, 30, word2vec_file)
if (property_name == 'p69'):
save_word2vec(corpus, 0, 150, 2, 1, 0, 8, 20, word2vec_file)
if (property_name == 'p166'):
save_word2vec(corpus, 0, 150, 2, 1, 0, 8, 20, word2vec_file)
if (property_name == 'p54'):
save_word2vec(corpus, 0, 150, 2, 1, 0, 8, 5, word2vec_file)
if (property_name == 'p39'):
save_word2vec(corpus, 0, 150, 2, 1, 0, 8, 25, word2vec_file)
if (property_name == 'common'):
save_word2vec(corpus, 0, 150, 2, 1, 0, 8, 3, word2vec_file)
if (property_name == 'common2'):
save_word2vec(corpus, 0, 150, 2, 1, 0, 8, 3, word2vec_file)
# get quad items in a sentence
def get_quad_items(sen_row):
quad_items = []
quad_items = [x.strip() for x in sen_row['order_2'].split(',')]
return quad_items
# get important quad items in a sentence
def get_important_quad_items(sen_row):
quad_items = []
quad_items = [x.strip() for x in sen_row['order_2'].split(',')]
quad_items = list(set(quad_items) - set(['[det:the]','[det:a-an]','[s:poss]'])) # remove unimportant terms
return quad_items
# get qualifier quad items in a sentence
def get_qualifier_items(sen_row):
quad_items = []
quad_items = [x.strip() for x in sen_row['order_2'].split(',')]
qualifier_list = []
for q in quad_items:
if ('qualifier' in q and 'o0' in q): # get qualifiers of o0 (main object or first object)
qualifier_list.append(q)
#print('qualifier_list: ', qualifier_list)
return qualifier_list
# convert sentence to measures (tf, idf, local_distance, global_distance, vector, etc) & write to a result file
def convert_sentence_to_measures(output_file_name, sen_row, best_sentences, local_model, global_model, counter, idf_dict):
#print('sen_row: ', sen_row)
# redundant words
redundant_words = get_redundant_words(sen_row)
length = len(redundant_words)
sentence = redundant_words # check redundant words only
# best sentence
label = ''
if (sen_row['labeled_sentence_2'] in best_sentences): label = 'x'
# sum & product
tf1, tf2 = convert_sentence_to_tf(sentence, local_model, counter)
idf1, idf2 = convert_sentence_to_idf(sentence, idf_dict)
local1, local2 = convert_sentence_to_local_distance(sen_row, sentence, local_model, counter)
global1, global2 = convert_sentence_to_global_distance(sen_row, sentence, global_model)
# combination
tf_idf1, tf_idf2 = convert_sentence_to_tf_idf(sentence, local_model, counter, idf_dict)
local_tf1, local_tf2 = convert_sentence_to_local_tf_distance(sen_row, sentence, local_model, counter)
local_idf1, local_idf2 = convert_sentence_to_local_idf_distance(sen_row, sentence, local_model, counter, idf_dict)
local_tf_idf1, local_tf_idf2 = convert_sentence_to_local_tf_idf_distance(sen_row, sentence, local_model, counter, idf_dict)
global_tf1, global_tf2 = convert_sentence_to_global_tf_distance(sen_row, sentence, global_model, counter, qualifier=False)
global_idf1, global_idf2 = convert_sentence_to_global_idf_distance(sen_row, sentence, global_model, idf_dict, qualifier=False)
global_tf_idf1, global_tf_idf2 = convert_sentence_to_global_tf_idf_distance(sen_row, sentence, global_model, counter, idf_dict,
qualifier=False)
# global with qualifier
global_qualifier1, global_qualifier2 = convert_sentence_to_global_distance(sen_row, sentence, global_model, qualifier=True)
global_qualifier_tf1, global_qualifier_tf2 = convert_sentence_to_global_tf_distance(sen_row, sentence, global_model, counter, qualifier=True)
global_qualifier_idf1, global_qualifier_idf2 = convert_sentence_to_global_idf_distance(sen_row, sentence, global_model, idf_dict, qualifier=True)
global_qualifier_tf_idf1, global_qualifier_tf_idf2 = convert_sentence_to_global_tf_idf_distance(sen_row, sentence, global_model, counter, idf_dict,
qualifier=True)
# vector
vector_sum, vector_product = convert_sentence_to_vector(sentence, local_model) # base on local_model
# add results to sen_row
temp_list = [label, redundant_words, length, tf1, tf2, idf1, idf2, local1, local2, global1, global2, tf_idf1, tf_idf2, local_tf1,
local_tf2, local_idf1, local_idf2, local_tf_idf1, local_tf_idf2, global_tf1, global_tf2, global_idf1, global_idf2,
global_tf_idf1, global_tf_idf2, global_qualifier1, global_qualifier2, global_qualifier_tf1, global_qualifier_tf2,
global_qualifier_idf1, global_qualifier_idf2, global_qualifier_tf_idf1, global_qualifier_tf_idf2]
sen_row = sen_row.values.tolist()
sen_row.extend(temp_list)
write_to_csv_file(output_file_name, '#', sen_row)
# count average distance of a word to other words in a sentence (use important terms)
def convert_sentence_to_global_distance(sen_row, sentence, global_model, qualifier=False):
predicate = sen_row[2]
#print('predicate: ', predicate)
definition_word_list = []
if (predicate in definition_properties):
definition_word_list = definition_properties[predicate]
if (qualifier == True):
qualifiers = sen_row[4].split('-')
for q in qualifiers:
if (q in definition_properties):
definition_word_list += definition_properties[q]
length = len(sentence)
sum_dist = 0
product_dist = 1
def_length = len(definition_word_list)
if (def_length == 0): return 0, 0
for w in sentence:
temp_sum = 0
for item in definition_word_list:
sim = 0
try:
# raw normalized similarity, change range [-1,1] to [0,1]
sim = (global_model.similarity(w, item) + 1)/2
temp_sum += sim
#print('w, item: ', w, item)
#print('sim: ', sim)
except:
pass
if (temp_sum == 0): continue
'''print('temp_sum: ', temp_sum)
print('def_length: ', def_length)
print('...............')
print('...............')'''
sum_dist += temp_sum/def_length
product_dist *= -math.log(temp_sum/def_length)
# return sum_dist, math.log(product_dist + 1)
return sum_dist, -math.log(product_dist)
# count average distance of a word to other words in a sentence (use important terms)
def convert_sentence_to_global_tf_idf_distance(sen_row, sentence, global_model, counter, idf_dict, qualifier=False):
predicate = sen_row[2]
#print('predicate: ', predicate)
definition_word_list = []
if (predicate in definition_properties):
definition_word_list = definition_properties[predicate]
if (qualifier == True):
qualifiers = sen_row[4].split('-')
for q in qualifiers:
if (q in definition_properties):
definition_word_list += definition_properties[q]
length = len(sentence)
sum_dist = 0
product_dist = 1
def_length = len(definition_word_list)
if (def_length == 0): return 0, 0
for w in sentence:
temp_sum = 0
for item in definition_word_list:
sim = 0
try:
# raw normalized similarity, change range [-1,1] to [0,1]
sim = (global_model.similarity(w, item) + 1)/2
temp_sum += sim
#print('w, item: ', w, item)
#print('sim: ', sim)
except:
pass
if (temp_sum == 0): continue
'''print('temp_sum: ', temp_sum)
print('def_length: ', def_length)
print('...............')
print('...............')'''
idf = get_idf(idf_dict, w) # inverse topic frequency
tf = get_tf(counter, w) # term frequency
sum_dist += (temp_sum*idf*tf)/def_length
product_dist *= math.log(1 + (temp_sum*idf*tf)/def_length)
# return sum_dist, math.log(product_dist + 1)
return sum_dist, math.log(product_dist + 1)
# count average distance of a word to other words in a sentence (use important terms)
def convert_sentence_to_global_idf_distance(sen_row, sentence, global_model, idf_dict, qualifier=False):
predicate = sen_row[2]
#print('predicate: ', predicate)
definition_word_list = []
if (predicate in definition_properties):
definition_word_list = definition_properties[predicate]
if (qualifier == True):
qualifiers = sen_row[4].split('-')
for q in qualifiers:
if (q in definition_properties):
definition_word_list += definition_properties[q]
length = len(sentence)
sum_dist = 0
product_dist = 1
def_length = len(definition_word_list)
if (def_length == 0): return 0, 0
for w in sentence:
temp_sum = 0
for item in definition_word_list:
sim = 0
try:
# raw normalized similarity, change range [-1,1] to [0,1]
sim = (global_model.similarity(w, item) + 1)/2
temp_sum += sim
#print('w, item: ', w, item)
#print('sim: ', sim)
except:
pass
if (temp_sum == 0): continue
'''print('temp_sum: ', temp_sum)
print('def_length: ', def_length)
print('...............')
print('...............')'''
idf = get_idf(idf_dict, w) # inverse topic frequency
sum_dist += (temp_sum*idf)/def_length
product_dist *= math.log(1 + (temp_sum*idf)/def_length)
# return sum_dist, math.log(product_dist + 1)
return sum_dist, math.log(product_dist + 1)
# count average distance of a word to other words in a sentence (use important terms)
def convert_sentence_to_global_tf_distance(sen_row, sentence, global_model, counter, qualifier=False):
predicate = sen_row[2]
#print('predicate: ', predicate)
definition_word_list = []
if (predicate in definition_properties):
definition_word_list = definition_properties[predicate]
if (qualifier == True):
qualifiers = sen_row[4].split('-')
for q in qualifiers:
if (q in definition_properties):
definition_word_list += definition_properties[q]
length = len(sentence)
sum_dist = 0
product_dist = 1
def_length = len(definition_word_list)
if (def_length == 0): return 0, 0
for w in sentence:
temp_sum = 0
for item in definition_word_list:
sim = 0
try:
# raw normalized similarity, change range [-1,1] to [0,1]
sim = (global_model.similarity(w, item) + 1)/2
temp_sum += sim
#print('w, item: ', w, item)
#print('sim: ', sim)
except:
pass
if (temp_sum == 0): continue
'''print('temp_sum: ', temp_sum)
print('def_length: ', def_length)
print('...............')
print('...............')'''
tf = get_tf(counter, w) # term frequency
sum_dist += (temp_sum*tf)/def_length
product_dist *= math.log(1 + (temp_sum*tf)/def_length)
#print('---', (temp_sum*tf)/def_length, math.log((temp_sum*tf)/def_length))
#print('product_dist: ', product_dist)
# return sum_dist, math.log(product_dist + 1)
return sum_dist, math.log(product_dist + 1)
# convert sentence to a vector distance (similarity)
def convert_sentence_to_local_distance(sen_row, sentence, local_model, counter):
length = len(sentence)
sum_dist = 0
product_dist = 1
quad_items = get_important_quad_items(sen_row)
quad_length = len(quad_items)
for w in sentence:
temp_sum = 0
for item in quad_items:
sim = 0
try:
# raw normalized similarity, change range [-1,1] to [0,1]
sim = ((local_model.similarity(w, item) + 1)/2)
temp_sum += sim
except:
pass
if (temp_sum == 0): continue
sum_dist += temp_sum/quad_length
product_dist *= -math.log(temp_sum/quad_length)
#return sum_dist, math.log(product_dist + 1)
return sum_dist, -math.log(product_dist)
# convert sentence to local-tf
def convert_sentence_to_local_tf_distance(sen_row, sentence, local_model, counter):
length = len(sentence)
sum_dist = 0
product_dist = 1
quad_items = get_important_quad_items(sen_row)
quad_length = len(quad_items)
for w in sentence:
temp_sum = 0
for item in quad_items:
sim = 0
try:
# raw normalized similarity, change range [-1,1] to [0,1]
sim = ((local_model.similarity(w, item) + 1)/2)
temp_sum += sim
except:
pass
if (temp_sum == 0): continue
tf = get_tf(counter, w) # term frequency
sum_dist += (temp_sum*tf)/quad_length
product_dist *= math.log(1 + (temp_sum*tf)/quad_length)
#print('---', (temp_sum*tf)/quad_length, math.log((temp_sum*tf)/quad_length))
#return sum_dist, math.log(product_dist + 1)
#print('product_dist: ', product_dist)
return sum_dist, math.log(product_dist + 1)
# convert sentence to local-idf
def convert_sentence_to_local_idf_distance(sen_row, sentence, local_model, counter, idf_dict):
length = len(sentence)
sum_dist = 0
product_dist = 1
quad_items = get_important_quad_items(sen_row)
quad_length = len(quad_items)
for w in sentence:
temp_sum = 0
for item in quad_items:
sim = 0
try:
# raw normalized similarity, change range [-1,1] to [0,1]
sim = ((local_model.similarity(w, item) + 1)/2)
temp_sum += sim
except:
pass
if (temp_sum == 0): continue
idf = get_idf(idf_dict, w) # inverse topic frequency
sum_dist += (temp_sum*idf)/quad_length
product_dist *= math.log(1 + (temp_sum*idf)/quad_length)
#return sum_dist, math.log(product_dist + 1)
return sum_dist, math.log(product_dist + 1)
# convert sentence to local-tf-idf
def convert_sentence_to_local_tf_idf_distance(sen_row, sentence, local_model, counter, idf_dict):
length = len(sentence)
sum_dist = 0
product_dist = 1
quad_items = get_important_quad_items(sen_row)
quad_length = len(quad_items)
for w in sentence:
temp_sum = 0
for item in quad_items:
sim = 0
try:
# raw normalized similarity, change range [-1,1] to [0,1]
sim = ((local_model.similarity(w, item) + 1)/2)
temp_sum += sim
except:
pass
if (temp_sum == 0): continue
tf = get_tf(counter, w) # term frequency
idf = get_idf(idf_dict, w) # inverse topic frequency
sum_dist += (temp_sum*tf*idf)/quad_length
product_dist *= math.log(1 + (temp_sum*tf*idf)/quad_length)
#return sum_dist, math.log(product_dist + 1)
return sum_dist, math.log(product_dist + 1)
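# Descriptive note (not original code) on the local *_distance scores above: for every
# remaining word w, avg_sim(w) = (sum of normalized similarities of w to the quad items)
# / quad_length. The weighted variants add avg_sim(w) times tf(w) and/or idf(w) to
# sum_dist and multiply log(1 + that term) into product_dist; the plain local variant
# accumulates avg_sim(w) and -log(avg_sim(w)) instead.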
# convert sentence to tf-idf
def convert_sentence_to_tf_idf(sentence, model, counter, idf_dict):
length = len(sentence)
sum_score = 0
product_score = 1
for w in sentence:
try:
tf = get_tf(counter, w)
idf = get_idf(idf_dict, w)
sum_score += tf*idf
product_score *= tf*idf
except:
pass
return sum_score, math.log(product_score + 1)
# convert sentence to term frequency
def convert_sentence_to_tf(sentence, model, counter):
#length = len(sentence)
sum_score = 0
product_score = 1
for w in sentence:
try:
score = get_tf(counter, w)
#print('---', score)
sum_score += score
product_score *= score
except:
pass
#print('product_score: ', product_score)
return sum_score, math.log(product_score)
# convert sentence to term frequency
def convert_sentence_to_idf(sentence, idf_dict):
length = len(sentence)
sum_score = 0
product_score = 1
for w in sentence:
try:
score = get_idf(idf_dict, w)
sum_score += score
product_score *= score
except:
pass
return sum_score, math.log(product_score + 1)
# convert sentence to vector
def convert_sentence_to_vector(sentence, model):
length = len(sentence)
    sum_vec = 0  # start the element-wise sum at zero (1 is only correct for the product)
product_vec = 1
for w in sentence:
try:
w_vec = model.get_vector(w)
sum_vec += w_vec
product_vec *= w_vec
except:
pass
return sum_vec, product_vec
# convert corpus to vector
def convert_corpus_to_vector(corpus, best_sentences, model, counter):
label_list = []
vector_list = []
i = 0
# note that a sentence is a list of words
for sentence in corpus:
# convert back to a string sentence
temp = ' '.join(e for e in sentence[1:-1]) # remove [start], [end]
if (temp in best_sentences):
label_list.append('x')
else:
label_list.append('')
        sum_vector, product_vector = convert_sentence_to_vector(sentence, model)  # the helper takes (sentence, model) only
vector_list.append([sum_vector, product_vector])
i = i + 1
return label_list, vector_list
# get redundant words and their length for all sentences
def get_corpus_redundant_words(sen_list):
sentence_list, number_redundant_word_list, redundant_word_list = [], [], []
for p in sen_list:
redundant_words = get_redundant_words(p)
length = len(redundant_words)
number_redundant_word_list.append(length)
redundant_word_list.append(redundant_words)
sentence_list.append(p['labeled_sentence_2'])
return sentence_list, number_redundant_word_list, redundant_word_list
# convert corpus to measures and write to file
def convert_corpus_to_measures(output_file_name, sen_list, best_sentences, local_model, global_model, counter, idf_dict):
#label_list, metric_list, number_redundant_word_list, redundant_word_list, sentence_list = [], [], [], [], []
#i = 0
# note that sentence is a list of words
for index, sen_row in sen_list.iterrows():
convert_sentence_to_measures(output_file_name, sen_row, best_sentences, local_model, global_model, counter, idf_dict)
#metric_list.append(temp_dict)
#i = i + 1
#return label_list, metric_list, number_redundant_word_list, redundant_word_list, sentence_list
#...........................
#...........................
# rank a predicate frequency by property (P26, P39, P54, etc)
def rank_predicate_by_property(count_list, property_name):
# group and calculate average values
temp_list = []
for i in count_list:
if (i['term'].split('-')[0] == property_name):
temp_list.append([i['term'], i['local_average'], i['local_max_dist'], i['global_average'], i['global_max_dist'],
i['subject_dist'], i['object_dist'], i['redundant_words']])
df = pd.DataFrame(temp_list)
df = df.groupby([0]).agg('mean')
df = {x[0]: x[1:] for x in df.itertuples(index=True)}
# calculate term frequency and add it & average values to freq_dict
freq_list = [t[0] for t in temp_list]
#print('freq_list: ', freq_list)
length = len(freq_list) # size of corpus
freq_dict = Counter(freq_list)
#print('freq_dict: ', freq_dict)
for k, v in freq_dict.items():
freq_dict[k] = {'tf':v/length, 'local_average':df[k][0], 'local_max_dist':df[k][1],
'global_average': df[k][2], 'global_max_dist':df[k][3], 'subject_dist': df[k][4],
'object_dist':df[k][5], 'redundant_words':df[k][6]}
#print('freq_dict: ', freq_dict)
return freq_dict
# count the average distance of a word to other words (important words/terms only) in the same sentence
def word_distance_to_sentence(quad_items, word, local_model, global_model):
local_length = len(quad_items) # the numbers of quad items
global_length = 0
local_sum = 0
global_sum = 0
local_max_dist = 0
global_max_dist = 0
subject_dist = object_dist = 0
try:
subject_dist = local_model.similarity(word, '[s]') # subject distance
object_dist = local_model.similarity(word, '[o0]') # object distance
except:
pass
# local model
for term in quad_items: # can be qualifiers or all items in quad (subject, object, qualifiers)
try:
dist = local_model.similarity(word, term)
#print('dist, word, term: ', dist, word, term)
if (dist > local_max_dist):
local_max_dist = dist
local_sum += dist
#print('local_sum: ', local_sum)
except:
local_length = local_length - 1 # word is not in model
pass
# global model
#print('quad_items: +++', quad_items)
for term in quad_items:
value = term[term.index(':')+1:term.index('-')]
temp_list = []
try:
temp_list = definition_properties[value]
except:
pass
temp_length = len(temp_list)
#print('term, value, temp_list, temp_length: ', term, value, temp_list, temp_length)
for t in temp_list:
try:
dist = global_model.similarity(word, t)
#print('dist: ', dist, word, t)
if (dist > global_max_dist):
global_max_dist = dist
global_sum += dist
except:
temp_length = temp_length - 1 # word is not in model
pass
global_length += temp_length
local_average = global_average = 0
if (local_length == 0): local_average = 0
else: local_average = local_sum/local_length
if (global_length == 0): global_average = 0
else: global_average = global_sum/global_length
result_dict = {'local_average':local_average, 'local_max_dist': local_max_dist,
'global_average': global_average, 'global_max_dist': global_max_dist,
'subject_dist': subject_dist, 'object_dist': object_dist}
#print('result_dict: ', result_dict)
return result_dict
# count average distance of a word to other words in a sentence (use important terms)
def word_distance_to_property_definition(prop_items, word, global_model):
length = len(prop_items)
temp_sum = 0
max_dist = 0
for term in prop_items:
try:
dist = global_model.similarity(word, term)
if (dist > max_dist):
max_dist = dist
temp_sum += dist
except:
length = length - 1 # word is not in model
pass
if (length == 0):
return temp_sum, max_dist
return temp_sum/length, max_dist
# rank predicate (Wikidata properties) by term frequency
def rank_predicate(sen_df, best_sentences, counter, local_model, global_model, by_qualifier=False):
result_dict = Counter()
predicate_criteria_list = [] # list of criteria of each predicate
property_name_list = []
redundant_list = []
for index, sen_row in sen_df.iterrows():
predicate = sen_row['predicate'].strip() # Wikidata property
qualifiers = sen_row['qualifiers'].strip().split('-')
#prepositional_verb = sen_row['prepositional_verb'].split(',')[0].strip("'")
root = sen_row['root'].split(',')
root_value = root[0].strip("'") # value of root (verb)
root_pos = root[1] # position of root
quad_items = get_qualifier_items(sen_row)
distance_dict = word_distance_to_sentence(quad_items, root_value, local_model, global_model)
if (by_qualifier == True):
term = predicate + '-' + root_value + '-' + '-'.join(qualifiers)
else:
term = predicate + '-' + root_value
property_name_list.append(predicate)
distance_dict['term'] = term
redundant_words = get_redundant_words(sen_row)
distance_dict['redundant_words'] = len(redundant_words)
predicate_criteria_list.append(distance_dict)
property_names = list(set(property_name_list))
# join dictionaries by property
for pn in property_names:
result_dict = {**result_dict, **rank_predicate_by_property(predicate_criteria_list, pn)} # join two dictionaries
normalized_values = []
normalized_labels = []
for k, v in result_dict.items():
temp = k.split('-')
property_name = temp[0]
predicate = temp[1]
tf = get_tf(counter, predicate)
'''average_def_dist, max_def_dist = word_distance_to_property_definition(definition_properties[property_name], predicate,
global_model)'''
#print('---', average_def_dist, v['local_average'], v['global_average'], v['tf'])
temp_list = [v['local_average'], v['global_average'], v['tf']]
temp_score = (np.prod(temp_list)*len(temp_list))/sum(temp_list)
try: temp_score = 1/-math.log(temp_score)
except: temp_score = 0
result_dict[k] = (temp_score, temp_score, v['tf'])
normalized_values.append((temp_score, temp_score, v['tf']))
normalized_labels.append(k)
#{'local_average':local_average, 'local_max_dist': local_max_dist, 'global_average': global_average,
# 'global_max_dist': global_max_dist, 'subject_dist': subject_dist, 'object_dist': object_dist}
# normalize values
normalized_values = MinMaxScaler().fit(normalized_values).transform(normalized_values)
for k, v in zip(normalized_labels, normalized_values):
result_dict[k] = v.tolist()
#print('result_dict: ', result_dict)
result_dict = dict(sorted(result_dict.items(), key = lambda v: v[1], reverse = True))
return result_dict
def group_predicate(predicate_dict, top=10, show=False):
group_dict = {}
for k, v in predicate_dict.items():
temp_list = k.split('-')
key = temp_list[0] + '-' + '-'.join(temp_list[2:])
key = key.strip('-')
predicate = temp_list[1]
temp_list = [*v]
temp_list.insert(0, predicate)
if (key not in group_dict):
group_dict[key] = [temp_list]
else:
group_dict[key].append(temp_list)
#group_dict = sorted(group_dict.items(), key = lambda v: (v[0]), reverse = True))
if (show==False): return group_dict
i = 1
for k, v in group_dict.items():
print('+', k)
for x in v:
if (i > top): break
print('---', x)
i = i + 1
i = 1
return group_dict
#...........................
#...........................
# get idf of a word
def create_idf_word(sentences, word):
n = len(sentences)
freq = 0 # raw frequency
for s in sentences:
freq += sum(1 for _ in re.finditer(r'\b%s\b' % re.escape(word), s))
return freq
# create idf dictionary
def create_idf_dict(sentences):
n = len(sentences)
result_dict = {}
result_dict['%%SIZE%%'] = n # number of documents (sentences)
for s in sentences:
doc = nlp(s)
for token in doc:
if (str(token.text) not in result_dict):
result_dict[str(token.text)] = create_idf_word(sentences, str(token.text))
return result_dict
# get inverse document frequency, a sentence as a document
def get_idf(idf_dict, word):
n = idf_dict['%%SIZE%%']
freq = 0
if (word in idf_dict):
freq = idf_dict[word]
# return -math.log((freq + 1)/n)
return -math.log((freq+1)/n) + 1
# get frequency of a term in corpus, corpus as a document
def get_tf(counter, word):
temp = (word,) # create key
freq = 0
freq = counter[temp] # raw frequency
#n = len(counter)
return math.log(freq+1) + 1
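# Illustrative sketch of the inputs expected by get_tf/get_idf (hypothetical data;
# assumes `Counter` is imported at module level, as used elsewhere in this file).
def _tf_idf_example():
    toy_counter = Counter([('spouse',), ('spouse',), ('of',)])   # term counts keyed by 1-word tuples
    toy_idf_dict = {'%%SIZE%%': 2, 'spouse': 1, 'of': 2}          # corpus size plus per-word sentence counts
    return get_tf(toy_counter, 'spouse'), get_idf(toy_idf_dict, 'spouse')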
# count and rank sentences by their number of redundant words
def rank_sentence_by_redundant_words(redundants):
count_dict = {}
for r in redundants:
if (r not in count_dict):
count_dict[r] = 1
else:
count_dict[r] += 1
count_dict = sorted(count_dict.items(), key=lambda x: x[0])
return count_dict
# show sentences by distance
def show_sentence_distance(labels, scores, redundants, sentences, redundant_word_list):
i = 0
for value in scores:
print('#' + str(i) + ': ', value, labels[i], redundants[i], redundant_word_list[i], '---', sentences[i])
i = i + 1
# show plot of sentences
def show_sentence_plot(labels, scores, redundants, sentences, redundant_word_list):
#labels = []
#scores = []
#redundants = []
#sentences = []
#redundant_word_list = []
#labels, scores, redundants, sentences, redundant_word_list = convert_corpus_to_distance(sen_list, best_sentences, model, counter)
#labels, tokens = convert_corpus_to_vector1(word_corpus, best_sentences, model, counter)
#tsne_model = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=23)
#new_values = tsne_model.fit_transform(tokens)
plt.figure(figsize=(20, 20))
for i in range(len(scores)):
# s: size, color: color
if (labels[i] == 'x'):
plt.scatter(scores[i], redundants[i], s=20, color='blue') # marker = 's'
plt.annotate('', xy=(scores[i], redundants[i]), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')
else:
plt.scatter(scores[i], redundants[i], s=2)
plt.annotate('', xy=(scores[i], redundants[i]), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')
plt.xlabel("Number of redundant words")
plt.ylabel("Score")
plt.title('Sentences in corpus')
plt.savefig('show_sentence_plot.pdf')
plt.show()
# show plot of predicates (Wikidata properties)
def show_predicate_plot(predicate_list, axis_labels):
plt.figure(figsize=(12, 4))
for index, predicate_dict in enumerate(predicate_list):
labels = []
values = []
for k, v in predicate_dict.items():
labels.append(k)
values.append(v)
#tsne_model = TSNE(perplexity=100, n_components=1, init='pca', n_iter=5000)
#values = tsne_model.fit_transform(values)
#values = decomposition(values, 'pca', dimension = 2)
#values = MinMaxScaler().fit(values).transform(values)
x = []
y = []
sizes = []
i = 0
for v in values:
a = v[0]
b = v[1]
#print('+++', labels[i], a, b, int(v[2]*300)+1)
x.append(a)
y.append(b)
sizes.append(int(v[2]*300)+1)
i = i + 1
plt.rcParams.update({'font.size':10})
for i in range(len(x)):
# s: size, color: color
plt.scatter(1, 1, s=1, alpha=0.0)
plt.scatter(index + 2, y[i], s=sizes[i], alpha=0.6) # marker = 's'
if (i < 5):
temp_label = labels[i][labels[i].index('-')+1:]
#print('temp_label: ', temp_label)
plt.annotate(temp_label, xy=(index + 2, y[i]), xytext=(2, 2), textcoords='offset points', ha='right', va='bottom',
alpha=0.9, fontsize=8)
#fontsize=int(sizes[i]/10)+1
plt.grid(color = 'grey', linestyle = 'dotted', linewidth = 1)
#plt.gca().axes.get_xaxis().set_visible(False)
# axis labels
plt.xticks(range(2, len(axis_labels)+2), axis_labels)
plt.show()
# get all qualifiers by Wikidata properties
def get_all_qualifiers(sen_df):
result_list = []
result_dict = {}
    for index, sen_row in sen_df.iterrows():
temp_list = get_qualifier_items(sen_row)
for t in temp_list:
value = t[t.index(':') + 1:t.index('-')]
result_list.append(value)
result_list = list(set(result_list))
result_list = sorted(result_list, key = lambda x: int(x[1:]))
for r in result_list:
root = get_wikidata_root(r)
label = get_label(root)
description = get_description(root)
aliases = ' '.join(e for e in get_alias(root))
def_string = label + ' ' + description + ' ' + aliases
def_list = []
doc = nlp(def_string)
for token in doc:
if (token.pos_ == "X"):
continue
if (token.pos_ == "PUNCT"):
continue
if (token.pos_ == "CCONJ"):
continue
if (token.pos_ == "ADP"):
continue
if (token.pos_ == "PRON"):
continue
if (token.pos_ == "PART"):
continue
if (token.pos_ == "DET"):
continue
if (token.dep_ == "punct"):
continue
def_list.append(token.text)
def_list = list(set(def_list))
#print('def_list:', r, def_list)
result_dict[r] = def_list
print('result_dict qualifiers: ', result_dict)
return result_dict
# sentence plot by redundant words
def sentence_plot_by_redundant_words(total_cumulative_list, labels, plot_title, x_axis_label, y_axis_label):
#cmap = plt.get_cmap('plasma')
#colors = cmap(np.linspace(0, 1, len(labels)))
colors = ['green', 'blue', 'red', 'coral', 'orchid', 'gray', 'gold']
colorcyler = cycle(colors)
lines = ['+', '*', '>', 'x', 'o', ':', '--']
linecycler = cycle(lines)
plt.rcParams.update({'font.size':10})
plt.ylabel(y_axis_label)
plt.xlabel(x_axis_label)
#plt.title(plot_title)
#plt.figure(figsize=(1,30))
#plt.figure(figsize=(1, 1), dpi=1000)
scale_factor = 30
xmin, xmax = plt.xlim()
plt.xlim(xmin * scale_factor, xmax * scale_factor)
for cumulative_list, name, color in zip(total_cumulative_list, labels, colors):
x, y = [], []
i = 0
for r in cumulative_list:
x.append(r[0])
y.append(r[1])
i = i + 1
plt.plot(x, y, next(linecycler), label=name, c=next(colorcyler))
plt.legend()
'''for i in range(len(y)):
plt.scatter(x[i], y[i], s=2, color=color)'''
#ymin, ymax = plt.ylim()
#plt.ylim(ymin * scale_factor, ymax * scale_factor)
plt.grid(color = 'grey', linestyle = 'dotted', linewidth = 0.5)
plt.savefig('sentence_plot_by_redundant_words.pdf')
plt.savefig('sentence_plot_by_redundant_words.svg')
plt.show()
plt.style.use('default') # reset style to default
# accumulative rate [0-1]
def cumulative_rate(rank_list):
result_list = []
total = sum([r[1] for r in rank_list])
temp = 0
for r in rank_list:
temp += r[1]/total
#print(temp)
result_list.append([r[0], temp])
#print(result_list)
return result_list
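# Minimal sketch of cumulative_rate on a toy ranking (hypothetical data):
def _cumulative_rate_example():
    toy_rank = [[1, 50], [2, 30], [3, 20]]   # [redundant-word count, number of sentences]
    return cumulative_rate(toy_rank)         # -> [[1, 0.5], [2, 0.8], [3, 1.0]]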
# minimums by redundant words
def minimums_by_redundant_words(scores, redundants):
result_dict = {}
for s, r in zip(scores, redundants):
if (r not in result_dict):
result_dict[r] = s
else: # get min
if s < result_dict[r]: result_dict[r] = s
result_dict = sorted(result_dict.items(), key=lambda x: x[0])
#print(result_dict)
return result_dict
# linear regression
def linear_regression(x, y):
print('x: ', x)
print('y: ', y)
x = np.array(x).reshape((-1, 1))
y = np.array(y)
    model = LinearRegression().fit(x, y)
r_sq = model.score(x, y)
print('coefficient of determination:', r_sq)
print('intercept:', model.intercept_)
print('slope:', model.coef_)
y_pred = model.predict(x)
print('predicted response:', y_pred, sep='\n')
mae = metrics.mean_absolute_error(y, y_pred)
print('Mean Absolute Error:', mae)
mse = metrics.mean_squared_error(y, y_pred)
print('Mean Squared Error:', mse)
rmse = np.sqrt(metrics.mean_squared_error(y, y_pred))
print('Root Mean Squared Error:', rmse)
result_dict = {}
result_dict['y_pred'] = y_pred
result_dict['intercept'] = model.intercept_
result_dict['coef'] = model.coef_
result_dict['r_sq'] = r_sq
result_dict['mae'] = mae
result_dict['mse'] = mse
result_dict['rmse'] = rmse
return result_dict
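# Hedged usage sketch with made-up numbers (assumes the sklearn/numpy imports at the
# top of this module):
def _linear_regression_example():
    x = [1, 2, 3, 4, 5]
    y = [2.1, 3.9, 6.2, 8.1, 9.8]
    return linear_regression(x, y)['coef']   # slope should come out close to 2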
# linear regression plot
def linear_regression_plot(x, y, dict1, dict2, plot_title, x_axis_label, y_axis_label):
plt.figure(figsize=(20, 20))
for i, j in zip(x, y):
plt.scatter(i, j, s=10, alpha=0.5)
plt.annotate('', xy=(i, j), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')
axes1 = plt.gca()
x_vals1 = np.array(axes1.get_xlim())
y_vals1 = dict1['intercept'] + dict1['coef']*x_vals1
print('x_vals1, y_vals1: ', x_vals1, y_vals1)
plt.plot(x_vals1, y_vals1, '--')
axes2 = plt.gca()
x_vals2 = np.array(axes2.get_xlim())
y_vals2 = dict2['intercept'] + dict2['coef']*x_vals2
print('x_vals2, y_vals2: ', x_vals2, y_vals2)
plt.plot(x_vals2, y_vals2)
plt.grid(color = 'grey', linestyle = 'dotted', linewidth = 0.5)
plt.savefig('linear_regression_plot.pdf')
plt.show()
#plt.style.use('default') # reset style to default
# filter noise by cumulative rate
def filter_noise_by_cumulative_rate(sentences, redundant_word_list, number_redundant_word_list, cumulative_list,
rate = 0, top_words = 0):
sentences_, redundant_word_list_, number_redundant_word_list_ = [], [], []
if (rate == 0 and top_words == 0):
return sentences, redundant_word_list, number_redundant_word_list
bound = 0 # number of words used to filter
# filter by rate only
if (rate != 0 and top_words == 0):
for c in cumulative_list:
bound = c[0]
if (c[1] > rate):
break
elif(rate == 0 and top_words != 0):
bound = top_words
if (bound == 0):
return sentences, redundant_word_list, number_redundant_word_list
for a, b, c in zip(sentences, redundant_word_list, number_redundant_word_list):
if (c <= bound):
sentences_.append(a)
redundant_word_list_.append(b)
number_redundant_word_list_.append(c)
return sentences_, redundant_word_list_, number_redundant_word_list_
# filter noise by metrics
def filter_noise_by_metrics(df, field, frac=1, ascending=True):
# convert to numeric
df['local_tf_idf2'] = pd.to_numeric(df['local_tf_idf2'], errors='coerce') # standard
df[field] = pd.to_numeric(df[field], errors='coerce') # metric
df['length'] = pd.to_numeric(df['length'], errors='coerce') # number of redundant words
# sort df
sorted_df = df.sort_values(field, ascending=ascending)
# get fraction
df_len = len(sorted_df.index)
n = int(df_len*frac)
df_frac = sorted_df.head(n)
# linear regression
length_list = df_frac['length'].tolist()
field_list = df_frac['local_tf_idf2'].tolist()
#field_list = df_frac[field].tolist()
linear_regression(length_list, field_list)
'''for index, row in df_frac.iterrows():
labeled_sentence_2 = row['labeled_sentence_2']
length = row['length']
label = row['label']
score = row[field]
print('--------------')
print(label, length, score)
print(labeled_sentence_2)'''
def filter_noise_by_clustering_method(df, field, compared_field, method, frac=1, ascending=True):
"""
    Not used: filter noise by DBSCAN (density-based clustering).
    https://blog.dominodatalab.com/topology-and-density-based-clustering/
"""
# convert to numeric
df[compared_field] = pd.to_numeric(df[compared_field], errors='coerce') # standard
df[field] = pd.to_numeric(df[field], errors='coerce') # metric
df['length'] = | pd.to_numeric(df['length'], errors='coerce') | pandas.to_numeric |
import datetime
import logging
import math
import os
import warnings
from collections import namedtuple
from pprint import pprint
from time import sleep
from zipfile import ZipFile
import pandas as pd
import pytz
from oemof import solph
from oemof.tools import logger
from deflex import analyses
from deflex import config as cfg
from deflex import geometries
from deflex import main
from deflex import results
from deflex import tools
try:
from lmfit.models import LinearModel
except ModuleNotFoundError:
LinearModel = None
try:
from oemof_visio.plot import io_plot
from oemof_visio.plot import set_datetime_ticks
except ModuleNotFoundError:
io_plot = None
try:
from matplotlib import patches
from matplotlib import patheffects
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.dates import DateFormatter
from matplotlib.dates import HourLocator
except ModuleNotFoundError:
plt = None
MISSING_MODULES = []
OPSD_URL = (
"https://data.open-power-system-data.org/index.php?package="
"time_series&version=2019-06-05&action=customDownload&resource=3"
"&filter%5B_contentfilter_cet_cest_timestamp%5D%5Bfrom%5D="
"2005-01-01&filter%5B_contentfilter_cet_cest_timestamp%5D%5Bto%5D"
"=2019-05-01&filter%5BRegion%5D%5B%5D=DE&filter%5BVariable%5D%5B"
"%5D=price_day_ahead&downloadCSV=Download+CSV"
)
EXAMPLE_URL = (
"https://files.de-1.osf.io/v1/resources/a5xrj/providers/"
"osfstorage/5fdc7e0bf0df5405452ef6f0/?zip="
)
GITHUB_BASE_URL = "https://raw.githubusercontent.com/reegis/deflex/master/{0}"
BASEPATH = os.path.join(os.path.expanduser("~"), "deflex_examples")
IMAGETYPE = "png" # svg, pdf, png, eps
def download_example_scenarios(path):
"""Download example data from OSF and other files."""
# Examples from OSF
os.makedirs(path, exist_ok=True)
fn_zip = os.path.join(path, "software_x_scenario_examples.zip")
tools.download(fn_zip, EXAMPLE_URL)
# plot.ini from github
url = GITHUB_BASE_URL.format("examples/plot.ini")
fn_ini = os.path.join(path, "plot.ini")
tools.download(fn_ini, url)
with ZipFile(fn_zip, "r") as zip_ref:
zip_ref.extractall(path)
logging.info("All SoftwareX scenarios extracted to {}".format(path))
def get_price_from_opsd(path):
"""Get day ahead prices from opsd time series."""
fn = os.path.join(path, "opsd_day_ahead_prices.csv")
tools.download(fn, OPSD_URL)
de_ts = pd.read_csv(
fn,
index_col="utc_timestamp",
parse_dates=True,
date_parser=lambda col: pd.to_datetime(col, utc=True),
)
de_ts.index = de_ts.index.tz_convert("Europe/Berlin")
de_ts.index.rename("cet_timestamp", inplace=True)
berlin = pytz.timezone("Europe/Berlin")
start_date = berlin.localize(datetime.datetime(2014, 1, 1, 0, 0, 0))
end_date = berlin.localize(datetime.datetime(2014, 12, 31, 23, 0, 0))
return de_ts.loc[start_date:end_date, "DE_price_day_ahead"]
def get_scenario(path):
"""
Search for result files in the given directory and return them
as a list (ls) or a numbered dictionary (dc).
"""
d = namedtuple("sc", ("ls", "dc"))
s = results.search_results(path)
sc_dict = {k: v for v, k in zip(sorted(s), range(len(s)))}
pprint(sc_dict)
return d(ls=sorted(s), dc=sc_dict)
def get_key_values_from_results(result):
"""
Extract key values from a list of solph results dictionaries.
emissions_average: The average emissions per time step
emissions_mcp: The emissions of the most expensive running power plant
mcp: Market Clearing Price (MCP), the costs of the most expensive running
power plant.
Parameters
----------
result : list
A list of solph results dictionaries.
Returns
-------
pandas.DataFrame : Key values for each result dictionary.
"""
kv = pd.DataFrame(columns=pd.MultiIndex(levels=[[], []], codes=[[], []]))
for r in result:
name = r["meta"]["name"]
flow_res = analyses.get_flow_results(r)
if "chp" in flow_res["cost", "specific", "trsf"].columns:
kv["mcp", name] = flow_res.drop(
("cost", "specific", "trsf", "chp"), axis=1
)["cost", "specific"].max(axis=1)
else:
kv["mcp", name] = flow_res["cost", "specific"].max(axis=1)
mcp_id = flow_res["cost", "specific"].idxmax(axis=1)
emissions = flow_res["emission", "specific"]
kv["emissions_average", name] = (
flow_res["emission", "absolute"]
.sum(axis=1)
.div(flow_res["values", "absolute"].sum(axis=1))
)
kv["emissions_mcp", name] = pd.Series(
emissions.lookup(*zip(*pd.DataFrame(data=mcp_id).to_records()))
)
return kv
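# Hedged usage sketch (not from the original script): restore one or more stored
# deflex results and compute the key values; `my_path` is a placeholder directory and
# the exact restore API may differ between deflex versions.
# my_files = get_scenario(my_path).ls
# my_results = [results.restore_results(f) for f in my_files]
# kv = get_key_values_from_results(my_results)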
def plot_power_lines(
data,
key,
cmap_lines=None,
cmap_bg=None,
direction=True,
vmax=None,
label_min=None,
label_max=None,
unit="GWh",
size=None,
ax=None,
legend=True,
unit_to_label=False,
divide=1,
decimal=0,
exist=None,
):
"""
Parameters
----------
    data : pandas.DataFrame with one value per power line, merged by line index.
    key : column of ``data`` that is drawn onto the lines.
    cmap_lines : colormap for the line values (a default is built if None).
    cmap_bg : colormap for the onshore/offshore background regions.
    direction : if True, draw triangular direction markers on the lines.
    vmax : upper bound of the colour scale (maximum of ``key`` if None).
    label_min : smallest value that still gets a text label.
    label_max : largest value that still gets a text label.
    unit : unit string used in the value labels.
    size : figure size, used only if no axis is passed.
    ax : matplotlib axis to plot into (a new figure is created if None).
    legend : if True, draw the colour bar legend.
    unit_to_label : if True, append ``unit`` to the value labels.
    divide : divide all values by this factor before plotting.
    decimal : number of decimals used for the value labels.
    exist : optional column name; only lines where it equals 1 are drawn.
    Returns
    -------
    matplotlib.axes.Axes
"""
if size is None and ax is None:
ax = plt.figure(figsize=(5, 5)).add_subplot(1, 1, 1)
elif size is not None and ax is None:
ax = plt.figure(figsize=size).add_subplot(1, 1, 1)
if unit_to_label is True:
label_unit = unit
else:
label_unit = ""
lines = geometries.deflex_power_lines("de21")
polygons = geometries.deflex_regions("de21")
if lines is None:
msg = (
"\nTo plot a map you need to install 'geopandas', 'descartes' and "
"'pygeos'\n\n pip install geopandas descartes pygeos\n"
)
raise ModuleNotFoundError(msg)
lines = lines.merge(data.div(divide), left_index=True, right_index=True)
lines["centroid"] = lines.to_crs(epsg=25832).centroid.to_crs(epsg="4326")
if cmap_bg is None:
cmap_bg = LinearSegmentedColormap.from_list(
"mycmap", [(0, "#aed8b4"), (1, "#bddce5")]
)
if cmap_lines is None:
cmap_lines = LinearSegmentedColormap.from_list(
"mycmap",
[(0, "#aaaaaa"), (0.0001, "green"), (0.5, "yellow"), (1, "red")],
)
offshore = geometries.divide_off_and_onshore(polygons).offshore
polygons["color"] = 0
polygons.loc[offshore, "color"] = 1
lines["reverse"] = lines[key] < 0
# if direction is False:
lines.loc[lines["reverse"], key] = lines.loc[lines["reverse"], key] * -1
if vmax is None:
vmax = lines[key].max()
if label_min is None:
label_min = vmax * 0.5
if label_max is None:
label_max = float("inf")
ax = polygons.plot(
edgecolor="#9aa1a9",
cmap=cmap_bg,
column="color",
ax=ax,
aspect="equal",
)
if exist is not None:
lines = lines.loc[lines[exist] == 1]
ax = lines.plot(
cmap=cmap_lines,
legend=legend,
ax=ax,
column=key,
vmin=0,
vmax=vmax,
aspect="equal",
)
for i, v in lines.iterrows():
x1 = v["geometry"].coords[0][0]
y1 = v["geometry"].coords[0][1]
x2 = v["geometry"].coords[1][0]
y2 = v["geometry"].coords[1][1]
value_relative = v[key] / vmax
mc = cmap_lines(value_relative)
orient = math.atan(abs(x1 - x2) / abs(y1 - y2))
if (y1 > y2) & (x1 > x2) or (y1 < y2) & (x1 < x2):
orient *= -1
if v["reverse"]:
orient += math.pi
if v[key] == 0 or not direction:
polygon = patches.RegularPolygon(
(v["centroid"].x, v["centroid"].y),
4,
0.15,
orientation=orient,
color=(0, 0, 0, 0),
zorder=10,
)
else:
polygon = patches.RegularPolygon(
(v["centroid"].x, v["centroid"].y),
3,
0.15,
orientation=orient,
color=mc,
zorder=10,
)
ax.add_patch(polygon)
if decimal == 0:
value = int(round(v[key]))
else:
value = round(v[key], decimal)
if label_min <= value <= label_max:
if v["reverse"] is True and direction is False:
value *= -1
ax.text(
v["centroid"].x,
v["centroid"].y,
"{0} {1}".format(value, label_unit),
color="#000000",
fontsize=11,
zorder=15,
path_effects=[
patheffects.withStroke(linewidth=3, foreground="w")
],
)
for spine in plt.gca().spines.values():
spine.set_visible(False)
ax.axis("off")
polygons.apply(
lambda x: ax.annotate(
x.name, xy=x.geometry.centroid.coords[0], ha="center"
),
axis=1,
)
return ax
def show_transmission(path, name=None, number=0):
"""
Parameters
----------
    path : directory that is searched for deflex result files.
    name : optional substring used to pick a scenario by its file name.
    number : index of the scenario in the numbered dictionary if no name is given.
Returns
-------
"""
global MISSING_MODULES
if name is not None:
sc = [s for s in get_scenario(path).ls if name in s][0]
else:
sc = get_scenario(path).dc[number]
res = results.restore_results(sc)
r = res["Main"]
p = res["Param"]
flows = [
k for k in r.keys() if k[1] is not None and k[0].label.cat == "line"
]
transmission = pd.DataFrame()
trk = pd.DataFrame()
lines = geometries.deflex_power_lines("de21")
if lines is None:
lines = pd.DataFrame(index=["DE13-DE15", "DE08-DE09"])
for flow in flows:
name = "-".join([flow[0].label.subtag, flow[1].label.region])
if name in lines.index:
try:
capacity = p[flow]["scalars"].nominal_value
except AttributeError:
capacity = -1
back_flow = [
x
for x in flows
if x[0].label.subtag == flow[1].label.region
and x[1].label.region == flow[0].label.subtag
][0]
transmission[name] = (
r[flow]["sequences"]["flow"]
- r[back_flow]["sequences"]["flow"]
)
if capacity > 0:
trk.loc[name, "exist"] = True
trk.loc[name, "max_fraction"] = (
transmission[name].abs().max() / capacity * 100
)
trk.loc[name, "hours_90_prz"] = (
transmission[name]
.loc[transmission[name].abs().div(capacity) > 0.9]
.count()
)
trk.loc[name, "hours_90_prz_frac"] = (
trk.loc[name, "hours_90_prz"] / len(transmission) * 100
)
trk.loc[name, "avg_fraction"] = (
transmission[name].abs().sum()
/ (capacity * len(transmission))
* 100
)
elif capacity == 0 and transmission[name].max() > 0:
raise ValueError("Something odd happend")
else:
trk.loc[name, "exist"] = False
trk.loc[name, "max_fraction"] = -1
trk.loc[name, "avg_fraction"] = -1
trk.loc[name, "hours_90_prz_frac"] = 0
trk.loc[name, "hours_90_prz"] = -1
trk.loc[name, "max"] = transmission[name].abs().max()
trk.loc[name, "avg"] = transmission[name].abs().mean()
trk.loc[name, "sum"] = transmission[name].abs().sum()
if plt is not None:
f, ax = plt.subplots(1, 1, sharex=True, figsize=(8, 5))
plt.rcParams.update({"font.size": 11})
try:
plot_power_lines(
trk,
"hours_90_prz_frac",
direction=False,
vmax=25,
label_min=1,
unit_to_label=True,
unit="%",
ax=ax,
exist="exist",
)
except ModuleNotFoundError:
MISSING_MODULES.extend(["geopandas", "descartes", "pygeos"])
msg = (
"'geopandas', 'descartes' and 'pygeos' are missing. To show "
"the plot use:\n pip install geopandas descartes pygeos"
)
ax.text(0.2, 0.5, msg, fontsize=12)
plt.subplots_adjust(right=1, left=0, bottom=0.02, top=0.98)
figure_path = os.path.join(BASEPATH, "figures")
os.makedirs(figure_path, exist_ok=True)
plt.savefig(
os.path.join(figure_path, "transmission.{}".format(IMAGETYPE))
)
plt.show()
else:
print(
"Fraction of time [%] in which the use of the line is greater "
"than 90%"
)
print(trk["hours_90_prz_frac"].round(1).sort_values())
sleep(1)
MISSING_MODULES.extend(
["matplotlib", "geopandas", "descartes", "pygeos"]
)
msg = (
"\nTo see the mcp plot you need to install 'matplotlib' "
"\n\n pip install matplotlib\n"
)
warnings.warn(msg, UserWarning)
def show_relation(mcp, name="deflex_2014_de02"):
"""Show relation between OPSD price and scenario prices."""
if LinearModel is not None and plt is not None:
mean = mcp["opsd"].mean()
mcp = mcp.groupby("opsd").mean().loc[0:90]
model = LinearModel()
result = model.fit(mcp[name], x=mcp.index)
ax = result.plot_fit()
ax.set_xlabel("price from opsd data")
ax.set_ylabel("price from {0} data".format(name))
# line x=y to get an orientation
x = pd.Series([0, 40, 100])
ax.plot(x, x)
# mean price of opsd data
g1 = pd.Series([mean, mean])
g2 = | pd.Series([0, 100]) | pandas.Series |
import datetime
import gc
import glob
import numpy as np
import os
import pandas as pd
os.environ['KMP_DUPLICATE_LIB_OK']='True' # MacOS fix for libomp issues (https://github.com/dmlc/xgboost/issues/1715)
import lightgbm as lgb
from sklearn.metrics import log_loss, roc_auc_score, mean_squared_error
from sklearn.model_selection import KFold, RepeatedKFold, GroupKFold, StratifiedKFold
from sklearn.decomposition import PCA
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import NuSVC
from tqdm import tqdm as tqdm
from kinoa import kinoa
from scipy.stats import ttest_ind, ks_2samp
def dprint(*args, **kwargs):
print("[{}] ".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M")) + \
" ".join(map(str,args)), **kwargs)
dprint('PID: {}'.format(os.getpid()))
script_id = 0
data_path = '../input/'
train = pd.read_csv(os.path.join(data_path, 'training_v2.csv'))
id_col = 'encounter_id'
fd = pd.read_csv(os.path.join(data_path, 'WiDS Datathon 2020 Dictionary.csv'))
fd = fd[(fd['Data Type'] == 'string') | (fd['Data Type'] == 'binary')]
cat_features = list(fd['Variable Name'].values)
for c in cat_features:
if c not in train.columns or c == 'hospital_death':
cat_features.remove(c)
print(f'cat_features: {cat_features} ({len(cat_features)})')
extracted_files = glob.glob('./*.csv')
extracted_files = [f[2:-8] for f in extracted_files]
print(extracted_files)
# error
target_cols = []
for c in train.columns:
if c != id_col and c != 'hospital_death' and train[c].isnull().mean() > 0 and c not in extracted_files and c not in cat_features:
target_cols.append({'fname': c, 'type': 'regression'})
print(target_cols)
def preprocess(df, min_max_cols):
for c in min_max_cols:
vals = df[[c, c.replace('_min', '_max')]].values.copy()
df[c] = np.nanmin(vals, axis=1)
df[c.replace('_min', '_max')] = np.nanmax(vals, axis=1)
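# Illustrative sketch (hypothetical column and values): preprocess() repairs swapped
# min/max pairs in place.
def _preprocess_example():
    toy = pd.DataFrame({'d1_temp_min': [38.0, np.nan], 'd1_temp_max': [36.5, 37.0]})
    preprocess(toy, ['d1_temp_min'])
    return toy   # row 0 becomes min=36.5, max=38.0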
for t_i, target_data in enumerate(target_cols):
target_col = target_data['fname']
dprint(f'********************************* {target_col} ({t_i+1}/{len(target_cols)}) *********************************')
train = pd.read_csv(os.path.join(data_path, 'training_v2.csv'))
test = pd.read_csv(os.path.join(data_path, 'unlabeled.csv'))
min_max_cols = []
for c in train.columns:
if '_min' in c and c.replace('min', 'max') in train.columns:
min_max_cols.append(c)
print(f'min_max_cols: {min_max_cols} ({len(min_max_cols)})')
preprocess(train, min_max_cols)
preprocess(test, min_max_cols)
print(f'Number of missing values in train: {train[target_col].isnull().mean()}')
print(f'Number of missing values in test: {test[target_col].isnull().mean()}')
train['is_test'] = 0
test['is_test'] = 1
df_all = pd.concat([train, test], axis=0)
dprint('Label Encoder...')
cols = [f_ for f_ in df_all.columns if df_all[f_].dtype == 'object']
print(cols)
cnt = 0
for c in tqdm(cols):
if c != id_col and c != target_col:
# print(c)
le = LabelEncoder()
df_all[c] = le.fit_transform(df_all[c].astype(str))
cnt += 1
del le
dprint('len(cols) = {}'.format(cnt))
train = df_all.loc[df_all['is_test'] == 0].drop(['is_test'], axis=1)
test = df_all.loc[df_all['is_test'] == 1].drop(['is_test'], axis=1)
# del df_all
# gc.collect()
# Rearrange train and test
train = df_all[np.logical_not(df_all[target_col].isnull())].drop(['is_test'], axis=1)
test = df_all[df_all[target_col].isnull()].drop(['is_test'], axis=1)
dprint(train.shape, test.shape)
if target_data['type'] == 'classification':
tle = LabelEncoder()
train[target_col] = tle.fit_transform(train[target_col].astype(str))
empty_cols = []
for c in test.columns:
n = (~test[c].isnull()).sum()
if n == 0:
empty_cols.append(c)
print(f'empty_cols: {empty_cols}')
# error
features = list(train.columns.values)
features.remove(id_col)
features.remove(target_col)
# Build the model
cnt = 0
p_buf = []
n_splits = 4
n_repeats = 1
kf1 = RepeatedKFold(
n_splits=n_splits,
n_repeats=n_repeats,
random_state=0)
kf2 = RepeatedKFold(
n_splits=n_splits,
n_repeats=n_repeats,
random_state=1)
err_buf = []
undersampling = 0
if target_data['type'] == 'regression':
lgb_params = {
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': 'mse',
'max_depth': 8,
'learning_rate': 0.05,
'feature_fraction': 0.85,
'bagging_fraction': 0.85,
'bagging_freq': 5,
'lambda_l1': 1.0,
'lambda_l2': 1.0,
'verbose': -1,
'num_threads': -1,
}
elif target_data['type'] == 'classification':
dprint(f'Num classes: {train[target_col].nunique()} ({train[target_col].unique()})')
if train[target_col].nunique() == 2:
lgb_params = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'binary_logloss',
'max_depth': 8,
'learning_rate': 0.05,
'feature_fraction': 0.85,
'bagging_fraction': 0.85,
'bagging_freq': 5,
'lambda_l1': 1.0,
'lambda_l2': 1.0,
'verbose': -1,
'num_threads': -1,
}
else:
lgb_params = {
'boosting_type': 'gbdt',
'objective': 'multiclass',
'metric': 'multi_logloss',
'max_depth': 8,
'learning_rate': 0.05,
'feature_fraction': 0.85,
'bagging_fraction': 0.85,
'bagging_freq': 5,
'lambda_l1': 1.0,
'lambda_l2': 1.0,
'verbose': -1,
'num_threads': -1,
'num_class': train[target_col].nunique()
}
cols_to_drop = [
id_col,
target_col,
'hospital_death',
# 'bmi',
] + empty_cols
# cols_to_use = features
X = train.drop(cols_to_drop, axis=1, errors='ignore')
y = train[target_col].values
id_train = train[id_col].values
X_test = test.drop(cols_to_drop, axis=1, errors='ignore')
id_test = test[id_col].values
feature_names = list(X.columns)
n_features = X.shape[1]
dprint(f'n_features: {n_features}')
p_test = []
dfs_train = []
dfs_test = []
for fold_i_oof, (train_index_oof, valid_index_oof) in enumerate(kf1.split(X, y)):
x_train_oof = X.iloc[train_index_oof]
x_valid_oof = X.iloc[valid_index_oof]
y_train_oof = y[train_index_oof]
y_valid_oof = y[valid_index_oof]
id_train_oof = id_train[valid_index_oof]
for fold_i, (train_index, valid_index) in enumerate(kf2.split(x_train_oof, y_train_oof)):
params = lgb_params.copy()
x_train = x_train_oof.iloc[train_index]
x_valid = x_train_oof.iloc[valid_index]
lgb_train = lgb.Dataset(
x_train,
y_train_oof[train_index],
feature_name=feature_names,
)
lgb_train.raw_data = None
lgb_valid = lgb.Dataset(
x_valid,
y_train_oof[valid_index],
)
lgb_valid.raw_data = None
model = lgb.train(
params,
lgb_train,
num_boost_round=5000,
valid_sets=[lgb_valid],
early_stopping_rounds=100,
verbose_eval=100,
)
if fold_i_oof == 0:
importance = model.feature_importance()
model_fnames = model.feature_name()
tuples = sorted(zip(model_fnames, importance), key=lambda x: x[1])[::-1]
tuples = [x for x in tuples if x[1] > 0]
print('Important features:')
for i in range(20):
if i < len(tuples):
print(tuples[i])
else:
break
del importance, model_fnames, tuples
p_lgbm = model.predict(x_valid, num_iteration=model.best_iteration)
if target_data['type'] == 'regression':
err = mean_squared_error(y_train_oof[valid_index], p_lgbm)
err_buf.append(err)
dprint('{} LGBM MSE: {:.4f}'.format(fold_i, err))
elif target_data['type'] == 'classification':
if train[target_col].nunique() == 2:
err = roc_auc_score(y_train_oof[valid_index], p_lgbm)
dprint('{} LGBM AUC: {:.6f}'.format(fold_i, err))
err = log_loss(y_train_oof[valid_index], p_lgbm)
err_buf.append(err)
dprint('{} LGBM LOSS: {:.4f}'.format(fold_i, err))
p_lgbm_train = model.predict(x_valid_oof, num_iteration=model.best_iteration)
p_lgbm_test = model.predict(X_test[feature_names], num_iteration=model.best_iteration)
df_train = pd.DataFrame()
df_train[id_col] = id_train_oof
if target_data['type'] == 'regression':
df_train[target_col] = p_lgbm_train
elif target_data['type'] == 'classification':
if train[target_col].nunique() == 2:
df_train[target_col] = p_lgbm_train
else:
for i, t in enumerate(np.sort(train[target_col].unique())):
df_train[str(t)] = p_lgbm_train[:, i]
dfs_train.append(df_train)
df_test = pd.DataFrame()
df_test[id_col] = id_test
if target_data['type'] == 'regression':
df_test[target_col] = p_lgbm_test
elif target_data['type'] == 'classification':
if train[target_col].nunique() == 2:
df_test[target_col] = p_lgbm_test
else:
for i, t in enumerate(np.sort(train[target_col].unique())):
df_test[str(t)] = p_lgbm_test[:, i]
dfs_test.append(df_test)
# p_test.append(p_lgbm_test)
del model, lgb_train, lgb_valid
            gc.collect()
# break
err_mean = np.mean(err_buf)
err_std = np.std(err_buf)
dprint('ERR: {:.4f} +/- {:.4f}'.format(err_mean, err_std))
dfs_train = pd.concat(dfs_train, axis=0)
if target_data['type'] == 'regression':
dfs_train = dfs_train.groupby(id_col)[target_col].mean().reset_index().rename({target_col: target_col + '_est'}, axis=1)
elif target_data['type'] == 'classification':
if train[target_col].nunique() == 2:
dfs_train = dfs_train.groupby(id_col)[target_col].mean().reset_index()
dfs_train[target_col] = tle.inverse_transform(np.round(dfs_train[target_col].values).astype(int))
dfs_train.rename({target_col: target_col + '_est'}, inplace=True, axis=1)
else:
dfs_train = dfs_train.groupby(id_col).mean().reset_index()
cols = np.sort(train[target_col].unique()).astype(str)
dfs_train[target_col + '_est'] = tle.inverse_transform(np.argmax(dfs_train[cols].values, axis=1))
print(dfs_train.head())
dfs_test = pd.concat(dfs_test, axis=0)
if target_data['type'] == 'regression':
dfs_test = dfs_test.groupby(id_col)[target_col].mean().reset_index().rename({target_col: target_col + '_est'}, axis=1)
elif target_data['type'] == 'classification':
if train[target_col].nunique() == 2:
dfs_test = dfs_test.groupby(id_col)[target_col].mean().reset_index()
dfs_test[target_col] = tle.inverse_transform(np.round(dfs_test[target_col].values).astype(int))
dfs_test.rename({target_col: target_col + '_est'}, inplace=True, axis=1)
else:
dfs_test = dfs_test.groupby(id_col).mean().reset_index()
cols = np.sort(train[target_col].unique()).astype(str)
dfs_test[target_col + '_est'] = tle.inverse_transform(np.argmax(dfs_test[cols].values, axis=1))
print(dfs_test.head())
out = | pd.concat([dfs_train, dfs_test], axis=0) | pandas.concat |
import os
import numpy as np
import pandas as pd
import logging
import array_analyzer.extract.constants as constants
def antigen2D_to_df1D(xlsx_path, sheet, data_col):
"""
Convert old 2D output format (per antigen) to 1D dataframe
:param str xlsx_path: path to the xlsx file
:param str sheet: sheet name to load
:param str data_col: new column name of the linearized values
:return dataframe df: linearized dataframe
"""
df = pd.read_excel(xlsx_path, sheet_name=sheet, index_col=0)
df = df.unstack().reset_index(name=data_col) # linearize the table
df.rename(columns={'level_1': 'antigen_row', 'level_0': 'antigen_col'}, inplace=True)
df[['antigen_row', 'antigen_col']] = df[['antigen_row', 'antigen_col']].applymap(int)
df = df[['antigen_row', 'antigen_col', data_col]]
df.dropna(inplace=True)
return df
def well2D_to_df1D(xlsx_path, sheet, data_col):
"""
Convert new 2D output format (per well) to 1D dataframe
:param str xlsx_path: path to the xlsx file
:param str sheet: sheet name to load
:param str data_col: new column name of the linearized values
:return dataframe df: linearized dataframe
"""
df = pd.read_excel(xlsx_path, sheet_name=sheet, index_col=0)
df = df.unstack().reset_index(name=data_col) # unpivot (linearize) the table
df.rename(columns={'level_1': 'row_id', 'level_0': 'col_id'}, inplace=True)
df['well_id'] = df.row_id + df.col_id.map(str)
df = df[['well_id', data_col]]
return df
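# Illustrative sketch of the 2D -> 1D unstack used above, run on an in-memory frame
# with made-up values (the real functions read the grid from an xlsx sheet):
def _unstack_example():
    grid = pd.DataFrame([[0.1, 0.2], [0.3, 0.4]], index=['A', 'B'], columns=[1, 2])
    df = grid.unstack().reset_index(name='OD')
    df.rename(columns={'level_1': 'row_id', 'level_0': 'col_id'}, inplace=True)
    df['well_id'] = df.row_id + df.col_id.map(str)
    return df[['well_id', 'OD']]   # rows A1, B1, A2, B2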
def read_plate_info(metadata_xlsx):
"""read plate info from the metadata"""
print('Reading the plate info...')
sheet_names = ['serum ID',
'serum dilution',
'serum type',
'serum cat',
'secondary ID',
'secondary dilution',
'sample type']
plate_info_df = pd.DataFrame()
# get sheet names that are available in metadata
sheet_names = list(set(metadata_xlsx.sheet_names).intersection(sheet_names))
for sheet_name in sheet_names:
sheet_df = pd.read_excel(metadata_xlsx, sheet_name=sheet_name, index_col=0)
sheet_df = sheet_df.unstack().reset_index(name=sheet_name) # unpivot (linearize) the table
sheet_df.rename(columns={'level_1': 'row_id', 'level_0': 'col_id'}, inplace=True)
if plate_info_df.empty:
plate_info_df = sheet_df
else:
plate_info_df = pd.merge(plate_info_df,
sheet_df,
how='left', on=['row_id', 'col_id'])
plate_info_df['well_id'] = plate_info_df.row_id + plate_info_df.col_id.map(str)
sheet_names.append('well_id')
# convert to number and non-numeric to NaN
plate_info_df['serum dilution'] = \
plate_info_df['serum dilution'].apply(pd.to_numeric, errors='coerce')
logger = logging.getLogger(constants.LOG_NAME)
nan_cols = plate_info_df.columns[plate_info_df.isnull().any()].tolist()
if nan_cols:
logger.warning("Parsing metadata failed for some wells in tab {}. "
"Please check info in these tabs are all filled out and in the right format "
"(e.g. dilutions should not contain strings)".format(nan_cols))
plate_info_df.dropna(inplace=True)
plate_info_df.drop(['row_id', 'col_id'], axis=1, inplace=True)
if 'sample type' not in sheet_names:
plate_info_df['sample type'] = 'Serum'
return plate_info_df
def read_antigen_info(metadata_path):
"""read antigen info from the metadata"""
print('Reading antigen information...')
antigen_df = antigen2D_to_df1D(xlsx_path=metadata_path, sheet='antigen_array', data_col='antigen')
antigen_type_df = antigen2D_to_df1D(xlsx_path=metadata_path, sheet='antigen_type', data_col='antigen type')
antigen_df = pd.merge(antigen_df, antigen_type_df, how='left', on=['antigen_row', 'antigen_col'])
return antigen_df
def read_pysero_output(file_path, antigen_df, file_type='od'):
"""
read and re-format pysero spot fitting output
:param str file_path: path to the pysero output xlsx file
:param dataframe antigen_df:
:param str file_type: output file type. 'od', 'int', or 'bg'
:return: linearized dataframe
"""
print('Reading {}...'.format(file_type))
data_col = {'od': 'OD', 'int': 'intensity', 'bg': 'background'}
data_df = pd.DataFrame()
with pd.ExcelFile(file_path) as file:
sheet_names = file.sheet_names
for _, row in antigen_df.iterrows():
if sheet_names[0][0].isnumeric(): # new format
sheet_name = '{}_{}_{}'.format(row['antigen_row'], row['antigen_col'], row['antigen'])
else:
sheet_name = '{}_{}_{}_{}'.format(file_type, row['antigen_row'], row['antigen_col'], row['antigen'])
            data_1_antigen_df = well2D_to_df1D(xlsx_path=file, sheet=sheet_name, data_col=data_col[file_type])
            data_1_antigen_df['antigen_row'] = row['antigen_row']
            data_1_antigen_df['antigen_col'] = row['antigen_col']
            data_1_antigen_df['antigen'] = row['antigen']
            data_df = data_df.append(data_1_antigen_df, ignore_index=True)
return data_df
def read_scn_output(file_path, plate_info_df):
"""
Read scienion intensity output and convert it to OD
:param str file_path: path to the scienion output xlsx file
:param dataframe plate_info_df: plate info dataframe
:return dataframe: scienion OD dataframe
"""
# Read analysis output from Scienion
scienion_df = pd.DataFrame()
with pd.ExcelFile(file_path) as scienion_xlsx:
for well_id in plate_info_df['well_id']:
            OD_1_antigen_df = pd.read_excel(scienion_xlsx, sheet_name=well_id)
            OD_1_antigen_df['well_id'] = well_id
            scienion_df = scienion_df.append(OD_1_antigen_df, ignore_index=True)
# parse spot ids
spot_id_df = scienion_df['ID'].str.extract(r'spot-(\d)-(\d)')
spot_id_df = spot_id_df.astype(int) - 1 # index starting from 0
spot_id_df.rename(columns={0: 'antigen_row', 1: 'antigen_col'}, inplace=True)
scienion_df = pd.concat([spot_id_df, scienion_df], axis=1)
scienion_df.drop('ID', axis=1, inplace=True)
# invert the intensity and compute ODs
df_scn = scienion_df.loc[:, ['antigen_row', 'antigen_col', 'well_id']]
df_scn['intensity'] = 1 - scienion_df['Median'] / 255
df_scn['background'] = 1 - scienion_df['Background Median'] / 255
df_scn['OD'] = np.log10(df_scn['background'] / df_scn['intensity'])
return df_scn
def slice_df(df, slice_action, column, keys):
"""
    Return sliced dataframe given the column and keys
:param df: dataframe to slice
:param slice_action: 'keep' or 'drop'
:param column: column to slice based on
:param keys: key values to keep or drop
:return:
"""
    if column is None or column != column:  # "column != column" catches NaN
return df
if not isinstance(keys, (list, np.ndarray)):
if keys != keys or keys is None: # nan
return df
keys = [keys]
if slice_action is None or slice_action != slice_action:
return df
elif slice_action == 'keep':
df = df.loc[df[column].isin(keys), :]
elif slice_action == 'drop':
df = df.loc[~df[column].isin(keys), :]
else:
raise ValueError('slice action has to be "keep" or "drop", not "{}"'.format(slice_action))
return df
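# Hedged usage sketch for slice_df: keep or drop rows by key values in a column.
# The toy dataframe and key values are illustrative only.
def _example_slice_df_usage():
    toy = pd.DataFrame({'antigen': ['A', 'B', 'C'], 'OD': [0.1, 0.2, 0.3]})
    kept = slice_df(toy, 'keep', 'antigen', ['A', 'B'])  # rows with antigen A or B
    dropped = slice_df(toy, 'drop', 'antigen', 'C')  # a scalar key gets wrapped in a list
    unchanged = slice_df(toy, None, np.nan, np.nan)  # NaN/None column or action returns df unchanged
    return kept, dropped, unchanged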
def normalize_od_helper(norm_antigen):
def normalize(df):
norm_antigen_df = slice_df(df, 'keep', 'antigen', [norm_antigen])
norm_factor = norm_antigen_df['OD'].mean()
df['OD'] = df['OD'] / norm_factor
return df
return normalize
def normalize_od(df, norm_antigen=None, group='plate'):
"""
Normalize OD by OD of the reference antigen
:param dataframe df: dataframe containing serum OD info
:param str norm_antigen: reference antigen to normalize by
:param str group: unit to normalize. 'plate' or 'well'
:return dataframe df: dataframe with normalized serum OD info
"""
if norm_antigen is None:
return df
if group == 'plate':
groupby_cols = ['plate ID', 'pipeline', 'sample type']
elif group == 'well':
groupby_cols = ['plate ID', 'well_id', 'pipeline', 'sample type']
else:
        raise ValueError('normalization group has to be plate or well, not {}'.format(group))
for pipeline in df['pipeline'].unique():
for sample_type in df['sample type'].unique():
norm_antigen_df = slice_df(df, 'keep', 'pipeline', [pipeline])
norm_antigen_df = slice_df(norm_antigen_df, 'keep', 'sample type', [sample_type])
norm_antigen_df = slice_df(norm_antigen_df, 'keep', 'antigen', [norm_antigen])
df.loc[(df['antigen'] == norm_antigen) &
(df['pipeline'] == pipeline) &
(df['sample type'] == sample_type), 'OD'] = \
norm_antigen_df['OD'] / norm_antigen_df['OD'].mean()
norm_fn = normalize_od_helper(norm_antigen)
df = df.groupby(groupby_cols).apply(norm_fn)
return df
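# Hedged usage sketch for normalize_od: scale ODs by the group-wise mean OD of a
# reference antigen. The plate/pipeline/antigen names below are made up.
def _example_normalize_od_usage():
    toy = pd.DataFrame({
        'plate ID': ['plate1'] * 4,
        'pipeline': ['python'] * 4,
        'sample type': ['Serum'] * 4,
        'well_id': ['A1', 'A2', 'A1', 'A2'],
        'antigen': ['ref antigen', 'ref antigen', 'test antigen', 'test antigen'],
        'OD': [0.8, 1.2, 0.5, 0.7],
    })
    return normalize_od(toy, norm_antigen='ref antigen', group='plate')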
def offset_od_helper(norm_antigen):
def offset(df):
norm_antigen_df = slice_df(df, 'keep', 'antigen', [norm_antigen])
norm_factor = norm_antigen_df['OD'].mean()
df['OD'] = df['OD'] - norm_factor
df.loc[df['OD'] < 0, 'OD'] = 0
return df
return offset
def offset_od(df, norm_antigen=None, group='plate'):
"""offset OD by OD of the reference antigen
"""
if norm_antigen is None:
return df
if group == 'plate':
groupby_cols = ['plate ID']
elif group == 'well':
groupby_cols = ['plate ID', 'well_id']
else:
        raise ValueError('normalization group has to be plate or well, not {}'.format(group))
norm_fn = offset_od_helper(norm_antigen)
df = df.groupby(groupby_cols).apply(norm_fn)
return df
def read_scn_output_batch(scn_dirs_df):
"""
batch read scienion outputs
:param dataframe scn_dirs_df: dataframe loaded from the analysis config
containing directories of scienion output xlsx file, assuming the file name always
ends with '_analysis.xlsx'
:return dataframe scn_df: combined scienion OD dataframe from multiple outputs
"""
scn_df = | pd.DataFrame() | pandas.DataFrame |
import logging
import unittest
import os
import pandas as pd
import numpy as np
import h5py
import pandas.util.testing as pandas_testing
import cmapPy.pandasGEXpress.setup_GCToo_logger as setup_logger
import cmapPy.pandasGEXpress.GCToo as GCToo
import cmapPy.pandasGEXpress.parse_gctx as parse_gctx
import cmapPy.pandasGEXpress.mini_gctoo_for_testing as mini_gctoo_for_testing
import cmapPy.pandasGEXpress.subset_gctoo as subset_gctoo
import cmapPy.pandasGEXpress.write_gctx as write_gctx
__author__ = "<NAME>"
__email__ = "<EMAIL>"
FUNCTIONAL_TESTS_PATH = "../functional_tests"
logger = logging.getLogger(setup_logger.LOGGER_NAME)
version_node = "version"
rid_node = "/0/META/ROW/id"
cid_node = "/0/META/COL/id"
data_node = "/0/DATA/0/matrix"
row_meta_group_node = "/0/META/ROW"
col_meta_group_node = "/0/META/COL"
class MockHdf5Dset(object):
def __init__(self, data_list, dtype):
self.data_list = data_list
self.shape = (len(data_list),)
self.dtype = dtype
def read_direct(self, dest):
for i in range(len(dest)):
dest[i] = self.data_list[i]
class TestParseGctx(unittest.TestCase):
def test_parse(self):
# parse whole thing
mg1 = mini_gctoo_for_testing.make()
mg2 = parse_gctx.parse("../functional_tests/mini_gctoo_for_testing.gctx")
pandas_testing.assert_frame_equal(mg1.data_df, mg2.data_df)
pandas_testing.assert_frame_equal(mg1.row_metadata_df, mg2.row_metadata_df)
pandas_testing.assert_frame_equal(mg1.col_metadata_df, mg2.col_metadata_df)
# test with string rid/cid
test_rids = ['LJP007_MCF10A_24H:TRT_CP:BRD-K93918653:3.33', 'LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666']
test_cids = ['LJP007_MCF7_24H:TRT_POSCON:BRD-A61304759:10']
mg3 = subset_gctoo.subset_gctoo(mg1, rid=test_rids, cid=test_cids)
mg4 = parse_gctx.parse("../functional_tests/mini_gctoo_for_testing.gctx",
rid=test_rids, cid=test_cids)
pandas_testing.assert_frame_equal(mg3.data_df, mg4.data_df)
pandas_testing.assert_frame_equal(mg3.row_metadata_df, mg4.row_metadata_df)
pandas_testing.assert_frame_equal(mg3.col_metadata_df, mg4.col_metadata_df)
# first, make & write out temp version of mini_gctoo with int rids/cids
new_mg = mini_gctoo_for_testing.make(convert_neg_666=False)
int_indexed_data_df = new_mg.data_df.copy()
int_indexed_data_df.index = [str(i) for i in range(0, 6)]
int_indexed_data_df.columns = [str(i) for i in range(10, 16)]
int_indexed_row_meta = new_mg.row_metadata_df.copy()
int_indexed_row_meta.index = int_indexed_data_df.index
int_indexed_col_meta = new_mg.col_metadata_df.copy()
int_indexed_col_meta.index = int_indexed_data_df.columns
int_indexed_gctoo = GCToo.GCToo(data_df=int_indexed_data_df, row_metadata_df=int_indexed_row_meta,
col_metadata_df=int_indexed_col_meta)
write_gctx.write(int_indexed_gctoo, "int_indexed_mini_gctoo.gctx")
# test with numeric (repr as string) rid/cid
mg5 = GCToo.GCToo(data_df=int_indexed_data_df, row_metadata_df=int_indexed_row_meta,
col_metadata_df=int_indexed_col_meta)
mg5 = subset_gctoo.subset_gctoo(mg5, row_bool=[True, False, True, False, True, False],
col_bool=[True, False, False, True, True, True])
mg5.data_df.index.name = "rid"
mg5.data_df.columns.name = "cid"
mg5.row_metadata_df.index.name = "rid"
mg5.row_metadata_df.columns.name = "rhd"
mg5.col_metadata_df.index.name = "cid"
mg5.col_metadata_df.columns.name = "chd"
mg6 = parse_gctx.parse("int_indexed_mini_gctoo.gctx", rid=["0", "2", "4"],
cid=["10", "13", "14", "15"], convert_neg_666=False)
os.remove("int_indexed_mini_gctoo.gctx")
pandas_testing.assert_frame_equal(mg5.data_df, mg6.data_df)
pandas_testing.assert_frame_equal(mg5.row_metadata_df, mg6.row_metadata_df)
pandas_testing.assert_frame_equal(mg5.col_metadata_df, mg6.col_metadata_df)
# test with ridx/cidx
mg7 = subset_gctoo.subset_gctoo(mg1, rid=['LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666'],
cid=['LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666'])
mg8 = parse_gctx.parse("../functional_tests/mini_gctoo_for_testing.gctx", ridx=[4], cidx=[4])
pandas_testing.assert_frame_equal(mg7.data_df, mg8.data_df)
pandas_testing.assert_frame_equal(mg7.row_metadata_df, mg8.row_metadata_df)
pandas_testing.assert_frame_equal(mg7.col_metadata_df, mg8.col_metadata_df)
# test with rid/cidx
mg9 = parse_gctx.parse("../functional_tests/mini_gctoo_for_testing.gctx",
rid=['LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666'],
cidx=[4])
pandas_testing.assert_frame_equal(mg7.data_df, mg9.data_df)
pandas_testing.assert_frame_equal(mg7.row_metadata_df, mg9.row_metadata_df)
pandas_testing.assert_frame_equal(mg7.col_metadata_df, mg9.col_metadata_df)
# test with ridx/cid
mg10 = parse_gctx.parse("../functional_tests/mini_gctoo_for_testing.gctx", ridx=[4],
cid=['LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666'])
pandas_testing.assert_frame_equal(mg7.data_df, mg10.data_df)
pandas_testing.assert_frame_equal(mg7.row_metadata_df, mg10.row_metadata_df)
| pandas_testing.assert_frame_equal(mg7.col_metadata_df, mg10.col_metadata_df) | pandas.util.testing.assert_frame_equal |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# =========================================================================== #
# Project : ML Studio #
# Version : 0.1.0 #
# File : benchmark.py #
# Python : 3.8.3 #
# -------------------------------------------------------------------------- #
# Author : <NAME> #
# Company : DecisionScients #
# Email : <EMAIL> #
# URL : https://github.com/decisionscients/MLStudio #
# -------------------------------------------------------------------------- #
# Created : Sunday, May 24th 2020, 11:06:10 am #
# Last Modified : Sunday, June 14th 2020, 9:48:14 pm #
# Modified By : <NAME> (<EMAIL>) #
# -------------------------------------------------------------------------- #
# License : BSD #
# Copyright (c) 2020 DecisionScients #
# =========================================================================== #
#%%
from collections import OrderedDict
from datetime import datetime
import os
from pathlib import Path
import sys
import pandas as pd
import numpy as np
homedir = str(Path(__file__).parents[3])
demodir = str(Path(__file__).parents[1])
sys.path.append(homedir)
from mlstudio.supervised.machine_learning.gradient_descent import GDPureOptimizer
from mlstudio.supervised.visual.animations import animate_optimization
from mlstudio.supervised.algorithms.optimization.services.benchmarks import Adjiman, StyblinskiTank, Wikipedia
from mlstudio.supervised.algorithms.optimization.services.benchmarks import ThreeHumpCamel, Ursem01, Branin02
from mlstudio.supervised.algorithms.optimization.services.optimizers import GradientDescentOptimizer, Momentum, Nesterov
from mlstudio.supervised.algorithms.optimization.services.optimizers import Adagrad, Adadelta, RMSprop
from mlstudio.supervised.algorithms.optimization.services.optimizers import Adam, AdaMax, AdamW
from mlstudio.supervised.algorithms.optimization.services.optimizers import Nadam, AMSGrad, QHAdam
from mlstudio.supervised.algorithms.optimization.services.optimizers import QuasiHyperbolicMomentum
from mlstudio.supervised.algorithms.optimization.services.optimizers import AggMo
from mlstudio.utils.data_analyzer import cosine
from mlstudio.utils.file_manager import save_df
# -------------------------------------------------------------------------- #
# Designate file locations
figures = os.path.join(demodir, "figures")
# -------------------------------------------------------------------------- #
# Package up the objective functions
optimizers = [Momentum(), Nesterov(), Adagrad(), Adadelta(), RMSprop(), Adam(),
AdaMax(), Nadam(), AMSGrad(), AdamW(), QHAdam(),
QuasiHyperbolicMomentum()]
objectives = [Adjiman(), Branin02(), Ursem01(),
StyblinskiTank(), ThreeHumpCamel(), Wikipedia()]
# -------------------------------------------------------------------------- #
# Train models
solutions = OrderedDict()
results = []
for objective in objectives:
estimators = OrderedDict()
for optimizer in optimizers:
estimators[optimizer.name] = {}
model = GDPureOptimizer(learning_rate=0.01,
theta_init=objective.start,
epochs=500, objective=objective,
optimizer=optimizer)
model.fit()
sim = cosine(objective.minimum, model.theta_)
d = {}
d['DateTime'] = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
d['Objective'] = model.objective.name
d['Optimizer'] = optimizer.name
d['Epochs'] = model.epochs
d['Starting Learning Rate'] = model.learning_rate
d['Final Learning Rate'] = model.eta
if model.schedule:
d['Schedule'] = model.schedule.name
else:
d['Schedule'] = None
d['gradient_min'] = np.min(model.get_blackbox().epoch_log.get('gradient_norm'))
d['gradient_max'] = np.max(model.get_blackbox().epoch_log.get('gradient_norm'))
d['gradient_mean'] = np.mean(model.get_blackbox().epoch_log.get('gradient_norm'))
d['True'] = objective.minimum
d['est'] = model.theta_
d['sim'] = sim
results.append(d)
estimators[optimizer.name]['model'] = model
estimators[optimizer.name]['results'] = d
solutions[objective.name] = estimators
df = | pd.DataFrame(results) | pandas.DataFrame |
# experiment tracker
import sys
import os
import numpy as np
import pandas as pd
from dask import compute, delayed
sys.path.append('../../')
sys.path.append('../')
sys.path.append('../../experiment-impact-tracker/')
from experiment_impact_tracker.data_interface import DataInterface
from experiment_impact_tracker.data_utils import *
from experiment_impact_tracker.data_utils import (load_data_into_frame,
load_initial_info,
zip_data_and_info)
def compute_aggregate_power(df, info, PUE, task_epoch_df,use_cuda):
    ''' Aggregates and partitions power consumption based on task interval timestamps. Allows one to see the breakdown of power consumption for the different subtasks.
'''
# time calcs
exp_end_timestamp = datetime.timestamp(info["experiment_end"])
exp_len = exp_end_timestamp - datetime.timestamp(info["experiment_start"])
exp_len_hours = exp_len / 3600.0
time_differences = df["timestamp_orig"].diff()
time_differences[0] = df["timestamp_orig"][0] - datetime.timestamp(
info["experiment_start"]
)
# Add final timestamp and extrapolate last row of power estimates
time_differences.loc[len(time_differences)] = (
exp_end_timestamp - df["timestamp_orig"][len(df["timestamp_orig"]) - 1]
)
time_differences_in_hours = time_differences / 3600.0
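    # Per-interval energy is approximated as kWh = power (kW) * interval length (hours),
    # i.e. a piecewise-constant integration of the sampled power draw over the run.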
# rapl calcs
power_draw_rapl_kw = df["rapl_estimated_attributable_power_draw"] / 1000.0
power_draw_rapl_kw.loc[len(power_draw_rapl_kw)] = power_draw_rapl_kw.loc[
len(power_draw_rapl_kw) - 1
]
kw_hr_rapl = (
np.multiply(time_differences_in_hours, power_draw_rapl_kw)
if power_draw_rapl_kw is not None
else None
)
# nvidia calcs
if use_cuda:
num_gpus = len(info["gpu_info"])
nvidia_power_draw_kw = df["nvidia_estimated_attributable_power_draw"] / 1000.0
nvidia_power_draw_kw.loc[len(nvidia_power_draw_kw)] = nvidia_power_draw_kw.loc[
len(nvidia_power_draw_kw) - 1
]
# elementwise multiplication and sum
kw_hr_nvidia = np.multiply(time_differences_in_hours, nvidia_power_draw_kw)
# apply PUE
if use_cuda and (kw_hr_rapl is not None):
total_power_per_timestep = PUE * (kw_hr_nvidia + kw_hr_rapl)
elif kw_hr_rapl is not None:
total_power_per_timestep = PUE * (kw_hr_rapl)
elif use_cuda:
total_power_per_timestep = PUE * (kw_hr_nvidia)
else:
raise ValueError("Unable to get either GPU or CPU metric.")
# interpolate power based on timesteps
# Append last row which implies power draw from last sample extrapolated till the end of experiment
df.loc[len(df)] = df.loc[len(df) - 1] ## Duplicating last row to match length of total_power_per_timestep
df.loc[len(df)-1,'timestamp'] = task_epoch_df.loc[len(task_epoch_df)-1,'epoch_timestamp'] #update the timestamp to match end of experiment
df['total_power_per_timestep'] = total_power_per_timestep.copy()
task_power_df = pd.DataFrame(columns=['task','power'])
if total_power_per_timestep is not None:
# end-to-end power consumption
task_power_df.loc[0] = ['Experiment', total_power_per_timestep.sum()]
prev_epoch_power = 0
print('number of timestamps: {}'.format(len(total_power_per_timestep)))
# power consumption per task
for i in range(len(task_epoch_df)):
task = task_epoch_df.loc[i,'task']
epoch = task_epoch_df.loc[i,'epoch_timestamp']
epoch_idx = len(df[df['timestamp'] <= epoch])
current_epoch_power = total_power_per_timestep[:epoch_idx].sum()
task_power_df.loc[i+1] = [task, current_epoch_power - prev_epoch_power ]
prev_epoch_power = current_epoch_power
return df, task_power_df
def get_EIT_tracker_data(logdir, use_cuda, read_flops):
    ''' Fetches experiment impact tracker data from data_interface and separates it into 1) an end-to-end experiment df, 2) a power consumption per sampling epoch df and 3) a flops and power consumption per task df
'''
# try:
info = load_initial_info(logdir)
# Get total values from default data interface for the entire experiment
data_interface = DataInterface([logdir])
total_power = data_interface.total_power
total_carbon = data_interface.kg_carbon
PUE = data_interface.PUE
exp_len_hours = data_interface.exp_len_hours
    # Calculate your own values separately for each subtask in the experiment
# impact tracker log
tracker_df = load_data_into_frame(logdir)
if use_cuda:
power_df = tracker_df[0][['timestamp','rapl_power_draw_absolute','rapl_estimated_attributable_power_draw','nvidia_draw_absolute','nvidia_estimated_attributable_power_draw']].copy()
power_df.loc[:,'total_attributable_power_draw'] = power_df['rapl_estimated_attributable_power_draw'] + power_df['nvidia_estimated_attributable_power_draw']
else:
power_df = tracker_df[0][['timestamp','rapl_power_draw_absolute','rapl_estimated_attributable_power_draw']].copy()
power_df.loc[:,'total_attributable_power_draw'] = power_df['rapl_estimated_attributable_power_draw']
# start time from 0
power_df.loc[:,'timestamp_orig'] = power_df['timestamp']
power_df.loc[:,'timestamp'] = power_df['timestamp'] - power_df['timestamp'][0]
# papi log
flops_df = None
total_duration = 0
if read_flops:
compute_flops_csv = logdir + 'compute_costs_flop.csv'
flops_df = pd.read_csv(compute_flops_csv)
flops_df.loc[:,'start_time'] = flops_df['start_time'] - flops_df['start_time'][0]
        # Aggregate power draws per epoch for each papi context calculation (i.e. setup, axial, aggr etc.)
epoch_power_draw_list = []
epoch_timestamps = list(flops_df['start_time'].values[1:]) + [flops_df['start_time'].values[-1] + flops_df['duration'].values[-1]]
task_epoch_df = pd.DataFrame()
task_epoch_df.loc[:,'task'] = flops_df['task'].values
task_epoch_df.loc[:,'epoch_timestamp'] = epoch_timestamps
power_df, task_power_df = compute_aggregate_power(power_df, info, PUE, task_epoch_df, use_cuda)
flops_df = pd.merge(flops_df,task_power_df,on='task',how='left')
print('total_power sanity check: default: {:6.5f}, calculated: {:6.5f}, {:6.5f}'.format(total_power, task_power_df.loc[0,'power'],power_df['total_power_per_timestep'].sum()))
total_duration_papi = (power_df['timestamp'].values[-1]-power_df['timestamp'].values[0])/3600
tracker_summary_df = pd.DataFrame(columns=['total_power','total_carbon','PUE','total_duration_papi','total_duration_impact_tracker'])
tracker_summary_df.loc[0] = [total_power,total_carbon,PUE,total_duration_papi,exp_len_hours]
return power_df, flops_df, tracker_summary_df
# except:
# print(f'No valid experiment impact tracker log found at {logdir}')
# return None
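# Hedged usage sketch (the log directory is hypothetical; assumes an
# experiment-impact-tracker run with GPU logging enabled and a compute_costs_flop.csv present):
def _example_read_single_tracker_log(logdir='impact_tracker_logs/subject-01/'):
    power_df, flops_df, summary_df = get_EIT_tracker_data(logdir, use_cuda=True, read_flops=True)
    # summary_df holds the experiment-level totals (power, carbon, PUE, durations)
    return power_df, flops_df, summary_df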
def collate_EIT_tracker_data(tracker_log_dir_list, use_cuda, read_flops):
''' Collates EIT tracker data from a set of experiments e.g. FastSurfer results for all subjects
'''
power_df_concat = pd.DataFrame()
flops_df_concat = pd.DataFrame()
tracker_summary_df_concat = pd.DataFrame()
values = [delayed(get_EIT_tracker_data)(tracker_log_dir, use_cuda, read_flops)
for tracker_log_dir in tracker_log_dir_list]
tracker_data_list = compute(*values, scheduler='threads',num_workers=4)
i = 0
for td in tracker_data_list:
if td is not None:
power_df, flops_df, tracker_summary_df = td
power_df_concat = power_df_concat.append(power_df)
flops_df_concat = flops_df_concat.append(flops_df)
tracker_summary_df_concat = tracker_summary_df_concat.append(tracker_summary_df)
return tracker_summary_df_concat, flops_df_concat, power_df_concat
def collate_CC_tracker_data(log_dirs):
''' Collates CodeCarbon tracker data from a set of experiments e.g. FastSurfer results for all subjects
'''
CC_df = pd.DataFrame()
for log_dir in log_dirs:
df = | pd.read_csv(f'{log_dir}/emissions.csv') | pandas.read_csv |
'''This file holds all relevant functions necessary for starting the data analysis.
An object class for all account data is established, which will hold the raw data after import,
the processed data and all subdata configuration necessary for plotting.
The account data is provided through the account identification process in account_ident.py
Necessary functions for holiday extraction, roundies calculation as well as merging and cashbook linkage are provided in the Accounts class
The Excel file is exported at the end.'''
import datetime
import locale
import os
import platform
import numpy as np
import pandas as pd
from basefunctions import account_ident
if platform.system() == 'Windows':
locale.setlocale(locale.LC_ALL, 'German')
FOLDER_SEP = '\\'
elif platform.system() == 'Darwin':
locale.setlocale(locale.LC_ALL, 'de_DE.utf-8')
FOLDER_SEP = '/'
else:
locale.setlocale(locale.LC_ALL, 'de_DE.utf8')
FOLDER_SEP = '/'
#_______________________________________ read in longterm data for training machine learning algorithm _______________
def longtermdata_import(path, decrypt_success):
if decrypt_success:
longterm_data = pd.read_csv(path, sep=';', parse_dates=[0, 1])
else:
empty_dataframe = {'time1':np.datetime64, 'time2':np.datetime64, 'act':str, 'text':str, 'val':float, 'month':str, 'cat':str, 'main cat':str, 'acc_name':str}
longterm_data = pd.DataFrame(columns=empty_dataframe.keys()).astype(empty_dataframe)
#extract saved account names in longterm_data
saved_accnames = list(longterm_data['acc_name'].unique())
saved_dataframe = {} #stored dataframes from import
for account_name in saved_accnames: #iterate through list with indices
saved_dataframe[account_name] = longterm_data.loc[longterm_data['acc_name'] == account_name] #get saved dataframes
return saved_dataframe
def longterm_export(path, saved_dataframe):#needs to be outside class in case program is closed before data integration
longterm_data = pd.DataFrame(columns=['time1', 'time2', 'act', 'text', 'val', 'month', 'cat', 'main cat', 'acc_name'])
for account_name in saved_dataframe.keys():
account_name_concat = saved_dataframe[account_name]
account_name_concat['acc_name'] = account_name #set account name in dataframe to be saved
        longterm_data = pd.concat([longterm_data, account_name_concat]) #concatenated data
longterm_data.to_csv(path, index=False, sep=';') #export data
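# Hedged round-trip sketch for the long-term store (path and decrypt flag are hypothetical):
# the import returns one dataframe per saved account name, the export concatenates the
# per-account dataframes back into a single csv.
def _example_longterm_roundtrip(path='longterm_data.csv'):
    saved = longtermdata_import(path, decrypt_success=True)  # dict: account name -> dataframe
    longterm_export(path, saved)  # writes the concatenated data back to disk
    return saved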
class AccountsData:
def __init__(self, dir_result, classifier_class, langdict, saved_dataframe):
self.langdict = langdict
## set language variable
if self.langdict['result_pathvars'][0] == 'Ergebnisse_':
self.lang_choice = 'deu'
else:
self.lang_choice = 'eng'
#change locale to English
if platform.system() == 'Windows':
locale.setlocale(locale.LC_ALL, 'English')
elif platform.system() == 'Darwin':
locale.setlocale(locale.LC_ALL, 'en_US.utf-8')
else:
locale.setlocale(locale.LC_ALL, 'en_US.utf8')
self.current_date = datetime.datetime.now().strftime("%b'%y")
self.acc_names_rec = {} #longterm excel, excel and csv files
self.folder_sep = FOLDER_SEP
self.dir_result = dir_result+FOLDER_SEP+self.langdict['result_pathvars'][0]+self.current_date+FOLDER_SEP #adjusted complete path
self.folder_res = {}
self.raw_data = {}
self.raw_data_header = {}
self.basis_data = {}
self.month_data = {}
self.cat_data = {}
self.plotting_list = {}
self.error_codes = {}
self.classifier = classifier_class
self.saved_dataframe = saved_dataframe
def process_data(self, raw_fileinfo, import_type):
#unpack tuple with fileinformation
filename, filepath = raw_fileinfo
##read in csv-files from different account_types
##start functions for getting csv account type info and data input & adjustment
if import_type == 'csv_analyse':
#get account information
while True:
try:
acctypename_importedfile, raw_data, account_infos = account_ident.account_info_identifier(filepath)
except:
self.error_codes[filename] = 'Err01'
break
##unpack account read-in info tuple
header_columns, column_join, column_drop, acc_subtype, plot_info = account_infos #acc_subtype ('giro' or 'credit') currently not used, but kept in tuple list for possible later use
self.raw_data[filename] = raw_data
self.raw_data_header[filename] = header_columns
#data preprocess
try:
                    #select Euro entries
if "Währung" in header_columns:
self.basis_data[filename] = self.raw_data[filename][self.raw_data[filename]["Währung"] == "EUR"].copy()
elif "currency" in header_columns:
self.basis_data[filename] = self.raw_data[filename][self.raw_data[filename]["currency"] == "EUR"].copy()
else:
self.basis_data[filename] = self.raw_data[filename].copy()
                    ##do adjustment to transactions info (join columns to get more info) for categorization. Output is forced to be string datatype
if column_join[0] == 'yes':
self.basis_data[filename]['text'] = self.basis_data[filename][self.basis_data[filename].columns[column_join[1]]].apply(lambda x: str(' || '.join(x.dropna())), axis=1)
else:
pass
                    ##drop columns if necessary and rearrange columns
if column_drop[0] == 'yes':
self.basis_data[filename].drop(self.basis_data[filename].columns[column_drop[1]], axis=1, inplace=True)
self.basis_data[filename] = self.basis_data[filename].reindex(columns=self.basis_data[filename].columns[column_drop[2]])
else:
pass
##insert "act" column if necessary (currently only dkb_credit)
if len(self.basis_data[filename].columns) == 4:
self.basis_data[filename].insert(2, 'act', self.langdict['act_value'][0])
else:
pass
self.basis_data[filename].columns = ["time1", "time2", "act", "text", "val"]
                    #delete rows where both time1 & time2 are empty
self.basis_data[filename] = self.basis_data[filename].drop(self.basis_data[filename][(self.basis_data[filename]['time1'].isna())&(self.basis_data[filename]['time2'].isna())].index)
#adjust for missing time values in "time1"-columns
self.basis_data[filename]['time1'].mask(self.basis_data[filename]['time1'].isna(), self.basis_data[filename]['time2'], inplace=True) ##new
#make month-column and categorize
self.basis_data[filename]['month'] = self.basis_data[filename]['time1'].apply(lambda dates: dates.strftime('%b %Y'))
except:
self.error_codes[filename] = 'Err02'
break
#check if all transaction values is Null. If yes, abort and give errorcode '03'
if self.basis_data[filename]['val'].isna().all():
self.error_codes[filename] = 'Err03'
break
else:
pass
#try categorization else give error code '04'
try:
self.basis_data[filename] = self.classifier.categorize_rawdata(self.basis_data[filename], 'csvdata')
except:
self.error_codes[filename] = 'Err04'
break
#add account name to dictionary with imported data files and their respective account names (for cashbook, savecent and long term data saving)
self.acc_names_rec[filename] = acctypename_importedfile
#add variables for subsequent handling and plotting
self.plotting_list[filename] = plot_info
self.folder_res[filename] = self.dir_result+filename
break
#Plot manually manipulated excel files
elif import_type == 'xls_analyse':
#read excel
try:
raw_data = pd.read_excel(filepath, sheet_name=self.langdict['sheetname_basis'][0], engine='openpyxl') #main category is not read-in but separately assigned)
try:
testname = raw_data[raw_data.columns[8]][0] #get account name if existing
#check if testname is nan-value (as nan are not identical it can be checked with !=)
if testname != testname: #if imported account name field is Nan-value use "not assigned"
acctypename_importedfile = self.langdict['accname_labels'][1] #set acctypename to not assigned
else:
acctypename_importedfile = testname #take name which was read in
except: # if bank name is not existing set it to not assigned#
acctypename_importedfile = self.langdict['accname_labels'][1]
raw_data = raw_data[raw_data.columns[range(0, 7)]].copy()
raw_data.dropna(subset=raw_data.columns[[0, 4]], inplace=True) #delete rows where value in time1 or val column is empty
#check if raw data is in the right data format and contains at least one row
if (raw_data.columns.tolist() == self.langdict['sheetname_basis'][1][:-2]) and (len(raw_data) != 0):
#headers must be identical to those outputted via excel
raw_data.columns = ["time1", "time2", "act", "text", "val", 'month', 'cat']
#save histo data to saved file
histo_data = raw_data[['act', 'text', 'cat']].copy() #get a copy of relevant data for categorization
self.classifier.machineclassifier.adjust_histo_data(histo_data) # add data to existing history dataset
del histo_data
self.basis_data[filename] = raw_data.copy()
self.basis_data[filename] = self.classifier.assign_maincats(self.basis_data[filename]) #assign main categories
self.acc_names_rec[filename] = acctypename_importedfile #add account name to dictionary with imported data files and their respective account names (for cashbook, savecent and long term data saving)
self.plotting_list[filename] = 'normal'
self.folder_res[filename] = self.dir_result+filename
else:
self.error_codes[filename] = 'Err01'
del raw_data
except:
self.error_codes[filename] = 'Err01'
# Excel file for concatenation
elif import_type == 'xls_longterm':
#Longterm analysis: Read-in excel to concat csvs
try:
raw_data = pd.read_excel(filepath, sheet_name=self.langdict['sheetname_basis'][0], engine='openpyxl') #main category is not read-in but separately assigned)
#variabel "assigned_accname" does not need to be checked, as it is always 'use_acctype' for longterm excel concat
try:#try to get the account name from excel
testname = raw_data[raw_data.columns[8]][0] #get account name if existing
#check if testname is nan-value (as nan are not identical it can be checked with !=)
if testname != testname: #if imported account name field is Nan-value use "not assigned"
acctypename_importedfile = self.langdict['accname_labels'][1] #set acctypename to not assigned
else:
acctypename_importedfile = testname #take name which was read in
except: # if bank name is not existing set it to not assigned#
acctypename_importedfile = self.langdict['accname_labels'][1]
raw_data = raw_data[raw_data.columns[range(0, 7)]].copy()
raw_data.dropna(subset=raw_data.columns[[0, 4]], inplace=True) #delete rows where value in time1 or val column is empty
#check if raw data is in the right data format and contains at least one row
if (raw_data.columns.tolist() == self.langdict['sheetname_basis'][1][:-2]) and (len(raw_data) != 0):
#headers must be identical to those outputted via excel
raw_data.columns = ["time1", "time2", "act", "text", "val", 'month', 'cat']
#save histo data to saved file
histo_data = raw_data[['act', 'text', 'cat']].copy() #get a copy of relevant data for categorization
self.classifier.machineclassifier.adjust_histo_data(histo_data) # add data to existing history dataset
del histo_data
self.basis_data[filename] = raw_data.copy()
self.basis_data[filename] = self.classifier.assign_maincats(self.basis_data[filename]) #assign main categories
                    #account names for the excel extra-import are only needed because the concatenation function searches for identical "acc_name" values
                    self.basis_data[filename]['acc_name'] = None #create acc_name column and set values to NaN
                    self.basis_data[filename].at[0, 'acc_name'] = acctypename_importedfile #set account name in the basis dataframe for possible concatenation
else:
self.error_codes[filename] = 'Err01'
del raw_data
except:
self.error_codes[filename] = 'Err01'
# Excel file for concatenation
elif import_type == 'xls_cashbook':
#cashbok analysis: Read-in to append info to csvs
try:
raw_data = pd.read_excel(filepath, sheet_name=self.langdict['cashbookvars_name'][0], usecols=[0, 1, 2, 3, 4, 5], engine='openpyxl')
raw_data.columns = ["time1", "text", "val", "cat", "acc_name", "cashcat"]
raw_data = raw_data[raw_data['time1'].isna() == False]
#adjust categories if no value is set
if raw_data[['time1', 'text', 'val']].isnull().values.any() == False:#reject cashbook if there are empty values in date, value or text
raw_data['val'] = -raw_data['val']
raw_data["time2"] = raw_data["time1"]
raw_data['month'] = raw_data['time1'].apply(lambda dates: dates.strftime('%b %Y'))
raw_data['act'] = self.langdict['cashbookvars_name'][1]
#do categorization for empty values in cashbook, get main cats and reorder dataframe
raw_data = self.classifier.categorize_rawdata(raw_data, 'cashbook')
raw_data = raw_data.reindex(columns=raw_data.columns[[0, 6, 8, 1, 2, 7, 3, 9, 4, 5]])
self.basis_data[self.langdict['cashbookvars_name'][0]] = raw_data.copy()
else:
self.error_codes[filename] = 'Err01'
del raw_data
except:
self.error_codes[filename] = 'Err01'
else:#no action needed
pass
def assign_fileaccnames(self, assign_list):
for entry in assign_list:
#entry[0] equals filename / entry[1] account name
#set account name to dictionary holding all account names (needed for cashbook, longterm and savecent)
self.acc_names_rec[entry[0]] = entry[1]
#create account name column for excel export
self.basis_data[entry[0]]['acc_name'] = None #create account name column and set values to Nan
self.basis_data[entry[0]].at[0, 'acc_name'] = entry[1] #set account name to basis dataframe (will be exported to excel)
def sorting_processor(self, element_name, balance_row_name, group_name, value_name):
basis_data_subset = self.basis_data[element_name].copy() #create subset
#make month data
self.month_data[element_name] = basis_data_subset.groupby('month', sort=False)[value_name].sum().reset_index() ##get monthly overview
if element_name == self.langdict['dataname_savecent'][0]: #do sorting of month data differently for savecents
self.month_data[element_name] = self.month_data[element_name].sort_values([value_name], ascending=False)
self.month_data[element_name].columns = ['month', 'val']
        else: #sort monthly data for all other dataframes so the first month ends up at the top (respectively left) in the month plot
self.month_data[element_name] = self.month_data[element_name][::-1]
month_number = self.month_data[element_name]['month'].nunique()
#process data and aggregate based on sorting type(category/main category)
grouped_data = basis_data_subset.groupby(group_name, sort=False)[value_name].sum().reset_index()
balance_row = pd.DataFrame([[balance_row_name, sum(basis_data_subset[value_name])]], columns=list(grouped_data.columns))
grouped_data = grouped_data.sort_values([value_name], ascending=False).reset_index(drop=True) #sort by values to have all positive values at top (necessary to get indices
income_data = grouped_data.loc[(grouped_data[value_name] > 0)].copy() #get positive valued entries
#get negative valued entries based on length of positive valued dataframe
if len(income_data.index) > 0:
cost_data = grouped_data[income_data.index[-1]+1:].copy()
else:
cost_data = grouped_data[0:].copy()
cost_data = cost_data.sort_values([value_name]) # sort negative valued dataframe, with most negative at top
result_data = income_data.append(cost_data, ignore_index=True) #append negative dataframe to positive dataframe
result_data = result_data.append(balance_row, ignore_index=True) # add balance row
result_data['val_month'] = result_data[value_name]/(month_number) #create value per month
return result_data
def month_cat_maker(self):
##categorize data and sort ascending. Same goes for monthly data
for element_name in list(self.folder_res.keys()):
if element_name == self.langdict['dataname_savecent'][0]:
main_cats = "empty"
subcats = self.sorting_processor(element_name, self.langdict['balance_savecent'][0], 'acc_origin', 'savecent')
subcats.columns = ['cat', 'val', 'val_month']
elif element_name == self.langdict['cashbookvars_name'][0]:
subcats = self.sorting_processor(element_name, self.langdict['balance_cashbook'][1], 'cat', 'val')
main_cats = self.sorting_processor(element_name, self.langdict['balance_cashbook'][1], 'acc_name', 'val') #main cats equals account name sorting
#rename columns maincat cashbook from 'acc_name' to 'cat'
main_cats.columns = ['cat', 'val', 'val_month']
elif element_name == self.langdict['holidayvars_name'][0]:
main_cats = "empty"
subcats = self.sorting_processor(element_name, self.langdict['balance_holiday'][0], 'cat', 'val')
else: #make cat data and month data for all other dataframes
main_cats = self.sorting_processor(element_name, self.langdict['balance_normal'][0], 'main cat', 'val') # create sorted dataframe for main categories
subcats = self.sorting_processor(element_name, self.langdict['balance_normal'][0], 'cat', 'val') # create sorted dataframe for categories
subcats = self.classifier.assign_maincats(subcats) #add main category column to cat data for later use
subcats.loc[subcats['cat'] == self.langdict['balance_normal'][0], 'main cat'] = self.langdict['balance_normal'][0] #adjust main category for balance category
subcats = subcats.reindex(columns=['main cat', 'cat', 'val', 'val_month'])#reorder columns
self.cat_data[element_name] = (subcats, main_cats)
#take saved long term data into data evaluation process
def longterm_evaluate(self): #this function is only called, when user opts for long term data evaluation
for account_name in self.saved_dataframe.keys():
account_dataframe = self.saved_dataframe[account_name].copy()
account_dataframe.reset_index(drop=True, inplace=True) #reset index to be able to place acc_name on index 0
account_dataframe['acc_name'] = None # clear account name
account_dataframe.at[0, 'acc_name'] = account_name#set new account type for this subframe
longterm_data_name = self.langdict['longterm_name'][0]+account_name # "Longterm_"+account fullname
self.basis_data[longterm_data_name] = account_dataframe #add longterm basis data to be analysed
self.acc_names_rec[longterm_data_name] = account_name #add longterm data name to recorded account names list--> makes cashbook evaluation possible
self.plotting_list[longterm_data_name] = 'normal' #set plotting info
self.folder_res[longterm_data_name] = self.dir_result+longterm_data_name # create export folder
# add newly added data to longterm data
def longterm_savedata(self):
#update saved longterm data, evaluate and output if user opted for it
#convert saved dataframes in dict entries into lists
for saved_element in self.saved_dataframe.keys():
self.saved_dataframe[saved_element] = [self.saved_dataframe[saved_element]]
#get basis dataframe for every assigned account name
for element_name in self.acc_names_rec.keys():
if self.acc_names_rec[element_name] != self.langdict['accname_labels'][1]: #check if the name is 'not assigned'. If yes skip, else import basis data
try: #if list with account name already exists, add dataframe
self.saved_dataframe[self.acc_names_rec[element_name]].append(self.basis_data[element_name])
except: #create list for account name with dataframe
self.saved_dataframe[self.acc_names_rec[element_name]] = [self.basis_data[element_name]]
else:
pass #nothing to do
#generate new dataframes
longterm_data_prep = {}
for account_name in self.saved_dataframe.keys():
account_name_concat = pd.concat(self.saved_dataframe[account_name]) #concat data for every account and list it
account_name_concat.drop_duplicates(subset=['time1', 'text', 'val'], inplace=True, ignore_index=True) #get rid of doubled entry rows with respect to time1,text and value
account_name_concat = account_name_concat.sort_values(['time1'], ascending=False).reset_index(drop=True) #sort values by booking date and reset index
longterm_data_prep[account_name] = account_name_concat #save concatted datframe to dict to be able to concat all data frames and store it as csv in longterm export
return longterm_data_prep
def concatter(self, concat_list):
#create list to find concat choice in datasets
for item in concat_list:
#tuple unpack concat choice values
framename, concat_choice = item
#get names for new datasets
concat_dataframes = []
accountnames = []# determine wether all dataframe have same account type or not
for choicename in concat_choice:
concat_dataframes.append(self.basis_data[choicename])
                accountnames.append(self.basis_data[choicename][self.basis_data[choicename].columns[8]][0]) #read account name of the dataframes to be concatenated and save it to the list
#concat and add to data-object
self.basis_data[framename] = pd.concat(concat_dataframes)
self.basis_data[framename].drop_duplicates(subset=['time1', 'text', 'val'], inplace=True, ignore_index=True) #get rid of doubled entry rows with respect to time1, text and value
self.basis_data[framename] = self.basis_data[framename].sort_values(['time1'], ascending=False).reset_index(drop=True)#sort values by booking date and reset index
#add account name
self.basis_data[framename]['acc_name'] = None #clear account name column
if all(elem == accountnames[0] for elem in accountnames): #if all entries in accountnames are identical, take first value of list else write unclear
self.basis_data[framename].at[0, 'acc_name'] = accountnames[0] #set account name
else:
self.basis_data[framename].at[0, 'acc_name'] = self.langdict['accname_labels'][1] #set account name to not assigned
self.folder_res[framename] = self.dir_result+framename
self.plotting_list[framename] = 'normal'
def bundle_holiday(self):
#concat holiday data from different csv-files
holidayvar_name = self.langdict['holidayvars_name'][0]
self.basis_data[holidayvar_name] = pd.DataFrame(columns=["time1", "time2", "act", "text", "val", "month", "cat", "main cat", "acc_name"])
for element_name in list(self.folder_res.keys()):
if not element_name in (self.langdict['dataname_savecent'][0], self.langdict['cashbookvars_name'][0]): #cashbook and savecentnames
data_hol = self.basis_data[element_name].loc[self.basis_data[element_name]['main cat'] == self.langdict['holiday_searchwords'][0]].copy()
self.basis_data[holidayvar_name] = self.basis_data[holidayvar_name].append(data_hol, ignore_index=True)
self.basis_data[holidayvar_name].drop_duplicates(subset=['time1', 'time2', 'text', 'val'], inplace=True, ignore_index=True) #drop dubplicates if existing
if len(self.basis_data[holidayvar_name].index) > 0:
self.basis_data[holidayvar_name] = self.basis_data[holidayvar_name].sort_values(['time1'], ascending=False)
self.basis_data[holidayvar_name]['acc_name'] = None #set all account name values from imports to NaN
self.basis_data[holidayvar_name].at[0, 'acc_name'] = self.langdict['accname_labels'][1] #set account type to not assigned
self.folder_res[holidayvar_name] = self.dir_result+self.langdict['holidayvars_name'][1]
self.plotting_list[holidayvar_name] = 'basic'
else:
del self.basis_data[holidayvar_name]
def savecent_calculation(self):
        #account for difference between actual cost value and full amount (rounding). Gives a number which can be invested every month. Since the data structure will be different, the results will be saved and plotted separately.
savecentvar_name = self.langdict['dataname_savecent'][0]
self.basis_data[savecentvar_name] = | pd.DataFrame(columns=["time1", "time2", "act", "text", "val", "month", "cat", "main cat", "savecent", "acc_origin"]) | pandas.DataFrame |
import logging
import os
import pickle
import random
from pathlib import Path
import itertools
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from src.data.make_dataset import DATE_COLUMNS, CAT_COLUMNS
import utm
project_dir = Path(__file__).resolve().parents[2]
def rule(row):
    lat, long, _, _ = utm.from_latlon(row["Surf_Latitude"], row["Surf_Longitude"], 45, 'K')
return | pd.Series({"lat": lat, "long": long}) | pandas.Series |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import unittest
import warnings
import pandas as pd
import numpy as np
from qiime2 import Artifact
from qiime2.metadata import (Metadata, CategoricalMetadataColumn,
NumericMetadataColumn)
from qiime2.core.testing.util import get_dummy_plugin, ReallyEqualMixin
class TestInvalidMetadataConstruction(unittest.TestCase):
def test_non_dataframe(self):
with self.assertRaisesRegex(
TypeError, 'Metadata constructor.*DataFrame.*not.*Series'):
Metadata(pd.Series([1, 2, 3], name='col',
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_no_ids(self):
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({}, index=pd.Index([], name='id')))
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({'column': []},
index=pd.Index([], name='id')))
def test_invalid_id_header(self):
# default index name
with self.assertRaisesRegex(ValueError, r'Index\.name.*None'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'])))
with self.assertRaisesRegex(ValueError, r'Index\.name.*my-id-header'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'c'], name='my-id-header')))
def test_non_str_id(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata ID.*type.*float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', np.nan, 'c'], name='id')))
def test_non_str_column_name(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata column name.*type.*'
'float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
np.nan: [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_empty_id(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata ID.*at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', '', 'c'], name='id')))
def test_empty_column_name(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata column name.*'
'at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'': [4, 5, 6]}, index=pd.Index(['a', 'b', 'c'], name='id')))
def test_pound_sign_id(self):
with self.assertRaisesRegex(
ValueError, "metadata ID.*begins with a pound sign.*'#b'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', '#b', 'c'], name='id')))
def test_id_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata ID 'sample-id'.*conflicts.*reserved.*"
"ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'sample-id', 'c'], name='id')))
def test_column_name_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata column name 'featureid'.*conflicts.*"
"reserved.*ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'featureid': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_duplicate_ids(self):
with self.assertRaisesRegex(ValueError, "Metadata IDs.*unique.*'a'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'a'], name='id')))
def test_duplicate_column_names(self):
data = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
with self.assertRaisesRegex(ValueError,
"Metadata column names.*unique.*'col1'"):
Metadata(pd.DataFrame(data, columns=['col1', 'col2', 'col1'],
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_unsupported_column_dtype(self):
with self.assertRaisesRegex(
TypeError, "Metadata column 'col2'.*unsupported.*dtype.*bool"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': [True, False, True]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_categorical_column_unsupported_type(self):
with self.assertRaisesRegex(
TypeError, "CategoricalMetadataColumn.*strings or missing "
r"values.*42\.5.*float.*'col2'"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', 'bar', 42.5]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_categorical_column_empty_str(self):
with self.assertRaisesRegex(
ValueError, "CategoricalMetadataColumn.*empty strings.*"
"column 'col2'"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', '', 'bar']},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_numeric_column_infinity(self):
with self.assertRaisesRegex(
ValueError, "NumericMetadataColumn.*positive or negative "
"infinity.*column 'col2'"):
Metadata(pd.DataFrame(
{'col1': ['foo', 'bar', 'baz'],
'col2': [42, float('+inf'), 4.3]},
index=pd.Index(['a', 'b', 'c'], name='id')))
class TestMetadataConstructionAndProperties(unittest.TestCase):
def assertEqualColumns(self, obs_columns, exp):
obs = [(name, props.type) for name, props in obs_columns.items()]
self.assertEqual(obs, exp)
def test_minimal(self):
md = Metadata(pd.DataFrame({}, index=pd.Index(['a'], name='id')))
self.assertEqual(md.id_count, 1)
self.assertEqual(md.column_count, 0)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('a',))
self.assertEqualColumns(md.columns, [])
def test_single_id(self):
index = pd.Index(['id1'], name='id')
df = pd.DataFrame({'col1': [1.0], 'col2': ['a'], 'col3': ['foo']},
index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 1)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1',))
self.assertEqualColumns(md.columns,
[('col1', 'numeric'), ('col2', 'categorical'),
('col3', 'categorical')])
def test_no_columns(self):
index = pd.Index(['id1', 'id2', 'foo'], name='id')
df = pd.DataFrame({}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 0)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'foo'))
self.assertEqualColumns(md.columns, [])
def test_single_column(self):
index = pd.Index(['id1', 'a', 'my-id'], name='id')
df = pd.DataFrame({'column': ['foo', 'bar', 'baz']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 1)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'a', 'my-id'))
self.assertEqualColumns(md.columns, [('column', 'categorical')])
def test_retains_column_order(self):
# Supply DataFrame constructor with explicit column ordering instead of
# a dict.
index = pd.Index(['id1', 'id2', 'id3'], name='id')
columns = ['z', 'a', 'ch']
data = [
[1.0, 'a', 'foo'],
[2.0, 'b', 'bar'],
[3.0, 'c', '42']
]
df = | pd.DataFrame(data, index=index, columns=columns) | pandas.DataFrame |
import os
import random
from itertools import cycle
import tsfel as ts
import tensorflow as tf
import numpy as np
from sklearn import metrics
import sensormotion as sm
import matplotlib.pyplot as plt
import math
from shutil import copy2
import seaborn as sns
import pandas as pd
from imblearn.metrics import sensitivity_specificity_support
from imblearn.over_sampling import SMOTE, ADASYN, KMeansSMOTE, RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler, NearMiss, EditedNearestNeighbours
from scipy import stats
from sklearn import preprocessing
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import VarianceThreshold, SelectFromModel, RFE
from sklearn.metrics import precision_recall_fscore_support, accuracy_score, classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV, train_test_split, KFold
from pathlib import Path
from datetime import datetime
from sklearn.svm import LinearSVC, SVR
from b_har.utility.configurator import Configurator, PrivateConfigurator
from b_har.models import Models, MlModels
import logging
import progressbar
from timeit import default_timer as timer
class B_HAR:
_data_delimiters = {
'tdc': (1, -1),
'tdcp': (1, -2),
'dc': (0, -1),
'dcp': (0, -2)
}
# --- Public Methods ---
def __init__(self, config_file_path) -> None:
super().__init__()
self.__cfg_path = config_file_path
def stats(self):
"""
Prints out the statistics of a given dataset
:return: None
"""
self._init_log()
df = self._decode_csv(ds_dir=Configurator(self.__cfg_path).get('dataset', 'path'),
separator=Configurator(self.__cfg_path).get('dataset', 'separator', fallback=' '),
header_type=Configurator(self.__cfg_path).get('dataset', 'header_type'),
has_header=Configurator(self.__cfg_path).getboolean('dataset', 'has_header')
)
header_type = Configurator(self.__cfg_path).get('dataset', 'header_type')
group_by = Configurator(self.__cfg_path).get('settings', 'group_by')
# Create stats directory
stats_dir = os.path.join(Configurator(self.__cfg_path).get('settings', 'log_dir'), 'stats')
Path(stats_dir).mkdir(parents=True, exist_ok=True)
# Plot settings
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
values = df[group_by].value_counts()
# Bar Plot Class
plt.title('Class Distribution')
df[group_by].value_counts().plot(kind='bar', color=colors)
i = 0
offset = 1000
for value in values:
plt.text(i, value + offset, str(np.round(value / np.sum(df[group_by].value_counts()) * 100)) + '%')
i += 1
plt.xlabel('Classes')
plt.ylabel('Instances')
plt.savefig(os.path.join(Configurator(self.__cfg_path).get('settings', 'log_dir'), 'stats/class_barplot.png'))
plt.show()
plt.close()
# Bar Plot Patients
if 'p' in header_type:
plt.title('Data per Patient')
df['P_ID'].value_counts().plot(kind='bar', color=colors)
plt.xlabel('Patients')
plt.ylabel('Instances')
plt.savefig(
os.path.join(Configurator(self.__cfg_path).get('settings', 'log_dir'), 'stats/patients_barplot.png'))
plt.show()
plt.close()
# Boxplot Data
df.boxplot(
column=list(df.columns[self._data_delimiters[header_type][0]: self._data_delimiters[header_type][1]]))
plt.savefig(os.path.join(Configurator(self.__cfg_path).get('settings', 'log_dir'), 'stats/data_boxplot.png'))
plt.show()
plt.close()
# Boxplot Class
df.boxplot(
column=list(df.columns[self._data_delimiters[header_type][0]: self._data_delimiters[header_type][1]]),
by='CLASS')
plt.savefig(os.path.join(Configurator(self.__cfg_path).get('settings', 'log_dir'), 'stats/class_boxplot.png'))
plt.show()
plt.close()
def get_baseline(self, ml_models, dl_models, discard_class: list = None, discard_patients: list = None,
ids_test: list = None):
"""
Evaluates the input dataset with different machine learning and deep learning models in order to get a baseline for
future analysis and comparisons.
        :param ids_test: list of patient ids used as the testing set
:param ml_models: list of machine learning models
:param dl_models: list of cnn models
        :param discard_class: list of classes to exclude from the analysis
        :param discard_patients: list of patient ids to exclude from the analysis
:return:
"""
self._init_log()
        # Evaluate the number of samples per time window
time_window_size = int(Configurator(self.__cfg_path).getint('settings', 'sampling_frequency') *
Configurator(self.__cfg_path).getfloat('settings', 'time'))
# --- Load Data ---
try:
dataset = self._decode_csv(ds_dir=Configurator(self.__cfg_path).get('dataset', 'path'),
separator=Configurator(self.__cfg_path).get('dataset', 'separator', fallback=' '),
header_type=Configurator(self.__cfg_path).get('dataset', 'header_type'),
has_header=Configurator(self.__cfg_path).getboolean('dataset', 'has_header')
)
except Exception as e:
print('Failed to load data.')
print(e.args)
exit(10)
# -----------------
# --- Data Cleaning ---
try:
dataset = self._clean_data(df=dataset,
sampling_frequency=Configurator(self.__cfg_path).getint('settings', 'sampling_frequency'),
high_cutoff=Configurator(self.__cfg_path).getint('cleaning', 'high_cut', fallback=None),
low_cutoff=Configurator(self.__cfg_path).getint('cleaning', 'low_cut', fallback=None),
sub_method=Configurator(self.__cfg_path).get('cleaning', 'sub_method'),
header_type=Configurator(self.__cfg_path).get('dataset', 'header_type')
)
except Exception as e:
print('Failed to clean data.')
print(e.args)
exit(20)
# -----------------
# --- Data Treatment ---
data_treatment_type = Configurator(self.__cfg_path).get('settings', 'data_treatment')
if data_treatment_type == 'features_extraction':
try:
dt_dataset = self._features_extraction(df=dataset,
sampling_frequency=Configurator(self.__cfg_path).getint('settings', 'sampling_frequency'),
time_window=time_window_size,
overlap=Configurator(self.__cfg_path).getfloat('settings', 'overlap'),
header_type=Configurator(self.__cfg_path).get('dataset', 'header_type'))
except Exception as e:
print('Failed to extract features.')
print(e.args)
exit(30)
elif data_treatment_type == 'segmentation':
try:
dt_dataset = self._apply_segmentation(df=dataset,
sampling_frequency=Configurator(self.__cfg_path).getint('settings', 'sampling_frequency'),
time_window_size=time_window_size,
overlap=Configurator(self.__cfg_path).getfloat('settings', 'overlap'))
except Exception as e:
print('Failed during data segmentation.')
print(e.args)
exit(40)
elif data_treatment_type == 'raw':
dt_dataset = dataset
else:
            print('*** Fallback: %s not recognised, using raw instead ***' % data_treatment_type)
            logging.info('*** Fallback: %s not recognised, using raw instead ***' % data_treatment_type)
dt_dataset = dataset
# ----------------------
del dataset
# --- Preprocessing ---
try:
X_train_set, X_validation_set, Y_train_set, Y_validation_set, class_labels = self._data_preprocessing(
df=dt_dataset,
drop_class=discard_class,
drop_patient=discard_patients,
split_method=Configurator(self.__cfg_path).get('preprocessing', 'split_method'),
normalisation_method=Configurator(self.__cfg_path).get('preprocessing', 'normalisation_method'),
selection_method=Configurator(self.__cfg_path).get('preprocessing', 'selection_method'),
balancing_method=Configurator(self.__cfg_path).get('preprocessing', 'balancing_method'),
ids_test_set=ids_test
)
except Exception as e:
print('Failed during data preprocessing.')
print(e.args)
exit(50)
# ---------------------
del dt_dataset
# --- Start ML Evaluation ---
if Configurator(self.__cfg_path).getboolean('settings', 'use_ml'):
self._start_ml_evaluation(X_train_set, Y_train_set, X_validation_set, Y_validation_set, ml_models)
# ---------------------------
# --- Start DL Evaluation ---
if Configurator(self.__cfg_path).getboolean('settings', 'use_dl'):
self._dl_evaluation(X_train_set, Y_train_set, X_validation_set, Y_validation_set, dl_models, time_window_size)
# ---------------------------
# --- Shut down logging ---
logging.shutdown()
# ---------------------------
# --- Private Methods ---
def _get_cfg_path(self):
return self.__cfg_path
def _apply_segmentation(self, df, sampling_frequency, time_window_size, overlap):
has_patient = 'p' in Configurator(self.__cfg_path).get('dataset', 'header_type')
if has_patient:
x, y, p = self._get_window(df, sampling_frequency, time_window_size, overlap)
else:
x, y = self._get_window(df, sampling_frequency, time_window_size, overlap)
p = None
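        # Flatten each (time_window x channels) segment into a single row so it fits in a 2-D DataFrame;
        # _dl_evaluation later applies the inverse reshape to rebuild the CNN input windows.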
x_df = pd.DataFrame(x.reshape(-1, x.shape[1] * x.shape[2]))
x_df['CLASS'] = y
if has_patient and p is not None:
x_df['P_ID'] = p
return x_df
def _dl_evaluation(self, x_train, y_train, x_val, y_val, dl_models, time_window_size):
# --- Use Also Features ---
if Configurator(self.__cfg_path).getboolean('training', 'use_features') and \
Configurator(self.__cfg_path).get('settings', 'data_treatment') == 'features_extraction':
logging.info('--- Features as input of CNN ---')
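            # Each feature vector is fed to the CNN as a 1 x n_features input, so kernel and pooling sizes are forced to 1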
# Set CNN input size
n_features = x_train.shape[1]
PrivateConfigurator().set('cnn', 'input_shape_x', '1')
PrivateConfigurator().set('cnn', 'input_shape_y', str(n_features))
# Set CNN kernel sizes
PrivateConfigurator().set('m1_acc', 'conv-kernel', '1')
PrivateConfigurator().set('m1_acc', 'pool-size', '1')
# Reshape to feed CNN
X_training_features = x_train.reshape((-1, 1, n_features))
X_testing_features = x_val.reshape((-1, 1, n_features))
y_training_features = np.asarray(y_train).reshape(-1)
y_testing_features = np.asarray(y_val).reshape(-1)
# Evaluation
self._start_cnn_evaluation(X_training_features,
y_training_features,
X_testing_features,
y_testing_features,
dl_models,
Configurator(self.__cfg_path).getint('training', 'epochs'),
Configurator(self.__cfg_path).getint('training', 'k_fold'),
Configurator(self.__cfg_path).getfloat('training', 'loss_threshold'),
np.unique(y_train),
'Features-as-input')
# ------------------------
# --- Use accelerometer data as input to the CNN ---
logging.info('--- Accelerometer data as input of CNN ---')
if Configurator(self.__cfg_path).get('settings', 'data_treatment') == 'segmentation':
# Get windowed data
n_features = int(x_train.shape[1] / time_window_size)
x_train = x_train.reshape((-1, time_window_size, n_features))
x_val = x_val.reshape((-1, time_window_size, n_features))
# Set CNN input size
PrivateConfigurator().set('cnn', 'input_shape_x', str(x_train.shape[1]))
PrivateConfigurator().set('cnn', 'input_shape_y', str(x_train.shape[2]))
# Set CNN kernel sizes
if n_features <= 3:
PrivateConfigurator().set('m1_acc', 'conv-kernel', '1')
PrivateConfigurator().set('m1_acc', 'pool-size', '1')
else:
PrivateConfigurator().set('m1_acc', 'conv-kernel', '3')
PrivateConfigurator().set('m1_acc', 'pool-size', '2')
# Evaluation
logging.info('--> Training set shape: %s' % str(x_train.shape))
logging.info('--> Validation set shape: %s' % str(x_val.shape))
self._start_cnn_evaluation(x_train,
y_train,
x_val,
y_val,
dl_models,
Configurator(self.__cfg_path).getint('training', 'epochs'),
Configurator(self.__cfg_path).getint('training', 'k_fold'),
Configurator(self.__cfg_path).getfloat('training', 'loss_threshold'),
np.unique(y_train),
'Accelerometer-data')
# ---------------------------
if Configurator(self.__cfg_path).get('settings', 'data_treatment') == 'raw':
# Set CNN input size
n_features = x_train.shape[1]
PrivateConfigurator().set('cnn', 'input_shape_x', '1')
PrivateConfigurator().set('cnn', 'input_shape_y', str(n_features))
# Set CNN kernel sizes
PrivateConfigurator().set('m1_acc', 'conv-kernel', '1')
PrivateConfigurator().set('m1_acc', 'pool-size', '1')
# Reshape to feed CNN
X_training_features = x_train.reshape((-1, 1, n_features))
X_testing_features = x_val.reshape((-1, 1, n_features))
y_training_features = np.asarray(y_train).reshape(-1)
y_testing_features = np.asarray(y_val).reshape(-1)
logging.info('--> Training set shape: %s' % str(x_train.shape))
logging.info('--> Validation set shape: %s' % str(x_val.shape))
# Evaluation
self._start_cnn_evaluation(X_training_features,
y_training_features,
X_testing_features,
y_testing_features,
dl_models,
Configurator(self.__cfg_path).getint('training', 'epochs'),
Configurator(self.__cfg_path).getint('training', 'k_fold'),
Configurator(self.__cfg_path).getfloat('training', 'loss_threshold'),
np.unique(y_train),
'Raw')
def _apply_filter(self, df, filter_name, sample_rate, frequency_cutoff, order):
b, a = sm.signal.build_filter(frequency=frequency_cutoff,
sample_rate=sample_rate,
filter_type=filter_name,
filter_order=order)
header_type = Configurator(self.__cfg_path).get('dataset', 'header_type')
for column in list(df.columns)[self._data_delimiters[header_type][0]: self._data_delimiters[header_type][1]]:
df[column] = sm.signal.filter_signal(b, a, signal=df[column].values)
return df
def _init_log(self):
        now = datetime.now()
        log_dir_name = '%s - log' % now.strftime('%Y-%m-%d %H:%M:%S')
new_log_dir_path = os.path.join(Configurator(self.__cfg_path).get('settings', 'log_dir'), log_dir_name)
try:
# Create run log directory
Path(new_log_dir_path).mkdir(parents=True, exist_ok=True)
# Copy run settings
copy2(self._get_cfg_path(), new_log_dir_path)
# Update log dir path for future usage
Configurator(self._get_cfg_path()).set('settings', 'log_dir', new_log_dir_path)
# Configure logging
logging.basicConfig(filename=os.path.join(new_log_dir_path, 'log.rtf'), format='', level=logging.INFO)
except Exception as e:
print(e.args)
logging.info(e.args)
exit(1)
else:
logging.log(msg="Successfully created the directory %s " % new_log_dir_path, level=logging.INFO)
    def _print_val_train_stats(self, history, title, save_to_dl_dir: bool = False):
# Create training stats directory
if save_to_dl_dir:
dl_dir = os.path.join(Configurator(self.__cfg_path).get('settings', 'log_dir'), 'deep_learning')
training_stats_dir = os.path.join(dl_dir, 'training')
Path(training_stats_dir).mkdir(parents=True, exist_ok=True)
save_to = training_stats_dir
else:
Path(os.path.join(Configurator(self.__cfg_path).get('settings', 'log_dir'), 'training')).mkdir(parents=True,
exist_ok=True)
save_to = os.path.join(Configurator(self.__cfg_path).get('settings', 'log_dir'), 'training')
fig, axis = plt.subplots(2)
fig.suptitle('%s' % title)
axis[0].set_title('Accuracy')
axis[0].plot(history['accuracy'], label='acc', c='g')
axis[0].plot(history['val_accuracy'], label='val-acc', c='b')
axis[0].legend()
axis[1].set_title('Loss')
axis[1].plot(history['loss'], label='loss', c='r')
axis[1].plot(history['val_loss'], label='val-loss', c='m')
axis[1].legend()
plt.savefig(os.path.join(save_to, 'training_stats_%s.png' % title))
plt.show()
plt.close()
def _do_k_fold(self, x, y, kf, model, model_name, epochs, x_unseen=None, y_unseen=None):
# Define supporting variables
fold = 0
trained_models = dict()
initial_weights = model.get_weights()
accuracy_per_fold = list()
loss_per_fold = list()
# Do K-Fold
        # Each fold is used once as the validation set while the k - 1 remaining folds form the training set.
for train, test in kf.split(x):
fold += 1
# Define training set
x_train = x[train]
y_train = tf.keras.utils.to_categorical(y[train])
# Define testing set
x_test = x[test]
y_test = tf.keras.utils.to_categorical(y[test])
logging.info('------------------------------------------------------------------------')
logging.info('Training for fold %s ...' % str(fold))
# Training and Validation
history = model.fit(x_train, y_train,
validation_data=(x_test, y_test),
verbose=0,
epochs=epochs)
# Check if the gap between train and validation is constant
self._print_val_train_stats(history.history, '%s fold %s' % (model_name, fold),
Configurator(self.__cfg_path).getboolean('settings', 'use_dl'))
# Predict values of validation
pred = model.predict(x_test)
# Evaluate model score (Loss and Accuracy)
scores = model.evaluate(x_test, y_test, verbose=0)
logging.info('\nStats for fold %s: %s of %s; %s of %s'
% (str(fold), str(model.metrics_names[0]), str(scores[0]), str(model.metrics_names[1]),
str(scores[1] * 100))
)
accuracy_per_fold.append(scores[1] * 100)
loss_per_fold.append(scores[0])
            # Test on unseen data
            if x_unseen is not None and y_unseen is not None:
                y_unseen_predicted = model.predict(x_unseen)
            else:
                y_unseen_predicted = None
            # Measure this fold's RMSE (lower is better)
            mse = np.sqrt(metrics.mean_squared_error(y_test.argmax(1), pred.argmax(1)))
            # Save the trained model with its fold predictions, ground truth,
            # unseen-data predictions (None when no unseen set is given) and validation loss
            trained_models.update(
                {fold: (model.get_weights(), y_test, pred, y_unseen_predicted, scores[0])})
logging.info(
'Classification Report on Test Fold\n%s' % str(classification_report(y_test.argmax(1), pred.argmax(1))))
if x_unseen is not None and y_unseen is not None:
logging.info('Classification Report on Unseen Data\n%s' %
str(classification_report(y_unseen, y_unseen_predicted.argmax(1))))
logging.info("\nMean Squared Error on Unseen Data: %s" %
str(np.sqrt(metrics.mean_squared_error(y_unseen, y_unseen_predicted.argmax(1)))))
logging.info("\nFold %s score: %s\n" % (str(fold), str(mse)))
logging.info('------------------------------------------------------------------------')
# Reset model weights for the next train session
model.set_weights(initial_weights)
# Print a final summary
logging.info('------------------------------------------------------------------------')
logging.info('Score per fold')
for i in range(0, len(accuracy_per_fold)):
logging.info('------------------------------------------------------------------------')
logging.info('> Fold %s - Loss: %s - Accuracy: %s' % (str(i + 1), loss_per_fold[i], accuracy_per_fold[i]))
logging.info('------------------------------------------------------------------------')
logging.info('Average scores for all folds:')
logging.info('> Accuracy: %s (+- %s)' % (str(np.mean(accuracy_per_fold)), str(np.std(accuracy_per_fold))))
logging.info('> Loss: %s' % (str(np.mean(loss_per_fold))))
logging.info('------------------------------------------------------------------------')
return np.mean(accuracy_per_fold), trained_models # Accuracy, models
def _model_ensembles(self, trained_models, model, x_test_unseen, y_test, model_name, class_values, threshold=.40):
predictions = list()
for key in trained_models:
            # Use only models whose validation loss is under the threshold for prediction
if trained_models[key][4] <= threshold:
model.set_weights(trained_models[key][0])
y_pred = model.predict(x_test_unseen).argmax(1)
predictions.append(y_pred)
if not predictions:
logging.info('There are no predictions, models were too bad.')
else:
n_used_models = len(predictions)
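            # Majority vote: for each sample keep the class predicted most often across the retained fold models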
predictions = stats.mode(np.asarray(predictions))[0][0]
# Metrics
metrics1 = sensitivity_specificity_support(y_test, predictions, average='weighted')
metrics2 = precision_recall_fscore_support(y_test, predictions, average='weighted')
metrics3 = accuracy_score(y_test, predictions, normalize=True)
logging.log(msg='--> clf: %s\n'
' • specificity: %s\n'
' • sensitivity: %s\n'
' • precision: %s\n'
' • accuracy: %s\n'
' • recall: %s\n'
' • f1-score: %s\n' % (
                model_name, metrics1[1], metrics1[0], metrics2[0], metrics3, metrics2[1], metrics2[2]),
level=logging.INFO)
self._cm_analysis(y_test, predictions, class_values, model_name,
'%s Model Ensembles (used %s)' % (model_name, str(n_used_models)), 'deep_learning')
def _cm_analysis(self, y_true, y_pred, labels, model_name, title, path, ymap=None):
"""
Generate matrix plot of confusion matrix with pretty annotations.
The plot image is saved to disk.
args:
y_true: true label of the data, with shape (n_samples,)
y_pred: prediction of the data, with shape (n_samples,)
filename: filename of figure file to save
labels: string array, name the order of class labels in the confusion matrix.
use `clf.classes_` if using scikit-learn models.
with shape (nclass,).
ymap: dict: any -> string, length == nclass.
if not None, map the labels & ys to more understandable strings.
Caution: original y_true, y_pred and labels must align.
figsize: the size of the figure plotted.
"""
# Calc figure size based on number of class, for better visualisation
tail_size = 2
n_class = len(np.unique(y_true))
figsize = (tail_size * n_class, int(tail_size * n_class * 1.2))
if ymap is not None:
y_pred = [ymap[yi] for yi in y_pred]
y_true = [ymap[yi] for yi in y_true]
labels = [ymap[yi] for yi in labels]
cm = confusion_matrix(y_true, y_pred, labels=labels)
cm_sum = np.sum(cm, axis=1, keepdims=True)
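        # Normalise each row by the number of true samples of that class so every row sums to 100%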
cm_perc = cm / cm_sum.astype(float) * 100
annot = np.empty_like(cm).astype(str)
nrows, ncols = cm.shape
for i in range(nrows):
for j in range(ncols):
c = cm[i, j]
p = cm_perc[i, j]
if i == j:
s = cm_sum[i]
annot[i, j] = '%.1f%%\n%d/%d' % (p, c, s)
elif c == 0:
annot[i, j] = ''
else:
annot[i, j] = '%.1f%%\n%d' % (p, c)
cm = pd.DataFrame(cm, index=labels, columns=labels)
cm.index.name = 'Actual'
cm.columns.name = 'Predicted'
fig, ax = plt.subplots(figsize=figsize)
plt.title('Confusion Matrix %s' % title)
sns.heatmap(cm, annot=annot, fmt='', ax=ax)
plt.savefig(
os.path.join(Configurator(self.__cfg_path).get('settings', 'log_dir'),
'%s/CM %s %s.png' % (path, model_name, title)))
plt.show()
plt.close(fig)
plt.close()
def _select_patients_training_test(self, ds, p_ids: list = None):
patient_ids = ds['P_ID'].unique()
total_patients = len(patient_ids)
n_patient_validation = int(
np.floor(total_patients * Configurator(self.__cfg_path).getfloat('training', 'test_size')))
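        # Inter-patient split: whole subjects end up either in training or in testing, never in both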
test_patients = list()
        if p_ids is not None:
            test_patients = p_ids
            # Keep only the subjects that are not reserved for testing
            patient_ids = [p_id for p_id in patient_ids if p_id not in test_patients]
else:
# Select patient for validation
for _ in range(n_patient_validation):
# Select random patient
pos = random.randint(0, len(patient_ids) - 1)
random_p_id = patient_ids[pos]
patient_ids = np.delete(patient_ids, pos)
# Keep track of selected patients
test_patients.append(random_p_id)
logging.info('--> Training subjects: %s' % str(patient_ids))
logging.info('--> Testing subjects: %s' % str(test_patients))
# Define intra patient training and validation datasets
training_dataset = ds.loc[ds['P_ID'].isin(patient_ids)].sort_values(by=['P_ID', 'time'])
validation_dataset = ds.loc[ds['P_ID'].isin(test_patients)].sort_values(by=['P_ID', 'time'])
return training_dataset, validation_dataset
def _get_pbp_window_train_and_test(self, ds, training_dataset, validation_dataset, tw_size):
cnn_training_data = list()
cnn_training_labels = list()
cnn_validation_data = list()
cnn_validation_labels = list()
for p_id in training_dataset['P_ID'].unique():
data_i, label_i, _ = self._get_window(ds.loc[ds[(ds['P_ID'] == p_id)].index],
Configurator(self.__cfg_path).getint('settings',
'sampling_frequency'),
tw_size,
Configurator(self.__cfg_path).getfloat('settings', 'overlap'))
cnn_training_data.append(data_i)
cnn_training_labels.append(label_i)
for val_p_id in validation_dataset['P_ID'].unique():
data_i, label_i, _ = self._get_window(ds.loc[ds[(ds['P_ID'] == val_p_id)].index],
Configurator(self.__cfg_path).getint('settings',
'sampling_frequency'),
tw_size,
Configurator(self.__cfg_path).getfloat('settings', 'overlap'))
cnn_validation_data.append(data_i)
cnn_validation_labels.append(label_i)
# Create CNN format training dataset and labels
cnn_training_data = np.concatenate(cnn_training_data)
cnn_training_labels = np.concatenate(cnn_training_labels)
# Create CNN format training dataset and labels
cnn_validation_data = np.concatenate(cnn_validation_data)
cnn_validation_labels = np.concatenate(cnn_validation_labels)
return cnn_training_data, cnn_validation_data, cnn_training_labels, cnn_validation_labels
def _data_preprocessing(self, df, drop_class, drop_patient, split_method, normalisation_method, selection_method,
balancing_method, ids_test_set: list = None):
widgets = [
'Data Preprocessing',
' [', progressbar.Timer(), '] ',
progressbar.Bar(),
' (', progressbar.ETA(), ') ',
]
logging.info('--- Data Preprocessing ---')
with progressbar.ProgressBar(widgets=widgets, max_value=8) as bar:
# Drop unnecessary classes
if drop_class is not None:
for class_name in drop_class:
indexes = df[(df['CLASS'] == class_name)].index
df.drop(indexes, inplace=True)
# Drop unnecessary patients
if drop_patient is not None:
for patient_id in drop_patient:
indexes = df[(df['P_ID'] == patient_id)].index
df.drop(indexes, inplace=True)
bar.update()
# Class renaming
if df['CLASS'].unique().dtype == int:
old_labels = sorted(df['CLASS'].unique())
else:
old_labels = df['CLASS'].unique()
original_name_labels = list()
logging.info('--> New labels:')
for new, old in zip(range(len(old_labels)), old_labels):
logging.info('• %s --> %s' % (str(old), str(new)))
original_name_labels.append(str(old))
df['CLASS'] = df['CLASS'].replace({old: new})
bar.update()
# Train/Test split
header_type = Configurator(self.__cfg_path).get('dataset', 'header_type')
if split_method == 'inter' and 'p' in header_type:
# Define inter patient training and validation datasets
training_dataset, validation_dataset = self._select_patients_training_test(df, ids_test_set)
elif split_method == 'intra' and 'p' in header_type or split_method == 'holdout':
training_dataset, validation_dataset = train_test_split(df,
shuffle=True,
test_size=Configurator(self.__cfg_path).getfloat(
'training',
'test_size'),
random_state=28)
else:
training_dataset = None
validation_dataset = None
logging.info('*** Error: %s not a valid train/test split method ***' % split_method)
print('*** Error: %s not a valid train/test split method ***' % split_method)
exit(12)
bar.update()
# Get training data and labels
if 'P_ID' in training_dataset.columns:
# Drop P_ID if exists
training_data = training_dataset.drop(['CLASS', 'P_ID'], axis=1)
validation_data = validation_dataset.drop(['CLASS', 'P_ID'], axis=1)
else:
training_data = training_dataset.drop(['CLASS'], axis=1)
validation_data = validation_dataset.drop(['CLASS'], axis=1)
training_labels = training_dataset['CLASS']
validation_labels = validation_dataset['CLASS']
del training_dataset, validation_dataset
bar.update()
# Normalisation
if normalisation_method == 'minmax':
scaler = preprocessing.MinMaxScaler()
elif normalisation_method == 'robust':
scaler = preprocessing.RobustScaler()
else:
scaler = preprocessing.StandardScaler()
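            # Fit the scaler on the training split only and reuse it on the validation split to avoid data leakage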
training_data = scaler.fit_transform(training_data)
validation_data = scaler.transform(validation_data)
bar.update()
if Configurator(self.__cfg_path).get('settings', 'data_treatment') == 'features_extraction':
# Create a directory in order to save extracted features names
features_path = os.path.join(Configurator(self.__cfg_path).get('settings', 'log_dir'), 'features')
Path(features_path).mkdir(parents=True, exist_ok=True)
with open(os.path.join(features_path, 'extracted_features.rtf'), 'a+') as exf:
exf.write('%s' % '\n'.join(list(df.columns)))
else:
features_path = None
bar.update()
if Configurator(self.__cfg_path).getboolean('settings', 'features_selection') and Configurator(
self.__cfg_path).get('settings', 'data_treatment') == 'features_extraction':
# Highly correlated features are removed
# corr_features = ts.correlated_features(training_data)
# training_data.drop(corr_features, axis=1, inplace=True)
# validation_data.drop(corr_features, axis=1, inplace=True)
if selection_method == 'variance':
# Remove low variance features
selector = VarianceThreshold()
training_data = selector.fit_transform(training_data)
validation_data = selector.transform(validation_data)
elif selection_method == 'l1':
lsvc = LinearSVC(C=0.01, penalty="l1", dual=False).fit(training_data, training_labels)
model = SelectFromModel(lsvc, prefit=True)
                    training_data = model.transform(training_data)
validation_data = model.transform(validation_data)
elif selection_method == 'tree-based':
clf = ExtraTreesClassifier(n_estimators=50)
clf = clf.fit(training_data, training_labels)
model = SelectFromModel(clf, prefit=True)
                    training_data = model.transform(training_data)
validation_data = model.transform(validation_data)
elif selection_method == 'recursive':
estimator = SVR(kernel="linear")
selector = RFE(estimator, n_features_to_select=5, step=1)
training_data = selector.fit_transform(training_data, training_labels)
validation_data = selector.transform(validation_data)
else:
                    logging.info('*** Error: %s is not an implemented feature selection technique ***' % str(selection_method))
                    print('*** Error: %s is not an implemented feature selection technique ***' % str(selection_method))
exit(5)
logging.info('--> Features selected: %s' % str(validation_data.shape[1]))
bar.update()
# Balancing
resampling_technique = Configurator(self.__cfg_path).get('settings', 'resampling')
if resampling_technique == 'under':
if balancing_method == 'random_under':
sampler = RandomUnderSampler()
elif balancing_method == 'near_miss':
sampler = NearMiss()
elif balancing_method == 'edited_nn':
sampler = EditedNearestNeighbours()
else:
logging.info(
                        '*** Fallback: %s not recognised, using RandomUnderSampler instead ***' % balancing_method)
sampler = RandomUnderSampler()
elif resampling_technique == 'over':
if balancing_method == 'smote':
sampler = SMOTE()
elif balancing_method == 'adasyn':
sampler = ADASYN()
elif balancing_method == 'kmeans_smote':
sampler = KMeansSMOTE(k_neighbors=3)
elif balancing_method == 'random_over':
sampler = RandomOverSampler()
else:
logging.info(
                        '*** Fallback: %s not recognised, using RandomOverSampler instead ***' % balancing_method)
sampler = RandomOverSampler()
else:
balancing_method = ''
sampler = None
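            # Resampling is applied to the training split only; the validation split keeps its original class distribution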
if sampler is not None:
training_data, training_labels = sampler.fit_resample(training_data, training_labels)
else:
training_data, training_labels = training_data, training_labels
bar.update()
logging.info('--> Applied %s %s' % (resampling_technique, balancing_method))
logging.info('--> Train test shape: %s' % str(training_data.shape))
logging.info('--> Validation test shape: %s' % str(validation_data.shape))
return training_data, validation_data, training_labels.values, validation_labels.values, original_name_labels
def _pbp_features_extraction(self, df, tw_size):
patient_ids = df['P_ID'].unique()
features = list()
for p_id in patient_ids:
features_dataset_i = self._features_extraction(df.drop(df[(df['P_ID'] != p_id)].index),
Configurator(self.__cfg_path).getint('settings',
'sampling_frequency'),
tw_size,
Configurator(self.__cfg_path).getfloat('settings',
'overlap'),
                                                           Configurator(self.__cfg_path).get('dataset', 'header_type'))
features.append(features_dataset_i)
# Features dataset of all patients
        features_dataset = pd.concat(features, ignore_index=True)
        return features_dataset
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
        self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
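        # at 5-minute frequency each 00:00-01:00 window holds 13 ticks; four
        # full days plus the closing midnight give 13 * 4 + 1 rows inclusive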
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
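        # helper: two timestamps with sub-second precision, reused by the
        # to_period millisecond/microsecond tests below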
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
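        # nanosecond-resolution Timestamps only cover roughly 1677-09-21
        # through 2262-04-11, so dates outside that window must raise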
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
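        # adding or subtracting an integer shifts the index by that many
        # periods of its frequency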
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
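        # by default the index views the int64 data, so mutating the source
        # array is visible via asi8; copy=True must take an independent copy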
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
        self.assertEqual(result['index'].dtype, np.dtype('M8[ns]'))
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
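        # seconds 10-29 are NaN, so the first minute averages 0-9 and 30-59
        # (34.5) while the second minute averages 60-99 (79.5)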
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
        start = datetime.datetime.now()
        idx = DatetimeIndex(start=start, freq="1d", periods=10)
        df = DataFrame(lrange(10), index=idx)
        df["2013-01-14 23:44:34.437768-05:00":]  # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
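        # 1e9 / 24414 truncates to 40960 ns per sample, which is the
        # '40960N' frequency checked in assert_index_parameters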
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
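        # factorize labels values by first appearance (or sorted order with
        # sort=True); the uniques must keep any timezone or frequency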
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
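        # 1998-01-01 falls on a Thursday (dayofweek == 3); the accessors are
        # checked against a daily index spanning the full year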
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)]
for ts, value in tests:
self.assertEqual(ts, value)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assertEqual(len(dti1.diff(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(s[48], 48)
self.assertEqual(s['1/2/2009'], 48)
self.assertEqual(s['2009-1-2'], 48)
self.assertEqual(s[datetime(2009, 1, 2)], 48)
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEqual(s[48], -1)
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assertTrue((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assertTrue(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assertTrue((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[0], sdate + 0 * dt.bday)
self.assertEqual(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[-1], edate)
self.assertEqual(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.Week(weekday=6))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.QuarterBegin(startingMonth=1))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.BQuarterEnd(startingMonth=12))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
def test_dayfirst(self):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10),
datetime(2014, 2, 11),
datetime(2014, 2, 12)])
        idx1 = DatetimeIndex(arr, dayfirst=True)