import rba
import copy
import pandas
import time
import numpy
import seaborn
import matplotlib.pyplot as plt
from .rba_Session import RBA_Session
from sklearn.linear_model import LinearRegression
# import matplotlib.pyplot as plt
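# Collects the proto-IDs of all proteins that constitute the given translation
# processes in the RBA model, optionally extended by an external annotation
# table, and returns them as a de-duplicated list of ribosomal protein IDs.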
def find_ribosomal_proteins(rba_session, model_processes=['TranslationC', 'TranslationM'], external_annotations=None):
out = []
for i in model_processes:
out += [rba_session.ModelStructure.ProteinInfo.Elements[j]['ProtoID']
for j in list(rba_session.ModelStructure.ProcessInfo.Elements[i]['Composition'].keys()) if j in rba_session.ModelStructure.ProteinInfo.Elements.keys()]
if external_annotations is not None:
out += list(external_annotations['ID'])
return(list(set(out)))
def build_model_compartment_map(rba_session):
out = {rba_session.ModelStructure.ProteinInfo.Elements[i]['ProtoID']: rba_session.ModelStructure.ProteinInfo.Elements[i]['Compartment'] for i in list(
rba_session.ModelStructure.ProteinInfo.Elements.keys())}
return(out)
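# Flags which externally annotated proteins are already covered by the model's
# own protein-to-compartment map, then concatenates the model-internal
# annotations with the remaining external entries into one annotation table.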
def build_compartment_annotations(Compartment_Annotations_external, model_protein_compartment_map):
for i in Compartment_Annotations_external.index:
if Compartment_Annotations_external.loc[i, 'ID'] in list(model_protein_compartment_map.keys()):
Compartment_Annotations_external.loc[i, 'modelproteinannotation'] = 1
else:
Compartment_Annotations_external.loc[i, 'modelproteinannotation'] = 0
Compartment_Annotations_internal = pandas.DataFrame()
Compartment_Annotations_internal['ID'] = list(model_protein_compartment_map.keys())
Compartment_Annotations_internal['ModelComp'] = list(model_protein_compartment_map.values())
Compartment_Annotations = pandas.concat(
[Compartment_Annotations_internal, Compartment_Annotations_external.loc[Compartment_Annotations_external['modelproteinannotation'] == 0, ['ID', 'ModelComp']]], axis=0)
return(Compartment_Annotations)
def build_dataset_annotations(input, ID_column, Uniprot, Compartment_Annotations, model_protein_compartment_map, ribosomal_proteins):
print('riboprots-----------------')
print(ribosomal_proteins)
out = pandas.DataFrame()
for g in list(input[ID_column]):
out.loc[g, 'ID'] = g
matches = [i for i in list(Uniprot.loc[pandas.isna(
Uniprot['Gene names']) == False, 'Gene names']) if g in i]
mass_prot = numpy.nan
if len(matches) > 0:
mass_prot = len(Uniprot.loc[Uniprot['Gene names'] == matches[0], 'Sequence'].values[0])
out.loc[g, 'AA_residues'] = mass_prot
if g in list(Compartment_Annotations['ID']):
out.loc[g, 'Location'] = Compartment_Annotations.loc[Compartment_Annotations['ID']
== g, 'ModelComp'].values[0]
in_model = 0
if g in model_protein_compartment_map.keys():
in_model = 1
is_ribosomal = 0
if g in ribosomal_proteins:
is_ribosomal = 1
out.loc[g, 'InModel'] = in_model
out.loc[g, 'IsRibosomal'] = is_ribosomal
return(out)
def build_full_annotations_from_dataset_annotations(annotations_list):
out = pandas.concat(annotations_list, axis=0)
index = out.index
is_duplicate = index.duplicated(keep="first")
not_duplicate = ~is_duplicate
out = out[not_duplicate]
return(out)
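# Derives an absolute reference copy number per gene from absolute data and
# fold changes, Absolute_Reference = copy_number / 2**fold_change, and then
# restores absolute values for the requested conditions as
# Abs_Ref * 2**fold_change(condition). Fold changes are assumed to be in log2.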
def infer_copy_numbers_from_reference_copy_numbers(fold_changes, absolute_data, matching_column_in_fold_change_data, matching_column_in_absolute_data, conditions_in_fold_change_data_to_restore):
out = pandas.DataFrame()
for i in list(absolute_data['Gene']):
if i in list(fold_changes['Gene']):
FoldChange_match = fold_changes.loc[fold_changes['Gene']
== i, matching_column_in_fold_change_data].values[0]
CopyNumber_match = absolute_data.loc[absolute_data['Gene']
== i, matching_column_in_absolute_data].values[0]
if not pandas.isna(FoldChange_match):
if not pandas.isna(CopyNumber_match):
out.loc[i, 'ID'] = i
out.loc[i, 'Absolute_Reference'] = CopyNumber_match/(2**FoldChange_match)
for gene in list(out['ID']):
Abs_Ref = out.loc[gene, 'Absolute_Reference']
for condition in conditions_in_fold_change_data_to_restore:
out.loc[gene, condition] = Abs_Ref * \
(2**fold_changes.loc[fold_changes['Gene'] == gene, condition].values[0])
return(out)
def add_annotations_to_proteome(input, ID_column, annotations):
for i in input.index:
if input.loc[i, ID_column] in annotations.index:
input.loc[i, 'AA_residues'] = annotations.loc[input.loc[i, ID_column], 'AA_residues']
input.loc[i, 'Location'] = annotations.loc[input.loc[i, ID_column], 'Location']
input.loc[i, 'InModel'] = annotations.loc[input.loc[i, ID_column], 'InModel']
input.loc[i, 'IsRibosomal'] = annotations.loc[input.loc[i, ID_column], 'IsRibosomal']
return(input)
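# Computes amino-acid mass occupation per compartment for one condition: copy
# numbers are weighted by protein length (mass_col), summed per 'Location',
# with ribosomal proteins optionally split off into a 'Ribosomes'
# pseudo-compartment, and finally expressed as fractions of the total.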
def determine_compartment_occupation(Data, Condition, mass_col='AA_residues', only_in_model=False, compartments_to_ignore=['DEF'], compartments_no_original_PG=[], ribosomal_proteins_as_extra_compartment=True):
for i in compartments_to_ignore:
Data = Data.loc[Data['Location'] != i]
for i in compartments_no_original_PG:
Data = Data.loc[(Data['Location'] != i) | (Data['InModel'] == 1)]
if only_in_model:
Data = Data.loc[Data['InModel'] >= 1]
if ribosomal_proteins_as_extra_compartment:
Data_R = Data.loc[Data['IsRibosomal'] == 1].copy()
Data = Data.loc[Data['IsRibosomal'] == 0]
Data_R_df = Data_R.loc[:, [Condition, mass_col, 'Location']]
Data_R_df[Condition] = Data_R_df[Condition]*Data_R_df[mass_col]
Ribosomal_sum = Data_R_df[Condition].sum()
df = Data.loc[:, [Condition, mass_col, 'Location']]
df[Condition] = df[Condition]*df[mass_col]
out = pandas.DataFrame(df.groupby('Location').sum())
if ribosomal_proteins_as_extra_compartment:
out.loc['Ribosomes', Condition] = Ribosomal_sum
out.loc['Total', Condition] = out[Condition].sum()
out.loc[:, 'original_protein_fraction'] = out[Condition]/out.loc['Total', Condition]
out.rename(columns={Condition: 'original_amino_acid_occupation'}, inplace=True)
out.drop(columns=[mass_col], inplace=True)
return(out)
def build_proteome_overview(input, condition, compartments_to_ignore=['DEF', 'DEFA', 'Def'], compartments_no_original_PG=['n', 'Secreted'], ribosomal_proteins_as_extra_compartment=True):
out = determine_compartment_occupation(Data=input, Condition=condition, compartments_to_ignore=compartments_to_ignore,
compartments_no_original_PG=compartments_no_original_PG, ribosomal_proteins_as_extra_compartment=ribosomal_proteins_as_extra_compartment, only_in_model=False)
out_in_model = determine_compartment_occupation(Data=input, Condition=condition, compartments_to_ignore=compartments_to_ignore,
compartments_no_original_PG=compartments_no_original_PG, ribosomal_proteins_as_extra_compartment=ribosomal_proteins_as_extra_compartment, only_in_model=True)
out['original_PG_fraction'] = 1-out_in_model['original_amino_acid_occupation'] / \
out['original_amino_acid_occupation']
return(out)
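# Correction factor A rescales the remaining measured fractions after some
# compartment fractions are replaced entirely by expected values:
# A = 1 / (1 - sum(replaced fractions)); e.g. replacing 20% of the proteome
# scales the rest by 1.25.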
def determine_correction_factor_A(fractions_entirely_replaced_with_expected_value):
expected_fraction_sum = 0
for i in fractions_entirely_replaced_with_expected_value.keys():
expected_fraction_sum += fractions_entirely_replaced_with_expected_value[i]
factor = 1/(1-expected_fraction_sum)
return(factor)
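# Correction factor B is the proteome share left after subtracting the imposed
# compartment fractions: B = 1 - sum(imposed fractions).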
def determine_correction_factor_B(imposed_compartment_fractions):
expected_fractions = 0
for i in imposed_compartment_fractions.keys():
expected_fractions += imposed_compartment_fractions[i]
factor = 1-expected_fractions
return(factor)
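# Correction factor C is the ratio of total protein content between a condition
# and the reference condition (taken from the 'Total_protein' row of the input).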
def determine_correction_factor_C(input, condition, reference_condition):
return(input.loc[input['ID'] == 'Total_protein', condition].values[0]/input.loc[input['ID'] == 'Total_protein', reference_condition].values[0])
def correct_protein_fractions(input, factors, directly_corrected_compartments, imposed_compartment_fractions):
out = input.copy()
for c in out.index:
if c in directly_corrected_compartments:
out.loc[c, 'new_protein_fraction'] = out.loc[c,
'original_protein_fraction']*factors['A']*factors['B']
elif c in imposed_compartment_fractions.keys():
out.loc[c, 'new_protein_fraction'] = imposed_compartment_fractions[c]
return(out)
def correct_PG_fraction(input, factors, compartments_no_original_PG, merged_compartments):
out = input.copy()
for c in out.index:
if c == 'Total':
continue
else:
if c in compartments_no_original_PG:
original_fraction = out.loc[c, 'original_protein_fraction']
out.loc[c, 'new_PG_fraction'] = 1 - ((factors['A']*factors['B']*original_fraction) /
out.loc[c, 'new_protein_fraction'])
elif c in merged_compartments.keys():
out.loc[c, 'new_PG_fraction'] = out.loc[c, 'original_PG_fraction']*out.loc[c, 'original_protein_fraction']/(
out.loc[c, 'original_protein_fraction']+out.loc[merged_compartments[c], 'original_protein_fraction'])
else:
out.loc[c, 'new_PG_fraction'] = out.loc[c, 'original_PG_fraction']
return(out)
def merge_compartments(input, merged_compartments):
out = input.copy()
for c in merged_compartments.keys():
out.loc[c, 'new_protein_fraction'] = out.loc[c, 'new_protein_fraction'] + \
out.loc[merged_compartments[c], 'new_protein_fraction']
return(out)
def calculate_new_total_PG_fraction(input):
out = input.copy()
fraction = 0
for c in out.index:
if c not in ['Total', 'Ribosomes']:
fraction += out.loc[c, 'new_protein_fraction']*out.loc[c, 'new_PG_fraction']
out.loc['Total', 'new_PG_fraction'] = fraction
out.loc['Total', 'new_protein_fraction'] = 1
return(out)
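# Estimates an apparent efficiency for each macromolecular process: the measured
# amino-acid amount of the process machinery (relative to total occupation) is
# related to the proteome fraction of its client compartments, scaled by the
# growth rate, and multiplied by the machinery size in amino acids to yield an
# apparent capacity.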
def determine_apparent_process_efficiencies(growth_rate, input, rba_session, proteome_summary, protein_data, condition, gene_id_col):
process_efficiencies = pandas.DataFrame()
for i in input.index:
process_ID = input.loc[i, 'Process_ID']
process_name = input.loc[i, 'Process_Name']
process_client_compartments = input.loc[i, 'Client_Compartments'].split(' , ')
constituting_proteins = {rba_session.ModelStructure.ProteinInfo.Elements[i]['ProtoID']: rba_session.ModelStructure.ProteinInfo.Elements[
i]['AAnumber'] for i in rba_session.ModelStructure.ProcessInfo.Elements[process_name]['Composition'].keys()}
Total_client_fraction = sum([proteome_summary.loc[i, 'new_protein_fraction']
for i in process_client_compartments])
n_AAs_in_machinery = 0
machinery_size = 0
for i in constituting_proteins.keys():
if i in list(protein_data['ID']):
n_AAs_in_machinery += protein_data.loc[protein_data['ID'] == i, condition].values[0] * \
protein_data.loc[protein_data['ID'] == i, 'AA_residues'].values[0]
machinery_size += constituting_proteins[i]
# right reference amount?
if n_AAs_in_machinery > 0:
relative_Protein_fraction_of_machinery = n_AAs_in_machinery / \
proteome_summary.loc['Total', 'original_amino_acid_occupation']
specific_capacity = growth_rate*Total_client_fraction/relative_Protein_fraction_of_machinery
apparent_capacity = specific_capacity*machinery_size
# process_ID[process_name] = apparent_capacity
process_efficiencies.loc[process_name, 'Process'] = process_ID
process_efficiencies.loc[process_name, 'Parameter'] = str(
process_ID+'_apparent_efficiency')
process_efficiencies.loc[process_name, 'Value'] = apparent_capacity
return(process_efficiencies)
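# End-to-end proteome correction for one condition:
#   1. build the compartment occupation overview,
#   2. compute correction factors A and B,
#   3. correct compartment protein fractions and PG fractions (apparently the
#      share of protein not represented in the model),
#   4. merge compartments and recompute the total PG fraction,
#   5. write a CSV overview and return the summary together with the factors.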
def correction_pipeline(input, condition, compartments_to_ignore, compartments_no_original_PG, fractions_entirely_replaced_with_expected_value, imposed_compartment_fractions, directly_corrected_compartments, merged_compartments):
out = build_proteome_overview(input=input, condition=condition, compartments_to_ignore=compartments_to_ignore,
compartments_no_original_PG=compartments_no_original_PG, ribosomal_proteins_as_extra_compartment=True)
factor_A = determine_correction_factor_A(fractions_entirely_replaced_with_expected_value={
i: imposed_compartment_fractions[i] for i in fractions_entirely_replaced_with_expected_value})
factor_B = determine_correction_factor_B(
imposed_compartment_fractions=imposed_compartment_fractions)
out = correct_protein_fractions(input=out, factors={
'A': factor_A, 'B': factor_B}, directly_corrected_compartments=directly_corrected_compartments, imposed_compartment_fractions=imposed_compartment_fractions)
out = correct_PG_fraction(input=out, factors={
'A': factor_A, 'B': factor_B}, compartments_no_original_PG=compartments_no_original_PG, merged_compartments=merged_compartments)
out = merge_compartments(input=out, merged_compartments=merged_compartments)
out = calculate_new_total_PG_fraction(input=out)
out.to_csv(str('Correction_overview_'+condition+'.csv'))
return({'Summary': out, 'Correction_factors': {'A': factor_A, 'B': factor_B}})
def build_input_for_default_kapp_estimation(input):
out = pandas.DataFrame(columns=['Compartment_ID', 'Density', 'PG_fraction'])
for i in input['Summary'].index:
if i not in ['Total', 'Ribosomes']:
out.loc[i, 'Compartment_ID'] = i
out.loc[i, 'Density'] = input['Summary'].loc[i, 'new_protein_fraction']
out.loc[i, 'PG_fraction'] = input['Summary'].loc[i, 'new_PG_fraction']
return(out)
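# Builds a table of exchange-flux bounds (Reaction_ID, LB, UB) for the given
# condition from rows of type 'ExchangeFlux_Mean' and
# 'ExchangeFlux_StandardError'; if no specific exchanges are requested, all IDs
# from the mean-flux table are used.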
def flux_bounds_from_input(input, condition, specific_exchanges=None):
flux_mean_df = input.loc[input['Type'] == 'ExchangeFlux_Mean', :]
flux_mean_SE = input.loc[input['Type'] == 'ExchangeFlux_StandardError', :]
out = pandas.DataFrame(columns=['Reaction_ID', 'LB', 'UB'])
if specific_exchanges is None:
exchanges_to_set = list(flux_mean_df['ID'])
else:
exchanges_to_set = specific_exchanges
for rx in exchanges_to_set:
mean_val = flux_mean_df.loc[flux_mean_df['ID'] == rx, condition].values[0]
if not pandas.isna(mean_val):
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 4 15:42:23 2020
@author: MichaelEK
"""
import pandas as pd
import numpy as np
import json
from pdsf import sflake as sf
from utils import split_months
def agg_allo(param, allo, use_mapping):
"""
"""
run_time_start = pd.Timestamp.today()
# -*- coding: utf-8 -*-
"""
.. module:: skimpy
:platform: Unix, Windows
:synopsis: Simple Kinetic Models in Python
.. moduleauthor:: SKiMPy team
[---------]
Copyright 2017 Laboratory of Computational Systems Biotechnology (LCSB),
Ecole Polytechnique Federale de Lausanne (EPFL), Switzerland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from ..viz.plotting import timetrace_plot, plot_population_per_variable
from ..utils import TabDict, iterable_to_tabdict
from copy import deepcopy
import pandas as pd
# Class for ode solutions
class ODESolution:
def __init__(self, model, solution):
self.ode_solution = solution
self.time = np.array(solution.values.t)
self.species = np.array(solution.values.y)
self.names = [x for x in model.ode_fun.variables]
# TODO: Cleanup this
concentrations = iterable_to_tabdict([])
for this_species, this_name in zip(self.species.T, self.names):
concentrations[this_name] = this_species
self.concentrations = pd.DataFrame.from_dict(concentrations, orient='columns')
def plot(self, filename='', **kwargs):
timetrace_plot(self.time, self.species, filename, legend=self.names, **kwargs)
def copy(self):
return deepcopy(self)
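# Collects several ODESolution objects into one long-format DataFrame
# (one block of rows per solution id), presumably for population-level
# plotting with plot_population_per_variable.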
class ODESolutionPopulation:
def __init__(self, list_of_solutions, index=None):
sol_cols = list(list_of_solutions[0].concentrations.keys())
self.data = pd.DataFrame(columns=['solution_id', 'time'] + sol_cols)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_preprocessing
----------------------------------
Tests for `preprocessing` module.
"""
import pytest
from sktutor.preprocessing import (GroupByImputer, MissingValueFiller,
OverMissingThresholdDropper,
ValueReplacer, FactorLimiter,
SingleValueAboveThresholdDropper,
SingleValueDropper, ColumnExtractor,
ColumnDropper, DummyCreator,
ColumnValidator, TextContainsDummyExtractor,
BitwiseOperator, BoxCoxTransformer,
InteractionCreator, StandardScaler,
PolynomialFeatures, ContinuousFeatureBinner,
TypeExtractor, GenericTransformer,
MissingColumnsReplacer)
from sktutor.pipeline import make_union
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from random import shuffle
from sklearn.pipeline import make_pipeline
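# The fixtures referenced below (missing_data, missing_data2, missing_data_factors,
# missing_data_numeric, full_data_factors, text_data, boolean_data, etc.) are
# assumed to be provided by the package's conftest.py; they are not defined here.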
@pytest.mark.usefixtures("missing_data")
@pytest.mark.usefixtures("missing_data2")
class TestGroupByImputer(object):
def test_groups_most_frequent(self, missing_data):
# Test imputing most frequent value per group.
prep = GroupByImputer('most_frequent', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.0, 4.0, 4.0, 4.0, 7.0, 9.0, 9.0, 9.0],
'd': ['a', 'a', 'a', None, 'e', 'f', 'j', 'h', 'j', 'j'],
'e': [1, 2, 1, None, None, None, None, None, None, None],
'f': ['a', 'b', 'a', None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_groups_mean(self, missing_data):
# Test imputing mean by group.
prep = GroupByImputer('mean', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 7 + 2/3, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.5, 4.0, 4.0, 4.0, 7.0, 9.0, 8+1/3, 9.0],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_groups_median(self, missing_data):
# Test imputing median by group.
prep = GroupByImputer('median', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 1.5, 4, 4, 4, 7, 9, 9, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_most_frequent(self, missing_data):
# Test imputing most frequent with no group by.
prep = GroupByImputer('most_frequent')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, 2, 4, 4, 7, 8, 2, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 4.0, 4.0, 4.0, 4.0, 7.0, 9.0, 4.0, 9.0],
'd': ['a', 'a', 'a', 'a', 'e', 'f', 'a', 'h', 'j', 'j'],
'e': [1, 2, 1, 1, 1, 1, 1, 1, 1, 1],
'f': ['a', 'b', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a'],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_mean(self, missing_data):
# Test imputing mean with no group by.
prep = GroupByImputer('mean')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 5, 5, 4, 4, 7, 8, 5, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 5, 4, 4, 4, 7, 9, 5, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_median(self, missing_data):
# Test imputing median with no group by.
prep = GroupByImputer('median')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 4, 4, 4, 4, 7, 8, 4, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 4, 4, 4, 4, 7, 9, 4, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_value_error(self, missing_data):
# Test limiting options without a group by.
prep = GroupByImputer('stdev')
with pytest.raises(ValueError):
prep.fit(missing_data)
def test_key_error(self, missing_data):
# Test imputing with np.nan when a new group level is introduced in
# Transform.
prep = GroupByImputer('mean', 'b')
prep.fit(missing_data)
new_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'987', '987', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
new_data = pd.DataFrame(new_dict)
# set equal to the expected for test means group
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 7+2/3, 8],
'b': ['123', '123', '123',
'987', '987', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.5, 4.0, 4.0, 4.0, 7.0, 9.0, 8+1/3, 9.0],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
result = prep.transform(new_data)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_most_frequent(self, missing_data2):
# Test most frequent with group by with 2 columns.
prep = GroupByImputer('most_frequent', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', 'a', 'e', 'e', 'f', 'f', 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_mean(self, missing_data2):
# Test mean with group by with 2 columns.
prep = GroupByImputer('mean', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1.5, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_median(self, missing_data2):
# Test median with group by with 2 columns.
prep = GroupByImputer('median', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1.5, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = GroupByImputer('most_frequent', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.0, 4.0, 4.0, 4.0, 7.0, 9.0, 9.0, 9.0],
'd': ['a', 'a', 'a', None, 'e', 'f', 'j', 'h', 'j', 'j'],
'e': [1, 2, 1, None, None, None, None, None, None, None],
'f': ['a', 'b', 'a', None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data_factors")
@pytest.mark.usefixtures("missing_data_numeric")
class TestMissingValueFiller(object):
def test_missing_factors(self, missing_data_factors):
# Test filling in missing factors with a string.
prep = MissingValueFiller('Missing')
result = prep.fit_transform(missing_data_factors)
exp_dict = {'c': ['a', 'Missing', 'a', 'b', 'b', 'Missing', 'c', 'a',
'a', 'c'],
'd': ['a', 'a', 'Missing', 'Missing', 'e', 'f', 'Missing',
'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_missing_numeric(self, missing_data_numeric):
# Test filling in missing numeric data with a number.
prep = MissingValueFiller(0)
result = prep.fit_transform(missing_data_numeric)
exp_dict = {'a': [2, 2, 0, 0, 4, 4, 7, 8, 0, 8],
'c': [1, 2, 0, 4, 4, 4, 7, 9, 0, 9],
'e': [1, 2, 0, 0, 0, 0, 0, 0, 0, 0]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_unordered_index(self, missing_data_numeric):
# Test unordered index is handled properly
new_index = list(missing_data_numeric.index)
shuffle(new_index)
missing_data_numeric.index = new_index
prep = MissingValueFiller(0)
result = prep.fit_transform(missing_data_numeric)
exp_dict = {'a': [2, 2, 0, 0, 4, 4, 7, 8, 0, 8],
'c': [1, 2, 0, 4, 4, 4, 7, 9, 0, 9],
'e': [1, 2, 0, 0, 0, 0, 0, 0, 0, 0]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestOverMissingThresholdDropper(object):
def test_drop_20(self, missing_data):
# Test dropping columns with missing over a threshold.
prep = OverMissingThresholdDropper(.2)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_threshold_high_value_error(self, missing_data):
# Test throwing error with threshold set too high.
with pytest.raises(ValueError):
svatd = OverMissingThresholdDropper(1.5)
svatd
def test_threshold_low_value_error(self, missing_data):
# Test throwing error with threshold set too low.
with pytest.raises(ValueError):
svatd = OverMissingThresholdDropper(-1)
svatd
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = OverMissingThresholdDropper(.2)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("full_data_factors")
class TestValueReplacer(object):
def test_mapper(self, full_data_factors):
# Test replacing values with mapper.
mapper = {'c': {'a': 'z', 'b': 'z'},
'd': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
}
}
prep = ValueReplacer(mapper)
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c': ['z', 'z', 'z', 'z', 'z', 'c', 'c', 'z', 'z', 'c'],
'd': ['z', 'z', 'y', 'y', 'x', 'x', 'w', 'w', 'w', 'w']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_inverse_mapper(self, full_data_factors):
# Test replacing values with inverse_mapper.
inv_mapper = {'c': {'z': ['a', 'b']},
'd': {'z': ['a', 'b'],
'y': ['c', 'd'],
'x': ['e', 'f'],
'w': ['g', 'h', 'j']
}
}
prep = ValueReplacer(inverse_mapper=inv_mapper)
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c': ['z', 'z', 'z', 'z', 'z', 'c', 'c', 'z', 'z', 'c'],
'd': ['z', 'z', 'y', 'y', 'x', 'x', 'w', 'w', 'w', 'w']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_extra_column_value_error(self, full_data_factors):
# Test throwing error when replacing values with a non-existent column.
mapper = {'c': {'a': 'z', 'b': 'z'},
'e': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
}
}
prep = ValueReplacer(mapper)
with pytest.raises(ValueError):
prep.fit(full_data_factors)
def test_2_mappers_value_error(self):
# Test throwing error when specifying mapper and inverse_mapper.
mapper = {'c': {'a': 'z', 'b': 'z'},
'e': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
}
}
inv_mapper = {'c': {'z': ['a', 'b']},
'd': {'z': ['a', 'b'],
'y': ['c', 'd'],
'x': ['e', 'f'],
'w': ['g', 'h', 'j']
}
}
with pytest.raises(ValueError):
prep = ValueReplacer(mapper=mapper, inverse_mapper=inv_mapper)
prep
def test_no_mappers_value_error(self):
# Test throwing error when not specifying mapper or inverse_mapper.
with pytest.raises(ValueError):
prep = ValueReplacer()
prep
def test_unordered_index(self, full_data_factors):
# Test unordered index is handled properly
new_index = list(full_data_factors.index)
shuffle(new_index)
full_data_factors.index = new_index
mapper = {'c': {'a': 'z', 'b': 'z'},
'd': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
}
}
prep = ValueReplacer(mapper)
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c': ['z', 'z', 'z', 'z', 'z', 'c', 'c', 'z', 'z', 'c'],
'd': ['z', 'z', 'y', 'y', 'x', 'x', 'w', 'w', 'w', 'w']
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data_factors")
class TestFactorLimiter(object):
def test_limiter(self, missing_data_factors):
# Test limiting factor levels to specified levels with default.
factors = {'c': {'factors': ['a', 'b'],
'default': 'a'
},
'd': {'factors': ['a', 'b', 'c', 'd'],
'default': 'd'
}
}
prep = FactorLimiter(factors)
prep.fit(missing_data_factors)
result = prep.transform(missing_data_factors)
exp_dict = {'c': ['a', 'a', 'a', 'b', 'b', 'a', 'a', 'a', 'a', 'a'],
'd': ['a', 'a', 'd', 'd', 'd', 'd', 'd', 'd', 'd', 'd']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_extra_column_value_error(self, missing_data_factors):
# Test throwing error when limiting values with a non-existant column.
factors = {'c': {'factors': ['a', 'b'],
'default': 'a'
},
'e': {'factors': ['a', 'b', 'c', 'd'],
'default': 'd'
}
}
fl = FactorLimiter(factors)
with pytest.raises(ValueError):
fl.fit(missing_data_factors)
def test_unordered_index(self, missing_data_factors):
# Test unordered index is handled properly
new_index = list(missing_data_factors.index)
shuffle(new_index)
missing_data_factors.index = new_index
factors = {'c': {'factors': ['a', 'b'],
'default': 'a'
},
'd': {'factors': ['a', 'b', 'c', 'd'],
'default': 'd'
}
}
prep = FactorLimiter(factors)
prep.fit(missing_data_factors)
result = prep.transform(missing_data_factors)
exp_dict = {'c': ['a', 'a', 'a', 'b', 'b', 'a', 'a', 'a', 'a', 'a'],
'd': ['a', 'a', 'd', 'd', 'd', 'd', 'd', 'd', 'd', 'd']
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestSingleValueAboveThresholdDropper(object):
def test_drop_70_with_na(self, missing_data):
# test dropping columns with over 70% single value, including NaNs.
prep = SingleValueAboveThresholdDropper(.7, dropna=False)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_drop_70_without_na(self, missing_data):
# test dropping columns with over 70% single value, not including NaNs.
prep = SingleValueAboveThresholdDropper(.7, dropna=True)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
'j'],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_threshold_high_value_error(self, missing_data):
# Test throwing error with threshold set too high.
with pytest.raises(ValueError):
prep = SingleValueAboveThresholdDropper(1.5)
prep
def test_threshold_low_value_error(self, missing_data):
# Test throwing error with threshold set too low.
with pytest.raises(ValueError):
prep = SingleValueAboveThresholdDropper(-1)
prep
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = SingleValueAboveThresholdDropper(.7, dropna=False)
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("single_values_data")
class TestSingleValueDropper(object):
def test_without_na(self, single_values_data):
# Test dropping columns with single values, excluding NaNs as a value.
prep = SingleValueDropper(dropna=True)
prep.fit(single_values_data)
result = prep.transform(single_values_data)
exp_dict = {'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'e': [1, 2, None, None, None, None, None, None, None,
None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_with_na(self, single_values_data):
# Test dropping columns with single values, including NaNs as a value.
prep = SingleValueDropper(dropna=False)
prep.fit(single_values_data)
result = prep.transform(single_values_data)
exp_dict = {'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'd': [1, 1, 1, 1, 1, 1, 1, 1, 1, None],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_unordered_index(self, single_values_data):
# Test unordered index is handled properly
new_index = list(single_values_data.index)
shuffle(new_index)
single_values_data.index = new_index
prep = SingleValueDropper(dropna=False)
prep.fit(single_values_data)
result = prep.transform(single_values_data)
exp_dict = {'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'd': [1, 1, 1, 1, 1, 1, 1, 1, 1, None],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestColumnExtractor(object):
def test_extraction(self, missing_data):
# Test extraction of columns from a DataFrame.
prep = ColumnExtractor(['a', 'b', 'c'])
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_column_missing_error(self, missing_data):
# Test throwing error when an extraction is requested of a missing column.
prep = ColumnExtractor(['a', 'b', 'z'])
with pytest.raises(ValueError):
prep.fit(missing_data)
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = ColumnExtractor(['a', 'b', 'c'])
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestColumnDropper(object):
def test_drop_multiple(self, missing_data):
# Test extraction of columns from a DataFrame
prep = ColumnDropper(['d', 'e', 'f', 'g', 'h'])
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_drop_single(self, missing_data):
# Test extraction of columns from a DataFrame
prep = ColumnDropper('d')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_error(self, missing_data):
# Test throwing error when dropping is requested of a missing column
prep = ColumnDropper(['a', 'b', 'z'])
with pytest.raises(ValueError):
prep.fit(missing_data)
def test_unordered_index(self, missing_data):
# Test unordered index is handled properly
new_index = list(missing_data.index)
shuffle(new_index)
missing_data.index = new_index
prep = ColumnDropper(['d', 'e', 'f', 'g', 'h'])
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("full_data_factors")
@pytest.mark.usefixtures("full_data_factors_subset")
@pytest.mark.usefixtures("missing_data_factors")
class TestDummyCreator(object):
def test_default_dummies(self, full_data_factors):
# Test creating dummies variables from a DataFrame
prep = DummyCreator()
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c_a': [1, 1, 1, 0, 0, 0, 0, 1, 1, 0],
'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
'd_a': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'd_b': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'd_c': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
'd_d': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'd_g': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_fit_transform(self, full_data_factors):
# Test creating dummies variables from a DataFrame
prep = DummyCreator()
result = prep.fit_transform(full_data_factors)
exp_dict = {'c_a': [1, 1, 1, 0, 0, 0, 0, 1, 1, 0],
'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
'd_a': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'd_b': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'd_c': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
'd_d': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'd_g': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_drop_first_dummies(self, full_data_factors):
# Test dropping first dummies for each column.
kwargs = {'drop_first': True}
prep = DummyCreator(**kwargs)
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
'd_b': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'd_c': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
'd_d': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'd_g': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_drop_first_dummies_missing_levels(self, full_data_factors,
full_data_factors_subset):
# Test dropping first dummies for each column.
kwargs = {'drop_first': True}
prep = DummyCreator(**kwargs)
prep.fit(full_data_factors)
result = prep.transform(full_data_factors_subset)
exp_dict = {'c_b': [1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 1, 1, 0, 0, 1],
'd_b': [0, 0, 0, 0, 0, 0, 0],
'd_c': [0, 0, 0, 0, 0, 0, 0],
'd_d': [1, 0, 0, 0, 0, 0, 0],
'd_e': [0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 1, 0, 0, 0, 0],
'd_g': [0, 0, 0, 1, 0, 0, 0],
'd_h': [0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 1, 1]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_dummy_na_false_dummies(self, missing_data_factors):
# Test not creating dummies for NaNs.
prep = DummyCreator()
prep.fit(missing_data_factors)
result = prep.transform(missing_data_factors)
exp_dict = {'c_a': [1, 0, 1, 0, 0, 0, 0, 1, 1, 0],
'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 0, 0, 0, 0, 1, 0, 0, 1],
'd_a': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_dummy_na_true_dummies(self, missing_data_factors):
# Test creating dummies for NaNs.
kwargs = {'dummy_na': True}
prep = DummyCreator(**kwargs)
prep.fit(missing_data_factors)
result = prep.transform(missing_data_factors)
exp_dict = {'c_a': [1, 0, 1, 0, 0, 0, 0, 1, 1, 0],
'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 0, 0, 0, 0, 1, 0, 0, 1],
'c_nan': [0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
'd_a': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
'd_nan': [0, 0, 1, 1, 0, 0, 1, 0, 0, 0]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_fillin_missing_dummies(self, full_data_factors):
# Test filling in missing dummy columns when the transform data lacks levels
# present in the fitting data set.
prep = DummyCreator()
prep.fit(full_data_factors)
new_dict = {'c': ['b', 'c'],
'd': ['a', 'b']
}
new_data = pd.DataFrame(new_dict)
result = prep.transform(new_data)
exp_dict = {'c_a': [0, 0],
'c_b': [1, 0],
'c_c': [0, 1],
'd_a': [1, 0],
'd_b': [0, 1],
'd_c': [0, 0],
'd_d': [0, 0],
'd_e': [0, 0],
'd_f': [0, 0],
'd_g': [0, 0],
'd_h': [0, 0],
'd_j': [0, 0]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_unordered_index(self, full_data_factors):
# Test unordered index is handled properly
new_index = list(full_data_factors.index)
shuffle(new_index)
full_data_factors.index = new_index
prep = DummyCreator()
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {'c_a': [1, 1, 1, 0, 0, 0, 0, 1, 1, 0],
'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
'c_c': [0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
'd_a': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'd_b': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
'd_c': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
'd_d': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
'd_g': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("full_data_factors")
class TestColumnValidator(object):
def test_order(self, full_data_factors):
# Test extraction of columns from a DataFrame
prep = ColumnValidator()
prep.fit(full_data_factors)
new_dict = {'d': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c']
}
new_data = pd.DataFrame(new_dict)
result = prep.transform(new_data)
exp_dict = {'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_missing_columns_error(self, full_data_factors):
# Test throwing an error when the new data is missing columns
prep = ColumnValidator()
prep.fit(full_data_factors)
new_dict = {'d': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j']
}
new_data = pd.DataFrame(new_dict)
with pytest.raises(ValueError):
prep.transform(new_data)
def test_new_columns_error(self, full_data_factors):
# Test throwing an error when the new data has extra columns
prep = ColumnValidator()
prep.fit(full_data_factors)
new_dict = {'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j'],
'e': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j']
}
new_data = pd.DataFrame(new_dict)
with pytest.raises(ValueError):
prep.transform(new_data)
def test_unordered_index(self, full_data_factors):
# Test unordered index is handled properly
new_index = list(full_data_factors.index)
shuffle(new_index)
full_data_factors.index = new_index
prep = ColumnValidator()
prep.fit(full_data_factors)
result = prep.transform(full_data_factors)
exp_dict = {
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("text_data")
class TestTextContainsDummyExtractor(object):
def test_mapper(self, text_data):
# Test text contains dummy with mapper.
mapper = {'a':
{'a_1':
[{'pattern': 'birthday', 'kwargs': {'case': False}},
{'pattern': 'bday', 'kwargs': {'case': False}}
],
'a_2':
[{'pattern': 'b.*day', 'kwargs': {'case': False}}
],
},
'b':
{'b_1':
[{'pattern': 'h.*r', 'kwargs': {'case': False}}
],
'b_2':
[{'pattern': '!', 'kwargs': {'case': False}},
]
}
}
prep = TextContainsDummyExtractor(mapper)
prep.fit(text_data)
result = prep.transform(text_data)
exp_dict = {'a': ['Happy Birthday!', 'It\'s your bday!'],
'b': ['Happy Arbor Day!', 'Happy Gilmore'],
'c': ['a', 'b'],
'a_1': [1, 1],
'a_2': [1, 1],
'b_1': [1, 1],
'b_2': [1, 0]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
def test_extra_column_value_error(self, text_data):
# Test throwing error when replacing values with a non-existent column.
mapper = {'a':
{'a_1':
[{'pattern': 'birthday', 'kwargs': {'case': False}},
{'pattern': 'bday', 'kwargs': {'case': False}}
],
'a_2':
[{'pattern': 'b.*day', 'kwargs': {'case': False}}
],
},
'd':
{'b_1':
[{'pattern': 'h.*r', 'kwargs': {'case': False}}
],
'b_2':
[{'pattern': '!', 'kwargs': {'case': False}},
]
}
}
prep = TextContainsDummyExtractor(mapper)
with pytest.raises(ValueError):
prep.fit(text_data)
def test_unordered_index(self, text_data):
# Test unordered index is handled properly
new_index = list(text_data.index)
shuffle(new_index)
text_data.index = new_index
mapper = {'a':
{'a_1':
[{'pattern': 'birthday', 'kwargs': {'case': False}},
{'pattern': 'bday', 'kwargs': {'case': False}}
],
'a_2':
[{'pattern': 'b.*day', 'kwargs': {'case': False}}
],
},
'b':
{'b_1':
[{'pattern': 'h.*r', 'kwargs': {'case': False}}
],
'b_2':
[{'pattern': '!', 'kwargs': {'case': False}},
]
}
}
prep = TextContainsDummyExtractor(mapper)
prep.fit(text_data)
result = prep.transform(text_data)
exp_dict = {'a': ['Happy Birthday!', 'It\'s your bday!'],
'b': ['Happy Arbor Day!', 'Happy Gilmore'],
'c': ['a', 'b'],
'a_1': [1, 1],
'a_2': [1, 1],
'b_1': [1, 1],
'b_2': [1, 0]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
@pytest.mark.usefixtures("boolean_data")
class TestBitwiseOperator(object):
def test_operator_value_error(self, text_data):
# Test throwing error when using invalid operator parameter
mapper = {'f': ['c', 'd', 'e'],
'g': ['a', 'b']
}
with pytest.raises(ValueError):
prep = BitwiseOperator('with', mapper)
prep
def test_or_mapper_boolean(self, boolean_data):
# Test bitwise or applied to booleans
mapper = {'f': ['c', 'd', 'e'],
'g': ['a', 'b']
}
prep = BitwiseOperator('or', mapper)
prep.fit(boolean_data)
result = prep.transform(boolean_data)
exp_dict = {'a': [True, True, False, False],
'b': [True, False, False, True],
'c': [False, True, True, False],
'd': [True, False, True, False],
'e': [False, True, False, True],
'f': [1, 1, 1, 1],
'g': [1, 1, 0, 1],
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
def test_or_mapper_binary(self, boolean_data):
# Test bitwise or applied to integers
mapper = {'f': ['c', 'd', 'e'],
'g': ['a', 'b']
}
prep = BitwiseOperator('or', mapper)
prep.fit(boolean_data)
result = prep.transform(boolean_data)
exp_dict = {'a': [1, 1, 0, 0],
'b': [1, 0, 0, 1],
'c': [0, 1, 1, 0],
'd': [1, 0, 1, 0],
'e': [0, 1, 0, 1],
'f': [1, 1, 1, 1],
'g': [1, 1, 0, 1],
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
def test_or_extra_column_value_error(self, text_data):
# Test throwing error when replacing values with a non-existent column.
mapper = {'f': ['c', 'd', 'e'],
'g': ['a', 'b']
}
prep = BitwiseOperator('or', mapper)
with pytest.raises(ValueError):
prep.fit(text_data)
def test_and_mapper_boolean(self, boolean_data):
# Test bitwise and applied to booleans
mapper = {'f': ['c', 'd', 'e'],
'g': ['a', 'b']
}
prep = BitwiseOperator('and', mapper)
prep.fit(boolean_data)
result = prep.transform(boolean_data)
exp_dict = {'a': [True, True, False, False],
'b': [True, False, False, True],
'c': [False, True, True, False],
'd': [True, False, True, False],
'e': [False, True, False, True],
'f': [0, 0, 0, 0],
'g': [1, 0, 0, 0]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
def test_and_mapper_binary(self, boolean_data):
# Test bitwise and applied to integers
mapper = {'f': ['c', 'd', 'e'],
'g': ['a', 'b']
}
prep = BitwiseOperator('and', mapper)
prep.fit(boolean_data)
result = prep.transform(boolean_data)
exp_dict = {'a': [1, 1, 0, 0],
'b': [1, 0, 0, 1],
'c': [0, 1, 1, 0],
'd': [1, 0, 1, 0],
'e': [0, 1, 0, 1],
'f': [0, 0, 0, 0],
'g': [1, 0, 0, 0]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
def test_and_extra_column_value_error(self, text_data):
# Test throwing error when replacing values with a non-existent column.
mapper = {'f': ['c', 'd', 'e'],
'g': ['a', 'b']
}
prep = BitwiseOperator('and', mapper)
with pytest.raises(ValueError):
prep.fit(text_data)
def test_unordered_index(self, boolean_data):
# Test unordered index is handled properly
new_index = list(boolean_data.index)
shuffle(new_index)
boolean_data.index = new_index
mapper = {
'f': ['c', 'd', 'e'],
'g': ['a', 'b']
}
prep = BitwiseOperator('or', mapper)
prep.fit(boolean_data)
result = prep.transform(boolean_data)
exp_dict = {'a': [True, True, False, False],
'b': [True, False, False, True],
'c': [False, True, True, False],
'd': [True, False, True, False],
'e': [False, True, False, True],
'f': [1, 1, 1, 1],
'g': [1, 1, 0, 1],
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
@pytest.mark.usefixtures("full_data_numeric")
class TestBoxCoxTransformer(object):
def test_fit_transform(self, full_data_numeric):
# test default functionality
prep = BoxCoxTransformer()
result = prep.fit_transform(full_data_numeric)
exp_dict = {'a': [0.71695113, 0.71695113, 0.71695113,
1.15921005, 1.48370246, 1.48370246,
2.1414305, 2.30371316, 2.30371316,
2.30371316],
'c': [0., 0.8310186, 1.47159953, 2.0132148,
2.0132148, 2.0132148, 3.32332097, 4.0444457,
4.0444457, 4.0444457],
'e': [0., 0.89952678, 1.67649211, 2.38322965,
3.04195191, 3.66477648, 4.25925117,
4.83048775, 5.38215505, 5.91700138]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
def test_fit_then_transform(self, full_data_numeric):
# test using fit then transform
prep = BoxCoxTransformer()
prep.fit(full_data_numeric)
result = prep.transform(full_data_numeric)
exp_dict = {'a': [0.71695113, 0.71695113, 0.71695113,
1.15921005, 1.48370246, 1.48370246,
2.1414305, 2.30371316, 2.30371316,
2.30371316],
'c': [0., 0.8310186, 1.47159953, 2.0132148,
2.0132148, 2.0132148, 3.32332097, 4.0444457,
4.0444457, 4.0444457],
'e': [0., 0.89952678, 1.67649211, 2.38322965,
3.04195191, 3.66477648, 4.25925117,
4.83048775, 5.38215505, 5.91700138]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
def test_unordered_index(self, full_data_numeric):
# Test unordered index is handled properly
new_index = list(full_data_numeric.index)
shuffle(new_index)
full_data_numeric.index = new_index
prep = BoxCoxTransformer()
result = prep.fit_transform(full_data_numeric)
exp_dict = {'a': [0.71695113, 0.71695113, 0.71695113,
1.15921005, 1.48370246, 1.48370246,
2.1414305, 2.30371316, 2.30371316,
2.30371316],
'c': [0., 0.8310186, 1.47159953, 2.0132148,
2.0132148, 2.0132148, 3.32332097, 4.0444457,
4.0444457, 4.0444457],
'e': [0., 0.89952678, 1.67649211, 2.38322965,
3.04195191, 3.66477648, 4.25925117,
4.83048775, 5.38215505, 5.91700138]
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
@pytest.mark.usefixtures("interaction_data")
class TestInteractionCreator(object):
def test_interactions(self, interaction_data):
# test generation of interactions
prep = InteractionCreator(columns1=['a', 'b'],
columns2=['c', 'd', 'e'])
result = prep.fit_transform(interaction_data)
exp_dict = {'a': [2, 3, 4, 5],
'b': [1, 0, 0, 1],
'c': [0, 1, 1, 0],
'd': [1, 0, 1, 0],
'e': [0, 1, 0, 1],
'a:c': [0, 3, 4, 0],
'a:d': [2, 0, 4, 0],
'a:e': [0, 3, 0, 5],
'b:c': [0, 0, 0, 0],
'b:d': [1, 0, 0, 0],
'b:e': [0, 0, 0, 1]
}
expected = pd.DataFrame(exp_dict)
print(result)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
def test_extra_column_value_error(self, interaction_data):
# test value error with non-existent columns
prep = InteractionCreator(columns1=['a', 'f'],
columns2=['c', 'd', 'g'])
with pytest.raises(ValueError):
prep.fit_transform(interaction_data)
def test_unordered_index(self, interaction_data):
# Test unordered index is handled properly
new_index = list(interaction_data.index)
shuffle(new_index)
interaction_data.index = new_index
prep = InteractionCreator(columns1=['a', 'b'],
columns2=['c', 'd', 'e'])
result = prep.fit_transform(interaction_data)
exp_dict = {'a': [2, 3, 4, 5],
'b': [1, 0, 0, 1],
'c': [0, 1, 1, 0],
'd': [1, 0, 1, 0],
'e': [0, 1, 0, 1],
'a:c': [0, 3, 4, 0],
'a:d': [2, 0, 4, 0],
'a:e': [0, 3, 0, 5],
'b:c': [0, 0, 0, 0],
'b:d': [1, 0, 0, 0],
'b:e': [0, 0, 0, 1]
}
expected = pd.DataFrame(exp_dict, index=new_index)
print(result)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
@pytest.mark.usefixtures("full_data_numeric")
class TestStandardScaler(object):
def test_fit_transform(self, full_data_numeric):
# test default functionality
prep = StandardScaler()
result = prep.fit_transform(full_data_numeric)
exp_dict = {'a': [-1.11027222, -1.11027222, -1.11027222, -0.71374643,
-0.31722063, -0.31722063, 0.87235674, 1.26888254,
1.26888254, 1.26888254],
'c': [-1.45260037, -1.10674314, -0.76088591, -0.41502868,
-0.41502868, -0.41502868, 0.62254302, 1.31425748,
1.31425748, 1.31425748],
'e': [-1.5666989, -1.21854359, -0.87038828, -0.52223297,
-0.17407766, 0.17407766, 0.52223297, 0.87038828,
1.21854359, 1.5666989]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
def test_fit_transform_defined_columns(self, full_data_numeric):
# test defining which columns to apply standardization to
prep = StandardScaler(columns=['a', 'e'])
result = prep.fit_transform(full_data_numeric)
exp_dict = {
'a': [-1.11027222, -1.11027222, -1.11027222, -0.71374643,
-0.31722063, -0.31722063, 0.87235674, 1.26888254,
1.26888254, 1.26888254],
'c': [1, 2, 3, 4, 4, 4, 7, 9, 9, 9],
'e': [-1.5666989, -1.21854359, -0.87038828, -0.52223297,
-0.17407766, 0.17407766, 0.52223297, 0.87038828,
1.21854359, 1.5666989]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
def test_fit_then_transform(self, full_data_numeric):
# test using fit then transform
prep = StandardScaler()
prep.fit(full_data_numeric)
result = prep.transform(full_data_numeric)
exp_dict = {'a': [-1.11027222, -1.11027222, -1.11027222, -0.71374643,
-0.31722063, -0.31722063, 0.87235674, 1.26888254,
1.26888254, 1.26888254],
'c': [-1.45260037, -1.10674314, -0.76088591, -0.41502868,
-0.41502868, -0.41502868, 0.62254302, 1.31425748,
1.31425748, 1.31425748],
'e': [-1.5666989, -1.21854359, -0.87038828, -0.52223297,
-0.17407766, 0.17407766, 0.52223297, 0.87038828,
1.21854359, 1.5666989]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
def test_fit_then_transform_defined_columns(self, full_data_numeric):
# test defining which columns to apply standardization to
prep = StandardScaler(columns=['a', 'e'])
prep.fit(full_data_numeric)
result = prep.transform(full_data_numeric)
exp_dict = {
'a': [-1.11027222, -1.11027222, -1.11027222, -0.71374643,
-0.31722063, -0.31722063, 0.87235674, 1.26888254,
1.26888254, 1.26888254],
'c': [1, 2, 3, 4, 4, 4, 7, 9, 9, 9],
'e': [-1.5666989, -1.21854359, -0.87038828, -0.52223297,
-0.17407766, 0.17407766, 0.52223297, 0.87038828,
1.21854359, 1.5666989]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
def test_fit_then_partial_transform(self, full_data_numeric):
# test using fit then transform on specified columns
prep = StandardScaler()
prep.fit(full_data_numeric)
result = prep.transform(X=full_data_numeric, partial_cols=['c', 'e'])
exp_dict = {'a': [-1.11027222, -1.11027222, -1.11027222, -0.71374643,
-0.31722063, -0.31722063, 0.87235674, 1.26888254,
1.26888254, 1.26888254],
'c': [-1.45260037, -1.10674314, -0.76088591, -0.41502868,
-0.41502868, -0.41502868, 0.62254302, 1.31425748,
1.31425748, 1.31425748],
'e': [-1.5666989, -1.21854359, -0.87038828, -0.52223297,
-0.17407766, 0.17407766, 0.52223297, 0.87038828,
1.21854359, 1.5666989]
}
expected = pd.DataFrame(exp_dict)
expected = expected[['c', 'e']]
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=True)
def test_unordered_index(self, full_data_numeric):
# Test unordered index is handled properly
new_index = list(full_data_numeric.index)
shuffle(new_index)
full_data_numeric.index = new_index
prep = StandardScaler()
prep.fit(full_data_numeric)
result = prep.transform(full_data_numeric)
exp_dict = {'a': [-1.11027222, -1.11027222, -1.11027222, -0.71374643,
-0.31722063, -0.31722063, 0.87235674, 1.26888254,
1.26888254, 1.26888254],
'c': [-1.45260037, -1.10674314, -0.76088591, -0.41502868,
-0.41502868, -0.41502868, 0.62254302, 1.31425748,
1.31425748, 1.31425748],
'e': [-1.5666989, -1.21854359, -0.87038828, -0.52223297,
-0.17407766, 0.17407766, 0.52223297, 0.87038828,
1.21854359, 1.5666989],
}
expected = pd.DataFrame(exp_dict, index=new_index)
tm.assert_frame_equal(result, expected, check_dtype=False,
check_like=False)
def test_inverse_transform(self, full_data_numeric):
# test inverse_transform
new_index = list(full_data_numeric.index)
shuffle(new_index)
full_data_numeric.index = new_index
prep = StandardScaler()
transformed = prep.fit_transform(full_data_numeric)
original = prep.inverse_transform(transformed)
tm.assert_frame_equal(
full_data_numeric,
original,
check_dtype=False,
check_like=True
)
def test_inverse_partial_transform(self, full_data_numeric):
# test inverse_transform restricted to a subset of columns
new_index = list(full_data_numeric.index)
shuffle(new_index)
full_data_numeric.index = new_index
prep = StandardScaler()
transformed = prep.fit_transform(full_data_numeric)
partial_original = prep.inverse_transform(
transformed, partial_cols=['a', 'e']
)
tm.assert_frame_equal(
full_data_numeric[['a', 'e']],
partial_original,
check_dtype=False,
check_like=True
)
def test_inverse_transform_defined_columns(self, full_data_numeric):
# test defining which columns to apply standardization to
prep = StandardScaler(columns=['a', 'e'])
prep.fit(full_data_numeric)
transformed = prep.fit_transform(full_data_numeric)
result = prep.inverse_transform(transformed)
tm.assert_frame_equal(
result, full_data_numeric, check_dtype=False, check_like=True
)
@pytest.mark.usefixtures("full_data_numeric")
class TestPolynomialFeatures(object):
def test_polynomial_features(self, full_data_numeric):
# test polynomial feature creation
prep = PolynomialFeatures(degree=3)
result = prep.fit_transform(full_data_numeric)
exp_dict = {
'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
'c': [1, 2, 3, 4, 4, 4, 7, 9, 9, 9],
'e': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'a^2': [4, 4, 4, 9, 16, 16, 49, 64, 64, 64],
'a*c': [2, 4, 6, 12, 16, 16, 49, 72, 72, 72],
'a*e': [2, 4, 6, 12, 20, 24, 49, 64, 72, 80],
'c^2': [1, 4, 9, 16, 16, 16, 49, 81, 81, 81],
'c*e': [1, 4, 9, 16, 20, 24, 49, 72, 81, 90],
'e^2': [1, 4, 9, 16, 25, 36, 49, 64, 81, 100],
'a^3': [8, 8, 8, 27, 64, 64, 343, 512, 512, 512],
'a^2*c': [4, 8, 12, 36, 64, 64, 343, 576, 576, 576],
'a^2*e': [4, 8, 12, 36, 80, 96, 343, 512, 576, 640],
'a*c^2': [2, 8, 18, 48, 64, 64, 343, 648, 648, 648],
'a*c*e': [2, 8, 18, 48, 80, 96, 343, 576, 648, 720],
'a*e^2': [2, 8, 18, 48, 100, 144, 343, 512, 648, 800],
'c^3': [1, 8, 27, 64, 64, 64, 343, 729, 729, 729],
'c^2*e': [1, 8, 27, 64, 80, 96, 343, 648, 729, 810],
'c*e^2': [1, 8, 27, 64, 100, 144, 343, 576, 729, 900],
'e^3': [1, 8, 27, 64, 125, 216, 343, 512, 729, 1000]
}
expected = pd.DataFrame(exp_dict)
expected = expected[[
'a', 'c', 'e', 'a^2', 'a*c', 'a*e',
'c^2', 'c*e', 'e^2', 'a^3', 'a^2*c',
'a^2*e', 'a*c^2', 'a*c*e', 'a*e^2',
'c^3', 'c^2*e', 'c*e^2', 'e^3'
]]
tm.assert_frame_equal(
result,
expected,
check_dtype=False,
)
def test_polynomial_features_interactions(self, full_data_numeric):
# test polynomial feature creation
prep = PolynomialFeatures(interaction_only=True)
result = prep.fit_transform(full_data_numeric)
exp_dict = {
'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
'c': [1, 2, 3, 4, 4, 4, 7, 9, 9, 9],
'e': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'a*c': [2, 4, 6, 12, 16, 16, 49, 72, 72, 72],
'a*e': [2, 4, 6, 12, 20, 24, 49, 64, 72, 80],
'c*e': [1, 4, 9, 16, 20, 24, 49, 72, 81, 90],
}
expected = pd.DataFrame(exp_dict)
expected = expected[[
'a', 'c', 'e', 'a*c', 'a*e', 'c*e'
]]
tm.assert_frame_equal(
result,
expected,
check_dtype=False,
)
def test_unordered_index(self, full_data_numeric):
# Test unordered index is handled properly
new_index = list(full_data_numeric.index)
shuffle(new_index)
full_data_numeric.index = new_index
# test polynomial feature creation
prep = PolynomialFeatures(degree=3)
result = prep.fit_transform(full_data_numeric)
exp_dict = {
'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
'c': [1, 2, 3, 4, 4, 4, 7, 9, 9, 9],
'e': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'a^2': [4, 4, 4, 9, 16, 16, 49, 64, 64, 64],
'a*c': [2, 4, 6, 12, 16, 16, 49, 72, 72, 72],
'a*e': [2, 4, 6, 12, 20, 24, 49, 64, 72, 80],
'c^2': [1, 4, 9, 16, 16, 16, 49, 81, 81, 81],
'c*e': [1, 4, 9, 16, 20, 24, 49, 72, 81, 90],
'e^2': [1, 4, 9, 16, 25, 36, 49, 64, 81, 100],
'a^3': [8, 8, 8, 27, 64, 64, 343, 512, 512, 512],
'a^2*c': [4, 8, 12, 36, 64, 64, 343, 576, 576, 576],
'a^2*e': [4, 8, 12, 36, 80, 96, 343, 512, 576, 640],
'a*c^2': [2, 8, 18, 48, 64, 64, 343, 648, 648, 648],
'a*c*e': [2, 8, 18, 48, 80, 96, 343, 576, 648, 720],
'a*e^2': [2, 8, 18, 48, 100, 144, 343, 512, 648, 800],
'c^3': [1, 8, 27, 64, 64, 64, 343, 729, 729, 729],
'c^2*e': [1, 8, 27, 64, 80, 96, 343, 648, 729, 810],
'c*e^2': [1, 8, 27, 64, 100, 144, 343, 576, 729, 900],
'e^3': [1, 8, 27, 64, 125, 216, 343, 512, 729, 1000]
}
expected = pd.DataFrame(exp_dict, index=new_index)
expected = expected[[
'a', 'c', 'e', 'a^2', 'a*c', 'a*e',
'c^2', 'c*e', 'e^2', 'a^3', 'a^2*c',
'a^2*e', 'a*c^2', 'a*c*e', 'a*e^2',
'c^3', 'c^2*e', 'c*e^2', 'e^3'
]]
tm.assert_frame_equal(
result,
expected,
check_dtype=False,
)
@pytest.mark.usefixtures("missing_data")
class TestContinuousFeatureBinner(object):
def test_feature_binning(self, missing_data):
# test standard use
prep = ContinuousFeatureBinner(
field='a',
bins=[0, 3, 6, 9]
)
result = prep.fit_transform(missing_data)
expected = {
'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123', '234', '456',
'456', '789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, None, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None, None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None],
'a_GRP': ['(0.0, 3.0]', '(0.0, 3.0]', 'Other', 'Other',
'(3.0, 6.0]', '(3.0, 6.0]', '(6.0, 9.0]',
'(6.0, 9.0]', 'Other', '(6.0, 9.0]']
}
expected = pd.DataFrame(expected, index=missing_data.index)
expected = expected[['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'a_GRP']]
tm.assert_frame_equal(
result,
expected,
check_dtype=False,
)
def test_missing_field_error(self, missing_data):
# test that specifying a field that doesn't exist returns error
prep = ContinuousFeatureBinner(
field='x',
bins=[0, 3, 6, 9]
)
with pytest.raises(ValueError):
prep.fit_transform(missing_data)
def test_feature_binning_right(self, missing_data):
# test binning with right-exclusive intervals (right_inclusive=False)
prep = ContinuousFeatureBinner(
field='a',
bins=[0, 4, 8],
right_inclusive=False
)
result = prep.fit_transform(missing_data)
expected = {
'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123', '234', '456',
'456', '789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, None, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None, None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None],
'a_GRP': ['[0.0, 4.0)', '[0.0, 4.0)', 'Other', 'Other',
'[4.0, 8.0)', '[4.0, 8.0)', '[4.0, 8.0)', 'Other',
'Other', 'Other']
}
expected = | pd.DataFrame(expected, index=missing_data.index) | pandas.DataFrame |
import pandas as pd
import numpy as np
import scipy.sparse as spl
from concurrent.futures import ProcessPoolExecutor
import sys
threads = 4
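# Candidate configurations; each entry is unpacked below as (split, knn_k, test_task, powb) for the task index given on the command line.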
all_tasks = [
[5, 8000, ['5t', '5nt'], 0.352],
[10, 12000, ['10t', '10nt'], 0.38],
[25, 40000, ['25f'], 0.43386578246281293],
[25, 9000, ['25r'], 0.4],
[100, 4000, ['100r'], 0.39],
]
split, knn_k, test_task, powb = all_tasks[int(sys.argv[1])]
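# Map each unique value of a column to a dense integer code (starting at min_val); returns the recoded column and the value-to-code mapping.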
def recode(column, min_val=0):
uniques = column.unique()
codes = range(min_val, len(uniques) + min_val)
code_map = dict(zip(uniques, codes))
return (column.map(code_map), code_map)
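# Invert a mapping produced by recode(), restoring the original values.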
def reverse_code(column, code_map):
inv_map = {v: k for k, v in code_map.items()}
return column.map(inv_map)
playlist_meta = pd.read_csv('data/million_playlist_dataset/playlist_meta.csv')
playlist_meta_c = pd.read_csv('data/challenge_set/playlist_meta.csv')
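# Combine playlist metadata from the million playlist dataset and the challenge set into a single table.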
playlist_meta = | pd.concat([playlist_meta, playlist_meta_c], axis=0, ignore_index=True) | pandas.concat |
import tensorflow as tf
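# Let TensorFlow allocate GPU memory on demand instead of reserving it all up front.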
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
tf.keras.backend.set_session(tf.Session(config=tf_config))
from tensorflow.python.keras.models import load_model
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, cohen_kappa_score
import configparser, argparse, datetime, json, os, visualize
import pandas as pd
from data_generator import test_crop_generator
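# Saved model checkpoints to evaluate, keyed by task name; filenames embed the training timestamp.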
timestamps = {'temporal-1dcnn':['crop-1dcnn_1dcnn'+x+'.h5' for x in ['2020-02-13 17:01:10']]}
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--configPath', help="""path to config file""", default='./config.ini')
parser.add_argument('-t', '--task', help="""task you are performing - refers to the header for each section in the config file""", default='temporal-1dcnn')
parser_args = parser.parse_args()
config = configparser.ConfigParser()
config.read(parser_args.configPath)
te_path = str(config[parser_args.task]['TEST_FOLDER'])
test_accuracies = []
kappa_scores = []
for timestamp in timestamps[parser_args.task]:
model_name = os.path.join('/home/kgadira/multi-modal-crop-classification/8_models/', timestamp)
print(model_name)
test_datagen = test_crop_generator(input_path=te_path, batch_size=1, mode="test", num_classes =6, epsilon = 0, resize_params = (224, 224), do_shuffle=True)
print('Loading model {}'.format(model_name))
model = load_model(model_name)
print(model.summary())
all_predictions = []
all_gt = []
data_paths = []
results = []
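# Run the model over the test generator, collecting raw outputs, predicted classes, ground-truth labels, and source paths.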
for te_data, label, curr_path in test_datagen:
result = model.predict(te_data, verbose=0)
results.append(result)
#print(result.shape)
prediction = np.argmax(result, axis = 1)
#print(prediction, label)
all_predictions.append(prediction)
all_gt.append(label)
data_paths.append(curr_path)
#print(all_predictions)
results = np.array(results)
#results = np.squeeze(results, axis=1)
print(results.shape)
cm = confusion_matrix(all_gt, all_predictions)
print(cm)
classes_lst = ['Corn', 'Cotton', 'Soy', 'Spring Wheat', 'Winter Wheat', 'Barley']
creport = classification_report(y_true = all_gt, y_pred=all_predictions, target_names = classes_lst, digits = 4, output_dict = True)
creport_df = | pd.DataFrame(creport) | pandas.DataFrame |
""" Test cases for DataFrame.plot """
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
@td.skip_if_no_mpl
class TestDataFrameColor(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.tdf = tm.makeTimeDataFrame()
self.hexbin_df = DataFrame(
{
"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(size=20),
}
)
def test_mpl2_color_cycle_str(self):
# GH 15516
df = DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
colors = ["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9"]
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", "MatplotlibDeprecationWarning")
for color in colors:
_check_plot_works(df.plot, color=color)
# if warning is raised, check that it is the exact problematic one
# GH 36972
if w:
match = "Support for uppercase single-letter colors is deprecated"
warning_message = str(w[0].message)
msg = "MatplotlibDeprecationWarning related to CN colors was raised"
assert match not in warning_message, msg
def test_color_single_series_list(self):
# GH 3486
df = DataFrame({"A": [1, 2, 3]})
_check_plot_works(df.plot, color=["red"])
@pytest.mark.parametrize("color", [(1, 0, 0), (1, 0, 0, 0.5)])
def test_rgb_tuple_color(self, color):
# GH 16695
df = DataFrame({"x": [1, 2], "y": [3, 4]})
_check_plot_works(df.plot, x="x", y="y", color=color)
def test_color_empty_string(self):
df = DataFrame(np.random.randn(10, 2))
with pytest.raises(ValueError):
df.plot(color="")
def test_color_and_style_arguments(self):
df = DataFrame({"x": [1, 2], "y": [3, 4]})
# passing both 'color' and 'style' arguments should be allowed
# if there is no color symbol in the style strings:
ax = df.plot(color=["red", "black"], style=["-", "--"])
# check that the linestyles are correctly set:
linestyle = [line.get_linestyle() for line in ax.lines]
assert linestyle == ["-", "--"]
# check that the colors are correctly set:
color = [line.get_color() for line in ax.lines]
assert color == ["red", "black"]
# passing both 'color' and 'style' arguments should not be allowed
# if there is a color symbol in the style strings:
with pytest.raises(ValueError):
df.plot(color=["red", "black"], style=["k-", "r--"])
@pytest.mark.parametrize(
"color, expected",
[
("green", ["green"] * 4),
(["yellow", "red", "green", "blue"], ["yellow", "red", "green", "blue"]),
],
)
def test_color_and_marker(self, color, expected):
# GH 21003
df = DataFrame(np.random.random((7, 4)))
ax = df.plot(color=color, style="d--")
# check colors
result = [i.get_color() for i in ax.lines]
assert result == expected
# check markers and linestyles
assert all(i.get_linestyle() == "--" for i in ax.lines)
assert all(i.get_marker() == "d" for i in ax.lines)
@pytest.mark.slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
default_colors = self._unpack_cycler(plt.rcParams)
df = DataFrame(np.random.randn(5, 5))
ax = df.plot.bar()
self._check_colors(ax.patches[::5], facecolors=default_colors[:5])
tm.close()
custom_colors = "rgcby"
ax = df.plot.bar(color=custom_colors)
self._check_colors(ax.patches[::5], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot.bar(colormap="jet")
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot.bar(colormap=cm.jet)
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
ax = df.loc[:, [0]].plot.bar(color="DodgerBlue")
self._check_colors([ax.patches[0]], facecolors=["DodgerBlue"])
tm.close()
ax = df.plot(kind="bar", color="green")
self._check_colors(ax.patches[::5], facecolors=["green"] * 5)
tm.close()
def test_bar_user_colors(self):
df = DataFrame(
{"A": range(4), "B": range(1, 5), "color": ["red", "blue", "blue", "red"]}
)
# This should *only* work when `y` is specified, else
# we use one color per column
ax = df.plot.bar(y="A", color=df["color"])
result = [p.get_facecolor() for p in ax.patches]
expected = [
(1.0, 0.0, 0.0, 1.0),
(0.0, 0.0, 1.0, 1.0),
(0.0, 0.0, 1.0, 1.0),
(1.0, 0.0, 0.0, 1.0),
]
assert result == expected
@pytest.mark.slow
def test_if_scatterplot_colorbar_affects_xaxis_visibility(self):
# addressing issue #10611, to ensure colorbar does not
# interfere with x-axis label and ticklabels with
# ipython inline backend.
random_array = np.random.random((1000, 3))
df = DataFrame(random_array, columns=["A label", "B label", "C label"])
ax1 = df.plot.scatter(x="A label", y="B label")
ax2 = df.plot.scatter(x="A label", y="B label", c="C label")
vis1 = [vis.get_visible() for vis in ax1.xaxis.get_minorticklabels()]
vis2 = [vis.get_visible() for vis in ax2.xaxis.get_minorticklabels()]
assert vis1 == vis2
vis1 = [vis.get_visible() for vis in ax1.xaxis.get_majorticklabels()]
vis2 = [vis.get_visible() for vis in ax2.xaxis.get_majorticklabels()]
assert vis1 == vis2
assert (
ax1.xaxis.get_label().get_visible() == ax2.xaxis.get_label().get_visible()
)
@pytest.mark.slow
def test_if_hexbin_xaxis_label_is_visible(self):
# addressing issue #10678, to ensure colorbar does not
# interfere with x-axis label and ticklabels with
# ipython inline backend.
random_array = np.random.random((1000, 3))
df = DataFrame(random_array, columns=["A label", "B label", "C label"])
ax = df.plot.hexbin("A label", "B label", gridsize=12)
assert all(vis.get_visible() for vis in ax.xaxis.get_minorticklabels())
assert all(vis.get_visible() for vis in ax.xaxis.get_majorticklabels())
assert ax.xaxis.get_label().get_visible()
@pytest.mark.slow
def test_if_scatterplot_colorbars_are_next_to_parent_axes(self):
import matplotlib.pyplot as plt
random_array = np.random.random((1000, 3))
df = DataFrame(random_array, columns=["A label", "B label", "C label"])
fig, axes = plt.subplots(1, 2)
df.plot.scatter("A label", "B label", c="C label", ax=axes[0])
df.plot.scatter("A label", "B label", c="C label", ax=axes[1])
plt.tight_layout()
points = np.array([ax.get_position().get_points() for ax in fig.axes])
axes_x_coords = points[:, :, 0]
parent_distance = axes_x_coords[1, :] - axes_x_coords[0, :]
colorbar_distance = axes_x_coords[3, :] - axes_x_coords[2, :]
assert np.isclose(parent_distance, colorbar_distance, atol=1e-7).all()
@pytest.mark.parametrize("cmap", [None, "Greys"])
def test_scatter_with_c_column_name_with_colors(self, cmap):
# https://github.com/pandas-dev/pandas/issues/34316
df = DataFrame(
[[5.1, 3.5], [4.9, 3.0], [7.0, 3.2], [6.4, 3.2], [5.9, 3.0]],
columns=["length", "width"],
)
df["species"] = ["r", "r", "g", "g", "b"]
ax = df.plot.scatter(x=0, y=1, c="species", cmap=cmap)
assert ax.collections[0].colorbar is None
def test_scatter_colors(self):
df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]})
with pytest.raises(TypeError):
df.plot.scatter(x="a", y="b", c="c", color="green")
default_colors = self._unpack_cycler(self.plt.rcParams)
ax = df.plot.scatter(x="a", y="b", c="c")
tm.assert_numpy_array_equal(
ax.collections[0].get_facecolor()[0],
np.array(self.colorconverter.to_rgba(default_colors[0])),
)
ax = df.plot.scatter(x="a", y="b", color="white")
tm.assert_numpy_array_equal(
ax.collections[0].get_facecolor()[0],
np.array([1, 1, 1, 1], dtype=np.float64),
)
def test_scatter_colorbar_different_cmap(self):
# GH 33389
import matplotlib.pyplot as plt
df = DataFrame({"x": [1, 2, 3], "y": [1, 3, 2], "c": [1, 2, 3]})
df["x2"] = df["x"] + 1
fig, ax = plt.subplots()
df.plot("x", "y", c="c", kind="scatter", cmap="cividis", ax=ax)
df.plot("x2", "y", c="c", kind="scatter", cmap="magma", ax=ax)
assert ax.collections[0].cmap.name == "cividis"
assert ax.collections[1].cmap.name == "magma"
@pytest.mark.slow
def test_line_colors(self):
from matplotlib import cm
custom_colors = "rgcby"
df = DataFrame(np.random.randn(5, 5))
ax = df.plot(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
ax2 = df.plot(color=custom_colors)
lines2 = ax2.get_lines()
for l1, l2 in zip(ax.get_lines(), lines2):
assert l1.get_color() == l2.get_color()
tm.close()
ax = df.plot(colormap="jet")
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot(colormap=cm.jet)
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
# make color a list if plotting one column frame
# handles cases like df.plot(color='DodgerBlue')
ax = df.loc[:, [0]].plot(color="DodgerBlue")
self._check_colors(ax.lines, linecolors=["DodgerBlue"])
ax = df.plot(color="red")
self._check_colors(ax.get_lines(), linecolors=["red"] * 5)
tm.close()
# GH 10299
custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"]
ax = df.plot(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
@pytest.mark.slow
def test_dont_modify_colors(self):
colors = ["r", "g", "b"]
DataFrame(np.random.rand(10, 2)).plot(color=colors)
assert len(colors) == 3
@pytest.mark.slow
def test_line_colors_and_styles_subplots(self):
# GH 9894
from matplotlib import cm
default_colors = self._unpack_cycler(self.plt.rcParams)
df = DataFrame(np.random.randn(5, 5))
axes = df.plot(subplots=True)
for ax, c in zip(axes, list(default_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# single color char
axes = df.plot(subplots=True, color="k")
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=["k"])
tm.close()
# single color str
axes = df.plot(subplots=True, color="green")
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=["green"])
tm.close()
custom_colors = "rgcby"
axes = df.plot(color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
axes = df.plot(color=list(custom_colors), subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# GH 10299
custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"]
axes = df.plot(color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
for cmap in ["jet", cm.jet]:
axes = df.plot(colormap=cmap, subplots=True)
for ax, c in zip(axes, rgba_colors):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# make color a list if plotting one column frame
# handles cases like df.plot(color='DodgerBlue')
axes = df.loc[:, [0]].plot(color="DodgerBlue", subplots=True)
self._check_colors(axes[0].lines, linecolors=["DodgerBlue"])
# single character style
axes = df.plot(style="r", subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=["r"])
tm.close()
# list of styles
styles = list("rgcby")
axes = df.plot(style=styles, subplots=True)
for ax, c in zip(axes, styles):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
@pytest.mark.slow
def test_area_colors(self):
from matplotlib import cm
from matplotlib.collections import PolyCollection
custom_colors = "rgcby"
df = DataFrame(np.random.rand(5, 5))
ax = df.plot.area(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
self._check_colors(poly, facecolors=custom_colors)
handles, labels = ax.get_legend_handles_labels()
self._check_colors(handles, facecolors=custom_colors)
for h in handles:
assert h.get_alpha() is None
tm.close()
ax = df.plot.area(colormap="jet")
jet_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
self._check_colors(poly, facecolors=jet_colors)
handles, labels = ax.get_legend_handles_labels()
self._check_colors(handles, facecolors=jet_colors)
for h in handles:
assert h.get_alpha() is None
tm.close()
# When stacked=False, alpha is set to 0.5
ax = df.plot.area(colormap=cm.jet, stacked=False)
self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]
self._check_colors(poly, facecolors=jet_with_alpha)
handles, labels = ax.get_legend_handles_labels()
linecolors = jet_with_alpha
self._check_colors(handles[: len(jet_colors)], linecolors=linecolors)
for h in handles:
assert h.get_alpha() == 0.5
@pytest.mark.slow
def test_hist_colors(self):
default_colors = self._unpack_cycler(self.plt.rcParams)
df = DataFrame(np.random.randn(5, 5))
ax = df.plot.hist()
self._check_colors(ax.patches[::10], facecolors=default_colors[:5])
tm.close()
custom_colors = "rgcby"
ax = df.plot.hist(color=custom_colors)
self._check_colors(ax.patches[::10], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot.hist(colormap="jet")
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot.hist(colormap=cm.jet)
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
ax = df.loc[:, [0]].plot.hist(color="DodgerBlue")
self._check_colors([ax.patches[0]], facecolors=["DodgerBlue"])
ax = df.plot(kind="hist", color="green")
self._check_colors(ax.patches[::10], facecolors=["green"] * 5)
tm.close()
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_colors(self):
from matplotlib import cm
custom_colors = "rgcby"
df = DataFrame(np.random.rand(5, 5))
ax = df.plot.kde(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
ax = df.plot.kde(colormap="jet")
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot.kde(colormap=cm.jet)
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_colors_and_styles_subplots(self):
from matplotlib import cm
default_colors = self._unpack_cycler(self.plt.rcParams)
df = DataFrame(np.random.randn(5, 5))
axes = df.plot(kind="kde", subplots=True)
for ax, c in zip(axes, list(default_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# single color char
axes = df.plot(kind="kde", color="k", subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=["k"])
tm.close()
# single color str
axes = df.plot(kind="kde", color="red", subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=["red"])
tm.close()
custom_colors = "rgcby"
axes = df.plot(kind="kde", color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
for cmap in ["jet", cm.jet]:
axes = df.plot(kind="kde", colormap=cmap, subplots=True)
for ax, c in zip(axes, rgba_colors):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# make color a list if plotting one column frame
# handles cases like df.plot(color='DodgerBlue')
axes = df.loc[:, [0]].plot(kind="kde", color="DodgerBlue", subplots=True)
self._check_colors(axes[0].lines, linecolors=["DodgerBlue"])
# single character style
axes = df.plot(kind="kde", style="r", subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=["r"])
tm.close()
# list of styles
styles = list("rgcby")
axes = df.plot(kind="kde", style=styles, subplots=True)
for ax, c in zip(axes, styles):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
@pytest.mark.slow
def test_boxplot_colors(self):
def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None):
# TODO: outside this func?
if fliers_c is None:
fliers_c = "k"
self._check_colors(bp["boxes"], linecolors=[box_c] * len(bp["boxes"]))
self._check_colors(
bp["whiskers"], linecolors=[whiskers_c] * len(bp["whiskers"])
)
self._check_colors(
bp["medians"], linecolors=[medians_c] * len(bp["medians"])
)
self._check_colors(bp["fliers"], linecolors=[fliers_c] * len(bp["fliers"]))
self._check_colors(bp["caps"], linecolors=[caps_c] * len(bp["caps"]))
default_colors = self._unpack_cycler(self.plt.rcParams)
df = DataFrame(np.random.randn(5, 5))
bp = df.plot.box(return_type="dict")
_check_colors(bp, default_colors[0], default_colors[0], default_colors[2])
tm.close()
dict_colors = dict(
boxes="#572923", whiskers="#982042", medians="#804823", caps="#123456"
)
bp = df.plot.box(color=dict_colors, sym="r+", return_type="dict")
_check_colors(
bp,
dict_colors["boxes"],
dict_colors["whiskers"],
dict_colors["medians"],
dict_colors["caps"],
"r",
)
tm.close()
# partial colors
dict_colors = dict(whiskers="c", medians="m")
bp = df.plot.box(color=dict_colors, return_type="dict")
_check_colors(bp, default_colors[0], "c", "m")
| tm.close() | pandas._testing.close |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime, timedelta
from pkg_resources import resource_stream
import numpy as np
import pandas as pd
import pytest
import pytz
from eemeter.transform import (
as_freq,
clean_caltrack_billing_data,
downsample_and_clean_caltrack_daily_data,
clean_caltrack_billing_daily_data,
day_counts,
get_baseline_data,
get_reporting_data,
get_terms,
remove_duplicates,
NoBaselineDataError,
NoReportingDataError,
overwrite_partial_rows_with_nan,
)
def test_as_freq_not_series(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
with pytest.raises(ValueError):
as_freq(meter_data, freq="H")
def test_as_freq_hourly(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_hourly = as_freq(meter_data.value, freq="H")
assert as_hourly.shape == (18961,)
assert round(meter_data.value.sum(), 1) == round(as_hourly.sum(), 1) == 21290.2
def test_as_freq_daily(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_daily = as_freq(meter_data.value, freq="D")
assert as_daily.shape == (792,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 21290.2
def test_as_freq_daily_all_nones_instantaneous(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
meter_data["value"] = np.nan
assert meter_data.shape == (27, 1)
as_daily = as_freq(meter_data.value, freq="D", series_type="instantaneous")
assert as_daily.shape == (792,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 0
def test_as_freq_daily_all_nones(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
meter_data["value"] = np.nan
assert meter_data.shape == (27, 1)
as_daily = as_freq(meter_data.value, freq="D")
assert as_daily.shape == (792,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 0
def test_as_freq_month_start(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_month_start = as_freq(meter_data.value, freq="MS")
assert as_month_start.shape == (28,)
assert round(meter_data.value.sum(), 1) == round(as_month_start.sum(), 1) == 21290.2
def test_as_freq_hourly_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417,)
as_hourly = as_freq(temperature_data, freq="H", series_type="instantaneous")
assert as_hourly.shape == (19417,)
assert round(temperature_data.mean(), 1) == round(as_hourly.mean(), 1) == 54.6
def test_as_freq_daily_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417,)
as_daily = as_freq(temperature_data, freq="D", series_type="instantaneous")
assert as_daily.shape == (811,)
assert abs(temperature_data.mean() - as_daily.mean()) <= 0.1
def test_as_freq_month_start_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417,)
as_month_start = as_freq(temperature_data, freq="MS", series_type="instantaneous")
assert as_month_start.shape == (29,)
assert round(as_month_start.mean(), 1) == 53.4
def test_as_freq_daily_temperature_monthly(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temperature_data = temperature_data.groupby(pd.Grouper(freq="MS")).mean()
assert temperature_data.shape == (28,)
as_daily = as_freq(temperature_data, freq="D", series_type="instantaneous")
assert as_daily.shape == (824,)
assert round(as_daily.mean(), 1) == 54.5
def test_as_freq_empty():
meter_data = pd.DataFrame({"value": []})
empty_meter_data = as_freq(meter_data.value, freq="H")
assert empty_meter_data.empty
def test_as_freq_preserves_nulls(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
monthly_with_nulls = meter_data[meter_data.index.year != 2016].reindex(
meter_data.index
)
daily_with_nulls = as_freq(monthly_with_nulls.value, freq="D")
assert (
round(monthly_with_nulls.value.sum(), 2)
== round(daily_with_nulls.sum(), 2)
== 11094.05
)
assert monthly_with_nulls.value.isnull().sum() == 13
assert daily_with_nulls.isnull().sum() == 365
def test_day_counts(il_electricity_cdd_hdd_billing_monthly):
data = il_electricity_cdd_hdd_billing_monthly["meter_data"].value
counts = day_counts(data.index)
assert counts.shape == (27,)
assert counts.iloc[0] == 29.0
assert pd.isnull(counts.iloc[-1])
assert counts.sum() == 790.0
def test_day_counts_empty_series():
index = pd.DatetimeIndex([])
index.freq = None
data = pd.Series([], index=index)
counts = day_counts(data.index)
assert counts.shape == (0,)
def test_get_baseline_data(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
baseline_data, warnings = get_baseline_data(meter_data)
assert meter_data.shape == baseline_data.shape == (19417, 1)
assert len(warnings) == 0
def test_get_baseline_data_with_timezones(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data.tz_convert("America/New_York")
)
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data.tz_convert("Australia/Sydney")
)
assert len(warnings) == 0
def test_get_baseline_data_with_end(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_start_date = il_electricity_cdd_hdd_hourly["blackout_start_date"]
baseline_data, warnings = get_baseline_data(meter_data, end=blackout_start_date)
assert meter_data.shape != baseline_data.shape == (8761, 1)
assert len(warnings) == 0
def test_get_baseline_data_with_end_no_max_days(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_start_date = il_electricity_cdd_hdd_hourly["blackout_start_date"]
baseline_data, warnings = get_baseline_data(
meter_data, end=blackout_start_date, max_days=None
)
assert meter_data.shape != baseline_data.shape == (9595, 1)
assert len(warnings) == 0
def test_get_baseline_data_empty(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_start_date = il_electricity_cdd_hdd_hourly["blackout_start_date"]
with pytest.raises(NoBaselineDataError):
get_baseline_data(meter_data, end=pd.Timestamp("2000").tz_localize("UTC"))
def test_get_baseline_data_start_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
start = meter_data.index.min() - timedelta(days=1)
baseline_data, warnings = get_baseline_data(meter_data, start=start, max_days=None)
assert meter_data.shape == baseline_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_baseline_data.gap_at_baseline_start"
assert (
warning.description
== "Data does not have coverage at requested baseline start date."
)
assert warning.data == {
"data_start": "2015-11-22T06:00:00+00:00",
"requested_start": "2015-11-21T06:00:00+00:00",
}
def test_get_baseline_data_end_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
end = meter_data.index.max() + timedelta(days=1)
baseline_data, warnings = get_baseline_data(meter_data, end=end, max_days=None)
assert meter_data.shape == baseline_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_baseline_data.gap_at_baseline_end"
assert (
warning.description
== "Data does not have coverage at requested baseline end date."
)
assert warning.data == {
"data_end": "2018-02-08T06:00:00+00:00",
"requested_end": "2018-02-09T06:00:00+00:00",
}
def test_get_baseline_data_with_overshoot(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=32,
allow_billing_period_overshoot=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=32,
allow_billing_period_overshoot=False,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_baseline_data_with_ignored_gap(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=False,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_baseline_data_with_overshoot_and_ignored_gap(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2016, 11, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=False,
ignore_billing_period_gap_for_day_count=False,
)
assert baseline_data.shape == (1, 1)
assert round(baseline_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_baseline_data_n_days_billing_period_overshoot(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
baseline_data, warnings = get_baseline_data(
meter_data,
end=datetime(2017, 11, 9, tzinfo=pytz.UTC),
max_days=45,
allow_billing_period_overshoot=True,
n_days_billing_period_overshoot=45,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 526.25
assert len(warnings) == 0
def test_get_baseline_data_too_far_from_date(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
end_date = datetime(2020, 11, 9, tzinfo=pytz.UTC)
max_days = 45
baseline_data, warnings = get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (2, 1)
assert round(baseline_data.value.sum(), 2) == 1393.4
assert len(warnings) == 0
with pytest.raises(NoBaselineDataError):
get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
n_days_billing_period_overshoot=45,
ignore_billing_period_gap_for_day_count=True,
)
baseline_data, warnings = get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
allow_billing_period_overshoot=True,
ignore_billing_period_gap_for_day_count=True,
)
assert baseline_data.shape == (3, 1)
assert round(baseline_data.value.sum(), 2) == 2043.92
assert len(warnings) == 0
# Includes 3 data points because data at index -3 is closer to start target
# than data at index -2
start_target = baseline_data.index[-1] - timedelta(days=max_days)
assert abs((baseline_data.index[0] - start_target).days) < abs(
(baseline_data.index[1] - start_target).days
)
with pytest.raises(NoBaselineDataError):
get_baseline_data(
meter_data,
end=end_date,
max_days=max_days,
allow_billing_period_overshoot=True,
n_days_billing_period_overshoot=45,
ignore_billing_period_gap_for_day_count=True,
)
def test_get_reporting_data(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
reporting_data, warnings = get_reporting_data(meter_data)
assert meter_data.shape == reporting_data.shape == (19417, 1)
assert len(warnings) == 0
def test_get_reporting_data_with_timezones(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data.tz_convert("America/New_York")
)
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data.tz_convert("Australia/Sydney")
)
assert len(warnings) == 0
def test_get_reporting_data_with_start(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_end_date = il_electricity_cdd_hdd_hourly["blackout_end_date"]
reporting_data, warnings = get_reporting_data(meter_data, start=blackout_end_date)
assert meter_data.shape != reporting_data.shape == (8761, 1)
assert len(warnings) == 0
def test_get_reporting_data_with_start_no_max_days(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_end_date = il_electricity_cdd_hdd_hourly["blackout_end_date"]
reporting_data, warnings = get_reporting_data(
meter_data, start=blackout_end_date, max_days=None
)
assert meter_data.shape != reporting_data.shape == (9607, 1)
assert len(warnings) == 0
def test_get_reporting_data_empty(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
blackout_end_date = il_electricity_cdd_hdd_hourly["blackout_end_date"]
with pytest.raises(NoReportingDataError):
get_reporting_data(meter_data, start=pd.Timestamp("2030").tz_localize("UTC"))
def test_get_reporting_data_start_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
start = meter_data.index.min() - timedelta(days=1)
reporting_data, warnings = get_reporting_data(
meter_data, start=start, max_days=None
)
assert meter_data.shape == reporting_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_reporting_data.gap_at_reporting_start"
assert (
warning.description
== "Data does not have coverage at requested reporting start date."
)
assert warning.data == {
"data_start": "2015-11-22T06:00:00+00:00",
"requested_start": "2015-11-21T06:00:00+00:00",
}
def test_get_reporting_data_end_gap(il_electricity_cdd_hdd_hourly):
meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
end = meter_data.index.max() + timedelta(days=1)
reporting_data, warnings = get_reporting_data(meter_data, end=end, max_days=None)
assert meter_data.shape == reporting_data.shape == (19417, 1)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == "eemeter.get_reporting_data.gap_at_reporting_end"
assert (
warning.description
== "Data does not have coverage at requested reporting end date."
)
assert warning.data == {
"data_end": "2018-02-08T06:00:00+00:00",
"requested_end": "2018-02-09T06:00:00+00:00",
}
def test_get_reporting_data_with_overshoot(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=30,
allow_billing_period_overshoot=True,
)
assert reporting_data.shape == (2, 1)
assert round(reporting_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=30,
allow_billing_period_overshoot=False,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_reporting_data_with_ignored_gap(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=True,
)
assert reporting_data.shape == (2, 1)
assert round(reporting_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=45,
ignore_billing_period_gap_for_day_count=False,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
ignore_billing_period_gap_for_day_count=True,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_reporting_data_with_overshoot_and_ignored_gap(
il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=True,
ignore_billing_period_gap_for_day_count=True,
)
assert reporting_data.shape == (2, 1)
assert round(reporting_data.value.sum(), 2) == 632.31
assert len(warnings) == 0
reporting_data, warnings = get_reporting_data(
meter_data,
start=datetime(2016, 9, 9, tzinfo=pytz.UTC),
max_days=25,
allow_billing_period_overshoot=False,
ignore_billing_period_gap_for_day_count=False,
)
assert reporting_data.shape == (1, 1)
assert round(reporting_data.value.sum(), 2) == 0
assert len(warnings) == 0
def test_get_terms_unrecognized_method(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
with pytest.raises(ValueError):
get_terms(meter_data.index, term_lengths=[365], method="unrecognized")
def test_get_terms_unsorted_index(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
with pytest.raises(ValueError):
get_terms(meter_data.index[::-1], term_lengths=[365])
def test_get_terms_bad_term_labels(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
with pytest.raises(ValueError):
terms = get_terms(
meter_data.index,
term_lengths=[60, 60, 60],
term_labels=["abc", "def"], # too short
)
def test_get_terms_default_term_labels(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
terms = get_terms(meter_data.index, term_lengths=[60, 60, 60])
assert [t.label for t in terms] == ["term_001", "term_002", "term_003"]
def test_get_terms_custom_term_labels(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
terms = get_terms(
meter_data.index, term_lengths=[60, 60, 60], term_labels=["abc", "def", "ghi"]
)
assert [t.label for t in terms] == ["abc", "def", "ghi"]
def test_get_terms_empty_index_input(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
terms = get_terms(meter_data.index[:0], term_lengths=[60, 60, 60])
assert len(terms) == 0
def test_get_terms_strict(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
strict_terms = get_terms(
meter_data.index,
term_lengths=[365, 365],
term_labels=["year1", "year2"],
start=datetime(2016, 1, 15, tzinfo=pytz.UTC),
method="strict",
)
assert len(strict_terms) == 2
year1 = strict_terms[0]
assert year1.label == "year1"
assert year1.index.shape == (12,)
assert (
year1.target_start_date
== pd.Timestamp("2016-01-15 00:00:00+0000", tz="UTC").to_pydatetime()
)
assert (
year1.target_end_date
== pd.Timestamp("2017-01-14 00:00:00+0000", tz="UTC").to_pydatetime()
)
assert year1.target_term_length_days == 365
assert (
year1.actual_start_date
== year1.index[0]
== pd.Timestamp("2016-01-22 06:00:00+0000", tz="UTC")
)
assert (
year1.actual_end_date
== year1.index[-1]
== pd.Timestamp("2016-12-19 06:00:00+0000", tz="UTC")
)
assert year1.actual_term_length_days == 332
assert year1.complete
year2 = strict_terms[1]
assert year2.index.shape == (13,)
assert year2.label == "year2"
assert year2.target_start_date == pd.Timestamp("2016-12-19 06:00:00+0000", tz="UTC")
assert (
year2.target_end_date
== pd.Timestamp("2018-01-14 00:00:00+0000", tz="UTC").to_pydatetime()
)
assert year2.target_term_length_days == 365
assert (
year2.actual_start_date
== year2.index[0]
== | pd.Timestamp("2016-12-19 06:00:00+00:00", tz="UTC") | pandas.Timestamp |
#---------------------------------------------------------------
#__main__.py
#this script collates measurements from individual csv outputs of
#the morphometriX GUI
#the csvs can be saved either all in one folder or within each individual
#animal's folder.
#this version includes a safety net that recalculates the measurement using
#accurate altitude and focal lengths that the user must provie in csvs.
# this version uses PyQt5 instead of easygui (used in v2.0)
#created by: <NAME> (<EMAIL>), March 2020
#updated by: <NAME>, June 2021
#----------------------------------------------------------------
#import modules
import pandas as pd
import numpy as np
import os, sys
import math
from PyQt5 import QtCore
from PyQt5.QtWidgets import QApplication, QWidget, QInputDialog, QLineEdit, QFileDialog, QMessageBox, QLabel, QVBoxLayout
from PyQt5.QtGui import QIcon
import collatrix.collatrix_functions
from collatrix.collatrix_functions import anydup, readfile, fheader, lmeas, wmeas, setup, pull_data, safe_data, end_concat, df_formatting
from collatrix.collatrix_functions import collate_v4and5, collate_v6
class App(QWidget):
def __init__(self):
super().__init__()
self.title = 'close box to end script'
self.left = 10
self.top = 10
self.width = 640
self.height = 480
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.show()
#add message box with link to github documentation
msgBox = QMessageBox()
msgBox.setWindowTitle("For detailed input info click link below")
msgBox.setTextFormat(QtCore.Qt.RichText)
msgBox.setText('<a href = "https://github.com/cbirdferrer/collatrix#inputs">CLICK HERE</a> for detailed input instructions, \n then click on OK button to continue')
x = msgBox.exec_()
#do you want the Animal ID to be assigned based on the name of the folder
items = ('yes', 'no')
anFold, okPressed = QInputDialog.getItem(self,"Input #1", "Do you want the Animal ID to be assigned based on the name of the folder? \n yes or no",items,0,False)
if okPressed and anFold:
print("{0} Animal ID in folder name".format(anFold))
#ask if they want safey net
items = ('yes', 'no')
safety, okPressed = QInputDialog.getItem(self,"Input #2", "Do you want to use the safety? \n Yes or No?",items,0,False)
if okPressed and safety:
print("{0} safety".format(safety))
#if safety yes, ask for file
if safety == 'yes':
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
safe_csv, _ = QFileDialog.getOpenFileName(self,"2.1 Safety File: Image list with altitudes and other information.", "","All Files (*);;csv files (*.csv)", options=options)
print("safety csv = {0}".format(safe_csv))
elif safety == 'no':
pass
#animal id list?
items = ('no','yes')
idchoice, okPressed = QInputDialog.getItem(self, "Input #3", "Do you want output to only contain certain individuals? \n Yes or No?",items,0,False)
if idchoice and okPressed:
print("{0} subset list".format(idchoice))
if idchoice == 'yes':
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
idsCSV, _ = QFileDialog.getOpenFileName(self,"3.1 File containing ID list", "","All Files (*);;csv files (*.csv)", options=options)
if idsCSV:
print("ID list file = {0}".format(idsCSV))
elif idchoice == 'no':
pass
#ask for name of output
outname, okPressed = QInputDialog.getText(self, "Input #4", "Prefix for output file",QLineEdit.Normal,"")
#import safety csv if safety selected
if safety == 'yes':
dfList = pd.read_csv(safe_csv, sep = ",")
dfList = dfList.dropna(how="all",axis='rows').reset_index()
df_L = dfList.groupby('Image').first().reset_index()
df_L['Image'] = [x.strip() for x in df_L['Image']]
elif safety == 'no':
df_L = "no safety"
#get folders
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
GUIfold = QFileDialog.getExistingDirectory(None, "Input 5. Folder containing MorphoMetriX outputs",options=options)
saveFold = QFileDialog.getExistingDirectory(None,"Input 6. Folder where output should be saved",options = options)
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
#make lists
#for csvs
csvs_all = []
csvs = []
not_mmx = []
#for measurements
measurements = []
nonPercMeas = []
#walk through all folders in GUI folder and collect all csvs
for root,dirs,files in os.walk(GUIfold):
csvs_all += [os.path.join(root,f) for f in files if f.endswith('.csv')]
#make sure the csvs are morphometrix outputs by checking first row
csvs += [c for c in csvs_all if 'Image ID' in pd.read_csv(c,sep='^',header=None,prefix='X',engine = 'python',quoting=3, na_values = ['""','"'],encoding_errors = "ignore")['X0'][0]]
#make list of all csvs that were not morphometrix csvs to tell user
not_mmx += [x for x in csvs_all if x not in csvs]
#check for csvs that (for whatever reason) hit an error when being read in.
#makes a list of those csvs for users to examine
badcsvs = []
for f in csvs:
try:
temp= | pd.read_csv(f,sep='^',header=None,prefix='X',engine = 'python',quoting=3, na_values = ['""','"'],encoding_errors = "ignore") | pandas.read_csv |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from collections import OrderedDict
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import lrange
from pandas import DataFrame, MultiIndex, Series, date_range, notna
import pandas.core.panel as panelm
from pandas.core.panel import Panel
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal,
makeCustomDataframe as mkdf, makeMixedDataFrame)
from pandas.tseries.offsets import MonthEnd
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class PanelTests(object):
panel = None
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
pytest.raises(TypeError, hash, c_empty)
pytest.raises(TypeError, hash, c)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class SafeForSparse(object):
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).iloc[0]
ops = ['add', 'sub', 'mul', 'truediv',
'floordiv', 'div', 'mod', 'pow']
for op in ops:
with pytest.raises(NotImplementedError):
getattr(p, op)(d, axis=0)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class CheckIndexing(object):
def test_delitem_and_pop(self):
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
tm.assert_frame_equal(panelc[0], panel[0])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# bad shape
p = Panel(np.random.randn(4, 3, 2))
msg = (r"shape of value must be \(3, 2\), "
r"shape of given object was \(4, 2\)")
with pytest.raises(ValueError, match=msg):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notna(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notna(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_getitem_fancy_slice(self):
pass
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.loc[:, 22, [111, 333]] = b
assert_frame_equal(a.loc[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort_values()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.loc[0, :, 0] = b
assert_series_equal(df.loc[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.loc[:, 0, 0] = b
assert_series_equal(df.loc[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.loc[0, 0, :] = b
assert_series_equal(df.loc[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
        # GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class TestPanel(PanelTests, CheckIndexing, SafeForSparse):
def test_constructor_cast(self):
# can't cast
data = [[['foo', 'bar', 'baz']]]
pytest.raises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
assert len(empty.items) == 0
assert len(empty.major_axis) == 0
assert len(empty.minor_axis) == 0
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
assert panel.values.dtype == np.object_
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
assert panel[i].values.dtype.name == dtype
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(
np.random.randn(2, 10, 5),
items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5),
dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
msg = "The number of dimensions required is 3"
with pytest.raises(ValueError, match=msg):
Panel(np.random.randn(10, 2))
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
assert list(p.items) == keys
p = Panel.from_dict(d)
assert list(p.items) == keys
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
assert panel['foo'].values.dtype == np.object_
assert panel['A'].values.dtype == np.float64
def test_constructor_error_msgs(self):
msg = (r"Shape of passed values is \(3, 4, 5\), "
r"indices imply \(4, 5, 5\)")
with pytest.raises(ValueError, match=msg):
Panel(np.random.randn(3, 4, 5),
lrange(4), lrange(5), lrange(5))
msg = (r"Shape of passed values is \(3, 4, 5\), "
r"indices imply \(5, 4, 5\)")
with pytest.raises(ValueError, match=msg):
Panel(np.random.randn(3, 4, 5),
lrange(5), lrange(4), lrange(5))
msg = (r"Shape of passed values is \(3, 4, 5\), "
r"indices imply \(5, 5, 4\)")
with pytest.raises(ValueError, match=msg):
Panel(np.random.randn(3, 4, 5),
lrange(5), lrange(5), lrange(4))
def test_apply_slabs(self):
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
        # on windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(
lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_fillna(self):
# limit not implemented when only value is specified
p = Panel(np.random.randn(3, 4, 5))
p.iloc[0:2, 0:2, 0:2] = np.nan
pytest.raises(NotImplementedError,
lambda: p.fillna(999, limit=1))
def test_to_frame_multi_major(self):
idx = MultiIndex.from_tuples(
[(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1],
[3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
expected_idx = MultiIndex.from_tuples(
[
(1, 'one', 'A'), (1, 'one', 'B'),
(1, 'one', 'C'), (1, 'two', 'A'),
(1, 'two', 'B'), (1, 'two', 'C'),
(2, 'one', 'A'), (2, 'one', 'B'),
(2, 'one', 'C'), (2, 'two', 'A'),
(2, 'two', 'B'), (2, 'two', 'C')
],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
'c', 1, 4, 'd', 1],
'i2': [1, 'a', 1, 2, 'b',
1, 3, 'c', 1, 4, 'd', 1]},
index=expected_idx)
result = wp.to_frame()
assert_frame_equal(result, expected)
wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
result = wp.to_frame()
assert_frame_equal(result, expected[1:])
idx = MultiIndex.from_tuples(
[(1, 'two'), (1, 'one'), (2, 'one'), (np.nan, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1],
[3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
(1, 'two', 'C'),
(1, 'one', 'A'),
(1, 'one', 'B'),
(1, 'one', 'C'),
(2, 'one', 'A'),
(2, 'one', 'B'),
(2, 'one', 'C'),
(np.nan, 'two', 'A'),
(np.nan, 'two', 'B'),
(np.nan, 'two', 'C')],
names=[None, None, 'minor'])
expected.index = ex_idx
result = wp.to_frame()
assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two'), (3, 'three'), (4, 'four')])
df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
['a', 'b', 'w', 'x'],
['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
[-5, -6, -7, -8]], columns=cols, index=idx)
wp = Panel({'i1': df, 'i2': df})
exp_idx = MultiIndex.from_tuples(
[(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
(1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
(1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
(1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
(2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
(2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
(2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
(2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
(3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
(3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
(4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
(4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
names=[None, None, None, None])
exp_val = [[1, 1], [2, 2], [11, 11], [12, 12],
[3, 3], [4, 4],
[13, 13], [14, 14], ['a', 'a'],
['b', 'b'], ['w', 'w'],
['x', 'x'], ['c', 'c'], ['d', 'd'], [
'y', 'y'], ['z', 'z'],
[-1, -1], [-2, -2], [-3, -3], [-4, -4],
[-5, -5], [-6, -6],
[-7, -7], [-8, -8]]
result = wp.to_frame()
expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
assert_frame_equal(result, expected)
def test_to_frame_multi_drop_level(self):
idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
wp = Panel({'i1': df, 'i2': df})
result = wp.to_frame()
exp_idx = MultiIndex.from_tuples(
[(2, 'one', 'A'), (2, 'two', 'A')],
names=[None, None, 'minor'])
expected = | DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx) | pandas.DataFrame |
# Core Pkg
import streamlit as st
import pandas as pd
import numpy as np
import pickle # loading model
import base64 # enable file download
#function to load and cache (for speed) the dataset, with output mutation allowed
@st.cache(allow_output_mutation=True)
def load_data(dataset):
df = | pd.read_csv(dataset) | pandas.read_csv |
import pandas as pd
class TripleBarrier:
def __init__(self, price, vol_span=50, barrier_horizon=5, factors=None, label=0):
"""
Labels the Data with the Triple Barrier Method
:param price: closing price
        :param vol_span: look back used to determine the volatility increment threshold
        :param barrier_horizon: represents vertical length (days) for barrier
        :param factors: represents scalars for barrier height
        :param label: 0 represents label for classification [-1, 0, 1], 1 represents label for regression -1 <= x <= 1
"""
self.label = label
if factors is None:
factors = [2, 2]
daily_vol = self.get_daily_vol(prices=price, lookback=vol_span)
vertical_barriers = self.add_vertical_barrier(
close=price, num_days=barrier_horizon
)
triple_barrier_events = self.get_events(
close=price,
factor=factors,
target=daily_vol,
vertical_barrier=vertical_barriers,
)
self.labels = self.get_labels(triple_barrier_events, price)
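    # Hypothetical usage sketch (not part of the original module): given a pandas
    # Series of closing prices with a DatetimeIndex, labels are produced by the
    # constructor, e.g.
    #   tb = TripleBarrier(price=close_series, vol_span=50, barrier_horizon=5,
    #                      factors=[2, 2], label=0)
    #   labels = tb.labels
    # `close_series` is an assumed example name; factors[0] scales the
    # profit-taking barrier and factors[1] the stop-loss barrier.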
@staticmethod
def get_daily_vol(prices, lookback=50):
"""
Daily Volatility Estimates
Computes the daily volatility at intraday estimation points, applying a span of lookback days to an
exponentially weighted moving standard deviation.
This function is used to compute dynamic thresholds for profit taking and stop loss limits
"""
# find the timestamps at [t-1]
df = prices.index.searchsorted(prices.index - pd.Timedelta(days=1))
df = df[df > 0]
        # align timestamps of [t-1] to timestamps of [t]
df = pd.Series(
prices.index[df - 1], index=prices.index[prices.shape[0] - df.shape[0] :]
)
        # get values by timestamps
df = prices.loc[df.index] / prices.loc[df.values].values - 1
# estimate rolling std
df = df.ewm(span=lookback).std()
return df
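    # Illustrative note (assumption, not original code): for a price Series
    # `close` with a DatetimeIndex,
    #   daily_vol = TripleBarrier.get_daily_vol(close, lookback=50)
    # returns an exponentially weighted std of 1-day returns aligned to the
    # tail of `close`'s index, used later to size the horizontal barriers.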
@staticmethod
def add_vertical_barrier(close, num_days=0):
"""
Adds the vertical barrier
For each index in events, find the timestamp of the next bar at or immediately after a number of days.
This function creates a series that has all the timestamps of when the vertical barrier is reached.
"""
timedelta = pd.Timedelta("{} days".format(num_days))
nearest_index = close.index.searchsorted(close.index + timedelta)
nearest_index = nearest_index[nearest_index < close.shape[0]]
nearest_timestamp = close.index[nearest_index]
return pd.Series(
data=nearest_timestamp, index=close.index[: nearest_timestamp.shape[0]]
)
@staticmethod
def touch_barrier(close, events, factor, dates):
"""
This function applies the triple-barrier. It works on a set of datetime index values.
Mainly it returns a DataFrame of timestamps regarding the time when the first barriers were reached.
"""
# Apply Stop Loss / Profit Taking, if it takes place before t1 (end of event)
events_ = events.loc[dates]
out = events_[["t1"]].copy(deep=True)
# Profit taking active
if factor[0] > 0:
profit_taking = factor[0] * events_["trgt"]
else:
profit_taking = pd.Series(index=events.index)
# Stop loss active
if factor[1] > 0:
stop_loss = -factor[1] * events_["trgt"]
else:
stop_loss = pd.Series(index=events.index)
out["pt"] = pd.Series(dtype=events.index.dtype)
out["sl"] = | pd.Series(dtype=events.index.dtype) | pandas.Series |
import funcy
import functools
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import shap
from copy import deepcopy
font = {
"size": 30
}
matplotlib.rc("font", **font)
def plot_results_2x2(summaries, save_fpath, fformat="pdf", dpi=300):
fig = plt.figure(figsize=(20, 20))
nrows, ncols = 2, 2
ax1 = fig.add_subplot(nrows, ncols, 1)
for summary in summaries:
ax1.plot(summary["fpr"], summary["tpr"], lw=2, label="AUC = %0.2f" % summary["roc_auc"])
ax1.plot([0, 1], [0, 1], color="red", lw=2, linestyle="--")
ax1.set_xlim([-0.05, 1.05])
ax1.set_ylim([-0.05, 1.05])
ax1.set_ylabel("Sensitivity")
ax1.set_xlabel("1 - Specificity")
ax1.legend(loc="lower right")
ax1.grid(which="major")
ax1.grid(which="minor", linestyle='--', alpha=0.4)
ax1.minorticks_on()
ax2 = fig.add_subplot(nrows, ncols, 2)
for summary in summaries:
ax2.step(summary["recall"], summary["precision"], lw=2, label="Avg Prec = %0.2f" % summary["avg_precision"])
ax2.set_xlim([-0.05, 1.05])
ax2.set_ylim([-0.05, 1.05])
ax2.set_xlabel("Recall")
ax2.set_ylabel("Precision")
ax2.legend(loc="lower left")
ax2.grid(which="major")
ax2.grid(which="minor", linestyle='--', alpha=0.4)
ax2.minorticks_on()
ax3 = fig.add_subplot(nrows, ncols, 3)
ax3.plot([0., 1.], [0., 1.], "r--")
for summary in summaries:
ax3.plot(summary["mean_predicted_value"], summary["fraction_of_positives"], "s-")
ax3.set_xlim([-0.05, 1.05])
ax3.set_ylim([-0.05, 1.05])
ax3.set_xlabel("Mean predicted value")
ax3.set_ylabel("Fraction of positive cases")
ax3.grid(which="major")
ax3.grid(which="minor", linestyle='--', alpha=0.4)
ax3.minorticks_on()
df_summary = pd.DataFrame(functools.reduce(lambda l1, l2: l1+l2, [
[
("ROC AUC", summary["roc_auc"]),
("Average\nPrecision", summary["avg_precision"]),
("Best F1", summary["best_f1"]),
("Best\nAccuracy", summary["best_acc"])
]
for summary in summaries
]), columns=["Metric", "Value"])
ax4 = fig.add_subplot(nrows, ncols, 4)
sns.boxplot(x="Metric", y="Value", data=df_summary, width=0.3, palette="muted")
ax4.set_ylim([0., 1.])
ax4.grid(which="major")
ax4.grid(which="minor", linestyle='--', alpha=0.4)
ax4.minorticks_on()
plt.tight_layout(pad=3)
plt.savefig(save_fpath, format=fformat, dpi=dpi)
plt.close()
return df_summary.groupby("Metric").agg({
"Value": ["mean", "std"]
}).reset_index()
def plot_results_1x2(summaries, save_fpath, fformat="pdf", dpi=300):
fig = plt.figure(figsize=(20, 10))
nrows, ncols = 1, 2
mean_fpr = np.linspace(0, 1, 100)
aucs = []
tprs = []
for summary in summaries:
aucs.append(summary["roc_auc"])
interp_tpr = np.interp(mean_fpr, summary["fpr"], summary["tpr"])
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc, std_auc = np.mean(aucs), np.std(aucs)
ax1 = fig.add_subplot(nrows, ncols, 1)
ax1.plot(
mean_fpr, mean_tpr, color="b", lw=2, alpha=1.,
label="Mean AUC = %0.2f $\pm$ %0.2f" % (mean_auc, std_auc)
)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax1.fill_between(mean_fpr, tprs_lower, tprs_upper, color="grey", alpha=.4, label=r"$\pm$ 1 std. dev.")
ax1.plot([0, 1], [0, 1], color="red", lw=2, linestyle="--")
for summary in summaries:
ax1.plot(summary["fpr"], summary["tpr"], "--", lw=2, label="AUC = %0.2f" % summary["roc_auc"], alpha=1.)
ax1.legend(loc="lower right", framealpha=0.7)
ax1.set_xlim([-0.05, 1.05])
ax1.set_ylim([-0.05, 1.05])
ax1.set_ylabel("Sensitivity")
ax1.set_xlabel("1 - Specificity")
ax1.grid(which="major")
ax1.grid(which="minor", linestyle='--', alpha=0.4)
ax1.minorticks_on()
df_summary = pd.DataFrame(functools.reduce(lambda l1, l2: l1+l2, [
[
("ROC AUC", summary["roc_auc"]),
("Average\nPrecision", summary["avg_precision"]),
("Best F1", summary["best_f1"]),
("Best\nAccuracy", summary["best_acc"])
]
for summary in summaries
]), columns=["Metric", "Value"])
ax4 = fig.add_subplot(nrows, ncols, 2)
sns.boxplot(x="Metric", y="Value", data=df_summary, width=0.3, palette="muted")
ax4.set_ylim([0., 1.])
ax4.grid(which="major")
ax4.grid(which="minor", linestyle='--', alpha=0.4)
ax4.minorticks_on()
plt.tight_layout(pad=3)
plt.savefig(save_fpath, format=fformat, dpi=dpi)
plt.close()
return df_summary.groupby("Metric").agg({
"Value": ["mean", "std"]
}).reset_index()
def plot_shap_values(X_test, summaries, feature_names, save_fpath, fformat="pdf", dpi=300):
plt.figure()
shap_values = np.stack([summary["shap_values"] for summary in summaries])
shap_values = np.mean(shap_values, axis=0)
shap.summary_plot(
shap_values, X_test, plot_type="violin", plot_size=(10, 7), sort=False, show=False,
feature_names=feature_names
)
plt.tight_layout()
plt.savefig(save_fpath, format=fformat, dpi=dpi)
plt.close()
def plot_survival(df_test, features, summaries, save_fpath, fformat="pdf", dpi=300):
use_df = deepcopy(df_test)
use_df["tempo_estadia_hospitalar"] = use_df.apply(
lambda row: 100000 if not row["obito"] else row["tempo_estadia_hospitalar"],
axis=1
)
grouped = use_df.groupby("record_id")
data_tmp = []
for record_id, group in grouped:
sorted_group = group.sort_values("data")
data_tmp.append(dict(sorted_group.iloc[0]))
df_first_register = | pd.DataFrame(data_tmp) | pandas.DataFrame |
import pandas as pd
def llr(k):
'''
Compute loglikelihood ratio see
http://tdunning.blogspot.de/2008/03/surprise-and-coincidence.html
And
https://github.com/apache/mahout/blob/4f2108c576daaa3198671568eaa619266e787b1a/math/src/main/java/org/apache/mahout/math/stats/LogLikelihood.java#L100
And https://en.wikipedia.org/wiki/G-test
'''
def H(k):
N = k.values.sum()
wtf = pd.np.log(k / N + (k == 0).astype(int))
return (k / N * wtf).values.sum()
return 2 * k.values.sum() * (H(k) - H(k.sum(0)) - H(k.sum(1)))
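# Worked example (values match the unit tests at the bottom of this file):
#   llr(pd.DataFrame([[1, 0], [0, 1]]))   -> ~2.773
#   llr(pd.DataFrame([[10, 0], [0, 10]])) -> ~27.726
# i.e. the same co-occurrence pattern with 10x the counts scores 10x higher,
# reflecting that the G-statistic grows with the amount of evidence.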
def compute_scores(A, B, skip_diagonal=False):
'''
Compute the scores for a primary and secondary action (across all items)
'A' is the user x item matrix of the primary action
'B' is the user x item matrix of a secondary action
the result is a dataframe where
'primary_item' is the item associated with the primary event (ie 'buy')
'secondary_item' is the item associated with the secondary event (ie 'click')
'score' is the log likelihood score representing the strength of
association (the higher the score the stronger association)
For example, people who 'primary action' item_A do 'secondary action'
item_B with strength 'score'
Loosely based on:
https://github.com/apache/mahout/blob/4f2108c576daaa3198671568eaa619266e787b1a/math-scala/src/main/scala/org/apache/mahout/math/cf/SimilarityAnalysis.scala#L312
'''
# We ignore the original interaction value and create a binary (binarize) 0-1 matrix
# as we only consider whether interactions happened or did not happen
# only consider action B for users where action A occured
A = (A != 0).astype(int)
B = (B != 0).astype(int)
AtB = A.loc[B.index, B.columns].transpose().dot(B)
numInteractionsWithAandB = AtB
numInteractionsWithA = A.sum()
numInteractionsWithB = B.sum()
# Total number of interactions is
# total number of users where primary event occurs
numInteractions = len(A)
K11 = numInteractionsWithAandB
K12 = numInteractionsWithAandB.rsub(numInteractionsWithA, axis=0).dropna()
K21 = numInteractionsWithAandB.rsub(numInteractionsWithB, axis=1)
K22 = numInteractions + numInteractionsWithAandB.sub(
numInteractionsWithB, axis=1).sub(
numInteractionsWithA, axis=0)
the_data = zip(
K11.apply(lambda x: x.index + '_' + x.name).values.flatten(),
K11.values.flatten(), K12.values.flatten(), K21.values.flatten(),
K22.values.flatten())
container = []
for name, k11, k12, k21, k22 in the_data:
item_A, item_B = name.split('_')
if k11 != 0 and not (skip_diagonal and item_A == item_B):
df = pd.DataFrame([[k11, k12], [k21, k22]])
score = llr(df)
else:
            score = 0  # Warning: while an llr score could be calculated, for co-occurrence purposes it doesn't make sense to compute llr when the co-occurrence count (k11) is zero
container.append((item_A, item_B, score))
return pd.DataFrame(
container, columns=['primary_item', 'secondary_item',
'score']).sort_values(
['primary_item', 'score'],
ascending=[True, False])
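# Sketch of a call (argument names are assumptions based on the docstring):
#   scores = compute_scores(A, B, skip_diagonal=False)
# where A and B are user x item DataFrames for the primary and secondary
# actions; the result lists, per primary_item, the secondary_items ranked by
# descending LLR score.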
def train(raw_data, primary_action):
'''
    this is like the 'main' function: takes a dataset and returns a dataframe with LLR scores
raw_data is a dataframe with the columns: user, action, item
primary_action is the action from raw_data that we want to determine associations for
'A' is the matrix of primary actions
'B' is a matrix of secondary actions
'''
    # pretty sure we only want to keep users and user metadata for only the users where the primary action occurs
    # not sure where this happens in the Mahout code though...
users_who_did_primary_action = pd.unique(
raw_data.loc[raw_data.action == primary_action, 'user'])
data = raw_data.loc[raw_data.user.isin(users_who_did_primary_action), :]
freq = data.groupby(['user', 'action',
'item']).size().to_frame('freq').reset_index()
freq_actions = freq.groupby('action')
A = freq_actions.get_group(primary_action).pivot(
index='user', columns='item', values='freq').fillna(0)
cco_results = []
for action, matrix in freq_actions:
skip_diagonal = primary_action == action
B = matrix.pivot(index='user', columns='item', values='freq').fillna(0)
scores = compute_scores(A, B, skip_diagonal)
scores['primary_action'] = primary_action
scores['secondary_action'] = action
cco_results.append(scores)
all_data = pd.concat(cco_results, ignore_index=True)
return all_data[[
'primary_action', 'primary_item', 'secondary_action', 'secondary_item',
'score'
]]
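# Example invocation (hedged; 'buy' and 'view' are hypothetical action names):
#   raw_data = pd.DataFrame({'user':   ['u1', 'u1', 'u2'],
#                            'action': ['buy', 'view', 'buy'],
#                            'item':   ['i1', 'i2', 'i1']})
#   cco = train(raw_data, primary_action='buy')
# `cco` then holds one row per (primary_item, secondary_item) pair with its
# LLR score, for every secondary action observed among users who bought.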
if __name__ == '__main__':
'''
These unit tests are the same as the Apache Mahout unit tests per:
https://github.com/apache/mahout/blob/08e02602e947ff945b9bd73ab5f0b45863df3e53/spark/src/test/scala/org/apache/mahout/cf/SimilarityAnalysisSuite.scala#L49
https://github.com/apache/mahout/blob/08e02602e947ff945b9bd73ab5f0b45863df3e53/math/src/test/java/org/apache/mahout/math/stats/LogLikelihoodTest.java#L50
https://github.com/apache/mahout/blob/4f2108c576daaa3198671568eaa619266e787b1a/math/src/main/java/org/apache/mahout/math/stats/LogLikelihood.java#L1
'''
# test compute_scores
a = pd.DataFrame(
[(1, 1, 0, 0, 0), (0, 0, 1, 1, 0), (0, 0, 0, 0, 1), (1, 0, 0, 1, 0)],
columns=['a', 'b', 'c', 'd', 'e'])
b = pd.DataFrame(
[(1, 1, 1, 1, 0), (1, 1, 1, 1, 0), (0, 0, 1, 0, 1), (1, 1, 0, 1, 0)],
columns=['a', 'b', 'c', 'd', 'e'])
AtAControl = pd.DataFrame(
[(0.0, 1.7260924347106847, 0.0, 0.0, 0.0),
(1.7260924347106847, 0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 1.7260924347106847, 0.0),
(0.0, 0.0, 1.7260924347106847, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0, 0.0)])\
.round(5)\
.as_matrix()
AtBControl = pd.DataFrame(
[(1.7260924347106847, 1.7260924347106847, 1.7260924347106847, 1.7260924347106847, 0.0),
(0.6795961471815897, 0.6795961471815897, 0.6795961471815897, 0.6795961471815897, 0.0),
(0.6795961471815897, 0.6795961471815897, 0.6795961471815897, 0.6795961471815897, 0.0),
(1.7260924347106847, 1.7260924347106847, 1.7260924347106847, 1.7260924347106847, 0.0),
(0.0, 0.0, 0.6795961471815897, 0.0, 4.498681156950466)])\
.round(5)\
.as_matrix()
ata = compute_scores(a, a, True).pivot(
index='primary_item', columns='secondary_item',
values='score').round(5).as_matrix()
atb = compute_scores(a, b, False).pivot(
index='primary_item', columns='secondary_item',
values='score').round(5).as_matrix()
assert pd.np.array_equal(ata, AtAControl)
assert pd.np.array_equal(atb, AtBControl)
# test llr
assert 2.773 == round(llr(pd.DataFrame([[1, 0], [0, 1]])), 3)
assert 27.726 == round(llr(pd.DataFrame([[10, 0], [0, 10]])), 3)
assert 39.331 == round(llr(pd.DataFrame([[5, 1995], [0, 100000]])), 3)
assert 4730.737 == round(
llr(pd.DataFrame([[1000, 1995], [1000, 100000]])), 3)
assert 5734.343 == round(
llr(pd.DataFrame([[1000, 1000], [1000, 100000]])), 3)
assert 5714.932 == round(
llr( | pd.DataFrame([[1000, 1000], [1000, 99000]]) | pandas.DataFrame |
import requests
import pandas as pd
import numpy as np
from pandas import json_normalize
from scipy.optimize import curve_fit
from time import gmtime, strftime
import streamlit as st
base_url = 'http://corona-api.com/countries'
def getcountrylist():
response = requests.get(base_url).json()
countrylistcode = {}
for country in response['data']:
countrylistcode[country['name']] = country['code']
return countrylistcode
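# The mapping returned above looks roughly like {'Germany': 'DE', ...}
# (illustrative sample only; the actual keys depend on the corona-api response).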
def getcountrydata(countrycode):
url = f'{base_url}/{countrycode}'
response = requests.get(url).json()
df = | json_normalize(response['data']) | pandas.json_normalize |
import pandas as pd
import numpy as np
import lightgbm as lgb
import time
train_1 = | pd.read_csv("dataset/validation_2/train_complete.csv") | pandas.read_csv |
import sys
import re
import json
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import MultiLabelBinarizer
from scipy.spatial.distance import cdist
from colorama import Fore, Style
from kneed import KneeLocator
import copy
import time
import pickle
import os
def error_msg(error_msg, arg):
"""
Helper function to display error message on the screen.
Input:
The error message along with its respective argument.
(Values include - filename, selected action).
Output:
The formatted error message on the screen along with the argument.
"""
print("****************************")
print(Fore.RED, end='')
print(error_msg,":", arg)
print(Style.RESET_ALL, end='')
print("****************************")
sys.exit(0)
def printINFO(info):
"""
Helper function to ask the user for Input.
Input:
The message that is to be displayed.
Output:
The formatted message on the screen.
"""
print(Fore.BLUE, end='')
print(info)
print(Style.RESET_ALL, end='')
# *****************************************************************************
# *****************************************************************************
# Helper Methods Start
def calculate_num_clusters(df, acl_weights):
"""
Calculates the optimal number of clusters using the elbow_graph approach.
Input:
The Pandas dataframe of the input file (ACL.json)
output:
The value of k that provides the least MSE.
"""
files = ['IP_Access_List', 'Route_Filter_List', 'VRF', 'AS_Path_Access_List',
'IKE_Phase1_Keys', 'IPsec_Phase2_Proposals', 'Routing_Policy']
k_select_vals = [41, 17, 42, 5, 3, 2, 58]
curr_file = file_name.split(".")[0]
file_index = files.index(curr_file)
return k_select_vals[file_index]
features = df[df.columns]
ran = min(len(df.columns), len(discrete_namedstructure))
if ran > 50:
k_range = range(1, 587)
else:
k_range = range(1, ran)
print(k_range)
k_range = range(1, 580)
distortions = []
    np.random.seed(0)
clusters_list = []
f = open('distortions.txt', 'w')
for k in k_range:
print(k)
kmeans = KMeans(n_clusters=k).fit(features, None, sample_weight=acl_weights)
clusters_list.append(kmeans)
cluster_centers = kmeans.cluster_centers_
k_distance = cdist(features, cluster_centers, "euclidean")
distance = np.min(k_distance, axis=1)
distortion = np.sum(distance)/features.shape[0]
distortions.append(distortion)
f.write(str(distortion))
f.write("\n")
kn = KneeLocator(list(k_range), distortions, S=3.0, curve='convex', direction='decreasing')
print("Knee is: ", kn.knee)
plt.xlabel('k')
plt.ylabel('Distortion')
plt.title('The Elbow Method showing the optimal k')
plt.plot(k_range, distortions, 'bx-')
plt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
plt.show()
if kn.knee is None:
if ran < 5:
return ran - 1
else:
return 5
return kn.knee
'''
for i in range(1, len(avg_within)):
if (avg_within[i-1] - avg_within[i]) < 1:
break
# return i-1 if len(avg_within) > 1 else 1
# return i - 1 if i > 1 else 1
'''
def perform_kmeans_clustering(df, ns_weights):
"""
To get a mapping of the rows into respective clusters generated using the K-means algorithm.
Input:
df:The Pandas data-frame of the input file (ACL.json)
ns_weights: The weights of each name structure which allows the weighted k-means algorithm to work.
Output:
Adding respective K-means cluster label to the input dataframe.
Example:
Row1 - Label 0 //Belongs to Cluster 0
Row2 - Label 0 //Belongs to Cluster 0
Row3 - Label 1 //Belongs to Cluster 1
"""
global k_select
k_select = calculate_num_clusters(df, ns_weights)
features = df[df.columns]
kmeans = KMeans(n_clusters=k_select)
kmeans.fit(features, None, sample_weight=ns_weights)
labels = kmeans.labels_
df["kmeans_cluster_number"] = pd.Series(labels)
def extract_keys(the_dict, prefix=''):
"""
Recursive approach to gather all the keys that have nested keys in the input file.
Input:
The dictionary file to find all the keys in.
Output:
All the keys found in the nested dictionary.
Example:
Consider {key1:value1, key2:{key3:value3}, key4:[value4], key5:[key6:{key7:value7}]}
The function returns key2, key5=key6
"""
key_list = []
for key, value in the_dict.items():
if len(prefix) == 0:
new_prefix = key
else:
new_prefix = prefix + '=' + key
try:
if type(value) == dict:
key_list.extend(extract_keys(value, new_prefix))
elif type(value) == list and type(value[0]) == dict:
key_list.extend(extract_keys(value[0], new_prefix))
elif type(value) == list and type(value[0]) != dict:
key_list.append(new_prefix)
else:
key_list.append(new_prefix)
except:
key_list.append(new_prefix)
return key_list
def get_uniques(data):
"""
A helper function to get unique elements in a List.
Input:
A list that we need to capture uniques from.
Output:
A dictionary with unique entries and count of occurrences.
"""
acl_count_dict = {}
for acl in data:
acl = json.dumps(acl)
if acl not in acl_count_dict:
acl_count_dict[acl] = 1
else:
value = acl_count_dict[acl]
value += 1
acl_count_dict[acl] = value
keys = []
values = []
for key, value in acl_count_dict.items():
keys.append(key)
values.append(value)
return keys, values
def overall_dict(data_final):
"""
Parses through the dictionary and appends the frequency with which the keys occur.
Input:
A nested dictionary.
Example:
{key1:{key2:value1, key3:value2, key4:{key5:value3}}
{key6:{key7:value2}
{key8:{key3:value3, key4:value5, key6:value3}}
Output:
Returns a new array with the nested keys appended along with a tuple containing the un-nested value along with
the frequency count.
[{
key1=key2:{'value1':1},
key1=key3:{'value2':2},
key1=key4=key5:{'value3':3},
key6=key7:{'value2':2},
key8=key3:{'value3':3},
key8=key4:{'value5':1},
key8=key6:{'value3':1}
}]
"""
overall_array = []
for data in data_final:
overall = {}
for item in data:
if item[0] is None:
continue
result = extract_keys(item[0])
for element in result:
value = item[0]
for key in element.split("="):
new_value = value[key]
if type(new_value) == list:
if len(new_value) != 0:
new_value = new_value[0]
else:
new_value = "#BUG#"
value = new_value
if element not in overall:
overall[element] = {}
if value not in overall[element]:
overall[element][value] = 1
else:
overall[element][value] += 1
overall_array.append(overall)
return overall_array
def get_overall_dict(data_final):
"""
Parses through the dictionary and appends the frequency with which the keys occur.
Input:
A nested dictionary.
Example:
{key1:{key2:value1, key3:value2, key4:{key5:value3}}
{key6:{key7:value2}
{key8:{key3:value3, key4:value5, key6:value3}}
Output:
Returns a new array with the nested keys appended along with a tuple containing the unnested value along with the frequency count.
[{
key1=key2:{'value1':1},
key1=key3:{'value2':2},
key1=key4=key5:{'value3':3},
key6=key7:{'value2':2},
key8=key3:{'value3':3},
key8=key4:{'value5':1},
key8=key6:{'value3':1}
}]
"""
overall_array = []
for data in data_final:
overall = {}
new_value = None
flag = 0
for item in data:
visited = {"lines=name":1}
if item[0] is None:
continue
result = extract_keys(item[0])
for element in result:
value = item[0]
for key in element.split("="):
if element not in visited:
visited[element] = 1
new_value = value[key]
flag = 0
if type(new_value) == list:
if len(new_value) > 0:
for list_data in new_value:
if element not in overall:
overall[element] = {}
temp = element
temp_val = list_data
temp = temp.split("=", 1)[-1]
while len(temp.split("=")) > 1:
temp_val = temp_val[temp.split("=")[0]]
temp = temp.split("=", 1)[-1]
list_key = temp
check = 0
try:
if type(temp_val[list_key]) == list:
if temp_val[list_key][0] not in overall[element]:
overall[element][temp_val[list_key][0]] = 1
check = 1
else:
if temp_val[list_key] not in overall[element]:
overall[element][temp_val[list_key]] = 1
check = 1
except:
dummy=0
'''
do nothing
'''
try:
if check == 0:
if type(temp_val[list_key]) == list:
if temp_val[list_key][0] in overall[element]:
overall[element][temp_val[list_key][0]] += 1
else:
if temp_val[list_key] in overall[element]:
overall[element][temp_val[list_key]] += 1
except:
dummy=0
flag = 1
value = new_value
else:
'''
Type is not list
'''
value = new_value
else:
if flag == 0:
if element not in overall:
overall[element] = {}
if new_value not in overall[element]:
overall[element][new_value] = 1
else:
overall[element][new_value] += 1
if flag == 0:
if element not in overall:
overall[element] = {}
if new_value not in overall[element]:
overall[element][new_value] = 1
else:
overall[element][new_value] += 1
overall_array.append(overall)
return overall_array
def calculate_z_score(arr):
"""
Calculates the Z-score (uses mean) (or) Modified Z-score (uses median) of data-points
Input:
Data points generated from parsing through the input file.
Also considers the Z_SCORE_FLAG that is set previously with 0 (default) using the Modified Z-score and 1 using Z-score.
Output:
The Z-score of given data-points array.
"""
if len(arr) == 1:
return arr
z_score = []
'''
Calculates the Z-score using mean. Generally used if distribution is normal (Bell curve).
'''
if Z_SCORE_FLAG:
mean = np.mean(arr)
std = np.std(arr)
if std == 0:
return np.ones(len(arr)) * 1000
for val in arr:
z_score.append((val - mean) / std)
'''
Modified Z-score approach.
Calculates the Z-score using median. Generally used if distribution is skewed.
'''
else:
median_y = np.median(arr)
medians = [np.abs(y - median_y) for y in arr]
med = np.median(medians)
median_absolute_deviation_y = np.median([np.abs(y - median_y) for y in arr])
if median_absolute_deviation_y == 0:
return np.ones(len(arr)) * 1000
z_score = [0.6745 * (y - median_y) / median_absolute_deviation_y for y in arr]
return z_score
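# Illustrative note (assumption, not original code): with Z_SCORE_FLAG unset,
# the modified Z-score of a point y is 0.6745 * (y - median) / MAD, where MAD
# is the median absolute deviation. For arr = [1, 1, 10] the median is 1 and
# MAD is 0, so the function returns the sentinel [1000, 1000, 1000].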
def calculate_signature_d(overall_arr):
"""
Uses Z-score to generate the signatures of data-points and also maps points on level of significance (include for
signature calculation, include for bug calculation, no significance).
If Z-score is equal to 1000.0 or in between sig_threshold and bug_threshold, no-significance.
If Z-score is >= sig_threshold, include for signature calculation.
If Z-score is <= bug_threshold, include for bug calculation.
Input:
The individual master-signature generated for each Cluster.
Output:
An array containing dictionaries marked with tags that represent the action that needs to be performed on them.
"""
signature = {}
for key, value in overall_arr.items():
sig_threshold = 0.5
bug_threshold = -0.1
key_points = []
data_points = []
sig_values = []
for k, v in value.items():
key_points.append(k)
data_points.append(v)
if len(data_points) == 1:
sig_values.append((key_points[0], (data_points[0])))
'''
Check for two data points case
'''
else:
z_score = calculate_z_score(data_points)
if len(z_score) > 0:
avg_z_score = sum(z_score)/len(z_score)
bug_threshold = bug_threshold + (avg_z_score - sig_threshold)
for i in range(len(z_score)):
present_zscore = z_score[i]
if present_zscore == 1000.0:
sig_values.append((key_points[i], "*", (data_points[i])))
elif present_zscore >= sig_threshold:
sig_values.append((key_points[i], (data_points[i])))
elif present_zscore <= bug_threshold:
sig_values.append((key_points[i], "!", (data_points[i])))
elif (present_zscore < sig_threshold) and (present_zscore > bug_threshold):
sig_values.append((key_points[i], "*", (data_points[i])))
if key in signature:
signature[key].append(sig_values)
else:
signature[key] = []
signature[key] += sig_values
return signature
def results(data, signatures):
title = file_name.split(".")[0] + "_Results.txt"
if not os.path.exists(os.path.dirname(title)):
os.makedirs(os.path.dirname(title))
f = open(title, "w")
f.write(title + "\n")
f.write("\n")
totalBugs = 0
totalConformers = 0
for cluster_index, clustered_namedStructure in enumerate(data):
numBugs = 0
numConformers = 0
cluster_signature = signatures[cluster_index]
for namedStructure in clustered_namedStructure:
keys = extract_keys(namedStructure[0])
namedStructure = flatten_json((namedStructure[0]), '=')
isNamedStructureABug = False
newNamedStructure = {}
for key, value in namedStructure.items():
flag = 0
for index, char in enumerate(key):
if char == '0' or char == '1' or char == '2' or char == '3' or char == '4' or char == '5' or char == '6' or char == '7' or char == '8' or char == '9':
flag = 1
if index == len(key)-1:
new_key = str(key[0:index-1])
newNamedStructure[new_key] = value
else:
new_key = str(key[0:index-1]) + str(key[index+1:len(key)])
newNamedStructure[new_key] = value
if not flag:
newNamedStructure[key] = value
flag = 0
for propertyKey, propertyValue in newNamedStructure.items():
try:
propValues = cluster_signature[propertyKey]
except:
print("EXCEPTION OCCURRED!")
print(propertyKey)
for value in propValues:
if value[0] == propertyValue and value[1] == '!':
numBugs += 1
isNamedStructureABug = True
if isNamedStructureABug:
numBugs += 1
else:
numConformers += 1
numBugs = len(clustered_namedStructure) - numConformers
f.write("Cluster Index: " + str(cluster_index) + "\n")
f.write(" Number of elements in Cluster = " + str(len(clustered_namedStructure)) + "\n")
f.write(" Number of Bugs using Z-score: " + str(len(clustered_namedStructure) - numConformers) + "\n")
f.write(" Number of Conformers using Z-score: " + str(numConformers) + "\n")
f.write("\n")
totalBugs += numBugs
totalConformers += numConformers
print("Total Bugs = ", totalBugs)
print("Total Confomers = ", totalConformers)
f.write("\n")
f.write("\n")
f.write("Total Bugs using Z-score: " + str(totalBugs) + "\n")
f.write("Total Conformers using Z-score: " + str(totalConformers))
def transform_data(data):
"""
A helper function to extract nested keys from the ACL and to add the frequency of the repeated value. Helps score data.
Input:
An ACL in the form {key1:value1, key2:{key3:value3}, key4:[value4], key5:[key6:{key7:value7}]}.
Output:
Extracted nested keys from the extract_keys function along with the frequency count.
Example:
[
{key1:{key2:value1, key3:value2, key4:{key5:value3}}
{key6:{key7:value2}
{key8:{key3:value3, key4:value5, key6:value3}}
]
Returns a new array with the nested keys appended along with a tuple containing the unnested value along with the frequency count.
[{
key1=key2:{'value1':1},
key1=key3:{'value2':2},
key1=key4=key5:{'value3':3},
key6=key7:{'value2':2},
key8=key3:{'value3':3},
key8=key4:{'value5':1},
key8=key6:{'value3':3}
}]
"""
count = 1
overall = {}
flag = 0
i = 0
while i < count:
value = None
result = None
new_value = None
for item in data:
result = extract_keys(item)
for element in result:
value = item
for key in element.split("="):
if key in value:
new_value = value[key]
if (type(new_value) == list) and (len(new_value) > 1):
if flag == 0:
count = len(new_value)
flag = 1
try:
new_value = new_value[i]
except:
new_value = new_value[-1]
elif (type(new_value) == list) and (len(new_value) == 1):
new_value = new_value[0]
value = new_value
if element not in overall:
overall[element] = {}
if type(value) != dict and type(value) != list:
if value not in overall[element]:
overall[element][value] = 1
i += 1
return overall
def calculate_signature_score(signature):
"""
Calculates the signature score for each signature as the sum of all the weights in it but ignoring the weights marked with "*".
Input:
A signature that contains tags of whether or not the weight should be included in calculating the signature.
Output:
An array containing the weights of all the signatures that should be considered.
Example:
Consider [
{'key1=key2':['val1', 40], 'key3=key4':['val2':90]}, //40 + 90
{'key5=key6=key7':['val3', *, 20], 'key8=key9':['val4':80]}, //80
{'key10=key11':['val5', 40]} //40
Returns [130, 80, 40].
"""
score_arr = []
for sig in signature:
score = 0
for key, value in sig.items():
for val in value:
if (val[1] != "!") and (val[1] != "*"):
score += val[1]
elif val[1] == "!":
score += val[2]
score_arr.append(score)
return score_arr
def calculate_namedstructure_scores(data_final, all_signatures):
"""
Calculate the individual scores for each discrete-ACL. This includes calculating human_error scores,
signature_scores, and deviant scores.
Input:
data_final:
List of ACLs grouped into a Cluster.
Example:
[
[acl-1, acl-4, acl-5, acl-9], //Cluster-0
[acl-2, acl-3], //Cluster-1
[acl-7], //Cluster-2
[acl-6, acl-8] //Cluster-3
]
all_signatures:
Consolidated signature for each Cluster.
Output:
deviant_arr: Returns all deviant properties for the ACL. Empty list is returned if no deviant property
in the ACL.
count_arr: [[TODO]]
dev_score: Returns the deviant score for the deviant properties found. 0 if no deviant property.
acls_arr: [[TODO]]
sig_score: Returns the signature score of the ACL.
cluster_num: Returns the cluster number that the ACL belongs to.
acls_score: The score that is generated for each acl
human_errors_arr: Returns the human_error properties (IPValidity, DigitRepetition, PortRange) for each ACL and
empty list if no human_error properties present in the ACL.
human_error_score: Returns the score of the human error property calculated for the ACL. 0 is returned if
no human_error property exists in the ACL.
"""
deviant_arr = []
count_arr = []
acls_dict = {}
acls_arr = []
acls_score = []
sig_score = []
dev_score = []
cluster_num = []
human_errors_arr = []
human_errors_score = []
i = 0
for acl_list in data_final:
bug_count = 0
conformer_count = 0
signature = all_signatures[i]
for acl in acl_list:
flag = 0
if str(acl[0]) not in acls_dict:
acls_dict[str(acl[0])] = 1
acls_arr.append(acl[0])
cluster_num.append(i)
flag = 1
else:
print(acl[0])
print(acls_dict)
continue
sig_score.append(signature_scores[i])
deviant = []
count = 0
dev_c = 0
acl_c = 0
human_errors = []
human_error_category = {}
data = transform_data(acl)
for data_key, data_val in data.items():
if data_key in signature:
'''
Key Valid. Now check for actual Value
'''
for val in data_val.items():
(error_key, error_value), error_category = calculateHumanErrors(data_key, val[0], signature[data_key], file_name.split(".")[0])
if error_category:
human_errors.append((error_key, error_value))
if error_category not in human_error_category:
human_error_category[error_category] = 0
human_error_category[error_category] += 1
for sig_val in signature[data_key]:
if val[0] == sig_val[0]:
'''
value also present. Now check if value part of bug/sig/skip
'''
if sig_val[1] == "!":
dev_c += sig_val[2]
acl_c += sig_val[2]
deviant.append((data_key, sig_val[0]))
bug_count += 1
elif sig_val[1] == "*":
conformer_count += 1
continue
else:
conformer_count += 1
count += sig_val[1]
acl_c += sig_val[1]
else:
'''
Deviant Key
'''
if data_key != "lines=name":
deviant.append(data_key)
dev_c += data_val
acl_c += data_val
if flag == 1:
count_arr.append(count)
deviant_arr.append(deviant)
dev_score.append(dev_c)
acls_score.append(acl_c)
human_errors_arr.append(human_errors)
human_errors_score.append(calculate_human_error_score(human_error_category))
i += 1
return deviant_arr, count_arr, dev_score, acls_arr, sig_score, cluster_num, acls_score, human_errors_arr, human_errors_score
def checkIPValidity(ip_address):
"""
A reg-ex check to verify the validity of an IP address.
Input:
A list of IP addresses
Output:
A boolean representing the validity of the IP address.
Returns 'True' if all the IPs are valid and 'False' if any of the IP is invalid.
"""
try:
ip_address = ip_address.split(":")
for ip in ip_address:
IP_check = "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])?(\/)?((3[01]|3[02]|[12][0-9]|[0-9])?)$"
match = re.match(IP_check, ip)
if not match:
return False
return True
    except Exception as e:
print(e)
return True
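# Quick illustration (hypothetical inputs): checkIPValidity("10.0.0.1/24")
# returns True, while checkIPValidity("1333.0.0.13") returns False because
# the first octet exceeds 255.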
def checkPortRange(port_range):
"""
A check to verify that the port range is specified correctly (elem0 <= elem1).
Input:
A string that contains two numbers separated by a '-'.
Output:
A boolean representing the validity of the range (elem0 <= elem1).
Example:
52108-52109 (True)
466 - 466 (True)
466 - 465 (False)
"""
try:
port_split = port_range.split("-")
if port_split[-1] < port_split[0]:
return False
return True
except:
return True
def checkDigitRepetition(digit, signature):
"""
Checks for Digit repetition.
Input:
The value for the following keys: srcPorts, dstPorts, lengthRange
Output:
Returns True if there is any Human Error and the digit is repeated twice.
"""
try:
if type(digit) == str:
digit = float(digit.split(":")[0])
if digit == 0:
return False
for item in signature:
if type(item) == str:
item = int(item.split(":")[0])
if digit == (item*10+item%10):
print("--------", digit, item*10 + item%10)
return True
return False
except:
return False
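# Worked example (assumed values): if the signature already contains 46, then
# checkDigitRepetition(466, [46]) returns True because 46 * 10 + 46 % 10 == 466,
# i.e. the trailing digit was likely typed twice by mistake.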
def calculateHumanErrors(data_key, data, signature, namedStructure):
"""
Checks for simple human errors like entering invalid IP Addresses, incorrect port-ranges, and digit repetitions.
Input:
data_key: The nested keys calculated in the overall_dict and get_overall_dict methods.
Example: key1=key2=key4
data: The data value for the keys.
signature: The signature for the keys that was calculated in the calculate_signature_d method.
namedStructure: The type of the IP file.
Possible values: IP_Access_List, Route_Filter_List, Routing_Policy, VRF, others.
Output:
Returns the error and the category it belongs to.
Example:
key1=key2=key3 [1333.0.0.13] [13172.16.31.10] IP_Access_List
Returns:
key1=key2=key3 [1333.0.0.13] IP
"""
human_error = (None, None)
category = None
data_key = data_key.split("=")[-1]
signature_items = []
for sig_item in signature:
signature_items.append(sig_item[0])
if namedStructure == "IP_Access_List":
if data_key == "ipWildcard":
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
elif data_key in ["dstPorts", "srcPorts"]:
if not checkPortRange(data):
'''
Invalid Ports Range
'''
human_error = (data_key, data)
category = "RANGE"
elif namedStructure == "Route_Filter_List":
if data_key == "ipWildcard":
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
elif data_key == "lengthRange":
if not checkPortRange(data):
'''
Invalid Ports Range
'''
human_error = (data_key, data)
category = "RANGE"
elif namedStructure == "Routing_Policy":
if data_key == "communities":
if checkDigitRepetition(data, signature_items):
'''
Error Copying digits
'''
human_error = (data_key, data)
category = "DIGIT"
elif data_key == "ips":
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
elif namedStructure == "VRF":
if data_key in ["administrativeCost", "remoteAs", "metric", "localAs", "referenceBandwidth", ]:
if checkDigitRepetition(data, signature_items):
'''
Error Copying digits
'''
human_error = (data_key, data)
category = "DIGIT"
elif data_key in ["peerAddress", "localIp", "routerId", "network"]:
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
'''
Any Other namedStructure
'''
else:
try:
if re.search('IP|ip', data_key) and not re.search('[a-zA-Z]', data):
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
elif not re.search("[a-zA-Z]", data):
if checkDigitRepetition(data, signature_items):
'''
Error Copying digits
'''
human_error = (data_key, data)
category = "DIGIT"
except:
pass
return human_error, category
def calculate_human_error_score(category_dict):
"""
Scores the human_errors that have been found with IPValidity and DigitRepetition errors
weighed as 'high,' i.e, 0.8 and PortRange errors weighed 'medium,' i.e., 0.5.
Input:
A dictionary containing the count of the error occurrences.
Output:
A weighted sum of all the errors found.
"""
total_score = 0
low = 0.2
medium = 0.5
high = 0.8
weightage_dict = {"IP": high, "RANGE": medium, "DIGIT": high}
for category, count in category_dict.items():
if count != 0:
#print("* Human Error Found *")
total_score += weightage_dict[category]/np.log(1+count)
return round(total_score/len(category_dict), 2) if category_dict else total_score
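# Worked example (hypothetical counts): for category_dict = {"IP": 1, "RANGE": 2}
# the score is (0.8 / ln(2) + 0.5 / ln(3)) / 2 ≈ 0.80, because each category's
# weight is damped by log(1 + count) before averaging over the categories.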
def flatten_json(data, delimiter):
"""
Flattens a JSON file.
Input:
data:
A JSON dictionary of hierarchical format.
{key1: {key2: value2, key3: value3}, key4: {key5: value5, key6: [value6, value7, value8]}}
delimiter:
A parameter to separate the keys in order to facilitate easy splitting.
Output:
A flattened dictionary with keys separated by the delimiter parameter.
key1_key2:value2, key1_key3:value3, key4_key5:value5, key4_key6:value6, key4_key6:value7, key4_key6:value8
"""
out = {}
def flatten(data, name=''):
if type(data) is dict:
for key in data:
flatten(data[key], name + key + delimiter)
elif type(data) is list:
i = 0
for elem in data:
flatten(elem, name + str(i) + delimiter)
i += 1
else:
out[name[:-1]] = data
flatten(data)
return out
def encode_data(data):
"""
Converts categorical values into numeric values. We use MultiLabelBinarizer to encode categorical data.
This is done in order to pass the data into clustering and other similar algorithms that can only handle numerical data.
Flattens each ACL list and then encodes them.
Input:
A Python list that contains all discrete-ACLs.
Output:
A Python list after encoding.
"""
flattenedData = []
allKeys = []
for NS in data:
flattenedNamedStructure = flatten_json(NS, '_')
flattenedData.append(flattenedNamedStructure)
for key in flattenedNamedStructure.keys():
if key not in allKeys:
allKeys.append(key)
mergedData = []
for NS in flattenedData:
mergedNS = []
for key, value in NS.items():
mergedNS.append(str(value))
mergedData.append(mergedNS)
mlb = MultiLabelBinarizer()
data_T = mlb.fit_transform(mergedData)
print("MLb classes=")
print(mlb.classes_)
return data_T, mlb.classes_
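# Illustrative sketch (not part of the original module): encode two tiny made-up
# ACL-like dicts. Every flattened value is stringified, and the returned matrix has
# one row per ACL with a 1 wherever that value occurs.
def _encode_data_example():
    acls = [
        {"action": "PERMIT", "ports": [80, 443]},
        {"action": "DENY", "ports": [22]},
    ]
    encoded, classes = encode_data(acls)
    # `classes` lists the distinct stringified values; `encoded` is the binary matrix.
    return encoded, classes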
def export_clusters(data, acl_weight_mapper):
"""
    Helper method to verify the authenticity of the clusters being formed.
Input:
The data that is sorted into list of Clusters.
Example:
[
[acl-1, acl-4, acl-5, acl-9], //Cluster-0
[acl-2, acl-3], //Cluster-1
[acl-7], //Cluster-2
[acl-6, acl-8] //Cluster-3
]
    We also use the acl_weight_mapper and namedstructure_node_mapper dictionaries, looking up
    each ACL to get its weight and the nodes that the ACL is present in.
Output:
A csv file by the name of Generated_Clusters is written in the format:
Cluster-0 |||| Cluster-0 Names |||| Cluster-0 Nodes |||| Cluster-1 |||| Cluster-1 Names |||| Cluster-1 Nodes
acl-1 |||| permit tcp eq 51107 |||| st55in15hras |||| acl-2 |||| permit udp any eq 1200 |||| rt73ve11m5ar
acl-4 |||| permit tcp eq 51102 |||| st55in15hras, st55in17hras |||| acl-3 |||| permit udp any eq 120002 |||| rt73ve10m4ar
acl-5 |||| permit tcp eq 51100 |||| st55in17hras ||||
acl-9 |||| permit tcp eq 51109 |||| st55in17hras ||||
"""
column_labels = []
for index in range(len(data)):
column_labels.append("Cluster " + str(index))
column_labels.append("Cluster " + str(index) + " ACL Weights")
column_labels.append("Cluster " + str(index) + " Nodes")
data_to_export = pd.DataFrame(columns=column_labels)
for cluster_index, cluster_data in enumerate(data):
discrete_ACL_nodes = []
cluster_weights = []
for discrete_ACL in cluster_data:
temp = json.dumps(discrete_ACL[0], sort_keys=True)
temp_arr = []
try:
for node in namedstructure_node_mapper[temp]:
temp_arr.append(node)
discrete_ACL_nodes.append(temp_arr)
except:
discrete_ACL_nodes.append(None)
cluster_weights.append(acl_weight_mapper[temp])
        cluster_data = pd.Series(cluster_data)
from __future__ import print_function
from __future__ import division
import source.cymdist_tool.tool as cymdist
import v2gsim
import pandas
import datetime
import random
import numpy
import matplotlib.pyplot as plt
import progressbar
import traceback
try:
import cympy
except:
pass
class EVForecast(object):
"""Forecast EV demand at a feeder"""
def __init__(self):
self.ev_forecast = None
self.vehicle_project = None
self.directory = None
self.pk = None
self.feeder_timestep = None
self.itinerary_path = None
self.power_demand_path = None
self.configuration = None
def initialize(self, feeder):
"""Initialize feeder inputs"""
self.ev_forecast = pandas.read_excel(feeder.cyder_input_row.ev_forecast)
self.vehicle_project = pandas.read_excel(
self.ev_forecast.loc[0, 'vehicle_parameters'], sheetname=None)
self.directory = feeder.directory
self.pk = feeder.pk
self.feeder_timestep = feeder.timestep
self.feeder = feeder
self.itinerary_path = self.directory + str(self.pk) + '_itinerary.csv'
self.power_demand_path = self.directory + str(self.pk) + '_power.csv'
self.configuration = feeder.configuration
def forecast(self):
"""Forecast EV demand and return configuration file for CyDER"""
# Create an itinerary file from an occupancy schedule
self._occupancy_to_itineraries()
        # Forecast power demand based on the itinerary file
power_demand = self._itineraries_to_power_demand()
# Save power demand with the right format
formatted_power_demand = self._save_power_demand(power_demand)
# Update the configuration file
self._update_configuration(formatted_power_demand)
# Read power demand and plot
(formatted_power_demand / 1000).plot()
plt.ylabel('Power demand [kW]')
plt.xlabel('Time')
plt.show()
return self.configuration
def _open_itinerary_database(self):
"""Open itinerary database to pick itineraries"""
# Create a V2G-Sim project
itinerary_db = v2gsim.model.Project()
# Initialize starting date before loading vehicles
itinerary_db.date = self.vehicle_project['project'].loc[0, 'start_date']
# Load vehicles into the V2G-Sim project
print('')
print('Loading itineraries...')
df_itinerary_db = pandas.read_excel(
io=self.ev_forecast.loc[0, 'itinerary_database'], sheetname='Activity')
itinerary_db = v2gsim.itinerary.from_excel(
itinerary_db, is_preload=True, df=df_itinerary_db)
# Filter out vehicles that don't have a full cycle over the day
# (avoid midnight mismatch)
itinerary_db.vehicles = v2gsim.itinerary.get_cycling_itineraries(itinerary_db)
return itinerary_db, df_itinerary_db
def _preprocess_itinerary_database(self, row, itinerary_db):
"""Filter out itineraries before picking itineraries
Return a dictionary containing boolean describing vehicle match with the
occupancy schedule"""
def activity_to_boolean(start, end, boolean):
delta_minutes = int((end - start).total_seconds() / 60)
return [boolean] * delta_minutes
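        # e.g. a True activity from 08:00 to 08:05 becomes [True] * 5 (one flag per minute)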
# Load occupancy
        dfocc = pandas.read_pickle(row.occupancy_filename)
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
import itertools
from numpy import nan
import numpy as np
from pandas import (DataFrame, Series, Timestamp, date_range, compat,
option_context, Categorical)
from pandas.core.arrays import IntervalArray, integer_array
from pandas.compat import StringIO
import pandas as pd
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
# Segregated collection of methods that require the BlockManager internal data
# structure
class TestDataFrameBlockInternals():
def test_cast_internals(self, float_frame):
casted = DataFrame(float_frame._data, dtype=int)
expected = DataFrame(float_frame._series, dtype=int)
assert_frame_equal(casted, expected)
casted = DataFrame(float_frame._data, dtype=np.int32)
expected = DataFrame(float_frame._series, dtype=np.int32)
assert_frame_equal(casted, expected)
def test_consolidate(self, float_frame):
float_frame['E'] = 7.
consolidated = float_frame._consolidate()
assert len(consolidated._data.blocks) == 1
# Ensure copy, do I want this?
recons = consolidated._consolidate()
assert recons is not consolidated
tm.assert_frame_equal(recons, consolidated)
float_frame['F'] = 8.
assert len(float_frame._data.blocks) == 3
float_frame._consolidate(inplace=True)
assert len(float_frame._data.blocks) == 1
def test_consolidate_inplace(self, float_frame):
frame = float_frame.copy() # noqa
# triggers in-place consolidation
for letter in range(ord('A'), ord('Z')):
float_frame[chr(letter)] = chr(letter)
def test_values_consolidate(self, float_frame):
float_frame['E'] = 7.
assert not float_frame._data.is_consolidated()
_ = float_frame.values # noqa
assert float_frame._data.is_consolidated()
def test_modify_values(self, float_frame):
float_frame.values[5] = 5
assert (float_frame.values[5] == 5).all()
# unconsolidated
float_frame['E'] = 7.
float_frame.values[6] = 6
assert (float_frame.values[6] == 6).all()
def test_boolean_set_uncons(self, float_frame):
float_frame['E'] = 7.
expected = float_frame.values.copy()
expected[expected > 1] = 2
float_frame[float_frame > 1] = 2
assert_almost_equal(expected, float_frame.values)
def test_values_numeric_cols(self, float_frame):
float_frame['foo'] = 'bar'
values = float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
def test_values_lcd(self, mixed_float_frame, mixed_int_frame):
# mixed lcd
values = mixed_float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = mixed_float_frame[['A', 'B', 'C']].values
assert values.dtype == np.float32
values = mixed_float_frame[['C']].values
assert values.dtype == np.float16
# GH 10364
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = mixed_int_frame[['A', 'D']].values
assert values.dtype == np.int64
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[['A', 'B', 'C']].values
assert values.dtype == np.float64
# as B and C are both unsigned, no forcing to float is needed
values = mixed_int_frame[['B', 'C']].values
assert values.dtype == np.uint64
values = mixed_int_frame[['A', 'C']].values
assert values.dtype == np.int32
values = mixed_int_frame[['C', 'D']].values
assert values.dtype == np.int64
values = mixed_int_frame[['A']].values
assert values.dtype == np.int32
values = mixed_int_frame[['C']].values
assert values.dtype == np.uint8
def test_constructor_with_convert(self):
# this is actually mostly a test of lib.maybe_convert_objects
# #2845
df = DataFrame({'A': [2 ** 63 - 1]})
result = df['A']
expected = Series(np.asarray([2 ** 63 - 1], np.int64), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2 ** 63]})
result = df['A']
expected = Series(np.asarray([2 ** 63], np.uint64), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [datetime(2005, 1, 1), True]})
result = df['A']
expected = Series(np.asarray([datetime(2005, 1, 1), True], np.object_),
name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [None, 1]})
result = df['A']
expected = Series(np.asarray([np.nan, 1], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0, 2]})
result = df['A']
expected = Series(np.asarray([1.0, 2], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, 3]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, 3], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, 3.0]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, 3.0], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, True]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, True], np.object_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0, None]})
result = df['A']
expected = Series(np.asarray([1.0, np.nan], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, None]})
result = df['A']
expected = Series(np.asarray(
[1.0 + 2.0j, np.nan], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2.0, 1, True, None]})
result = df['A']
expected = Series(np.asarray(
[2.0, 1, True, None], np.object_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2.0, 1, datetime(2006, 1, 1), None]})
result = df['A']
expected = Series(np.asarray([2.0, 1, datetime(2006, 1, 1),
None], np.object_), name='A')
assert_series_equal(result, expected)
def test_construction_with_mixed(self, float_string_frame):
# test construction edge cases with mixed types
# f7u12, this does not work without extensive workaround
data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 1)]]
df = DataFrame(data)
# check dtypes
result = df.get_dtype_counts().sort_values()
expected = Series({'datetime64[ns]': 3})
# mixed-type frames
float_string_frame['datetime'] = datetime.now()
float_string_frame['timedelta'] = timedelta(days=1, seconds=1)
assert float_string_frame['datetime'].dtype == 'M8[ns]'
assert float_string_frame['timedelta'].dtype == 'm8[ns]'
result = float_string_frame.get_dtype_counts().sort_values()
expected = Series({'float64': 4,
'object': 1,
'datetime64[ns]': 1,
'timedelta64[ns]': 1}).sort_values()
assert_series_equal(result, expected)
def test_construction_with_conversions(self):
# convert from a numpy array of non-ns timedelta64
arr = np.array([1, 2, 3], dtype='timedelta64[s]')
df = DataFrame(index=range(3))
df['A'] = arr
expected = DataFrame({'A': pd.timedelta_range('00:00:01', periods=3,
freq='s')},
index=range(3))
assert_frame_equal(df, expected)
expected = DataFrame({
'dt1': Timestamp('20130101'),
'dt2': date_range('20130101', periods=3),
# 'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),
}, index=range(3))
df = DataFrame(index=range(3))
df['dt1'] = np.datetime64('2013-01-01')
df['dt2'] = np.array(['2013-01-01', '2013-01-02', '2013-01-03'],
dtype='datetime64[D]')
# df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01
# 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')
assert_frame_equal(df, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise not-implementederror
def f(dtype):
data = list(itertools.repeat((datetime(2001, 1, 1),
"aa", 20), 9))
return DataFrame(data=data,
columns=["A", "B", "C"],
dtype=dtype)
pytest.raises(NotImplementedError, f,
[("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
# 10822
# invalid error message on dt inference
if not compat.is_platform_windows():
f('M8[ns]')
def test_equals_different_blocks(self):
# GH 9330
df0 = pd.DataFrame({"A": ["x", "y"], "B": [1, 2],
"C": ["w", "z"]})
df1 = df0.reset_index()[["A", "B", "C"]]
# this assert verifies that the above operations have
# induced a block rearrangement
assert (df0._data.blocks[0].dtype != df1._data.blocks[0].dtype)
# do the real tests
assert_frame_equal(df0, df1)
assert df0.equals(df1)
assert df1.equals(df0)
def test_copy_blocks(self, float_frame):
# API/ENH 9607
df = DataFrame(float_frame, copy=True)
column = df.columns[0]
# use the default copy=True, change a column
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
blocks = df.as_blocks()
for dtype, _df in blocks.items():
if column in _df:
_df.loc[:, column] = _df[column] + 1
# make sure we did not change the original DataFrame
assert not _df[column].equals(df[column])
def test_no_copy_blocks(self, float_frame):
# API/ENH 9607
df = DataFrame(float_frame, copy=True)
column = df.columns[0]
# use the copy=False, change a column
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
blocks = df.as_blocks(copy=False)
for dtype, _df in blocks.items():
if column in _df:
_df.loc[:, column] = _df[column] + 1
# make sure we did change the original DataFrame
assert _df[column].equals(df[column])
def test_copy(self, float_frame, float_string_frame):
cop = float_frame.copy()
cop['E'] = cop['A']
assert 'E' not in float_frame
# copy objects
copy = float_string_frame.copy()
assert copy._data is not float_string_frame._data
def test_pickle(self, float_string_frame, empty_frame, timezone_frame):
unpickled = tm.round_trip_pickle(float_string_frame)
assert_frame_equal(float_string_frame, unpickled)
# buglet
float_string_frame._data.ndim
# empty
unpickled = tm.round_trip_pickle(empty_frame)
repr(unpickled)
# tz frame
unpickled = tm.round_trip_pickle(timezone_frame)
assert_frame_equal(timezone_frame, unpickled)
def test_consolidate_datetime64(self):
# numpy vstack bug
data = """\
starting,ending,measure
2012-06-21 00:00,2012-06-23 07:00,77
2012-06-23 07:00,2012-06-23 16:30,65
2012-06-23 16:30,2012-06-25 08:00,77
2012-06-25 08:00,2012-06-26 12:00,0
2012-06-26 12:00,2012-06-27 08:00,77
"""
df = pd.read_csv(StringIO(data), parse_dates=[0, 1])
ser_starting = df.starting
ser_starting.index = ser_starting.values
ser_starting = ser_starting.tz_localize('US/Eastern')
ser_starting = ser_starting.tz_convert('UTC')
ser_starting.index.name = 'starting'
ser_ending = df.ending
ser_ending.index = ser_ending.values
ser_ending = ser_ending.tz_localize('US/Eastern')
ser_ending = ser_ending.tz_convert('UTC')
ser_ending.index.name = 'ending'
df.starting = ser_starting.index
df.ending = ser_ending.index
tm.assert_index_equal(pd.DatetimeIndex(
df.starting), ser_starting.index)
tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index)
def test_is_mixed_type(self, float_frame, float_string_frame):
assert not float_frame._is_mixed_type
assert float_string_frame._is_mixed_type
def test_get_numeric_data(self):
# TODO(wesm): unused?
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
'f': Timestamp('20010102')},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, 'float64': 1,
datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
assert_series_equal(result, expected)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
'd': np.array([1.] * 10, dtype='float32'),
'e': np.array([1] * 10, dtype='int32'),
'f': np.array([1] * 10, dtype='int16'),
'g': Timestamp('20010102')},
index=np.arange(10))
result = df._get_numeric_data()
expected = df.loc[:, ['a', 'b', 'd', 'e', 'f']]
assert_frame_equal(result, expected)
only_obj = df.loc[:, ['c', 'g']]
result = only_obj._get_numeric_data()
expected = df.loc[:, []]
assert_frame_equal(result, expected)
df = DataFrame.from_dict(
{'a': [1, 2], 'b': ['foo', 'bar'], 'c': [np.pi, np.e]})
result = df._get_numeric_data()
expected = DataFrame.from_dict({'a': [1, 2], 'c': [np.pi, np.e]})
assert_frame_equal(result, expected)
df = result.copy()
result = df._get_numeric_data()
expected = df
assert_frame_equal(result, expected)
def test_get_numeric_data_extension_dtype(self):
# GH 22290
df = DataFrame({
'A': integer_array([-10, np.nan, 0, 10, 20, 30], dtype='Int64'),
'B': Categorical(list('abcabc')),
'C': integer_array([0, 1, 2, 3, np.nan, 5], dtype='UInt8'),
'D': IntervalArray.from_breaks(range(7))})
result = df._get_numeric_data()
expected = df.loc[:, ['A', 'C']]
assert_frame_equal(result, expected)
def test_convert_objects(self, float_string_frame):
oops = float_string_frame.T.T
converted = oops._convert(datetime=True)
assert_frame_equal(converted, float_string_frame)
assert converted['A'].dtype == np.float64
# force numeric conversion
float_string_frame['H'] = '1.'
float_string_frame['I'] = '1'
# add in some items that will be nan
length = len(float_string_frame)
float_string_frame['J'] = '1.'
float_string_frame['K'] = '1'
float_string_frame.loc[0:5, ['J', 'K']] = 'garbled'
converted = float_string_frame._convert(datetime=True, numeric=True)
assert converted['H'].dtype == 'float64'
assert converted['I'].dtype == 'int64'
assert converted['J'].dtype == 'float64'
assert converted['K'].dtype == 'float64'
assert len(converted['J'].dropna()) == length - 5
assert len(converted['K'].dropna()) == length - 5
# via astype
converted = float_string_frame.copy()
converted['H'] = converted['H'].astype('float64')
converted['I'] = converted['I'].astype('int64')
assert converted['H'].dtype == 'float64'
assert converted['I'].dtype == 'int64'
# via astype, but errors
converted = float_string_frame.copy()
with tm.assert_raises_regex(ValueError, 'invalid literal'):
converted['H'].astype('int32')
# mixed in a single column
df = DataFrame(dict(s=Series([1, 'na', 3, 4])))
result = df._convert(datetime=True, numeric=True)
expected = DataFrame(dict(s=Series([1, np.nan, 3, 4])))
assert_frame_equal(result, expected)
def test_convert_objects_no_conversion(self):
mixed1 = DataFrame(
{'a': [1, 2, 3], 'b': [4.0, 5, 6], 'c': ['x', 'y', 'z']})
mixed2 = mixed1._convert(datetime=True)
assert_frame_equal(mixed1, mixed2)
def test_infer_objects(self):
# GH 11221
df = DataFrame({'a': ['a', 1, 2, 3],
'b': ['b', 2.0, 3.0, 4.1],
'c': ['c', datetime(2016, 1, 1),
datetime(2016, 1, 2),
datetime(2016, 1, 3)],
'd': [1, 2, 3, 'd']},
columns=['a', 'b', 'c', 'd'])
df = df.iloc[1:].infer_objects()
assert df['a'].dtype == 'int64'
assert df['b'].dtype == 'float64'
assert df['c'].dtype == 'M8[ns]'
assert df['d'].dtype == 'object'
expected = DataFrame({'a': [1, 2, 3],
'b': [2.0, 3.0, 4.1],
'c': [datetime(2016, 1, 1),
datetime(2016, 1, 2),
datetime(2016, 1, 3)],
'd': [2, 3, 'd']},
columns=['a', 'b', 'c', 'd'])
# reconstruct frame to verify inference is same
tm.assert_frame_equal(df.reset_index(drop=True), expected)
def test_stale_cached_series_bug_473(self):
# this is chained, but ok
with option_context('chained_assignment', None):
Y = DataFrame(np.random.random((4, 4)), index=('a', 'b', 'c', 'd'),
columns=('e', 'f', 'g', 'h'))
repr(Y)
Y['e'] = Y['e'].astype('object')
Y['g']['c'] = np.NaN
repr(Y)
result = Y.sum() # noqa
exp = Y['g'].sum() # noqa
assert pd.isna(Y['g']['c'])
def test_get_X_columns(self):
# numeric and object columns
df = DataFrame({'a': [1, 2, 3],
'b': [True, False, True],
'c': ['foo', 'bar', 'baz'],
'd': [None, None, None],
'e': [3.14, 0.577, 2.773]})
tm.assert_index_equal(df._get_numeric_data().columns,
                              pd.Index(['a', 'b', 'e']))
"""Implementation of prototype set models with sklearn compatible interface.
Copyright by <NAME>
Released under the MIT license - see LICENSE file for details
This submodule creates a logger named like itself that logs to a NullHandler and tracks progress on model fitting at log
level INFO. The invoking application needs to manage log output.
"""
from abc import ABCMeta, abstractmethod
import logging
import numpy as np
import pandas as pd
from scipy.optimize import fmin_l_bfgs_b
from scipy.stats import rankdata
from sklearn.base import BaseEstimator
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import check_X_y
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import check_array, check_is_fitted, check_random_state
from statsmodels.distributions.empirical_distribution import ECDF
from proset.objective import ClassifierObjective
from proset.set_manager import ClassifierSetManager
from proset.shared import find_changes, check_feature_names, check_scale_offset, LOG_OFFSET
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
LOG_CAPTION = " ".join(["{:>10s}"] * 6 + ["{:s}"]).format(
"Iterations", "Calls", "Objective", "Gradient", "Features", "Prototypes", "Status"
)
LOG_MESSAGE = " ".join(["{:10d}", "{:10d}", "{:10.1e}", "{:10.1e}", "{:10d}", "{:10d}", "{:s}"])
LIMITED_M = 10 # parameters controlling L-BFGS-B fit
LIMITED_FACTR = 1e7
LIMITED_PGTOL = 1e-5
LIMITED_MAXFUN = 15000
LIMITED_MAXITER = 15000
LIMITED_MAXLS = 20
# noinspection PyPep8Naming, PyAttributeOutsideInit
class Model(BaseEstimator, metaclass=ABCMeta):
"""Base class for prototype set models.
"""
def __init__(
self,
n_iter=1,
lambda_v=1e-3,
lambda_w=1e-8,
alpha_v=0.95,
alpha_w=0.95,
num_candidates=1000,
max_fraction=0.5,
random_state=None
):
"""Initialize prototype set model with hyperparameters.
:param n_iter: non-negative integer; number of batches of prototypes to fit
:param lambda_v: non-negative float; penalty weight for the feature weights
:param lambda_w: non-negative float; penalty weight for the prototype weights
:param alpha_v: float in [0.0, 1.0]; fraction of lambda_v assigned as l2 penalty weight to feature weights; the
remainder is assigned as l1 penalty weight
:param alpha_w: float in [0.0, 1.0]; fraction of lambda_w assigned as l2 penalty weight to prototype weights;
the remainder is assigned as l1 penalty weight
:param num_candidates: positive integer; number of candidates for prototypes to try for each batch
:param max_fraction: float in (0.0, 1.0); maximum fraction of candidates to draw from one group of candidates;
candidates are grouped by class and whether the current model classifies them correctly or not
:param random_state: instance of np.random.RandomState, integer, or None; if a random state is passed, that
state will be used for randomization; if an integer or None is passed, a new random state is generated using
the argument as seed for every call to fit()
"""
self.n_iter = n_iter
self.lambda_v = lambda_v
self.lambda_w = lambda_w
self.alpha_v = alpha_v
self.alpha_w = alpha_w
self.num_candidates = num_candidates
self.max_fraction = max_fraction
self.random_state = random_state
def fit(self, X, y, sample_weight=None, warm_start=False):
"""Fit proset model to data.
:param X: 2D numpy float array; feature matrix; sparse matrices or infinite/missing values not supported
:param y: list-like object; target for supervised learning
:param sample_weight: 1D numpy array of positive floats or None; sample weights used for likelihood calculation;
pass None to use unit weights
:param warm_start: boolean; whether to create a new model or to add batches to an existing model
:return: no return value; model updated in place
"""
self._check_hyperparameters()
X, y, sample_weight = self._validate_arrays(X=X, y=y, sample_weight=sample_weight, reset=not warm_start)
logger.info("Fit proset model with {} batches and penalties lambda_v = {:0.2e}, lambda_w = {:0.2e}".format(
self.n_iter, self.lambda_v, self.lambda_w
))
MySetManager, MyObjective = self._get_compute_classes() # pylint: disable=invalid-name
if not warm_start or not hasattr(self, "set_manager_"):
self.set_manager_ = MySetManager(target=y) # pylint: disable=attribute-defined-outside-init
for i in range(self.n_iter):
objective = MyObjective(
features=X,
target=y,
weights=sample_weight,
num_candidates=self.num_candidates,
max_fraction=self.max_fraction,
set_manager=self.set_manager_,
lambda_v=self.lambda_v,
lambda_w=self.lambda_w,
alpha_v=self.alpha_v,
alpha_w=self.alpha_w,
random_state=check_random_state(self.random_state)
)
starting_point, bounds = objective.get_starting_point_and_bounds()
solution = fmin_l_bfgs_b(
func=objective.evaluate,
x0=starting_point,
bounds=bounds,
m=LIMITED_M,
factr=LIMITED_FACTR,
pgtol=LIMITED_PGTOL,
maxfun=LIMITED_MAXFUN,
maxiter=LIMITED_MAXITER,
maxls=LIMITED_MAXLS
)
batch_info = objective.get_batch_info(solution[0]) # solution[0] is the parameter vector
self.set_manager_.add_batch(batch_info)
if logger.isEnabledFor(logging.INFO): # pragma: no cover
logger.info("Batch {} fit results".format(i + 1))
logger.info(LOG_CAPTION)
logger.info(LOG_MESSAGE.format(
solution[2]["nit"],
solution[2]["funcalls"],
solution[1],
np.max(np.abs(solution[2]["grad"])),
len(np.nonzero(batch_info["feature_weights"])[0]),
len(np.nonzero(batch_info["prototype_weights"])[0]),
self._parse_solver_status(solution[2])
))
logger.info("Model fit complete")
return self
def _check_hyperparameters(self):
"""Check that model hyperparameters are valid.
:return: no return value; raises a ValueError if an issue is found
"""
if not np.issubdtype(type(self.n_iter), np.integer):
raise TypeError("Parameter n_iter must be integer.")
if self.n_iter < 0:
raise ValueError("Parameter n_iter must not be negative.")
# validation of other parameters is left to the classes or functions relying on them
# noinspection PyMethodMayBeStatic, PyUnresolvedReferences
def _validate_arrays(self, X, y, sample_weight, reset):
"""Check or transform input target, features, and sample weights as appropriate for the model.
:param X: see docstring of fit() for details
:param y: see docstring of fit() for details
:param sample_weight: see docstring of fit() for details
:param reset: boolean; whether to prepare the model for a new fit or enable warm start
:return: transformed versions of X and y; may also update the state of the model instance
"""
X, y = check_X_y(X=X, y=y)
if reset or not hasattr(self, "n_features_in_"):
self.n_features_in_ = X.shape[1] # pylint: disable=attribute-defined-outside-init
# the n_features_in_ attribute for tabular input is an sklearn convention
elif self.n_features_in_ != X.shape[1]:
raise ValueError("Parameter X must have {} columns.".format(self.n_features_in_))
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
if sample_weight.shape[0] != X.shape[0]:
raise ValueError("Parameter sample_weight must have one element per row of X if not None.")
return X, self._validate_y(y, reset), sample_weight
@abstractmethod
def _validate_y(self, y, reset): # pragma: no cover
"""Perform checks on estimator target that depend on estimator type.
:param y: 1D numpy array; target for supervised learning
:param reset: boolean; whether to prepare the model for a new fit or enable warm start
:return: y after applying appropriate checks and transforms
"""
raise NotImplementedError("Abstract method Model._validate_y() has no default implementation.")
@staticmethod
@abstractmethod
def _get_compute_classes(): # pragma: no cover
"""Provide classes implementing the set manager and objective function for the model.
:return: subclasses of proset.set_manager.SetManager and proset.objective.Objective
"""
raise NotImplementedError("Abstract method Model._get_compute_classes() has no default implementation.")
@staticmethod
def _parse_solver_status(solver_status):
"""Translate L-BFGS-B solver status into human-readable format.
:param solver_status: dict; third output argument of scipy.fmin_l_bfgs_b()
:return: string; solver exit status
"""
if solver_status["warnflag"] == 0:
return "converged"
if solver_status["warnflag"] == 1:
return "reached limit on iterations or function calls"
return "not converged ({})".format(solver_status["task"])
def predict(self, X, n_iter=None, compute_familiarity=False):
"""Predict class labels for a feature matrix.
:param X: 2D numpy array; feature matrix; sparse matrices or infinite/missing values not supported
:param n_iter: non-negative integer, 1D numpy array of non-negative and strictly increasing integers, or None;
number of batches to use for evaluation; pass None for all batches; pass an array to evaluate for multiple
values at once
:param compute_familiarity: boolean; whether to compute the familiarity for each sample
:return: 1D numpy array or list of 1D numpy arrays; if n_iter is integer or None, a single set of predictions is
returned as an array; if n_iter is an array, a list of predictions is returned with one element for each
element of the array; if compute_familiarity is True, also returns a 1D numpy float array or list of float
arrays containing the familiarity of each sample
"""
check_is_fitted(self, attributes="set_manager_")
return self._compute_prediction(X=check_array(X), n_iter=n_iter, compute_familiarity=compute_familiarity)
@abstractmethod
def _compute_prediction(self, X, n_iter, compute_familiarity): # pragma: no cover
"""Compute prediction.
:param X: see docstring of predict() for details
:param n_iter: see docstring of predict() for details
:param compute_familiarity: see docstring of predict() for details
:return: see docstring of predict() for details
"""
raise NotImplementedError("Abstract method Model._get_prediction() has no default implementation.")
def score(self, X, y, sample_weight=None, n_iter=None):
"""Use trained model to score sample data.
:param X: 2D numpy array; feature matrix; sparse matrices or infinite/missing values not supported
:param y: list-like object; target for supervised learning
:param sample_weight: 1D numpy array of positive floats or None; sample weights used for likelihood calculation;
pass None to use unit weights
:param n_iter: non-negative integer, 1D numpy array of non-negative and strictly increasing integers, or None;
number of batches to use for evaluation; pass None for all batches; pass an array to evaluate for multiple
values at once
:return: float or 1D numpy array of floats; if n_iter is integer or None, a single score is returned as a float
value; if n_iter is an array, an array of scores of the same length is returned
"""
check_is_fitted(self, attributes="set_manager_")
X, y, sample_weight = self._validate_arrays(X=X, y=y, sample_weight=sample_weight, reset=False)
return self._compute_score(X=X, y=y, sample_weight=sample_weight, n_iter=n_iter)
@abstractmethod
def _compute_score(self, X, y, sample_weight, n_iter): # pragma: no cover
"""Compute score.
:param X: see docstring of score() for details
:param y: numpy array; target for supervised learning
:param sample_weight: see docstring of score() for details
:param n_iter: see docstring of score() for details
:return: as return value of score()
"""
raise NotImplementedError("Abstract method Model._compute_score() has no default implementation.")
def export(
self,
n_iter=None,
train_names=None,
include_features=True,
feature_names=None,
scale=None,
offset=None
):
"""Export information on prototypes and parameters from trained model.
:param n_iter: non-negative integer, or None; number of batches to use for evaluation; pass None for all
batches
:param train_names: list of strings or None; names for the original training samples in order; these are
associated with the prototypes in the report; pass None to use default names 'sample 0', 'sample 1', etc.
:param include_features: boolean; whether to include information on relevant features
:param feature_names: list of strings or None; if not None, must have one element per column of features;
feature names to be used as column headers; pass None to use default names X0, X1, etc.; only used if
include_features is True
:param scale: 1D numpy array of positive floats or None; if not None, must have one element per column of
features; use this to scale features back to their original values for the report; pass None for no scaling;
only used if include_features is True
:param offset: 1D numpy array of floats or None; if not None, must have one element per column of features; use
this to shift features back to their original values for the report; pass None for no offset; only used if
include_features is True
:return: pandas data frame with the following columns; columns containing the feature name are repeated once for
each active feature; active features are ordered by decreasing weight over batches as per
set_manager.SetManager.get_feature_weights():
            - batch: non-negative float; integer batch index for prototypes, np.NaN for properties of the baseline
              distribution
            - sample: non-negative float; integer sample index for prototypes, np.NaN for properties of the baseline
              distribution
            - sample name: string; sample name
            - target: varies; target for supervised learning
            - prototype weight: positive float; prototype weight
            - <feature> weight: non-negative float; feature weight for the associated batch, np.NaN means the feature
              plays no role for the batch; only included if include_features is True
            - <feature> value: float; feature value as used by the model; set to np.NaN if the feature weight is np.NaN;
              only included if include_features is True
            - <feature> original: float; original feature value; set to np.NaN if the feature weight is np.NaN; this
              column is not generated if both scale and offset are None; only included if include_features is True
"""
check_is_fitted(self, attributes="set_manager_")
feature_columns, include_original, scale, offset = self._check_report_input(
feature_names=feature_names,
num_features=self.n_features_in_,
scale=scale,
offset=offset,
sample_name=None
)[:4]
batches = self.set_manager_.get_batches(features=None, num_batches=n_iter)
report = self._make_prototype_report(batches=batches, train_names=train_names, compute_impact=False)
if include_features:
report = pd.concat([report, self._make_feature_report(
batches=batches,
feature_columns=feature_columns,
include_original=include_original,
scale=scale,
offset=offset,
active_features=self.set_manager_.get_feature_weights(num_batches=n_iter)["feature_index"],
include_similarities=False
)], axis=1)
report = report.sort_values(["batch", "prototype weight"], ascending=[True, False])
report = pd.concat([self._make_baseline_for_export(), report])
report.reset_index(inplace=True, drop=True)
return report
@staticmethod
def _check_report_input(feature_names, num_features, scale, offset, sample_name):
"""Check input for export() and explain() for consistency and apply defaults.
:param feature_names: see docstring of export() for details
:param num_features: positive integer; number of features
:param scale: see docstring of export() for details
:param offset: see docstring of export() for details
:param sample_name: string or None; name used for reference sample
:return: five return arguments:
- list of lists of strings; each list contains column names associated with one feature in the report
- boolean; whether original values need to be included in the report
- 1D numpy float array; scale as input or vector of ones if input is None
- 1D numpy float array; offset as input or vector of zeros if input is None
- string; sample name as input or default
raise an error if a check fails
"""
feature_names = check_feature_names(
num_features=num_features,
feature_names=feature_names,
active_features=None
)
feature_columns = [[
"{} weight".format(feature_name),
"{} value".format(feature_name),
"{} original".format(feature_name),
"{} similarity".format(feature_name)
] for feature_name in feature_names]
include_original = scale is not None or offset is not None
scale, offset = check_scale_offset(num_features=num_features, scale=scale, offset=offset)
if sample_name is None:
sample_name = "new sample"
return feature_columns, include_original, scale, offset, sample_name
@classmethod
def _make_prototype_report(cls, batches, train_names, compute_impact):
"""Format prototype information for report.
:param batches: list as generated by set_manager.SetManager.get_batches()
:param train_names: see docstring of export() for details
:param compute_impact: boolean; whether to compute the similarity and impact for each prototype relative to a
reference sample; if True, the information for each non-empty batch needs to contain the key 'similarities'
:return: pandas data frame with the following columns:
- batch: positive integer; batch index
- sample: non-negative integer; sample index for prototypes
- sample name: string; sample name
- target: varies; target for supervised learning
- prototype weight: positive float; prototype weight
- similarity: float in (0.0, 1.0]; similarity between prototype and reference sample; only included if
compute_impact is True
- impact: positive float; impact of prototype on reference sample; only included if compute_impact is True
"""
parts = [
cls._format_batch(batch=batch, batch_index=i, train_names=train_names)
for i, batch in enumerate(batches) if batch is not None
]
if len(parts) > 0:
report = pd.concat(parts, axis=0)
report.reset_index(inplace=True, drop=True)
return report
columns = ["batch", "sample", "sample name", "target", "prototype weight"]
if compute_impact:
columns.extend(["similarity", "impact"])
return pd.DataFrame(columns=columns)
@staticmethod
def _format_batch(batch, batch_index, train_names):
"""Format information for a single batch of prototypes to include in the report.
:param batch: one element from the output list generated by set_manager.SetManager.get_batches(); must not be
None
:param batch_index: non-negative integer; batch index
:param train_names: see docstring of export() for details
:return: as return value of _make_prototype_report(); the function determines whether impact needs to be
computed by checking whether the batch definitions contain the key "similarities"
"""
formatted = {
"batch": batch_index + 1,
"sample": batch["sample_index"],
"sample name": [
train_names[j] if train_names is not None else "sample {}".format(j) for j in batch["sample_index"]
],
"target": batch["target"],
"prototype weight": batch["prototype_weights"]
}
columns = ["batch", "sample", "sample name", "target", "prototype weight"]
if "similarities" in batch.keys():
formatted["similarity"] = np.exp(np.sum(np.log(batch["similarities"] + LOG_OFFSET), axis=1))
# use sum of logarithms instead of product for numerical stability
formatted["impact"] = formatted["similarity"] * formatted["prototype weight"]
columns.extend(["similarity", "impact"])
return pd.DataFrame(formatted, columns=columns)
@classmethod
def _make_feature_report(
cls,
batches,
feature_columns,
include_original,
scale,
offset,
active_features,
include_similarities
):
"""Format feature information for report.
:param batches: list as generated by set_manager.SetManager.get_batches()
:param feature_columns: as first return value of _check_report_input()
:param include_original: boolean; whether to include original feature values in the report
:param scale: see docstring of export() for details; None is not allowed
:param offset: see docstring of export() for details; None is not allowed
:param active_features: 1D numpy array of non-negative integers; indices of active features across all batches
:param include_similarities: boolean; whether to include per-feature similarities in the report; if True, the
information for each non-empty batch needs to contain the key 'similarities'
:return: pandas data frame with the following columns:
- <feature> weight: non-negative float; feature weight for the associated batch, np.NaN means the feature
plays no role for the batch
- <feature> value: float; feature value as used by the model; set to np.NaN if the feature weight is np.NaN
            - <feature> original: float; original feature value; set to np.NaN if the feature weight is np.NaN; this
column is not generated if both scale and offset are None
- <feature> similarity: float in (0.0, 1.0]; per-feature similarity between the prototype and reference
sample; this is only included if include_similarities is True
"""
if active_features.shape[0] == 0:
return pd.DataFrame()
return pd.concat([cls._format_feature(
batches=batches,
feature_index=i,
feature_columns=feature_columns,
include_original=include_original,
scale=scale,
offset=offset,
include_similarities=include_similarities
) for i in active_features], axis=1)
@staticmethod
def _format_feature(batches, feature_index, feature_columns, include_original, scale, offset, include_similarities):
"""Format information for a single feature.
:param batches: list as generated by set_manager.SetManager.get_batches()
:param feature_index: positive integer; index of feature
:param feature_columns: list of strings; as return value of _check_report_input()
:param include_original: boolean; whether to include original feature values in the report
:param scale: see docstring of export() for details; None is not allowed
:param offset: see docstring of export() for details; None is not allowed
:param include_similarities: boolean; whether to include per-feature similarities in the report
:return: as one set of columns for the return value of _make_feature_report()
"""
feature_columns = feature_columns[feature_index]
scale = scale[feature_index]
offset = offset[feature_index]
result = []
for batch in batches:
if batch is not None:
position = np.nonzero(feature_index == batch["active_features"])[0]
if len(position) == 0: # feature is not used by current batch
nan_column = np.NaN * np.zeros(batch["prototype_weights"].shape[0], dtype=float)
new_info = pd.DataFrame({
feature_columns[0]: nan_column,
feature_columns[1]: nan_column
}, columns=feature_columns[:2])
if include_original:
new_info[feature_columns[2]] = nan_column
if include_similarities:
new_info[feature_columns[3]] = nan_column
else:
new_info = pd.DataFrame({
feature_columns[0]: batch["feature_weights"][position[0]],
feature_columns[1]: np.reshape(
batch["prototypes"][:, position], batch["prototype_weights"].shape[0]
)
}, columns=feature_columns[:2])
if include_original:
new_info[feature_columns[2]] = scale * new_info[feature_columns[1]] + offset
if include_similarities:
new_info[feature_columns[3]] = batch["similarities"][:, position]
result.append(new_info)
if len(result) > 0:
            result = pd.concat(result, axis=0)
#!/usr/bin/env python3
from argparse import ArgumentParser
from pathlib import Path
import anndata
import h5py
import numpy as np
import pandas as pd
import scipy.io
import scipy.sparse
def main(
umap_coords_csv: Path,
cell_by_gene_raw_mtx: Path,
cell_by_gene_smoothed_hdf5: Path,
cell_by_bin_mtx: Path,
cell_by_bin_barcodes: Path,
cell_by_bin_bins: Path,
):
umap_coords_df = pd.read_csv(umap_coords_csv, index_col=0)
umap_coords_df.loc[:, "cluster"] = umap_coords_df.loc[:, "cluster"].astype("category")
umap_coords = umap_coords_df.loc[:, ["umap.1", "umap.2"]].to_numpy()
cell_by_bin_mat = scipy.io.mmread(cell_by_bin_mtx).astype(bool).tocsr()
with open(cell_by_bin_barcodes) as f:
barcodes = [line.strip() for line in f]
with open(cell_by_bin_bins) as f:
bins = [line.strip() for line in f]
assert barcodes == list(umap_coords_df.index)
obs = umap_coords_df.loc[:, ["cluster"]].copy()
obsm = {"X_umap": umap_coords}
chroms = []
bin_start = []
bin_stop = []
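    # each bin label looks like "chr1:10000-15000" -> chrom "chr1", start 10000, stop 15000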
for b in bins:
chrom, pos = b.rsplit(":", 1)
start, stop = (int(p) for p in pos.split("-"))
chroms.append(chrom)
bin_start.append(start)
bin_stop.append(stop)
var = pd.DataFrame(
{
"chrom": chroms,
"bin_start": bin_start,
"bin_stop": bin_stop,
},
index=bins,
)
cell_by_bin = anndata.AnnData(
cell_by_bin_mat,
obs=obs,
obsm=obsm,
var=var,
dtype=bool,
)
print("Saving cell by bin matrix")
cell_by_bin.write_h5ad("cell_by_bin.h5ad")
cell_by_gene_raw = scipy.sparse.csr_matrix(scipy.io.mmread(cell_by_gene_raw_mtx))
with h5py.File(cell_by_gene_smoothed_hdf5, "r") as f:
cell_by_gene_smoothed = np.array(f["cell_by_gene_smoothed"]).T
cells = [row.decode("utf-8") for row in np.array(f["barcodes"])]
genes = [col.decode("utf-8") for col in np.array(f["genes"])]
assert barcodes == cells
cell_by_gene = anndata.AnnData(
cell_by_gene_raw,
obs=obs,
obsm=obsm,
        var=pd.DataFrame(index=genes),
    )
import os
import time
import pandas as pd
import numpy as np
import json
from hydroDL import kPath
from hydroDL.data import usgs, gageII, gridMET, ntn
from hydroDL.post import axplot, figplot
import matplotlib.pyplot as plt
dirInv = os.path.join(kPath.dirData, 'USGS', 'inventory')
fileSiteNo = os.path.join(dirInv, 'siteNoLst-1979')
siteNoLstAll = pd.read_csv(fileSiteNo, header=None, dtype=str)[0].tolist()
dfCount = pd.read_csv(os.path.join(dirInv, 'codeCount.csv'),
dtype={'siteNo': str}).set_index('siteNo')
# pick some sites
code = '00945'
varC = [code]
varNtn = ['SO4']
siteNoLst = dfCount[dfCount[code] > 100].index.tolist()
nSite = dfCount.loc[siteNoLst][code].values
dfCrd = gageII.readData(
varLst=['LAT_GAGE', 'LNG_GAGE', 'CLASS'], siteNoLst=siteNoLst)
dfCrd = gageII.updateCode(dfCrd)
lat = dfCrd['LAT_GAGE'].values
lon = dfCrd['LNG_GAGE'].values
# add a start/end date to improve efficiency.
t = pd.date_range(start='1979-01-01', end='2019-12-30', freq='W-TUE')
import pandas as pd
import numpy as np
import requests
import time
import argparse
from tqdm import tqdm
from pyarrow import feather
def get_edit_history(
userid=None, user=None, latest_timestamp=None, earliest_timestamp=None, limit=None
):
"""For a particular user, pull their whole history of edits.
    Args:
        userid (int): Wikipedia user id to query (passed as ucuserids).
        user (str): Wikipedia user name to query (passed as ucuser).
        latest_timestamp (str): Newest edit timestamp to include (passed as ucstart).
        earliest_timestamp (str): Oldest edit timestamp to include (passed as ucend).
        limit: Unused; the query page size is fixed at 500 per request.
    Returns:
        list: One dict per edit from the MediaWiki usercontribs API
        (title, ids, sizediff, flags, comment, timestamp).
"""
S = requests.Session()
S.headers.update(
{"User-Agent": "WikiRecs (<EMAIL>) One-time pull"}
)
URL = "https://en.wikipedia.org/w/api.php"
PARAMS = {
"action": "query",
"format": "json",
"ucnamespace": "0",
"list": "usercontribs",
"ucuserids": userid,
"ucprop": "title|ids|sizediff|flags|comment|timestamp",
"ucshow=": "!minor|!new",
}
if latest_timestamp is not None:
PARAMS["ucstart"] = latest_timestamp
if earliest_timestamp is not None:
PARAMS["ucend"] = earliest_timestamp
if user is not None:
PARAMS["ucuser"] = user
if userid is not None:
PARAMS["ucuserid"] = userid
PARAMS["uclimit"] = 500
R = S.get(url=URL, params=PARAMS)
DATA = R.json()
if "query" not in DATA:
print(DATA)
raise ValueError
USERCONTRIBS = DATA["query"]["usercontribs"]
all_ucs = USERCONTRIBS
i = 500
while i < 100000:
if "continue" not in DATA:
break
last_continue = DATA["continue"]
PARAMS.update(last_continue)
R = S.get(url=URL, params=PARAMS)
DATA = R.json()
USERCONTRIBS = DATA["query"]["usercontribs"]
all_ucs.extend(USERCONTRIBS)
i = i + 500
return all_ucs
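# Illustrative sketch (not part of the original script): pull the edit history of a
# single, hypothetical user name starting from 2020.
def _get_edit_history_example():
    return get_edit_history(
        user="ExampleEditor",  # hypothetical user name
        earliest_timestamp="2020-01-01T00:00:00Z",
    )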
def pull_edit_histories(
sampled_users_file,
edit_histories_file_pattern,
users_per_chunk,
earliest_timestamp,
start=0,
):
histories = []
cols = ["userid", "user", "pageid", "title", "timestamp", "sizediff"]
sampled_users = pd.read_csv(sampled_users_file)
sampled_users.loc[:, "userid"].astype(int)
sampled_users = sampled_users.reset_index()
# Iterate through all the users in the list
for i, (user, userid) in tqdm(
iterable=enumerate(
zip(sampled_users["user"][start:], sampled_users["userid"][start:]),
start=start,
),
total=len(sampled_users),
initial=start,
):
# Get the history of edits for this userid
thehistory = get_edit_history(
userid=int(userid), earliest_timestamp=earliest_timestamp
)
# If no edits, skip
if len(thehistory) == 0:
continue
thehistory = pd.DataFrame(thehistory)
# Remove edits using automated tools by looking for the word "using" in the comments
try:
thehistory = thehistory[
np.invert(thehistory.comment.astype(str).str.contains("using"))
]
except AttributeError:
continue
if len(thehistory) == 0:
continue
histories.append(thehistory.loc[:, cols])
if np.mod(i, 50) == 0:
print(
"Most recent: {}/{} {} ({}) has {} edits".format(
i, len(sampled_users), user, int(userid), len(thehistory)
)
)
# Every x users save it out, for the sake of ram limitations
if np.mod(i, users_per_chunk) == 0:
feather.write_feather(
                pd.concat(histories),
from django.core.files import temp
from django.shortcuts import render
from django.conf import settings
from django.http import HttpResponse
from django.core.files.storage import FileSystemStorage
from django.http import FileResponse
from django.views.static import serve
import xlsxwriter
import pdfkit
import csv
import numpy
#import required libraries
import pandas as pd
import pyexcel
import xlrd
from matplotlib import pylab
from matplotlib import collections as mc
from pylab import *
from pylev3 import Levenshtein
from matplotlib.ticker import PercentFormatter
from matplotlib import pyplot
import matplotlib.pyplot as plt
import PIL, PIL.Image
import os
try:
from StringIO import BytesIO
except ImportError:
from io import BytesIO
'''from google.colab import drive
drive.mount('/content/drive')'''
# Create your views here.
def welcome(request):
return HttpResponse("Welcome")
def ourResponse(request):
return HttpResponse("OUR RESPONSE")
def takeInput(request):
return render(request,'input.html')
def similarity(seq1, seq2):
l1 , l2 = len(seq1), len(seq2)
ldist = Levenshtein.wf(seq1, seq2)
return (1 - ldist/max(l1, l2))*100
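# e.g. similarity("GDTAGQE", "GDTAGKE") ~= 85.71 (one substitution across seven residues)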
def df_gdomain_counter(df):
df_count = df["ProteinID"].value_counts()
return df_count
def match(x, y, mm):
mismatch = 0
for i in range(len(x)):
if (x[i] == 'X' or x[i] == y[i]):
pass
else:
mismatch += 1
if (mismatch <= mm):
return True
else:
return False
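# e.g. match("GXXXXGKS", "GAAAAGKS", 0) is True ('X' matches anything), while
# match("GXXXXGKT", "GAAAAGKS", 0) is False (one mismatch where none is allowed)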
def shuffler(word):
word_to_scramble = list(word)
numpy.random.shuffle(word_to_scramble)
# O=seq= ''.join(seq_temp)
new_word = ''.join(word_to_scramble)
return new_word
def list_of_7mer_X(sevenmer):
x_data = []
for r1 in range(7):
x = list(sevenmer)
x[r1] = "X"
x = ''.join(x)
x_data.append(x)
return x_data
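# e.g. list_of_7mer_X("SAKTRQG") -> ["XAKTRQG", "SXKTRQG", "SAXTRQG", "SAKXRQG",
#                                    "SAKTXQG", "SAKTRXG", "SAKTRQX"]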
def performAlgo(request):
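    # Overall flow: read the uploaded Excel of protein sequences, drop consecutive
    # sequences that are >= 90% similar, search each protein for G1/G3/G4/G5 box
    # combinations (including modified G5-box patterns), and write the matches plus
    # the 7-mer-with-X dictionaries and counts to workbooks under media/.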
myfile = request.FILES['document']
print(myfile.name)
fs = FileSystemStorage()
'''fs.save(myfile.name, myfile)'''
workbook = xlsxwriter.Workbook('media/new.xlsx')
family = request.POST.get("input01")
outpath = "media/new.xlsx"
df1 = pd.read_excel(myfile)
df2 = df1
for i in range((df1.shape[0] - 1)):
A = df1.loc[i, "Sequence"]
B = df1.loc[(i + 1), "Sequence"]
percent_similarity = similarity(A, B)
if (percent_similarity >= 90):
df2 = df2.drop(df2[df2.Sequence == B].index)
df2.to_excel(outpath, index=False)
NumProteins = df2.shape[0]
def H(protein_id, protein, x1, x2, x3, x4, mm1, mm2, mm3, mm4, min13, min34, min45, max13, max34, max45):
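        # Scan the protein for the four G-box motifs (G1, G3, G4, G5), allowing the
        # given number of mismatches per box, and keep every combination whose
        # inter-box spacings fall within the supplied min/max windows.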
pL1 = []
pL2 = []
pL3 = []
pL4 = []
L1 = []
L2 = []
L3 = []
L4 = []
for i in range(len(protein) - len(x1)):
if (match(x1, protein[i:i + len(x1)], mm1) == True):
# global L1
pL1 = pL1 + [i]
L1 = L1 + [protein[i:i + len(x1)]]
# print "L1 = ", pL1,L1
for j in range(len(protein) - len(x2)):
if (match(x2, protein[j:j + len(x2)], mm2) == True):
# global L2
pL2 = pL2 + [j]
L2 = L2 + [protein[j:j + len(x2)]]
# print "L2 = ", pL2,L2
for k in range(len(protein) - len(x3)):
if (match(x3, protein[k:k + len(x3)], mm3) == True):
# global L3
pL3 = pL3 + [k]
L3 = L3 + [protein[k:k + len(x3)]]
# print "L3 = ", pL3,L3
for l in range(len(protein) - len(x4)):
if (match(x4, protein[l:l + len(x4)], mm4) == True):
# global L3
pL4 = pL4 + [l]
L4 = L4 + [protein[l:l + len(x4)]]
candidates = []
for i in range(len(pL1)):
for j in range(len(pL2)):
for k in range(len(pL3)):
for l in range(len(pL4)):
if (min13 <= pL2[j] - pL1[i] <= max13 and min34 <= pL3[k] - pL2[j] <= max34 and min45 <=
pL4[l] - pL3[k] <= max45):
# if 80 <=pL2[j]-pL1[i] <= 120 and 40 <=pL3[k]- pL2[j] <= 80 and 20 <=pL4[l]- pL3[k] <= 80
a = L1[i]
a_pos = pL1[i]
b = L2[j]
b_pos = pL2[j]
c = L3[k]
c_pos = pL3[k]
d = L4[l]
d_pos = pL4[l]
candidates.append((protein_id, a, a_pos, b, b_pos, c, c_pos, d, d_pos))
return candidates
abc = []
l1 = []
inpath = "media/new.xlsx"
mismatch1 = int(request.POST.get("mismatch1"))
mismatch2 = int(request.POST.get("mismatch2"))
mismatch3 = int(request.POST.get("mismatch3"))
mismatch4 = int(request.POST.get("mismatch4"))
mismatch41 = mismatch4
x1 = request.POST.get("x1")
x2 = request.POST.get("x2")
x3 = request.POST.get("x3")
x4 = request.POST.get("x4")
Min_G1_G3 = int(request.POST.get("Min_G1_G3"))
Max_G1_G3 = int(request.POST.get("Max_G1_G3"))
Min_G3_G4 = int(request.POST.get("Min_G3_G4"))
Max_G3_G4 = int(request.POST.get("Max_G3_G4"))
Min_G4_G5 = int(request.POST.get("Min_G4_G5"))
Max_G4_G5 = int(request.POST.get("Max_G4_G5"))
workbook = xlsxwriter.Workbook('media/output_wo_bias.xlsx')
outpath = "media/output_wo_bias.xlsx"
df1 = pd.read_excel(inpath)
df2 = df1.set_index("Entry", drop=False)
protein = df2.loc[:, "Sequence"]
protein_id = df2.loc[:, "Entry"]
protein_id
for i in range(len(protein)):
l = H(protein_id[i], protein[i], x1, x2, x3, x4, mismatch1, mismatch2, mismatch3, mismatch4, Min_G1_G3,
Min_G3_G4, Min_G4_G5, Max_G1_G3, Max_G3_G4, Max_G4_G5)
l1.append(l)
abc = [item for sublist in l1 for item in sublist]
gdomains = pd.DataFrame(abc,
columns=['ProteinID', 'G1-box', 'Position', 'G3-box', 'Position.1', 'G4-box', 'Position.2',
'G5-box', 'Position.3'])
gdomains = gdomains[gdomains['ProteinID'].astype(bool)]
gdomains.head()
gdomains.to_excel(outpath, index=False)
abc = []
l1 = []
workbook = xlsxwriter.Workbook('media/SA_nomismatch.xlsx')
outpath = "media/SA_nomismatch.xlsx"
str1 = "XXX"
x41 = str1 + x4 + "X"
mismatch41 = 0
df1 = pd.read_excel(inpath)
df2 = df1.set_index("Entry", drop=False)
protein = df2.loc[:, "Sequence"]
protein_id = df2.loc[:, "Entry"]
#protein_id
for i in range(len(protein)):
l = H(protein_id[i], protein[i], x1, x2, x3, x41, mismatch1, mismatch2, mismatch3, mismatch41, Min_G1_G3,
Min_G3_G4, Min_G4_G5, Max_G1_G3, Max_G3_G4, Max_G4_G5)
l1.append(l)
abc = [item for sublist in l1 for item in sublist]
gdomains = pd.DataFrame(abc,
columns=['ProteinID', 'G1-box', 'Position', 'G3-box', 'Position', 'G4-box', 'Position',
'G5-box', 'Position'])
gdomains = gdomains[gdomains['ProteinID'].astype(bool)]
gdomains.head()
gdomains.to_excel(outpath, index=False)
abc = []
l1 = []
workbook = xlsxwriter.Workbook('media/SA_mismatch.xlsx')
outpath = "media/SA_mismatch.xlsx"
df1 = pd.read_excel(inpath)
df2 = df1.set_index("Entry", drop=False)
protein = df2.loc[:, "Sequence"]
protein_id = df2.loc[:, "Entry"]
protein_id
for i in range(len(protein)):
l = H(protein_id[i], protein[i], x1, x2, x3, x41, mismatch1, mismatch2, mismatch3, mismatch4, Min_G1_G3,
Min_G3_G4, Min_G4_G5, Max_G1_G3, Max_G3_G4, Max_G4_G5)
l1.append(l)
abc = [item for sublist in l1 for item in sublist]
gdomains = pd.DataFrame(abc,
columns=['ProteinID', 'G1-box', 'Position', 'G3-box', 'Position', 'G4-box', 'Position',
'G5-box', 'Position'])
gdomains = gdomains[gdomains['ProteinID'].astype(bool)]
gdomains.head()
gdomains.to_excel(outpath, index=False)
abc = []
l1 = []
workbook = xlsxwriter.Workbook('media/A_nomismatch.xlsx')
outpath = "media/A_nomismatch.xlsx"
y = x4[1:]
z = y[:-1]
x42 = str1 + z + str1
df1 = pd.read_excel(inpath)
df2 = df1.set_index("Entry", drop=False)
protein = df2.loc[:, "Sequence"]
protein_id = df2.loc[:, "Entry"]
protein_id
for i in range(len(protein)):
l = H(protein_id[i], protein[i], x1, x2, x3, x42, mismatch1, mismatch2, mismatch3, mismatch41, Min_G1_G3,
Min_G3_G4, Min_G4_G5, Max_G1_G3, Max_G3_G4, Max_G4_G5)
l1.append(l)
abc = [item for sublist in l1 for item in sublist]
gdomains = pd.DataFrame(abc,
columns=['ProteinID', 'G1-box', 'Position', 'G3-box', 'Position', 'G4-box', 'Position',
'G5-box', 'Position'])
gdomains = gdomains[gdomains['ProteinID'].astype(bool)]
gdomains.head()
gdomains.to_excel(outpath, index=False)
inpath_SA_mm = "media/SA_mismatch.xlsx"
workbook = xlsxwriter.Workbook('media/SA_mm_7mer_X_dict.xlsx')
outpath1_SA_mm = "media/SA_mm_7mer_X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/SA_mm_7mer_X_dict_count.xlsx')
outpath2_SA_mm = "media/SA_mm_7mer_X_dict_count.xlsx"
str2 = [["Rab", 470], ["Rac", 128], ["Ran", 29], ["Ras", 190], ["Roc", 19], ["Arf", 140], ["AlG1", 44],
["Era", 188], ["FeoB", 18], ["Hflx", 26], ["GB1", 116], ["EngB", 401], ["Dynamin", 115], ["IRG", 10],
["Obg", 659], ["Septin", 86], ["SRP", 99], ["Translational", 2869], ["tRme", 454], ["EngA", 424]]
#for i in str2:
# if (i[0] == family):
# #total = i[1]
total = NumProteins
data = pd.read_excel(inpath_SA_mm)
unique_7mers = data['G5-box'].unique()
temp = data
id_set = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_X(seq)
for x in x_data:
if (x not in id_set):
id_set[x] = set()
id_set[x].add(ID)
else:
id_set[x].add(ID)
id_set.items()
with open(outpath1_SA_mm, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set.items()]
with open(outpath2_SA_mm, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set.items()]
inpath_A_nomm = "media/A_nomismatch.xlsx"
workbook = xlsxwriter.Workbook('media/A_nomm_7mer_X_dict.xlsx')
outpath1_A_nomm = "media/A_nomm_7mer_X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/A_nomm_7mer_X_dict_count.xlsx')
outpath2_A_nomm = "media/A_nomm_7mer_X_dict_count.xlsx"
data1 = pd.read_excel(inpath_A_nomm)
unique_7mers = data1['G5-box'].unique()
temp = data1
id_set1 = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_X(seq)
for x in x_data:
if (x not in id_set1):
id_set1[x] = set()
id_set1[x].add(ID)
else:
id_set1[x].add(ID)
id_set1.items()
with open(outpath1_A_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set1.items()]
with open(outpath2_A_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set1.items()]
inpath_SA_nomm = "media/SA_nomismatch.xlsx"
workbook = xlsxwriter.Workbook('media/SA_nomm_7mer_X_dict.xlsx')
outpath1_SA_nomm = "media/SA_nomm_7mer_X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/SA_nomm_7mer_X_dict_count.xlsx')
outpath2_SA_nomm = "media/SA_nomm_7mer_X_dict_count.xlsx"
data2 = pd.read_excel(inpath_SA_nomm)
unique_7mers = data2['G5-box'].unique()
temp = data2
id_set2 = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_X(seq)
for x in x_data:
if (x not in id_set2):
id_set2[x] = set()
id_set2[x].add(ID)
else:
id_set2[x].add(ID)
id_set2.items()
with open(outpath1_SA_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set2.items()]
with open(outpath2_SA_nomm, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set2.items()]
workbook = xlsxwriter.Workbook('media/7mer_X_dict.xlsx')
outpath1 = "media/7mer_X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/7mer_X_count_dict.xlsx')
outpath2 = "media/7mer_X_count_dict.xlsx"
SA_nomm = pd.read_excel(inpath_SA_nomm)
A_nomm = pd.read_excel(inpath_A_nomm)
SA_mm = pd.read_excel(inpath_SA_mm)
table = [SA_nomm[['ProteinID', 'G5-box', 'Position']], A_nomm[['ProteinID', 'G5-box', 'Position']],
SA_mm[['ProteinID', 'G5-box', 'Position']]]
# to be used when SA with no mismatch doesn't give any result.
# table= [A_nomm[['Entry', 'G5_box', 'Value']], SA_mm[['Entry', 'G5_box', 'Value']]]
data3 = pd.concat(table)
data3 = data3.reset_index(drop=True)
unique_7mers = data3['G5-box'].unique()
temp = data3
id_set3 = {}
for j in range(temp.shape[0]):
seq = temp.loc[j, "G5-box"]
ID = temp.loc[j, "ProteinID"]
x_data = list_of_7mer_X(seq)
for x in x_data:
if (x not in id_set3):
id_set3[x] = set()
id_set3[x].add(ID)
else:
id_set3[x].add(ID)
id_set3.items()
with open(outpath1, 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in id_set3.items()]
with open(outpath2, 'w') as f:
[f.write('{0},{1}\n'.format(key, [len(value), round((100 * len(value) / total), 2)])) for key, value in
id_set3.items()]
def list_of_7mer_2X(sevenmer):
x_data = []
for r1 in range(7):
for r2 in range(7):
if (r1 != r2):
x = list(sevenmer)
x[r1] = "X"
x[r2] = "X"
x = ''.join(x)
x_data.append(x)
return x_data
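# Illustrative example (added for clarity, not part of the original script):
#   list_of_7mer_2X("SAKSKTG")
# returns the 42 strings obtained by masking two distinct positions with "X"
# (21 unique masks, each produced twice because the position pair is ordered),
# e.g. "XXKSKTG", "XAXSKTG", ..., "SAKSKXX".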
workbook = xlsxwriter.Workbook('media/SA_mm_7mer_2X_dict.xlsx')
outpath1_SA_mm = "media/SA_mm_7mer_2X_dict.xlsx"
workbook = xlsxwriter.Workbook('media/SA_mm_7mer_2X_dict_count.xlsx')
outpath2_SA_mm = "media/SA_mm_7mer_2X_dict_count.xlsx"
data = pd.read_excel(inpath_SA_mm)
# coding: utf-8
"""
.. _l-estim-sird-theory:
Estimating the parameters of a SIRD model
==========================================
We start from a :class:`CovidSIRD <aftercovid.models.CovidSIRD>` model,
use it to simulate data, and then check whether it is possible
to re-estimate the model parameters from those observations.
.. contents::
:local:
Simulating the data
+++++++++++++++++++
"""
import warnings
from pprint import pprint
import numpy
from matplotlib.cbook.deprecation import MatplotlibDeprecationWarning
import matplotlib.pyplot as plt
import pandas
from aftercovid.models import EpidemicRegressor, CovidSIRD
model = CovidSIRD()
model
###########################################
# Updating the coefficients.
model['beta'] = 0.4
model["mu"] = 0.06
model["nu"] = 0.04
pprint(model.P)
###################################
# Starting point
pprint(model.Q)
###################################
# Simulation
X, y = model.iterate2array(50, derivatives=True)
data = {_[0]: x for _, x in zip(model.Q, X.T)}
data.update({('d' + _[0]): c for _, c in zip(model.Q, y.T)})
df = pandas.DataFrame(data)
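#########################################
# A quick look at the simulated series (illustrative addition, not part of the
# original example; the column names simply follow the model quantities above).
df.tail()
df.plot(title="Simulated SIRD trajectories and their derivatives")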
import pandas as pd
import numpy as np
import math
from scipy.stats import hypergeom
from prettytable import PrettyTable
from scipy.special import betainc
class DISA:
"""
A class to analyse the subspaces inputted for their analysis
Parameters
----------
data : pandas.Dataframe
patterns : list
[x] : dict, where x can represent any position of the list
"lines" : list (mandatory)
"columns" : list (mandatory)
"column_values": list (optional)
"noise": list (optional)
"type" : string (optional)
outcome : dict
"values": pandas.Series
"outcome_value" : int
"type": string
border_values : boolean (default=False)
Class Attributes
----------------
border_values : boolean
data : pandas.Dataframe
size_of_dataset : int
y_column : pandas.Series
outcome_type : string
patterns : dict
Contains all the auxiliary information needed by the metrics
"""
def __init__(self, data, patterns, outcome, border_values=False):
self.border_values = border_values
self.data = data
self.size_of_dataset = len(outcome["values"])
self.y_column = outcome["values"]
self.outcome_type = outcome["type"]
self.y_value = outcome["outcome_value"] if "outcome_value" in list(outcome.keys()) else None
# Check if numerical to binarize or categorical to determine the categories
if outcome["type"] == "Numerical":
self.unique_classes = [0, 1]
else:
self.unique_classes = []
for value in outcome["values"].unique():
if np.issubdtype(value, np.integer):
self.unique_classes.append(value)
elif value.is_integer():
self.unique_classes.append(value)
self.patterns = []
for i in range(len(patterns)):
column_values = patterns[i]["column_values"] if "column_values" in list(patterns[i].keys()) else None
if column_values is not None:
col_values_counter = 0
for value in column_values:
column_values[col_values_counter] = float(value)
col_values_counter += 1
patterns[i]["lines"] = list(map(int, patterns[i]["lines"]))
outcome_to_assess = self.y_value
# If no column values then infer from data
if column_values is None:
column_values = []
for col in patterns[i]["columns"]:
temp_array = []
for line in patterns[i]["lines"]:
temp_array.append(self.data.at[line, col])
column_values.append(np.median(temp_array))
# If no noise inputted then all column contain 0 noise
noise = patterns[i]["noise"] if "noise" in list(patterns[i].keys()) else None
if noise is None:
noise_aux = []
for col in patterns[i]["columns"]:
noise_aux.append(0)
noise = noise_aux
# If no type then assume its a constant subspace
type = patterns[i]["type"] if "type" in list(patterns[i].keys()) else "Constant"
nr_cols = len(patterns[i]["columns"])
x_space = outcome["values"].filter(axis=0, items=patterns[i]["lines"])
_x_space = outcome["values"].drop(axis=0, labels=patterns[i]["lines"])
x_data = data.drop(columns=data.columns.difference(patterns[i]["columns"])).filter(axis=0, items=patterns[i]["lines"])
Cx = len(patterns[i]["lines"])
C_x = self.size_of_dataset - Cx
intervals = None
if outcome["type"] == "Numerical":
outcome_to_assess = 1
intervals = self.handle_numerical_outcome(x_space)
c1 = 0
for value in outcome["values"]:
if intervals[0] <= float(value) <= intervals[1]:
c1 += 1
Cy = c1
C_y = self.size_of_dataset - Cy
c1 = 0
for value in x_space:
if intervals[0] <= float(value) <= intervals[1]:
c1 += 1
Cxy = c1
Cx_y = len(x_space) - Cxy
c1 = 0
for value in _x_space:
if intervals[0] <= float(value) <= intervals[1]:
c1 += 1
C_xy = c1
C_x_y = len(_x_space) - C_xy
else:
if outcome_to_assess is None:
maxLift = 0
discriminative_unique_class = 0
for unique_class in self.unique_classes:
testY = len(outcome["values"][outcome["values"] == unique_class])
omega = max(Cx + testY - 1, 1 / self.size_of_dataset)
v = 1 / max(Cx, testY)
testXY = len(x_space[x_space == unique_class])
if testXY == 0:
continue
lift_of_pattern = testXY / (Cx * testY)
curr_lift = (lift_of_pattern - omega) / (v - omega)
if curr_lift > maxLift:
maxLift = curr_lift
discriminative_unique_class = unique_class
outcome_to_assess = discriminative_unique_class
Cy = len(outcome["values"][outcome["values"] == outcome_to_assess])
Cxy = len(x_space[x_space == outcome_to_assess])
C_xy = len(_x_space[_x_space == outcome_to_assess])
Cx_y = len(x_space) - len(x_space[x_space == outcome_to_assess])
C_x_y = len(_x_space) - len(_x_space[_x_space == outcome_to_assess])
if border_values:
Cy += len(outcome["values"][outcome["values"] == outcome_to_assess-0.5]) \
+ len(outcome["values"][outcome["values"] == outcome_to_assess+0.5])
Cxy += len(x_space[x_space == outcome_to_assess-0.5]) \
+ len(x_space[x_space == outcome_to_assess+0.5])
C_xy += len(_x_space[_x_space == outcome_to_assess-0.5]) \
+ len(_x_space[_x_space == outcome_to_assess+0.5])
Cx_y -= len(x_space[x_space == outcome_to_assess-0.5]) \
+ len(x_space[x_space == outcome_to_assess+0.5])
C_x_y -= len(_x_space[_x_space == outcome_to_assess-0.5]) \
+ len(_x_space[_x_space == outcome_to_assess+0.5])
C_y = self.size_of_dataset - Cy
X = Cx / self.size_of_dataset
_X = 1 - X
Y = Cy / self.size_of_dataset
_Y = 1 - Y
XY = Cxy / self.size_of_dataset
_XY = C_xy / self.size_of_dataset
X_Y = Cx_y / self.size_of_dataset
_X_Y = C_x_y / self.size_of_dataset
self.patterns.append({
"outcome_to_assess": outcome_to_assess,
"outcome_intervals": intervals,
"columns": patterns[i]["columns"],
"lines": patterns[i]["lines"],
"nr_cols": nr_cols,
"column_values": column_values,
"noise": noise,
"type": type,
"x_space": x_space,
"_x_space": _x_space,
"x_data": x_data,
"Cx": Cx,
"C_x": C_x,
"Cy": Cy,
"C_y": C_y,
"Cxy": Cxy,
"C_xy": C_xy,
"Cx_y": Cx_y,
"C_x_y": C_x_y,
"X": X,
"_X": _X,
"Y": Y,
"_Y": _Y,
"XY": XY,
"_XY": _XY,
"X_Y": X_Y,
"_X_Y": _X_Y
})
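# Notation note (added for clarity): Cx, Cy and Cxy are the row counts of the
# pattern, of the outcome class and of their intersection, while X, Y, XY (and
# their "_" complements) are the same counts divided by the dataset size.
# For a purely hypothetical dataset of 100 rows with Cx = 20, Cy = 30 and
# Cxy = 15, the metrics below would use X = 0.2, Y = 0.3 and XY = 0.15, giving
# confidence = XY / X = 0.75 and lift = XY / (X * Y) = 0.15 / 0.06 = 2.5.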
def assess_patterns(self, print_table=False):
"""
Executes all the subspace metrics for the inputted patterns
Parameters
----------
print_table : boolean
If true, prints a table containing the metric values
Returns
-------
list
[x] : dictionary :
"Outcome selected for analysis", "Information Gain", "Chi-squared", "Gini index", "Difference in Support",
"Bigger Support", "Confidence", "All-Confidence", "Lift", "Standardised Lift", "Standardised Lift (with correction)",
"Collective Strength", "Cosine", "Interestingness", "Comprehensibility", "Completeness", "Added Value",
"Casual Confidence", "Casual Support", "Certainty Factor", "Conviction", "Coverage (Support)",
"Descriptive Confirmed Confidence", "Difference of Proportions", "Example and Counter Example",
"Imbalance Ratio", "Fisher's Exact Test (p-value)", "Hyper Confidence", "Hyper Lift", "Laplace Corrected Confidence",
"Importance", "Jaccard Coefficient", "J-Measure", "Kappa", "Klosgen", "Kulczynski", "Goodman-Kruskal's Lambda",
"Least Contradiction", "Lerman Similarity", "Piatetsky-Shapiro", "Max Confidence", "Odds Ratio",
"Phi Correlation Coefficient", "Ralambondrainy", "Relative Linkage Disequilibrium", "Relative Risk"
"Rule Power Factor", "Sebag-Schoenauer", "Yule Q", "Yule Y", "Weighted Support", "Weighted Rule Support"
"Weighted Confidence", "Weighted Lift", "Statistical Significance", "FleBiC Score"
where "x" represents the position of a subspace, and the dictionary the corresponding metrics calculated for
the subspace. More details about the metrics are given in the methods.
"""
results = []
for i in range(len(self.patterns)):
information_gain = self.information_gain(i)
chi_squared = self.chi_squared(i)
gini_index = self.gini_index(i)
diff_sup = self.diff_sup(i)
bigger_sup = self.bigger_sup(i)
confidence = self.confidence(i)
all_confidence = self.all_confidence(i)
lift = self.lift(i)
standardisation_of_lift = self.standardisation_of_lift(i)
collective_strength = self.collective_strength(i)
cosine = self.cosine(i)
interestingness = self.interestingness(i)
comprehensibility = self.comprehensibility(i)
completeness = self.completeness(i)
added_value = self.added_value(i)
casual_confidence = self.casual_confidence(i)
casual_support = self.casual_support(i)
certainty_factor = self.certainty_factor(i)
conviction = self.conviction(i)
coverage = self.coverage(i)
descriptive_confirmed_confidence = self.descriptive_confirmed_confidence(i)
difference_of_confidence = self.difference_of_confidence(i)
example_counter_example = self.example_counter_example(i)
imbalance_ratio = self.imbalance_ratio(i)
fishers_exact_test_p_value = self.fishers_exact_test_p_value(i)
hyper_confidence = self.hyper_confidence(i)
hyper_lift = self.hyper_lift(i)
laplace_corrected_confidence = self.laplace_corrected_confidence(i)
importance = self.importance(i)
jaccard_coefficient = self.jaccard_coefficient(i)
j_measure = self.j_measure(i)
kappa = self.kappa(i)
klosgen = self.klosgen(i)
kulczynski = self.kulczynski(i)
kruskal_lambda = self.kruskal_lambda(i)
least_contradiction = self.least_contradiction(i)
lerman_similarity = self.lerman_similarity(i)
piatetsky_shapiro = self.piatetsky_shapiro(i)
max_confidence = self.max_confidence(i)
odds_ratio = self.odds_ratio(i)
phi_correlation_coefficient = self.phi_correlation_coefficient(i)
ralambondrainy_measure = self.ralambondrainy_measure(i)
rld = self.rld(i)
relative_risk = self.relative_risk(i)
rule_power_factor = self.rule_power_factor(i)
sebag = self.sebag(i)
yule_q = self.yule_q(i)
yule_y = self.yule_y(i)
Wsup_pattern = self.Wsup_pattern(i) if "column_values" in list(self.patterns[i].keys()) else "Not enough information to calculate"
Wsup_rule = self.Wsup_rule(i) if "column_values" in list(self.patterns[i].keys()) else "Not enough information to calculate"
Wconf = self.Wconf(i) if "column_values" in list(self.patterns[i].keys()) else "Not enough information to calculate"
WLift = self.WLift(i) if "column_values" in list(self.patterns[i].keys()) else "Not enough information to calculate"
Tsig = self.Tsig(i) if "column_values" in list(self.patterns[i].keys()) else "Not enough information to calculate"
FleBiC_score = self.FleBiC_score(i) if "column_values" in list(self.patterns[i].keys()) else "Not enough information to calculate"
results.append({
"Outcome selected for analysis": self.patterns[i]["outcome_to_assess"],
"Information Gain": information_gain,
"Chi-squared": chi_squared,
"Gini index": gini_index,
"Difference in Support": diff_sup,
"Bigger Support": bigger_sup,
"Confidence": confidence,
"All-Confidence": all_confidence,
"Lift": lift,
"Standardised Lift": standardisation_of_lift,
"Collective Strength": collective_strength,
"Cosine": cosine,
"Interestingness": interestingness,
"Comprehensibility": comprehensibility,
"Completeness": completeness,
"Added Value": added_value,
"Casual Confidence": casual_confidence,
"Casual Support": casual_support,
"Certainty Factor": certainty_factor,
"Conviction": conviction,
"Coverage (Support)": coverage,
"Descriptive Confirmed Confidence": descriptive_confirmed_confidence,
"Difference of Proportions": difference_of_confidence,
"Example and Counter Example": example_counter_example,
"Imbalance Ratio": imbalance_ratio,
"Fisher's Exact Test (p-value)": fishers_exact_test_p_value,
"Hyper Confidence": hyper_confidence,
"Hyper Lift": hyper_lift,
"Laplace Corrected Confidence": laplace_corrected_confidence,
"Importance": importance,
"Jaccard Coefficient": jaccard_coefficient,
"J-Measure": j_measure,
"Kappa": kappa,
"Klosgen": klosgen,
"Kulczynski": kulczynski,
"Goodman-Kruskal's Lambda": kruskal_lambda,
"Least Contradiction": least_contradiction,
"Lerman Similarity": lerman_similarity,
"Piatetsky-Shapiro": piatetsky_shapiro,
"Max Confidence": max_confidence,
"Odds Ratio": odds_ratio,
"Phi Correlation Coefficient": phi_correlation_coefficient,
"Ralambondrainy": ralambondrainy_measure,
"Relative Linkage Disequilibrium": rld,
"Relative Risk": relative_risk,
"Rule Power Factor": rule_power_factor,
"Sebag-Schoenauer": sebag,
"Yule Q": yule_q,
"Yule Y": yule_y,
"Weighted Support": Wsup_pattern,
"Weighted Rule Support": Wsup_rule,
"Weighted Confidence": Wconf,
"Weighted Lift": WLift,
"Statistical Significance": Tsig,
"FleBiC Score": FleBiC_score
})
if print_table:
columns = ['Metric']
for i in range(len(self.patterns)):
columns.append('P'+str(i+1))
t = PrettyTable(columns)
for metric in list(results[0].keys()):
line = [metric]
for x in range(len(self.patterns)):
line.append(str(results[x][metric]))
t.add_row(line)
print(t)
return results
def information_gain(self, i):
""" Calculates information gain of the subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Information gain of subspace
"""
one = self.patterns[i]["XY"]*math.log(self.patterns[i]["XY"]/(self.patterns[i]["X"]*self.patterns[i]["Y"]), 10) if self.patterns[i]["XY"] != 0 else 0
two = self.patterns[i]["X_Y"]*math.log(self.patterns[i]["X_Y"]/(self.patterns[i]["X"]*self.patterns[i]["_Y"]), 10) if self.patterns[i]["X_Y"] != 0 else 0
three = self.patterns[i]["_XY"]*math.log(self.patterns[i]["_XY"]/(self.patterns[i]["_X"]*self.patterns[i]["Y"]),10) if self.patterns[i]["_XY"] != 0 else 0
four = self.patterns[i]["_X_Y"]*math.log(self.patterns[i]["_X_Y"]/(self.patterns[i]["_X"]*self.patterns[i]["_Y"]), 10) if self.patterns[i]["_X_Y"] != 0 else 0
frac_up = one + two + three + four
frac_down_one = - (self.patterns[i]["X"] * math.log(self.patterns[i]["X"],10) + self.patterns[i]["_X"] * math.log(self.patterns[i]["_X"], 10)) if self.patterns[i]["X"] != 0 and self.patterns[i]["_X"] != 0 else 0
frac_down_two = - (self.patterns[i]["Y"] * math.log(self.patterns[i]["Y"],10) + self.patterns[i]["_Y"] * math.log(self.patterns[i]["_Y"], 10)) if self.patterns[i]["Y"] != 0 and self.patterns[i]["_Y"] != 0 else 0
frac_down = min(frac_down_one,frac_down_two)
return frac_up / frac_down
def chi_squared(self, i):
""" Calculates the Chi-squared test statistic given a subspace
https://doi.org/10.1145/253260.253327
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Chi-squared test statistic of subspace
"""
one=((self.patterns[i]["Cxy"]-(self.patterns[i]["Cx"]*self.patterns[i]["Cy"]/self.size_of_dataset))**2)/(self.patterns[i]["Cx"]*self.patterns[i]["Cy"]/self.size_of_dataset)
two=((self.patterns[i]["C_xy"]-(self.patterns[i]["C_x"]*self.patterns[i]["Cy"]/self.size_of_dataset))**2)/(self.patterns[i]["C_x"]*self.patterns[i]["Cy"]/self.size_of_dataset)
three=((self.patterns[i]["Cx_y"]-(self.patterns[i]["Cx"]*self.patterns[i]["C_y"]/self.size_of_dataset))**2)/(self.patterns[i]["Cx"]*self.patterns[i]["C_y"]/self.size_of_dataset)
four=((self.patterns[i]["C_x_y"]-(self.patterns[i]["C_x"]*self.patterns[i]["C_y"]/self.size_of_dataset))**2)/(self.patterns[i]["C_x"]*self.patterns[i]["C_y"]/self.size_of_dataset)
return one + two + three + four
def gini_index(self, i):
""" Calculates the gini index metric of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Gini index of subspace
"""
return (self.patterns[i]["X"] * (((self.patterns[i]["XY"]/self.patterns[i]["X"])**2)+((self.patterns[i]["X_Y"]/self.patterns[i]["X"])**2)))\
+ (self.patterns[i]["_X"] * (((self.patterns[i]["_XY"]/self.patterns[i]["_X"])**2)+((self.patterns[i]["_X_Y"]/self.patterns[i]["_X"])**2)))\
- (self.patterns[i]["Y"]**2) - (self.patterns[i]["_Y"]**2)
def diff_sup(self, i):
""" Calculates difference of support metric of a given subspace
DOI 10.1109/TKDE.2010.241
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Difference in support of subspace
"""
return abs((self.patterns[i]["XY"]/self.patterns[i]["Y"]) - (self.patterns[i]["X_Y"]/self.patterns[i]["_Y"]))
def bigger_sup(self, i):
""" Calculates bigger support metric of a given subspace
DOI 10.1109/TKDE.2010.241
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Bigger support of subspace
"""
return max((self.patterns[i]["XY"]/self.patterns[i]["Y"]), (self.patterns[i]["X_Y"]/self.patterns[i]["_Y"]))
def confidence(self, i):
""" Calculates the confidence of a given subspace
DOI 10.1145/170036.170072
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Confidence of subspace
"""
return self.patterns[i]["XY"] / self.patterns[i]["X"]
def all_confidence(self, i):
""" Calculates the all confidence metric of a given subspace
DOI 10.1109/TKDE.2003.1161582
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
All confidence of subspace
"""
return self.patterns[i]["XY"] / max(self.patterns[i]["X"], self.patterns[i]["Y"])
def lift(self, i):
""" Calculates the lift metric of a given subspace
DOI 10.1145/170036.170072
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Lift of subspace
"""
return self.patterns[i]["XY"] / (self.patterns[i]["X"] * self.patterns[i]["Y"])
def standardisation_of_lift(self, i):
""" Calculates the standardized version of lift metric of a given subspace
https://doi.org/10.1016/j.csda.2008.03.013
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Standardized lift of subspace
"""
omega = max(self.patterns[i]["X"] + self.patterns[i]["Y"] - 1, 1/self.size_of_dataset)
v = 1 / max(self.patterns[i]["X"], self.patterns[i]["Y"])
return (self.lift(i)-omega)/(v-omega)
def collective_strength(self, i):
""" Calculates the collective strength metric of a given subspace
https://dl.acm.org/doi/pdf/10.1145/275487.275490
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Collective strength of subspace
"""
return (self.patterns[i]["XY"] + self.patterns[i]["_X_Y"] / self.patterns[i]["_X"]) / (self.patterns[i]["X"] * self.patterns[i]["Y"] + self.patterns[i]["_X"] * self.patterns[i]["_Y"])
def cosine(self, i):
""" Calculates cosine metric of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Cosine of subspace
"""
return self.patterns[i]["XY"] / math.sqrt(self.patterns[i]["X"] * self.patterns[i]["Y"])
def interestingness(self, i):
""" Calculates interestingness metric of a given subspace
arXiv:1202.3215
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Interestingness of subspace
"""
return (self.patterns[i]["XY"] / self.patterns[i]["X"]) * (self.patterns[i]["XY"] / self.patterns[i]["Y"]) * (1 - (self.patterns[i]["XY"]/self.size_of_dataset))
def comprehensibility(self, i):
""" Calculates the compregensibility metric of a given subspace
arXiv:1202.3215
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Comprehensibility of subspace
"""
return np.log(1+1)/np.log(1+self.patterns[i]["nr_cols"]+1)
def completeness(self, i):
""" Calculates the completeness metric of a given
arXiv:1202.3215
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Completeness of subspace
"""
return self.patterns[i]["XY"] / self.patterns[i]["Y"]
def added_value(self, i):
""" Calculates the added value metric of a subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Added value of subspace
"""
return self.confidence(i) - (self.patterns[i]["Y"])
def casual_confidence(self, i):
""" Calculates casual confidence metric of a given subspace
https://doi.org/10.1007/3-540-44673-7_1
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Casual confidence of subspace
"""
return 0.5 * ((self.patterns[i]["XY"]/self.patterns[i]["X"]) + (self.patterns[i]["XY"]/self.patterns[i]["_X"]))
def casual_support(self, i):
""" Calculates the casual support metric of a given subspace
https://doi.org/10.1007/3-540-44673-7_1
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Casual support of subspace
"""
return self.patterns[i]["XY"] + self.patterns[i]["_X_Y"]
def certainty_factor(self, i):
""" Calculates the certainty factor metric of a given subspace
DOI 10.3233/IDA-2002-6303
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Certainty factor metric of a given subspace
"""
return ((self.patterns[i]["XY"] / self.patterns[i]["X"]) - self.patterns[i]["Y"])/self.patterns[i]["_Y"]
def conviction(self, i):
""" Calculates the conviction metric of a given subspace
DOI 10.1145/170036.170072
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Conviction of subspace
"""
if self.patterns[i]["X_Y"] == 0:
return math.inf
else:
return self.patterns[i]["X"] * self.patterns[i]["_Y"] / self.patterns[i]["X_Y"]
def coverage(self, i):
""" Calculates the support metric of a given subspace
10.1145/170036.170072
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Support of subspace
"""
return self.patterns[i]["X"]
def descriptive_confirmed_confidence(self, i):
""" Calculates the descriptive confidence of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Descriptive confidence of subspace
"""
return (self.patterns[i]["XY"]/self.patterns[i]["X"]) - (self.patterns[i]["X_Y"]/self.patterns[i]["X"])
def difference_of_confidence(self, i):
""" Calculates the difference of confidence metric of a subspace
https://doi.org/10.1007/s001800100075
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Difference of confidence of subspace
"""
return (self.patterns[i]["XY"] / self.patterns[i]["X"]) - (self.patterns[i]["_XY"] / self.patterns[i]["_X"])
def example_counter_example(self, i):
""" Calculates
Generation of rules with certainty and confidence factors from incomplete and incoherent learning bases
author : <NAME> <NAME>
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Example and counter example metric of subspace
"""
if self.patterns[i]["XY"] == 0:
return "No intersection between subspace and outcome"
return (self.patterns[i]["XY"] - self.patterns[i]["X_Y"]) / self.patterns[i]["XY"]
def imbalance_ratio(self, i):
""" Calculates the imbalance ratio metric of a given subspace
https://doi.org/10.1007/s10618-009-0161-2
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Imbalance ratio of subspace
"""
if self.patterns[i]["XY"] == 0:
return "No intersection between subspace and outcome"
return abs((self.patterns[i]["XY"]/self.patterns[i]["X"])-(self.patterns[i]["XY"]/self.patterns[i]["Y"]))/((self.patterns[i]["XY"]/self.patterns[i]["X"])+(self.patterns[i]["XY"]/self.patterns[i]["Y"])-((self.patterns[i]["XY"]/self.patterns[i]["X"])*(self.patterns[i]["XY"]/self.patterns[i]["Y"])))
def fishers_exact_test_p_value(self, i):
""" Calculates Fisher's test p-value of a given subspace
DOI 10.3233/IDA-2007-11502
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
P-value of Fisher's test of subspace
"""
comb3 = math.factorial(self.size_of_dataset) // (math.factorial(self.patterns[i]["Cx"]) * math.factorial(self.size_of_dataset - self.patterns[i]["Cx"]))
sum_Pcxy = 0
for counter in range(0, self.patterns[i]["Cxy"]):
comb1 = math.factorial(self.patterns[i]["Cy"])//(math.factorial(counter)*math.factorial(self.patterns[i]["Cy"]-counter))
comb2_aux = (self.size_of_dataset-self.patterns[i]["Cy"])-(self.patterns[i]["Cx"]-counter)
if comb2_aux < 0:
comb2_aux = 0
comb2 = math.factorial(self.size_of_dataset-self.patterns[i]["Cy"])//(math.factorial(self.patterns[i]["Cx"]-counter)*math.factorial(comb2_aux))
sum_Pcxy += ((comb1*comb2)/comb3)
return 1 - sum_Pcxy
def hyper_confidence(self, i):
""" Calculates the Hyper confidence metric of a given subspace
DOI 10.3233/IDA-2007-11502
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Hyper confidence of subspace
"""
return 1 - self.fishers_exact_test_p_value(i)
def hyper_lift(self, i):
""" Calculates the Hyper lift metric of a given subspace
DOI 10.3233/IDA-2007-11502
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Hyper lift of subspace
"""
[M, n, N] = [self.size_of_dataset, self.patterns[i]["Cy"], self.patterns[i]["Cx"]]
ppf95 = hypergeom.ppf(0.95, M, n, N)
return self.patterns[i]["Cxy"]/ppf95
def laplace_corrected_confidence(self, i):
""" Calculates the laplace corrected confidence of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Laplace corrected confidence
"""
return (self.patterns[i]["Cxy"]+1)/(self.patterns[i]["Cx"]+(len(self.unique_classes)))
def importance(self, i):
""" Calculates the importance metric of a given subspace
https://docs.microsoft.com/en-us/analysis-services/data-mining/microsoft-association-algorithm-technical-reference?view=asallproducts-allversions&viewFallbackFrom=sql-server-ver15
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Importance metric of subspace
"""
return math.log(((self.patterns[i]["Cxy"]+1)/(self.patterns[i]["Cx"]+len(self.unique_classes))) / ((self.patterns[i]["Cx_y"]+1)/(self.patterns[i]["Cx"]+len(self.unique_classes))), 10)
def jaccard_coefficient(self, i):
""" Calculates the jaccard coefficient metric of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Jaccard coefficient of subspace
"""
return self.patterns[i]["XY"]/(self.patterns[i]["X"]+self.patterns[i]["Y"]-self.patterns[i]["XY"])
def j_measure(self, i):
""" Calculates the J-Measure (scaled version of cross entropy) of a given subspace
NII Article ID (NAID) 10011699020
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
J-Measure of subspace
"""
a = (self.patterns[i]["XY"]/self.patterns[i]["X"])/self.patterns[i]["Y"]
if a == 0:
a = 0
else:
a = self.patterns[i]["XY"] * math.log((self.patterns[i]["XY"]/self.patterns[i]["X"])/self.patterns[i]["Y"], 10)
b = (self.patterns[i]["X_Y"]/self.patterns[i]["X"])/self.patterns[i]["_Y"]
if b == 0:
b = 0
else:
b = self.patterns[i]["X_Y"] * math.log((self.patterns[i]["X_Y"] / self.patterns[i]["X"]) / self.patterns[i]["_Y"], 10)
return a + b
def kappa(self, i):
""" Calculates the kappa metric for a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Kappa of subspace
"""
return (self.patterns[i]["XY"] + self.patterns[i]["_X_Y"]-(self.patterns[i]["X"] * self.patterns[i]["Y"])-(self.patterns[i]["_X"]*self.patterns[i]["_Y"])) / (1-(self.patterns[i]["X"]*self.patterns[i]["Y"])-(self.patterns[i]["_X"]*self.patterns[i]["_Y"]))
def klosgen(self, i):
""" Calculates the klosgen metric for a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Klosgen metric of subspace
"""
return math.sqrt(self.patterns[i]["XY"])*((self.patterns[i]["XY"]/self.patterns[i]["X"])-self.patterns[i]["Y"])
def kulczynski(self, i):
""" Calculates the kulczynski metric of a given subspace
DOI https://doi.org/10.1007/s10618-009-0161-2
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Kulczynski metric of subspace
"""
return 0.5 * ((self.patterns[i]["XY"] / self.patterns[i]["X"]) + (self.patterns[i]["XY"] / self.patterns[i]["Y"]))
def kruskal_lambda(self, i):
""" Calculates the goodman-kruskal lambda metric for a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Goodman-kruskal lambda of subspace
"""
return ((1-self.patterns[i]["XY"])-(1-self.patterns[i]["Y"]))/(1-self.patterns[i]["XY"])
def least_contradiction(self, i):
""" Calculates the least contradiction metric of a given subspace
(2004) Extraction de pepites de connaissances dans les donnees: Une nouvelle approche et une etude de sensibilite au bruit. In Mesures de Qualite pour la fouille de donnees. Revue des Nouvelles Technologies de l’Information, RNTI
author : <NAME>. and <NAME>
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Least contradiction of subspace
"""
return (self.patterns[i]["XY"] - self.patterns[i]["X_Y"]) / self.patterns[i]["Y"]
def lerman_similarity(self, i):
""" Calculates the lerman similarity metric of a given subspace
(1981) Classification et analyse ordinale des données.
Author : Lerman, Israel-César.
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Lerman similarity of subspace
"""
return (self.patterns[i]["Cxy"] - ((self.patterns[i]["Cx"] * self.patterns[i]["Cy"]) / self.size_of_dataset)) / math.sqrt((self.patterns[i]["Cx"] * self.patterns[i]["Cy"]) / self.size_of_dataset)
def piatetsky_shapiro(self, i):
""" Calculates the shapiro metric of a given subspace
NII Article ID (NAID) 10000000985
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Piatetsky-Shapiro metric of subspace
"""
return self.patterns[i]["XY"] - (self.patterns[i]["X"] * self.patterns[i]["Y"])
def max_confidence(self, i):
""" Calculates the maximum confidence metric of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Max Confidence of subspace
"""
return max(self.patterns[i]["XY"] / self.patterns[i]["X"], self.patterns[i]["XY"] / self.patterns[i]["Y"])
def odds_ratio(self, i):
""" Calculates the odds ratio metric of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Odds ratio of subspace
"""
if self.patterns[i]["X_Y"] == 0 or self.patterns[i]["_XY"] == 0:
return math.inf
else:
return (self.patterns[i]["XY"] * self.patterns[i]["_X_Y"]) / (self.patterns[i]["X_Y"] * self.patterns[i]["_XY"])
def phi_correlation_coefficient(self, i):
""" Calculates the phi correlation coefficient metric of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Phi correlation coefficient of subspace
"""
return math.sqrt(self.chi_squared(i)/self.size_of_dataset)
def ralambondrainy_measure(self, i):
""" Calculates the support of the counter examples of a given subspace
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Ralambondrainy metric of subspace
"""
return self.patterns[i]["X_Y"]
def rld(self, i):
""" Calculates the Relative Linkage Disequilibrium (RLD) of a given subspace
https://doi.org/10.1007/978-3-540-70720-2_15
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
RLD of subspace
"""
rld = 0
d = (self.patterns[i]["Cxy"]*self.patterns[i]["C_x_y"])-(self.patterns[i]["Cx_y"]*self.patterns[i]["C_xy"])
if d > 0:
if self.patterns[i]["C_xy"] < self.patterns[i]["Cx_y"]:
rld = d / (d+(self.patterns[i]["C_xy"] / self.size_of_dataset))
else:
rld = d / (d+(self.patterns[i]["Cx_y"] / self.size_of_dataset))
else:
if self.patterns[i]["Cxy"] < self.patterns[i]["C_x_y"]:
rld = d / (d-(self.patterns[i]["Cxy"] / self.size_of_dataset))
else:
rld = d / (d-(self.patterns[i]["C_x_y"] / self.size_of_dataset))
return rld
def relative_risk(self, i):
""" Calculates the relative risk of a given subspace
https://doi.org/10.1148/radiol.2301031028
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Relative risk of subspace
"""
if self.patterns[i]["_XY"] == 0:
return math.inf
return (self.patterns[i]["XY"]/self.patterns[i]["X"])/(self.patterns[i]["_XY"]/self.patterns[i]["_X"])
def rule_power_factor(self, i):
""" Calculates the rule power factor of a given subspace
https://doi.org/10.1016/j.procs.2016.07.175
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Rule power factor of subspace
"""
return (self.patterns[i]["XY"]**2)/self.patterns[i]["X"]
def sebag(self, i):
""" Calculates the sebag metric of a given subspace
Generation of rules with certainty and confidence factors from incomplete and incoherent learning bases
author : <NAME> <NAME>
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Sebag metric of subspace
"""
if self.patterns[i]["X_Y"] == 0:
return math.inf
else:
return self.patterns[i]["XY"]/self.patterns[i]["X_Y"]
def yule_q(self, i):
""" Calculates the yule's Q metric of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Yule's Q of subspace
"""
return (self.patterns[i]["XY"]*self.patterns[i]["_X_Y"] - self.patterns[i]["X_Y"]*self.patterns[i]["_XY"]) / (self.patterns[i]["XY"]*self.patterns[i]["_X_Y"] + self.patterns[i]["X_Y"]*self.patterns[i]["_XY"])
def yule_y(self, i):
""" Calculates the yule's Y of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Yule's Y of subspace
"""
return (math.sqrt(self.patterns[i]["XY"] * self.patterns[i]["_X_Y"]) - math.sqrt(self.patterns[i]["X_Y"] * self.patterns[i]["_XY"])) / (math.sqrt(self.patterns[i]["XY"] * self.patterns[i]["_X_Y"]) + math.sqrt(self.patterns[i]["X_Y"] * self.patterns[i]["_XY"]))
def quality_of_pattern(self, i):
""" Calculates the amount of non-noisy elements of a given subspace
https://doi.org/10.1016/j.patcog.2021.107900
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Percentage of non-noisy elements of subspace
"""
counter = 0
col_pos = 0
for column in self.patterns[i]["columns"]:
for row in self.patterns[i]["lines"]:
column_value = self.patterns[i]["column_values"][col_pos]
if pd.isna(self.data.at[row, column]):
import Functions
import pandas as pd
import matplotlib.pyplot as plt
def group_sentiment(dfSentiment):
dfSentiment['datetime'] = pd.to_datetime(dfSentiment['created_utc'], unit='s')
dfSentiment['date'] = pd.DatetimeIndex(dfSentiment['datetime']).date
dfSentiment = dfSentiment[
['created_utc', 'negative_comment', 'neutral_comment', 'positive_comment', 'datetime', 'date']]
dfSentiment = dfSentiment.groupby(by=['date']).sum()
return dfSentiment
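# Illustrative usage (a sketch with made-up values, not part of the original module):
#   posts = pd.DataFrame({"created_utc": [1577836800, 1577840400, 1577923200],
#                         "negative_comment": [1, 0, 0],
#                         "neutral_comment": [0, 1, 0],
#                         "positive_comment": [0, 0, 1]})
#   daily = group_sentiment(posts)  # one row per calendar day with summed counts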
def cleaning(df):
# Importing Bot user names
bots = pd.read_csv(r'Data\Bots.csv', index_col=0, sep=';')
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
import numpy as np
import os
from py2neo import Graph, Node, Relationship, NodeMatcher, RelationshipMatcher
# from neo4j import GraphDatabase
# import neo4j
import networkx as nx
import json
import datetime
import matplotlib.pyplot as plt
# from ggplot import *
from shutil import copytree
import math
# from graph_tool.all import *
import json
import random
# Choose a path for the Neo4j_Imports folder to import the data from MOD into Neo4j
# formose_MOD_exports_path = "../data/formose/Neo4j_Imports"
formose_MOD_exports_path = "../data/pyruvic_acid/Neo4j_Imports"
glucose_MOD_exports_path = "../data/glucose/Neo4j_Imports"
# exports_folder_paths = [formose_MOD_exports_path, glucose_MOD_exports_path]
EXPORT_PATHS = [glucose_MOD_exports_path]
# Set the following to False if you want to leave order of import records in
# each generation file the same; set to True to randomly shuffle the order of
# the records within each file. By shuffling the order, the order at which the
# molecules are imported into Neo4j will be randomized, and thus the start point
# at which the cycles pattern match begins is randomized each time, so we can
# get samples at different starting points in the network since it is too
# computationally intensive to match for all possible patterns in the network.
SHUFFLE_GENERATION_DATA = True
# Repeat the whole import and pattern match routine REPEAT_RUNS amount of times.
# Pair this with SHUFFLE_GENERATION_DATA so that if SHUFFLE_GENERATION_DATA
# is True, sample pattern matches on the graph REPEAT_RUNS amount of times
# starting from random points on the graph from the shuffling, where each
# run matches up to NUM_STRUCTURES_LIMIT of patterns.
REPEAT_RUNS = 10
# Filter out these molecules by smiles string from being imported into Neo4j
# for pattern match / network statistic calculations.
MOLECULE_FILTER = ['O']
# If True, will match for autocatalytic pattern mattches using the pattern match
# query in graph_queries/_FINAL_QUERY_PARAMETERIZED.txt. If not, will skip this
# and just do node degree / rank calculations. (One reason you might want to disable
# pattern match query results is because this is very computationally intensive
# and takes a lot of time; so disable if you are just looking for network statistics.)
PATTERN_MATCHES = True
# Rather than disabling completely if running into performance issues, limit the
# number of patterns that can be matched so that the query stops executing as
# soon as it reaches the pattern limit, and the matches are returned.
NUM_STRUCTURES_LIMIT = 100
# Limit the range of the ring size. Note that the ring size includes molecule
# and reaction nodes, so if a ring of 3 molecules to 6 molecules is desired,
# for example, then RING_SIZE_RANGE would be (3*2, 6*2), or (6, 12)
RING_SIZE_RANGE = (6, 8) # (6, 8) is size 6-8 reaction+molecule nodes, or 3-4 molecule nodes only
# Limit the number of generations that each network can be imported on. If None,
# no limit--will default to the maximum number of generations generated. You may
# want to limit this to ~4 generations or less if performance is an issue; the
# network will grow exponentially, so pattern match queries might take too long
# to produce results.
GENERATION_LIMIT = 4 # None
# If NETWORK_SNAPSHOTS is True, the program gathers data on the network at each generation
# in the reaction netowrk. If False, the program gathers data only on the state of
# the network once all generations have completely finished being loaded (snapshot
# only of the final generation).
NETWORK_SNAPSHOTS = True
# Enable this only if you want to capture network statistics (such as node degree
# plots over generation)
COLLECT_NETWORK_STATISTICS = False
# Set this to True if you want to generate a static image of the network after
# loading. Might run into Out of Memory error. Default leaving this as False
# because we generated a much nicer visualization of the full network using Gephi.
FULL_NETWORK_VISUALIZATION = False
# configure network database Neo4j
url = "bolt://neo4j:0000@localhost:7687"
graph = Graph(url)
matcher = NodeMatcher(graph)
rel_matcher = RelationshipMatcher(graph)
def get_timestamp():
return str(datetime.datetime.now()).replace(":","-").replace(" ","_").replace(".","-")
def create_molecule_if_not_exists(smiles_str, generation_formed, exact_mass=0):
"""
Create molecule in DB if not exists.
"""
molecule = matcher.match("Molecule", smiles_str = smiles_str).first()
if molecule is None:
# molecule does not exist, create node with generation information
tx = graph.begin()
new_m = Node("Molecule",
smiles_str = smiles_str,
exact_mass = round(float(exact_mass),3),
generation_formed = generation_formed)
tx.create(new_m)
tx.commit()
return new_m
return molecule
def create_reaction_if_not_exists(id, rule, generation_formed):
reaction = matcher.match("Reaction", id = id).first()
if reaction is None:
tx = graph.begin()
new_rxn = Node("Reaction",
id = id,
rule = rule,
generation_formed = generation_formed)
tx.create(new_rxn)
tx.commit()
return new_rxn
return reaction
def create_reactant_rel_if_not_exists(smiles_str, rxn_id, generation_formed):
molecule = matcher.match("Molecule", smiles_str = smiles_str).first()
reaction = matcher.match("Reaction", id = rxn_id).first()
match_pattern = rel_matcher.match(nodes=(molecule, reaction),
r_type="REACTANT" #,
# properties = {"generation_formed": generation_formed}
)
# if pattern does not exist in db
if len(list(match_pattern)) <= 0:
tx = graph.begin()
# see documentation for weird Relationship function; order of args go:
# from node, relationship, to node, and then kwargs for relationship properties
# https://py2neo.org/v4/data.html#py2neo.data.Relationship
new_r = Relationship(molecule, "REACTANT", reaction,
generation_formed=generation_formed)
tx.create(new_r)
tx.commit()
return new_r
return match_pattern
def create_product_rel_if_not_exists(smiles_str, rxn_id, generation_formed):
molecule = matcher.match("Molecule", smiles_str = smiles_str).first()
reaction = matcher.match("Reaction", id = rxn_id).first()
match_pattern = rel_matcher.match(nodes=(reaction, molecule),
r_type="PRODUCT" #,
# properties = {"generation_formed": generation_formed}
)
# if pattern does not exist in db
if len(list(match_pattern)) <= 0:
tx = graph.begin()
# see documentation for weird Relationship function; order of args go:
# from node, relationship, to node, and then kwargs for relationship properties
# https://py2neo.org/v4/data.html#py2neo.data.Relationship
new_r = Relationship(reaction, "PRODUCT", molecule,
generation_formed=generation_formed)
tx.create(new_r)
tx.commit()
return new_r
return match_pattern
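# Illustrative import flow (a sketch, not from the original loader; the SMILES
# strings, reaction id and rule name below are made up): for one imported
# reaction record the helpers above would be chained roughly as
#   create_molecule_if_not_exists("OCC=O", generation_formed=1, exact_mass=60.021)
#   create_reaction_if_not_exists(id="rxn_42", rule="aldol_addition", generation_formed=1)
#   create_reactant_rel_if_not_exists("OCC=O", "rxn_42", generation_formed=1)
#   create_product_rel_if_not_exists("OCC(O)C=O", "rxn_42", generation_formed=1)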
def save_query_results(generation_num, query_result, file_name, this_out_folder):
with open(f'output/' + this_out_folder + f"/{generation_num}/{file_name}.json", 'w') as file_data_out:
json.dump(query_result, file_data_out)
data_df = pd.read_json(f'output/' + this_out_folder + f"/{generation_num}/{file_name}.json")
data_df.to_csv(f'output/' + this_out_folder + f"/{generation_num}/{file_name}.csv", index=False)
def read_query_results(file_path):
try:
df = pd.read_csv(file_path)
except:
df = pd.DataFrame()
return df
def run_single_value_query(query, value):
return graph.run(query).data()[0][value]
def get_tabulated_possible_autocatalytic_cycles(generation_num,
mod_exports_folder_path,
this_out_folder,
ring_size_range = (6,8),
feeder_molecule_generation_range = None,
num_structures_limit = 100
):
"""
After the graph has been loaded with data, let's execute a query and export
the tabulated results.
An input of "None" to any of the params means no limit. By default the ring
size will be 3 to 4 molecules (6 to 8 alternating molecule and reaction nodes).
"""
print("\t\t\tPreparing query for cycles...")
# make sure inputs are okay
print("\t\t\t\tChecking input parameters...")
min_ring_size = ring_size_range[0]
max_ring_size = ring_size_range[1]
if min_ring_size < 0 or max_ring_size < 0:
print("Ring sizes can not be negative.")
quit()
if min_ring_size > max_ring_size:
print("The minimum ring size must not exceed the maximum.")
quit()
if min_ring_size <= 2:
print("The minimum ring size must be above 2.")
quit()
if feeder_molecule_generation_range != None:
min_feeder_gen = feeder_molecule_generation_range[0]
max_feeder_gen = feeder_molecule_generation_range[1]
if min_feeder_gen < 0 or max_feeder_gen < 0:
print("The feeder generation can not be negative.")
quit()
if min_feeder_gen > max_feeder_gen:
print("The minimum feeder generation must not exceed the maximum.")
quit()
else:
min_feeder_gen = None
max_feeder_gen = None
# load query and insert params
print("\t\t\t\tReplacing query parameters in query string...")
query_txt = open("graph_queries/_FINAL_QUERY_PARAMETERIZED.txt",'r').read()
query_txt = query_txt.replace("{{MIN_RING_SIZE}}", str(min_ring_size))
query_txt = query_txt.replace("{{MAX_RING_SIZE}}", str(max_ring_size))
if feeder_molecule_generation_range == None:
query_txt = query_txt.replace("{{COMMENT_OUT_FEEDER_GEN_LOGIC}}", "//")
else:
query_txt = query_txt.replace("{{COMMENT_OUT_FEEDER_GEN_LOGIC}}", "")
query_txt = query_txt.replace("{{MIN_FEEDER_GENERATION}}", str(min_feeder_gen))
query_txt = query_txt.replace("{{MAX_FEEDER_GENERATION}}", str(max_feeder_gen))
query_txt = query_txt.replace("{{NUM_STRUCTURES_LIMIT}}", str(num_structures_limit))
# Get the max ID of all molecules to get a random molecule to start with.
# Query several times in small chunks to stochastically estimate the behavior
# of the graph without having to traverse the entire thing for this query.
# max_node_id = run_single_value_query("MATCH (n) RETURN max(ID(n)) AS max_node_id","max_node_id")
# WHERE ID(beginMol) = round(rand() * {{MAX_NODE_ID}})
# print("\t\t\t" + query_txt)
# Execute query in Neo4j. If out of memory error occurs, need to change DB settings:
# I used heap initial size set to 20G, heap max size set to 20G, and page cache size set to 20G,
# but these settings would depend on your hardware limitations.
# See Neo4j Aura for cloud hosting: https://neo4j.com/aura/
print("\t\t\t\tExecuting query and collecting results (this may take awhile)...")
print(f"\t\t\t\tTime start: {get_timestamp()}")
query_result = graph.run(query_txt).data()
print(f"\t\t\t\tTime finish: {get_timestamp()}")
# print("\t\tQuery results:")
# print(query_result[0])
print("\t\t\t\tSaving query results and meta info...")
# save data as JSON and CSV (JSON for easy IO, CSV for human readability)
save_query_results(generation_num = generation_num,
query_result = query_result,
file_name = "autocat_query_results",
this_out_folder = this_out_folder)
# save meta info as well in out folder
with open(f"output/" + this_out_folder + f"/{generation_num}/autocat_query.txt", 'w') as file_query_out:
file_query_out.write(query_txt)
query_params = pd.DataFrame( {"parameter": ["min_ring_size","max_ring_size","min_feeder_gen","max_feeder_gen","num_structures_limit"],
"value": [min_ring_size, max_ring_size, min_feeder_gen, max_feeder_gen, num_structures_limit] } )
query_params.to_csv(f"output/" + this_out_folder + f"/{generation_num}/autocat_query_parameters.csv", index=False)
return this_out_folder
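# Illustrative call (a sketch; the output folder name is a placeholder):
#   get_tabulated_possible_autocatalytic_cycles(generation_num=4,
#                                               mod_exports_folder_path=glucose_MOD_exports_path,
#                                               this_out_folder="run_" + get_timestamp(),
#                                               ring_size_range=RING_SIZE_RANGE,
#                                               num_structures_limit=NUM_STRUCTURES_LIMIT)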
def analyze_possible_autocatalytic_cycles(generation_num, mod_exports_folder_path, query_results_folder):
"""
Now that we have the tabulated results of the graph queries, let's do some
analysis on what's going on.
1. Ring size frequency distribution
2. Total mass per cycle per feeder molecule's generation (calculate total
using only the molecules in the ring, and use the feeder molecule's
generation as the ring's generation).
Note: make sure to remove duplicates when getting sum of mass in ringPathNodes
because the beginMol is counted twice (it is the start and end node in the path).
3. Count of cycles by feeder generation
"""
print("Generating some plots on cycle size distribution / stats by generation...")
# 1.
query_data = pd.read_json(f"output/" + query_results_folder + f"/{generation_num}/autocat_query_results.json")
if query_data.empty:
print("No cycles found.")
return
# print(query_data.describe())
# print(query_data.head())
# cycle distribution (y axis is frequency, x axis is ring size)
fig, ax = plt.subplots()
# print(query_data.head())
# query_data['countMolsInRing'] = query_data['countMolsInRing'].astype(int)
query_data['countMolsInRing'].value_counts().plot(ax = ax,
kind='bar',
title = "Ring Size Frequency Distribution")
ax.set_xlabel("Ring Size (# of Molecules)")
ax.set_ylabel("Count of Cycles")
plt.savefig(f"output/" + query_results_folder + f"/{generation_num}/ring_size_distribution.png")
# plt.show()
# 2.
# Total mass of cycle per generation. Not really needed.
# 3.
# count of cycles by feeder generation
fig, ax = plt.subplots()
gen_formed_arr = []
feederMolData = list(query_data['feederMol'])
for feederMol in feederMolData:
gen_formed_arr.append(feederMol['generation_formed'])
# get unique list of feeder generations and sum by generation
gen_formed_arr = np.array(gen_formed_arr)
feeder_gen_counts = np.unique(gen_formed_arr, return_counts=True)
feeder_gen_counts = np.transpose(feeder_gen_counts)
cycles_by_gen_df = pd.DataFrame(feeder_gen_counts, columns=['feeder_gen',
'cycle_count'])
cycles_by_gen_df.plot(ax=ax,
x = "feeder_gen",
y = "cycle_count",
kind = "bar",
legend = False,
title = "Count of Cycles by Feeder Generation")
ax.set_xlabel("Cycle Generation (Generation Formed of Feeder Molecule)")
ax.set_ylabel("Count of Cycles")
plt.savefig(f"output/" + query_results_folder + f"/{generation_num}/count_cycles_by_feeder_generation.png")
# close all plots so they don't accumulate memory
print("\tAutocatalysis pattern matching done.")
plt.close('all')
def plot_hist(query_results_folder, generation_num, file_name, statistic_col_name, title, x_label, y_label):
# fig, ax = plt.subplots()
# df = pd.read_csv(f"output/{query_results_folder}/{file_name}.csv")
# num_bins = int(math.sqrt(df.shape[0])) # estimate the number of bins by taking the square root of the number of rows in the dataset
# df.plot.hist(bins=num_bins, ax=ax)
# ax.set_xlabel(x_label)
# ax.set_ylabel(y_label)
# plt.savefig(f"output/{query_results_folder}/{file_name}.png")
fig, ax = plt.subplots()
df = pd.read_csv(f"output/{query_results_folder}/{generation_num}/{file_name}.csv")
num_bins = int(math.sqrt(df.shape[0])) # estimate the number of bins by taking the square root
df = pd.pivot_table(df,
values="smiles_str",
index=[statistic_col_name],
columns=["generation_formed"],
aggfunc=lambda x: math.log10(len(x.unique()))) # the log of the count of unique smiles_str
df.plot.hist(ax=ax,
bins = num_bins,
title=title,
figsize = (15,15))
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
plt.savefig(f"output/{query_results_folder}/{generation_num}/{file_name}_histogram.png")
def plot_scatter(query_results_folder,
generation_num,
file_name,
statistic_col_name,
title,
x_label,
y_label):
fig, ax = plt.subplots()
df = pd.read_csv(f"output/{query_results_folder}/{generation_num}/{file_name}.csv")
df = df.head(100) # cut off by top 100 most interesting
# df.plot.bar(ax = ax,
# x = "smiles_str",
# y = statistic_col_name,
# # color = "generation_formed",
# legend = True,
# title = title,
# figsize = (14,14))
# ggplot(aes(x = "smiles_str",
# y = statistic_col_name,
# color = "generation_formed"),
# data = df) + geom_point()
# ax.legend(['generation_formed'])
groups = df.groupby("generation_formed")
for name, group in groups:
plt.plot(group['smiles_str'],
group[statistic_col_name],
marker = "o",
linestyle = "",
label = name)
plt.legend(loc="best", title="Generation Formed")
plt.xticks(rotation=90)
fig.set_figheight(15)
fig.set_figwidth(15)
ax.set_title(title)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
plt.savefig(f"output/{query_results_folder}/{generation_num}/{file_name}_scatter.png")
def network_statistics(generation_num, query_results_folder):
"""
Get some statistics on the network.
0. Number of nodes and edges in the graph, as well as various network-level
statistics: 1. Eigenvector centrality, 2. Betweenness Centrality,
3. Random-walk betweenness, 4. Clique enumeration,
5. k-plex enumeration, 6. k-core enumeration,
7. k-component enumeration, 8. neighbor redundancy
1. Node degree distribution: log10 of node degree frequency by degree
value colored by generation_formed, one plot for incoming, outgoing,
and incoming and outgoing edges
2. Avg number of edges per node per generation
"""
print("Doing some network statistics...")
# 0.
# get total number of nodes and edges
total_count_nodes = run_single_value_query("MATCH (n) RETURN COUNT(n) AS count_nodes", 'count_nodes')
total_count_rels = run_single_value_query("MATCH (n)-[r]->() RETURN COUNT(r) AS count_rels", 'count_rels')
# 0.1 eigenvector_centrality
# do by generation, molecule, order by score first
eigenvector_centrality = graph.run("""
CALL algo.eigenvector.stream('Molecule', 'FORMS', {})
YIELD nodeId, score
RETURN algo.asNode(nodeId).smiles_str AS smiles_str, algo.asNode(nodeId).generation_formed AS generation_formed, score AS eigenvector_centrality
ORDER BY eigenvector_centrality DESC """).data()
save_query_results(generation_num, eigenvector_centrality, "eigenvector_centrality", query_results_folder)
plot_hist(query_results_folder = query_results_folder,
generation_num = generation_num,
file_name = "eigenvector_centrality",
statistic_col_name = "eigenvector_centrality",
title = "Histogram of Eigenvector Centrality",
x_label = "Eigenvector Centrality Score Bin",
y_label = "Count of Molecules")
plot_scatter(query_results_folder = query_results_folder,
generation_num = generation_num,
file_name = "eigenvector_centrality",
statistic_col_name = "eigenvector_centrality",
title = "Eigenvector Centrality - Top 100 Connected Molecules",
x_label = "Molecule Smiles Format",
y_label = "Eigenvector Centrality Score")
avg_eigenvector_centrality = run_single_value_query("""
CALL algo.eigenvector.stream('Molecule', 'FORMS', {})
YIELD nodeId, score
RETURN avg(score) AS avg_score
""",
"avg_score")
# 0.2 betweenness_centrality
betweenness_centrality = graph.run("""
CALL algo.betweenness.stream('Molecule','FORMS',{direction:'out'})
YIELD nodeId, centrality
MATCH (molecule:Molecule) WHERE id(molecule) = nodeId
RETURN molecule.smiles_str AS smiles_str, molecule.generation_formed AS generation_formed, centrality AS betweenness_centrality
ORDER BY betweenness_centrality DESC;
""").data()
save_query_results(generation_num, betweenness_centrality, "betweenness_centrality", query_results_folder)
plot_hist(query_results_folder = query_results_folder,
generation_num = generation_num,
file_name = "betweenness_centrality",
statistic_col_name = "betweenness_centrality",
title = "Histogram of Betweenness Centrality",
x_label = "Betweenness Centrality Score Bin",
y_label = "Count of Molecules")
plot_scatter(query_results_folder = query_results_folder,
generation_num = generation_num,
file_name = "betweenness_centrality",
statistic_col_name = "betweenness_centrality",
title = "Betweenness Centrality - Top 100 Connected Molecules",
x_label = "Molecule Smiles Format",
y_label = "Betweenness Centrality Score")
avg_betweenness_centrality = run_single_value_query("""
CALL algo.betweenness.stream('Molecule','FORMS',{direction:'out'})
YIELD nodeId, centrality
MATCH (molecule:Molecule) WHERE id(molecule) = nodeId
RETURN avg(centrality) AS avg_centrality
""", 'avg_centrality')
# 0.3 Random-walk betweenness
random_walk_betweenness = graph.run(""" CALL algo.betweenness.sampled.stream('Molecule','FORMS', {strategy:'random', probability:1.0, maxDepth:1, direction: "out"})
YIELD nodeId, centrality
MATCH (molecule) WHERE id(molecule) = nodeId
RETURN molecule.smiles_str AS smiles_str, molecule.generation_formed AS generation_formed, centrality AS random_walk_betweenness
ORDER BY random_walk_betweenness DESC;""").data()
save_query_results(generation_num, random_walk_betweenness, "random_walk_betweenness", query_results_folder)
plot_hist(query_results_folder = query_results_folder,
generation_num = generation_num,
file_name = "random_walk_betweenness",
statistic_col_name = "random_walk_betweenness",
title = "Histogram of Random Walk Betweenness Centrality",
x_label = "Random Walk Betweenness Centrality Score Bin",
y_label = "Count of Molecules")
plot_scatter(query_results_folder = query_results_folder,
generation_num = generation_num,
file_name = "random_walk_betweenness",
statistic_col_name = "random_walk_betweenness",
title = "Random Walk Betweenness Centrality - Top 100 Connected Molecules",
x_label = "Molecule Smiles Format",
y_label = "Random Walk Betweenness Centrality Score")
avg_random_walk_betweenness = run_single_value_query("""CALL algo.betweenness.stream('Molecule','FORMS',{direction:'out'})
YIELD nodeId, centrality
MATCH (molecule:Molecule) WHERE id(molecule) = nodeId
RETURN avg(centrality) AS avg_random_walk_betweenness""",
'avg_random_walk_betweenness')
# 0.4 Clique enumeration
avg_clique_enumeration = None #run_single_value_query("", 'clique_enumeration')
# 0.5 K-Plex enumeration
avg_k_plex_enumeration = None #run_single_value_query("", 'k_plex_enumeration')
# 0.6 K-Core enumeration
avg_k_core_enumeration = None #run_single_value_query("", 'k_core_enumeration')
# 0.7 K-Component enumeration
avg_k_component_enumeration = None #run_single_value_query("", 'k_component_enumeration')
# 0.8 Neighbor redundancy
avg_neighbor_redundancy = None #run_single_value_query("", 'neighbor_redundancy')
# save all to graph_info DataFrame
graph_info = pd.DataFrame({"statistic": ["Total Count Molecules", "Total Count Edges","Average Eigenvector Centrality", "Average Betweenness Centrality", "Average Random-walk Betweenness", 'Clique enumeration','k-plex enumation','k-core enumeration','k-component enumeration','Neighbor redundancy'],
"value": [total_count_nodes, total_count_rels, avg_eigenvector_centrality, avg_betweenness_centrality, avg_random_walk_betweenness, avg_clique_enumeration, avg_k_plex_enumeration, avg_k_core_enumeration, avg_k_component_enumeration, avg_neighbor_redundancy]})
graph_info.to_csv(f"output/{query_results_folder}/_network_info.csv", index=False)
# 1.
# first do the query and save the results
node_deg_query = """
MATCH (n:Molecule)
RETURN n.smiles_str AS smiles_str, n.exact_mass AS exact_mass,
n.generation_formed AS generation_formed, size((n)--()) AS count_relationships
"""
node_deg_query_results = graph.run(node_deg_query).data()
node_deg_file = "node_distribution_results"
save_query_results(generation_num = generation_num,
query_result = node_deg_query_results,
file_name = node_deg_file,
this_out_folder = query_results_folder)
# now read in the results, transform, and plot
# also can represent this as a histogram?
fig, ax = plt.subplots()
node_deg_df = pd.read_csv(f"output/{query_results_folder}/{generation_num}/{node_deg_file}.csv")
# node_deg_df['count_relationships'].value_counts().plot(ax=ax,
# kind='bar',
# title="Node Degree Distribution by Generation Formed")
node_deg_pivot = pd.pivot_table(node_deg_df,
values="smiles_str",
index=["count_relationships"],
columns=["generation_formed"],
aggfunc=lambda x: math.log10(len(x.unique()))) # the log of the count of unique smiles_str
node_deg_pivot.plot(ax=ax,
kind="bar",
title="Square of Molecule Degree by Generation Formed",
figsize = (8,5))
ax.set_xlabel("Molecule Degree (count of incoming and outgoing edges)")
ax.set_ylabel("log10(Count of Molecules)")
plt.savefig(f"output/{query_results_folder}/{generation_num}/{node_deg_file}.png")
# 2.
# get average number of edges by node and generation
fig, ax = plt.subplots()
node_deg_avg = node_deg_df.groupby(by=['generation_formed']).mean().reset_index()
# print(node_deg_avg)
node_deg_avg.plot(ax=ax,
x = "generation_formed",
y = "count_relationships",
kind="scatter",
title="Average Molecule Degree by Generation Formed",
figsize = (8,5),
legend = False)
ax.set_xlabel("Generation Formed")
ax.set_ylabel("Average Node Degree")
plt.savefig(f"output/{query_results_folder}/{generation_num}/{node_deg_file}_avg.png")
# incoming relationships by molecule
incoming_rels_count_file = "incoming_rels_count"
incoming_rels_count = graph.run("""
MATCH (n)<-[r:FORMS]-()
RETURN n.smiles_str AS smiles_str,
n.generation_formed AS generation_formed,
n.exact_mass AS exact_mass,
count(r) AS count_incoming
ORDER BY count_incoming DESC
""").data()
save_query_results(generation_num = generation_num,
query_result = incoming_rels_count,
file_name = incoming_rels_count_file,
this_out_folder = query_results_folder)
fig, ax = plt.subplots()
# node_deg_df = pd.read_csv(f"output/{query_results_folder}/{generation_num}/{incoming_rels_count_file}.csv")
node_deg_df = read_query_results(f"output/{query_results_folder}/{generation_num}/{incoming_rels_count_file}.csv")
# node_deg_df['count_relationships'].value_counts().plot(ax=ax,
# kind='bar',
# title="Node Degree Distribution by Generation Formed")
if not node_deg_df.empty:
node_deg_pivot = pd.pivot_table(node_deg_df,
values="smiles_str",
index=["count_incoming"],
columns=["generation_formed"],
aggfunc=lambda x: math.log10(len(x.unique()))) # the log10 of the count of unique smiles_str
node_deg_pivot.plot(ax=ax,
kind="bar",
title="Square of Molecule Degree by Generation Formed for Incoming Relationships",
figsize = (8,5))
ax.set_xlabel("Molecule Degree (count of incoming edges)")
ax.set_ylabel("log10(Count of Molecules)")
plt.savefig(f"output/{query_results_folder}/{generation_num}/{incoming_rels_count_file}.png")
# outgoing relationships by molecule
outgoing_rels_count_file = "outgoing_rels_count"
outgoing_rels_count = graph.run("""
MATCH (n)-[r:FORMS]->()
RETURN n.smiles_str AS smiles_str,
n.generation_formed AS generation_formed,
n.exact_mass AS exact_mass,
count(r) AS count_outgoing
ORDER BY count_outgoing DESC
""").data()
save_query_results(generation_num = generation_num,
query_result = outgoing_rels_count,
file_name = outgoing_rels_count_file,
this_out_folder = query_results_folder)
fig, ax = plt.subplots()
# node_deg_df = pd.read_csv(f"output/{query_results_folder}/{generation_num}/{outgoing_rels_count_file}.csv")
node_deg_df = read_query_results(f"output/{query_results_folder}/{generation_num}/{outgoing_rels_count_file}.csv")
if not node_deg_df.empty:
node_deg_pivot = pd.pivot_table(node_deg_df,
values="smiles_str",
index=["count_outgoing"],
columns=["generation_formed"],
aggfunc=lambda x: math.log10(len(x.unique()))) # the log10 of the count of unique smiles_str
node_deg_pivot.plot(ax=ax,
kind="bar",
title="Square of Molecule Degree by Generation Formed for Outgoing Relationships",
figsize = (8,5))
ax.set_xlabel("Molecule Degree (count of outgoing edges)")
ax.set_ylabel("log10(Count of Molecules)")
plt.savefig(f"output/{query_results_folder}/{generation_num}/{outgoing_rels_count_file}.png")
# close all plots so they don't accumulate memory
print("\tNetwork statistics done.")
plt.close('all')
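# Hypothetical sketch (not part of the original pipeline): the same summary statistics can be
# approximated offline with networkx on the graph returned by graph_from_cypher below, which
# avoids depending on the Neo4j algo.* procedures. The function and variable names are invented.
def _networkx_centrality_sketch(nxG):
    import networkx as nx
    simple = nx.DiGraph(nxG)  # collapse parallel edges; centrality routines expect a simple graph
    if len(simple) == 0:
        return {}
    eig = nx.eigenvector_centrality_numpy(simple)
    btw = nx.betweenness_centrality(simple)
    return {"avg_eigenvector_centrality": sum(eig.values()) / len(eig),
            "avg_betweenness_centrality": sum(btw.values()) / len(btw)}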
def graph_from_cypher(data):
"""
Note: FULL_NETWORK_VISUALIZATION is set to False because the whole-network plot was generated
in Gephi and is not needed in this module. This helper is only kept in case we want to
programmatically generate a static network visualization later.
From: https://stackoverflow.com/questions/59289134/constructing-networkx-graph-from-neo4j-query-result
Constructs a networkx graph from the results of a neo4j cypher query.
Example of use:
>>> result = session.run(query)
>>> G = graph_from_cypher(result.data())
Nodes have fields 'labels' (frozenset) and 'properties' (dicts). Node IDs correspond to the neo4j graph.
Edges have fields 'type_' (string) denoting the type of relation, and 'properties' (dict).
"""
G = nx.MultiDiGraph()
def add_node(node):
# Adds the node if it hasn't already been added
# print(node)
# print(type(node))
# print(node.keys())
u = node['smiles_str'] # unique identifier for Node
if G.has_node(u):
return
G.add_node(u, labels=node._labels, properties=dict(node))
def add_edge(relation):
# Adds edge if it hasn't already been added.
# Make sure the nodes at both ends are created
for node in (relation.start_node, relation.end_node):
add_node(node)
# Check if edge already exists
u = relation.start_node['smiles_str'] # unique identifier for Node
v = relation.end_node['smiles_str'] # unique identifier for Node
eid = relation['rxn_id'] # unique identifier for Relationship
if G.has_edge(u, v, key=eid):
return
# If not, create it
G.add_edge(u, v, key=eid, type_=relation.type, properties=dict(relation))
for d in data:
for entry in d.values():
# Parse node
if isinstance(entry, Node):
add_node(entry)
# Parse link
elif isinstance(entry, Relationship):
add_edge(entry)
else:
raise TypeError("Unrecognized object")
return G
def network_visualization_by_gen(query_results_folder, generation_num):
print("Generating an image for the network visualization...")
# driver = GraphDatabase.driver(url)
full_network_query = """
MATCH (n)-[r]->(m)
RETURN *
"""
# with driver.session() as session:
# result = session.run(full_network_query)
result = graph.run(full_network_query)
# plot using NetworkX graph object + Matplotlib
nxG = graph_from_cypher(result.data())
nx.draw(nxG)
plt.savefig(f"output/{query_results_folder}/{generation_num}/network_visualization_nxG_at_gen_{generation_num}.png")
plt.close('all')
# plot using graph_tool module (convert from NetworkX graph to this graph)
# gtG = nx2gt(nxG)
# graph_draw(gtG,
# vertex_text = g.vertex_index,
# output = f"output/{query_results_folder}/{generation_num}/network_visualization_gtG_at_gen_{generation_num}.png")
def compute_likely_abundance_by_molecule(generation_num, query_results_folder):
"""
Join the per-generation query results (see the `datasets` mapping below for the columns
taken from each file) into one dataset used to compute the abundance score.
"""
print("\tComputing the likely abundance score by molecule...")
# Join all the datasets for rels for all generations. Start with the
# node_distribution_results query and then join all the other data onto it
datasets = {'node_distribution_results': ['smiles_str',
'exact_mass',
'generation_formed',
'count_relationships'],
'incoming_rels_count': ['smiles_str',
'count_incoming'],
'outgoing_rels_count': ['smiles_str',
'count_outgoing'],
'betweenness_centrality': ['smiles_str',
'betweenness_centrality'],
'eigenvector_centrality': ['smiles_str',
'eigenvector_centrality'],
'random_walk_betweenness': ['smiles_str',
'random_walk_betweenness']
}
full_df = pd.DataFrame()
for dataset in datasets.keys():
try:
df = pd.read_csv(f"output/{query_results_folder}/{generation_num}/{dataset}.csv")
df = df[datasets[dataset]] # filter only by the needed columns
if dataset == "node_distribution_results":
full_df = df
else:
full_df = | pd.merge(full_df, df, on="smiles_str", how='left') | pandas.merge |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix, roc_curve, auc
def multiple_histograms_plot(data, x, hue, density=False, bins=10,
alpha=0.5, colors=None, hue_order=None,
probability_hist=False, xticks=None,
title=None, xlabel=None, ylabel=None,
figsize=(15, 8), xticklabels=None):
hue_order = hue_order if hue_order is not None else sorted(data[hue].unique())
colors = colors if colors is not None else sns.color_palette(n_colors=len(hue_order))
colors_dict = dict(zip(hue_order, colors))
plt.figure(figsize=figsize)
for current_hue in hue_order:
current_hue_mask = data[hue] == current_hue
data.loc[current_hue_mask, x].hist(bins=bins, density=density,
alpha=alpha, label=str(current_hue),
color=colors_dict[current_hue])
xlabel = x if xlabel is None else xlabel
ylabel = (ylabel if ylabel is not None
else 'Density' if density
else 'Frequency')
_title_postfix = ' (normalized)' if density else ''
title = title if title is not None else f'{xlabel} by {hue}{_title_postfix}'
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.legend()
ax = plt.gca()
if probability_hist:
plt.xlim(-0.0001, 1.0001)
ax.set_xticks(np.arange(0, 1.1, 0.1))
ax.set_xticks(np.arange(0.05, 1, 0.1), minor=True)
elif xticks is not None:
ax.set_xticks(xticks)
if xticklabels is not None:
ax.set_xticklabels(xticklabels)
def bar_plot_with_categorical(df, x, hue, order=None, figsize=(16, 8),
plot_average=True, xticklabels=None,
**sns_kwargs):
if order is None:
order = ( | pd.pivot_table(data=df, values=hue, index=[x], aggfunc='mean') | pandas.pivot_table |
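# Hypothetical stand-alone sketch (not a continuation of the truncated function above): deriving
# a category order from the per-category mean of a target column, which is what the pivot_table
# call above computes, and feeding it to seaborn's barplot. Data values are invented.
import pandas as pd
import seaborn as sns
toy = pd.DataFrame({"segment": ["a", "a", "b", "b", "c"], "churn": [1, 0, 1, 1, 0]})
order = (pd.pivot_table(data=toy, values="churn", index=["segment"], aggfunc="mean")
         .sort_values("churn", ascending=False)
         .index.tolist())
ax = sns.barplot(data=toy, x="segment", y="churn", order=order)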
import datetime as dt
import io
import unittest
from unittest.mock import patch
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
from spaced_repetition.domain.problem import Difficulty, ProblemCreator
from spaced_repetition.domain.problem_log import ProblemLogCreator, Result
from spaced_repetition.domain.tag import TagCreator
from spaced_repetition.presenters.cli_presenter import CliPresenter
# pylint: disable=protected-access, no-self-use
class TestCommonFormatters(unittest.TestCase):
def test_format_difficulty(self):
difficulty = pd.Series({1: Difficulty.MEDIUM})
expected_res = pd.Series({1: Difficulty.MEDIUM.name})
res = CliPresenter._format_difficulty(difficulty=difficulty)
assert_series_equal(expected_res, res)
def test_format_result(self):
result = pd.Series({1: Result.KNEW_BY_HEART})
expected_res = pd.Series({1: Result.KNEW_BY_HEART.name})
res = CliPresenter._format_result(result=result)
assert_series_equal(expected_res, res)
def test_format_timestamp(self):
timestamp = pd.Series({1: dt.datetime(2021, 1, 15, 10, 23, 45, 124)})
expected_res = pd.Series({1: '2021-01-15 10:23'})
res = CliPresenter._format_timestamp(ts=timestamp)
assert_series_equal(expected_res, res)
class TestListProblemTagCombos(unittest.TestCase):
def setUp(self):
self.problem_tag_combo_df = pd.DataFrame(data=[{
'difficulty': Difficulty.MEDIUM,
'ease': 2.5,
'interval': 10,
'KS': 2.0,
'problem': 'name',
'problem_id': 5,
'result': Result.NO_IDEA,
'RF': 0.7,
'surplus_col': 'not displayed',
'tag': 'test-tag',
'ts_logged': dt.datetime(2021, 1, 10, 8, 10, 0, 1561),
'url': 'www.test.com'
}])
self.pre_format_cols = ['tag', 'problem', 'problem_id', 'difficulty', 'last_access',
'last_result', 'KS', 'RF', 'url', 'ease',
'interval']
self.post_format_cols = ['problem', 'problem_id', 'difficulty', 'last_access',
'last_result', 'KS', 'RF', 'url', 'ease',
'interval']
def test_format_df(self):
expected_df = pd.DataFrame(data=[{
'difficulty': 'MEDIUM',
'ease': 2.5,
'interval': 10,
'problem_id': 5,
'KS': 2.0,
'last_access': '2021-01-10 08:10',
'problem': 'name',
'last_result': Result.NO_IDEA.name,
'RF': 0.7,
'tag': 'test-tag',
'url': 'www.test.com'}]) \
.set_index('tag') \
.reindex(columns=self.post_format_cols)
formatted_df = CliPresenter.format_df(
self.problem_tag_combo_df,
ordered_cols=self.pre_format_cols,
index_col='tag')
| assert_frame_equal(expected_df, formatted_df) | pandas.testing.assert_frame_equal |
import enum
from functools import lru_cache
from typing import List
import dataclasses
import pathlib
import pandas as pd
import numpy as np
from covidactnow.datapublic.common_fields import CommonFields
from covidactnow.datapublic.common_fields import FieldName
from covidactnow.datapublic.common_fields import GetByValueMixin
from covidactnow.datapublic.common_fields import ValueAsStrMixin
from covidactnow.datapublic.common_fields import PdFields
from libs.datasets import taglib
from libs.datasets import timeseries
from libs.datasets import dataset_utils
MultiRegionDataset = timeseries.MultiRegionDataset
NYTIMES_ANOMALIES_CSV = dataset_utils.LOCAL_PUBLIC_DATA_PATH / pathlib.Path(
"data/cases-nytimes/anomalies.csv"
)
@enum.unique
class NYTimesFields(GetByValueMixin, ValueAsStrMixin, FieldName, enum.Enum):
"""Fields used in the NYTimes anomalies file"""
DATE = "date"
END_DATE = "end_date"
COUNTY = "county"
STATE = "state"
GEOID = "geoid"
TYPE = "type"
OMIT_FROM_ROLLING_AVERAGE = "omit_from_rolling_average"
OMIT_FROM_ROLLING_AVERAGE_ON_SUBGEOGRAPHIES = "omit_from_rolling_average_on_subgeographies"
DESCRIPTION = "description"
@lru_cache(None)
def read_nytimes_anomalies():
df = pd.read_csv(
NYTIMES_ANOMALIES_CSV, parse_dates=[NYTimesFields.DATE, NYTimesFields.END_DATE]
)
# Extract fips from geoid column.
df[CommonFields.FIPS] = df[NYTimesFields.GEOID].str.replace("USA-", "")
# Denormalize data so that each row represents a single date+location+metric anomaly
df = _denormalize_nyt_anomalies(df)
# Add LOCATION_ID column (must happen after denormalizing since denormalizing can add additional
# rows for subgeographies).
df[CommonFields.LOCATION_ID] = df[CommonFields.FIPS].map(dataset_utils.get_fips_to_location())
# A few locations (e.g. NYC aggregated FIPS 36998) don't have location IDs. That's okay, just remove them.
df = df.loc[df[CommonFields.LOCATION_ID].notna()]
# Convert "type" column into "variable" column using new_cases / new_deaths as the variable.
assert df[NYTimesFields.TYPE].isin(["cases", "deaths"]).all()
df[PdFields.VARIABLE] = df[NYTimesFields.TYPE].map(
{"cases": CommonFields.NEW_CASES, "deaths": CommonFields.NEW_DEATHS}
)
# Add demographic bucket (all) to make it more compatible with our dataset structure.
df[PdFields.DEMOGRAPHIC_BUCKET] = "all"
return df
# TODO(mikelehen): This should probably live somewhere more central, but I'm not sure where.
def _get_county_fips_codes_for_state(state_fips_code: str) -> List[str]:
"""Helper to get county FIPS codes for all counties in a given state."""
geo_data = dataset_utils.get_geo_data()
state = geo_data.set_index("fips").at[state_fips_code, "state"]
counties_df = geo_data.loc[
(geo_data["state"] == state) & (geo_data["aggregate_level"] == "county")
]
counties_fips = counties_df["fips"].to_list()
return counties_fips
def _denormalize_nyt_anomalies(df: pd.DataFrame) -> pd.DataFrame:
"""
The NYT anomaly data is normalized such that each row can represent an
anomaly for multiple dates, locations, and metrics. We want to denormalize
it so that each row represents a single date+location+metric anomaly.
"""
# Look for rows with an end_date and create separate rows for each date in the [date, end_date] range.
def date_range_for_row(row: pd.Series):
return pd.date_range(
row[NYTimesFields.DATE],
row[NYTimesFields.DATE]
if | pd.isna(row[NYTimesFields.END_DATE]) | pandas.isna |
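# Hypothetical, self-contained sketch of the denormalisation idea described in the docstring
# above (one row per date in a [date, end_date] range); it is not the project's implementation,
# and the column values are invented.
import pandas as pd
anomalies = pd.DataFrame({"date": pd.to_datetime(["2021-03-01", "2021-03-05"]),
                          "end_date": pd.to_datetime(["2021-03-03", pd.NaT]),
                          "geoid": ["USA-01001", "USA-02013"]})
anomalies["date"] = anomalies.apply(
    lambda row: list(pd.date_range(row["date"],
                                   row["date"] if pd.isna(row["end_date"]) else row["end_date"])),
    axis=1)
denormalized = anomalies.explode("date").drop(columns="end_date").reset_index(drop=True)
print(denormalized)  # three rows for the first anomaly, one for the second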
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import sys
import matplotlib.pyplot as plt
sys.path.insert(1, '../MLA')
import imp
import numpy as np
import xgboost_wrapper as xw
import regression_wrappers as rw
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
# In[2]:
from hyperopt import hp, tpe
from hyperopt.fmin import fmin
from hyperopt import STATUS_OK
from sklearn.model_selection import cross_val_score, StratifiedKFold
# In[3]:
# A = pd.read_csv("CHESS%20COVID19%20CaseReport%2020200401.csv")
A = pd.read_csv("CHESS COVID19 CaseReport 20200628.csv")
#
# In[4]:
A_ = pd.read_csv("CHESS COVID19 CaseReport 20200628.csv")
idx = A_['asymptomatictesting']=='Yes'
A_ = A_.loc[idx,][['infectionswabdate', 'hospitaladmissiondate']]
lag = pd.to_datetime(A_['infectionswabdate']).dt.round('D') - pd.to_datetime(A_['hospitaladmissiondate']).dt.round('D')
# In[5]:
# plt.hist(lag.dt.days, bins=50);
print('swab at or after admission:')
print(np.sum(lag.dt.days >= 0))
print('swab before admission:')
print(np.sum(lag.dt.days < 0))
# In[6]:
# A = pd.read_csv("CHESS COVID19 CaseReport 20200601.csv")
def to_eliminate(x):
if str(x.finaloutcomedate) == 'nan':
if str(x.finaloutcome) == 'nan':
return True
elif 'still on unit' in str(x.finaloutcome):
return True
else:
return False
elif str(x.finaloutcomedate) == '1900-01-01':
return True
else:
return False
to_elimi = A[['finaloutcomedate','dateadmittedicu','finaloutcome']].apply(to_eliminate, axis=1)
# In[7]:
to_elimi.sum()
# In[8]:
A['dateupdated'] = pd.to_datetime(A['dateupdated']).dt.round('D')
A[['hospitaladmissiondate','finaloutcomedate','dateadmittedicu','finaloutcome','dateupdated']].head()
# In[9]:
A = A[~to_elimi]
# In[10]:
pd.to_datetime(A['hospitaladmissiondate']).min(), pd.to_datetime(A['dateleavingicu']).max()
# In[11]:
A = A.loc[~A.caseid.duplicated()]
# In[12]:
A = A.rename(columns={'immunosuppressiontreatmentcondit': 'immunosuppressiontreatmentcondition'})
A = A.rename(columns={'immunosuppressiondiseaseconditio': 'immunosuppressiondiseasecondition'})
# In[13]:
for feature in ['isviralpneumoniacomplication',
'isardscomplication', 'isunknowncomplication',
'isothercoinfectionscomplication', 'isothercomplication',
'issecondarybacterialpneumoniacom',
'chronicrespiratory', 'asthmarequiring',
'chronicheart',
'chronicrenal', 'asymptomatictesting',
'chronicliver',
'chronicneurological',
'immunosuppressiontreatment', 'immunosuppressiondisease', 'obesityclinical', 'pregnancy',
'other',
'hypertension', 'seriousmentalillness']:
A[feature] = A[feature].apply(lambda x: 1 if 'Yes' in str(x) else 0)
# In[14]:
A = A.rename(columns={'other': 'other_comorbidity'})
# In[15]:
A['age10year'] = A['ageyear'].apply(lambda x: x/10)
# In[16]:
A['sex_is_M'] = A['sex'].apply(lambda x: 1 if 'Male' in x else 0)
# In[17]:
A['sex_is_unkn'] = A['sex'].apply(lambda x: 1 if 'Unknown' in x else 0)
# In[18]:
A = A.drop(columns = ['ageyear', 'sex'])
# In[19]:
A['ethnicity'] = A['ethnicity'].apply(lambda x: 'Eth. NA' if pd.isna(x) else x)
# In[20]:
A['ethnicity'] = A['ethnicity'].apply(lambda x: x.strip(' '))
# In[21]:
def stratify(df, feature):
keys = [str(s) for s in df[feature].unique()]
keys = list(set(keys))
df = df.copy()
for key in keys:
df[key.strip(' ')] = df[feature].apply(lambda x, key: 1 if str(x)==key else 0, args=(key, ))
return df.drop(columns=feature)
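# In[ ]:
# Hypothetical illustration (added, not from the original notebook): stratify() above is
# essentially one-hot encoding; pd.get_dummies gives a close equivalent on invented toy data
# (stratify always produces a 'nan' column because it compares against str(x), whereas
# get_dummies only encodes missing values when dummy_na=True).
_toy = pd.DataFrame({'ethnicity': ['White British', 'Indian', np.nan]})
_via_stratify = stratify(_toy, 'ethnicity')
_via_dummies = pd.get_dummies(_toy['ethnicity'], dummy_na=True)
print(_via_stratify.columns.tolist(), _via_dummies.columns.tolist())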
# In[22]:
A['ethnicity'].value_counts()
# In[23]:
A['ethnicity'].value_counts().sum(), A.shape
# In[24]:
A = stratify(A, 'ethnicity')
# In[25]:
A = A.rename(columns={'Unknown':'Eth. unknown',
'other':'Other ethn.',
'White British': 'White British',
'Other White': 'Other white',
'Other Asian': 'Other Asian',
'Black African':'Black African',
'Black Caribbean':'Black Caribbean',
'Other Black': 'Other black',
'White Irish': 'White Irish',
'White and Black Caribbean':'White and black Caribbean',
'Other mixed':'Other mixed',
'White and Black African':'White and black African',
'White and Asian':'White and Asian'})
# In[26]:
def diabetes_type(x):
if x.isdiabetes == 'Yes':
if x.diabetestype == 'Type I':
return 'T1 diabetes'
else:
return 'T2 diabetes'
else:
return np.nan
# In[27]:
A['diabetes'] = A[['isdiabetes', 'diabetestype']].apply(diabetes_type, axis=1)
# In[28]:
A = stratify(A, 'diabetes')
# In[29]:
# drop nan column created from stratification of diabetes categorical
A = A.drop(columns=['isdiabetes','nan', 'diabetestype'])
# In[30]:
A = A.drop(columns=['organismname'])
# In[31]:
to_drop = ['trustcode', 'trustname', 'dateupdated', 'weekno',
'weekofadmission', 'yearofadmission', 'agemonth', 'postcode',
'estimateddateonset', 'infectionswabdate', 'labtestdate',
'typeofspecimen', 'otherspecimentype', 'covid19',
'influenzaah1n1pdm2009', 'influenzaah3n2', 'influenzab',
'influenzaanonsubtyped', 'influenzaaunsubtypable', 'rsv',
'otherresult', 'otherdetails', 'admissionflu', 'admissionrsv',
'admissioncovid19', 'admittedfrom', 'otherhospital', 'hospitaladmissionhours',
'hospitaladmissionminutes', 'hospitaladmissionadmittedfrom',
'wasthepatientadmittedtoicu',
'hoursadmittedicu', 'minutesadmittedicu', 'sbother', 'sbdate', 'respiratorysupportnone',
'oxygenviacannulaeormask', 'highflownasaloxygen',
'noninvasivemechanicalventilation',
'invasivemechanicalventilation', 'respiratorysupportecmo',
'mechanicalinvasiveventilationdur', 'anticovid19treatment',
'chronicrespiratorycondition', 'respiratorysupportunknown',
'asthmarequiringcondition', 'seriousmentalillnesscondition',
'chronicheartcondition', 'hypertensioncondition',
'immunosuppressiontreatmentcondition',
'immunosuppressiondiseasecondition',
'obesitybmi', 'gestationweek', 'travelin14days',
'travelin14dayscondition', 'prematurity', 'chroniclivercondition',
'worksashealthcareworker', 'contactwithconfirmedcovid19case',
'contactwithconfirmedcovid19casec', 'othercondition',
'transferdestination', 'chronicneurologicalcondition',
'outcomeother', 'causeofdeath', 'chronicrenalcondition', 'othercomplication']
# In[32]:
A = A.drop(columns=to_drop)
# In[33]:
A['caseid'] = A['caseid'].astype(int)
# In[34]:
A = A.set_index('caseid')
# In[35]:
A = A.loc[A.age10year > 0]
# In[36]:
A['is_ICU'] = ~A['dateadmittedicu'].isna()
# In[37]:
A['is_death'] = A['finaloutcome'] == 'Death'
# In[38]:
print(A['is_ICU'].sum())
# In[39]:
print(A['is_death'].sum())
# In[40]:
print((A['is_death'] & A['is_ICU']).sum())
# In[41]:
A.to_csv('CHESS_comorb_only_with_outcome.csv')
# In[42]:
A = pd.read_csv('CHESS_comorb_only_with_outcome.csv')
A = A.set_index('caseid')
# In[43]:
min_date = pd.to_datetime(A.hospitaladmissiondate).min()
A['day_from_beginning1'] = (pd.to_datetime(A.hospitaladmissiondate) - min_date).dt.days
plt.hist(A['day_from_beginning1'], bins=100);
# In[44]:
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
trasformed=sc.fit_transform(A['day_from_beginning1'].values.reshape(-1,1))
A['days from beginning'] = trasformed.flatten() #+ np.mean(trasformed.flatten())
A = A.drop(columns=['day_from_beginning1'])
# In[45]:
plt.hist(A['days from beginning'], bins=100);
# In[46]:
A['clinical experience'] = A['days from beginning'].rank()
A['clinical experience'] = A['clinical experience'] / A['clinical experience'].max()
# In[47]:
plt.hist(A['clinical experience'], bins=100);
# In[48]:
def int_to_date(x, min_date):
timestamp = pd.to_timedelta(x, unit='D') + min_date
return '-'.join([str(timestamp.day), str(timestamp.month), str(timestamp.year)[-2:]])
# pd.to_timedelta(A['day_from_beginning1'], unit='D') + min_date).head()
# In[49]:
A['is_death'].sum(), A['is_ICU'].sum()
# In[50]:
a = (10 * A['age10year']).value_counts().reset_index().sort_values(by='index').values
plt.plot(a[:,0], a[:,1],'.')
plt.xlabel('age');
(10 * A['age10year']).describe()
# In[51]:
dizionario = {'chronicrespiratory':'Chronic respiratory disease',
'asthmarequiring':'Asthma',
'chronicheart':'Chronic heart disease',
'chronicrenal':'Chronic renal disease',
'chronicneurological':'Chronic neurological cond.',
'immunosuppressiontreatment':'Immunosuppression treatment',
'immunosuppressiondisease':'Immunosuppression disease',
'obesityclinical':'Obesity (clinical)',
'other_comorbidity':'Other comorbidity',
'age10year': 'Age (x10 years)',
'sex_is_M':'Sex male',
'sex_is_unkn':'Sex unknown',
'asymptomatictesting':'Asymptomatic testing',
'seriousmentalillness':'Serious mental illness',
'chronicliver':'Chronic liver',
'chronicliver_fatty':'Chronic fat liver',
'chronicliver_alcohol':'Chronic alcohol. liver',
'chronicliver_other': 'Chronic liver disease',
'hypertension': 'Hypertension',
'pregnancy': 'Pregnancy'}
# In[52]:
A = A.rename(columns=dizionario)
# In[53]:
A['Sex unknown'].sum() / A.shape[0] * 100
# In[54]:
A['Sex male'].sum() / A.shape[0] * 100
# In[55]:
A[A['is_ICU']]['is_death'].sum()
# In[56]:
A.shape
# In[57]:
A = A.rename(columns={'days from beginning': 'Admission day'})
# # Clustermap
# In[58]:
get_ipython().system('mkdir results_10Nov')
# In[59]:
import seaborn as sns
sns.distplot(A[A['Eth. unknown'].astype('bool')]['Age (x10 years)'] * 10)
sns.distplot(A[A['Eth. NA'].astype('bool')]['Age (x10 years)']* 10)
sns.distplot(A['Age (x10 years)']* 10)
# In[60]:
import seaborn as sns
C = A.drop(columns=['dateadmittedicu', 'hospitaladmissiondate', 'finaloutcome', 'finaloutcomedate', 'dateleavingicu',
'isviralpneumoniacomplication', 'issecondarybacterialpneumoniacom',
'isardscomplication', 'isunknowncomplication',
'isothercoinfectionscomplication', 'isothercomplication'])
# Draw the full plt
ethnicity = ['White Irish', 'Black Caribbean', 'Other Asian', 'White and black African',
'Bangladeshi', 'Indian',
'Other black', 'Chinese', 'Other white', 'Black African', 'White and Asian',
'Pakistani', 'White British', 'Other mixed', 'White and black Caribbean', 'Other ethn.',
'Eth. unknown', 'Eth. NA']
print("number of people who didn't report any ethnicity")
print(C[ethnicity].apply(lambda x: ~x.any(), axis=1).sum())
# C['NA'] = C[ethnicity].apply(lambda x: ~x.any(), axis=1)
comorbidities = ['chronicrespiratory', 'asthmarequiring', 'chronicheart', 'hypertension',
'chronicrenal', 'immunosuppressiontreatment',
'immunosuppressiondisease', 'obesityclinical', 'pregnancy',
'other_comorbidity', 'age10year', 'sex_is_unkn', 'sex_is_M',
'T1 diabetes', 'T2 diabetes', 'seriousmentalillness',
'chronicneurological', 'chronicliver', 'asymptomatictesting']
comorbidities =[dizionario.get(x) if x in dizionario else x for x in comorbidities ]
Ccorr = C.corr();
Ccorr1 = Ccorr[comorbidities +['is_death', 'is_ICU']].loc[ethnicity,:]
Ccorr1 = Ccorr1.rename(columns={'is_death':'death', 'is_ICU':'ICUA'})
fig,ax = plt.subplots(1, 1, figsize=(10, 8))
sns.heatmap(Ccorr1, center=0, cmap="vlag", ax=ax,
# row_colors=network_colors,
# col_colors=network_colors,
linewidths=.75)
# figsize=(13, 13))
fig = plt.gcf()
plt.tight_layout()
plt.savefig('results_10Nov/correlation1_new.png')
# In[61]:
dizionarioR = {'Age..x10.years.':'Age (x10 years)',
'Asthma':'Asthma',
'Black.African':'Black African',
'Black.Caribbean':'Black Caribbean',
'Chronic.heart.disease':'Chronic heart disease',
'Chronic.liver':'Chronic liver',
'Chronic.neurological.cond.':'Chronic neurological cond.',
'Chronic.renal.disease':'Chronic renal disease',
'Chronic.respiratory.disease':'Chronic respiratory disease',
'Immunosuppression.disease':'Immunosuppression disease',
'Immunosuppression.treatment':'Immunosuppression treatment',
'Obesity..clinical.':'Obesity (clinical)',
'Other.Asian':'Other Asian',
'Other.black':'Other black',
'Other.comorbidity':'Other comorbidity',
'Other.ethn.':'Other ethn.',
'Other.mixed':'Other mixed',
'Other.white':'Other white',
'Serious.mental.illness':'Serious mental illness',
'Sex.male':'Sex male',
'Sex.unknown':'Sex unknown',
'T1.diabetes':'T1 diabetes',
'T2.diabetes':'T2 diabetes',
'White.and.Asian':'White and Asian',
'White.and.black.African':'White and black African',
'White.and.black.Caribbean':'White and black Caribbean',
'White.British':'White British',
'White.Irish':'White Irish',
'Asymptomatic.testing':'Asymptomatic testing',
'Admission.day':'Admission day',
'Clinical.experience':'Clinical experience',
'Eth..NA':'Eth. NA',
'Eth..unknown':'Eth. unknown'
}
# # Logistic 1
# In[242]:
ethnicity = ['Black Caribbean', 'Other Asian', 'White and black African', 'Bangladeshi', 'Indian',
'Other black', 'Chinese', 'Other white', 'Black African', 'White and Asian', 'Pakistani',
'Eth. unknown', 'Eth. NA', 'Other mixed', 'White and black Caribbean', 'White British', 'White Irish', 'Other ethn.']
# In[243]:
B = A.drop(columns=['dateadmittedicu', 'hospitaladmissiondate', 'finaloutcome', 'finaloutcomedate', 'dateleavingicu',
'isviralpneumoniacomplication', 'issecondarybacterialpneumoniacom',
'isardscomplication', 'isunknowncomplication', 'patientstillonicu',
'isothercoinfectionscomplication', 'isothercomplication']).drop(columns=['clinical experience'])
# In[244]:
percentages_eth = pd.DataFrame((B[ethnicity].sum() / B.shape[0]).sort_values(ascending=False))
percentages_com = pd.DataFrame((B[comorbidities].sum() / B.shape[0]).sort_values(ascending=False))
# In[245]:
percentages_eth
# In[246]:
percentages_com
# In[247]:
percentages_eth.to_excel('results_10Nov/frequencies_et.xls')
percentages_com.to_excel('results_10Nov/frequencies_com.xls')
# In[68]:
targets = ['is_death', 'is_ICU']
# In[69]:
B=B.drop(columns='White British')
# ## Death outcome
# In[70]:
target = 'is_death'
predictors = [x for x in B.columns if x not in targets]
X_train, X_test, y_train, y_test = train_test_split(B[predictors], B[target], test_size=0.1, random_state=42)
# In[71]:
X_train.to_csv('results_10Nov/X_train_death.csv')
pd.DataFrame(y_train).to_csv('results_10Nov/y_train_death.csv')
X_test.to_csv('results_10Nov/X_test_death.csv')
# In[72]:
get_ipython().system('pwd')
# In[73]:
get_ipython().system('/usr/bin/Rscript logisticregression.R results_10Nov/X_train_death.csv results_10Nov/y_train_death.csv results_10Nov/X_test_death.csv results_10Nov/logistic_summary_death.csv results_10Nov/logistic_prediction_death.csv')
# In[74]:
Vif_death = pd.read_csv("tmp.csv")
Vif_death.sort_values(by='vif.logitMod.', ascending=False).head()
# In[75]:
# Vif_death.sort_values(by='vif.logitMod.', ascending=False).rename(index=dizionarioR).to_excel('results_10Nov/vif_GLM_death_1.xls')
# In[76]:
logistic_summary = pd.read_csv("results_10Nov/logistic_summary_death.csv")
logistic_prediction = pd.read_csv("results_10Nov/logistic_prediction_death.csv")
# In[77]:
fig, ax = plt.subplots(1,1,figsize=(4, 3.5))
xw.buildROC(y_test, logistic_prediction, ax, color=None, label=None, CI=True)
ax.set_title('ROC GML logistic for death outcome');
# fig.savefig('results_10Nov/ROC_GML_death_1.png')
# In[78]:
fig_, ax_ = plt.subplots(1, 2, figsize=(4 * 2.2, 3.5), sharey=True)
xw.buildROC(y_test, logistic_prediction, ax_[0], color=None, label=None, CI=True)
ax_[0].set_title('ROC GML logistic for death outcome');
# fig.savefig('results_10Nov/ROC_GML_death_1.png')
# In[79]:
logistic_summary['OR'] = np.exp(logistic_summary['Estimate'])
# Taylor series-based delta method
logistic_summary['OR_sd'] = logistic_summary[['Std. Error', 'OR']].apply(lambda x: np.sqrt(x.OR**2) * x['Std. Error'], axis=1)
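# In[ ]:
# Hypothetical check (added): the delta-method standard error used above is SE(OR) ~= OR * SE(beta),
# i.e. the derivative of exp(beta) times the standard error of beta. The numbers are invented.
_beta, _se_beta = 0.30, 0.05
_or = np.exp(_beta)
print(_or, _or * _se_beta)  # odds ratio ~1.35 with approximate SE ~0.067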
# In[80]:
def is_not_ethn(x):
if x in ['Bangladeshi', 'Black.African', 'Black.Caribbean', 'Chinese', 'Other.ethn.', 'Indian', 'Other.Asian',
'Other.black', 'Other.mixed',
'Other.white', 'Pakistani', 'White.and.Asian', 'White.and.black.African',
'White.and.black.Caribbean', 'Eth..unknown', 'Eth..NA','White.Irish']:
return 1
elif ('Sex' in x) or ('Age' in x) or ('Admis' in x) or ('Pregnan' in x) or ("Asympt" in x) or ("Immunosuppression.treatment" in x):
return 0
else:
return 2
# In[81]:
logistic_summary['order'] = logistic_summary['Row.names'].apply(is_not_ethn)
# In[82]:
logistic_summary = logistic_summary.sort_values(by=['order','Estimate'], ascending=False).drop(columns=['order'])
# In[83]:
logistic_summary['Row.names'] = logistic_summary['Row.names'].apply(lambda x: dizionarioR.get(x) if x in dizionarioR else x)
# In[84]:
logistic_summary
# In[85]:
def pvaluesymbol(P):
if P > 0.05:
return ''
elif (P <= 0.05) & (P > 0.01):
return '*'
elif (P <= 0.01) & (P > 0.001):
return '**'
elif (P <= 0.001) & (P > 0.0001):
return '***'
elif P <= 0.0001:
return '****'
# In[86]:
logistic_summary['pvaluesymbol'] = logistic_summary['Pr(>|z|)'].apply(pvaluesymbol)
# In[87]:
logistic_summary.to_excel('results_10Nov/coefficients_GLM_death_1.xls')
# In[88]:
def plot_coeff(summary, ax, title=None, xtext=None):
summary = summary.sort_values(by='Estimate', ascending=True)
summary.plot.barh(x='Row.names', y='Estimate',
ax=ax, color='none',
xerr='Std. Error',
legend=False)
ax.set_ylabel('')
ax.set_xlabel('')
ax.set_title(title)
ax.scatter(y=np.arange(summary.shape[0]),  # pd.np is removed in pandas >= 1.0; use numpy directly
marker='s', s=120,
x=summary['Estimate'], color='black')
ax.axvline(x=0, linestyle='--', color='grey', linewidth=1)
if xtext is None:
xtext = ax.get_xlim()[1]
for i, p in enumerate(summary.pvaluesymbol.values):
# try:
ax.text(xtext, i, p)
# print(p)
# except:
# pass
def plot_OR(summary, ax, title=None, xtext=None):
# summary['order'] = summary['Row.names'].apply(is_not_ethn)
# summary = summary.sort_values(by=['order', 'Estimate'], ascending=True)
summary = summary.loc[::-1,]
xerr = summary.apply(lambda x: (x['OR']- x["2.5 %.y"], x["97.5 %.y"] -x['OR']), result_type='expand', axis=1).values.transpose()
# print(xerr.head())
# print(xerr.tail())
summary.plot.barh(x='Row.names', y='OR',
ax=ax, color='none',
xerr=xerr,
legend=False)
ax.set_ylabel('')
ax.set_xlabel('')
ax.set_title(title)
ax.scatter(y=np.arange(summary.shape[0]),
marker='s', s=120,
x=summary['OR'], color='black')
ax.axvline(x=1, linestyle='--', color='grey', linewidth=1)
if xtext is None:
xtext = ax.get_xlim()[1]
for i, p in enumerate(summary.pvaluesymbol.values):
try:
float(p)
bo = False
except (TypeError, ValueError):
bo = True
if bo:
ax.text(xtext, i, p)
# In[89]:
logistic_summary.to_excel('results_10Nov/coefficients_GLM_death_1_britbaseline.xls')
logistic_summary = pd.read_excel('results_10Nov/coefficients_GLM_death_1_britbaseline.xls')
# In[90]:
fig, ax = plt.subplots(figsize=(8, 12))
plot_OR(logistic_summary.drop(index=1), ax, title='OR log. regression, death outcome')
ax.autoscale(enable=False)
ax.fill_between([-10,100],[-1,-1], [6.5, 6.5], color='grey', alpha=0.05, zorder=-100)
ax.fill_between([-10,100],[23.5,23.5], [100,105], color='grey', alpha=0.05, zorder=-100)
fig.tight_layout()
fig.savefig('results_10Nov/OR_GML_death_1.png')
# In[91]:
fig, ax = plt.subplots(1,1,figsize=(4, 3.5))
plt.plot(X_test['Age (x10 years)'] * 10, logistic_prediction, '.')
# ax.set_title('ROC XGBoost for ICU outcome');
ax.set_xlabel('Age');
ax.set_ylabel('Prob. of death');
# In[92]:
fig, ax = plt.subplots(1,1,figsize=(4, 3.5))
plt.plot(X_test['Admission day'], logistic_prediction, '.')
# ax.set_title('ROC XGBoost for ICU outcome');
ax.set_xlabel('Admission day (standardized)');
ax.set_ylabel('Prob. of death');
# ## ICU outcome
# In[93]:
target = 'is_ICU'
predictors = [x for x in B.columns if x not in targets]
X_train, X_test, y_train, y_test = train_test_split(B[predictors], B[target], test_size=0.1, random_state=42)
# In[94]:
X_train.to_csv('results_10Nov/X_train_ICU.csv')
pd.DataFrame(y_train).to_csv('results_10Nov/y_train_ICU.csv')
X_test.to_csv('results_10Nov/X_test_ICU.csv')
# In[95]:
get_ipython().system('/usr/bin/Rscript logisticregression.R results_10Nov/X_train_ICU.csv results_10Nov/y_train_ICU.csv results_10Nov/X_test_ICU.csv results_10Nov/logistic_summary_ICU.csv results_10Nov/logistic_prediction_ICU.csv')
# In[96]:
Vif_ICU = pd.read_csv("tmp.csv")
Vif_ICU.sort_values(by='vif.logitMod.', ascending=False)
Vif_ICU.to_excel('results_10Nov/vif_GLM_ICU_1.xls')
# In[97]:
Vif = Vif_death.join(Vif_ICU, lsuffix='death', rsuffix='ICU')
Vif.sort_values(by='vif.logitMod.death', ascending=False).rename(index=dizionarioR).to_excel('results_10Nov/vif_GLM_1.xls')
# In[98]:
logistic_summary = pd.read_csv("results_10Nov/logistic_summary_ICU.csv")
logistic_prediction = pd.read_csv("results_10Nov/logistic_prediction_ICU.csv")
# In[99]:
fig, ax = plt.subplots(1,1,figsize=(4, 3.5))
xw.buildROC(y_test, logistic_prediction, ax, color=None, label=None, CI=True);
ax.set_title('ROC GML logistic for ICU outcome');
# fig.savefig('results_10Nov/ROC_GLM_ICU_1.png')
# In[100]:
# fig_, ax_ = plt.subplots(1, 2, figsize=(4 * 2.2, 3.5))
xw.buildROC(y_test, logistic_prediction, ax_[1], color=None, label=None, CI=True)
ax_[1].set_title('ROC GML logistic for ICU outcome');
fig_.savefig('results_10Nov/ROC_GML_both_1.png')
# In[101]:
logistic_summary['OR'] = np.exp(logistic_summary['Estimate'])
# Taylor series-based delta method
logistic_summary['OR_sd'] = logistic_summary[['Std. Error', 'OR']].apply(lambda x: np.sqrt(x.OR**2) * x['Std. Error'], axis=1 )
# In[ ]:
# In[102]:
logistic_summary['pvaluesymbol'] = logistic_summary['Pr(>|z|)'].apply(pvaluesymbol)
# In[103]:
logistic_summary['order'] = logistic_summary['Row.names'].apply(is_not_ethn)
# In[104]:
logistic_summary = logistic_summary.sort_values(by=['order','Estimate'], ascending=False).drop(columns=['order'])
# In[105]:
logistic_summary['Row.names'] = logistic_summary['Row.names'].apply(lambda x: dizionarioR.get(x) if x in dizionarioR else x)
# In[106]:
logistic_summary
# In[107]:
logistic_summary.to_excel('results_10Nov/coefficients_GLM_ICU_1_britbaseline.xls')
# In[108]:
logistic_summary = pd.read_excel('results_10Nov/coefficients_GLM_ICU_1_britbaseline.xls')
fig, ax = plt.subplots(figsize=(8, 12))#
plot_OR(logistic_summary.drop(index=1), ax, title='ORs log. regression, ICU outcome')
ax.autoscale(enable=False)
ax.fill_between([-10,100],[-1,-1], [6.5, 6.5], color='grey', alpha=0.05, zorder=-100)
ax.fill_between([-10,100],[23.5,23.5], [100,105], color='grey', alpha=0.05, zorder=-100)
fig.tight_layout()
fig.savefig('results_10Nov/OR_GML_ICU_1.png')
# In[109]:
fig, ax = plt.subplots(1,1,figsize=(4, 3.5))
plt.plot(X_test['Age (x10 years)'] * 10, logistic_prediction, '.')
ax.set_xlabel('age');
ax.set_ylabel('Prob. of ICU');
# # Cross plots of ORs
# In[248]:
logistic_summary_ICU = | pd.read_excel('results_10Nov/coefficients_GLM_ICU_1_britbaseline.xls') | pandas.read_excel |
import numpy as np
import pandas as pd
from typing import List, Tuple, Dict
from sklearn.preprocessing import MinMaxScaler
from data_mining import ColorizedLogger
logger = ColorizedLogger('NullsFixer', 'yellow')
class NullsFixer:
__slots__ = ('sort_col', 'group_col')
sort_col: str
group_col: str
cols: List[str] = ['iso_code', 'date', 'daily_vaccinations', 'total_vaccinations',
'people_vaccinated', 'people_fully_vaccinated']
def __init__(self, sort_col: str, group_col: str):
self.sort_col = sort_col
self.group_col = group_col
@staticmethod
def fill_with_population(df: pd.DataFrame, df_meta: pd.DataFrame) -> pd.DataFrame:
def f1(row, col, target_col, multiplier=1):
if pd.isna(row[target_col]):
abs_val = row[col]
ph_val = 100 * abs_val / get_population(df_meta, row['country'])
return_val = round(ph_val, 2) * multiplier
else:
return_val = row[target_col]
return return_val
def get_population(_df, country):
return _df.loc[_df['country'] == country, 'population'].values[0]
df['people_vaccinated_per_hundred'] = df.apply(f1, args=(
'people_vaccinated', 'people_vaccinated_per_hundred'), axis=1)
df['people_fully_vaccinated_per_hundred'] = df.apply(f1, args=(
'people_fully_vaccinated', 'people_fully_vaccinated_per_hundred'), axis=1)
df['total_vaccinations_per_hundred'] = df.apply(f1, args=(
'total_vaccinations', 'total_vaccinations_per_hundred'), axis=1)
df['daily_vaccinations_per_million'] = df.apply(f1, args=(
'daily_vaccinations', 'daily_vaccinations_per_million', 10000), axis=1)
return df
def scale_cols(self, df: pd.DataFrame, cols: List[Tuple], per_group: bool = False) \
-> Tuple[pd.DataFrame, Dict, List[Tuple]]:
def scale_func(group_col, col_name):
# if col.max() > max_val:
scaler_ = MinMaxScaler(feature_range=(0, max_val))
scalers[(col_name, group_col.name)] = scaler_
return scaler_.fit_transform(group_col.astype(float).values.reshape(-1, 1)).reshape(-1)
df_keys = df.copy()[[self.sort_col, self.group_col]]
df_keys = [tuple(x) for x in df_keys.to_numpy()]
scalers = {}
for col, max_val in cols:
# logger.info(f'Scaling "{col}" column in the range: [0, {max_val}]')
if per_group:
df[col] = df.groupby(self.group_col)[col].transform(scale_func, col_name=col)
else:
scaler = MinMaxScaler(feature_range=(0, max_val))
scalers[col] = scaler
df[[col]] = scaler.fit_transform(df[[col]])
return df, scalers, df_keys
def unscale_cols(self, df: pd.DataFrame, cols: List[Tuple], scalers: Dict, df_keys: List[Tuple],
per_group: bool = False) -> pd.DataFrame:
def unscale_func(group_col, col_name):
scaler_ = scalers[(col_name, group_col.name)]
return scaler_.inverse_transform(group_col.astype(float).values.reshape(-1, 1)).reshape(-1)
def fix_negatives(group_col):
min_val = group_col.min()
if min_val < 0:
group_col -= min_val
return group_col
df = df[df[[self.sort_col, self.group_col]].apply(tuple, axis=1).isin(df_keys)]
for col, max_val in cols:
# logger.info(f'Unscaling "{col}" column from the range: [0, {max_val}]')
if per_group:
df[col] = df.groupby(self.group_col)[col].transform(unscale_func, col_name=col)
df[col] = df.groupby(self.group_col)[col].transform(fix_negatives)
else:
scaler = scalers[col]
df[[col]] = scaler.inverse_transform(df[[col]])
return df
def fix_and_infer(self, df: pd.DataFrame) -> pd.DataFrame:
accum_cols = ['people_fully_vaccinated', 'people_vaccinated', 'total_vaccinations']
df = self.fix(df)
for col in accum_cols:
count_nan = len(df[col]) - df[col].count()
if count_nan > 0:
df = self.infer_accum_col(df, col, 'total_vaccinations')
df = self.fix(df)
return df
def fix(self, df: pd.DataFrame) -> pd.DataFrame:
all_cols = df.columns
nulls_prev = df.loc[:, self.cols].isna().sum()
while True:
df = self.fix_people_fully_vaccinated(df)
df = self.fix_people_vaccinated(df)
df = self.fix_total_vaccinations(df)
df = self.fix_daily_vaccinations(df)
nulls = df.loc[:, self.cols].isna().sum()
if nulls.equals(nulls_prev):
break
nulls_prev = nulls
return df.loc[:, all_cols]
def infer_accum_col(self, df: pd.DataFrame, col: str, limit_col: str) -> pd.DataFrame:
def _infer_values(col, col_list, nulls_idx, val, consecutive_nulls, limit_col: pd.Series):
# Get top and bottom non-null values (for this block of consecutive nulls)
non_null_val_1 = col[col_list[nulls_idx[0] - 1][0]]
non_null_val_2 = val
# Calculate avg difference and create whole-number steps
diff = non_null_val_2 - non_null_val_1
whole_step, remainder = divmod(diff, consecutive_nulls + 1)
steps = whole_step * np.ones(consecutive_nulls)
steps[1:int(remainder) + 1] += 1
# Add the avg steps to each null value for this block
for null_ind, step in zip(nulls_idx, steps):
pd_idx_previous = col_list[null_ind - 1][0]
val_to_insert = col[pd_idx_previous] + step
pd_idx_null_current = col_list[null_ind][0]
limit_val = limit_col[pd_idx_null_current]
if val_to_insert > limit_val:
val_to_insert = limit_val
col[pd_idx_null_current] = val_to_insert
return col
def f_cols(col, limit_col: pd.Series):
consecutive_nulls = 0
nulls_idx = []
col_list = [(idx, val) for idx, val in col.items()]
for ind, (pd_ind, val) in enumerate(col_list):
if pd.isna(val):
if ind == 0:
col[pd_ind] = 0.0
else:
consecutive_nulls += 1
nulls_idx.append(ind)
if ind == len(col_list) - 1:
non_null_val_1 = col[col_list[nulls_idx[0] - 1][0]]
mean_step = round(col.mean())
max_val = non_null_val_1 + mean_step * consecutive_nulls
col = _infer_values(col, col_list, nulls_idx, max_val,
consecutive_nulls, limit_col)
else:
if consecutive_nulls > 0:
col = _infer_values(col, col_list, nulls_idx, val,
consecutive_nulls, limit_col)
# Reset
consecutive_nulls = 0
nulls_idx = []
return col
def f_groups(df: pd.DataFrame, col: str, limit_col: str):
df.loc[:, [col]] = df[[col]].apply(f_cols, args=(df[limit_col],), axis=0)
return df
df = df.sort_values(self.sort_col).reset_index(drop=True)
df = df.groupby(df[self.group_col]).apply(f_groups, col, limit_col)
return df
def fix_people_fully_vaccinated(self, df: pd.DataFrame) -> pd.DataFrame:
def f1(row):
cond_1 = pd.notna(row['total_vaccinations']) and pd.notna(row['people_vaccinated'])
cond_2 = pd.isna(row['people_fully_vaccinated'])
if cond_1 and cond_2:
row = row['total_vaccinations'] - row['people_vaccinated']
else:
row = row['people_fully_vaccinated']
return row
def f2(row):
cond_1 = row['total_vaccinations'] == 0.0
cond_2 = pd.isna(row['people_fully_vaccinated'])
if cond_1 and cond_2:
row = 0.0
else:
row = row['people_fully_vaccinated']
return row
# people_fully_vaccinated = total_vaccinations - people_vaccinated
df.loc[:, 'people_fully_vaccinated'] = df.apply(f1, axis=1)
# If total_vaccinations==0 -> people_fully_vaccinated = 0.0
df.loc[:, 'people_fully_vaccinated'] = df.apply(f2, axis=1)
# if prev_col == next_col -> col=prev_col
self.fix_if_unchanged(df=df, col='people_fully_vaccinated')
return df
def fix_people_vaccinated(self, df: pd.DataFrame) -> pd.DataFrame:
def f1(row):
cond_1 = pd.notna(row['total_vaccinations']) and pd.notna(row['people_fully_vaccinated'])
cond_2 = pd.isna(row['people_vaccinated'])
if cond_1 and cond_2:
row = row['total_vaccinations'] - row['people_fully_vaccinated']
else:
row = row['people_vaccinated']
return row
def f2(row):
cond_1 = row['total_vaccinations'] == 0.0
cond_2 = pd.isna(row['people_vaccinated'])
if cond_1 and cond_2:
row = 0.0
else:
row = row['people_vaccinated']
return row
# people_vaccinated = total_vaccinations - people_fully_vaccinated
df.loc[:, 'people_vaccinated'] = df.apply(f1, axis=1)
# If total_vaccinations==0 -> people_vaccinated = 0.0
df.loc[:, 'people_vaccinated'] = df.apply(f2, axis=1)
# if prev_col == next_col -> col=prev_col
self.fix_if_unchanged(df, 'people_vaccinated')
return df
@staticmethod
def global_fix(row):
# Setup the conditions
cond_1_1 = pd.notna(row['people_vaccinated']) and pd.notna(row['total_vaccinations'])
cond_1_2 = row['people_vaccinated'] > row['total_vaccinations']
cond_2_1 = pd.notna(row['people_fully_vaccinated']) and pd.notna(row['total_vaccinations'])
cond_2_2 = row['people_fully_vaccinated'] > row['total_vaccinations']
cond_3_1 = pd.notna(row['people_vaccinated']) and pd.notna(row['people_fully_vaccinated']) \
and pd.notna(row['total_vaccinations'])
cond_3_2 = row['people_vaccinated'] + row['people_fully_vaccinated'] \
> row['total_vaccinations']
# Check and fix
if cond_3_1:
if cond_3_2:
row['people_fully_vaccinated'], _ = divmod(row['total_vaccinations'], 2)
row['people_vaccinated'] = row['total_vaccinations'] - row['people_fully_vaccinated']
elif cond_1_1:
if cond_1_2:
row['people_vaccinated'] = row['total_vaccinations']
elif cond_2_1:
if cond_2_2:
row['people_fully_vaccinated'] = row['total_vaccinations']
return row
def fix_total_vaccinations(self, df: pd.DataFrame) -> pd.DataFrame:
def f1(row):
cond_1 = pd.notna(row['people_vaccinated']) and pd.notna(row['people_fully_vaccinated'])
cond_2 = | pd.isna(row['total_vaccinations']) | pandas.isna |
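# Hypothetical, stand-alone illustration (not the NullsFixer algorithm itself): the general idea
# behind infer_accum_col above — spreading the jump of a cumulative column across a block of
# missing days — can be approximated for a single series with pandas interpolation. Data invented.
import pandas as pd
s = pd.Series([100.0, None, None, 160.0, None, 200.0],
              index=pd.date_range("2021-02-01", periods=6, freq="D"),
              name="total_vaccinations")
filled = s.interpolate(method="linear").round()
print(filled)  # 100, 120, 140, 160, 180, 200 — monotone like the original column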
import pathlib
import subprocess
import pandas as pd
from papermill import execute_notebook, PapermillExecutionError
from .m3c import m3c_mapping_stats, m3c_additional_cols
from .mc import mc_mapping_stats, mc_additional_cols
from .mct import mct_mapping_stats, mct_additional_cols
from ._4m import _4m_mapping_stats, _4m_additional_cols
from .plate_info import get_plate_info
from ..pipelines import PACKAGE_DIR
from ...utilities import get_configuration
def mapping_stats(output_dir):
"""This is UID level mapping summary, the config file is in parent dir"""
output_dir = pathlib.Path(output_dir).absolute()
config = get_configuration(output_dir.parent / 'mapping_config.ini')
mode = config['mode']
if mode == 'mc':
final_df = mc_mapping_stats(output_dir, config)
elif mode == 'mct':
final_df = mct_mapping_stats(output_dir, config)
elif mode == 'm3c':
final_df = m3c_mapping_stats(output_dir, config)
elif mode == '4m':
final_df = _4m_mapping_stats(output_dir, config)
else:
raise ValueError
# plate info, which is tech independent.
_plate_info = get_plate_info(final_df.index, barcode_version=config['barcode_version'])
final_df = pd.concat([_plate_info, final_df], axis=1)
# save
final_df.to_csv(output_dir / 'MappingSummary.csv.gz')
return
def final_summary(output_dir, cleanup=True, notebook=None):
output_dir = pathlib.Path(output_dir).absolute()
mode = get_configuration(output_dir / 'mapping_config.ini')['mode']
path_to_remove = []
# Before running summary,
# first make sure all the UID dir having Snakefile also has mapping summary (means successful)
snakefile_list = list(output_dir.glob('*/Snakefile'))
summary_paths = []
missing_summary_dirs = []
for path in snakefile_list:
uid_dir = path.parent
summary_path = uid_dir / 'MappingSummary.csv.gz'
if summary_path.exists():
summary_paths.append(summary_path)
else:
missing_summary_dirs.append(uid_dir)
if len(missing_summary_dirs) != 0:
print('These sub dir missing MappingSummary files:')
for p in missing_summary_dirs:
print(p)
raise FileNotFoundError(f'Note that all sub dir should be successfully mapped '
f'before generating final summary. \n'
f'The MappingSummary.csv.gz is the final target file of snakefile in {path}. \n'
f'Run the corresponding snakemake command again to retry mapping.\n'
f'The snakemake commands can be found in output_dir/snakemake/*/snakemake_cmd.txt')
# aggregate mapping summaries
total_mapping_summary = pd.concat([ | pd.read_csv(path, index_col=0) | pandas.read_csv |
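# Hypothetical stand-alone sketch (not a continuation of the truncated pd.concat call above): the
# usual pattern for aggregating per-sample summary tables — read every CSV, then concatenate along
# the row axis. The paths below are invented.
import pathlib
import pandas as pd
summary_paths = sorted(pathlib.Path("mapping_output").glob("*/MappingSummary.csv.gz"))
frames = [pd.read_csv(path, index_col=0) for path in summary_paths]
if frames:
    total_summary = pd.concat(frames, sort=True)
    total_summary.to_csv("mapping_output/MappingSummary.csv.gz")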
# ============= COMP90024 - Assignment 2 ============= #
#
# The University of Melbourne
# Team 37
#
# ** Authors: **
#
# <NAME> 1048105
# <NAME> 694209
# <NAME> 980433
# <NAME> 640975
# <NAME> 1024577
#
# Location: Melbourne
# ====================================================
import tweepy
import json
import pandas as pd
from time import sleep
import copy
from datetime import datetime
#pip install openpyxl
def processTweet(tweet, geocoordinate):
geocoordSplit = geocoordinate.split(',')
temp_dict = {'tweet id': tweet.id, 'user id': tweet.user.id, 'text': tweet.text, 'lang': tweet.lang, 'user location': tweet.user.location,
'user geo_enabled': tweet.user.geo_enabled, 'coordinates': tweet.coordinates, 'created_at': tweet.created_at, 'latlong' : str(geocoordSplit[0] + ',' + geocoordSplit[1]),
'search_radius': geocoordSplit[2]}
return temp_dict
def searchTweets(query, twitter_count, language, geocoordinate, api):
tweets = api.search(q=query, count=twitter_count, lang=language, geocode=geocoordinate)
tweet_data = []
for tweet in tweets:
if not tweet.id:
continue
temp_dict = processTweet(tweet, geocoordinate)
tweet_data.append(temp_dict)
return tweet_data
def readKeys():
f = open("keys_tokens.txt", "r")
keys_tokens = eval(f.read())
#student_1 (Declan)
#student_2 (Janelle)
#Student_3 (Shuang)
#student_4 (JJ)
return keys_tokens
def readCityCoords():
f = open("Australian_cities_coordinates.txt", "r")
cityCoordsDict = eval(f.read())
cityList = cityCoordsDict.keys()
return cityCoordsDict, cityList
def setUser(keys_tokens, user_no):
total_users = len(keys_tokens)
user = user_no % total_users
consumer_key = keys_tokens[user][1]
consumer_secret= keys_tokens[user][2]
access_token = keys_tokens[user][3]
access_token_secret = keys_tokens[user][4]
return consumer_key, consumer_secret, access_token, access_token_secret
def createGridPoints(city, search_radius, cityCoordsDict):
interval = 0.05 #approx 5km
oglat = cityCoordsDict[city][0]
oglong = cityCoordsDict[city][1]
templong = copy.deepcopy(oglong)
latlong_list = []
for i in range(1,16):
for j in range(1,16):
templong += interval
tempcoord = str(oglat) +',' + str(templong) + ',' + str(search_radius) + 'km'
latlong_list.append(tempcoord)
oglat -=interval
templong = copy.deepcopy(oglong)
return latlong_list
def checkWait(student_no, keys_tokens):
total_users = len(keys_tokens)
if student_no % total_users == 0:
sleep(.5)
def harvestTweets(city, cityCoordsDict, keys_tokens, tweet_count, query, language):
tweet_df = | pd.DataFrame(columns=['tweet id','user id','text','lang','user location','user geo_enabled','coordinates','created_at','latlong','search_radius']) | pandas.DataFrame |
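# Hypothetical stand-alone sketch (not a continuation of the truncated harvestTweets function):
# collecting the per-tweet dicts produced by searchTweets above into a single DataFrame is simpler
# than appending row by row. The values below are invented.
import pandas as pd
tweet_batches = [[{'tweet id': 1, 'user id': 10, 'text': 'hello', 'lang': 'en',
                   'latlong': '-37.81,144.96', 'search_radius': '5km'}],
                 [{'tweet id': 2, 'user id': 11, 'text': 'hi', 'lang': 'en',
                   'latlong': '-33.87,151.21', 'search_radius': '5km'}]]
tweet_df = pd.DataFrame([tweet for batch in tweet_batches for tweet in batch])
print(tweet_df.shape)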
"""
Functions to prepare the data for
the components
"""
from collections import Counter
from datetime import datetime, timedelta
from typing import Any, Callable, Dict, List, Optional, Tuple
import pandas as pd
from dateutil import parser
from loguru import logger
from pandas import DataFrame
from openstats.client import Client
class Data:
"""
Class containing the methods to fetch data
"""
def __init__(self, client: Client):
self.client = client
# Use client's Levy config
self.config = self.client.config
def stars_data(self) -> Optional[DataFrame]:
"""
Extract information from stargazers.
Prepare an accumulative sum of stars by date
"""
today = datetime.today()
delta = today - self.client.start_date
try:
stars = self.client.get_all(
self.client.root
/ "repos"
/ self.client.owner
/ self.client.repo
/ "stargazers"
)
clean_stars = [
parser.parse(user["starred_at"]).strftime("%Y/%m/%d") for user in stars
]
star_counts = Counter(clean_stars)
acc = 0
for i in range(delta.days + 1):
day = self.client.start_date + timedelta(days=i)
day_str = day.strftime("%Y/%m/%d")
if not star_counts.get(day_str):
star_counts[day_str] = acc
sorted_dict = dict(sorted(star_counts.items()))
df = pd.DataFrame(
{"date": list(sorted_dict.keys()), "stars": list(sorted_dict.values())}
)
df["date"] = pd.to_datetime(df["date"])
# This resample groups by week. Let's keep grouping by day.
# df = df.resample("W", on="date")[["stars"]].sum()
df["stars"] = df["stars"].cumsum()
return df
except Exception as err: # pylint: disable=broad-except
logger.error("Error trying to retrieve stars data...")
logger.error(err)
return None
def health_data(self) -> Tuple[str, str]:
"""
Obtain the health % from the community profile
"""
profile_data = self.client.get(
self.client.root
/ "repos"
/ self.client.owner
/ self.client.repo
/ "community"
/ "profile"
).json()
percentage = profile_data.get("health_percentage", "Endpoint error")
description = profile_data.get("description", "Error fetching description")
return percentage, description
@staticmethod
def is_good_first_issue(issue: List[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
"""
Check if an issue is a good first issue
"""
if isinstance(issue, dict):
is_gfi = next(
iter(
label
for label in issue.get("labels")
if isinstance(label, dict)
and label.get("name") == "good first issue"
),
None,
)
return is_gfi
return None
@staticmethod
def is_support_issue(issue: List[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
"""
Check if an issue is a support issue
"""
if isinstance(issue, dict):
is_support = next(
iter(
label
for label in issue.get("labels")
if isinstance(label, dict) and label.get("name") == "support"
),
None,
)
return is_support
return None
def _issues_data(self, filter_fn: Callable) -> Tuple[List[dict], List[dict]]:
"""
Return issue data with callable filtering.
filter_fn should return True / False from a list of issues
"""
open_issues = self.client.get_all(
self.client.root / "repos" / self.client.owner / self.client.repo / "issues"
)
open_filtered_issues = [issue for issue in open_issues if filter_fn(issue)]
closed_issues = self.client.get_all(
self.client.root
/ "repos"
/ self.client.owner
/ self.client.repo
/ "issues",
"&state=closed",
)
closed_filtered_issues = [issue for issue in closed_issues if filter_fn(issue)]
return open_filtered_issues, closed_filtered_issues
def good_first_issues_data(self) -> Tuple[List[dict], List[dict]]:
"""
Analyze issues data for open and closed good
first issues.
"""
return self._issues_data(filter_fn=self.is_good_first_issue)
def support_issues_data(self) -> Tuple[List[dict], List[dict]]:
"""
Analyzes issues data for open and closed support issues
"""
return self._issues_data(filter_fn=self.is_support_issue)
def contributors_data(self):
"""
Get all project contributors.
Return them sorted by contributions
"""
data = self.client.get_all(
self.client.root
/ "repos"
/ self.client.owner
/ self.client.repo
/ "contributors"
)
sorted_contrib = sorted(data, key=lambda d: d["contributions"], reverse=True)
df = pd.DataFrame(sorted_contrib)
# df.reset_index(inplace=True)
# df.set_index("index", drop=False, inplace=True)
return df
def traffic_data(self):
"""
Cook traffic data and views
for the last 14 days
"""
clones = self.client.get_all(
self.client.root
/ "repos"
/ self.client.owner
/ self.client.repo
/ "traffic"
/ "clones"
).get("uniques")
views = self.client.get_all(
self.client.root
/ "repos"
/ self.client.owner
/ self.client.repo
/ "traffic"
/ "views",
option="&per=week",
).get("uniques")
return clones, views
def get_participation(self, owner: str, repo: str):
"""
Get all participation data for the last 52 weeks
as a reversed list
"""
return self.client.get(
self.client.root / "repos" / owner / repo / "stats" / "participation"
).json()["all"]
def competitors_data(self) -> DataFrame:
"""
Compare your project stats vs. a list
of competitors.
Return my activity a list of competitor's activity
"""
my_activity = {
self.client.repo: self.get_participation(
self.client.owner, self.client.repo
)
}
activity = {
competitor.repo: self.get_participation(competitor.owner, competitor.repo)
for competitor in self.config.competitors
}
        return pd.DataFrame({**my_activity, **activity})
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 11:41:44 2018
@author: MichaelEK
"""
import types
import pandas as pd
import numpy as np
import json
from pdsf import sflake as sf
from utils import split_months
def process_allo(param):
"""
"""
run_time_start = pd.Timestamp.today().strftime('%Y-%m-%d %H:%M:%S')
print(run_time_start)
#######################################
### Read in source data and update accela tables in ConsentsReporting db
print('--Reading in source data...')
## Make object to contain the source data
db = types.SimpleNamespace()
for t in param['misc']['AllocationProcessing']['tables']:
p = param['source data'][t]
print(p['table'])
if p['schema'] != 'public':
stmt = 'select {cols} from "{schema}"."{table}"'.format(schema=p['schema'], table=p['table'], cols=json.dumps(p['col_names'])[1:-1])
else:
stmt = 'select {cols} from "{table}"'.format(table=p['table'], cols=json.dumps(p['col_names'])[1:-1])
setattr(db, t, sf.read_table(p['username'], p['password'], p['account'], p['database'], p['schema'], stmt))
##################################################
### Sites
print('--Process Waps')
## takes
wap_allo1 = db.wap_allo.copy()
wap1 = wap_allo1['Wap'].unique()
waps = wap1[~pd.isnull(wap1)].copy()
## Check that all Waps exist in the USM sites table
usm_waps1 = db.waps[db.waps.isin(waps)].copy()
# usm_waps1[['NzTmX', 'NzTmY']] = usm_waps1[['NzTmX', 'NzTmY']].astype(int)
if len(wap1) != len(usm_waps1):
miss_waps = set(wap1).difference(set(usm_waps1.Wap))
print('Missing {} Waps in USM'.format(len(miss_waps)))
wap_allo1 = wap_allo1[~wap_allo1.Wap.isin(miss_waps)].copy()
##################################################
### Permit table
print('--Process Permits')
## Clean data
permits2 = db.permit.copy()
permits2['FromDate'] = pd.to_datetime(permits2['FromDate'], infer_datetime_format=True, errors='coerce')
    permits2['ToDate'] = pd.to_datetime(permits2['ToDate'], infer_datetime_format=True, errors='coerce')
import os
import logging
import pickle
from abc import ABC, abstractmethod
import pandas as pd
import numpy as np
from . import dtutil
from . import arguments
from amulog import common
from amulog import config
_logger = logging.getLogger(__package__)
SRCCLS_LOG = "log"
SRCCLS_SNMP = "snmp"
class EventDefinition(ABC):
_l_attr = ["source", "host", "group"]
def __init__(self, **kwargs):
for attr in self._l_attr:
setattr(self, attr, kwargs[attr])
@property
def _attribute_keys(self):
return self._l_attr
def key(self):
return None
@abstractmethod
def event(self) -> str:
raise NotImplementedError
@property
def identifier(self):
return self.__str__()
@property
def groups(self):
group = getattr(self, "group")
if group is None or group == "None":
return []
elif "|" in group:
            return group.split("|")
else:
return [group]
def all_attr(self, key):
return {getattr(self, key)}
def member_identifiers(self):
return [self.identifier]
def match(self, evdef):
if isinstance(evdef, MultipleEventDefinition):
return any([self.match(tmp_evdef)
for tmp_evdef in evdef.members])
else:
return self.identifier == evdef.identifier
def replaced_copy(self, **kwargs):
input_kwargs = {}
for attr in self._attribute_keys:
input_kwargs[attr] = getattr(self, attr)
input_kwargs.update(kwargs)
return type(self)(**input_kwargs)
class MultipleEventDefinition(EventDefinition):
_l_attr = []
def __init__(self, members, **kwargs):
super().__init__(**kwargs)
self._members = members
def __str__(self):
return "|".join([str(evdef) for evdef in self._members])
@property
def _attribute_keys(self):
return self._l_attr + ["_members"]
@property
def members(self):
return self._members
def update_members(self, members):
self._members = members
@property
def identifier(self):
return "|".join(sorted([evdef.identifier for evdef in self._members]))
def event(self):
return "|".join(sorted([evdef.event() for evdef in self._members]))
def all_attr(self, key):
return {getattr(evdef, key) for evdef in self._members}
def member_identifiers(self):
ret = []
for evdef in self.members:
ret += evdef.member_identifiers()
return ret
def match(self, evdef):
self_identifiers = set(self.member_identifiers())
if isinstance(evdef, MultipleEventDefinition):
given_identifiers = set(evdef.member_identifiers())
return len(self_identifiers & given_identifiers) > 0
else:
return evdef.identifier in self_identifiers
class EventDefinitionMap(object):
"""This class defines classified groups as "Event", and provide
interconvirsion functions between Event IDs and their
classifying criterions.
The definition of Event is saved as a nametuple EvDef.
Evdef has following attributes.
source (str):
key (int):
host (str):
label (str):
"""
def __init__(self):
self._emap = {} # key : eid, val : evdef
self._ermap = {} # key : evdef, val : eid
def __len__(self):
return len(self._emap)
def eids(self):
return self._emap.keys()
def _next_eid(self):
eid = len(self._emap)
while eid in self._emap:
eid += 1
else:
return eid
def add_evdef(self, evdef):
eid = self._next_eid()
self._emap[eid] = evdef
self._ermap[evdef.identifier] = eid
return eid
def has_eid(self, eid):
return eid in self._emap
def has_evdef(self, evdef):
return evdef.identifier in self._ermap
def evdef(self, eid):
return self._emap[eid]
def items(self):
return self._emap.items()
def get_eid(self, evdef):
return self._ermap[evdef.identifier]
def iter_eid(self):
return self._emap.keys()
def iter_evdef(self):
for eid in self.iter_eid():
yield self._emap[eid]
@staticmethod
def from_dict(mapping):
evmap = EventDefinitionMap()
evmap._emap = mapping
for eid, evdef in mapping.items():
evmap._ermap[evdef.identifier] = eid
return evmap
def dump(self, args):
fp = arguments.ArgumentManager.evdef_path(args)
obj = (self._emap, self._ermap)
with open(fp, "wb") as f:
pickle.dump(obj, f)
def load(self, args):
fp = arguments.ArgumentManager.evdef_path(args)
try:
with open(fp, "rb") as f:
obj = pickle.load(f)
self._emap, self._ermap = obj
except:
# compatibility
fp = arguments.ArgumentManager.evdef_path_old(args)
with open(fp, "rb") as f:
obj = pickle.load(f)
self._emap, self._ermap = obj
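# Hedged usage sketch: EventDefinition is abstract, so the toy subclass below only
# illustrates how a concrete evdef gets an id assigned by EventDefinitionMap and is
# looked up again; the class and attribute values are hypothetical and not used by the
# real loaders in this package.
class _ToyEventDefinition(EventDefinition):
    def __str__(self):
        return "{0}@{1}".format(self.source, self.host)

    def event(self):
        return self.source


def _example_evmap_roundtrip():
    evdef = _ToyEventDefinition(source=SRCCLS_LOG, host="host1", group=None)
    evmap = EventDefinitionMap()
    eid = evmap.add_evdef(evdef)
    assert evmap.get_eid(evdef) == eid
    return eid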
class AreaTest:
def __init__(self, conf):
self._arearule = conf["dag"]["area"]
self._areadict = config.GroupDef(conf["dag"]["area_def"])
if self._arearule == "all":
self._testfunc = self._test_all
elif self._arearule == "each":
self._testfunc = self._test_each
else:
self.areas = config.gettuple(conf, "dag", "area")
self._testfunc = self._test_ingroup
@staticmethod
def _test_all(area, host):
return True
@staticmethod
def _test_each(area, host):
return area == host
def _test_ingroup(self, area, host):
return self._areadict.ingroup(area, host)
def test(self, area, host):
return self._testfunc(area, host)
class EventDetail:
def __init__(self, conf, d_evloaders, load_cache=True, dump_cache=True):
self._conf = conf
self._d_el = d_evloaders
self._load_cache = load_cache
self._dump_cache = dump_cache
self._head = int(conf["dag"]["event_detail_head"])
self._foot = int(conf["dag"]["event_detail_foot"])
@staticmethod
def output_format(data):
return "{0} {1}: {2}".format(data[0], data[1], data[2])
@staticmethod
def cache_name(evdef):
return "cache_detail_" + evdef.identifier
def cache_path(self, args, evdef):
return arguments.ArgumentManager.unit_cache_path(
self._conf, args, self.cache_name(evdef)
)
def has_cache(self, args, evdef):
path = self.cache_path(args, evdef)
return os.path.exists(path)
def load(self, args, evdef):
path = self.cache_path(args, evdef)
with open(path, "r") as f:
buf = f.read()
return buf
def dump(self, args, evdef, buf):
path = self.cache_path(args, evdef)
with open(path, "w") as f:
f.write(buf)
def get_detail(self, args, evdef, evdef_org=None):
if self._load_cache and self.has_cache(args, evdef):
buf = self.load(args, evdef)
else:
conf, dt_range, area = args
el = self._d_el[evdef.source]
data = list(el.details(evdef, dt_range, evdef_org=evdef_org))
buf = common.show_repr(
data, self._head, self._foot, indent=0,
strfunc=self.output_format
)
if self._dump_cache:
self.dump(args, evdef, buf)
return buf
def init_evloader(conf, src):
if src == SRCCLS_LOG:
from .source import evgen_log
return evgen_log.LogEventLoader(conf)
elif src == SRCCLS_SNMP:
from .source import evgen_snmp
return evgen_snmp.SNMPEventLoader(conf)
else:
raise NotImplementedError
def init_evloaders(conf):
return {src: init_evloader(conf, src)
for src in config.getlist(conf, "dag", "source")}
def load_event(measure, tags, dt_range, ci_bin_size, ci_bin_diff,
method, el=None):
if method == "sequential":
df = el.load(measure, tags, dt_range, ci_bin_size)
if df is None or df[el.fields[0]].sum() == 0:
_logger.debug("{0} is empty".format((measure, tags)))
return None
elif method == "slide":
tmp_dt_range = (dt_range[0],
max(dt_range[1],
dt_range[1] + (ci_bin_size - ci_bin_diff)))
items = sorted(list(el.load_items(measure, tags, tmp_dt_range)),
key=lambda x: x[0])
if len(items) == 0:
_logger.debug("{0} is empty".format((measure, tags)))
return None
l_dt = [e[0] for e in items]
l_array = np.vstack([e[1] for e in items])[:, 0]
data = dtutil.discretize_slide(l_dt, dt_range,
ci_bin_diff, ci_bin_size,
l_dt_values=l_array)
l_dt_label = dtutil.range_dt(dt_range[0], dt_range[1], ci_bin_diff)
dtindex = pd.to_datetime(l_dt_label)
df = pd.DataFrame(data, index=dtindex)
elif method == "radius":
tmp_dt_range = (min(dt_range[0],
dt_range[0] - 0.5 * (ci_bin_size - ci_bin_diff)),
max(dt_range[1],
dt_range[1] + 0.5 * (ci_bin_size - ci_bin_diff)))
items = sorted(list(el.load_items(measure, tags, tmp_dt_range)),
key=lambda x: x[0])
if len(items) == 0:
_logger.debug("{0} is empty".format((measure, tags)))
return None
l_dt = [e[0] for e in items]
l_array = np.vstack([e[1] for e in items])[:, 0]
data = dtutil.discretize_radius(l_dt, dt_range, ci_bin_diff,
0.5 * ci_bin_size,
l_dt_values=l_array)
l_dt_label = dtutil.range_dt(dt_range[0], dt_range[1], ci_bin_diff)
dtindex = pd.to_datetime(l_dt_label)
df = pd.DataFrame(data, index=dtindex)
else:
raise NotImplementedError
return df
def load_event_log_all(conf, dt_range, area, d_el=None):
if d_el is None:
from .source import evgen_log
el = evgen_log.LogEventLoader(conf)
else:
el = d_el[SRCCLS_LOG]
areatest = AreaTest(conf)
method = conf.get("dag", "ci_bin_method")
ci_bin_size = config.getdur(conf, "dag", "ci_bin_size")
ci_bin_diff = config.getdur(conf, "dag", "ci_bin_diff")
for evdef in el.iter_evdef(dt_range):
measure, tags = evdef.series()
if not areatest.test(area, tags["host"]):
continue
df = load_event(measure, tags, dt_range, ci_bin_size, ci_bin_diff,
method, el)
if df is not None:
yield evdef, df
def load_event_snmp_all(conf, dt_range, area, d_el=None):
if d_el is None:
from .source import evgen_snmp
el = evgen_snmp.SNMPEventLoader(conf)
else:
el = d_el["snmp"]
areatest = AreaTest(conf)
method = conf.get("dag", "ci_bin_method")
ci_bin_size = config.getdur(conf, "dag", "ci_bin_size")
ci_bin_diff = config.getdur(conf, "dag", "ci_bin_diff")
l_feature_name = config.getlist(conf, "dag", "snmp_features")
if len(l_feature_name) == 0:
l_feature_name = el.all_feature()
for evdef in el.iter_evdef(l_feature_name):
measure, tags = evdef.series()
if not areatest.test(area, tags["host"]):
continue
df = load_event(measure, tags, dt_range, ci_bin_size, ci_bin_diff,
method, el)
if df is not None:
yield evdef, df
def load_event_all(sources, conf, dt_range, area):
for src in sources:
if src == SRCCLS_LOG:
for evdef, df in load_event_log_all(conf, dt_range, area):
yield evdef, df
elif src == SRCCLS_SNMP:
for evdef, df in load_event_snmp_all(conf, dt_range, area):
yield evdef, df
else:
raise NotImplementedError
def makeinput(conf, dt_range, area):
evmap = EventDefinitionMap()
evlist = []
sources = config.getlist(conf, "dag", "source")
for evdef, df in load_event_all(sources, conf, dt_range, area):
eid = evmap.add_evdef(evdef)
df.columns = [eid, ]
evlist.append(df)
msg = "loaded event {0} {1} (sum: {2})".format(eid, evmap.evdef(eid),
df[eid].sum())
_logger.debug(msg)
if len(evlist) == 0:
_logger.warning("No data loaded")
return None, None
merge_sync = conf.getboolean("dag", "merge_syncevent")
if merge_sync:
merge_sync_rules = config.getlist(conf, "dag", "merge_syncevent_rules")
evlist, evmap = merge_sync_event(evlist, evmap, merge_sync_rules)
input_df = pd.concat(evlist, axis=1)
return input_df, evmap
def merge_sync_event(evlist, evmap, rules):
from collections import defaultdict
hashmap = defaultdict(list)
# make clusters that have completely same values
for old_eid, df in enumerate(evlist):
# old_eid = df.columns[0]
evdef = evmap.evdef(old_eid)
value_key = tuple(df.iloc[:, 0])
tmp_key = [value_key, ]
if "source" in rules:
tmp_key.append(evdef.source)
if "host" in rules:
tmp_key.append(evdef.host)
if "group" in rules:
tmp_key.append(evdef.group)
key = tuple(tmp_key)
hashmap[key].append(old_eid)
new_evlist = []
new_evmap = EventDefinitionMap()
for l_old_eid in hashmap.values():
l_evdef = [evmap.evdef(eid) for eid in l_old_eid]
new_evdef = MultipleEventDefinition(l_evdef)
if "source" in rules:
new_evdef.source = l_evdef[0].source
if "host" in rules:
new_evdef.host = l_evdef[0].host
if "group" in rules:
new_evdef.group = l_evdef[0].group
new_eid = new_evmap.add_evdef(new_evdef)
new_df = evlist[l_old_eid[0]]
new_df.columns = [new_eid, ]
new_evlist.append(new_df)
_logger.info("merge-syncevent {0} -> {1}".format(len(evmap), len(new_evmap)))
return new_evlist, new_evmap
def _load_merged_event(conf, dt_range, area, evdef, areatest,
ci_bin_size, ci_bin_diff, ci_bin_method, d_el):
if isinstance(evdef, MultipleEventDefinition):
l_df = []
for member in evdef.members:
tmp_df = _load_merged_event(conf, dt_range, area,
member, areatest,
ci_bin_size, ci_bin_diff,
ci_bin_method, d_el)
if tmp_df is not None:
tmp_df.columns = ["tmp"]
l_df.append(tmp_df)
if len(l_df) == 0:
return None
ret = l_df[0]
for tmp_df in l_df[1:]:
ret = ret.add(tmp_df)
return ret
else:
measure, tags = evdef.series()
if not areatest.test(area, tags["host"]):
return None
df = load_event(measure, tags, dt_range, ci_bin_size, ci_bin_diff,
ci_bin_method, el=d_el[evdef.source])
return df
def load_merged_events(conf, dt_range, area, l_evdef, d_el):
"""for visualization"""
areatest = AreaTest(conf)
ci_bin_method = conf.get("dag", "ci_bin_method")
ci_bin_size = config.getdur(conf, "dag", "ci_bin_size")
ci_bin_diff = config.getdur(conf, "dag", "ci_bin_diff")
l_df = []
for idx, evdef in enumerate(l_evdef):
tmp_df = _load_merged_event(conf, dt_range, area, evdef, areatest,
ci_bin_size, ci_bin_diff,
ci_bin_method, d_el)
if tmp_df is None:
raise ValueError("no time-series for {0}".format(evdef))
tmp_df.columns = [idx]
l_df.append(tmp_df)
    return pd.concat(l_df, axis=1)
import pytest
from pandas import (
DataFrame,
Index,
Series,
)
import pandas._testing as tm
@pytest.mark.parametrize("n, frac", [(2, None), (None, 0.2)])
def test_groupby_sample_balanced_groups_shape(n, frac):
values = [1] * 10 + [2] * 10
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(n=n, frac=frac)
values = [1] * 2 + [2] * 2
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=n, frac=frac)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_unbalanced_groups_shape():
values = [1] * 10 + [2] * 20
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(n=5)
values = [1] * 5 + [2] * 5
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=5)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_index_value_spans_groups():
values = [1] * 3 + [2] * 3
df = DataFrame({"a": values, "b": values}, index=[1, 2, 2, 2, 2, 2])
result = df.groupby("a").sample(n=2)
values = [1] * 2 + [2] * 2
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=2)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_n_and_frac_raises():
df = DataFrame({"a": [1, 2], "b": [1, 2]})
msg = "Please enter a value for `frac` OR `n`, not both"
with pytest.raises(ValueError, match=msg):
df.groupby("a").sample(n=1, frac=1.0)
with pytest.raises(ValueError, match=msg):
df.groupby("a")["b"].sample(n=1, frac=1.0)
def test_groupby_sample_frac_gt_one_without_replacement_raises():
df = DataFrame({"a": [1, 2], "b": [1, 2]})
msg = "Replace has to be set to `True` when upsampling the population `frac` > 1."
with pytest.raises(ValueError, match=msg):
df.groupby("a").sample(frac=1.5, replace=False)
with pytest.raises(ValueError, match=msg):
df.groupby("a")["b"].sample(frac=1.5, replace=False)
@pytest.mark.parametrize("n", [-1, 1.5])
def test_groupby_sample_invalid_n_raises(n):
df = DataFrame({"a": [1, 2], "b": [1, 2]})
if n < 0:
msg = "Please provide positive value"
else:
msg = "Only integers accepted as `n` values"
with pytest.raises(ValueError, match=msg):
df.groupby("a").sample(n=n)
with pytest.raises(ValueError, match=msg):
df.groupby("a")["b"].sample(n=n)
def test_groupby_sample_oversample():
values = [1] * 10 + [2] * 10
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(frac=2.0, replace=True)
values = [1] * 20 + [2] * 20
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(frac=2.0, replace=True)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_without_n_or_frac():
values = [1] * 10 + [2] * 10
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(n=None, frac=None)
expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=None, frac=None)
expected = Series([1, 2], name="b", index=result.index)
    tm.assert_series_equal(result, expected)
import argparse
import sys, os
import numpy as np
import pandas as pd
import datetime, time
import logging
import traceback
from sqlalchemy import select, Table, Column
from semutils.logging.setup_logger import setup_logger
setup_logger('download_data.log')
from semutils.messaging.Slack import Slack
from semutils.db_access.AccessReference import AccessReference
from semutils.db_access.AccessSQL import Access_SQL_DB
from semutils.db_access.MasterConfig import MasterConfig
SQLHost = MasterConfig['prod']['sql_ref_host']
SignalsModel = '2018-06-21'
DataDir = 'data_prod'
RemoteDir = '/home/web/projects/semwebsite/semapp'
EnterLong = 0.55
EnterShort = 0.45
def download_sec_master_and_pricing(**kwargs):
ref = AccessReference(sql_host = SQLHost)
# securities master
logging.info('Downloading securities master')
sm = ref.get_sec_master()
sm.to_parquet(os.path.join(DataDir, 'sec_master.parquet'))
# get indices
benchmarks = [('SP500','SP50'),('SP400','MID'),('SP600','SML')]
for b,bid in benchmarks:
b_data = ref.get_index_prices(benchmark_id = bid)
b_data.to_parquet(os.path.join(DataDir, b + '.parquet'))
# prices
logging.info('Downloading pricing data')
cols = ['sec_id', 'm_ticker', 'data_date', 'adj_close', 'close']
filepath = os.path.join(DataDir, 'eod_prices.hdf')
if os.path.exists(filepath) and (not kwargs['clear_existing']):
        prices = pd.read_hdf(filepath, 'table')
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.metrics import r2_score, confusion_matrix, classification_report
from keras.wrappers.scikit_learn import KerasClassifier
# load the dataset at ../data/HR_comma_sep.csv, inspect it with `.head()`, `.info()` and `.describe()`.
df = pd.read_csv('../data/HR_comma_sep.csv')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 8 11:39:33 2020
@author: cristian
"""
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import os
import itertools
from gurobipy import *
from matplotlib import cm
from time import time
from scripts.utils import save_solution
#from shapely.geometry import Point, shape
#from numpy.random import uniform
#from collections import Counter
"""
DistanceBetweenNodes: Given a set of coordinates XY, computes the euclidean distance between all nodes
"""
def DistanceBetweenNodes(XY):
n = XY.shape[0]
# Distance between points
r = np.zeros((n, n))
for i in range(n):
for j in range(i,n):
form = np.around(np.sqrt((XY[i,0] - XY[j,0])**2 + (XY[i,1] - XY[j,1])**2))
r[i,j] = form
r[j,i] = form
return r
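# Hedged alternative sketch: the same rounded Euclidean distance matrix can be obtained
# without the double loop via NumPy broadcasting; this helper is illustrative only and
# is not called elsewhere in this script.
def DistanceBetweenNodesVectorized(XY):
    diff = XY[:, None, :] - XY[None, :, :]
    return np.around(np.sqrt((diff ** 2).sum(axis=2)))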
"""
ReadData: function for getting data from a .xlsx file
Returns a dictionary with all the data extracted from the .xlsx file
"""
def ReadData(datadir, file):
# Nodes
df = pd.read_excel(os.path.join(datadir, file), sheet_name = 'Nodes')
XY = df[['X','Y']].values
F = df[df['Type'] == 'F'].index.tolist()
D = df[df['Type'] == 'D'].index.tolist()
S = df[df['Type'] == 'S'].index.tolist()
C = df[df['Type'] == 'C'].index.tolist()
LEZ = dict(zip(df.index.tolist(),df['LEZ'].tolist()))
city = dict(zip(df.index.tolist(),df['City'].tolist()))
# Products
df = pd.read_excel(os.path.join(datadir, file), sheet_name = 'Products')
P = df['Product'].tolist()
nu = df['Volume'].tolist()
omega = df['Weight'].tolist()
omegaeff = df['Weight eff'].tolist()
P_f = {}
for f in F:
P_f[f] = df[df['Firm'] == f]['Product'].tolist()
# Demands
df = pd.read_excel(os.path.join(datadir, file), sheet_name = 'Demands')
DEM = {}
for c in C:
DEM[c] = df[df['Customer'] == c]['Demand'].tolist()
# Depots cap.
df = pd.read_excel(os.path.join(datadir, file), sheet_name = 'Depots cap.')
Lambd = {}
Omega = {}
epsil = {}
for i in range(df.shape[0]):
d = int(df['Depot'].iloc[i])
Lambd[d] = df['Lambd'].iloc[i]
Omega[d] = df['Omega'].iloc[i]
epsil[d] = df['epsil'].iloc[i]
# Vehicles
df = pd.read_excel(os.path.join(datadir, file), sheet_name = 'Vehicles')
K = df['Vehicle'].tolist()
V_i = {}
Phi = {}
Theta = {}
rho = {}
delta = {}
gamma = {}
vehictype = {}
DcupS = D+S
for d in DcupS:
V_i[d] = df[df['Depot'] == d]['Vehicle'].tolist()
for k in V_i[d]:
Phi[k] = df[df['Vehicle'] == k]['Phi'].sum()
Theta[k] = df[df['Vehicle'] == k]['Theta'].sum()
rho[k] = df[df['Vehicle'] == k]['rho'].sum()
delta[k] = df[df['Vehicle'] == k]['delta'].sum()
gamma[k] = df[df['Vehicle'] == k]['gamma'].sum()
vehictype[k] = df[df['Vehicle'] == k]['VehicleType'].iloc[0]
r = DistanceBetweenNodes(XY)
"""
DATA DICTIONARY
"""
data = {}
data['XY'] = XY
data['F'] = F
data['D'] = D
data['S'] = S
data['C'] = C
data['P'] = P
data['P_f'] = P_f
data['K'] = K
data['V_i'] = V_i
data['DEM'] = DEM
data['Lambd'] = Lambd
data['Omega'] = Omega
data['Phi'] = Phi
data['Theta'] = Theta
data['nu'] = nu
data['omega'] = omega
data['omegaeff'] = omegaeff
data['rho'] = rho
data['delta'] = delta
data['gamma'] = gamma
data['epsil'] = epsil
data['r'] = r
data['LEZ'] = LEZ
data['vehictype'] = vehictype
data['city'] = city
A = np.ones((len(F+D+S+C), len(K)), dtype=int)
for s in S:
for k in V_i[s]:
for s1 in S:
if s1 != s:
# Bikes aren't shared between satellites
A[s1,k] = 0
for n in F+D+C:
# Bikes only visit nodes from the same city
if vehictype[k] == 'bike' and city[s] != city[n]:
A[n,k] = 0
# Non eco vehicles aren't allowed in LEZ points
if vehictype[k] != 'bike' and LEZ[n] > 0:
A[n,k] = 0
for d in D:
for k in V_i[d]:
for d1 in D:
if d1 != d:
# Vehicles aren't shared between delivering
A[d1,k] = 0
for n in F+S+C:
# Non eco vehicles aren't allowed in LEZ points
if vehictype[k] != 'bike' and LEZ[n] > 0:
A[n,k] = 0
data['A'] = A
return data
"""
MATH HEURISTIC FUNCTIONS
Here are all the steps for solving the multi-echelon multi-vehicle problem
"""
def GreedyRoutingForServingCost(d0, W0, NodesToVisit, WeightNodes, gamma_k, rho_k, r):
# This function estimates the serving cost via greedy routing
VisitedNodes = [d0]
PendingNodes = [n for n in NodesToVisit]
TotalCost = 0
CumulatedWeight = W0
# Initial case: select the first node to visit
i = d0
ArcCost = np.inf
for j in PendingNodes:
CurrentCost = r[i,j]*(gamma_k*CumulatedWeight + rho_k)
if CurrentCost < ArcCost:
ArcCost = CurrentCost
j_ = j
TotalCost = TotalCost + ArcCost
VisitedNodes.append(j_)
CumulatedWeight = CumulatedWeight + WeightNodes[j_]
PendingNodes = [n for n in PendingNodes if n not in VisitedNodes]
i = j_
# rest of the cases
while PendingNodes:
i = j_
ArcCost = np.inf
for j in PendingNodes:
CurrentCost = r[i,j]*(gamma_k*CumulatedWeight + rho_k)
if CurrentCost < ArcCost:
ArcCost = CurrentCost
j_ = j
TotalCost = TotalCost + ArcCost
VisitedNodes.append(j_)
CumulatedWeight = CumulatedWeight + WeightNodes[j_]
PendingNodes = [n for n in PendingNodes if n not in VisitedNodes]
# return a tuple with the last node visited and the total cost
return j_, TotalCost
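# Hedged toy example (all numbers are illustrative, not taken from any instance file):
# estimates the serving cost of visiting nodes 1 and 2 from depot 0 with an initial
# load of 10, returning the last node visited and the accumulated cost.
def _example_greedy_serving_cost():
    XY_toy = np.array([[0.0, 0.0], [0.0, 3.0], [4.0, 0.0]])
    r_toy = DistanceBetweenNodes(XY_toy)
    weight_nodes = {1: 2.0, 2: 5.0}  # weight the vehicle picks up at each visited node
    last_node, cost = GreedyRoutingForServingCost(0, 10.0, [1, 2], weight_nodes,
                                                  gamma_k=0.1, rho_k=1.0, r=r_toy)
    return last_node, cost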
def GetMinimalLoadCostF1(r, i, gamma, Weight_cf, rho, FminFw, D, V_i):
loadcostf1 = 0
# for f in FminFw:
# gamma_kf = max([gamma[v] for v in V_i[f]])
# rho_kf = max([rho[v] for v in V_i[f]])
# cost_f = r[f,i]*(gamma_kf*Weight_cf[f] + rho_kf)
# cost_d = np.inf
# for d in D:
# gamma_kd = max([gamma[v] for v in V_i[d]])
# rho_kd = max([rho[v] for v in V_i[d]])
# cost_d_ = r[f,d]*(gamma_kf*Weight_cf[f] + rho_kf) + r[d,i]*(gamma_kd*Weight_cf[f] + rho_kd)
# if cost_d_ < cost_d:
# cost_d = cost_d_
# loadcostf1 = loadcostf1 + min(cost_f, cost_d)
return loadcostf1
def GetBestDeliveringCost(r, i, gamma, Weight_cf, rho, FirmsToVisit, D, V_i):
cost_d = np.inf
for d in D:
gamma_kd = max([gamma[v] for v in V_i[d]])
rho_kd = max([rho[v] for v in V_i[d]])
f0, cost_d_ = GreedyRoutingForServingCost(d, 0, FirmsToVisit, Weight_cf, gamma_kd, rho_kd, r)
cost_d_ = cost_d_ + r[f0,i]*(sum([gamma_kd*Weight_cf[f] for f in FirmsToVisit]) + rho_kd)
if cost_d_ < cost_d:
cost_d = cost_d_
return cost_d
def Inter(list1, list2):
return [i for i in list1 if i in list2]
def GetFeasibleCombinationsForVehicles(minlen, nodes, Theta, Phi, WeightClient, VolClient, banned):
result = []
for i in range(len(nodes), minlen, -1):
for seq in itertools.combinations(nodes, i):
if sum([WeightClient[c] for c in seq]) <= Theta and sum([VolClient[c] for c in seq]) <= Phi:
result.append(list(seq))
return [r for r in result if not Inter(r,banned)]
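# Hedged toy example (capacities and client sizes are illustrative): with a weight cap
# of 6 and a volume cap of 3, only the pair [1, 2] fits, so it is the single feasible
# subset longer than minlen = 1.
def _example_feasible_combinations():
    weights = {1: 2, 2: 3, 3: 8}
    volumes = {1: 1, 2: 1, 3: 5}
    return GetFeasibleCombinationsForVehicles(1, [1, 2, 3], 6, 3, weights, volumes, [])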
def GetBestListOfNodes(result, VolClient, WeightClient, banned):
prod = 0
bestlist = []
for l in result:
if l not in banned:
vol_l = sum([VolClient[c] for c in l])
weight_l = sum([WeightClient[c] for c in l])
if vol_l*weight_l > prod:
prod = vol_l*weight_l
bestlist = l
return bestlist
def GetRoutingList(k,d0,N,w_final):
routing = []
test = int(sum([w_final[i,j,k] for i in N for j in N]))
if test > 2:
routing_list = [d0]
j = int(sum([w_final[d0,l,k]*l for l in N]))
routing.append((d0,j))
routing_list.append(j)
while j != d0:
i = j
j = int(sum([w_final[i,l,k]*l for l in N]))
routing_list.append(j)
routing.append((i,j))
elif test == 2:
j = int(sum([w_final[d0,l,k]*l for l in N]))
routing = [(d0,j), (j,d0)]
routing_list = [d0, j]
else:
routing = []
routing_list = []
##print('empty route')
return routing, routing_list
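# Hedged toy example: rebuilds the tour 0 -> 2 -> 1 -> 0 for vehicle k = 0 from an
# arc-usage dictionary shaped like the w_final values produced by the routing models.
def _example_get_routing_list():
    N_toy = [0, 1, 2]
    w_toy = {(i, j, 0): 0 for i in N_toy for j in N_toy}
    w_toy[0, 2, 0] = 1
    w_toy[2, 1, 0] = 1
    w_toy[1, 0, 0] = 1
    return GetRoutingList(0, 0, N_toy, w_toy)  # -> ([(0, 2), (2, 1), (1, 0)], [0, 2, 1, 0])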
def CreateSetE(DEM, F, C, P_f):
DictFirmCl = {}
for c in C:
listdem = []
for f in F:
listdem.append(min(sum([DEM[c][p] for p in P_f[f]]),1))
DictFirmCl[c] = listdem
listdem = []
for key in DictFirmCl.keys():
if DictFirmCl[key] not in listdem:
listdem.append(DictFirmCl[key])
DemVecCl = {}
for l in range(len(listdem)):
dem = listdem[l]
DemVecCl[l] = [c for c in DictFirmCl.keys() if DictFirmCl[c] == dem]
E_c = {}
for key in DemVecCl.keys():
l = DemVecCl[key]
if len(l) % 2 != 0:
l = l[:-1]
for c in l:
E_c[c] = [e for e in l if e != c]
return E_c
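# Hedged toy example (demands are illustrative): clients 3 and 4 both order only from
# firm 0, so CreateSetE pairs them as mutual exchange candidates, while client 5
# (firm 1 only) has no partner with the same pattern and is dropped.
def _example_create_set_e():
    P_f_toy = {0: [0], 1: [1]}
    DEM_toy = {3: [1, 0], 4: [2, 0], 5: [0, 1]}
    return CreateSetE(DEM_toy, [0, 1], [3, 4, 5], P_f_toy)  # -> {3: [4], 4: [3]}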
def ConstructorForRouting(dictclass, d0, k, m_opt, x_opt, data):
# Unpack data
XY = data['XY']
F = data['F']
D = data['D']
S = data['S']
C = data['C']
P = data['P']
P_f = data['P_f']
K = data['K']
V_i = data['V_i']
DEM = data['DEM']
Lambd = data['Lambd']
Omega = data['Omega']
Phi = data['Phi']
Theta = data['Theta']
nu = data['nu']
omega = data['omega']
omegaeff = data['omegaeff']
rho = data['rho']
delta = data['delta']
gamma = data['gamma']
r = data['r']
N = F+D+S+C
N_ = []
DEM_ = {}
Q0 = []
if dictclass[d0] != 'D':
for p in P:
valid_keys = [(p,j,k) for j in N if (p,j,k) in m_opt.keys()]
q0_ = int(sum([m_opt[t] for t in valid_keys]))
Q0.append(q0_)
else:
for p in P:
valid_keys = [(p,j,k) for j in N if (p,j,k) in m_opt.keys()]
valid_keys_f = [(p,f,k) for f in F if (p,f,k) in x_opt.keys()]
q0_ = int(sum([m_opt[t] for t in valid_keys]) - sum([x_opt[t] for t in valid_keys_f]))
Q0.append(q0_)
# if dictclass[d0] == 'F':
# Q0 = [int(sum([m_opt[p,j,k] for j in N])) for p in P]
# else:
# Q0 = [int(sum([(m_opt[p,j,k])for j in N]) - sum([(x_opt[p,f,k])for f in F])) for p in P]
Q0 = [max(q,0) for q in Q0]
N_.append(d0)
Nmin_i = [n for n in N if n != d0]
DEM_[d0] = [int(m_opt.get((p,d0,k), 0)) for p in P]
for j in Nmin_i:
if dictclass[j] != 'F':
tot = sum([m_opt.get((p,j,k), 0) for p in P])
else:
tot = sum([x_opt.get((p,j,k),0) for p in P])
if tot > 0:
N_.append(j)
if dictclass[j] != 'F':
DEM_[j] = [int(m_opt.get((p,j,k),0)) for p in P]
else:
DEM_[j] = [-int(x_opt.get((p,j,k),0)) for p in P]
F_ = [f for f in F if f in N_]
D_ = [d for d in D if d in N_]
S_ = [s for s in S if s in N_]
C_ = [c for c in C if c in N_]
data_routing = {'Q0' : Q0,
'DEM': DEM_,
'N': N_,
'F' : F_,
'D': D_,
'S': S_,
'C': C_,
'P' : P,
'Phi' : Phi[k],
'Theta': Theta[k],
'nu' : nu,
'omega': omega,
'omegaeff': omegaeff,
'd0' : d0,
'gamma': gamma[k],
'rho' : rho[k],
'r' : r}
return data_routing
def FlowRouting(data_routing):
# Unpacking data
F = data_routing['F']
D = data_routing['D']
S = data_routing['S']
C = data_routing['C']
P = data_routing['P']
Phi = data_routing['Phi']
Theta = data_routing['Theta']
nu = data_routing['nu']
omega = data_routing['omega']
omegaeff = data_routing['omegaeff']
gamma = data_routing['gamma']
DEM = data_routing['DEM']
d0 = data_routing['d0']
Q0 = data_routing['Q0']
rho = data_routing['rho']
r = data_routing['r']
# Auxiliary sets
N = F+D+S+C
NminC = F+D+S
NminF = D+S+C
Nmind0 = [n for n in N if n != d0]
Nmind0_i = {}
ScupCmind0 = [i for i in S+C if i != d0]
for i in Nmind0:
Nmind0_i[i] = [j for j in Nmind0 if j != i]
# Consolidation of weight and volume
Weight = {}
Volume = {}
WeightE = {}
for i in N:
try:
Weight[i] = sum([DEM[i][p]*omega[p] for p in P])
except:
Weight[i] = 0
try:
WeightE[i] = sum([DEM[i][p]*omegaeff[p] for p in P])
except:
WeightE[i] = 0
try:
Volume[i] = sum([DEM[i][p]*nu[p] for p in P])
except:
Volume[i] = 0
#print(Q0)
W0 = sum([Q0[p]*omega[p] for p in P])
W0e = sum([Q0[p]*omegaeff[p] for p in P])
##print('W0 = ', W0)
V0 = sum([Q0[p]*nu[p] for p in P])
#print('V0 = ', V0, " Vol cap = ", Phi)
#print('W0 = ', W0, " Weight cap = ", Theta)
#print('W0 effective = ', W0e)
# #print('N = ', N)
# #print('Nmind0 = ', Nmind0)
# Model start
model = Model()
model.setParam('OutputFlag', 0)
# Decision variables
q = {}
for i in N:
for j in N:
q[i,j] = model.addVar(vtype = GRB.CONTINUOUS, name = 'q[%s,%s]' % (i,j))
qe = {}
for i in N:
for j in N:
qe[i,j] = model.addVar(vtype = GRB.CONTINUOUS, name = 'q[%s,%s]' % (i,j))
v = {}
for i in N:
for j in N:
v[i,j] = model.addVar(vtype = GRB.CONTINUOUS, name = 'v[%s,%s]' % (i,j))
w = {}
for i in N:
for j in N:
w[i,j] = model.addVar(vtype = GRB.BINARY, name = 'w[%s,%s]' % (i,j))
e = {}
for i in Nmind0:
e[i] = model.addVar(vtype = GRB.INTEGER, name = 'e[%s]' % i)
# Aux
fc = model.addVar(vtype = GRB.CONTINUOUS, name = 'fc')
ac = model.addVar(vtype = GRB.CONTINUOUS, name = 'ac')
# Constraints
# Flow
model.addConstrs(
quicksum(q[i,j] for i in N) - quicksum(q[j,l] for l in N) == Weight[j] for j in Nmind0
)
model.addConstrs(
quicksum(qe[i,j] for i in N) - quicksum(qe[j,l] for l in N) == WeightE[j] for j in Nmind0
)
model.addConstrs(
quicksum(v[i,j] for i in N) - quicksum(v[j,l] for l in N) == Volume[j] for j in Nmind0
)
model.addConstrs(
quicksum(w[i,j] for i in N) == quicksum(w[j,l] for l in N) for j in N
)
model.addConstrs(
q[i,j] <= Theta*w[i,j] for i in N for j in N
)
model.addConstrs(
qe[i,j] <= 2*(Theta + Phi)*w[i,j] for i in N for j in N
)
model.addConstrs(
v[i,j] <= Phi*w[i,j] for i in N for j in N
)
# Out
model.addConstr(
quicksum(q[d0,j] for j in N) == W0
)
model.addConstr(
quicksum(qe[d0,j] for j in N) == W0e
)
model.addConstr(
quicksum(v[d0,j] for j in N) == V0
)
# Back to depot OK with this one
model.addConstr(
quicksum(q[i,d0] for i in N) == Weight[d0]
)
model.addConstr(
quicksum(qe[i,d0] for i in N) == WeightE[d0]
)
model.addConstr(
quicksum(v[i,d0] for i in N) == Volume[d0]
)
# Node visiting OK with this one
model.addConstrs(
quicksum(w[i,j] for i in N) == 1 for j in N
)
# Node leaving OK with this one
model.addConstrs(
quicksum(w[i,j] for j in N) == 1 for i in N
)
# TMZ
model.addConstrs(
e[i] - e[j] + len(N)*w[i,j] <= len(N) - 1 for i in Nmind0 for j in Nmind0
)
model.addConstrs(
e[i] >= 0 for i in Nmind0
)
Fmind0 = [f for f in F if f != d0]
model.addConstrs(
e[i] >= e[f] for i in ScupCmind0 for f in Fmind0
)
# Logic
model.addConstrs(
q[i,i] == 0 for i in N
)
# Logic
model.addConstrs(
qe[i,i] == 0 for i in N
)
model.addConstrs(
v[i,i] == 0 for i in N
)
model.addConstrs(
w[i,i] == 0 for i in N
)
# Capacity and arc utilization
model.addConstr(
fc == quicksum(quicksum(qe[i,j]*gamma*r[i,j] for i in N) for j in N)
)
model.addConstr(
ac == quicksum(quicksum(w[i,j]*r[i,j]*rho for i in N) for j in N)
)
model.update()
model.__data = qe, q, w, v
model.setObjective(fc + ac,
GRB.MINIMIZE)
model.update()
return model
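# Hedged usage sketch: mirrors how MultiEchelonRouting below solves the single-vehicle
# flow model and reads the arc-usage variables back. data_routing is assumed to be a
# dictionary with the same keys as the one built by ConstructorForRouting.
def _example_solve_flow_routing(data_routing):
    model = FlowRouting(data_routing)
    model.optimize()
    qe, q, w, v = model.__data
    w_opt = model.getAttr('x', w)
    # arcs actually used by the vehicle in the optimal tour
    return [(i, j) for (i, j) in w_opt if w_opt[i, j] > 0.5]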
"""STEP 4: Vehicle routing"""
def MultiEchelonRouting(data, x_opt, y_opt, m_opt, z_opt, u_opt):
# Unpacking data
XY = data['XY']
F = data['F']
D = data['D']
S = data['S']
C = data['C']
P = data['P']
P_f = data['P_f']
K = data['K']
V_i = data['V_i']
DEM = data['DEM']
Lambd = data['Lambd']
Omega = data['Omega']
Phi = data['Phi']
Theta = data['Theta']
nu = data['nu']
omega = data['omega']
omegaeff = data['omegaeff']
rho = data['rho']
delta = data['delta']
gamma = data['gamma']
r = data['r']
# Auxiliary sets
N = F+D+S+C
NminC = F+D+S
# for p in P:
# for c in C:
# for k in K:
# m_opt[p,c,k] = DEM[c][p]*z_opt[k,c]
model = Model()
dictclass = {}
for f in F:
dictclass[f] = 'F'
for d in D:
dictclass[d] = 'D'
for s in S:
dictclass[s] = 'S'
for c in C:
dictclass[c] = 'C'
# DEFINITIVE DECISION VARIABLES
q_final, qe_final, w_final, v_final = {}, {} ,{}, {}
for i in N:
for j in N:
for k in K:
q_final[i,j,k] = 0
qe_final[i,j,k] = 0
w_final[i,j,k] = 0
v_final[i,j,k] = 0
# Auxiliary dictionary for routes
DictRoutes = {}
DictRoutesList = {}
# Auxiliary dictionary for subsets for each vehicle
DictNodes = {}
# Auxiliary dictionaries for remaining capacities
Phi_, Theta_ = {}, {}
Q0_, DEMS_ = {}, {}
for k in K:
DictRoutes[k] = []
DictRoutesList[k] = []
DictNodes[k] = {'F' : [], 'D' : [], 'S': [], 'C': []}
Phi_[k] = Phi[k]
Theta_[k] = Theta[k]
# for d0 in NminC:
#print('y_opt = ', y_opt)
for d0 in D+S:
##print('Node: %s, Vehicles = %s' % (d0,V_i[d0]))
for k in V_i[d0]:
data_routing = ConstructorForRouting(dictclass, d0, k, m_opt, x_opt, data)
if y_opt.get(k,0) > 0 and len(data_routing['N']) > 1:
#print('data for routing vehicle %s' % k)
#print(data_routing)
model_rou = FlowRouting(data_routing)
model_rou.optimize()
qe, q, w, v = model_rou.__data
q_rou = model_rou.getAttr('x', q)
qe_rou = model_rou.getAttr('x', qe)
w_rou = model_rou.getAttr('x', w)
v_rou = model_rou.getAttr('x', v)
F_ = data_routing['F']
D_ = data_routing['D']
S_ = data_routing['S']
C_ = data_routing['C']
N_ = F_ + D_ + S_ + C_
Q0 = data_routing['Q0']
DEM_ = data_routing['DEM']
try:
# model_rou.##printAttr('X')
qe, q, w, v = model_rou.__data
q_rou = model_rou.getAttr('x', q)
qe_rou = model_rou.getAttr('x', qe)
w_rou = model_rou.getAttr('x', w)
v_rou = model_rou.getAttr('x', v)
for i in N_:
for j in N_:
try:
q_final[i,j,k] = q_rou[i,j]
qe_final[i,j,k] = qe_rou[i,j]
w_final[i,j,k] = w_rou[i,j]
v_final[i,j,k] = v_rou[i,j]
except:
q_final[i,j,k] = 0
w_final[i,j,k] = 0
v_final[i,j,k] = 0
Theta_[k] = Theta[k] - max([q_rou[i,j] for i in N_ for j in N_])
Phi_[k] = Phi[k] - max([v_rou[i,j] for i in N_ for j in N_])
DictRoutes[k], DictRoutesList[k] = GetRoutingList(k,d0,N,w_final)
DictNodes[k] = {'F' : F_, 'D' : D_, 'S': S_, 'C': C_}
Q0_[k] = Q0
DEMS_[k] = DEM_
except:
pass
##print('ERROR IN VEHICLE %s' % k)
else:
for i in N:
for j in N:
q_final[i,j,k] = 0
qe_final[i,j,k] = 0
v_final[i,j,k] = 0
w_final[i,j,k] = 0
solution = {'q_final' : q_final,
'qe_final' : qe_final,
'v_final' : v_final,
'w_final' : w_final,
'DictRoutes' : DictRoutes,
'DictRoutesList' : DictRoutesList,
'DictNodes' : DictNodes,
'Theta_' : Theta_,
'Phi_' : Phi_,
'Q0_' : Q0_,
'DEMS_' : DEMS_}
return solution
"""
AUXILIARY FUNCTIONS FOR HEURISTIC LOOP
"""
# Function that computes the freight cost for the route of a certain vehicle
def ComputeRouteCost(q, routing, k, gamma, r):
return sum([q[i,j,k]*r[i,j]*gamma[k] for (i,j) in routing])
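# Hedged toy example: freight cost of a two-arc route for vehicle 0 with unit
# distances, a 10-unit load on the outbound arc and a per-unit-weight cost of 0.5.
def _example_route_cost():
    q_toy = {(0, 1, 0): 10.0, (1, 0, 0): 0.0}
    gamma_toy = {0: 0.5}
    r_toy = np.ones((2, 2))
    return ComputeRouteCost(q_toy, [(0, 1), (1, 0)], 0, gamma_toy, r_toy)  # -> 5.0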
def DictFromListofTuples(listtuples):
dic = {}
for i,j in listtuples:
dic[i] = j
return dic
# Function that computes every routing cost for every vehicle that visits clients and satellites
def GetMaxRoutingCosts(N, K, depots, DictNodes, r, gamma, w_final, q_final):
RC = {}
Kfil = [k for k in K if DictNodes[k]['S'] and DictNodes[k]['C']] # Vehicles that visit satellites and clients
for k in Kfil:
routing, routing_list = GetRoutingList(k,depots[k],N,w_final)
freightcost = ComputeRouteCost(q_final, routing, k, gamma, r)
RC[k] = freightcost
try:
RC = DictFromListofTuples(sorted(RC.items(), key=lambda x: x[1], reverse=True))
except:
pass
return RC
# Function that determines the "most expensive" destination in a route
def GetNode2Exclude(routing, q, qe, v, C, k, gamma, r, banlist):
DictRouFreight = dict(zip(routing,[qe[i,j,k]*r[i,j]*gamma[k] for (i,j) in routing]))
MaxCost = 0
ex = None
q_ex = None
v_ex = None
for t in routing:
if t[1] in C and t[1] not in banlist:
if DictRouFreight[t] > MaxCost:
ex = t[1]
MaxCost = DictRouFreight[t]
# get freight from that node
if ex != None:
ant_ex = [t for t in routing if t[1] == ex][0][0]
post_ex = [t for t in routing if t[0] == ex][0][1]
q_ex = q[ant_ex,ex,k] - q[ex,post_ex,k]
qe_ex = qe[ant_ex,ex,k] - qe[ex,post_ex,k]
v_ex = v[ant_ex,ex,k] - v[ex,post_ex,k]
return ex, q_ex, qe_ex, v_ex
def GetNode2ExcludeFromVs(routing, q, v, C, k, gamma, r, banlist):
DictRouFreight = dict(zip(routing,[q[i,j,k]*r[i,j]*gamma[k] for (i,j) in routing]))
MaxCost = 0
ex = None
q_ex = None
v_ex = None
if len(routing) > 2:
for t in routing:
if t[1] in C and t[1] not in banlist:
if DictRouFreight[t] > MaxCost:
ex = t[1]
MaxCost = DictRouFreight[t]
elif len(routing) == 2:
##print('Route of lenght 2')
ex = routing[0][1]
else:
pass
if ex != None:
ant_ex = [t for t in routing if t[1] == ex][0][0]
post_ex = [t for t in routing if t[0] == ex][0][1]
q_ex = q[ant_ex,ex,k] - q[ex,post_ex,k]
v_ex = v[ant_ex,ex,k] - v[ex,post_ex,k]
return ex, q_ex, v_ex
# Function that takes a route and adds a node to that route
def ReroutingAdd(routing, tremove, tadd):
rerouting = []
for t in routing:
if t == tremove:
rerouting = rerouting + tadd #tadd is a list of 2 tuples
else:
rerouting.append(t)
return rerouting
# Function that takes a route and removes a node from that route
def ReroutingRemove(routing, ex):
rerouting = []
t_aux = [None,None]
for t in routing:
flag = True
if t[1] == ex:
t_aux[0] = t[0]
flag = False
if t[0] == ex:
t_aux[1] = t[1]
flag = False
try:
if sum(t_aux) > 0:
rerouting.append(tuple(t_aux))
t_aux = [None,None]
except:
pass
if flag:
rerouting.append(t)
return rerouting
# Function that decides which is the best way for adding a node to a route
def MinRouteVariation(Vsat, ex, r, DictRoutes):
MinDist = np.inf
for k in Vsat:
d0 = DictRoutes[k][0]
for t in DictRoutes[k]:
i1 = t[0]
j1 = t[1]
dist = r[i1,ex] + r[j1,t[1]]
if i1 != d0 and j1 != d0:
if dist < MinDist:
ks = k
i = t[0]
l = t[1]
tremove = (i,l)
tadd = [(i,ex), (ex,l)]
MinDist = dist
# Rerouting
rerouting = ReroutingAdd(DictRoutes[ks], tremove, tadd)
return ks, rerouting
# Function that decides which satellite will receive the freight from the excluded node
def SelectSatellite(ex, q_ex, v_ex, Sk, V_i, cdv, cdw, Phi_, Theta_, r, DictRoutes):
sat = None
ks = None
rerouting = None
MinDist = np.inf
for s in Sk:
if q_ex <= cdw[s] and v_ex <= cdv[s]:
Vsat = [k for k in V_i[s] if Theta_[k] >= q_ex and Phi_[k] >= v_ex]
if len(Vsat) > 0:
if r[s,ex] < MinDist:
sat = s
MinDist = r[s,ex]
ks, rerouting = MinRouteVariation(Vsat, ex, r, DictRoutes)
return sat, ks, rerouting
# Function for recomputing the freight for routing
def RecomputeFreightEx(q_final,w_final, N, k, ex, q_ex, sat, routing, gamma, r):
routing_list = [routing[0][0]]
for t in routing:
routing_list.append(t[1])
flag_ex = 0
q_rec = {}
for i in routing_list[:-1]:
j = int(sum([w_final[i,j,k]*j for j in N]))
if j == ex:
ex_ant = i
flag_ex = q_ex
elif j == sat:
if i == ex:
q_rec[ex_ant,j,k] = q_final[i,j,k] + flag_ex
else:
q_rec[i,j,k] = q_final[i,j,k] + flag_ex
flag_ex = 0
else:
if i == ex:
q_rec[ex_ant,j,k] = q_final[i,j,k] + flag_ex
else:
q_rec[i,j,k] = q_final[i,j,k] + flag_ex
rerouting = ReroutingRemove(routing, ex)
return ComputeRouteCost(q_rec, rerouting, k, gamma, r)
# Function for recomputing the freight for routing
def RecomputeFreightAdd(q_final, N, k, ex, q_ex, rerouting, gamma, r):
flag_ex = q_ex
q_rec = {}
for t in rerouting:
i = t[0]
j = t[1]
if j == ex:
ex_ant = i
else:
if i == ex:
q_rec[ex_ant,i,k] = q_final[ex_ant,j,k] + flag_ex
flag_ex = 0
q_rec[i,j,k] = q_final[ex_ant,j,k] + flag_ex
else:
q_rec[i,j,k] = q_final[i,j,k] + flag_ex
return ComputeRouteCost(q_rec, rerouting, k, gamma, r)
def RecomputeFreightExFromKs(q_final,w_final, N, k, ex, q_ex, routing, gamma, r):
##print('Removing %s from freigh of vehicle %s' %(q_ex, k))
# Function for recomputing freight
if len(routing) > 2:
routing_list = [routing[0][0]]
for t in routing:
routing_list.append(t[1])
flag_ex = q_ex
q_rec = {}
for i in routing_list[:-1]:
j = int(sum([w_final[i,j,k]*j for j in N]))
if j == ex:
ex_ant = i
flag_ex = 0
else:
if i == ex:
q_rec[ex_ant,j,k] = q_final[i,j,k] - flag_ex
else:
q_rec[i,j,k] = q_final[i,j,k] - flag_ex
rerouting = ReroutingRemove(routing, ex)
cost = ComputeRouteCost(q_rec, rerouting, k, gamma, r)
else:
cost = 0
return cost
def RecomputeFreightAddToKd(q_final, N, k, ex, q_ex, sat, rerouting, gamma, r):
# Function for recomputing the freight for routing
##print('Adding %s to freight of vehicle %s' %(q_ex, k))
##print('Rerouting: ', rerouting)
q_or = {}
for (i,j) in rerouting:
try:
q_or[i,j,k] = q_final[i,j,k]
except:
pass
routing_list = [rerouting[0][0]]
for t in rerouting:
routing_list.append(t[1])
if routing_list.index(ex) < routing_list.index(sat):
flag_ex = 0
q_rec = {}
for t in rerouting:
i = t[0]
j = t[1]
if j == ex:
ex_ant = i
else:
if i == ex:
q_rec[ex_ant,ex,k] = q_final[ex_ant,j,k]
flag_ex = q_ex
q_rec[ex,j,k] = q_final[ex_ant,j,k] - flag_ex
elif i == sat:
flag_ex = 0
q_rec[sat,j,k] = q_final[sat,j,k] - flag_ex
else:
q_rec[i,j,k] = q_final[i,j,k] - flag_ex
else:
flag_ex = 0
q_rec = {}
for t in rerouting:
i = t[0]
j = t[1]
if j == ex:
ex_ant = i
else:
if i == ex:
q_rec[ex_ant,ex,k] = q_final[ex_ant,j,k]
q_rec[ex,j,k] = q_final[ex_ant,j,k] - flag_ex
flag_ex = 0
elif i == sat:
flag_ex = q_ex
q_rec[sat,j,k] = q_final[sat,j,k] + flag_ex
else:
q_rec[i,j,k] = q_final[i,j,k] + flag_ex
##print('Q nuevo: ', q_rec)
return ComputeRouteCost(q_rec, rerouting, k, gamma, r)
def ImproveOptimalSwapKdKs(RCVd, data, cdv, cdw, DictRoutes, DictRoutesList, DictNodes, DEMS_, Q0_, q_final, qe_final, v_final, w_final, Phi_, Theta_, depots):
# Unpacking data
XY = data['XY']
F = data['F']
D = data['D']
S = data['S']
C = data['C']
P = data['P']
V_i = data['V_i']
Phi = data['Phi']
Theta = data['Theta']
nu = data['nu']
omega = data['omega']
rho = data['rho']
gamma = data['gamma']
r = data['r']
A = data['A']
N = F+D+S+C
banlist = []
for kd in RCVd.keys():
flag_feasible = True
flag_descend = True
while flag_feasible and flag_descend:
# Get node to exclude REMARK: IS ALWAYS A CLIENT!!
try:
ex, q_ex, qe_ex, v_ex = GetNode2Exclude(DictRoutes[kd], q_final, qe_final, v_final, C, kd, gamma, r, banlist)
# Get satellite
sat, ks, rerouting_ks = SelectSatellite(ex, q_ex, v_ex, DictNodes[kd]['S'], V_i, cdv, cdw, Phi_, Theta_, r, DictRoutes)
except:
sat = None
# If there is a satelite...
if sat != None:
IncumbentCost = np.inf
# PrevCost: routing cost for kd and ks without changes
PrevCost = ComputeRouteCost(qe_final, DictRoutes[kd], kd, gamma, r) + ComputeRouteCost(qe_final, DictRoutes[ks], ks, gamma, r)
Costkd = RecomputeFreightEx(qe_final,w_final, N, kd, ex, qe_ex, sat, DictRoutes[kd], gamma, r)
Costks = RecomputeFreightAdd(qe_final, N, ks, ex, qe_ex, rerouting_ks, gamma, r)
IncumbentCost = Costkd + Costks
if A[ex,ks] < 0:
IncumbentCost = np.inf
##print('Incumbent: ', IncumbentCost, ' previous: ', PrevCost)
if IncumbentCost <= PrevCost:
# Modify nodes for kd and ks
##print('Removing %s from the route of vehicle %s' % (ex,kd))
DictNodes[kd]['C'] = [c for c in DictNodes[kd]['C'] if c != ex]
DictNodes[ks]['C'] = DictNodes[ks]['C'] + [ex]
# Create entry for exchanged node
DEMS_[ks][ex] = [0 for p in P]
# Correct demand for excluded node
for p in P:
aux = DEMS_[kd][ex][p]
DEMS_[kd][sat][p] = DEMS_[kd][sat][p] + aux
Q0_[ks][p] = Q0_[ks][p] + aux
DEMS_[ks][ex][p] = aux
cdv[sat] = cdv[sat] + aux*nu[p]
cdw[sat] = cdw[sat] + aux*omega[p]
del DEMS_[kd][ex]
# Re routing for kd
##print('RE ROUTING FOR VEHICLE %s' % kd)
F_ = DictNodes[kd]['F']
D_ = DictNodes[kd]['D']
S_ = DictNodes[kd]['S']
C_ = DictNodes[kd]['C']
N_ = F_ + D_ + S_ + C_
data_routing = {'Q0' : Q0_[kd],
'DEM': DEMS_[kd],
'N': N_,
'F' : F_,
'D': D_,
'S': S_,
'C': C_,
'P' : P,
'Phi' : Phi[kd],
'Theta': Theta[kd],
'nu' : nu,
'omega': omega,
'd0' : depots[kd],
'gamma': gamma[kd],
'rho' : rho[kd],
'r' : r}
model_rou = FlowRouting(data_routing)
model_rou.optimize()
try:
qe, q, w, v = model_rou.__data
q_rou = model_rou.getAttr('x', q)
qe_rou = model_rou.getAttr('x', qe)
w_rou = model_rou.getAttr('x', w)
v_rou = model_rou.getAttr('x', v)
for i in N_:
for j in N_:
try:
q_final[i,j,kd] = q_rou[i,j]
qe_final[i,j,kd] = qe_rou[i,j]
w_final[i,j,kd] = w_rou[i,j]
v_final[i,j,kd] = v_rou[i,j]
except:
q_final[i,j,kd] = 0
qe_final[i,j,kd] = 0
w_final[i,j,kd] = 0
v_final[i,j,kd] = 0
# Delete route for excluded node
for i in N_:
q_final[i,ex,kd] = 0
qe_final[i,ex,kd] = 0
w_final[i,ex,kd] = 0
v_final[i,ex,kd] = 0
q_final[ex,i,kd] = 0
qe_final[ex,i,kd] = 0
w_final[ex,i,kd] = 0
v_final[ex,i,kd] = 0
Theta_[kd] = Theta[kd] - max([q_rou[i,j] for i in N_ for j in N_])
Phi_[kd] = Phi[kd] - max([v_rou[i,j] for i in N_ for j in N_])
DictRoutes[kd], DictRoutesList[kd] = GetRoutingList(kd,depots[kd],N,w_final)
DictNodes[kd] = {'F' : F_, 'D' : D_, 'S': S_, 'C': C_}
banlist.append(ex)
except:
pass
##print('ERROR FOR VEHICLE %s' % kd)
##print('RE ROUTING FOR VEHICLE %s' % ks)
F_ = DictNodes[ks]['F']
D_ = DictNodes[ks]['D']
S_ = DictNodes[ks]['S']
C_ = DictNodes[ks]['C']
N_ = F_ + D_ + S_ + C_
data_routing = {'Q0' : Q0_[ks],
'DEM': DEMS_[ks],
'N': N_,
'F' : F_,
'D': D_,
'S': S_,
'C': C_,
'P' : P,
'Phi' : Phi[ks],
'Theta': Theta[ks],
'nu' : nu,
'omega': omega,
'd0' : depots[ks],
'gamma': gamma[ks],
'rho' : rho[ks],
'r' : r}
model_rou = FlowRouting(data_routing)
model_rou.optimize()
try:
qe, q, w, v = model_rou.__data
q_rou = model_rou.getAttr('x', q)
qe_rou = model_rou.getAttr('x', qe)
w_rou = model_rou.getAttr('x', w)
v_rou = model_rou.getAttr('x', v)
for i in N_:
for j in N_:
try:
q_final[i,j,ks] = q_rou[i,j]
qe_final[i,j,ks] = qe_rou[i,j]
w_final[i,j,ks] = w_rou[i,j]
v_final[i,j,ks] = v_rou[i,j]
except:
q_final[i,j,ks] = 0
qe_final[i,j,ks] = 0
w_final[i,j,ks] = 0
v_final[i,j,ks] = 0
Theta_[ks] = Theta[ks] - max([q_rou[i,j] for i in N_ for j in N_])
Phi_[ks] = Phi[ks] - max([v_rou[i,j] for i in N_ for j in N_])
DictRoutes[ks], DictRoutesList[ks] = GetRoutingList(ks,depots[ks],N,w_final)
DictNodes[ks] = {'F' : F_, 'D' : D_, 'S': S_, 'C': C_}
except:
pass
                    ##print('ERROR IN REOPTIMIZING VEHICLE %s' % ks)
else:
##print('No more feasible changes for Vehicle %s' % kd)
flag_descend = False
else:
##print('No more feasible changes for Vehicle %s' % kd)
flag_feasible = False
solution_swapkdks = {'DictRoutes' : DictRoutes,
'DictNodes' : DictNodes,
'DEMS_' : DEMS_,
'Q0_' : Q0_,
'q_final' : q_final,
'v_final' : v_final,
'w_final' : w_final,
'Phi_' : Phi_,
'Theta_' : Theta_,
'cdv' : cdv,
'cdw' : cdw,
'banlist' : banlist}
return solution_swapkdks
def AddNodeToSet(dictclass, add, F, D, S, C):
cla = dictclass[add]
if cla == 'F':
F = F + [add]
elif cla == 'D':
D = D + [add]
elif cla == 'S':
S = S + [add]
elif cla == 'C':
C = C + [add]
else:
pass
return F,D,S,C
def RemoveNodeFromSet(dictclass, rem, F, D, S, C):
cla = dictclass[rem]
if cla == 'F':
F = [f for f in F if f != rem]
elif cla == 'D':
D = [d for d in D if d != rem]
elif cla == 'S':
S = [s for s in S if s != rem]
elif cla == 'C':
C = [c for c in C if c != rem]
else:
pass
return F,D,S,C
def AddAndRemoveFromRoute(dictclass, DEMS_, P, DictNodes, k, add, DemAdd, rem, DemRem, r, depots, Q0_, Phi, Theta, nu, omega, rho, gamma):
demms = DEMS_.copy()
DEM_ = demms[k]
Q0 = Q0_[k].copy()
F_ = DictNodes[k]['F']
D_ = DictNodes[k]['D']
S_ = DictNodes[k]['S']
C_ = DictNodes[k]['C']
d0 = depots[k]
##print('AddAndRemoveFromRoute: Original demands ', DEM_)
if add != None:
##print('AddAndRemoveFromRoute: Attempting to add node %s to vehicle %s' % (add,k))
F_, D_, S_, C_ = AddNodeToSet(dictclass, add, F_, D_, S_, C_)
DEM_[add] = [0 for p in P]
for p in P:
aux = DemAdd[p]
DEM_[add][p] = aux # Demand for the new node
if rem != None:
##print('AddAndRemoveFromRoute: Attempting to remove node %s to vehicle %s' % (rem,k))
F_, D_, S_, C_ = RemoveNodeFromSet(dictclass, rem, F_, D_, S_, C_)
for p in P:
aux = DemRem[p]
DEM_[rem][p] = DEM_[rem][p] - aux # If rem is depot, it will receive less feight
# If rem is client, it will have demand 0
N_ = F_ + D_ + S_ + C_
q_rou, w_rou, v_rou = {},{},{}
for i in N_:
for j in N_:
q_rou[i,j,k] = 0
v_rou[i,j,k] = 0
w_rou[i,j,k] = 0
N_mind0 = [n for n in N_ if n != d0]
for n in N_mind0:
if max([np.absolute(DEM_[n][p]) for p in P]) < 1:
##print('AddAndRemoveFromRoute: Removing node %s from route of vehicle %s because of empty demand' % (n,k))
# ##printx = True
F_, D_, S_, C_ = RemoveNodeFromSet(dictclass, n, F_, D_, S_, C_)
Route, RouteList = [], []
NewDictNodes = {'F' : F_, 'D': D_, 'S': S_, 'C': C_}
##print('AddAndRemoveFromRoute: Vehicle %s, Nodes: ' % k, NewDictNodes)
##print('AddAndRemoveFromRoute: Vehicle %s, Demands: ' % k, DEM_)
flag_optim = True
N_ = F_ + D_ + S_ + C_
if len(N_) > 2:
data_routing = {'Q0' : Q0,
                        'DEM': DEM_,
'N': N_,
'F' : F_,
'D': D_,
'S': S_,
'C': C_,
'P' : P,
'Phi' : Phi[k],
'Theta': Theta[k],
'nu' : nu,
'omega': omega,
'd0' : depots[k],
'gamma': gamma[k],
'rho' : rho[k],
'r' : r}
model_rou = FlowRouting(data_routing)
model_rou.optimize()
try:
            qe, q, w, v = model_rou.__data
q_rou2 = model_rou.getAttr('x', q)
w_rou2 = model_rou.getAttr('x', w)
v_rou2 = model_rou.getAttr('x', v)
for (i,j) in q_rou2:
q_rou[i,j,k] = q_rou2[i,j]
w_rou[i,j,k] = w_rou2[i,j]
v_rou[i,j,k] = v_rou2[i,j]
Route, RouteList = GetRoutingList(k,d0,N_,w_rou)
##print('End routing vehicle %s' % k)
except:
flag_optim = False
##print('Infeasible routing for vehicle %s' % k)
elif len(N_) == 2:
j = [n for n in N_ if n != d0][0]
w_rou[d0, j, k] = 1
q_rou[d0, j, k] = sum([Q0[p]*omega[p] for p in P])
v_rou[d0, j, k] = sum([Q0[p]*nu[p] for p in P])
w_rou[j, d0, k] = 1
q_rou[j, d0, k] = 0
v_rou[j, d0, k] = 0
Route, RouteList = [(d0,j), (j,d0)], [d0,j]
else:
pass
CapVolRem = Theta[k] - max([q_rou[i,j,k] for i in N_ for j in N_])
CapWeiRem = Phi[k] - max([v_rou[i,j,k] for i in N_ for j in N_])
if flag_optim:
FreightCost = sum([q_rou[i,j,k]*gamma[k]*r[i,j] for i in N_ for j in N_])
else:
        FreightCost = np.inf
return FreightCost, Route, RouteList, DEM_, Q0, CapVolRem, CapWeiRem, q_rou, w_rou, v_rou, NewDictNodes
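# Hedged sketch of a call, mirroring how the swap-improvement routines use this
# helper (argument names are the formal parameters above; `c` is an assumed client id):
#   (FreightCost, Route, RouteList, DEM_k, Q0_k, CapVolRem, CapWeiRem,
#    q_rou, w_rou, v_rou, NewDictNodes) = AddAndRemoveFromRoute(
#        dictclass, DEMS_, P, DictNodes, k, add=c, DemAdd=DEM[c],
#        rem=None, DemRem=None, r=r, depots=depots, Q0_=Q0_,
#        Phi=Phi, Theta=Theta, nu=nu, omega=omega, rho=rho, gamma=gamma)
#   An infeasible re-routing is signalled by FreightCost == np.inf.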
def AuxRoutingKd(DEMS_, P, DictNodes, k, add, DemAdd, sat, r, depots, Q0_, Phi, Theta, nu, omega, rho, gamma, omegaeff):
    # Kd gets a new node on its route. The node brings its own demand and, since it
    # was previously served by a satellite, that satellite now receives less freight.
demms = DEMS_.copy()
DEM_ = demms[k]
Q0 = Q0_[k].copy()
F_ = DictNodes[k]['F']
D_ = DictNodes[k]['D']
S_ = DictNodes[k]['S']
C_ = DictNodes[k]['C']
d0 = depots[k]
if add != None:
##print('AuxRoutingKd: adding node %s to route of vehicle %s' % (add, k))
C_ = C_ + [add]
DEM_[add] = [0 for p in P]
for p in P:
aux = DemAdd[p]
DEM_[sat][p] = DEM_[sat][p] - aux # satellite receives less freight
DEM_[add][p] = aux # Demand for the new node
N_ = F_ + D_ + S_ + C_
q_rou, w_rou, v_rou = {},{},{}
for i in N_:
for j in N_:
q_rou[i,j,k] = 0
v_rou[i,j,k] = 0
w_rou[i,j,k] = 0
N_mind0 = [n for n in N_ if n != d0]
for n in N_mind0:
if max([np.absolute(DEM_[n][p]) for p in P]) < 1:
##print('AuxRoutingKd: Removing node %s from route of vehicle %s because of empty demand' % (n,k))
F_ = [f for f in F_ if f != n]
D_ = [d for d in D_ if d != n]
S_ = [s for s in S_ if s != n]
C_ = [c for c in C_ if c != n]
N_ = F_ + D_ + S_ + C_
Route, RouteList = [], []
NewDictNodes = {'F' : F_, 'D': D_, 'S': S_, 'C': C_}
##print('AuxRoutingKd: Vehicle %s, Nodes: ' % k, NewDictNodes)
# Consolidation of weight and volume
flag_optim = True
if len(N_) > 2:
data_routing = {'Q0' : Q0,
'DEM': DEM_,
'N': N_,
'F' : F_,
'D': D_,
'S': S_,
'C': C_,
'P' : P,
'Phi' : Phi[k],
'Theta': Theta[k],
'nu' : nu,
'omega': omega,
'omegaeff': omegaeff,
'd0' : depots[k],
'gamma': gamma[k],
'rho' : rho[k],
'r' : r}
model_rou = FlowRouting(data_routing)
model_rou.optimize()
try:
q, w, v = model_rou.__data
q_rou2 = model_rou.getAttr('x', q)
w_rou2 = model_rou.getAttr('x', w)
v_rou2 = model_rou.getAttr('x', v)
for (i,j) in q_rou2:
q_rou[i,j,k] = q_rou2[i,j]
w_rou[i,j,k] = w_rou2[i,j]
v_rou[i,j,k] = v_rou2[i,j]
Route, RouteList = GetRoutingList(k,d0,N_,w_rou)
##print('End routing vehicle %s' % k)
except:
flag_optim = False
##print('Infeasible routing for vehicle %s' % k)
elif len(N_) == 2:
j = [n for n in N_ if n != d0][0]
w_rou[d0, j, k] = 1
q_rou[d0, j, k] = sum([Q0[p]*omega[p] for p in P])
v_rou[d0, j, k] = sum([Q0[p]*nu[p] for p in P])
w_rou[j, d0, k] = 1
q_rou[j, d0, k] = 0
v_rou[j, d0, k] = 0
Route, RouteList = [(d0,j), (j,d0)], [d0,j]
else:
pass
CapVolRem = Theta[k] - max([q_rou[i,j,k] for i in N_ for j in N_])
CapWeiRem = Phi[k] - max([v_rou[i,j,k] for i in N_ for j in N_])
if flag_optim:
FreightCost = sum([q_rou[i,j,k]*gamma[k]*r[i,j] for i in N_ for j in N_])
else:
        FreightCost = np.inf
return FreightCost, Route, RouteList, DEM_, Q0, CapVolRem, CapWeiRem, q_rou, w_rou, v_rou, NewDictNodes
def AuxRoutingKs(DEMS_, P, DictNodes, k, ex, DemRem, sat, r, depots, Q0_, Phi, Theta, nu, omega, rho, gamma, omegaeff):
    # Ks had a node removed, so its initial load Q0 changes
demms = DEMS_.copy()
DEM_ = demms[k]
Q0 = Q0_[k].copy()
F_ = DictNodes[k]['F']
D_ = DictNodes[k]['D']
S_ = DictNodes[k]['S']
C_ = DictNodes[k]['C']
d0 = depots[k]
N_ = F_ + D_ + S_ + C_
C_ = [c for c in C_ if c != ex]
DEM_[ex] = [0 for p in P]
for p in P:
aux = DemRem[p]
Q0[p] = Q0[p] - aux # Vehicle starts with less freight
DEM_[ex][p] = 0
N_ = F_ + D_ + S_ + C_
##print('AuxRoutingKs: Vehicle %s, Nodes: ' % k, DictNodes[k])
q_rou, w_rou, v_rou = {},{},{}
for i in N_:
for j in N_:
q_rou[i,j,k] = 0
v_rou[i,j,k] = 0
w_rou[i,j,k] = 0
Route, RouteList = [], []
# Consolidation of weight and volume
Weight = {}
Volume = {}
for i in N_:
Weight[i] = sum([DEM_[i][p]*omega[p] for p in P])
Volume[i] = sum([DEM_[i][p]*nu[p] for p in P])
flag_optim = True
if len(N_) > 2:
data_routing = {'Q0' : Q0,
                        'DEM': DEM_,
'N': N_,
'F' : F_,
'D': D_,
'S': S_,
'C': C_,
'P' : P,
'Phi' : Phi[k],
'Theta': Theta[k],
'nu' : nu,
'omega': omega,
'omegaeff': omegaeff,
'd0' : depots[k],
'gamma': gamma[k],
'rho' : rho[k],
'r' : r}
model_rou = FlowRouting(data_routing)
        model_rou.optimize()
try:
q, w, v = model_rou.__data
q_rou2 = model_rou.getAttr('x', q)
w_rou2 = model_rou.getAttr('x', w)
v_rou2 = model_rou.getAttr('x', v)
for (i,j) in q_rou2:
q_rou[i,j,k] = q_rou2[i,j]
w_rou[i,j,k] = w_rou2[i,j]
v_rou[i,j,k] = v_rou2[i,j]
Route, RouteList = GetRoutingList(k,d0,N_,w_rou)
##print('End routing vehicle %s' % k)
except:
flag_optim = False
##print('Infeasible routing for vehicle %s' % k)
elif len(N_) == 2:
j = [n for n in N_ if n != d0][0]
w_rou[d0, j, k] = 1
q_rou[d0, j, k] = sum([Q0[p]*omega[p] for p in P])
v_rou[d0, j, k] = sum([Q0[p]*nu[p] for p in P])
w_rou[j, d0, k] = 1
q_rou[j, d0, k] = 0
v_rou[j, d0, k] = 0
Route, RouteList = [(d0,j), (j,d0)], [d0,j]
else:
pass
CapVolRem = Theta[k] - max([q_rou[i,j,k] for i in N_ for j in N_])
CapWeiRem = Phi[k] - max([v_rou[i,j,k] for i in N_ for j in N_])
if flag_optim:
FreightCost = sum([q_rou[i,j,k]*gamma[k]*r[i,j] for i in N_ for j in N_])
else:
        FreightCost = np.inf
NewDictNodes = {'F' : F_, 'D': D_, 'S': S_, 'C': C_}
return FreightCost, Route, RouteList, DEM_, Q0, CapVolRem, CapWeiRem, q_rou, w_rou, v_rou, NewDictNodes
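# The Kd/Ks helpers are meant to be evaluated as a pair when moving a client `ex`
# from a satellite vehicle `ks` to a delivery vehicle `kd`; sketch of the pattern
# used by ImproveOptimalSwapKsKd below (all names come from that caller):
#   Sol_kd = AuxRoutingKd(DEMS_, P, DictNodes, kd, ex, DEM[ex], sat, r, depots,
#                         Q0_, Phi, Theta, nu, omega, rho, gamma, omegaeff)
#   Sol_ks = AuxRoutingKs(DEMS_, P, DictNodes, ks, ex, DEM[ex], sat, r, depots,
#                         Q0_, Phi, Theta, nu, omega, rho, gamma, omegaeff)
#   new_cost = Sol_kd[0] + Sol_ks[0]   # compare against the pre-swap routing cost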
def ImproveOptimalSwapKsKd(RCVs, data, banlist, DictSatKd, cdv, cdw, DictRoutes, DictRoutesList, DictNodes, DEMS_, Q0_, q_final, v_final, w_final, Phi_, Theta_, depots):
# Unpacking data
XY = data['XY']
F = data['F']
D = data['D']
S = data['S']
C = data['C']
P = data['P']
V_i = data['V_i']
Phi = data['Phi']
Theta = data['Theta']
nu = data['nu']
omega = data['omega']
omegaeff = data['omegaeff']
rho = data['rho']
gamma = data['gamma']
DEM = data['DEM']
r = data['r']
A = data['A']
N = F+D+S+C
##print('starting: ImproveOptimalSwapKsKd')
    # Count how many vehicles are currently dispatched from each depot:
depots_list = []
for dep in depots.values():
if dep not in depots_list:
depots_list.append(dep)
VehiclesPerDepot = {}
for dep in depots_list:
VehiclesPerDepot[dep] = sum([w_final[dep,j,k] for j in N for k in V_i[dep]])
##print(VehiclesPerDepot)
for ks in RCVs.keys():
##print('Vehicle %s' % ks)
flag_feasible = True
flag_descend = True
while flag_feasible and flag_descend:
ex, q_ex, v_ex = GetNode2ExcludeFromVs(DictRoutes[ks], q_final, v_final,C, ks, gamma, r, banlist)
if ex != None:
# patch qex y vex (sometimes it has errors)
q_ex = sum([DEM[ex][p]*omega[p] for p in P])
v_ex = sum([DEM[ex][p]*nu[p] for p in P])
##print('Original demand of node %s: ' % ex, DEM[ex])
##print('Original freight of node %s: ' % ex, q_ex)
sat = depots[ks]
kd, rerouting_kd = MinRouteVariation([DictSatKd[sat]], ex, r, DictRoutes)
# Backup for satellite demand
dem_sat = [dem for dem in DEMS_[kd][sat]]
else:
sat = None
# If there is a satelite...
if sat != None and A[ex,kd] > 0:
IncumbentCost = np.inf
# PrevCost: routing cost for kd and ks without changes
Costkd_pre = ComputeRouteCost(q_final, DictRoutes[kd], kd, gamma, r)
Costks_pre = ComputeRouteCost(q_final, DictRoutes[ks], ks, gamma, r)
PrevCost = Costkd_pre + Costks_pre
##print('Attempting to remove node %s from route of vehicle %s (sat = %s)' % (ex, ks,sat))
# Aux##printPreRerouting(DEMS_, Q0_, kd , nu, omega, P, DictNodes)
Sol_kd = AuxRoutingKd(DEMS_, P, DictNodes, kd, ex, DEM[ex], sat, r, depots, Q0_, Phi, Theta, nu, omega, rho, gamma, omegaeff)
Costkd_pos = Sol_kd[0]
# Aux##printPreRerouting(DEMS_, Q0_, ks , nu, omega, P, DictNodes)
Sol_ks = AuxRoutingKs(DEMS_, P, DictNodes, ks, ex, DEM[ex], sat, r, depots, Q0_, Phi, Theta, nu, omega, rho, gamma, omegaeff)
Costks_pos = Sol_ks[0]
# CHECK IF SATELLITE HAS EMPTY ROUTE
if Sol_ks[10]['C']:
IncumbentCost = Costkd_pos + Costks_pos
else:
##print('Vehicle %s has an empty route' % ks)
if VehiclesPerDepot[depots[ks]] - 1 == 0:
IncumbentCost = Costkd_pos + Costks_pos - 1000
##print('Attempting to close satelite %s' % depots[ks])
##print('Incumbent: ', IncumbentCost, ' previous: ', PrevCost)
if IncumbentCost <= PrevCost:
##print('Updating routes for vehicles kd = %s and ks = %s' % (kd,ks))
DictSol = {kd : Sol_kd, ks: Sol_ks}
#FreightCost, Route, RouteList, DEM, Q0, CapVolRem, CapWeiRem, q_rou, w_rou, v_rou
for k in [kd,ks]:
OldRoute = DictRoutes[k]
for (i,j) in OldRoute:
q_final[i,j,k] = 0
w_final[i,j,k] = 0
v_final[i,j,k] = 0
DictRoutes[k] = DictSol[k][1]
DictRoutesList[k] = DictSol[k][2]
DEMS_[k] = DictSol[k][3]
Q0_[k] = DictSol[k][4]
Phi_[k] = DictSol[k][5]
Theta_[k] = DictSol[k][6]
for (i,j) in DictSol[k][1]:
q_final[i,j,k] = DictSol[k][7][i,j,k]
w_final[i,j,k] = DictSol[k][8][i,j,k]
v_final[i,j,k] = DictSol[k][9][i,j,k]
# Nodes are modified
DictNodes[k] = DictSol[k][10]
                    # The new node is added to kd and removed from ks:
# Remaining capacities of depots are modified:
cdw[depots[ks]] = cdw[depots[ks]] + q_ex
cdv[depots[ks]] = cdv[depots[ks]] + v_ex
if Sol_ks[10]['C']:
pass
else:
VehiclesPerDepot[depots[ks]] = VehiclesPerDepot[depots[ks]] - 1
                    ##print('There was an exchange between kd = %s and ks = %s' % (kd,ks))
else:
                    ##print('There was no exchange between kd = %s and ks = %s' % (kd,ks))
DEMS_[kd][sat] = dem_sat
del DEMS_[kd][ex]
flag_descend = False
else:
##print('No more feasible changes for Vehicle %s' % ks)
flag_feasible = False
solution_swapkskd = {'DictRoutes' : DictRoutes,
'DictNodes' : DictNodes,
'DEMS_' : DEMS_,
'Q0_' : Q0_,
'q_final' : q_final,
'v_final' : v_final,
'w_final' : w_final,
'Phi_' : Phi_,
'Theta_' : Theta_,
'cdv' : cdv,
'cdw' : cdw,
'banlist' : banlist}
return solution_swapkskd
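# Minimal sketch for consuming the returned dictionary (keys as assembled above;
# the left-hand names are assumptions matching the caller in ExecuteMultiEchelon):
#   sol = ImproveOptimalSwapKsKd(RCVs, data, banlist, DictSatKd, cdv, cdw,
#                                DictRoutes, DictRoutesList, DictNodes, DEMS_,
#                                Q0_, q_final, v_final, w_final, Phi_, Theta_, depots)
#   DictRoutes, DictNodes = sol['DictRoutes'], sol['DictNodes']
#   q_final, w_final, v_final = sol['q_final'], sol['w_final'], sol['v_final']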
def Steps1To3(data):
# Unpacking data
XY = data['XY']
F = data['F']
D = data['D']
S = data['S']
C = data['C']
P = data['P']
P_f = data['P_f']
K = data['K']
V_i = data['V_i']
DEM = data['DEM']
Lambd = data['Lambd']
Omega = data['Omega']
Phi = data['Phi']
Theta = data['Theta']
nu = data['nu']
omega = data['omega']
omegaeff = data['omegaeff']
rho = data['rho']
delta = data['delta']
gamma = data['gamma']
r = data['r']
epsil = data['epsil']
A = data['A']
    firmswithout = F[- max(int(len(F)*0.2), 1):]  # Assume the last 20% of firms (at least one)
    # do not operate their own vehicles
Fw = [f for f in firmswithout]
FminFw = [f for f in F if f not in Fw]
Vs = [item for sublist in [V_i[s] for s in S] for item in sublist]
Vd = [item for sublist in [V_i[d] for d in D] for item in sublist]
N = F+D+S+C
DcupS = D + S
ScupC = S + C
NminC = F + D + S
# Other parameters
VolClient = {}
WeightClient = {}
for client, dem in DEM.items():
VolClient[client] = sum([dem[p]*nu[p] for p in P])
WeightClient[client] = sum([dem[p]*omega[p] for p in P])
MinVolDep = {}
MinWeightDep = {}
for i in DcupS:
MinVolDep[i] = min(Lambd[i], sum([Phi[k] for k in V_i[i]]))
MinWeightDep[i] = min(Omega[i], sum([Theta[k] for k in V_i[i]]))
ServCost = []
PfromFw = [item for sublist in [P_f[f] for f in Fw] for item in sublist]
F_p = {}
for f in F:
for p in P_f[f]:
F_p[p] = f
# Serving cost for delivering to customers
for i in D:
# gamma_ki = max([gamma[v] for v in V_i[i]])
# rho_ki = max([rho[v] for v in V_i[i]])
for c in C:
Weight_cf = {}
gamma_kf = {}
rho_kf = {}
for f in F:
Weight_cf[f] = sum([DEM[c][p]*omegaeff[p] for p in P_f[f]])
gamma_kf[f] = 0
rho_kf[f] = 0
# gamma_kf[f] = max([gamma[v] for v in V_i[f]])
# rho_kf[f] = max([rho[v] for v in V_i[f]])
# Check if customer c demanded products from firms without vehicles
if sum([DEM[c][p] for p in PfromFw]) > 0:
flag_fw = True
FirmsToVisit = [f for f in Fw if max([DEM[c][p] for p in P_f[f]]) > 0]
else:
flag_fw = False
load_cost_f1 = sum([r[f,i]*(gamma_kf[f]*Weight_cf[f] + rho_kf[f]) for f in FminFw])
for k in V_i[i]:
if A[c,k] > 0:
gamma_ki = gamma[k]
rho_ki = rho[k]
if flag_fw:
f0, load_cost_f2 = GreedyRoutingForServingCost(i,
sum([Weight_cf[f] for f in FminFw]),
FirmsToVisit,
Weight_cf,
gamma_ki,
rho_ki,
r)
del_cost = r[f0,c]*(gamma_ki*sum([Weight_cf[f] for f in Fw]) + rho_ki)
else:
load_cost_f2 = 0
del_cost = r[i,c]*(gamma_ki*sum([Weight_cf[f] for f in F]) + rho_ki)
"""HERE WE CAN ADD ADDITIONAL COSTS FOR ROUTING"""
sc = load_cost_f1 + load_cost_f2 + del_cost + delta[k]
ServCost.append([i,c,k, sc, VolClient[c], WeightClient[c]])
# serving cost for satellite
for i in S:
for c in C:
Weight_cf = {}
gamma_kf = {}
rho_kf = {}
for f in F:
Weight_cf[f] = sum([DEM[c][p]*omegaeff[p] for p in P_f[f]])
gamma_kf[f] = 0
rho_kf[f] = 0
# gamma_kf[f] = max([gamma[v] for v in V_i[f]])
# rho_kf[f] = max([rho[v] for v in V_i[f]])
# Check if customer c demanded products from firms without vehicles
if sum([DEM[c][p] for p in PfromFw]) > 0:
flag_fw = True
FirmsToVisit = [f for f in Fw if max([DEM[c][p] for p in P_f[f]]) > 0]
else:
flag_fw = False
load_cost_f1 = GetMinimalLoadCostF1(r, i, gamma, Weight_cf, rho, FminFw, D, V_i)
if flag_fw:
del_cost = GetBestDeliveringCost(r, i, gamma, Weight_cf, rho, FirmsToVisit, D, V_i)
else:
del_cost = 0
for k in V_i[i]:
if A[c,k] > 0:
gamma_ki = max([gamma[v] for v in V_i[i]])
rho_ki = max([rho[v] for v in V_i[i]])
                    # Delivery leg from satellite i to customer c; del_cost already holds the
                    # cost of moving goods from firms without vehicles towards the satellite.
                    sat_to_cli_cost = r[i,c]*(gamma_ki*sum([Weight_cf[f] for f in Fw]) + rho_ki)
                    sc = load_cost_f1 + del_cost + sat_to_cli_cost + epsil[i] + delta[k]
ServCost.append([i,c,k,sc, VolClient[c], WeightClient[c]])
df_sc = pd.DataFrame(data = ServCost, columns = ['depot','customer','vehicle','servcost','volume','weight'])
df_sc = df_sc.sort_values(by='servcost', ascending = True).reset_index(drop=True)
openedS = []
usedK = []
DEM_i = {} # Dictionary for depots demands
for i in DcupS:
DEM_i[i] = [0 for p in P]
h_opt = {}
m_opt = {}
x_opt = {}
y_opt = {}
u_opt = {}
z_opt = {}
Weight_i = {}
Volume_i = {}
for i in DcupS:
Weight_i[i] = 0
Volume_i[i] = 0
Weight_k = {}
Volume_k = {}
for k in K:
Weight_k[k] = 0
Volume_k[k] = 0
# #print(df_sc)
while df_sc.shape[0] > 0:
# Always check first element in dataframe
i = int(df_sc.loc[0]['depot'])
c = int(df_sc.loc[0]['customer'])
k = int(df_sc.loc[0]['vehicle'])
w = df_sc.loc[0]['weight']
v = df_sc.loc[0]['volume']
# #print(df_sc.head())
# #print(df_sc.shape[0])
#print('Customer %s trying to be added to depot %s' %(c,i))
# #print('Depot incumbent weight: %s of %s' % (Weight_i[i] + w, MinWeightDep[i]))
# #print('Depot incumbent Volume: %s of %s' % (Volume_i[i] + v, MinVolDep[i]))
if Weight_i[i] + w <= MinWeightDep[i] and Volume_i[i] + v <= MinVolDep[i]:
# #print('Vehicle incumbent weight: %s of %s' % (Weight_k[k] + w, Theta[k]))
# #print('Vehicle incumbent Volume: %s of %s' % (Volume_k[k] + v, Phi[k]))
if Weight_k[k] + w <= Theta[k] and Volume_k[k] + v <= Phi[k]:
# Add
for p in P:
if DEM[c][p] > 0:
h_opt[p,i,c] = DEM[c][p]
m_opt[p,c,k] = DEM[c][p]
DEM_i[i][p] = DEM_i[i][p] + DEM[c][p]
fp = F_p[p]
if fp in Fw and k in Vd:
if (p,fp,k) in x_opt.keys():
x_opt[p,fp,k] = x_opt[p,fp,k] + DEM[c][p]
else:
x_opt[p,fp,k] = DEM[c][p]
Weight_i[i] = Weight_i[i] + w
Volume_i[i] = Volume_i[i] + v
Weight_k[k] = Weight_k[k] + w
Volume_k[k] = Volume_k[k] + v
z_opt[k,c] = 1
                # Delete customer from set (because it was assigned)
df_sc = df_sc[df_sc['customer'] != c]
if i in S and i not in openedS:
openedS.append(i)
u_opt[i] = 1
# Substract the opening cost
df_sc['servcost'] = np.where(df_sc['depot'] == i,
df_sc['servcost'] - epsil[i],
df_sc['servcost'])
if k not in usedK:
usedK.append(k)
y_opt[k] = 1
# Substract the opening cost
df_sc['servcost'] = np.where(df_sc['vehicle'] == k,
df_sc['servcost'] - delta[k],
df_sc['servcost'])
# #print('Customer %s added to depot %s' %(c,i))
else:
df_sc = df_sc[1:]
else:
df_sc = df_sc[1:]
# wm = df_sc['weight'].min()
# vm = df_sc['volume'].min()
# if Weight_i[i] == MinWeightDep[i] or Volume_i[i] == MinVolDep[i]:
# df_sc = df_sc[df_sc['depot'] != i]
# if Weight_k[k] == Theta[k] or Volume_k[k] == Phi[k]:
# df_sc = df_sc[df_sc['vehicle'] != k]
# Reorder by servingcost
df_sc = df_sc.sort_values(by='servcost', ascending = True).reset_index(drop=True)
    # Now that the satellites' product demands are known, assign distributor(s) to
    # each satellite as if they were customers
# Serving cost for products from firms with vehicles
# for s in openedS:
# #print(DEM_i[s])
# #print('Opened satellites = %s' % len(openedS))
ServCost = []
for f in FminFw:
for s in openedS:
for p in P_f[f]:
if DEM_i[s][p] > 0:
w = DEM_i[s][p]*omega[p]
we = DEM_i[s][p]*omegaeff[p]
v = DEM_i[s][p]*nu[p]
# for k in V_i[f]:
# gamma_kf = gamma[k]
# rho_kf = rho[k]
# sc = r[f,s]*(gamma_kf*w + rho_kf)
# ServCost.append([f, s, p, k, sc, v, w])
# gamma_kf = max([gamma[v] for v in V_i[f]])
# rho_kf = max([rho[v] for v in V_i[f]])
gamma_kf = 0
rho_kf = 0
for d in D:
for k in V_i[d]:
gamma_kd = gamma[k]
rho_kd = rho[k]
sc = r[f,d]*(gamma_kf*we + rho_kf) + r[d,s]*(gamma_kd*we + rho_kd)
ServCost.append([d, s, p, k, sc, v, w])
# Serving cost for products from firms without vehicles:
for f in Fw:
for s in openedS:
for p in P_f[f]:
if DEM_i[s][p] > 0:
w = DEM_i[s][p]*omega[p]
we = DEM_i[s][p]*omegaeff[p]
v = DEM_i[s][p]*nu[p]
for d in D:
for k in V_i[d]:
gamma_kd = gamma[k]
rho_kd = rho[k]
sc = r[f,d]*(gamma_kd*we + rho_kd) + r[d,s]*(gamma_kd*we + rho_kd)
if k not in usedK:
sc = sc + delta[k]
ServCost.append([d, s, p, k, sc, v, w])
df_sc = pd.DataFrame(data = ServCost, columns = ['depot','satellite','product','vehicle','servcost','volume','weight'])
df_sc = df_sc.sort_values(by='servcost', ascending = True).reset_index(drop=True)
df_sc['fixcostvehicle'] = [delta[v] for v in df_sc['vehicle'].tolist()]
df_sc['servcost'] = np.where(df_sc['vehicle'].isin(usedK), df_sc['servcost'], df_sc['servcost'] + df_sc['fixcostvehicle'])
while df_sc.shape[0] > 0:
# Always check first element in dataframe
i = int(df_sc.loc[0]['depot'])
s = int(df_sc.loc[0]['satellite'])
p = int(df_sc.loc[0]['product'])
k = int(df_sc.loc[0]['vehicle'])
w = int(df_sc.loc[0]['weight'])
v = int(df_sc.loc[0]['volume'])
if i in F:
condition1 = True
else:
condition1 = Weight_i[i] + w <= MinWeightDep[i] and Volume_i[i] + v <= MinVolDep[i]
# Add
condition2 = Weight_k[k] + w <= Theta[k] and Volume_k[k] + v <= Phi[k]
if condition1 and condition2:
# if DEM_i[s][p] == 0:
#print('Warning: s = %s and p = %s' % (s,p))
fp = F_p[p]
h_opt[p,i,s] = DEM_i[s][p]
# PATCH FOR MAKING PRODUCTS FROM FIRMS WITH VEHICLES APPEAR
if fp not in Fw:
m_opt[p,s,k] = 0
else:
m_opt[p,s,k] = DEM_i[s][p]
Weight_k[k] = Weight_k[k] + w
Volume_k[k] = Volume_k[k] + v
if i in D:
DEM_i[i][p] = DEM_i[i][p] + DEM_i[s][p]
Weight_i[i] = Weight_i[i] + w
Volume_i[i] = Volume_i[i] + v
if fp in Fw and k in Vd:
if (p,fp,k) in x_opt.keys():
x_opt[p,fp,k] = x_opt[p,fp,k] + DEM_i[s][p]
else:
x_opt[p,fp,k] = DEM_i[s][p]
if k not in usedK:
usedK.append(k)
y_opt[k] = 1
df_sc['servcost'] = df_sc['servcost'] - delta[k]
DEM_i[s][p] = 0
df_sc = df_sc[1:]
df_sc = df_sc[~((df_sc['satellite'] == s) & (df_sc['product'] == p))]
else:
df_sc = df_sc[1:]
wm = df_sc['weight'].min()
vm = df_sc['volume'].min()
if i in D:
if Weight_i[i] + wm > MinWeightDep[i] or Volume_i[i] + vm > MinVolDep[i]:
df_sc = df_sc[df_sc['depot'] != i]
if Weight_k[k] + wm > Theta[k] or Volume_k[k] + vm > Phi[k]:
df_sc = df_sc[df_sc['vehicle'] != k]
        # Drop the satellite from the set once all of its demand has been assigned
if sum([DEM_i[s][p_] for p_ in P]) < 1:
df_sc = df_sc[df_sc['satellite'] != s]
# Reorder by servingcost
df_sc = df_sc.sort_values(by='servcost', ascending = True).reset_index(drop=True)
cdv = {}
cdw = {}
for i in DcupS:
cdv[i] = Lambd[i] - Volume_i[i]
cdw[i] = Omega[i] - Weight_i[i]
return m_opt, u_opt, x_opt, y_opt, z_opt, cdv, cdw
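# Usage sketch: Steps1To3 produces the greedy assignment that seeds the routing
# phase. `data` is assumed to be the instance dictionary described in
# ExecuteMultiEchelon below.
#   m_opt, u_opt, x_opt, y_opt, z_opt, cdv, cdw = Steps1To3(data)
#   # u_opt: opened satellites, y_opt: used vehicles,
#   # cdv / cdw: remaining volume / weight capacity per depot or satellite.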
def ExecuteMultiEchelon(data, filename = None, preplots = False):
# Unpacking data
XY = data['XY']
F = data['F']
D = data['D']
S = data['S']
C = data['C']
P = data['P']
P_f = data['P_f']
K = data['K']
V_i = data['V_i']
DEM = data['DEM']
Lambd = data['Lambd']
Omega = data['Omega']
Phi = data['Phi']
Theta = data['Theta']
nu = data['nu']
omega = data['omega']
omegaeff = data['omegaeff']
rho = data['rho']
delta = data['delta']
gamma = data['gamma']
epsil = data['epsil']
r = data['r']
N = F+D+S+C
dictclass = {}
for f in F:
dictclass[f] = 'F'
for d in D:
dictclass[d] = 'D'
for s in S:
dictclass[s] = 'S'
for c in C:
dictclass[c] = 'C'
m_opt, u_opt, x_opt, y_opt, z_opt, cdv, cdw = Steps1To3(data)
solution = MultiEchelonRouting(data, x_opt, y_opt, m_opt, z_opt, u_opt)
# Unpack data from solution
q_final = solution['q_final']
qe_final = solution['qe_final']
v_final = solution['v_final']
w_final = solution['w_final']
y_final = {}
for k in K:
try:
y_final[k] = min(sum([w_final[i,j,k] for i in N for j in N]), 1)
except:
y_final[k] = 0
DictRoutes = solution['DictRoutes']
DictRoutesList = solution['DictRoutesList']
DictNodes = solution['DictNodes']
Theta_ = solution['Theta_']
Phi_ = solution['Phi_']
Q0_ = solution['Q0_']
DEMS_ = solution['DEMS_']
"""RETRIEVE ORIGINAL OBJECTIVE FUNCTION VALUE"""
# Aux
m_final = {}
# Patch m_final:
for i in N:
for j in N:
for k in K:
m_final[i,j,k] = m_opt.get((i,j,k),0)
# Patch w_final, q final, v_final:
for i in N:
for j in N:
for k in K:
if (i,j,k) not in q_final:
q_final[i,j,k] = 0
if (i,j,k) not in w_final:
w_final[i,j,k] = 0
if (i,j,k) not in v_final:
v_final[i,j,k] = 0
if preplots:
AuxSubPlot(data, w_final, figsize = (5,5), save = True, filename = filename)
u_final = {}
for s in S:
u_final[s] = u_opt.get(s,0)
# Cost of Satellites
SatCost = sum([u_final[s]*epsil[s] for s in S])
VehicCost = sum([y_final[k]*delta[k] for k in K])
ArcCost = sum([w_final[i,j,k]*r[i,j]*rho[k] for i in N for j in N for k in K])
FreightCost = sum([gamma[k]*qe_final[i,j,k]*r[i,j] for i in N for j in N for k in K])
Opt = SatCost + VehicCost + ArcCost + FreightCost
depots = {}
for i in D+S:
for k in V_i[i]:
depots[k] = i
##print('LOOP: START!')
# WORKING HERE
CurrentOpt = Opt
Vd = [item for sublist in [V_i[d] for d in D] for item in sublist]
n_iters = 3
iters = 1
tries = 1
while iters <= n_iters and tries < 2:
##print('Iter: %s, Try: %s' % (iters,tries))
RCVd = GetMaxRoutingCosts(N, Vd, depots, DictNodes, r, gamma, w_final, q_final)
# ADD FUNCTION FOR SORT RCVd dictionary by value
        ##print('PERMUTATIONS FROM KD TO KS')
solution_swapkdks = ImproveOptimalSwapKdKs(RCVd,
data,
cdv,
cdw,
DictRoutes,
DictRoutesList,
DictNodes,
DEMS_,
Q0_,
q_final,
qe_final,
v_final,
w_final,
Phi_,
Theta_,
depots)
# Unpack data
DictRoutes = solution_swapkdks['DictRoutes']
DictNodes = solution_swapkdks['DictNodes']
DEMS_ = solution_swapkdks['DEMS_']
Q0_ = solution_swapkdks['Q0_']
q_final = solution_swapkdks['q_final']
v_final = solution_swapkdks['v_final']
w_final = solution_swapkdks['w_final']
Phi_ = solution_swapkdks['Phi_']
Theta_ = solution_swapkdks['Theta_']
cdv = solution_swapkdks['cdv']
cdw = solution_swapkdks['cdw']
banlist = solution_swapkdks['banlist']
# Patch w_final, q final, v_final:
for i in N:
for j in N:
for k in K:
if (i,j,k) not in q_final:
q_final[i,j,k] = 0
if (i,j,k) not in w_final:
w_final[i,j,k] = 0
if (i,j,k) not in v_final:
v_final[i,j,k] = 0
# Get Dictionary with this structure: key = satellite, value = vehicle from D that visits that satellite
KminV_is = {}
for s in S:
KminV_is[s] = [k for k in K if k not in V_i[s]]
Sserv1 = [s for s in S if sum([w_final[s,j,k] for j in N for k in KminV_is[s]]) == 1]
DictSatKd = {}
for kd in Vd:
if DictNodes[kd]['S']:
for s in DictNodes[kd]['S']:
if s in Sserv1:
DictSatKd[s] = kd
Vs1 = [item for sublist in [V_i[s] for s in DictSatKd.keys()] for item in sublist]
RCVs = GetMaxRoutingCosts(N, Vs1, depots, DictNodes, r, gamma, w_final, q_final)
solution_swapkskd = ImproveOptimalSwapKsKd(RCVs,
data,
banlist,
DictSatKd,
cdv,
cdw,
DictRoutes,
DictRoutesList,
DictNodes,
DEMS_,
Q0_,
q_final,
v_final,
w_final,
Phi_,
Theta_,
depots)
DictRoutes = solution_swapkskd['DictRoutes']
DictNodes = solution_swapkskd['DictNodes']
DEMS_ = solution_swapkskd['DEMS_']
Q0_ = solution_swapkskd['Q0_']
q_final = solution_swapkskd['q_final']
v_final = solution_swapkskd['v_final']
w_final = solution_swapkskd['w_final']
Phi_ = solution_swapkskd['Phi_']
Theta_ = solution_swapkskd['Theta_']
cdv = solution_swapkskd['cdv']
cdw = solution_swapkskd['cdw']
banlist = solution_swapkskd['banlist']
# Patch w_final, q final, v_final:
for i in N:
for j in N:
for k in K:
if (i,j,k) not in q_final:
q_final[i,j,k] = 0
if (i,j,k) not in w_final:
w_final[i,j,k] = 0
if (i,j,k) not in v_final:
v_final[i,j,k] = 0
for s in S:
if sum(w_final[s,j,k] for j in N for k in V_i[s]) < 1:
u_final[s] = 0
else:
u_final[s] = 1
for k in K:
y_final[k] = max(min(sum([w_final[i,j,k] for i in N for j in N]),1),0)
SatCost = sum([u_final[s]*epsil[s] for s in S])
VehicCost = sum([y_final[k]*delta[k] for k in K])
ArcCost = sum([w_final[i,j,k]*r[i,j]*rho[k] for i in N for j in N for k in K])
FreightCost = sum([gamma[k]*q_final[i,j,k]*r[i,j] for i in N for j in N for k in K])
Opt = SatCost + VehicCost + ArcCost + FreightCost
### STEP FOR VEHICLES FROM FIRMS ###
iters = iters + 1
if Opt < CurrentOpt:
##print('####################### REPORT FOR ITER %s #######################' % iters)
##print('Number of satellites open: %s at cost %s' % (sum([u_final[s] for s in S]), SatCost))
##print('Number of vehicles used: %s at cost %s' % (sum([y_final[k] for k in K]), VehicCost))
##print('Arc cost: %s' % ArcCost)
##print('Freight cost: %s' % FreightCost)
##print('Optimal value for original O.F: %s' % Opt)
CurrentOpt = Opt
tries = 1
else:
tries = tries + 1
##print('####################### FINAL REPORT #######################')
for k in K:
y_final[k] = max(min(sum([w_final[i,j,k] for i in N for j in N]),1),0)
SatCost = sum([u_final[s]*epsil[s] for s in S])
VehicCost = sum([y_final[k]*delta[k] for k in K])
ArcCost = sum([w_final[i,j,k]*r[i,j]*rho[k] for i in N for j in N for k in K])
FreightCost = sum([gamma[k]*qe_final[i,j,k]*r[i,j] for i in N for j in N for k in K])
#print('Number of satellites open: %s at cost %s' % (sum([u_final[s] for s in S]), SatCost))
#print('Number of vehicles used: %s at cost %s' % (sum([y_final[k] for k in K]), VehicCost))
#print('Arc cost: %s' % ArcCost)
#print('Freight cost: %s' % FreightCost)
Opt = SatCost + VehicCost + ArcCost + FreightCost
#print('Optimal value for original O.F: %s' % Opt)
return q_final, w_final, u_final, y_final, DictRoutes, Opt
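# End-to-end sketch (hedged): `build_instance` is a placeholder for whatever routine
# assembles the instance dictionary with keys XY, F, D, S, C, P, P_f, K, V_i, DEM,
# Lambd, Omega, Phi, Theta, nu, omega, omegaeff, rho, delta, gamma, epsil, r and A.
#   data = build_instance(...)
#   q_final, w_final, u_final, y_final, DictRoutes, Opt = ExecuteMultiEchelon(
#       data, filename='run01', preplots=False)
#   print('Heuristic objective:', Opt)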
"""
FUNCTIONS FOR PLOTTING
"""
def PlotNodes(XY, F, D, S, C, figsize = (20,20)):
fig, ax = plt.subplots(figsize= figsize)
plt.scatter(XY[F,0],XY[F,1],color='red', label= 'Goods')
plt.scatter(XY[D,0], XY[D,1],color='blue', label = 'Delivery')
plt.scatter(XY[S,0], XY[S,1],color='green', label = 'Satellites')
plt.scatter(XY[C,0], XY[C,1],color='brown', label = 'Clients')
for i in S:
ax.annotate(i, (XY[i,0], XY[i,1]))
for i in C:
ax.annotate(i, (XY[i,0], XY[i,1]))
return fig, ax
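# Illustrative call (sketch), assuming `data` is the usual instance dictionary:
#   fig, ax = PlotNodes(data['XY'], data['F'], data['D'], data['S'], data['C'],
#                       figsize=(10, 10))
#   fig.savefig('nodes.png', dpi=150)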
def PlotAssignsSatCli(XY,F, D, S, C, model, figsize = (20,20)):
l, u = model.__data
N = F+D+S+C
DcupS = D + S
    FcupD = F + D
NminC = F + D + S
l_opt = model.getAttr('x', l)
u_opt = model.getAttr('x', u)
colors = {}
for s in NminC:
colors[s] = tuple(np.random.rand(3))
dictveh = {}
fig, ax = plt.subplots(figsize = figsize)
S_op = []
for s in S:
if u_opt[s] > 0:
S_op.append(s)
##print(S_op)
plt.scatter(XY[F,0],XY[F,1],color='red', label= 'Goods')
plt.scatter(XY[D,0], XY[D,1],color='blue', label = 'Delivery')
plt.scatter(XY[S_op,0], XY[S_op,1],color='green', label = 'Satellites')
plt.scatter(XY[C,0], XY[C,1],color='brown', label = 'Clients')
for s in NminC:
flag_v = True
for c in C:
if l_opt[s,c] > 0:
x1, x2 = XY[s,0], XY[c,0]
y1, y2 = XY[s,1], XY[c,1]
plt.plot([x1,x2],[y1,y2],
color = colors[s],
linestyle = 'dashed',
label = 'Satelite %s' % s if flag_v else "")
flag_v = False
for i in F:
ax.annotate(i, (XY[i,0], XY[i,1]))
for i in D:
ax.annotate(i, (XY[i,0], XY[i,1]))
for i in S:
ax.annotate(i, (XY[i,0], XY[i,1]))
for i in C:
ax.annotate(i, (XY[i,0], XY[i,1]))
plt.legend()
plt.show()
def PlotAssignsVehCli(XY,F, D, S, C, V_i, model, figsize = (20,20)):
y, z = model.__data
Vs = [item for sublist in [V_i[s] for s in S] for item in sublist]
Vd = [item for sublist in [V_i[d] for d in D] for item in sublist]
Vf = [item for sublist in [V_i[f] for f in F] for item in sublist]
VdcupVs = Vd + Vs
    VfcupVs = Vf + Vs
K = Vf + Vd + Vs
N = F+D+S+C
DcupS = D + S
    FcupD = F + D
NminC = F + D + S
z_opt = model.getAttr('x', z)
colors = {}
for s in NminC:
for k in V_i[s]:
colors[k] = tuple(np.random.rand(3))
dictveh = {}
for i in NminC:
for k in V_i[i]:
dictveh[k] = i
fig, ax = plt.subplots(figsize = figsize)
plt.scatter(XY[F,0],XY[F,1],color='red', label= 'Goods')
plt.scatter(XY[D,0], XY[D,1],color='blue', label = 'Delivery')
plt.scatter(XY[S,0], XY[S,1],color='green', label = 'Satellites')
plt.scatter(XY[C,0], XY[C,1],color='brown', label = 'Clients')
for k in K:
flag_v = True
for c in C:
try:
if z_opt[k,c] > 0:
s = dictveh[k]
x1, x2 = XY[s,0], XY[c,0]
y1, y2 = XY[s,1], XY[c,1]
plt.plot([x1,x2],[y1,y2],
color = colors[k],
linestyle = 'dashed',
label = 'Vehicle %s (%s)' % (k, dictveh[k]) if flag_v else "")
flag_v = False
except:
pass
for i in F:
ax.annotate(i, (XY[i,0], XY[i,1]))
for i in D:
ax.annotate(i, (XY[i,0], XY[i,1]))
for i in S:
ax.annotate(i, (XY[i,0], XY[i,1]))
for i in C:
ax.annotate(i, (XY[i,0], XY[i,1]))
plt.legend()
plt.show()
def AuxSubPlot(data, w_opt, figsize = (20,20), save = False, filename = 'test'):
# Unpacking data
XY = data['XY']
F = data['F']
D = data['D']
S = data['S']
C = data['C']
P = data['P']
P_f = data['P_f']
K = data['K']
V_i = data['V_i']
DEM = data['DEM']
Lambd = data['Lambd']
Omega = data['Omega']
Phi = data['Phi']
Theta = data['Theta']
nu = data['nu']
omega = data['omega']
rho = data['rho']
delta = data['delta']
gamma = data['gamma']
r = data['r']
X,Y = XY[:,0], XY[:,1]
label = ['Goods' for f in F] + ['Delivery' for d in D] + ['Satellite' for s in S] + ['Clients' for c in C]
n_label = [0 for f in F] + [1 for d in D] + [2 for s in S] + [3 for c in C]
colors_xy = ['red','blue','green','brown']
N = F + D + S + C
NminC = F + D + S
dictveh = {}
for i in NminC:
for k in V_i[i]:
dictveh[k] = i
K = [item for sublist in [V_i[i] for i in NminC] for item in sublist]
cmapp = cm.get_cmap('viridis', len(K))
colors = {}
for k in K:
if k % 2 == 0:
colors[k] = cmapp(k)
else:
colors[k] = cmapp(K[::-1][k])
plt.figure(figsize=figsize)
plt.scatter(X[F], Y[F], label = 'Firms', color = 'red')
plt.scatter(X[D], Y[D], label = 'Delivery', color = 'blue')
plt.scatter(X[S], Y[S], label = 'Satellites', color = 'green')
plt.scatter(X[C], Y[C], label = 'Clients', color = 'brown')
for i in range(XY.shape[0]):
x = X[i]
y = Y[i]
plt.text(x+0.3, y+0.3, i, fontsize=9)
for f in F:
for k in V_i[f]:
for i in N:
for j in N:
key = (i,j,k)
if key in w_opt:
if w_opt[key] > 0:
x1, x2 = XY[i,0], XY[j,0]
y1, y2 = XY[i,1], XY[j,1]
plt.plot([x1,x2],[y1,y2],
color = colors[k],
linestyle = 'dashed',
label = 'Vehicle %s (%s)' % (k, dictveh[k]) if i == dictveh[k] else "")
plt.legend()
plt.title('Vehicles from Firms')
if save:
plt.tight_layout()
plt.savefig('%s-firms.png' % filename, dpi = 250)
plt.figure(figsize=figsize)
plt.scatter(X[F], Y[F], label = 'Firms', color = 'red')
plt.scatter(X[D], Y[D], label = 'Delivery', color = 'blue')
plt.scatter(X[S], Y[S], label = 'Satellites', color = 'green')
plt.scatter(X[C], Y[C], label = 'Clients', color = 'brown')
for i in range(XY.shape[0]):
x = X[i]
y = Y[i]
plt.text(x+0.3, y+0.3, i, fontsize=9)
for d in D:
for k in V_i[d]:
for i in N:
for j in N:
key = (i,j,k)
if key in w_opt:
if w_opt[key] > 0:
x1, x2 = XY[i,0], XY[j,0]
y1, y2 = XY[i,1], XY[j,1]
plt.plot([x1,x2],[y1,y2],
color = colors[k],
linestyle = 'dashed',
label = 'Vehicle %s (%s)' % (k, dictveh[k]) if i == dictveh[k] else "")
plt.legend()
plt.title('Vehicles from Delivery')
if save:
plt.tight_layout()
plt.savefig('%s-delivery.png' % filename, dpi = 250)
plt.figure(figsize=figsize)
plt.scatter(X[F], Y[F], label = 'Firms', color = 'red')
plt.scatter(X[D], Y[D], label = 'Delivery', color = 'blue')
plt.scatter(X[S], Y[S], label = 'Satellites', color = 'green')
plt.scatter(X[C], Y[C], label = 'Clients', color = 'brown')
for i in range(XY.shape[0]):
x = X[i]
y = Y[i]
plt.text(x+0.3, y+0.3, i, fontsize=9)
for s in S:
for k in V_i[s]:
for i in N:
for j in N:
key = (i,j,k)
if key in w_opt:
if w_opt[key] > 0:
x1, x2 = XY[i,0], XY[j,0]
y1, y2 = XY[i,1], XY[j,1]
plt.plot([x1,x2],[y1,y2],
color = colors[k],
linestyle = 'dashed',
label = 'Vehicle %s (%s)' % (k, dictveh[k]) if i == dictveh[k] else "")
plt.legend()
plt.title('Vehicles from Satellite')
if save:
plt.tight_layout()
plt.savefig('%s-sat.png' % filename, dpi = 250)
plt.show()
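# Illustrative call (sketch; `w_final` as returned by ExecuteMultiEchelon):
#   AuxSubPlot(data, w_final, figsize=(8, 8), save=True, filename='run01')
# With save=True this writes run01-firms.png, run01-delivery.png and run01-sat.png.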
def RecoverOriginalqValues(data, DictRoutes):
DEM = data['DEM']
V_i = data['V_i']
S = data['S']
D = data['D']
F = data['F']
P = data['P']
P_f = data['P_f']
q_subp = {}
# vehicles from satellites
for s in S:
DEM[s] = np.zeros(len(P), dtype=int)
for k in V_i[s]:
if k in DictRoutes.keys():
if DictRoutes[k]:
# Cummulative demand for vehicle
sumdemand = np.zeros(len(P), dtype=int)
# Last node visited
for t in DictRoutes[k][::-1]:
i,j = t
if j != s:
for p in P:
sumdemand[p] = sumdemand[p] + DEM[j][p]
q_subp[p,i,j,k] = sumdemand[p]
DEM[s] = DEM[s] + sumdemand
DEM[s] = list(DEM[s])
# vehicles from delivery
for d in D:
DEM[d] = np.zeros(len(P), dtype=int)
for k in V_i[d]:
if k in DictRoutes.keys():
if DictRoutes[k]:
# Cummulative demand for vehicle
sumdemand = np.zeros(len(P), dtype=int)
# Last node visited
                    # Exclude freight for products that come from firms operating their own vehicles
for t in DictRoutes[k][::-1]:
i,j = t
if j != d:
if j not in F:
for p in P:
sumdemand[p] = sumdemand[p] + DEM[j][p]
q_subp[p,i,j,k] = sumdemand[p]
else:
PminPf = [p for p in P if p not in P_f[j]]
for p in P_f[j]:
q_subp[p,i,j,k] = 0
aux = max([value for key, value in q_subp.items() if key[0] == p and key[3] == k])
sumdemand[p] = sumdemand[p] - aux
for p in PminPf:
aux = max([value for key, value in q_subp.items() if key[0] == p and key[3] == k])
q_subp[p,i,j,k] = aux
DEM[d] = DEM[d] + sumdemand
DEM[d] = list(DEM[d])
# vehicles from firms
# for f in F:
# for k in V_i[f]:
# if k in DictRoutes.keys():
# if DictRoutes[k]:
# # Cummulative demand for vehicle
# sumdemand = np.zeros(len(P), dtype=int)
# # Last node visited
# for t in DictRoutes[k][::-1]:
# i,j = t
# if j != f:
# for p in P:
# sumdemand[p] = sumdemand[p] + DEM[j][p]
# q_subp[p,i,j,k] = sumdemand[p]
# for p in P
return q_subp
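# Hedged sketch: expand the aggregated arc freights back into per-product values
# before exporting, which is what SaveSolHeuristic below relies on:
#   q_subp = RecoverOriginalqValues(data, DictRoutes)
#   # q_subp[p, i, j, k] = units of product p carried on arc (i, j) by vehicle k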
def SaveSolHeuristic(data, file, dt, soldir, q_final, w_final, u_final, y_final, DictRoutes, Opt):
#Create Excell Writter
writer = pd.ExcelWriter(os.path.join(soldir, file), engine='xlsxwriter')
# Save solutions: q
q_final = RecoverOriginalqValues(data, DictRoutes)
dfq = []
for key, value in dict(q_final).items():
if value > 0:
# #print(key, value)
dfq.append([*key, value])
dfq = pd.DataFrame(data=dfq, columns=['p', 'i', 'j', 'k', 'q_final'])
dfq.to_excel(writer, index=False, sheet_name = "q")
# Save solutions: w
dfw = []
for key, value in dict(w_final).items():
if value > 0:
dfw.append([*key, value])
    dfw = pd.DataFrame(data=dfw, columns=['i', 'j', 'k', 'w_final'])
#!/usr/bin/env python
import os
import sys
import pandas as pd
from datetime import datetime
from distutils.dir_util import mkpath
import shutil
from collections import defaultdict
sys.path.append("..")
from model_utils.model import DeepSpeech2Model
from utils.yaml_loader import load_yaml_config
import model_utils.network as network
from data_utils.dataloader import SpecgramGenerator
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import ruamel.yaml as yaml
networks = {"network.deepspeech_orig" : network.deepspeech_orig,
"network.deepspeech_newbottleneck": network.deepspeech_newbottleneck}
def test(config):
model=networks[config["basic"]["model"]]
pretrained_model_path=config["basic"]["pt_model_path"]
device = config["basic"]["device"]
exp_root_dir=config["basic"]["exp_root_path"]
ds2_model_path=config["basic"]["ds2_model_path"]
pt_model_path = config["basic"]["pt_model_path"]
use_pt_model = config["basic"]["use_pt_model"]
augmentation_config_name=config["basic"]["augmentation_config_name"]
language_model_path=config["basic"]["language_model_path"]
vocab_filepath=config["basic"]["vocab_filepath"]
mean_std_filepath=config["basic"]["mean_std_filepath"]
batch_size=config["test"]["batch_size"]
    max_duration=config["test"]["max_duration"]
    min_duration=config["test"]["min_duration"]
segmented=config["test"]["segmented"]
num_workers=config["test"]["num_workers"]
test_csv= config["data"]["test_csv"]
test_dataset = SpecgramGenerator(manifest=os.path.join(exp_root_dir, "data", test_csv),
vocab_filepath=vocab_filepath,
mean_std_filepath=mean_std_filepath,
augmentation_config="{}",
max_duration=max_duration, #20,
min_duration=min_duration, # 3
segmented=segmented) # False
dataloader = DataLoader(test_dataset, batch_size=batch_size,
shuffle=False, num_workers=num_workers,
collate_fn=SpecgramGenerator.padding_batch)
vocab_list = ["'", ' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
ds2_model = DeepSpeech2Model(model=model,
ds2_model_path=ds2_model_path,
vocab_list=vocab_list,
device=device)
if use_pt_model and pretrained_model_path:
ds2_model.load_weights(pt_model_path)
ds2_model.init_ext_scorer(1.4, 0.35, language_model_path)
outputs = defaultdict(list)
beam_alpha=1.1
for i_batch, sample_batched in enumerate(dataloader):
batch_results = ds2_model.infer_batch_probs(infer_data=sample_batched)
batch_transcripts_beam = ds2_model.decode_batch_beam_search(
probs_split=batch_results,
beam_alpha=beam_alpha,
beam_beta=0.35,
beam_size=500,
cutoff_prob=1.0,
cutoff_top_n=40,
num_processes=6)
outputs["uttid"].extend(sample_batched["uttid"])
outputs["probs"].extend(batch_results)
outputs["asr"].extend(batch_transcripts_beam)
outputs["text"].extend(sample_batched["trans"])
    df = pd.DataFrame.from_dict(outputs)
import operator
import re
import warnings
import numpy as np
import pytest
from pandas._libs.sparse import IntIndex
import pandas.util._test_decorators as td
import pandas as pd
from pandas import isna
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
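# The fixture parametrizes tests over both sparse index layouts; a test would
# typically request it like this (sketch, not part of the original suite):
#   def test_kind_roundtrip(kind):
#       arr = SparseArray([0, 1, 0, 2], kind=kind)
#       assert arr.kind == kind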
class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3,
np.nan, 4, 5, np.nan, 6])
        self.arr = SparseArray(self.arr_data)
import logging
logging.basicConfig(level=logging.WARNING)
import pytest
import numpy
import os
import pypipegraph as ppg
import pandas as pd
from pathlib import Path
from pandas.testing import assert_frame_equal
import dppd
import dppd_plotnine # noqa:F401
from mbf_qualitycontrol.testing import assert_image_equal
from mbf_sampledata import get_sample_data
import mbf_genomics.regions as regions
from mbf_genomics.annotator import Constant, Annotator
from .shared import (
get_genome,
get_genome_chr_length,
force_load,
inside_ppg,
run_pipegraph,
RaisesDirectOrInsidePipegraph,
MockGenome,
)
dp, X = dppd.dppd()
@pytest.mark.usefixtures("new_pipegraph")
class TestGenomicRegionsLoadingPPGOnly:
def test_dependency_passing(self):
job = ppg.ParameterInvariant("sha", (None,))
a = regions.GenomicRegions("shu", lambda: None, [job], get_genome())
load_job = a.load()
assert job in load_job.lfg.prerequisites
def test_dependency_may_be_iterable_instead_of_list(self):
job = ppg.ParameterInvariant("shu", (None,))
a = regions.GenomicRegions("shu", lambda: None, (job,), get_genome())
load_job = a.load()
assert job in load_job.lfg.prerequisites
def test_depenencies_must_be_jobs(self):
ppg.ParameterInvariant("shu", (None,))
with pytest.raises(ValueError):
regions.GenomicRegions("shu", lambda: None, ["shu"], get_genome())
@pytest.mark.usefixtures("both_ppg_and_no_ppg")
class TestGenomicRegionsLoading:
def test_raises_on_duplicate_name(self, both_ppg_and_no_ppg):
def sample_data():
return pd.DataFrame(
{"chr": ["Chromosome"], "start": [1000], "stop": [1100]}
)
regions.GenomicRegions("shu", sample_data, [], get_genome())
if inside_ppg():
with pytest.raises(ValueError):
regions.GenomicRegions("shu", sample_data, [], get_genome())
both_ppg_and_no_ppg.new_pipegraph()
regions.GenomicRegions(
"shu", sample_data, [], get_genome()
) # should not raise
def test_raises_on_non_iterable_dependencies(self):
def sample_data():
return pd.DataFrame(
{"chr": ["Chromosome"], "start": [1000], "stop": [1100]}
)
with pytest.raises(ValueError):
regions.GenomicRegions("shu", sample_data, "aaeu", get_genome())
with pytest.raises(ValueError):
regions.GenomicRegions("shu", sample_data, 1, get_genome())
with pytest.raises(ValueError):
regions.GenomicRegions("shu", sample_data, iter([]), get_genome())
def test_loading(self):
def sample_data():
return pd.DataFrame(
{"chr": ["Chromosome"], "start": [1000], "stop": [1100]}
)
a = regions.GenomicRegions("sha", sample_data, [], get_genome())
if inside_ppg():
assert not hasattr(a, "df")
force_load(a.load())
else:
assert hasattr(a, "df")
run_pipegraph()
assert hasattr(a, "df")
assert len(a.df) == 1
assert "chr" in a.df.columns
assert "start" in a.df.columns
assert "stop" in a.df.columns
def test_filtering_copy_anno(self, clear_annotators):
import mbf_genomics
def sample_data():
return pd.DataFrame(
{
"chr": "Chromosome",
"start": [1000, 1001, 1002],
"stop": [1100, 1101, 1102],
}
)
a = regions.GenomicRegions(
"sha", sample_data, [], get_genome(), on_overlap="ignore"
)
b = a.filter("filtered", ("start", "==", 1001))
class CopyAnno(mbf_genomics.annotator.Annotator):
def __init__(self):
self.columns = ["copy"]
def calc(self, df):
return pd.DataFrame({"copy": df["start"]})
a += CopyAnno()
if inside_ppg():
assert not hasattr(a, "df")
force_load(a.load())
force_load(b.annotate())
else:
assert hasattr(a, "df")
run_pipegraph()
print(b.df)
assert (b.df["start"] == [1001]).all()
assert (b.df["copy"] == [1001]).all()
def test_raises_on_invalid_on_overlap(self):
def inner():
regions.GenomicRegions(
"shu",
lambda: None,
[],
get_genome(),
on_overlap="run in circles all about",
)
with pytest.raises(ValueError):
inner()
def test_magic(self):
def sample_data():
return pd.DataFrame(
{"chr": ["Chromosome"], "start": [1000], "stop": [1100]}
)
a = regions.GenomicRegions("shu", sample_data, [], get_genome())
hash(a)
str(a)
repr(a)
bool(a)
a.load()
run_pipegraph()
with pytest.raises(TypeError):
iter(a)
def test_loading_missing_start(self):
def sample_data():
return pd.DataFrame({"chr": "1", "stop": [1100]})
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions("sha", sample_data, [], get_genome())
force_load(a.load)
def test_loading_missing_chr(self):
def sample_data():
return pd.DataFrame({"start": [1000], "stop": [1100]})
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions("sha", sample_data, [], get_genome())
force_load(a.load)
def test_loading_missing_stop(self):
def sample_data():
return pd.DataFrame({"chr": "Chromosome", "start": [1200]})
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions("sha", sample_data, [], get_genome())
force_load(a.load)
def test_loading_raises_on_invalid_chromosome(self):
def sample_data():
return pd.DataFrame({"chr": ["1b"], "start": [1200], "stop": [1232]})
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions("sharum", sample_data, [], get_genome())
force_load(a.load)
def test_loading_raises_on_no_int_start(self):
def sample_data():
return pd.DataFrame(
{"chr": ["Chromosome"], "start": ["shu"], "stop": [1232]}
)
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions("sharum", sample_data, [], get_genome())
force_load(a.load)
def test_loading_raises_on_no_int_stop(self):
def sample_data():
return pd.DataFrame({"chr": ["Chromosome"], "start": [2], "stop": [20.0]})
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions("sharum", sample_data, [], get_genome())
force_load(a.load)
def test_loading_raises_on_no_str_chr(self):
def sample_data():
return pd.DataFrame({"chr": [1], "start": [2], "stop": [20]})
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions("sharum", sample_data, [], get_genome())
force_load(a.load)
def test_loading_raises_on_not_dataframe(self):
def sample_data():
return None
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions("sharum", sample_data, [], get_genome())
force_load(a.load)
def test_loading_raises_on_overlapping(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["Chromosome", "Chromosome"],
"start": [1000, 1010],
"stop": [1100, 1020],
}
)
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions(
"sha", sample_data, [], get_genome(), on_overlap="raise"
)
force_load(a.load)
def test_raises_on_negative_intervals(self):
def sample_data():
return pd.DataFrame(
{
"chr": [
"Chromosome",
"Chromosome",
"Chromosome",
"Chromosome",
"Chromosome",
],
"start": [10, 100, 1000, 10000, 100_000],
"stop": [8, 110, 1110, 11110, 111_110],
}
)
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions("sharum", sample_data, [], get_genome())
force_load(a.load)
def test_raises_on_overlapping_intervals(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "2", "1", "3", "5"],
"start": [10, 100, 1000, 10000, 100_000],
"stop": [1010, 110, 1110, 11110, 111_110],
}
)
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions("sharum", sample_data, [], get_genome())
force_load(a.load)
def test_index_reset(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["Chromosome"],
"start": [1000],
"stop": [1100],
"myindex": ["a"],
}
).set_index("myindex")
a = regions.GenomicRegions("sharum", sample_data, [], get_genome())
force_load(a.load)
run_pipegraph()
assert a.df.index == ["a"]
assert not "myindex" in a.df.columns
def test_merges_overlapping_intervals(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "2", "1", "3", "5"],
"start": [10, 100, 1000, 10000, 100_000],
"stop": [1010, 110, 1110, 11110, 111_110],
}
)
a = regions.GenomicRegions(
"shu", sample_data, [], get_genome_chr_length(), on_overlap="merge"
)
force_load(a.load)
run_pipegraph()
assert len(a.df) == 4
assert a.df.iloc[0]["start"] == 10
assert a.df.iloc[0]["stop"] == 1110
def test_merge_overlapping_with_function(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "2", "1", "3", "5"],
"start": [10, 100, 1000, 10000, 100_000],
"stop": [1010, 110, 1110, 11110, 111_110],
"pick_me": [1, 2, 3, 4, 5],
}
)
def merge_function(subset_df):
row = subset_df.iloc[0].to_dict()
row["pick_me"] = numpy.max(subset_df["pick_me"])
return row
a = regions.GenomicRegions(
"shu",
sample_data,
[],
get_genome_chr_length(),
on_overlap=("merge", merge_function),
)
force_load(a.load)
run_pipegraph()
assert len(a.df) == 4
assert a.df.iloc[0]["start"] == 10
assert a.df.iloc[0]["stop"] == 1110
assert a.df.iloc[0]["pick_me"] == 3
def test_merge_overlapping_with_function2(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "2", "1", "3", "5"],
"start": [10, 100, 1000, 10000, 100_000],
"stop": [1010, 110, 1110, 11110, 111_110],
"pick_me": [10, 2, 3, 4, 5],
}
)
def merge_function(subset_df):
row = subset_df.iloc[0].to_dict()
row["pick_me"] = numpy.max(subset_df["pick_me"])
return row
a = regions.GenomicRegions(
"shu",
sample_data,
[],
get_genome_chr_length(),
on_overlap=("merge", merge_function),
)
force_load(a.load)
run_pipegraph()
assert len(a.df) == 4
assert a.df.iloc[0]["start"] == 10
assert a.df.iloc[0]["stop"] == 1110
assert a.df.iloc[0]["pick_me"] == 10
def test_merge_overlapping_with_function_ignores_returned_intervals(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "2", "1", "3", "5"],
"start": [10, 100, 1000, 10000, 100_000],
"stop": [1010, 110, 1110, 11110, 111_110],
"pick_me": [1, 2, 3, 4, 5],
}
)
def merge_function(subset_df):
row = subset_df.iloc[0].to_dict()
row["start"] = 9000
row["pick_me"] = numpy.max(subset_df["pick_me"])
return row
a = regions.GenomicRegions(
"shu",
sample_data,
[],
get_genome_chr_length(),
on_overlap=("merge", merge_function),
)
force_load(a.load)
run_pipegraph()
assert len(a.df) == 4
assert a.df.iloc[0]["start"] == 10
assert a.df.iloc[0]["stop"] == 1110
assert a.df.iloc[0]["pick_me"] == 3
def test_merge_overlapping_with_function_raises_on_non_dict(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "2", "1", "3", "5"],
"start": [10, 100, 1000, 10000, 100_000],
"stop": [1010, 110, 1110, 11110, 111_110],
"pick_me": [1, 2, 3, 4, 5],
}
)
def merge_function(subset_df):
row = subset_df.iloc[0].copy()
row["pick_me"] = numpy.max(subset_df["pick_me"])
return row
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions(
"shu",
sample_data,
[],
get_genome_chr_length(),
on_overlap=("merge", merge_function),
)
force_load(a.load)
def test_merge_overlapping_with_function_raises_on_unknown_column(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "2", "1", "3", "5"],
"start": [10, 100, 1000, 10000, 100_000],
"stop": [1010, 110, 1110, 11110, 111_110],
"pick_me": [1, 2, 3, 4, 5],
}
)
def merge_function(subset_df):
row = subset_df.iloc[0][:]
row["does not exist"] = row["pick_me"]
return row
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions(
"shu",
sample_data,
[],
get_genome_chr_length(),
on_overlap=("merge", merge_function),
)
force_load(a.load)
def test_merge_overlapping_with_function_raises_on_missing(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "2", "1", "3", "5"],
"start": [10, 100, 1000, 10000, 100_000],
"stop": [1010, 110, 1110, 11110, 111_110],
"pick_me": [1, 2, 3, 4, 5],
}
)
def merge_function(subset_df):
row = subset_df.iloc[0]
del row["pick_me"]
return None
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions(
"shu",
sample_data,
[],
get_genome_chr_length(),
on_overlap=("merge", merge_function),
)
force_load(a.load)
def test_merges_overlapping_intervals_next_to_each_other(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["Chromosome", "Chromosome"],
"start": [10, 21],
"stop": [20, 100],
}
)
a = regions.GenomicRegions(
"shu", sample_data, [], get_genome(), on_overlap="merge"
)
force_load(a.load)
run_pipegraph()
assert (a.df["start"] == [10, 21]).all()
assert (a.df["stop"] == [20, 100]).all()
def test_merges_overlapping_earlier_overlaps_later_with_one_in_between(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["Chromosome", "Chromosome", "Chromosome", "Chromosome"],
"start": [3100, 3000, 3750, 4910],
"stop": [4900, 3500, 4000, 5000],
}
)
a = regions.GenomicRegions(
"shu", sample_data, [], get_genome(), on_overlap="merge"
)
force_load(a.load)
run_pipegraph()
assert (a.df["start"] == [3000, 4910]).all()
assert (a.df["stop"] == [4900, 5000]).all()
def test_merges_overlapping_intervals_multiple(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "2", "1", "3", "5", "1"],
"start": [10, 100, 1000, 10000, 100_000, 1005],
"stop": [1010, 110, 1110, 11110, 111_110, 2000],
}
)
a = regions.GenomicRegions(
"shu", sample_data, [], get_genome_chr_length(), on_overlap="merge"
)
force_load(a.load)
run_pipegraph()
assert len(a.df) == 4
assert a.df.iloc[0]["start"] == 10
assert a.df.iloc[0]["stop"] == 2000
def test_merges_overlapping_intervals_multiple_with_function(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "2", "1", "3", "5", "1"],
"start": [10, 100, 1000, 10000, 100_000, 1005],
"stop": [1010, 110, 1110, 11110, 111_110, 2000],
"pick_me": [23, 1234, 2, 4, 5, 50],
}
)
def merge_function(subset_df):
row = subset_df.iloc[0].to_dict()
row["pick_me"] = numpy.max(subset_df["pick_me"])
return row
a = regions.GenomicRegions(
"shu",
sample_data,
[],
get_genome_chr_length(),
on_overlap=("merge", merge_function),
)
force_load(a.load)
run_pipegraph()
assert len(a.df) == 4
assert a.df.iloc[0]["start"] == 10
assert a.df.iloc[0]["stop"] == 2000
assert a.df.iloc[0]["pick_me"] == 50
def test_on_overlap_drop(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "2", "1", "3", "5", "1", "5", "5", "5"],
"start": [10, 100, 1000, 10000, 100_000, 1005, 5000, 6049, 6000],
"stop": [1010, 110, 1110, 11110, 111_110, 2000, 5050, 6051, 6050],
}
)
a = regions.GenomicRegions(
"shu", sample_data, [], get_genome_chr_length(), on_overlap="drop"
)
force_load(a.load)
run_pipegraph()
assert len(a.df) == 4
assert a.df.iloc[0]["start"] == 100
assert a.df.iloc[0]["stop"] == 110
assert a.df.iloc[1]["start"] == 10000
assert a.df.iloc[1]["stop"] == 11110
assert a.df.iloc[2]["start"] == 5000
assert a.df.iloc[2]["stop"] == 5050
assert a.df.iloc[3]["start"] == 100_000
assert a.df.iloc[3]["stop"] == 111_110
def test_on_overlap_drop_nested(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "2", "1", "3", "5", "1", "5", "5", "5"],
"start": [10, 100, 1000, 10000, 100_000, 1005, 5000, 6049, 6000],
"stop": [1010, 110, 1110, 11110, 111_110, 2000, 5050, 6100, 6050],
}
)
a = regions.GenomicRegions(
"shu", sample_data, [], get_genome_chr_length(), on_overlap="drop"
)
force_load(a.load)
run_pipegraph()
assert len(a.df) == 4
assert a.df.iloc[0]["start"] == 100
assert a.df.iloc[0]["stop"] == 110
assert a.df.iloc[1]["start"] == 10000
assert a.df.iloc[1]["stop"] == 11110
assert a.df.iloc[2]["start"] == 5000
assert a.df.iloc[2]["stop"] == 5050
assert a.df.iloc[3]["start"] == 100_000
assert a.df.iloc[3]["stop"] == 111_110
def test_on_overlap_drop_empty(self):
def sample_data():
return pd.DataFrame({"chr": [], "start": [], "stop": []})
a = regions.GenomicRegions(
"shu", sample_data, [], get_genome_chr_length(), on_overlap="drop"
)
force_load(a.load)
run_pipegraph()
assert len(a.df) == 0
def test_on_overlap_ignore(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "2", "1", "3", "5", "1", "5"],
"start": [10, 100, 1000, 10000, 100_000, 1005, 5000],
"stop": [1010, 110, 1110, 11110, 111_110, 2000, 5050],
"index": ["a", "b", "c", "d", "e", "f", "g"],
"is_overlapping": 55,
}
).set_index("index")
a = regions.GenomicRegions(
"shu", sample_data, [], get_genome_chr_length(), on_overlap="ignore"
)
force_load(a.load)
run_pipegraph()
assert len(a.df) == 7
assert (a.df.index == ["a", "c", "f", "b", "d", "g", "e"]).all()
assert not (a.df.is_overlapping == 55).any() # make sure we drop the column
assert (
a.df.is_overlapping == [True, True, False, False, False, False, False]
).any()
def test_merging_apperantly_creating_negative_intervals(self):
def sample_data():
return pd.DataFrame(
{
"start": [
140_688_139,
140_733_871,
140_773_241,
140_792_022,
141_032_547,
141_045_565,
141_069_938,
141_075_938,
141_098_775,
141_108_518,
141_131_159,
-4423,
-4352,
-3398,
-3329,
-1770,
-1693,
-737,
3400,
-598,
],
"chr": [
str(x)
for x in [
"1",
"1",
"1",
"1",
"1",
"1",
"1",
"1",
"1",
"1",
"1",
"2",
"2",
"2",
"2",
"2",
"2",
"2",
"2",
"2",
]
],
"stop": [
140_767_241,
140_786_022,
141_026_547,
141_039_565,
141_064_437,
141_070_437,
141_092_775,
141_102_518,
141_125_159,
141_145_045,
141_213_431,
1577,
1648,
2602,
2671,
4230,
4307,
5263,
9400,
5402,
],
}
)
with RaisesDirectOrInsidePipegraph(
ValueError, "All starts need to be positive"
):
a = regions.GenomicRegions(
"shu", sample_data, [], get_genome_chr_length(), on_overlap="merge"
)
force_load(a.load)
def test_merge_test(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "1", "1", "3", "5"],
"start": [10, 10, 1000, 10000, 100_000],
"stop": [100, 100, 1110, 11110, 111_110],
}
)
a = regions.GenomicRegions(
"shu", sample_data, [], get_genome_chr_length(), on_overlap="merge"
)
force_load(a.load)
run_pipegraph()
assert len(a.df) == 4
assert a.df.iloc[0]["start"] == 10
assert a.df.iloc[0]["stop"] == 100
def test_merge_identical_ok(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "1", "1", "3", "5"],
"start": [10, 10, 1000, 10, 100_000],
"stop": [100, 100, 1110, 100, 111_110],
}
)
a = regions.GenomicRegions(
"shu",
sample_data,
[],
get_genome_chr_length(),
on_overlap="merge_identical",
)
force_load(a.load)
run_pipegraph()
assert len(a.df) == 4
assert a.df.iloc[0]["start"] == 10
assert a.df.iloc[0]["stop"] == 100
def test_merge_identical_raises(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "1", "1", "3", "5"],
"start": [10, 10, 1000, 10000, 100_000],
"stop": [100, 120, 1110, 11110, 111_110],
}
)
with RaisesDirectOrInsidePipegraph(ValueError):
a = regions.GenomicRegions(
"shu",
sample_data,
[],
get_genome_chr_length(),
on_overlap="merge_identical",
)
force_load(a.load)
def test_regions_merge_in_init_does_not_add_strand_if_it_was_missing(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "1", "1", "3", "5"],
"start": [10, 10, 1000, 10000, 100_000],
"stop": [100, 120, 1110, 11110, 111_110],
}
)
a = regions.GenomicRegions(
"shu", sample_data, [], get_genome_chr_length(), on_overlap="merge"
)
force_load(a.load())
run_pipegraph()
assert not "strand" in a.df.columns
def test_plot_job(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "2", "1", "3", "5"],
"start": [10, 100, 1000, 10000, 100_000],
"stop": [1010, 110, 1110, 11110, 111_110],
}
)
x = regions.GenomicRegions(
"shu", sample_data, [], get_genome_chr_length(), on_overlap="merge"
)
pj = x.plot(
"shu.png",
lambda df: dp(df)
.p9()
.add_scatter("chr", "start")
.pd, # also tests the write-to-result_dir_part
)
fn = "results/GenomicRegions/shu/shu.png"
if inside_ppg():
assert isinstance(pj[0], ppg.FileGeneratingJob)
assert pj[1].absolute() == Path(fn).absolute()
else:
assert str(pj[1]) == str(Path(fn).absolute())
run_pipegraph()
assert_image_equal(fn)
def test_plot_job_with_custom_calc_function(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "2", "1", "3", "5"],
"start": [10, 100, 1000, 10000, 100_000],
"stop": [1010, 110, 1110, 11110, 111_110],
}
)
x = regions.GenomicRegions(
"shu", sample_data, [], get_genome_chr_length(), on_overlap="merge"
)
def calc(df):
df = df.copy()
df = df.assign(shu=["shu"] * len(df))
return df
pj = x.plot(
Path("shu.png").absolute(),
lambda df: dp(df).p9().add_scatter("shu", "start"),
calc,
)
fn = str(Path("shu.png").absolute())
if inside_ppg():
assert isinstance(pj[0], ppg.FileGeneratingJob)
assert str(pj[1].absolute()) == fn
else:
assert str(pj[1]) == fn
run_pipegraph()
assert_image_equal(fn)
@pytest.mark.usefixtures("new_pipegraph")
class TestGenomicRegionsAnnotationDependencies:
def setUp(self):
def sample_data():
df = pd.DataFrame(
{
"chr": ["1", "2", "1", "3", "5"],
"start": [10, 100, 1000, 10000, 100_000],
"stop": [12, 110, 1110, 11110, 111_110],
}
)
df = df.assign(summit=df["start"] + (df["stop"] - df["start"]) / 2)
return df
self.a = regions.GenomicRegions("shu", sample_data, [], get_genome_chr_length())
def test_anno_job_depends_on_load(self):
self.setUp()
if inside_ppg():
assert not hasattr(self.a, "df")
else:
assert hasattr(self.a, "df")
ca = Constant("Constant", 5)
anno_job = self.a.add_annotator(ca)
assert isinstance(anno_job(), ppg.Job)
@pytest.mark.usefixtures("both_ppg_and_no_ppg")
class TestGenomicRegionsAnnotation:
def setUp(self):
def sample_data():
df = pd.DataFrame(
{
"chr": ["1", "2", "1", "3", "5"],
"start": [10, 100, 1000, 10000, 100_000],
"stop": [12, 110, 1110, 11110, 111_110],
}
)
df = df.assign(summit=df["start"] + (df["stop"] - df["start"]) / 2)
return df
self.a = regions.GenomicRegions("shu", sample_data, [], get_genome_chr_length())
def test_anno_jobs_are_singletonic(self):
self.setUp()
ca = Constant("Constant", 5)
assert len(self.a.annotators) == 1
anno_job = self.a.add_annotator(ca)
anno_job2 = self.a.add_annotator(ca)
assert anno_job is anno_job2
def test_anno_jobs_are_singletonic_across_names(self):
self.setUp()
ca = Constant("Constant", 5)
assert len(self.a.annotators) == 1
anno_job = self.a.add_annotator(ca)
cb = Constant("Constant", 5)
assert ca is cb
anno_job2 = self.a.add_annotator(cb)
assert anno_job is anno_job2
def test_has_annotator(self):
self.setUp()
ca = Constant("Constant", 5)
assert not self.a.has_annotator(ca)
self.a.add_annotator(ca)
assert self.a.has_annotator(ca)
def test_annotator_by_name(self):
self.setUp()
ca = Constant("Constant", 5)
assert not self.a.has_annotator(ca)
self.a.add_annotator(ca)
assert ca == self.a.get_annotator(ca.columns[0])
def test_anno_jobs_add_columns(self):
self.setUp()
ca = Constant("Constant", 5)
assert len(self.a.annotators) == 1
self.a.add_annotator(ca)
force_load(self.a.annotate(), "test_anno_jobs_add_columns")
run_pipegraph()
assert ca.columns[0] in self.a.df.columns
@pytest.mark.usefixtures("both_ppg_and_no_ppg")
class TestGenomicRegionsWriting:
def setUp(self):
def sample_data():
return pd.DataFrame(
{
"chr": ["1", "2", "1", "3", "5"],
"start": [10, 100, 1000, 10000, 100_000],
"stop": [11, 110, 1110, 11110, 111_110],
"name": ["a", "b", "c", "d", "e"],
"notname": ["A", "B", "C", "D", "E"],
"strand": [1, 1, 1, 1, -1],
}
)
self.a = regions.GenomicRegions("shu", sample_data, [], get_genome_chr_length())
self.sample_filename = str(Path("sample.dat").absolute())
try:
os.unlink(self.sample_filename)
except OSError:
pass
def test_write_bed(self):
self.setUp()
from mbf_fileformats.bed import read_bed
self.a.write_bed(self.sample_filename)
run_pipegraph()
assert len(self.a.df) > 0
read = read_bed(self.sample_filename)
assert len(read) == len(self.a.df)
assert read[0].refseq == b"1"
assert read[1].refseq == b"1"
assert read[2].refseq == b"2"
assert read[3].refseq == b"3"
assert read[4].refseq == b"5"
assert read[0].position == 10
assert read[1].position == 1000
assert read[2].position == 100
assert read[3].position == 10000
assert read[4].position == 100_000
assert read[0].length == 1
assert read[1].length == 110
assert read[2].length == 10
assert read[3].length == 1110
assert read[4].length == 11110
assert read[0].name == b"Noname"
def test_write_bed_with_name(self):
self.setUp()
from mbf_fileformats.bed import read_bed
self.a.write_bed(self.sample_filename, region_name="name")
run_pipegraph()
assert len(self.a.df) > 0
read = read_bed(self.sample_filename)
assert len(read) == len(self.a.df)
assert read[0].refseq == b"1"
assert read[1].refseq == b"1"
assert read[2].refseq == b"2"
assert read[3].refseq == b"3"
assert read[4].refseq == b"5"
assert read[0].position == 10
assert read[1].position == 1000
assert read[2].position == 100
assert read[3].position == 10000
assert read[4].position == 100_000
assert read[0].length == 1
assert read[1].length == 110
assert read[2].length == 10
assert read[3].length == 1110
assert read[4].length == 11110
assert read[0].name == b"a"
assert read[1].name == b"c"
assert read[2].name == b"b"
assert read[3].name == b"d"
assert read[4].name == b"e"
def test_write_bigbed_name_column(self):
self.setUp()
from mbf_fileformats.bed import read_bigbed
self.a.write_bigbed(self.sample_filename, "notname")
run_pipegraph()
assert len(self.a.df) > 0
read = read_bigbed(
self.sample_filename, {"1": 10000, "2": 20000, "3": 30000, "5": 500_000}
)
print(read)
should = self.a.df.reset_index(drop=True)
assert len(read) == len(should)
assert (read["chr"] == should["chr"]).all()
assert (read["start"] == should["start"]).all()
assert (read["stop"] == should["stop"]).all()
assert (read["strand"] == should["strand"]).all()
assert (
read["name"].str.upper() == should["name"].str.upper()
        ).all()  # bigbed seems to store uppercase names?
def test_write_bigbed(self):
self.setUp()
from mbf_fileformats.bed import read_bigbed
self.a.write_bigbed(self.sample_filename)
run_pipegraph()
assert len(self.a.df) > 0
read = read_bigbed(
self.sample_filename, {"1": 10000, "2": 20000, "3": 30000, "5": 500_000}
)
should = self.a.df.reset_index(drop=True)
print(read)
print(should)
assert len(read) == len(should)
assert (read["chr"] == should["chr"]).all()
assert (read["start"] == should["start"]).all()
assert (read["stop"] == should["stop"]).all()
assert (read["strand"] == should["strand"]).all()
def test_write_bed_with_name_column_not_found(self):
self.setUp()
with RaisesDirectOrInsidePipegraph(KeyError):
self.a.write_bed(self.sample_filename, region_name="name_not_found")
def test_write(self):
self.setUp()
self.a.write(self.sample_filename)
run_pipegraph()
assert len(self.a.df) > 0
df = pd.read_csv(self.sample_filename, sep="\t")
df["chr"] = df["chr"].astype(str)
assert_frame_equal(df, self.a.df.reset_index(drop=True), check_less_precise=2)
def test_write_without_filename(self):
self.setUp()
self.a.result_dir = Path("")
self.a.write()
run_pipegraph()
assert os.path.exists("shu.tsv")
os.unlink("shu.tsv")
def test_write_sorted(self):
self.setUp()
# sorting by chromosome means they would've been equal anyhow, since we
# internally sort by chr
self.a.write(self.sample_filename, lambda df: df.sort_values("start"))
run_pipegraph()
assert len(self.a.df) > 0
        df = pd.read_csv(self.sample_filename, sep="\t")
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 11 22:14:51 2021
@author: Allectus
"""
import os
import re
import copy
import pandas as pd
import tkinter as tk
import plotly.io as pio
import plotly.express as px
from tkinter import filedialog
from lxml import etree
#==============================================================================
def parse_asset_file(xmlfile, taglist, convert=True, collapse_diffs=True):
#Parses X4:Foundations asset xml files
#
#xmlfile: file path to desired input asset file
    #taglist: list of XML asset property tags to collect attributes for
    #convert: If True attribute values will be converted to floats
    #collapse_diffs: If True, xpaths found inside diff files (/diff/replace, /diff/add) are collapsed to the base asset xpath
xtree = etree.parse(xmlfile)
result = {}
for attr in taglist:
attr_element = xtree.find('//' + str(attr))
if attr_element is not None:
attr_path = xtree.getpath(attr_element)
if collapse_diffs:
attr_path = re.sub(r'/diff/(replace|add)', '', attr_path)
if attr_element is None:
attr_dict = {}
else:
attr_dict = {str(attr_path) + '/' + str(k):v for k,v in attr_element.attrib.items()}
if convert:
attr_dict = {k:float(v) for k,v in attr_dict.items()}
else:
attr_dict = {}
result.update(attr_dict)
return(result)
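#A minimal usage sketch (hypothetical file path, for illustration only): parse the
#'recharge' element of one shield macro. The returned dict maps each element xpath plus
#attribute name (e.g. '.../recharge/max') to a float value.
def example_parse_single_macro():
    example_file = 'unpacked/assets/props/SurfaceElements/macros/shield_arg_s_standard_01_mk1_macro.xml'
    recharge_attrs = parse_asset_file(example_file, taglist=['recharge'])
    for xpath, value in recharge_attrs.items():
        print(xpath, value)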
#------------------------------------------------------------------------------
def export_asset_xml_diff(outfilepath, attributes):
#Exports X4:Foundations asset diff xml files
#
#outfilepath: file path to desired output file
#attributes: dict of xpath:value to be exported in the diff file
outstr = '\n'.join(['<?xml version="1.0" encoding="utf-8"?>',
'<diff>',
' <replace sel="' +
'\n <replace sel="'.join([str(xpath)[:str(xpath).rfind('/') + 1] + '@' +
str(xpath)[str(xpath).rfind('/') + 1:] + '">' +
str(round(val,2)) + '</replace>'
for xpath,val in attributes.items()]),
'</diff>'])
os.makedirs(os.path.dirname(outfilepath), exist_ok=True)
with open(outfilepath, 'w') as outfile:
outfile.write(outstr)
return(True)
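#A minimal usage sketch (hypothetical xpath and output path): each dict entry becomes a
#line of the form <replace sel="parent-xpath/@attribute">value</replace> inside a <diff> root.
def example_write_single_diff():
    example_attrs = {'/macros/macro/properties/recharge/max': 1234.5}
    export_asset_xml_diff('extensions/my_mod/assets/props/SurfaceElements/macros/shield_example_macro.xml',
                          example_attrs)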
#------------------------------------------------------------------------------
def parse_resources(resources, asset_path, file_pattern, taglist):
#Collects and parses relevant X4:Foundations asset files based upon input filters
#
#resources: pd.DataFrame of available unpacked input directories, contains resources root
#asset_path: path to relevant directory for the specific asset, relative to resource root
#file_pattern: regex pattern to id files in asset path to retain
    #taglist: tags to extract from the identified input files
loc_resources = copy.deepcopy(resources)
#Find game files
loc_resources['assetdir'] = loc_resources.root.apply(lambda x: os.path.join(x, asset_path))
loc_resources['filelist'] = loc_resources.assetdir.apply(os.listdir)
loc_resources = loc_resources.explode('filelist', ignore_index=True)
#Filter out unwanted files (only keep appropriate xml files)
loc_resources.rename(columns={'filelist':'basefilename'}, inplace=True)
loc_resources['keep'] = loc_resources.basefilename.apply(lambda x: os.path.splitext(x)[1] == '.xml') & loc_resources.basefilename.str.contains(file_pattern)
loc_resources = loc_resources[loc_resources.keep].reset_index(drop=True)
loc_resources = loc_resources.drop('keep', axis=1)
loc_resources['fullpath'] = loc_resources.apply(lambda x: os.path.join(x['assetdir'], x['basefilename']), axis=1)
#Parse the discovered files
loc_resources = pd.concat([loc_resources, pd.DataFrame(list(loc_resources['fullpath'].apply(
lambda x: parse_asset_file(x, taglist=taglist, convert=True, collapse_diffs=True))))], axis=1)
return(loc_resources)
#------------------------------------------------------------------------------
def update_shields(resources, asset_path = 'assets/props/SurfaceElements/macros',
file_pattern=r'^shield.*', taglist = ['recharge']):
    #Identifies and modifies X4: Foundations shield files
#
#resources: pd.DataFrame of available unpacked input directories, contains resources root
#asset_path: path to relevant directory for the specific asset, relative to resource root
#file_pattern: regex pattern to id files in asset path to retain
    #taglist: tags to extract from the identified input files
shield_resources = parse_resources(resources=resources, asset_path=asset_path,
file_pattern=file_pattern, taglist=taglist)
#capture owner/size/type from filename
shield_metadata = shield_resources.basefilename.str.extract(r'(shield_)(.*)(_)(s|m|l|xl)(_)(.*)(_.*)(mk.)(.*)', expand=True)
shield_metadata = shield_metadata.rename(columns={1:'race', 3:'size', 5:'type', 7:'mk'})
shield_resources = pd.concat([shield_resources, shield_metadata[['race', 'size', 'type', 'mk']]], axis=1)
#colname look up table (to retain xpath in colname so we dont have to reshape to long format)
#gives 'tag_attrib': xpath
modified_cols = {}
cnm_init = {}
for tag in taglist:
colpattern = r'.*(/' + str(tag) + r'/).*'
cnm_init.update({str(tag)+'_'+str(c)[str(c).rfind('/')+1:] :c for c in shield_resources.columns if re.match(colpattern, c)})
vro_results = shield_resources[(shield_resources['source'] == 'vro')].reset_index()
base_results = shield_resources[(shield_resources['source'] == 'base')].reset_index()
    modified = pd.merge(vro_results, base_results, how='left', on=['race', 'size', 'type', 'mk'], suffixes=['_vro', '_base'])
"""
Summarize results for the train/valid/test splits.
# PROGRAM : metrics.py
# PURPOSE : compute model metrics on the test dataset
# AUTHOR : <NAME>
# EMAIL : <EMAIL>
# V1.0 : 05/05/2020 [<NAME>]
"""
import argparse
import numpy as np
import tensorflow as tf
import pandas as pd
import pathlib
try:
import efficientnet.tfkeras as efn
except Exception:
print(ImportError("\nWarning: run pip install -U --pre efficientnet"))
from tensorflow.keras.preprocessing.image import ImageDataGenerator
if __name__ == '__main__':
print("\nClassifiying wave breaking data, please wait...\n")
# Argument parser
parser = argparse.ArgumentParser()
# input model and history
parser.add_argument("--model", "-M",
nargs=1,
action="store",
dest="model",
required=True,
help="Input model in .h5 format.",)
parser.add_argument("--history", "-hist",
nargs=1,
action="store",
dest="history",
required=True,
help="Input model history in csv format.",)
# input test data
parser.add_argument("--data", "-data",
nargs=1,
action="store",
dest="data",
required=True,
help="Input path with image data.",)
parser.add_argument("--threshold", "-trx",
nargs=1,
action="store",
dest="TRX",
default=[0.5],
required=False,
help="Probability threshold for classification.")
parser.add_argument("--epoch", "-epch",
nargs=1,
action="store",
dest="epoch",
default=[-1],
required=False,
help="Which epoch to use. Default is last epoch.")
# output data
parser.add_argument("--output", "-o",
nargs=1,
action="store",
dest="output",
required=True,
help="Output file.",)
args = parser.parse_args()
# --- test data input ---
test_dir = args.data[0]
test_dir = pathlib.Path(test_dir)
image_count = len(list(test_dir.glob('*/*')))
epoch = int(args.epoch[0])
BATCH_SIZE = int(image_count/10)
class_names = np.array([item.name for item in test_dir.glob('*')])
try:
nclasses = len(class_names)
print(" Found image data, proceeding.\n")
print(" - Classes are {}".format(class_names))
except Exception:
raise IOError("Check your data!")
# --- pre-trained model ---
model = tf.keras.models.load_model(args.model[0])
history = pd.read_csv(args.history[0])
# train data
accuracy = history.iloc[epoch]["Binary_Accuracy"]
tp = history.iloc[epoch]["True_Positives"]
fp = history.iloc[epoch]["False_Positives"]
tn = history.iloc[epoch]["True_Negatives"]
fn = history.iloc[epoch]["False_Negatives"]
precision = history.iloc[epoch]["Precision"]
recall = history.iloc[epoch]["Recall"]
auc = history.iloc[epoch]["AUC"]
X = [accuracy, tp, fp, tn, fn, precision, recall, auc]
cols = ["Binary_Accuracy", "True_Positives", "False_Positives",
"True_Negatives", "False_Negatives", "Precision", "Recall", "AUC"]
df_train = pd.DataFrame([X], columns=cols)
df_train.index = ["Train"]
print(df_train)
# validation data
accuracy = history.iloc[epoch]["val_Binary_Accuracy"]
tp = history.iloc[epoch]["val_True_Positives"]
fp = history.iloc[epoch]["val_False_Positives"]
tn = history.iloc[epoch]["val_True_Negatives"]
fn = history.iloc[epoch]["val_False_Negatives"]
precision = history.iloc[epoch]["val_Precision"]
recall = history.iloc[epoch]["val_Recall"]
auc = history.iloc[epoch]["val_AUC"]
X = [accuracy, tp, fp, tn, fn, precision, recall, auc]
cols = ["Binary_Accuracy", "True_Positives", "False_Positives",
"True_Negatives", "False_Negatives", "Precision", "Recall", "AUC"]
df_val = pd.DataFrame([X], columns=cols)
df_val.index = ["Validation"]
print(df_val)
# evaluate the model on test data
inp_shape = model.input_shape
img_height = inp_shape[1] # image height for all images
img_width = inp_shape[2] # image width for all images
datagen = ImageDataGenerator(rescale=1./255.)
print("\n Fitting the teset data generator:\n")
data_gen_test = datagen.flow_from_directory(
directory=str(test_dir), batch_size=BATCH_SIZE, shuffle=False,
target_size=(img_height, img_width), classes=["0", "1"],
class_mode="binary")
result = model.evaluate(data_gen_test)
metrics = dict(zip(model.metrics_names, result))
# validation data
accuracy = metrics["Binary_Accuracy"]
tp = metrics["True_Positives"]
fp = metrics["False_Positives"]
tn = metrics["True_Negatives"]
fn = metrics["False_Negatives"]
precision = metrics["Precision"]
recall = metrics["Recall"]
auc = metrics["AUC"]
X = [accuracy, tp, fp, tn, fn, precision, recall, auc]
cols = ["Binary_Accuracy", "True_Positives", "False_Positives",
"True_Negatives", "False_Negatives", "Precision", "Recall", "AUC"]
    df_test = pd.DataFrame([X], columns=cols)
# -*- coding: utf-8 -*-
""" dati_selezione.ipynb
Extraction of data from ISS weekly covid-19 reports
https://www.epicentro.iss.it/coronavirus/aggiornamenti
See example pdf:
https://www.epicentro.iss.it/coronavirus/bollettino/Bollettino-sorveglianza-integrata-COVID-19_12-gennaio-2022.pdf
Requirements:
Python 3.6+, Ghostscript (ghostscript), Tkinter (python3-tk)
numpy, pandas, camelot, PyMuPDF, Beautiful Soup 4 """
import locale
import re
from datetime import datetime, timedelta
from os import chdir, path
from urllib import request
from urllib.parse import urljoin
import camelot
import fitz
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
def get_surveillance_reports():
"""get_surveillance_reports() -> list
return: list of "integrated surveillance of Covid-19 in Italy" reports"""
# Source of the ISS reports
epicentro_url = "https://www.epicentro.iss.it/coronavirus/aggiornamenti"
# Requests URL and get http.client.HTTPResponse object
with request.urlopen(epicentro_url) as response:
# Parse text obtained
soup = BeautifulSoup(response, "html.parser")
# Find all hyperlinks present on webpage
links = soup.find_all("a")
# The table is available since 14/07/2021
# The script has been updated to 2022-01-12 report
# for older reports than 2022-01-12 use "dati_selezione_old1.py" and "dati_ISS_complessivi_old1.csv"
# for older reports than 2021-11-10 use "dati_selezione_old.py and "dati_ISS_complessivi_old.csv"
cut_date = pd.to_datetime("2022-01-12")
cut_date_end = pd.to_datetime("2022-01-19")
return [urljoin(epicentro_url, link["href"]) for link in links
if "Bollettino-sorveglianza-integrata-COVID-19" in link["href"]
and date_from_url(link["href"], is_raw=False) >= cut_date
and (date_from_url(link["href"], is_raw=False) < cut_date_end)]
def page_from_url(sel_url, is_pop=False):
"""page_from_url(str, boolean) -> int
sel_url: url of the report
is_pop: choose between populations and general data
return: number of the page containing the table"""
query = "TABELLA A[0-9] - POPOLAZIONE DI RIFERIMENTO" if is_pop else \
"TABELLA [0-9] – NUMERO DI CASI DI COVID-19"
with request.urlopen(sel_url) as response:
content = response.read()
with fitz.open(stream=content, filetype="pdf") as pdf:
print("\nSearching for the selected table...")
# Query for string
for page in pdf:
text = page.get_text()
if re.search(query, text, re.IGNORECASE):
return page.number + 1
return None
def date_from_url(sel_url, is_raw=True):
"""date_from_url(str, boolean) -> datetime
sel_url: url of the report
is_raw: choose whether to return raw or translated date
return: datetime"""
date_ = re.findall(r"\d+[a-z-A-Z]+\d+", sel_url)[0]
return date_ if is_raw else datetime.strptime(date_, "%d-%B-%Y")
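# Note: the report URLs embed Italian month names ("12-gennaio-2022"), so the "%d-%B-%Y"
# parsing above only matches while an Italian locale is active, which is presumably why
# the locale module is imported. A guarded sketch (the locale name may vary by system):
def ensure_italian_locale():
    try:
        locale.setlocale(locale.LC_TIME, "it_IT.UTF-8")
    except locale.Error:
        # fall back silently; date_from_url(..., is_raw=True) still works without it
        pass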
def check_df(sel_df):
"""check_df(df) -> None
sel_df: dataframe
return: check if the table has at least 2 columns"""
error_msg = "Can't extract the table! DIY!"
if len(sel_df.columns) < 3:
# Table is incomplete, bye bye
print(error_msg)
exit()
def get_raw_table(sel_url, table):
"""get_raw_table(str, int) -> df
sel_url: url of the report
table: the page number of the table
return: raw dataframe"""
# Read the found page using camelot
tables = camelot.read_pdf(sel_url,
pages=f"{table}",
flavor="stream")
df_raw = tables[0].df
# Check if there are enough columns
if len(df_raw.columns) < 5:
if len(tables) >= 1:
df_raw = tables[1].df
check_df(df_raw)
return df_raw
def clean_raw_table(sel_df):
"""clean_raw_table(df) -> df
sel_df: raw dataframe
return: extract numerical data from the dataframe"""
# We are interested in the last 5 columns
columns_to_keep = sel_df.columns[-5:]
df_raw = sel_df[columns_to_keep]
# select rows containing numbers
selection = r"[0-9]"
df_raw = df_raw[df_raw[df_raw.columns[0]].str.match(selection)]
# Remove dots and parentheses
to_exclude = r"\((.*)|[^0-9]"
df_final = df_raw.replace(to_exclude, "", regex=True).apply(np.int64)
df_final.columns = ["non vaccinati",
"vaccinati 1 dose",
"vaccinati completo < x mesi",
"vaccinati completo > x mesi",
"vaccinati booster"]
# Merge immunized columns ("vaccinati completo < x mesi",
# "vaccinati completo > x mesi", "vaccinati booster") into one
idx = df_final.columns.tolist().index("vaccinati 1 dose")
vaccinati_completo = df_final.iloc[:, 2:].sum(axis=1)
df_final.insert(idx+1, "vaccinati completo", vaccinati_completo)
# Drop these columns
df_final.drop(["vaccinati completo < x mesi",
"vaccinati completo > x mesi"], axis=1, inplace=True)
df_final.reset_index(inplace=True, drop=True)
return df_final
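# Toy illustration of the column merge done in clean_raw_table above; mirroring that code,
# the booster column is included in the sum and also kept as its own column.
def example_merge_immunized_columns():
    toy = pd.DataFrame({"non vaccinati": [100],
                        "vaccinati 1 dose": [10],
                        "vaccinati completo < x mesi": [50],
                        "vaccinati completo > x mesi": [30],
                        "vaccinati booster": [20]})
    toy.insert(2, "vaccinati completo", toy.iloc[:, 2:].sum(axis=1))
    return toy.drop(["vaccinati completo < x mesi", "vaccinati completo > x mesi"], axis=1)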
def extract_data_from_raw(raw_df, to_df, sel_rows=None):
"""extract_data_from_raw(df, df, list) -> df, df
raw_df: raw dataframe
to_df: dataframe to update
sel_rows: selected raw df rows
return: processed dataframes"""
if sel_rows is None:
f_pop = "data_iss_età_%s.xlsx"
# Align hospitalizations/ti and deaths populations
# Get hospitalizations/ti populations from 2nd latest report
# Get deaths populations from 3rd latest report
date_osp = rep_date - timedelta(days=15)
df_hosp = pd.read_excel(f_pop % date_osp.date(), sheet_name="popolazioni")
date_dec = rep_date - timedelta(days=22)
df_deaths = pd.read_excel(f_pop % date_dec.date(), sheet_name="popolazioni")
# Get general data
results = np.concatenate((raw_df.iloc[4, :].values,
to_df.loc[date_osp].values[0:4],
to_df.loc[date_dec].values[0:4]))
# Build ages dataframe
# Merge df together
df_ = pd.concat([raw_df.iloc[:4, :5], df_hosp.iloc[:, 1:5], df_deaths.iloc[:, 1:5]], axis=1)
df_.columns = df_deaths.columns[1:]
df_.set_index(df_deaths["età"], inplace=True)
else:
# Get general data
results = raw_df.iloc[sel_rows, :].stack().values
# Get data by age
ages = ["12-39", "40-59", "60-79", "80+"]
rows_to_keep = np.arange(0, len(raw_df), 5)
results_ = {age: raw_df.iloc[rows_to_keep+i, :].stack().values
for i, age in enumerate(ages)}
# Build ages dataframe
        df_ = pd.DataFrame(results_)
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from blocktorch.pipelines.components.transformers.preprocessing import (
DropRowsTransformer,
)
def test_drop_rows_transformer_init():
drop_rows_transformer = DropRowsTransformer()
assert drop_rows_transformer.indices_to_drop is None
drop_rows_transformer = DropRowsTransformer(indices_to_drop=[0, 1])
assert drop_rows_transformer.indices_to_drop == [0, 1]
def test_drop_rows_transformer_init_with_duplicate_indices():
with pytest.raises(ValueError, match="All input indices must be unique."):
DropRowsTransformer(indices_to_drop=[0, 0])
def test_drop_rows_transformer_fit_transform():
X = pd.DataFrame({"a column": [1, 2, 3], "another col": [4, 5, 6]})
X_expected = X.copy()
drop_rows_transformer_none = DropRowsTransformer()
drop_rows_transformer_none.fit(X)
transformed = drop_rows_transformer_none.transform(X)
assert_frame_equal(X, transformed[0])
assert transformed[1] is None
indices_to_drop = [1, 2]
X_expected = pd.DataFrame({"a column": [1], "another col": [4]})
drop_rows_transformer = DropRowsTransformer(indices_to_drop=indices_to_drop)
drop_rows_transformer.fit(X)
transformed = drop_rows_transformer.transform(X)
assert_frame_equal(X_expected, transformed[0])
assert transformed[1] is None
drop_rows_transformer = DropRowsTransformer(indices_to_drop=indices_to_drop)
fit_transformed = drop_rows_transformer.fit_transform(X)
assert_frame_equal(fit_transformed[0], transformed[0])
assert fit_transformed[1] is None
def test_drop_rows_transformer_fit_transform_with_empty_indices_to_drop():
X = pd.DataFrame({"a column": [1, 2, 3], "another col": [4, 5, 6]})
y = pd.Series([1, 0, 1])
drop_rows_transformer = DropRowsTransformer(indices_to_drop=[])
fit_transformed = drop_rows_transformer.fit_transform(X)
assert_frame_equal(X, fit_transformed[0])
assert fit_transformed[1] is None
fit_transformed = drop_rows_transformer.fit_transform(X, y)
assert_frame_equal(X, fit_transformed[0])
assert_series_equal(y, fit_transformed[1])
def test_drop_rows_transformer_fit_transform_with_target():
X = pd.DataFrame({"a column": [1, 2, 3], "another col": [4, 5, 6]})
y = pd.Series([1, 0, 1])
X_expected = pd.DataFrame({"a column": [1], "another col": [4]})
y_expected = pd.Series([1])
drop_rows_transformer_none = DropRowsTransformer()
drop_rows_transformer_none.fit(X, y)
transformed = drop_rows_transformer_none.transform(X, y)
assert_frame_equal(X, transformed[0])
    assert_series_equal(y, transformed[1])
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Common utility function library for this program
Author: wking [http://wkings.net]
"""
import os
import statistics
import time
import datetime
import requests
import numpy as np
import pandas as pd
import threading
from queue import Queue
from retry import retry
# from rich.progress import track
# from rich import print
from tqdm import tqdm
import user_config as ucfg
# Debug print helper
def user_debug(print_str, print_value='', ):
    """First argument: the variable's name; second argument: the variable's value"""
if ucfg.debug:
if print_value:
print(str(print_str) + ' = ' + str(print_value))
else:
print(str(print_str))
# Convert TDX (通达信) daily .day files to CSV. TDX data files pack one record per 32 bytes.
def day2csv(source_dir, file_name, target_dir):
"""
    Convert a TDX daily .day file to CSV. TDX data files pack one record per 32 bytes.
    :param source_dir: str, directory containing the source file
    :param file_name: str, source file name
    :param target_dir: str, directory to write the CSV into
    :return: None
"""
from struct import unpack
from decimal import Decimal # 用于浮点数四舍五入
# 以二进制方式打开源文件
source_path = source_dir + os.sep + file_name # 源文件包含文件名的路径
source_file = open(source_path, 'rb')
buf = source_file.read() # 读取源文件保存在变量中
source_file.close()
source_size = os.path.getsize(source_path) # 获取源文件大小
source_row_number = int(source_size / 32)
# user_debug('源文件行数', source_row_number)
# 打开目标文件,后缀名为CSV
target_path = target_dir + os.sep + file_name[2:-4] + '.csv' # 目标文件包含文件名的路径
# user_debug('target_path', target_path)
if not os.path.isfile(target_path):
# 目标文件不存在。写入表头行。begin从0开始转换
target_file = open(target_path, 'w', encoding="utf-8") # 以覆盖写模式打开文件
header = str('date') + ',' + str('code') + ',' + str('open') + ',' + str('high') + ',' + str('low') + ',' \
+ str('close') + ',' + str('vol') + ',' + str('amount')
target_file.write(header)
begin = 0
end = begin + 32
row_number = 0
else:
# 不为0,文件有内容。行附加。
# 通达信数据32字节为一组,因此通达信文件大小除以32可算出通达信文件有多少行(也就是多少天)的数据。
# 再用readlines计算出目标文件已有多少行(目标文件多了首行标题行),(行数-1)*32 即begin要开始的字节位置
target_file = open(target_path, 'a+', encoding="gbk") # 以追加读写模式打开文件
# target_size = os.path.getsize(target_path) #获取目标文件大小
# 由于追加读写模式载入文件后指针在文件的结尾,需要先把指针改到文件开头,读取文件行数。
user_debug('当前指针', target_file.tell())
target_file.seek(0, 0) # 文件指针移到文件头
user_debug('移动指针到开头', target_file.seek(0, 0))
target_file_content = target_file.readlines() # 逐行读取文件内容
row_number = len(target_file_content) # 获得文件行数
user_debug('目标文件行数', row_number)
user_debug('目标文件最后一行的数据', target_file_content[-1])
target_file.seek(0, 2) # 文件指针移到文件尾
user_debug('移动指针到末尾', target_file.seek(0, 2))
if row_number > source_row_number:
user_debug('已是最新数据,跳过for循环')
else:
user_debug('追加模式,从' + str(row_number + 1) + '行开始')
if row_number == 0: # 如果文件出错是0的特殊情况
begin = 0
else:
row_number = row_number - 1 # 由于pandas的dataFrame格式索引从0开始,为下面for循环需要减1
begin = row_number * 32
end = begin + 32
for i in range(row_number, source_row_number):
# 由于pandas的dataFrame格式首行为标题行,第二行的索引从0开始,
# 因此转换出来显示的行数比原本少一行,但实际数据一致
#
# 将字节流转换成Python数据格式
# I: unsigned int
# f: float
# a[5]浮点类型的成交金额,使用decimal类四舍五入为整数
a = unpack('IIIIIfII', buf[begin:end])
# '\n' + str(i) + ','
# a[0] 将’19910404'样式的字符串转为'1991-05-05'格式的字符串。为了统一日期格式
a_date = str(a[0])[0:4] + '-' + str(a[0])[4:6] + '-' + str(a[0])[6:8]
file_name[2:-4]
line = '\n' + str(a_date) + ',' \
+ file_name[2:-4] + ',' \
+ str(a[1] / 100.0) + ',' \
+ str(a[2] / 100.0) + ',' \
+ str(a[3] / 100.0) + ',' \
+ str(a[4] / 100.0) + ',' \
+ str(a[6]) + ',' \
+ str(Decimal(a[5]).quantize(Decimal("1."), rounding="ROUND_HALF_UP"))
target_file.write(line)
begin += 32
end += 32
target_file.close()
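# A minimal sketch: decode one 32-byte TDX daily record with the same 'IIIIIfII' layout
# used by day2csv above. Prices are stored as integers scaled by 100; the last unsigned
# int is not used by day2csv (day2csv itself rounds the amount with Decimal ROUND_HALF_UP
# rather than round()).
def example_decode_one_day_record(buf32):
    from struct import unpack
    date_, open_, high, low, close, amount, vol, _unused = unpack('IIIIIfII', buf32)
    d = str(date_)
    return {'date': d[0:4] + '-' + d[4:6] + '-' + d[6:8],
            'open': open_ / 100.0, 'high': high / 100.0,
            'low': low / 100.0, 'close': close / 100.0,
            'vol': vol, 'amount': round(amount)}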
def get_TDX_blockfilecontent(filename):
"""
    Read a local TDX block (板块) file and return its content.
    :rtype: object
    :param filename: str, name of the input block file
    :return: DataFrame
"""
from pytdx.reader import block_reader, TdxFileNotFoundException
if ucfg.tdx['tdx_path']:
filepath = ucfg.tdx['tdx_path'] + os.sep + 'T0002' + os.sep + 'hq_cache' + os.sep + filename
df = block_reader.BlockReader().get_df(filepath)
else:
print("user_config文件的tdx_path变量未配置,或未找到" + filename + "文件")
return df
def get_lastest_stocklist():
"""
    Fetch the latest stock list from broker servers via pytdx.
    :return: DataFrame, the stock list
"""
import pytdx.hq
import pytdx.util.best_ip
print(f"优选通达信行情服务器 也可直接更改为优选好的 {{'ip': '172.16.58.3', 'port': 7709}}")
# ipinfo = pytdx.util.best_ip.select_best_ip()
api = pytdx.hq.TdxHq_API()
# with api.connect(ipinfo['ip'], ipinfo['port']):
with api.connect('172.16.58.3', 7709):
data = pd.concat([pd.concat(
[api.to_df(api.get_security_list(j, i * 1000)).assign(sse='sz' if j == 0 else 'sh') for i in
range(int(api.get_security_count(j) / 1000) + 1)], axis=0) for j in range(2)], axis=0)
data = data.reindex(columns=['sse', 'code', 'name', 'pre_close', 'volunit', 'decimal_point'])
data.sort_values(by=['sse', 'code'], ascending=True, inplace=True)
data.reset_index(drop=True, inplace=True)
# 这个方法不行 字符串不能运算大于小于,转成int更麻烦
# df = data.loc[((data['sse'] == 'sh') & ((data['code'] >= '600000') | (data['code'] < '700000'))) | \
# ((data['sse'] == 'sz') & ((data['code'] >= '000001') | (data['code'] < '100000'))) | \
# ((data['sse'] == 'sz') & ((data['code'] >= '300000') | (data['code'] < '309999')))]
sh_start_num = data[(data['sse'] == 'sh') & (data['code'] == '600000')].index.tolist()[0]
sh_end_num = data[(data['sse'] == 'sh') & (data['code'] == '706070')].index.tolist()[0]
sz00_start_num = data[(data['sse'] == 'sz') & (data['code'] == '000001')].index.tolist()[0]
sz00_end_num = data[(data['sse'] == 'sz') & (data['code'] == '100303')].index.tolist()[0]
sz30_start_num = data[(data['sse'] == 'sz') & (data['code'] == '300001')].index.tolist()[0]
sz30_end_num = data[(data['sse'] == 'sz') & (data['code'] == '395001')].index.tolist()[0]
df_sh = data.iloc[sh_start_num:sh_end_num]
df_sz00 = data.iloc[sz00_start_num:sz00_end_num]
df_sz30 = data.iloc[sz30_start_num:sz30_end_num]
df = pd.concat([df_sh, df_sz00, df_sz30])
df.reset_index(drop=True, inplace=True)
return df
def historyfinancialreader(filepath):
"""
    Read and parse historical financial data files from the TDX directory.
    :param filepath: str, path of the file to parse
    :return: DataFrame with the parsed financial file content
"""
import struct
cw_file = open(filepath, 'rb')
header_pack_format = '<1hI1H3L'
header_size = struct.calcsize(header_pack_format)
stock_item_size = struct.calcsize("<6s1c1L")
data_header = cw_file.read(header_size)
stock_header = struct.unpack(header_pack_format, data_header)
max_count = stock_header[2]
report_date = stock_header[1]
report_size = stock_header[4]
report_fields_count = int(report_size / 4)
report_pack_format = '<{}f'.format(report_fields_count)
results = []
for stock_idx in range(0, max_count):
cw_file.seek(header_size + stock_idx * struct.calcsize("<6s1c1L"))
si = cw_file.read(stock_item_size)
stock_item = struct.unpack("<6s1c1L", si)
code = stock_item[0].decode("utf-8")
foa = stock_item[2]
cw_file.seek(foa)
info_data = cw_file.read(struct.calcsize(report_pack_format))
data_size = len(info_data)
cw_info = list(struct.unpack(report_pack_format, info_data))
cw_info.insert(0, code)
results.append(cw_info)
df = pd.DataFrame(results)
return df
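# A minimal usage sketch (example file name; assumes the TDX financial files sit under
# <tdx_path>/vipdoc/cw/). Column 0 of the result holds the stock code; the remaining float
# columns follow the TDX financial-field order (per the comments in make_fq below, column
# 239 is treated there as the outstanding float-share field).
def example_read_financial_report():
    report_path = ucfg.tdx['tdx_path'] + os.sep + 'vipdoc' + os.sep + 'cw' + os.sep + 'gpcw20200930.dat'
    return historyfinancialreader(report_path)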
class ManyThreadDownload:
def __init__(self, num=10):
self.num = num # 线程数,默认10
self.url = '' # url
self.name = '' # 目标地址
self.total = 0 # 文件大小
# 获取每个线程下载的区间
def get_range(self):
ranges = []
offset = int(self.total / self.num)
for i in range(self.num):
if i == self.num - 1:
ranges.append((i * offset, ''))
else:
ranges.append(((i * offset), (i + 1) * offset - 1))
return ranges # [(0,99),(100,199),(200,"")]
# 通过传入开始和结束位置来下载文件
def download(self, ts_queue):
while not ts_queue.empty():
start_, end_ = ts_queue.get()
headers = {
'Range': 'Bytes=%s-%s' % (start_, end_),
'Accept-Encoding': '*'
}
flag = False
while not flag:
try:
# 设置重连次数
requests.adapters.DEFAULT_RETRIES = 10
# s = requests.session() # 每次都会发起一次TCP握手,性能降低,还可能因发起多个连接而被拒绝
# # 设置连接活跃状态为False
# s.keep_alive = False
# 默认stream=false,立即下载放到内存,文件过大会内存不足,大文件时用True需改一下码子
res = requests.get(self.url, headers=headers)
res.close() # 关闭请求 释放内存
except Exception as e:
print((start_, end_, "出错了,连接重试:%s", e,))
time.sleep(1)
continue
flag = True
# print("\n", ("%s-%s download success" % (start_, end_)), end="", flush=True)
# with lock:
with open(self.name, "rb+") as fd:
fd.seek(start_)
fd.write(res.content)
# self.fd.seek(start_) # 指定写文件的位置,下载的内容放到正确的位置处
# self.fd.write(res.content) # 将下载文件保存到 fd所打开的文件里
def run(self, url, name):
self.url = url
self.name = name
self.total = int(requests.head(url).headers['Content-Length'])
# file_size = int(urlopen(self.url).info().get('Content-Length', -1))
file_size = self.total
if os.path.exists(name):
first_byte = os.path.getsize(name)
else:
first_byte = 0
if first_byte >= file_size:
return file_size
self.fd = open(name, "wb") # 续传时直接rb+ 文件不存在时会报错,先wb再rb+
self.fd.truncate(self.total) # 建一个和下载文件一样大的文件,不是必须的,stream=True时会用到
self.fd.close()
# self.fd = open(self.name, "rb+") # 续传时ab方式打开时会强制指针指向文件末尾,seek并不管用,应用rb+模式
thread_list = []
ts_queue = Queue() # 用队列的线程安全特性,以列表的形式把开始和结束加到队列
for ran in self.get_range():
start_, end_ = ran
ts_queue.put((start_, end_))
for i in range(self.num):
t = threading.Thread(target=self.download, name='th-' + str(i), kwargs={'ts_queue': ts_queue})
t.setDaemon(True)
thread_list.append(t)
for t in thread_list:
t.start()
for t in thread_list:
t.join() # 设置等待,全部线程完事后再继续
self.fd.close()
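# A minimal usage sketch (placeholder URL and output file name):
def example_many_thread_download():
    downloader = ManyThreadDownload(num=10)
    downloader.run('http://example.com/somefile.zip', 'somefile.zip')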
@retry(tries=3, delay=3) # 无限重试装饰性函数
def dowload_url(url):
"""
    :param url: the URL to download
    :return: the requests.get response object
"""
import requests
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/87.0.4280.141',
}
response_obj = requests.get(url, headers=header, timeout=5) # get方式请求
response_obj.raise_for_status() # 检测异常方法。如有异常则抛出,触发retry
# print(f'{url} 下载完成')
return response_obj
def list_localTDX_cwfile(ext_name):
"""
    List the professional financial data files already present locally. Returns a list of file names.
    :param ext_name: str, file extension; only files with this extension are returned
    :return: list of professional financial data file names
"""
cw_path = ucfg.tdx['tdx_path'] + os.sep + "vipdoc" + os.sep + "cw"
tmplist = os.listdir(cw_path) # 遍历通达信vipdoc/cw目录
cw_filelist = []
for file in tmplist: # 只保留gpcw????????.扩展名 格式文件
if len(file) == 16 and file[:4] == "gpcw" and file[-4:] == "." + ext_name:
cw_filelist.append(file)
# print(f'检测到{len(cw_filelist)}个专业财务文件')
return cw_filelist
def readall_local_cwfile():
"""
    Read all financial report files into the df_cw dict. Uses about 1 GB of RAM, but is much faster than iterating over the CSV files.
    :return: dict holding the content of every financial report
"""
print(f'开始载入所有财报文件到内存')
dict = {}
cwfile_list = os.listdir(ucfg.tdx['csv_cw']) # cw目录 生成文件名列表
starttime_tick = time.time()
for cwfile in cwfile_list:
if os.path.getsize(ucfg.tdx['csv_cw'] + os.sep + cwfile) != 0:
dict[cwfile[4:-4]] = pd.read_pickle(ucfg.tdx['csv_cw'] + os.sep + cwfile, compression=None)
print(f'读取所有财报文件完成 用时{(time.time() - starttime_tick):.2f}秒')
return dict
def make_fq(code, df_code, df_gbbq, df_cw='', start_date='', end_date='', fqtype='qfq'):
"""
    Adjust a stock's daily bar data for corporate actions (复权).
    :param code: str, the stock code, e.g. '600030'
    :param df_code: DataFrame, the stock's unadjusted daily bars; auto-generated integer index, columns: date, open, high, low, close, vol, amount
    :param df_gbbq: DataFrame, the share-capital-change (股本变迁) data for all stocks and dates exported from TDX; the gbbq file must be read with dtype={'code': str}, otherwise leading zeros of stock codes are dropped
    :param df_cw: DF format, all financial report files already read into memory
    :param start_date: optional, start date of the slice to keep; empty by default; format "2020-10-10"
    :param end_date: optional, end date of the slice to keep; empty by default; format "2020-10-10"
    :param fqtype: optional, adjustment type; defaults to forward adjustment (前复权)
    :return: DataFrame of the adjusted daily bar data
"""
'''以下是从https://github.com/rainx/pytdx/issues/78#issuecomment-335668322 提取学习的前复权代码
import datetime
import numpy as np
import pandas as pd
from pytdx.hq import TdxHq_API
# from pypinyin import lazy_pinyin
import tushare as ts
'除权除息'
api = TdxHq_API()
with api.connect('172.16.17.32', 7709):
# 从服务器获取该股的股本变迁数据
category = {
'1': '除权除息', '2': '送配股上市', '3': '非流通股上市', '4': '未知股本变动', '5': '股本变化',
'6': '增发新股', '7': '股份回购', '8': '增发新股上市', '9': '转配股上市', '10': '可转债上市',
'11': '扩缩股', '12': '非流通股缩股', '13': '送认购权证', '14': '送认沽权证'}
data = api.to_df(api.get_xdxr_info(0, '000001'))
data = data \
.assign(date=pd.to_datetime(data[['year', 'month', 'day']])) \
.drop(['year', 'month', 'day'], axis=1) \
.assign(category_meaning=data['category'].apply(lambda x: category[str(x)])) \
.assign(code=str('000001')) \
.rename(index=str, columns={'panhouliutong': 'liquidity_after',
'panqianliutong': 'liquidity_before', 'houzongguben': 'shares_after',
'qianzongguben': 'shares_before'}) \
.set_index('date', drop=False, inplace=False)
xdxr_data = data.assign(date=data['date'].apply(lambda x: str(x)[0:10])) # 该股的股本变迁DF处理完成
df_gbbq = xdxr_data[xdxr_data['category'] == 1] # 提取只有除权除息的行保存到DF df_gbbq
# print(df_gbbq)
# 从服务器读取该股的全部历史不复权K线数据,保存到data表, 只包括 日期、开高低收、成交量、成交金额数据
data = pd.concat([api.to_df(api.get_security_bars(9, 0, '000001', (9 - i) * 800, 800)) for i in range(10)], axis=0)
# 从data表加工数据,保存到bfq_data表
df_code = data \
.assign(date=pd.to_datetime(data['datetime'].apply(lambda x: x[0:10]))) \
.assign(code=str('000001')) \
.set_index('date', drop=False, inplace=False) \
.drop(['year', 'month', 'day', 'hour',
'minute', 'datetime'], axis=1)
df_code['if_trade'] = True
# 不复权K线数据处理完成,保存到bfq_data表
# 提取info表的category列的值,按日期一一对应,列拼接到bfq_data表。也就是标识出当日是除权除息日的行
data = pd.concat([df_code, df_gbbq[['category']][df_code.index[0]:]], axis=1)
# print(data)
data['date'] = data.index
data['if_trade'].fillna(value=False, inplace=True) # if_trade列,无效的值填充为False
data = data.fillna(method='ffill') # 向下填充无效值
# 提取info表的'fenhong', 'peigu', 'peigujia',‘songzhuangu'列的值,按日期一一对应,列拼接到data表。
# 也就是将当日是除权除息日的行,对应的除权除息数据,写入对应的data表的行。
data = pd.concat([data, df_gbbq[['fenhong', 'peigu', 'peigujia',
'songzhuangu']][df_code.index[0]:]], axis=1)
data = data.fillna(0) # 无效值填空0
data['preclose'] = (data['close'].shift(1) * 10 - data['fenhong'] + data['peigu']
* data['peigujia']) / (10 + data['peigu'] + data['songzhuangu'])
data['adj'] = (data['preclose'].shift(-1) / data['close']).fillna(1)[::-1].cumprod() # 计算每日复权因子
data['open'] = data['open'] * data['adj']
data['high'] = data['high'] * data['adj']
data['low'] = data['low'] * data['adj']
data['close'] = data['close'] * data['adj']
data['preclose'] = data['preclose'] * data['adj']
data = data[data['if_trade']]
result = data \
.drop(['fenhong', 'peigu', 'peigujia', 'songzhuangu', 'if_trade', 'category'], axis=1)[data['open'] != 0] \
.assign(date=data['date'].apply(lambda x: str(x)[0:10]))
print(result)
'''
# 先进行判断。如果有adj列,且没有NaN值,表示此股票数据已处理完成,无需处理。直接返回。
# 如果没有‘adj'列,表示没进行过复权处理,当作新股处理
if 'adj' in df_code.columns.to_list():
if True in df_code['adj'].isna().to_list():
first_index = np.where(df_code.isna())[0][0] # 有NaN值,设为第一个NaN值所在的行
else:
return ""
else:
first_index = 0
flag_newstock = True
flag_attach = False # True=追加数据模式 False=数据全部重新计算
# 设置新股标志。True=新股,False=旧股。新股跳过追加数据部分的代码。如果没定义,默认为False
if 'flag_newstock' not in dir():
flag_newstock = False
# 提取该股除权除息行保存到DF df_cqcx,提取其他信息行到df_gbbq
df_cqcx = df_gbbq.loc[(df_gbbq['code'] == code) & (df_gbbq['类别'] == '除权除息')]
df_gbbq = df_gbbq.loc[(df_gbbq['code'] == code) & (
(df_gbbq['类别'] == '股本变化') |
(df_gbbq['类别'] == '送配股上市') |
(df_gbbq['类别'] == '转配股上市'))]
# 清洗df_gbbq,可能出现同一日期有 配股上市、股本变化两行数据。不清洗后面合并会索引冲突。
# 下面的代码可以保证删除多个不连续的重复行,用DF dropdup方法不能确保删除的值是大是小
# 如果Ture在列表里。表示有重复行存在
if True in df_gbbq.duplicated(subset=['权息日'], keep=False).to_list():
# 提取重复行的索引
del_index = [] # 要删除的后流通股的值
tmp_dict = df_gbbq.duplicated(subset=['权息日'], keep=False).to_dict()
for k, v in tmp_dict.items():
if v:
del_index.append(df_gbbq.at[k, '送转股-后流通盘'])
# 如果dup_index有1个以上的值,且K+1的元素是False,或K+1不存在也返回False,表示下一个元素 不是 重复行
if len(del_index) > 1 and (tmp_dict.get(k + 1, False) == False):
del_index.remove(max(del_index)) # 删除最大值
# 选择剩余的值,取反,则相当于保留了最大值,删除了其余的值
df_gbbq = df_gbbq[~df_gbbq['送转股-后流通盘'].isin(del_index)]
# int64类型储存的日期19910404,转换为dtype: datetime64[ns] 1991-04-04 为了按日期一一对应拼接
df_cqcx = df_cqcx.assign(date=pd.to_datetime(df_cqcx['权息日'], format='%Y%m%d')) # 添加date列,设置为datetime64[ns]格式
df_cqcx.set_index('date', drop=True, inplace=True) # 设置权息日为索引 (字符串表示的日期 "19910101")
df_cqcx['category'] = 1.0 # 添加category列
df_gbbq = df_gbbq.assign(date=pd.to_datetime(df_gbbq['权息日'], format='%Y%m%d')) # 添加date列,设置为datetime64[ns]格式
df_gbbq.set_index('date', drop=True, inplace=True) # 设置权息日为索引 (字符串表示的日期 "19910101")
if len(df_cqcx) > 0: # =0表示股本变迁中没有该股的除权除息信息。gbbq_lastest_date设置为今天,当作新股处理
cqcx_lastest_date = df_cqcx.index[-1].strftime('%Y-%m-%d') # 提取最新的除权除息日
else:
cqcx_lastest_date = str(datetime.date.today())
flag_newstock = True
# 判断df_code是否已有历史数据,是追加数据还是重新生成。
# 如果gbbq_lastest_date not in df_code.loc[first_index:, 'date'].to_list(),表示未更新数据中不包括除权除息日
# 由于前复权的特性,除权后历史数据都要变。因此未更新数据中不包括除权除息日,只需要计算未更新数据。否则日线数据需要全部重新计算
# 如果'adj'在df_code的列名单里,表示df_code是已复权过的,只需追加新数据,否则日线数据还是需要全部重新计算
if cqcx_lastest_date not in df_code.loc[first_index:, 'date'].to_list() and not flag_newstock:
if 'adj' in df_code.columns.to_list():
flag_attach = True # 确定为追加模式
df_code_original = df_code # 原始code备份为df_code_original,最后合并
df_code = df_code.iloc[first_index:] # 切片df_code,只保留需要处理的行
df_code.reset_index(drop=True, inplace=True)
df_code_original.dropna(how='any', inplace=True) # 丢掉缺失数据的行,之后直接append新数据就行。比merge简单。
df_code_original['date'] = pd.to_datetime(df_code_original['date'], format='%Y-%m-%d') # 转为时间格式
df_code_original.set_index('date', drop=True, inplace=True) # 时间为索引。方便与另外复权的DF表对齐合并
# 单独提取流通股处理。因为流通股是设置流通股变更时间节点,最后才填充nan值。和其他列的处理会冲突。
# 如果有流通股列,单独复制出来;如果没有流通股列,添加流通股列,赋值为NaN。
# 如果是追加数据模式,则肯定已存在流通股列且数据已处理。因此不需单独提取流通股列。只在前复权前处理缺失的流通股数据即可
# 虽然财报中可能没有流通股的数据,但股本变迁文件中最少也有股票第一天上市时的流通股数据。
# 且后面还会因为送配股上市、股本变化,导致在非财报日之前,流通股就发生变动
if not flag_attach:
if '流通股' in df_code.columns.to_list():
df_ltg = pd.DataFrame(index=df_code.index)
df_ltg['date'] = df_code['date']
df_ltg['流通股'] = df_code['流通股']
del df_code['流通股']
else:
df_ltg = pd.DataFrame(index=df_code.index)
df_ltg['date'] = df_code['date']
df_ltg['流通股'] = np.nan
else:
# 附加模式,此处df_code是已经切片过的,只包括需要更新的数据行。其中也包含流通股列,值全为NaN。
# 类似单独提出处理流通股列,和新股模式的区别是只处理需要更新的数据行。
df_ltg = pd.DataFrame(index=df_code.index)
del df_code['流通股']
# 第一个值赋值为df_code_original流通股列第一个NaN值的前一个有效值
ltg_lastest_value = df_code_original.at[df_code_original.index[-1], '流通股']
df_ltg['date'] = df_code['date']
df_ltg['流通股'] = np.nan
df_ltg.at[0, '流通股'] = ltg_lastest_value
df_gbbq = df_gbbq.rename(columns={'送转股-后流通盘': '流通股'}) # 列改名,为了update可以匹配
# 用df_gbbq update data,由于只有流通股列重复,因此只会更新流通股列对应索引的NaN值
df_ltg['date'] = pd.to_datetime(df_ltg['date'], format='%Y-%m-%d') # 转为时间格式
df_ltg.set_index('date', drop=True, inplace=True) # 时间为索引。方便与另外复权的DF表对齐合并
df_ltg.update(df_gbbq, overwrite=False) # 使用update方法更新df_ltg
if not flag_attach: # 附加模式则单位已经调整过,无需再调整
# 股本变迁里的流通股单位是万股。转换与财报的单位:股 统一
df_ltg['流通股'] = df_ltg['流通股'] * 10000
# int64类型储存的日期19910404,转换为dtype: datetime64[ns] 1991-04-04 为了按日期一一对应拼接
with pd.option_context('mode.chained_assignment', None): # 临时屏蔽语句警告
df_code['date'] = pd.to_datetime(df_code['date'], format='%Y-%m-%d')
df_code.set_index('date', drop=True, inplace=True)
df_code.insert(df_code.shape[1], 'if_trade', True) # 插入if_trade列,赋值True
# 提取df_cqcx和df_gbbq表的category列的值,按日期一一对应,列拼接到bfq_data表。也就是标识出当日是股本变迁的行
data = pd.concat([df_code, df_cqcx[['category']][df_code.index[0]:]], axis=1)
# print(data)
data['if_trade'].fillna(value=False, inplace=True) # if_trade列,无效的值填充为False
data.fillna(method='ffill', inplace=True) # 向下填充无效值
# 提取info表的'fenhong', 'peigu', 'peigujia',‘songzhuangu'列的值,按日期一一对应,列拼接到data表。
# 也就是将当日是除权除息日的行,对应的除权除息数据,写入对应的data表的行。
data = pd.concat([data, df_cqcx[['分红-前流通盘', '配股-后总股本', '配股价-前总股本',
'送转股-后流通盘']][df_code.index[0]:]], axis=1)
data = data.fillna(0) # 无效值填空0
data['preclose'] = (data['close'].shift(1) * 10 - data['分红-前流通盘'] + data['配股-后总股本']
* data['配股价-前总股本']) / (10 + data['配股-后总股本'] + data['送转股-后流通盘'])
# 计算每日复权因子 前复权最近一次股本变迁的复权因子为1
data['adj'] = (data['preclose'].shift(-1) / data['close']).fillna(1)[::-1].cumprod()
data['open'] = data['open'] * data['adj']
data['high'] = data['high'] * data['adj']
data['low'] = data['low'] * data['adj']
data['close'] = data['close'] * data['adj']
# data['preclose'] = data['preclose'] * data['adj'] # 这行没用了
data = data[data['if_trade']] # 重建整个表,只保存if_trade列=true的行
# 抛弃过程处理行,且open值不等于0的行
data = data.drop(['分红-前流通盘', '配股-后总股本', '配股价-前总股本',
'送转股-后流通盘', 'if_trade', 'category', 'preclose'], axis=1)[data['open'] != 0]
# 复权处理完成
# 如果没有传参进来,就自己读取财务文件,否则用传参的值
if df_cw == '':
cw_dict = readall_local_cwfile()
else:
cw_dict = df_cw
# 计算换手率
# 财报数据公开后,股本才变更。因此有效时间是“当前财报日至未来日期”。故将结束日期设置为2099年。每次财报更新后更新对应的日期时间段
e_date = '20990101'
for cw_date in cw_dict: # 遍历财报字典 cw_date=财报日期 cw_dict[cw_date]=具体的财报内容
# 如果复权数据表的首行日期>当前要读取的财务报表日期,则表示此财务报表发布时股票还未上市,跳过此次循环。有例外情况:003001
# (cw_dict[cw_date][1] == code).any() 表示当前股票code在财务DF里有数据
if df_ltg.index[0].strftime('%Y%m%d') <= cw_date <= df_ltg.index[-1].strftime('%Y%m%d') \
and len(cw_dict[cw_date]) > 0:
if (cw_dict[cw_date][1] == code).any():
# 获取目前股票所在行的索引值,具有唯一性,所以直接[0]
code_df_index = cw_dict[cw_date][cw_dict[cw_date][1] == code].index.to_list()[0]
# DF格式读取的财报,字段与财务说明文件的序号一一对应,如果是CSV读取的,字段需+1
# print(f'{cwfile_date} 总股本:{cw_dict[cw_date].iat[code_df_index,238]}'
# f'流通股本:{cw_dict[cw_date].iat[code_df_index,239]}')
# 如果流通股值是0,则进行下一次循环
if int(cw_dict[cw_date].iat[code_df_index, 239]) != 0:
# df_ltg[cw_date:e_date].index[0] 表示df_ltg中从cw_date到e_date的第一个索引的值。
# 也就是离cw_date日期最近的下一个有效行
df_ltg.at[df_ltg[cw_date:e_date].index[0], '流通股'] = float(cw_dict[cw_date].iat[code_df_index, 239])
# df_ltg拼接回原DF
data = pd.concat([data, df_ltg], axis=1)
data = data.fillna(method='ffill') # 向下填充无效值
data = data.fillna(method='bfill') # 向上填充无效值 为了弥补开始几行的空值
data = data.round({'open': 2, 'high': 2, 'low': 2, 'close': 2, }) # 指定列四舍五入
if '流通股' in data.columns.to_list():
data['流通市值'] = data['流通股'] * data['close']
data['换手率'] = data['vol'] / data['流通股'] * 100
data = data.round({'流通市值': 2, '换手率': 2, }) # 指定列四舍五入
if flag_attach: # 追加模式,则附加最新处理的数据
data = df_code_original.append(data)
if len(start_date) == 0 and len(end_date) == 0:
pass
elif len(start_date) != 0 and len(end_date) == 0:
data = data[start_date:]
elif len(start_date) == 0 and len(end_date) != 0:
data = data[:end_date]
elif len(start_date) != 0 and len(end_date) != 0:
data = data[start_date:end_date]
data.reset_index(drop=False, inplace=True) # 重置索引行,数字索引,date列到第1列,保存为str '1991-01-01' 格式
# 最后调整列顺序
# data = data.reindex(columns=['code', 'date', 'open', 'high', 'low', 'close', 'vol', 'amount', 'adj', '流通股', '流通市值', '换手率'])
return data
def get_tdx_lastestquote(stocklist=None):
"""
    Fetch current real-time quotes via pytdx and return them as a DataFrame. If stocklist is empty, quotes for every stock in the ucfg.tdx['csv_lday'] directory are fetched.
    :param stocklist: optional, list of stock codes as str, e.g. ['000001', '000002', '600030']
    :return: the latest quotes fetched from the pytdx server
"""
    # get_security_quotes only allows at most 80 stocks per request, and the larger the
    # batch, the more stocks are missing from the reply (a chunking sketch follows this
    # function). Timing tests:
    # batch size   stocks returned   seconds
    # 80           3554              2.59
    # 40           3874              5.07
    # 20           4015              10.12
    # 10           4105              17.54
from pytdx.hq import TdxHq_API
stocklist_pytdx = []
if stocklist is None: # 如果列表为空,则获取csv_lday目录全部股票
stocklist = []
for i in os.listdir(ucfg.tdx['csv_lday']):
stocklist.append(i[:-4])
elif isinstance(stocklist, str):
tmp = []
tmp.append(stocklist)
stocklist = tmp
del tmp
elif isinstance(stocklist, tuple):
stocklist_pytdx.append(stocklist)
if isinstance(stocklist, list):
for stock in stocklist: # 构造get_security_quotes所需的元组参数
if stock[:1] == '6':
stocklist_pytdx.append(tuple([1, stock]))
elif stock[:1] == '0' or stock[:1] == '3':
stocklist_pytdx.append(tuple([0, stock]))
del stocklist
df = pd.DataFrame()
api = TdxHq_API(raise_exception=False)
starttime_tick = time.time()
print(f'请求 {len(stocklist_pytdx)} 只股票实时行情')
if api.connect(ucfg.tdx['pytdx_ip'], ucfg.tdx['pytdx_port']):
# 第一轮获取股票行情,会有100只股票左右遗漏。pytdx代码问题
if len(stocklist_pytdx) == 1:
data = api.to_df(api.get_security_quotes(stocklist_pytdx))
df = pd.concat([df, data], axis=0, ignore_index=True)
else:
k = 0
tq = tqdm(stocklist_pytdx)
for v in tq:
if type(v) == tuple:
tq.set_description(v[1])
else:
tq.set_description(v)
if k > 0 and k % 10 == 0:
data = api.to_df(api.get_security_quotes(stocklist_pytdx[k - 10:k]))
df = pd.concat([df, data], axis=0, ignore_index=True)
elif k == len(stocklist_pytdx) - 1: # 循环到最后,少于10个构成一组
data = api.to_df(api.get_security_quotes(stocklist_pytdx[k - (k % 10):k + 1]))
df = pd.concat([df, data], axis=0, ignore_index=True)
k = k + 1
api.disconnect()
df.dropna(how='all', inplace=True)
# print(f'已获取{len(df)}只股票行情 用时{(time.time() - starttime_tick):>5.2f}秒')
# stocklist_pytdx剔除已获取的股票
for stock in df.loc[(df['market'] == 1)]['code'].to_list():
try:
stocklist_pytdx.remove((1, stock))
except ValueError:
pass
for stock in df.loc[(df['market'] == 0)]['code'].to_list():
try:
stocklist_pytdx.remove((0, stock))
except ValueError:
pass
# 第二轮获取股票行情,剩余的就是今日无交易的股票
if api.connect(ucfg.tdx['pytdx_ip'], ucfg.tdx['pytdx_port']):
for i in stocklist_pytdx:
data = api.to_df(api.get_security_quotes(i))
df = pd.concat([df, data], axis=0, ignore_index=True)
api.disconnect()
df.dropna(how='all', inplace=True)
df.reset_index(drop=True, inplace=True)
# stocklist_pytdx剔除已获取的股票
for stock in df.loc[(df['market'] == 1)]['code'].to_list():
try:
stocklist_pytdx.remove((1, stock))
except ValueError:
pass
for stock in df.loc[(df['market'] == 0)]['code'].to_list():
try:
stocklist_pytdx.remove((0, stock))
except ValueError:
pass
print(f'已获取 {len(df)} 只股票实时行情 用时 {(time.time() - starttime_tick):>.2f} 秒')
if len(stocklist_pytdx) > 0:
print(f'剩余 {len(stocklist_pytdx)} 只股票今日未交易:')
print(stocklist_pytdx)
return df
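# The grouping used in get_tdx_lastestquote above (fixed batches of 10, because larger
# get_security_quotes batches return fewer stocks, see its timing notes) can be written
# as a small helper; sketch only:
def chunked(seq, size=10):
    for i in range(0, len(seq), size):
        yield seq[i:i + size]
# e.g.: for group in chunked(stocklist_pytdx): quotes = api.to_df(api.get_security_quotes(group))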
def update_stockquote(code, df_history, df_today):
"""
    Merge a stock's historical DataFrame with its latest intraday pytdx quote and return the merged DataFrame.
    :param code: str, stock code, e.g. '600030'
    :param df_history: DataFrame, the stock's historical data
    :param df_today: DataFrame, the stock's latest intraday quote data
    :return: the merged DataFrame
"""
now_date = pd.to_datetime(time.strftime("%Y-%m-%d", time.localtime()))
# now_time = time.strftime("%H:%M:%S", time.localtime())
# df_history[date]最后一格的日期小于今天
if pd.to_datetime(df_history.at[df_history.index[-1], 'date']) < now_date:
df_today = df_today[(df_today['code'] == code)]
with pd.option_context('mode.chained_assignment', None): # 临时屏蔽语句警告
df_today['date'] = now_date
df_today.set_index('date', drop=False, inplace=True)
df_today = df_today.rename(columns={'price': 'close'})
df_today = df_today[{'code', 'date', 'open', 'high', 'low', 'close', 'vol', 'amount'}]
result = pd.concat([df_history, df_today], axis=0, ignore_index=False)
result = result.fillna(method='ffill') # 向下填充无效值
if '流通市值' and '换手率' in result.columns.tolist():
result['流通市值'] = result['流通股'] * result['close']
result = result.round({'流通市值': 2, }) # 指定列四舍五入
if '换手率' and '换手率' in result.columns.tolist():
result['换手率'] = result['vol'] / result['流通股'] * 100
result = result.round({'换手率': 2, }) # 指定列四舍五入
else:
result = df_history
return result
if __name__ == '__main__':
stock_code = '000001'
day2csv(ucfg.tdx['tdx_path'] + '/vipdoc/sz/lday', 'sz' + stock_code + '.day', ucfg.tdx['csv_lday'])
    df_gbbq = pd.read_csv(ucfg.tdx['csv_gbbq'] + '/gbbq.csv', encoding='gbk', dtype={'code': str})
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import os.path
import pkg_resources
import tempfile
import unittest
import numpy as np
import pandas as pd
from qiime2.metadata import (Metadata, CategoricalMetadataColumn,
NumericMetadataColumn, MetadataFileError)
def get_data_path(filename):
return pkg_resources.resource_filename('qiime2.metadata.tests',
'data/%s' % filename)
# NOTE: many of the test files in the `data` directory intentionally have
# leading/trailing whitespace characters on some lines, as well as mixed usage
# of spaces, tabs, carriage returns, and newlines. When editing these files,
# please make sure your code editor doesn't strip these leading/trailing
# whitespace characters (e.g. Atom does this by default), nor automatically
# modify the files in some other way such as converting Windows-style CRLF
# line terminators to Unix-style newlines.
#
# When committing changes to the files, carefully review the diff to make sure
# unintended changes weren't introduced.
class TestLoadErrors(unittest.TestCase):
def test_path_does_not_exist(self):
with self.assertRaisesRegex(MetadataFileError,
"Metadata file path doesn't exist"):
Metadata.load(
'/qiime2/unit/tests/hopefully/this/path/does/not/exist')
def test_path_is_directory(self):
fp = get_data_path('valid')
with self.assertRaisesRegex(MetadataFileError,
"path points to something other than a "
"file"):
Metadata.load(fp)
def test_non_utf_8_file(self):
fp = get_data_path('invalid/non-utf-8.tsv')
with self.assertRaisesRegex(MetadataFileError,
'encoded as UTF-8 or ASCII'):
Metadata.load(fp)
def test_utf_16_le_file(self):
fp = get_data_path('invalid/simple-utf-16le.txt')
with self.assertRaisesRegex(MetadataFileError,
'UTF-16 Unicode'):
Metadata.load(fp)
def test_utf_16_be_file(self):
fp = get_data_path('invalid/simple-utf-16be.txt')
with self.assertRaisesRegex(MetadataFileError,
'UTF-16 Unicode'):
Metadata.load(fp)
def test_empty_file(self):
fp = get_data_path('invalid/empty-file')
with self.assertRaisesRegex(MetadataFileError,
'locate header.*file may be empty'):
Metadata.load(fp)
def test_comments_and_empty_rows_only(self):
fp = get_data_path('invalid/comments-and-empty-rows-only.tsv')
with self.assertRaisesRegex(MetadataFileError,
'locate header.*only of comments or empty '
'rows'):
Metadata.load(fp)
def test_header_only(self):
fp = get_data_path('invalid/header-only.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_header_only_with_comments_and_empty_rows(self):
fp = get_data_path(
'invalid/header-only-with-comments-and-empty-rows.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_qiime1_empty_mapping_file(self):
fp = get_data_path('invalid/qiime1-empty.tsv')
with self.assertRaisesRegex(MetadataFileError, 'at least one ID'):
Metadata.load(fp)
def test_invalid_header(self):
fp = get_data_path('invalid/invalid-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'unrecognized ID column name.*'
'invalid_id_header'):
Metadata.load(fp)
def test_empty_id(self):
fp = get_data_path('invalid/empty-id.tsv')
with self.assertRaisesRegex(MetadataFileError, 'empty metadata ID'):
Metadata.load(fp)
def test_whitespace_only_id(self):
fp = get_data_path('invalid/whitespace-only-id.tsv')
with self.assertRaisesRegex(MetadataFileError, 'empty metadata ID'):
Metadata.load(fp)
def test_empty_column_name(self):
fp = get_data_path('invalid/empty-column-name.tsv')
with self.assertRaisesRegex(MetadataFileError,
'column without a name'):
Metadata.load(fp)
def test_whitespace_only_column_name(self):
fp = get_data_path('invalid/whitespace-only-column-name.tsv')
with self.assertRaisesRegex(MetadataFileError,
'column without a name'):
Metadata.load(fp)
def test_duplicate_ids(self):
fp = get_data_path('invalid/duplicate-ids.tsv')
with self.assertRaisesRegex(MetadataFileError,
'IDs must be unique.*id1'):
Metadata.load(fp)
def test_duplicate_ids_with_whitespace(self):
fp = get_data_path('invalid/duplicate-ids-with-whitespace.tsv')
with self.assertRaisesRegex(MetadataFileError,
'IDs must be unique.*id1'):
Metadata.load(fp)
def test_duplicate_column_names(self):
fp = get_data_path('invalid/duplicate-column-names.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Column names must be unique.*col1'):
Metadata.load(fp)
def test_duplicate_column_names_with_whitespace(self):
fp = get_data_path(
'invalid/duplicate-column-names-with-whitespace.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Column names must be unique.*col1'):
Metadata.load(fp)
def test_id_conflicts_with_id_header(self):
fp = get_data_path('invalid/id-conflicts-with-id-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
"ID 'id' conflicts.*ID column header"):
Metadata.load(fp)
def test_column_name_conflicts_with_id_header(self):
fp = get_data_path(
'invalid/column-name-conflicts-with-id-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column name 'featureid' conflicts.*ID "
"column header"):
Metadata.load(fp)
def test_column_types_unrecognized_column_name(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
'not_a_column.*column_types.*not a column '
'in the metadata file'):
Metadata.load(fp, column_types={'not_a_column': 'numeric'})
def test_column_types_unrecognized_column_type(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
'col2.*column_types.*unrecognized column '
'type.*CATEGORICAL'):
Metadata.load(fp, column_types={'col1': 'numeric',
'col2': 'CATEGORICAL'})
def test_column_types_not_convertible_to_numeric(self):
fp = get_data_path('valid/simple.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column 'col3' to numeric.*could not be "
"interpreted as numeric: 'bar', 'foo'"):
Metadata.load(fp, column_types={'col1': 'numeric',
'col2': 'categorical',
'col3': 'numeric'})
def test_column_types_override_directive_not_convertible_to_numeric(self):
fp = get_data_path('valid/simple-with-directive.tsv')
with self.assertRaisesRegex(MetadataFileError,
"column 'col3' to numeric.*could not be "
"interpreted as numeric: 'bar', 'foo'"):
Metadata.load(fp, column_types={'col3': 'numeric'})
def test_directive_before_header(self):
fp = get_data_path('invalid/directive-before-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'directive.*#q2:types.*searching for '
'header'):
Metadata.load(fp)
def test_unrecognized_directive(self):
fp = get_data_path('invalid/unrecognized-directive.tsv')
with self.assertRaisesRegex(MetadataFileError,
'Unrecognized directive.*#q2:foo.*'
'#q2:types directive is supported'):
Metadata.load(fp)
def test_duplicate_directives(self):
fp = get_data_path('invalid/duplicate-directives.tsv')
with self.assertRaisesRegex(MetadataFileError,
'duplicate directive.*#q2:types'):
Metadata.load(fp)
def test_unrecognized_column_type_in_directive(self):
fp = get_data_path('invalid/unrecognized-column-type.tsv')
with self.assertRaisesRegex(MetadataFileError,
'col2.*unrecognized column type.*foo.*'
'#q2:types directive'):
Metadata.load(fp)
def test_column_types_directive_not_convertible_to_numeric(self):
fp = get_data_path('invalid/types-directive-non-numeric.tsv')
# This error message regex is intentionally verbose because we want to
# assert that many different types of non-numeric strings aren't
# interpreted as numbers. The error message displays a sorted list of
# all values that couldn't be converted to numbers, making it possible
# to test a variety of non-numeric strings in a single test case.
msg = (r"column 'col2' to numeric.*could not be interpreted as "
r"numeric: '\$42', '\+inf', '-inf', '0xAF', '1,000', "
r"'1\.000\.0', '1_000_000', '1e3e4', 'Infinity', 'NA', 'NaN', "
"'a', 'e3', 'foo', 'inf', 'nan', 'sample-1'")
with self.assertRaisesRegex(MetadataFileError, msg):
Metadata.load(fp)
def test_directive_after_directives_section(self):
fp = get_data_path(
'invalid/directive-after-directives-section.tsv')
with self.assertRaisesRegex(MetadataFileError,
'#q2:types.*outside of the directives '
'section'):
Metadata.load(fp)
def test_directive_longer_than_header(self):
fp = get_data_path('invalid/directive-longer-than-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'row has 5 cells.*header declares 4 '
'cells'):
Metadata.load(fp)
def test_data_longer_than_header(self):
fp = get_data_path('invalid/data-longer-than-header.tsv')
with self.assertRaisesRegex(MetadataFileError,
'row has 5 cells.*header declares 4 '
'cells'):
Metadata.load(fp)
class TestLoadSuccess(unittest.TestCase):
def setUp(self):
self.temp_dir_obj = tempfile.TemporaryDirectory(
prefix='qiime2-metadata-tests-temp-')
self.temp_dir = self.temp_dir_obj.name
# This Metadata object is compared against observed Metadata objects in
# many of the tests, so just define it once here.
self.simple_md = Metadata(
pd.DataFrame({'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
# Basic sanity check to make sure the columns are ordered and typed as
# expected. It'd be unfortunate to compare observed results to expected
# results that aren't representing what we think they are!
obs_columns = [(name, props.type)
for name, props in self.simple_md.columns.items()]
exp_columns = [('col1', 'numeric'), ('col2', 'categorical'),
('col3', 'categorical')]
self.assertEqual(obs_columns, exp_columns)
def tearDown(self):
self.temp_dir_obj.cleanup()
def test_simple(self):
# Simple metadata file without comments, empty rows, jaggedness,
# missing data, odd IDs or column names, directives, etc. The file has
# multiple column types (numeric, categorical, and something that has
# mixed numbers and strings, which must be interpreted as categorical).
fp = get_data_path('valid/simple.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_bom_simple_txt(self):
# This is the encoding that notepad.exe will use most commonly
fp = get_data_path('valid/BOM-simple.txt')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_different_file_extension(self):
fp = get_data_path('valid/simple.txt')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_no_newline_at_eof(self):
fp = get_data_path('valid/no-newline-at-eof.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_unix_line_endings(self):
fp = get_data_path('valid/unix-line-endings.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_windows_line_endings(self):
fp = get_data_path('valid/windows-line-endings.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_mac_line_endings(self):
fp = get_data_path('valid/mac-line-endings.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_no_source_artifacts(self):
fp = get_data_path('valid/simple.tsv')
metadata = Metadata.load(fp)
self.assertEqual(metadata.artifacts, ())
def test_retains_column_order(self):
# Explicitly test that the file's column order is retained in the
# Metadata object. Many of the test cases use files with column names
# in alphabetical order (e.g. "col1", "col2", "col3"), which matches
# how pandas orders columns in a DataFrame when supplied with a dict
# (many of the test cases use this feature of the DataFrame
# constructor when constructing the expected DataFrame).
fp = get_data_path('valid/column-order.tsv')
obs_md = Metadata.load(fp)
# Supply DataFrame constructor with explicit column ordering instead of
# a dict.
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_columns = ['z', 'y', 'x']
exp_data = [
[1.0, 'a', 'foo'],
[2.0, 'b', 'bar'],
[3.0, 'c', '42']
]
exp_df = pd.DataFrame(exp_data, index=exp_index, columns=exp_columns)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_leading_trailing_whitespace(self):
fp = get_data_path('valid/leading-trailing-whitespace.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_comments(self):
fp = get_data_path('valid/comments.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_empty_rows(self):
fp = get_data_path('valid/empty-rows.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_qiime1_mapping_file(self):
fp = get_data_path('valid/qiime1.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='#SampleID')
exp_df = pd.DataFrame({'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_qiita_sample_information_file(self):
fp = get_data_path('valid/qiita-sample-information.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id.1', 'id.2'], name='sample_name')
exp_df = pd.DataFrame({
'DESCRIPTION': ['description 1', 'description 2'],
'TITLE': ['A Title', 'Another Title']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_qiita_preparation_information_file(self):
fp = get_data_path('valid/qiita-preparation-information.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id.1', 'id.2'], name='sample_name')
exp_df = pd.DataFrame({
'BARCODE': ['ACGT', 'TGCA'],
'EXPERIMENT_DESIGN_DESCRIPTION': ['longitudinal study',
'longitudinal study']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_biom_observation_metadata_file(self):
fp = get_data_path('valid/biom-observation-metadata.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['OTU_1', 'OTU_2'], name='#OTUID')
exp_df = pd.DataFrame([['k__Bacteria;p__Firmicutes', 0.890],
['k__Bacteria', 0.9999]],
columns=['taxonomy', 'confidence'],
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_supported_id_headers(self):
case_insensitive = {
'id', 'sampleid', 'sample id', 'sample-id', 'featureid',
'feature id', 'feature-id'
}
exact_match = {
'#SampleID', '#Sample ID', '#OTUID', '#OTU ID', 'sample_name'
}
# Build a set of supported headers, including exact matches and headers
# with different casing.
headers = set()
for header in case_insensitive:
headers.add(header)
headers.add(header.upper())
headers.add(header.title())
for header in exact_match:
headers.add(header)
fp = os.path.join(self.temp_dir, 'metadata.tsv')
count = 0
for header in headers:
with open(fp, 'w') as fh:
fh.write('%s\tcolumn\nid1\tfoo\nid2\tbar\n' % header)
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2'], name=header)
exp_df = pd.DataFrame({'column': ['foo', 'bar']}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
count += 1
# Since this test case is a little complicated, make sure that the
# expected number of comparisons are happening.
self.assertEqual(count, 26)
def test_recommended_ids(self):
fp = get_data_path('valid/recommended-ids.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['c6ca034a-223f-40b4-a0e0-45942912a5ea', 'My.ID'],
name='id')
exp_df = pd.DataFrame({'col1': ['foo', 'bar']}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_non_standard_characters(self):
# Test that non-standard characters in IDs, column names, and cells are
# handled correctly. The test case isn't exhaustive (e.g. it doesn't
# test every Unicode character; that would be a nice additional test
# case to have in the future). Instead, this test aims to be more of an
# integration test for the robustness of the reader to non-standard
# data. Many of the characters and their placement within the data file
# are based on use-cases/bugs reported on the forum, Slack, etc. The
# data file has comments explaining these test case choices in more
# detail.
fp = get_data_path('valid/non-standard-characters.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['©id##1', '((id))2', "'id_3<>'", '"id#4"',
'i d\r\t\n5'], name='id')
exp_columns = ['↩c@l1™', 'col(#2)', "#col'3", '"<col_4>"',
'col\t \r\n5']
exp_data = [
['ƒoo', '(foo)', '#f o #o', 'fo\ro', np.nan],
["''2''", 'b#r', 'ba\nr', np.nan, np.nan],
['b"ar', 'c\td', '4\r\n2', np.nan, np.nan],
['b__a_z', '<42>', '>42', np.nan, np.nan],
['baz', np.nan, '42', np.nan, np.nan]
]
exp_df = pd.DataFrame(exp_data, index=exp_index, columns=exp_columns)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_missing_data(self):
fp = get_data_path('valid/missing-data.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['None', 'nan', 'NA'], name='id')
exp_df = pd.DataFrame(collections.OrderedDict([
('col1', [1.0, np.nan, np.nan]),
('NA', [np.nan, np.nan, np.nan]),
('col3', ['null', 'N/A', 'NA']),
('col4', np.array([np.nan, np.nan, np.nan], dtype=object))]),
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
# Test that column types are correct (mainly for the two empty columns;
# one should be numeric, the other categorical).
obs_columns = [(name, props.type)
for name, props in obs_md.columns.items()]
exp_columns = [('col1', 'numeric'), ('NA', 'numeric'),
('col3', 'categorical'), ('col4', 'categorical')]
self.assertEqual(obs_columns, exp_columns)
def test_minimal_file(self):
# Simplest possible metadata file consists of one ID and zero columns.
fp = get_data_path('valid/minimal.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['a'], name='id')
exp_df = pd.DataFrame({}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_single_id(self):
fp = get_data_path('valid/single-id.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1'], name='id')
exp_df = pd.DataFrame({'col1': [1.0], 'col2': ['a'], 'col3': ['foo']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_no_columns(self):
fp = get_data_path('valid/no-columns.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['a', 'b', 'my-id'], name='id')
exp_df = pd.DataFrame({}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_single_column(self):
fp = get_data_path('valid/single-column.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': [1.0, 2.0, 3.0]}, index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_trailing_columns(self):
fp = get_data_path('valid/trailing-columns.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_jagged_trailing_columns(self):
# Test case based on https://github.com/qiime2/qiime2/issues/335
fp = get_data_path('valid/jagged-trailing-columns.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_padding_rows_shorter_than_header(self):
fp = get_data_path('valid/rows-shorter-than-header.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': [1.0, 2.0, np.nan],
'col2': ['a', np.nan, np.nan],
'col3': [np.nan, np.nan, np.nan]},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_all_cells_padded(self):
fp = get_data_path('valid/all-cells-padded.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': [np.nan, np.nan, np.nan],
'col2': [np.nan, np.nan, np.nan],
'col3': [np.nan, np.nan, np.nan]},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_does_not_cast_ids_or_column_names(self):
fp = get_data_path('valid/no-id-or-column-name-type-cast.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['0.000001', '0.004000', '0.000000'],
dtype=object, name='id')
exp_columns = ['42.0', '1000', '-4.2']
exp_data = [
[2.0, 'b', 2.5],
[1.0, 'b', 4.2],
[3.0, 'c', -9.999]
]
exp_df = pd.DataFrame(exp_data, index=exp_index, columns=exp_columns)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_numeric_column(self):
fp = get_data_path('valid/numeric-column.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3', 'id4', 'id5', 'id6', 'id7',
'id8', 'id9', 'id10', 'id11', 'id12'], name='id')
exp_df = pd.DataFrame({'col1': [0.0, 2.0, 0.0003, -4.2, 1e-4, 1e4,
1.5e2, np.nan, 1.0, 0.5, 1e-8, -0.0]},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_numeric_column_as_categorical(self):
fp = get_data_path('valid/numeric-column.tsv')
obs_md = Metadata.load(fp, column_types={'col1': 'categorical'})
exp_index = pd.Index(['id1', 'id2', 'id3', 'id4', 'id5', 'id6', 'id7',
'id8', 'id9', 'id10', 'id11', 'id12'], name='id')
exp_df = pd.DataFrame({'col1': ['0', '2.0', '0.00030', '-4.2', '1e-4',
'1e4', '+1.5E+2', np.nan, '1.', '.5',
'1e-08', '-0']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_with_complete_types_directive(self):
fp = get_data_path('valid/complete-types-directive.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_with_partial_types_directive(self):
fp = get_data_path('valid/partial-types-directive.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_with_empty_types_directive(self):
fp = get_data_path('valid/empty-types-directive.tsv')
obs_md = Metadata.load(fp)
self.assertEqual(obs_md, self.simple_md)
def test_with_case_insensitive_types_directive(self):
fp = get_data_path('valid/case-insensitive-types-directive.tsv')
obs_md = Metadata.load(fp)
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': [-5.0, 0.0, 42.0]},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_column_types_without_directive(self):
fp = get_data_path('valid/simple.tsv')
obs_md = Metadata.load(fp, column_types={'col1': 'categorical'})
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
exp_df = pd.DataFrame({'col1': ['1', '2', '3'],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=exp_index)
exp_md = Metadata(exp_df)
self.assertEqual(obs_md, exp_md)
def test_column_types_override_directive(self):
fp = get_data_path('valid/simple-with-directive.tsv')
obs_md = Metadata.load(fp, column_types={'col1': 'categorical',
'col2': 'categorical'})
exp_index = pd.Index(['id1', 'id2', 'id3'], name='id')
"""This is test module for knoema client with test credentials"""
import unittest
import knoema
import pandas
class TestKnoemaClient(unittest.TestCase):
"""This is class with knoema client unit tests with test credentials"""
base_host = 'knoema.com'
def setUp(self):
apicfg = knoema.ApiConfig()
apicfg.host = self.base_host
apicfg.app_id = 'FzOYqDg'
apicfg.app_secret = '<KEY>'
def test_getdata_singleseries_by_member_id(self):
"""The method is testing getting single series by dimension member ids"""
data_frame = knoema.get('xmhdwqf', company='c1', indicator='ind_a')
self.assertEqual(data_frame.shape[0], 11)
self.assertEqual(data_frame.shape[1], 1)
self.assertEqual(['Company', 'Indicator', 'Frequency'], data_frame.columns.names)
indx = data_frame.first_valid_index()
sname = ('BOX', 'Annual', 'A')
value = data_frame.at[indx, sname]
self.assertEqual(value, 85.50)
indx = data_frame.index[10]
value = data_frame.at[indx, sname]
self.assertEqual(value, 15.62)
def test_getdata_multiseries_by_member_id(self):
"""The method is testing getting multiple series by dimension member ids"""
data_frame = knoema.get('xmhdwqf', company='c1;c2', indicator='ind_m;ind_a')
self.assertEqual(data_frame.shape[0], 56)
self.assertEqual(data_frame.shape[1], 4)
self.assertEqual(['Company', 'Indicator', 'Frequency'], data_frame.columns.names)
indx = data_frame.index[7]
sname = ('BOX', 'Monthly', 'M')
value = data_frame.at[indx, sname]
self.assertEqual(value, 23.08)
indx = data_frame.index[55]
value = data_frame.at[indx, sname]
self.assertEqual(value, 19.71)
def test_getdata_multiseries_by_member_name(self):
"""The method is testing getting data by dimension member names"""
company_names = 'BOX;UBER'
indicator_names = 'Monthly;Annual'
data_frame = knoema.get('xmhdwqf', company=company_names, indicator=indicator_names)
self.assertEqual(data_frame.shape[0], 56)
self.assertEqual(data_frame.shape[1], 4)
self.assertEqual(['Company', 'Indicator', 'Frequency'], data_frame.columns.names)
indx = data_frame.index[7]
sname = ('BOX', 'Monthly', 'M')
value = data_frame.at[indx, sname]
self.assertEqual(value, 23.08)
indx = data_frame.index[55]
value = data_frame.at[indx, sname]
self.assertEqual(value, 19.71)
def test_getdata_multiseries_by_member_id_range(self):
"""The method is testing getting multiple series by dimension member ids and time range"""
data_frame = knoema.get('xmhdwqf', company='c1;c2', indicator='ind_a', timerange='2017-2019')
self.assertEqual(data_frame.shape[0], 11)
self.assertEqual(data_frame.shape[1], 2)
indx = data_frame.first_valid_index()
sname = ('UBER', 'Annual', 'A')
value = data_frame.at[indx, sname]
self.assertEqual(value, 53.03)
indx = data_frame.last_valid_index()
value = data_frame.at[indx, sname]
self.assertEqual(value, 99.15)
def test_getdata_singleseries_difffrequencies_by_member_id(self):
"""The method is testing getting single series on different frequencies by dimension member ids"""
data_frame = knoema.get('xmhdwqf', company='c1', indicator='ind_multi')
self.assertEqual(data_frame.shape[1], 3)
indx = data_frame.first_valid_index()
sname = ('BOX', 'Multi', 'A')
value = data_frame.at[indx, sname]
self.assertEqual(value, 60.24)
value = data_frame.at[pandas.to_datetime('2018-01-01'), sname]
self.assertEqual(value, 80.56)
indx = data_frame.first_valid_index()
sname = ('BOX', 'Multi', 'Q')
value = data_frame.at[indx, sname]
self.assertEqual(value, 47.82)
value = data_frame.at[pandas.to_datetime('2017-01-01'), sname]
self.assertEqual(value, 50.28)
def test_getdata_multiseries_singlefrequency_by_member_id(self):
"""The method is testing getting mulitple series with one frequency by dimension member ids"""
data_frame = knoema.get('xmhdwqf', company='c2', indicator='ind_multi', frequency='M')
self.assertEqual(data_frame.shape[1], 1)
sname = ('UBER', 'Multi', 'M')
value = data_frame.at[pandas.to_datetime('2017-01-01'), sname]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 14 21:31:56 2017
@author: Franz
"""
import scipy.signal
import numpy as np
import scipy.io as so
import os.path
import re
import matplotlib.pylab as plt
import h5py
import matplotlib.patches as patches
import numpy.random as rand
import seaborn as sns
import pandas as pd
from functools import reduce
import random
import pdb
class Mouse :
def __init__(self, idf, list=None, typ='') :
self.recordings = []
self.recordings.append(list)
self.typ = typ
self.idf = idf
def add(self, rec) :
self.recordings.append(rec)
def __len__(self) :
return len(self.recordings)
def __repr__(self) :
return ", ".join(self.recordings)
### PROCESSING OF RECORDING DATA ##############################################
def load_stateidx(ppath, name, ann_name=''):
""" load the sleep state file of recording (folder) $ppath/$name
@Return:
M,K sequence of sleep states, sequence of
0's and 1's indicating non- and annotated states
"""
ddir = os.path.join(ppath, name)
ppath, name = os.path.split(ddir)
if ann_name == '':
ann_name = name
sfile = os.path.join(ppath, name, 'remidx_' + ann_name + '.txt')
f = open(sfile, 'r')
lines = f.readlines()
f.close()
n = 0
for l in lines:
if re.match('\d', l):
n += 1
M = np.zeros(n, dtype='int')
K = np.zeros(n, dtype='int')
i = 0
for l in lines :
if re.search('^\s+$', l) :
continue
if re.search('\s*#', l) :
continue
if re.match('\d+\s+-?\d+', l) :
a = re.split('\s+', l)
M[i] = int(a[0])
K[i] = int(a[1])
i += 1
return M,K
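# Example sketch of the remidx file format parsed above (values are hypothetical):
# comment lines ('#...') and blank lines are skipped, every remaining line holds
# "state annotation-flag", e.g.
#   2	1
#   3	0
#   1	1
# which yields M = [2, 3, 1] (sleep states) and K = [1, 0, 1] (annotated or not).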
def load_recordings(ppath, rec_file) :
"""
load_recordings(ppath, rec_file)
load recording listing with syntax:
[E|C] \s+ recording_name
#COMMENT
@RETURN:
(list of controls, lis of experiments)
"""
exp_list = []
ctr_list = []
rfile = os.path.join(ppath, rec_file)
f = open(rfile, newline=None)
lines = f.readlines()
f.close()
for l in lines :
if re.search('^\s+$', l) :
continue
if re.search('^\s*#', l) :
continue
a = re.split('\s+', l)
if re.search('E', a[0]) :
exp_list.append(a[1])
if re.search('C', a[0]) :
ctr_list.append(a[1])
return ctr_list, exp_list
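# Hypothetical listing file accepted by load_recordings() (whitespace-separated):
#   # controls first
#   C   M1_010120n1
#   E   M2_010120n1
# load_recordings(ppath, 'rec_list.txt') then returns
# (['M1_010120n1'], ['M2_010120n1']), i.e. (controls, experiments).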
def load_dose_recordings(ppath, rec_file):
"""
load recording list with following syntax:
A line is either control or experiments; Control recordings look like:
C \s recording_name
Experimental recordings also come with an additional dose parameter
(allowing for comparison of multiple doses with controls)
E \s recording_name \s dose_1
E \s recording_name \s dose_2
"""
rfile = os.path.join(ppath, rec_file)
f = open(rfile, newline=None)
lines = f.readlines()
f.close()
# first get all potential doses
doses = {}
ctr_list = []
for l in lines :
if re.search('^\s+$', l):
continue
if re.search('^\s*#', l):
continue
a = re.split('\s+', l)
if re.search('E', a[0]):
if a[2] in doses:
doses[a[2]].append(a[1])
else:
doses[a[2]] = [a[1]]
if re.search('C', a[0]):
ctr_list.append(a[1])
return ctr_list, doses
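# Hypothetical dose listing parsed by load_dose_recordings():
#   C   M1_010120n1
#   E   M2_010120n1   0.25
#   E   M3_010120n1   0.5
# returns (['M1_010120n1'], {'0.25': ['M2_010120n1'], '0.5': ['M3_010120n1']});
# note that the dose keys stay strings, exactly as written in the file.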
def get_snr(ppath, name):
"""
read and return sampling rate (SR) from file $ppath/$name/info.txt
"""
fid = open(os.path.join(ppath, name, 'info.txt'), newline=None)
lines = fid.readlines()
fid.close()
values = []
for l in lines :
a = re.search("^" + 'SR' + ":" + "\s+(.*)", l)
if a :
values.append(a.group(1))
return float(values[0])
def get_infoparam(ifile, field):
"""
NOTE: field is a single string
and the function does not check for the type
of the values for field.
In fact, it just returns the string following field
"""
fid = open(ifile, newline=None)
lines = fid.readlines()
fid.close()
values = []
for l in lines :
a = re.search("^" + field + ":" + "\s+(.*)", l)
if a :
values.append(a.group(1))
return values
def add_infoparam(ifile, field, vals):
"""
:param ifile: info file
:param field: Parameters specifier, e.g. 'SR'
:param vals: list with parameters
"""
fid = open(ifile, 'a')
vals = [str(s) for s in vals]
param = " ".join(vals)
fid.write('%s:\t%s' % (field, param))
fid.write(os.linesep)
fid.close()
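# Sketch of the info.txt convention shared by get_snr(), get_infoparam() and
# add_infoparam(); the sampling rate below is an assumed example value:
#   add_infoparam(os.path.join(ppath, name, 'info.txt'), 'SR', [1000])
# appends the line "SR:\t1000" to the file;
#   get_infoparam(os.path.join(ppath, name, 'info.txt'), 'SR')
# then returns ['1000'] (strings only), which get_snr() converts to float.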
def laser_start_end(laser, SR=1525.88, intval=5):
"""laser_start_end(ppath, name)
print start and end index of laser stimulation trains: For example,
if you was stimulated for 2min every 20 min with 20 Hz, return the
start and end index of the each 2min stimulation period (train)
returns the tuple (istart, iend), both indices are inclusive,
i.e. part of the sequence
@Param:
laser - laser, vector of 0s and 1s
intval - minimum time separation [s] between two laser trains
@Return:
(istart, iend) - tuple of two np.arrays with laser start and end indices
"""
idx = np.where(laser > 0.5)[0]
if len(idx) == 0 :
return ([], [])
idx2 = np.nonzero(np.diff(idx)*(1./SR) > intval)[0]
istart = np.hstack([idx[0], idx[idx2+1]])
iend = np.hstack([idx[idx2], idx[-1]])
return (istart, iend)
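# Toy example for laser_start_end(); with SR=10 Hz the two pulse trains below are
# separated by ~10 s > intval, so they are reported as two separate trains:
#   laser = np.concatenate([np.zeros(100), np.ones(50), np.zeros(100), np.ones(50)])
#   istart, iend = laser_start_end(laser, SR=10, intval=5)
#   # istart == [100, 250], iend == [149, 299] (inclusive indices)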
def load_laser(ppath, name):
"""
load laser from recording ppath/name
@RETURN:
@laser, vector of 0's and 1's
"""
# laser might be .mat or h5py file
# perhaps we could find a better way of testing that
file = os.path.join(ppath, name, 'laser_'+name+'.mat')
try:
laser = np.array(h5py.File(file,'r').get('laser'))
except:
laser = so.loadmat(file)['laser']
return np.squeeze(laser)
def laser_protocol(ppath, name):
"""
What was the stimulation frequency and the inter-stimulation interval for recording
$ppath/$name?
@Return:
inter-stimulation intervals, avg. inter-stimulation interval, frequency
"""
laser = load_laser(ppath, name)
SR = get_snr(ppath, name)
# first get inter-stimulation interval
(istart, iend) = laser_start_end(laser, SR)
intv = np.diff(np.array(istart/float(SR)))
d = intv/60.0
print("The laser was turned on in average every %.2f min," % (np.mean(d)))
print("with a min. interval of %.2f min and max. interval of %.2f min." % (np.min(d), np.max(d)))
print("Laser stimulation lasted for %f s." % (np.mean(np.array(iend/float(SR)-istart/float(SR)).mean())))
# print laser start times
print("Start time of each laser trial:")
j=1
for t in istart:
print("trial %d: %.2f" % (j, (t / float(SR)) / 60))
j += 1
# for each laser stimulation interval, check laser stimulation frequency
dt = 1/float(SR)
freq = []
laser_up = []
laser_down = []
for (i,j) in zip(istart, iend):
part = laser[i:j+1]
(a,b) = laser_start_end(part, SR, 0.005)
dur = (j-i+1)*dt
freq.append(len(a) / dur)
up_dur = (b-a+1)*dt*1000
down_dur = (a[1:]-b[0:-1]-1)*dt*1000
laser_up.append(np.mean(up_dur))
laser_down.append(np.mean(down_dur))
print(os.linesep + "Laser stimulation freq. was %.2f Hz," % np.mean(np.array(freq)))
print("with laser up and down duration of %.2f and %.2f ms." % (np.mean(np.array(laser_up)), np.mean(np.array(laser_down))))
return d, np.mean(d), np.mean(np.array(freq))
def swap_eeg(ppath, rec, ch='EEG'):
"""
swap EEG and EEG2 or EMG with EMG2 if $ch='EMG'
"""
if ch == 'EEG':
name = 'EEG'
else:
name = ch
EEG = so.loadmat(os.path.join(ppath, rec, name+'.mat'))[name]
EEG2 = so.loadmat(os.path.join(ppath, rec, name+'2.mat'))[name + '2']
tmp = EEG
EEG = EEG2
EEG2 = tmp
file_eeg1 = os.path.join(ppath, rec, '%s.mat' % name)
file_eeg2 = os.path.join(ppath, rec, '%s2.mat' % name)
so.savemat(file_eeg1, {name : EEG})
so.savemat(file_eeg2, {name+'2' : EEG2})
def eeg_conversion(ppath, rec, conv_factor=0.195):
"""
multiply all EEG and EMG channels with the given
conversion factor and write the conversion factor
as parameter (conversion:) into the info file.
The conversion is only executed if no conversion
factor is specified in the info file yet.
:param ppath: base folder
:param rec: recording
:param conv_factor: conversion factor
:return: n/s
"""
ifile = os.path.join(ppath, rec, 'info.txt')
conv = get_infoparam(ifile, 'conversion')
if len(conv) > 0:
print("found conversion: parameter in info file")
print("returning: no conversion necessary!!!")
return
else:
files = os.listdir(os.path.join(ppath, rec))
files = [f for f in files if re.match('^EEG', f)]
for f in files:
name = re.split('\.', f)[0]
EEG = so.loadmat(os.path.join(ppath, rec, name+'.mat'), squeeze_me=True)[name]
if EEG[0].dtype == 'int16':
EEG = EEG * conv_factor
file_eeg = os.path.join(ppath, rec, '%s.mat' % name)
print(file_eeg)
so.savemat(file_eeg, {name: EEG})
else:
print('Wrong datatype! probably already converted; returning...')
return
files = os.listdir(os.path.join(ppath, rec))
files = [f for f in files if re.match('^EMG', f)]
for f in files:
name = re.split('\.', f)[0]
EMG = so.loadmat(os.path.join(ppath, rec, name+'.mat'), squeeze_me=True)[name]
if EMG[0].dtype == 'int16':
EMG = EMG * conv_factor
file_emg = os.path.join(ppath, rec, '%s.mat' % name)
print(file_emg)
so.savemat(file_emg, {name: EMG})
else:
print('Wrong datatype! probably already converted; returning...')
return
add_infoparam(ifile, 'conversion', [conv_factor])
calculate_spectrum(ppath, rec)
### DEPRECATED ############################################
def video_pulse_detection(ppath, rec, SR=1000, iv = 0.01):
"""
return index of each video frame onset
ppath/rec - recording
@Optional
SR - sampling rate of EEG(!) recording
iv - minimum time interval (in seconds) between two frames
@Return
index of each video frame onset
"""
V = np.squeeze(so.loadmat(os.path.join(ppath, rec, 'videotime_' + rec + '.mat'))['video'])
TS = np.arange(0, len(V))
# indices where there's a jump in the signal
t = TS[np.where(V<0.5)];
if len(t) == 0:
idx = []
return idx
# time points where the interval between jumps is longer than iv
t2 = np.where(np.diff(t)*(1.0/SR)>=iv)[0]
idx = np.concatenate(([t[0]],t[t2+1]))
return idx
# SIGNAL PROCESSING ###########################################################
def my_lpfilter(x, w0, N=4):
"""
create a lowpass Butterworth filter with a cutoff of w0 * the Nyquist rate.
The nice thing about this filter is that it has zero-phase distortion.
A conventional lowpass filter would introduce a phase lag.
w0 - filter cutoff; value between 0 and 1, where 1 corresponds to nyquist frequency.
So if you want a filter with cutoff at x Hz, the corresponding w0 value is given by
w0 = 2 * x / sampling_rate
N - order of filter
@Return:
low-pass filtered signal
See also my hp_filter, or my_bpfilter
"""
from scipy import signal
b,a = signal.butter(N, w0)
y = signal.filtfilt(b,a, x)
return y
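# Example of the cutoff convention described above (assumed sampling rate of
# 1000 Hz): a 20 Hz low-pass corresponds to w0 = 2 * 20 / 1000 = 0.04, i.e.
#   y = my_lpfilter(x, 0.04)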
def my_hpfilter(x, w0, N=4):
"""
create an N-th order highpass Butterworth filter with cutoff frequency w0 * sampling_rate/2
"""
from scipy import signal
# use scipy.signal.firwin to generate filter
#taps = signal.firwin(numtaps, w0, pass_zero=False)
#y = signal.lfilter(taps, 1.0, x)
b,a = signal.butter(N, w0, 'high')
y = signal.filtfilt(b,a, x, padlen = x.shape[0]-1)
return y
def my_bpfilter(x, w0, w1, N=4,bf=True):
"""
create N-th order bandpass Butterworth filter with corner frequencies
w0*sampling_rate/2 and w1*sampling_rate/2
"""
#from scipy import signal
#taps = signal.firwin(numtaps, w0, pass_zero=False)
#y = signal.lfilter(taps, 1.0, x)
#return y
from scipy import signal
b,a = signal.butter(N, [w0, w1], 'bandpass')
if bf:
y = signal.filtfilt(b,a, x)
else:
y = signal.lfilter(b,a, x)
return y
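# Example: a theta band-pass (5-12 Hz) at an assumed sampling rate of 1000 Hz uses
# w0 = 2*5/1000 = 0.01 and w1 = 2*12/1000 = 0.024:
#   y = my_bpfilter(x, 0.01, 0.024)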
def my_notchfilter(x, sr=1000, band=5, freq=60, ripple=10, order=3, filter_type='butter'):
from scipy.signal import iirfilter,lfilter
fs = sr
nyq = fs/2.0
low = freq - band/2.0
high = freq + band/2.0
low = low/nyq
high = high/nyq
b, a = iirfilter(order, [low, high], rp=ripple, btype='bandstop',
analog=False, ftype=filter_type)
filtered_data = lfilter(b, a, x)
return filtered_data
def downsample_vec(x, nbin):
"""
y = downsample_vec(x, nbin)
downsample the vector x by replacing nbin consecutive
bins by their mean
@RETURN: the downsampled vector
"""
n_down = int(np.floor(len(x) / nbin))
x = x[0:n_down*nbin]
x_down = np.zeros((n_down,))
# 0 1 2 | 3 4 5 | 6 7 8
for i in range(nbin) :
idx = list(range(i, int(n_down*nbin), int(nbin)))
x_down += x[idx]
return x_down / nbin
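# Worked example: downsample_vec(np.arange(6.0), 3) averages [0,1,2] and [3,4,5]
# and returns array([1., 4.]); samples beyond the last full bin are dropped.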
def smooth_data(x, sig):
"""
y = smooth_data(x, sig)
smooth data vector @x with gaussian kernel
with standard deviation $sig
"""
sig = float(sig)
if sig == 0.0:
return x
# gaussian:
gauss = lambda x, sig : (1/(sig*np.sqrt(2.*np.pi)))*np.exp(-(x*x)/(2.*sig*sig))
bound = 1.0/10000
L = 10.
p = gauss(L, sig)
while (p > bound):
L = L+10
p = gauss(L, sig)
#F = map(lambda x: gauss((x, sig)), np.arange(-L, L+1.))
# py3:
F = [gauss(x, sig) for x in np.arange(-L, L+1.)]
F = F / np.sum(F)
return scipy.signal.fftconvolve(x, F, 'same')
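# Behaviour sketch: smooth_data(x, 0) returns x unchanged; for sig > 0 the signal
# is convolved with a unit-area Gaussian kernel, so e.g. a unit impulse is spread
# into a Gaussian bump of standard deviation ~sig bins that still sums to ~1.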
def power_spectrum(data, length, dt):
"""
scipy's implementation of Welch's method using hanning window to estimate
the power spectrum
The function returns power density with units V**2/Hz
see also https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.welch.html
The label on the y-axis should say PSD [V**2/Hz]
@Parameters
data - time series; float vector!
length - length of hanning window, even integer!
@Return:
power density (in units V**2/Hz), frequencies
Note that
np.var(data) ~ np.sum(power density) * (frequencies[1]-frequencies[0])
"""
f, pxx = scipy.signal.welch(data, fs=1.0/dt, window='hanning', nperseg=int(length), noverlap=int(length/2))
return pxx, f
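# Sanity-check sketch for the variance relation quoted in the docstring
# (white-noise input, assumed values):
#   dt = 1/1000.
#   x = np.random.randn(10000)
#   pxx, f = power_spectrum(x, 1000, dt)
#   df = f[1] - f[0]
#   # np.var(x) is then approximately np.sum(pxx) * df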
def spectral_density(data, length, nfft, dt):
"""
calculate the spectrogram for the time series given by data with time resolution dt
The powerspectrum for each window of length $length is computed using
Welch's method.
The windows for the powerspectrum calculation are half-overlapping. If length contains 5s of data,
then the first windows goes from 0s to 5s, the second window from 2.5 to 7.5s, ...
The last window ends at ceil(len(data)/length)*5s
Another example, assume we have 13 s of data, with 5 s windows, the the powerdensity is calculated for the following
time windows:
0 -- 5, 2.5 -- 7.5, 5 -- 10, 7.5 -- 12.5, 10 -- 15
In total there are thus 2*ceil(13/5)-1 = 5 windows
The last window starts at 2*3-2 * (5/2) = 10 s
Note: the returned time axis starts at time point goes from 0 to 10s in 2.5s steps
@Parameters:
data - time series
length - window length of data used to calculate powerspectrum.
Note that the time resolution of the spectrogram is length/2
nfft - size of the window used to calculate the powerspectrum.
determines the frequency resolution.
@Return:
Powspectrum, frequencies, time axis
"""
n = len(data)
k = int(np.ceil((1.0*n)/length))
data = np.concatenate((data, np.zeros((length*k-n,))))
fdt = length*dt/2 # time step for spectrogram
t = np.arange(0, fdt*(2*k-2)+fdt/2.0, fdt)
# frequency axis of spectrogram
f = np.linspace(0, 1, int(np.ceil(nfft/2.0))+1) * (0.5/dt)
# the power spectrum is calculated for 2*k-1 time points
Pow = np.zeros((len(f), k*2-1))
j = 0
for i in range(0, k-2+1):
w1=data[(length*i):(i+1)*length]
w2=data[length*i+int(length/2):(i+1)*length+int(length/2)]
Pow[:,j] = power_spectrum(w1, nfft, dt)[0]
Pow[:,j+1] = power_spectrum(w2, nfft, dt)[0]
j += 2
# last time point
Pow[:,j],f = power_spectrum(data[length*(k-1):k*length], nfft, dt)
return Pow, f, t
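# Shape sketch matching the 13 s / 5 s-window example in the docstring, assuming a
# 100 Hz signal (dt=0.01, so length=nfft=500 samples corresponds to 5 s):
#   Pow, f, t = spectral_density(np.random.randn(1300), 500, 500, 0.01)
#   # Pow.shape == (251, 5), t == [0, 2.5, 5, 7.5, 10]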
def calculate_spectrum(ppath, name, fres=0.5):
"""
calculate EEG and EMG spectrogram used for sleep stage detection.
Function assumes that data vectors EEG.mat and EMG.mat exist in recording
folder ppath/name; these are used to calculate the powerspectrum
fres - resolution of frequency axis
all data saved in "true" mat files
:return EEG Spectrogram, EMG Spectrogram, frequency axis, time axis
"""
SR = get_snr(ppath, name)
swin = round(SR)*5
fft_win = round(swin/5) # approximate number of data points per second
if (fres == 1.0) or (fres == 1):
fft_win = int(fft_win)
elif fres == 0.5:
fft_win = 2*int(fft_win)
else:
print("Resolution %f not allowed; please use either 1 or 0.5" % fres)
(peeg2, pemg2) = (False, False)
# Calculate EEG spectrogram
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG.mat'))['EEG'])
Pxx, f, t = spectral_density(EEG, int(swin), int(fft_win), 1/SR)
if os.path.isfile(os.path.join(ppath, name, 'EEG2.mat')):
peeg2 = True
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG2.mat'))['EEG2'])
Pxx2, f, t = spectral_density(EEG, int(swin), int(fft_win), 1/SR)
#save the stuff to a .mat file
spfile = os.path.join(ppath, name, 'sp_' + name + '.mat')
if peeg2 == True:
so.savemat(spfile, {'SP':Pxx, 'SP2':Pxx2, 'freq':f, 'dt':t[1]-t[0],'t':t})
else:
so.savemat(spfile, {'SP':Pxx, 'freq':f, 'dt':t[1]-t[0],'t':t})
# Calculate EMG spectrogram
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG.mat'))['EMG'])
Qxx, f, t = spectral_density(EMG, int(swin), int(fft_win), 1/SR)
if os.path.isfile(os.path.join(ppath, name, 'EMG2.mat')):
pemg2 = True
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG2.mat'))['EMG2'])
Qxx2, f, t = spectral_density(EMG, int(swin), int(fft_win), 1/SR)
# save the stuff to .mat file
spfile = os.path.join(ppath, name, 'msp_' + name + '.mat')
if pemg2 == True:
so.savemat(spfile, {'mSP':Qxx, 'mSP2':Qxx2, 'freq':f, 'dt':t[1]-t[0],'t':t})
else:
so.savemat(spfile, {'mSP':Qxx, 'freq':f, 'dt':t[1]-t[0],'t':t})
return Pxx, Qxx, f, t
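# After calculate_spectrum(ppath, name) the spectrograms can be reloaded with
#   P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'), squeeze_me=True)
#   SP, freq, t = P['SP'], P['freq'], P['t']
# and the EMG spectrogram from 'msp_' + name + '.mat' under key 'mSP'.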
def whiten_spectrogram(ppath, name, fmax=50):
"""
experimental
:param ppath:
:param name:
:param fmax:
:return:
"""
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'), squeeze_me=True)
SPE = P['SP']
freq = P['freq']
ifreq = np.where(freq <= fmax)[0]
SPE = SPE[ifreq,:]
nfilt = 5
filt = np.ones((nfilt, nfilt))
filt = np.divide(filt, filt.sum())
#SPE = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
m = np.mean(SPE,axis=1)
SPE -= np.tile(m, (SPE.shape[1], 1)).T
SPE = SPE.T
C = np.dot(SPE.T, SPE)
[evals, L] = np.linalg.eigh(C)
idx = np.argsort(evals)
D = np.diag(np.sqrt(evals[idx]))
L = L[:,idx]
W = np.dot(L, np.dot(np.linalg.inv(D),np.dot(L.T,SPE.T)))
nfilt = 2
filt = np.ones((nfilt,nfilt))
filt = np.divide(filt, filt.sum())
W = scipy.signal.convolve2d(W, filt, boundary='symm', mode='same')
return W, D, L
def normalize_spectrogram(ppath, name, fmax=0, band=[], vm=5, pplot=True, sptype='', filt_dim=[]):
"""
Normalize EEG spectrogram by deviding each frequency band by its average value.
:param ppath, name: base folder, recording name
:param fmax: maximum frequency; frequency axis of spectrogram goes from 0 to fmax
if fmax=0, use complete frequency axis
:param band: list or tuple, define lower and upper range of a frequency band,
if pplot=True, plot band, along with spectrogram;
if band=[], disregard
:param vm: color range for plotting spectrogram
:pplot: if True, plot spectrogram along with power band
:sptype: if sptype='fine' plot 'special' spectrogram, save under sp_fine_$name.mat;
otherwise plot 'normal' spectrogram sp_$name.mat
:filt_dim: list or tuple; the two values define the dimensions of box filter
used to filter the normalized spectrogram; if filt_dim=[], then no filtering
:return SPE, t, freq: normalized spectrogram (np.array), time axis, frequency axis
"""
if (len(sptype) == 0) or (sptype=='std'):
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'), squeeze_me=True)
elif sptype == 'fine':
P = so.loadmat(os.path.join(ppath, name, 'sp_fine_' + name + '.mat'), squeeze_me=True)
SPE = P['SP']
freq = P['freq']
t = P['t']
if fmax > 0:
ifreq = np.where(freq <= fmax)[0]
else:
ifreq = np.arange(0, len(freq))
freq = freq[ifreq]
nfilt = 4
filt = np.ones((nfilt,nfilt))
filt = np.divide(filt, filt.sum())
SPE = SPE[ifreq,:]
# before
#SPE = SPE[ifreq]
#W = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
#sp_mean = W.mean(axis=1)
sp_mean = SPE.mean(axis=1)
SPE = np.divide(SPE, np.tile(sp_mean, (SPE.shape[1], 1)).T)
if len(filt_dim) > 0:
filt = np.ones(filt_dim)
filt = np.divide(filt, filt.sum())
SPE = scipy.signal.convolve2d(SPE, filt, boundary='symm', mode='same')
# get high gamma peaks
if len(band) > 0:
iband = np.where((freq >= band[0]) & (freq <= band[-1]))[0]
pow_band = SPE[iband,:].mean(axis=0)
thr = pow_band.mean() + pow_band.std()
idx = np.where(pow_band > thr)[0]
# plot normalized spectrogram, along with band
if pplot:
plt.ion()
plt.figure()
if len(band) > 0:
med = np.median(SPE.mean(axis=0))
ax1 = plt.subplot(211)
plt.pcolormesh(t, freq, SPE, vmin=0, vmax=vm*med, cmap='jet')
plt.subplot(212, sharex=ax1)
plt.plot(t,SPE[iband,:].mean(axis=0))
plt.plot(t[idx], pow_band[idx], '.')
plt.draw()
return SPE, t, freq[ifreq]
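# Usage sketch (the 10-15 Hz band is an arbitrary example): restrict the frequency
# axis to 0-30 Hz and plot the band power alongside the spectrogram when pplot=True:
#   SPE, t, freq = normalize_spectrogram(ppath, name, fmax=30, band=[10, 15])
# Since every frequency row is divided by its own mean, SPE.mean(axis=1) is ~1
# whenever no box filter (filt_dim) is applied.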
def recursive_spectrogram(ppath, name, sf=0.3, alpha=0.3, pplot=True):
"""
calculate EEG/EMG spectrogram in a way that can be implemented by a closed-loop system.
The spectrogram is temporally filtered using a recursive implementation of a lowpass filter
@Parameters:
ppath/name - mouse EEG recording
sf - smoothing factor along frequency axis
alpha - temporal lowpass filter time constant
pplot - if pplot==True, plot figure
@Return:
SE, SM - EEG, EMG spectrogram
"""
EEG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EEG.mat'))['EEG'])
EMG = np.squeeze(so.loadmat(os.path.join(ppath, name, 'EMG.mat'))['EMG'])
len_eeg = len(EEG)
fdt = 2.5
SR = get_snr(ppath, name)
# we calculate the powerspectrum for 5s windows
swin = int(np.round(SR) * 5.0)
# but we sample new data each 2.5 s
swinh = int(swin/2.0)
fft_win = int(swin / 5.0)
# number of 2.5s long samples
spoints = int(np.floor(len_eeg / swinh))
SE = np.zeros((int(fft_win/2+1), spoints))
SM = np.zeros((int(fft_win/2+1), spoints))
print("Starting calculating spectrogram for %s..." % name)
for i in range(2, spoints):
# we take the last two swinh windows (the new 2.5 s long sample and the one from
# the last iteration)
x = EEG[(i-2)*swinh:i*swinh]
[p, f] = power_spectrum(x.astype('float'), fft_win, 1.0/SR)
p = smooth_data(p, sf)
# recursive low pass filtering of spectrogram:
# the current state is an estimate of the current sample and the previous state
SE[:,i] = alpha*p + (1-alpha) * SE[:,i-1]
# and the same of EMG
x = EMG[(i-2)*swinh:i*swinh]
[p, f] = power_spectrum(x.astype('float'), fft_win, 1.0/SR)
p = smooth_data(p, sf)
SM[:,i] = alpha*p + (1-alpha) * SM[:,i-1]
if pplot:
# plot EEG spectrogram
t = np.arange(0, SM.shape[1])*fdt
plt.figure()
ax1 = plt.subplot(211)
im = np.where((f>=0) & (f<=30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im,:]), vmin=0, vmax=med*2)
plt.xticks(())
ix = list(range(0, 30, 10))
fi = f[im][::-1]
plt.yticks(ix, list(map(int, fi[ix])))
box_off(ax1)
plt.axis('tight')
plt.ylabel('Freq (Hz)')
# plot EMG amplitude
ax2 = plt.subplot(212)
im = np.where((f>=10) & (f<100))[0]
df = np.mean(np.diff(f))
# amplitude is the square root of the integral
ax2.plot(t, np.sqrt(SM[im,:].sum(axis=0)*df)/1000.0)
plt.xlim((0, t[-1]))
plt.ylabel('EMG Ampl (mV)')
plt.xlabel('Time (s)')
box_off(ax2)
plt.show(block=False)
return SE, SM, f
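# The temporal filter used above is a simple exponential moving average; a scalar
# sketch with alpha=0.3 and a constant input of 1.0:
#   est = 0.0
#   for p in [1.0, 1.0, 1.0]:
#       est = 0.3 * p + 0.7 * est   # 0.3, 0.51, 0.657 -> converges towards 1.0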
def recursive_sleepstate_rem(ppath, recordings, sf=0.3, alpha=0.3, past_mu=0.2, std_thdelta = 1.5, past_len=120, sdt=2.5, psave=False, xemg=False):
"""
predict a REM period only based on EEG/EMG history; the same algorithm is also used for
closed-loop REM sleep manipulation.
For REM sleep detection the algorithm uses thresholds on delta power, EMG power, and theta/delta power.
For theta/delta I use two thresholds: a hard (larger) threshold and a soft (lower) threshold. Initially,
theta/delta has to cross the hard threshold to initiate a REM period. Then, as long as
theta/delta stays above the soft threshold (and EMG power stays low), REM sleep continues.
@Parameters:
ppath base folder with recordings
recordings list of recordings
sf smoothing factor for each powerspectrum
alpha smoothing factor along time dimension
past_mu percentage (0 .. 1) of brain states that are allowed to have EMG power larger than threshold
during the last $past_len seconds
past_len window to calculate $past_mu
std_thdelta the hard theta/delta threshold is given by, mean(theta/delta) + $std_thdelta * std(theta/delta)
sdt time bin for brain state, typically 2.5s
psave if True, save threshold parameters to file.
"""
idf = re.split('_', recordings[0])[0]
# 02/05/2020 changed from int to float:
past_len = float(np.round(past_len/sdt))
# calculate spectrogram
(SE, SM) = ([],[])
for rec in recordings:
A,B, freq = recursive_spectrogram(ppath, rec, sf=sf, alpha=alpha)
SE.append(A)
SM.append(B)
# fuse lists SE and SM
SE = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SE))
if not xemg:
SM = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SM))
else:
SM = SE
# EEG, EMG bands
ntbins = SE.shape[1]
r_delta = [0.5, 4]
r_theta = [5,12]
# EMG band
r_mu = [300, 500]
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
pow_delta = np.sum(SE[i_delta,:], axis=0)
pow_theta = np.sum(SE[i_theta,:], axis=0)
pow_mu = np.sum(SM[i_mu,:], axis=0)
# theta/delta
th_delta = np.divide(pow_theta, pow_delta)
thr_th_delta1 = np.nanmean(th_delta) + std_thdelta*np.nanstd(th_delta)
thr_th_delta2 = np.nanmean(th_delta) + 0.0*np.nanstd(th_delta)
thr_delta = pow_delta.mean()
thr_mu = pow_mu.mean() + 0.5*np.nanstd(pow_mu)
### The actual algorithm for REM detection
rem_idx = np.zeros((ntbins,))
prem = 0 # whether or not we are in REM
for i in range(ntbins):
if prem == 0 and pow_delta[i] < thr_delta and pow_mu[i] < thr_mu:
### could be REM
if th_delta[i] > thr_th_delta1:
### we are potentially entering REM
if (i - past_len) >= 0:
sstart = int(i-past_len)
else:
sstart = 0
# count the percentage of brainstate bins with elevated EMG power
c_mu = np.sum( np.where(pow_mu[sstart:i]>thr_mu)[0] ) / (past_len*1.0)
if c_mu < past_mu:
### we are in REM
prem = 1 # turn laser on
rem_idx[i] = 1
# We are currently in REM; do we stay there?
if prem == 1:
### REM continues, if theta/delta is larger than soft threshold and if there's
### no EMG activation
if (th_delta[i] > thr_th_delta2) and (pow_mu[i] < thr_mu):
rem_idx[i] = 1
else:
prem = 0 #turn laser off
# for loop ends
# Determine which channel is EEG, EMG
ch_alloc = get_infoparam(os.path.join(ppath, recordings[0], 'info.txt'), 'ch_alloc')[0]
# plot the whole stuff:
# (1) spectrogram
# (2) EMG Power
# (3) Delta
# (4) TH_Delta
plt.figure()
t = np.arange(0, sdt*(ntbins-1)+sdt/2.0, sdt)
ax1 = plt.subplot(411)
im = np.where((freq>=0) & (freq<=30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im,:]), vmin=0, vmax=med*2)
plt.yticks(list(range(0, 31, 10)), list(range(30, -1, -10)))
plt.ylabel('Freq. (Hz)')
plt.axis('tight')
ax2 = plt.subplot(412)
ax2.plot(t, pow_mu, color='black')
ax2.plot(t, np.ones((len(t),))*thr_mu, color='red')
plt.ylabel('EMG Pow.')
plt.xlim((t[0], t[-1]))
ax3 = plt.subplot(413, sharex=ax2)
ax3.plot(t, pow_delta, color='black')
ax3.plot(t, np.ones((len(t),))*thr_delta, color='red')
plt.ylabel('Delta Pow.')
plt.xlim((t[0], t[-1]))
ax4 = plt.subplot(414, sharex=ax3)
ax4.plot(t, th_delta, color='black')
ax4.plot(t, np.ones((len(t),))*thr_th_delta1, color='red')
ax4.plot(t, np.ones((len(t),))*thr_th_delta2, color='pink')
ax4.plot(t, rem_idx*thr_th_delta1, color='blue')
plt.ylabel('Theta/Delta')
plt.xlabel('Time (s)')
plt.xlim((t[0], t[-1]))
plt.show(block=False)
# write config file
if psave:
cfile = os.path.join(ppath, idf + '_rem.txt')
fid = open(cfile, 'w')
fid.write(('IDF: %s'+os.linesep) % idf)
fid.write(('ch_alloc: %s'+os.linesep) % ch_alloc)
fid.write(('THR_DELTA: %.2f'+os.linesep) % thr_delta)
fid.write(('THR_MU: %.2f'+os.linesep) % thr_mu)
fid.write(('THR_TH_DELTA: %.2f %.2f'+os.linesep) % (thr_th_delta1, thr_th_delta2))
fid.write(('STD_THDELTA: %.2f'+os.linesep) % std_thdelta)
fid.write(('PAST_MU: %.2f'+os.linesep) % past_mu)
fid.write(('SF: %.2f'+os.linesep) % sf)
fid.write(('ALPHA: %.2f'+os.linesep) % alpha)
fid.write(('Bern: %.2f' + os.linesep) % 0.5)
if xemg:
fid.write(('XEMG: %d'+os.linesep) % 1)
else:
fid.write(('XEMG: %d' + os.linesep) % 0)
fid.close()
print('wrote file %s' % cfile)
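# Hypothetical call for one mouse with two recordings (names are placeholders):
#   recursive_sleepstate_rem(ppath, ['M1_010120n1', 'M1_020120n1'],
#                            sf=0.3, alpha=0.3, std_thdelta=1.5, psave=True)
# With psave=True the fitted thresholds end up in $ppath/M1_rem.txt, where 'M1' is
# the part of the first recording name before the first underscore.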
def recursive_sleepstate_rem_control(ppath, recordings, past_len=120, sdt=2.5, delay=120):
"""
algorithm running laser control for REM sleep dependent activation/inhibition.
$delay s after a detected REM sleep period, the laser is turned on for the same duration. If a new REM period starts,
the laser stops, but we keep track of the missing time. The next time the laser turns on again,
it stays on for the duration of the most recent REM period + the remaining time.
The algorithm for REM detection is the same as used for closed-loop REM sleep manipulation.
The function reads in the required parameters from the configuration file (MOUSEID_rem.txt)
For REM sleep detection the algorithm uses thresholds on delta power, EMG power, and theta/delta power.
For theta/delta I use two thresholds: a hard (larger) threshold and a soft (lower) threshold. Initially,
theta/delta has to cross the hard threshold to initiate a REM period. Then, as long as
theta/delta stays above the soft threshold (and EMG power stays low), REM sleep continues.
@Parameters:
ppath base folder with recordings
recordings list of recordings
past_len window to calculate $past_mu
sdt time bin for brain state, typically 2.5s
delay delay to wait after a REM sleep period ends, until the laser is turned on.
"""
idf = re.split('_', recordings[0])[0]
past_len = int(np.round(past_len/sdt))
# load parameters
cfile = os.path.join(ppath, idf + '_rem.txt')
params = load_sleep_params(ppath, cfile)
thr_th_delta1 = params['THR_TH_DELTA'][0]
thr_th_delta2 = params['THR_TH_DELTA'][1]
thr_delta = params['THR_DELTA'][0]
thr_mu = params['THR_MU'][0]
alpha = params['ALPHA'][0]
sf = params['SF'][0]
past_mu = params['PAST_MU'][0]
xemg = params['XEMG'][0]
# calculate spectrogram
(SE, SM) = ([], [])
for rec in recordings:
A, B, freq = recursive_spectrogram(ppath, rec, sf=sf, alpha=alpha)
SE.append(A)
SM.append(B)
# fuse lists SE and SM
SE = np.squeeze(reduce(lambda x, y: np.concatenate((x, y)), SE))
if not xemg:
SM = np.squeeze(reduce(lambda x, y: np.concatenate((x, y)), SM))
else:
SM = SE
# EEG, EMG bands
ntbins = SE.shape[1]
r_delta = [0.5, 4]
r_theta = [5, 12]
# EMG band
r_mu = [300, 500]
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
pow_delta = np.sum(SE[i_delta, :], axis=0)
pow_theta = np.sum(SE[i_theta, :], axis=0)
pow_mu = np.sum(SM[i_mu, :], axis=0)
th_delta = np.divide(pow_theta, pow_delta)
### The actual algorithm for REM detection
rem_idx = np.zeros((ntbins,))
prem = 0 # whether or not we are in REM
# NEW variables:
laser_idx = np.zeros((ntbins,))
delay = int(np.round(delay/sdt))
delay_count = 0
curr_rem_dur = 0
dur_count = 0
on_delay = False
laser_on = False
for i in range(ntbins):
if prem == 0 and pow_delta[i] < thr_delta and pow_mu[i] < thr_mu:
### could be REM
if th_delta[i] > thr_th_delta1:
### we are potentially entering REM
if (i - past_len) >= 0:
sstart = i - past_len
else:
sstart = 0
# count the percentage of brainstate bins with elevated EMG power
c_mu = np.sum(np.where(pow_mu[sstart:i] > thr_mu)[0]) / past_len
if c_mu < past_mu:
### we are in REM
prem = 1 # turn laser on
rem_idx[i] = 1
curr_rem_dur += 1 #NEW
# We are currently in REM; do we stay there?
if prem == 1:
### REM continues, if theta/delta is larger than soft threshold and if there's
### no EMG activation
if (th_delta[i] > thr_th_delta2) and (pow_mu[i] < thr_mu):
rem_idx[i] = 1
curr_rem_dur += 1
else:
prem = 0 # turn laser off
dur_count += curr_rem_dur #NEW
delay_count = delay #NEW
curr_rem_dur = 0 #NEW
on_delay = True #NEW
# NEW:
if on_delay:
if prem == 0:
delay_count -=1
if delay_count == 0:
laser_on = True
on_delay = False
if laser_on:
if prem == 0:
if dur_count >= 0:
dur_count -= 1
laser_idx[i] = 1
else:
laser_on = False
else:
laser_on = False
# plot the whole stuff:
# (1) spectrogram
# (2) EMG Power
# (3) Delta
# (4) TH_Delta
plt.figure()
t = np.arange(0, sdt*(ntbins-1)+sdt/2.0, sdt)
ax1 = plt.subplot(411)
im = np.where((freq>=0) & (freq<=30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im,:]), vmin=0, vmax=med*2)
plt.yticks(list(range(0, 31, 10)), list(range(30, -1, -10)))
plt.ylabel('Freq. (Hz)')
plt.axis('tight')
ax2 = plt.subplot(412)
ax2.plot(t, pow_mu, color='black')
ax2.plot(t, np.ones((len(t),))*thr_mu, color='red')
plt.ylabel('EMG Pow.')
plt.xlim((t[0], t[-1]))
ax3 = plt.subplot(413, sharex=ax2)
ax3.plot(t, pow_delta, color='black')
ax3.plot(t, np.ones((len(t),))*thr_delta, color='red')
plt.ylabel('Delta Pow.')
plt.xlim((t[0], t[-1]))
ax4 = plt.subplot(414, sharex=ax3)
ax4.plot(t, th_delta, color='black')
ax4.plot(t, np.ones((len(t),))*thr_th_delta1, color='red')
ax4.plot(t, np.ones((len(t),))*thr_th_delta2, color='pink')
ax4.plot(t, rem_idx*thr_th_delta1, color='green', label='REM')
ax4.plot(t, laser_idx * thr_th_delta1, color='blue', label='Laser')
plt.ylabel('Theta/Delta')
plt.xlabel('Time (s)')
plt.xlim((t[0], t[-1]))
plt.legend()
plt.show(block=False)
def load_sleep_params(path, param_file):
"""
load parameter file generated by &recursive_sleepstate_rem || &recursive_sleepstate_nrem
@Return:
Dictionary: Parameter --> Value
"""
fid = open(os.path.join(path, param_file), 'r')
lines = fid.readlines()
params = {}
for line in lines:
if re.match('^[\S_]+:', line):
a = re.split('\s+', line)
key = a[0][:-1]
params[key] = a[1:-1]
# transform number strings to floats
for k in params:
vals = params[k]
new_vals = []
for v in vals:
if re.match('^[\d\.]+$', v):
new_vals.append(float(v))
else:
new_vals.append(v)
params[k] = new_vals
return params
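# Example sketch: reading back a configuration written by recursive_sleepstate_rem()
# (file name and values are illustrative):
#   params = load_sleep_params(ppath, 'M1_rem.txt')
#   params['THR_TH_DELTA']   # -> [1.95, 1.2], two floats (hard and soft threshold)
#   params['IDF']            # -> ['M1'], non-numeric entries stay strings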
def recursive_sleepstate_nrem(ppath, recordings, sf=0.3, alpha=0.3, std_thdelta = 1.5, sdt=2.5, psave=False, xemg=False):
"""
predict NREM periods based only on EEG/EMG history; the same algorithm is also used for
closed-loop NREM sleep manipulation.
The algorithm uses for NREM sleep detection thresholds for delta power, EMG power, and theta/delta power.
For delta I use two thresholds: a hard (larger) threshold and a soft (lower) threshold. Initially,
delta power has to cross the hard threshold to initiate a NREM period. Then, as long as
delta power stays above the soft threshold (and EMG power stays low), NREM sleep continues.
The values for hard and soft threshold are fitted using a Gaussian mixture model
:param ppath: base folder
:param recordings: list of recordings
:param sf: smoothing factor for each powerspectrum
:param alpha: temporal smoothing factor (smoothing along the time dimension)
:param std_thdelta: factor to set threshold for theta/delta
:param sdt: time step of brain state classification, typically 2.5 s
:param psave: save parameters to text file?
:param xemg: use EEG instead of EMG?
"""
# to fit Gaussian mixture model to delta power distribution
from sklearn import mixture
idf = re.split('_', recordings[0])[0]
# calculate spectrogram
(SE, SM) = ([],[])
for rec in recordings:
A,B, freq = recursive_spectrogram(ppath, rec, sf=sf, alpha=alpha)
SE.append(A)
SM.append(B)
# fuse lists SE and SM
SE = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SE))
if not xemg:
SM = np.squeeze(reduce(lambda x,y: np.concatenate((x,y)), SM))
else:
SM = SE
# EEG, EMG bands
ntbins = SE.shape[1]
r_delta = [0.5, 4]
r_theta = [5,12]
# EMG band
r_mu = [300, 500]
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
pow_delta = np.sum(SE[i_delta,:], axis=0)
pow_theta = np.sum(SE[i_theta,:], axis=0)
pow_mu = np.sum(SM[i_mu,:], axis=0)
# theta/delta
th_delta = np.divide(pow_theta, pow_delta)
thr_th_delta1 = np.nanmean(th_delta) + std_thdelta*np.nanstd(th_delta)
thr_th_delta2 = np.nanmean(th_delta) + 0.0*np.nanstd(th_delta)
thr_mu = pow_mu.mean() + 0.5*np.nanstd(pow_mu)
med_delta = np.median(pow_delta)
pow_delta_fit = pow_delta[np.where(pow_delta<=3*med_delta)]
# fit Gaussian mixture model to delta power
# see http://www.astroml.org/book_figures/chapter4/fig_GMM_1D.html
gm = mixture.GaussianMixture(n_components=2)
fit = gm.fit(pow_delta_fit.reshape(-1, 1))
means = np.squeeze(fit.means_)
x = np.arange(0, med_delta*3, 100)
plt.figure()
plt.hist(pow_delta_fit, 100, density=True, histtype='stepfilled', alpha=0.4)
logprob = fit.score_samples(x.reshape(-1,1))
responsibilities = fit.predict_proba(x.reshape((-1,1)))
pdf = np.exp(logprob)
pdf_individual = responsibilities * pdf[:, np.newaxis]
plt.plot(x, pdf, '-k')
plt.plot(x, pdf_individual, '--k')
plt.xlim((0, med_delta*3))
plt.ylabel('p(x)')
plt.xlabel('x = Delta power')
# get point where curves cut each other
if means[0] < means[1]:
idx = np.where((x>= means[0]) & (x<= means[1]))[0]
else:
idx = np.where((x >= means[1]) & (x <= means[0]))[0]
imin = np.argmin(pdf[idx])
xcut = x[idx[0]+imin]
plt.plot(xcut, pdf[idx[0]+imin], 'ro')
ilow = np.argmin(np.abs(x-means[0]))
plt.plot(x[ilow], pdf[ilow], 'bo')
ihigh = np.argmin(np.abs(x-means[1]))
plt.plot(x[ihigh], pdf[ihigh], 'go')
plt.show(block=False)
# set parameters for hard and soft delta thresholds
tmp = np.array([x[ihigh], xcut, x[ilow]])
tmp.sort()
thr_delta1 = tmp[-1] # x[ihigh]; right peak of distribution
thr_delta2 = tmp[1] # trough of distribution
# NREM yes or no according to thresholds
# However, this variable does not directly control whether laser should
# be on or off; whether NREM sleep is really on or off is determined
# by nrem_idx; if pnrem_hidden == 1, then all threshold criteria, but not
# sleep history criteria are fulfilled
pnrem_hidden = 0
# if nrem_idx[i] == 1, time point i is NREM
nrem_idx = np.zeros((ntbins,), dtype='int8')
# NREM stays on after thresholds are NOT fulfilled to avoid interruptions by microarousals
grace_period = int(20 / sdt)
# nrem_delay: NREM only starts with some delay
nrem_delay = int(10 / sdt)
grace_count = grace_period
delay_count = nrem_delay
for i in range(ntbins):
if pnrem_hidden == 0:
### Entering NREM:
# Delta power larger than the hard (high) threshold
if pow_delta[i] > thr_delta1 and pow_mu[i] < thr_mu and th_delta[i] < thr_th_delta1:
### NOT-NREM -> NREM
pnrem_hidden = 1
nrem_idx[i] = 0
delay_count -= 1
# we are fully in NREM, that's why grace_count is reset:
grace_count = grace_period
else:
### NOT-NREM -> NOT-NREM
if grace_count > 0:
grace_count -= 1
nrem_idx[i] = 1
else:
nrem_idx[i] = 0
else:
### pnrem_hidden == 1
if pow_delta[i] > thr_delta2 and pow_mu[i] < thr_mu and th_delta[i] < thr_th_delta1:
if delay_count > 0:
delay_count -= 1
nrem_idx[i] = 0
else :
nrem_idx[i] = 1
else:
### Exit NREM -> NOT-NREM
# we are fully out of NREM, so delay_count can be reset:
delay_count = nrem_delay
pnrem_hidden = 0
if grace_count > 0:
grace_count -= 1
nrem_idx[i] = 1
#### figure ##############################################
plt.figure()
t = np.arange(0, sdt * (ntbins - 1) + sdt / 2.0, sdt)
ax1 = plt.subplot(411)
im = np.where((freq >= 0) & (freq <= 30))[0]
med = np.median(SE.max(axis=0))
ax1.imshow(np.flipud(SE[im, :]), vmin=0, vmax=med * 2, cmap='jet')
ax1.pcolorfast(t, freq[im], np.flipud(SE[im, :]), vmin=0, vmax=med * 2, cmap='jet')
plt.yticks(list(range(0, 31, 10)), list(range(30, -1, -10)))
plt.ylabel('Freq. (Hz)')
plt.axis('tight')
ax2 = plt.subplot(412, sharex=ax1)
ax2.plot(t, pow_mu, color='black')
ax2.plot(t, np.ones((len(t),)) * thr_mu, color='red')
plt.ylabel('EMG Pow.')
plt.xlim((t[0], t[-1]))
ax3 = plt.subplot(413, sharex=ax2)
ax3.plot(t, pow_delta, color='black')
ax3.plot(t, np.ones((len(t),)) * thr_delta1, color='red')
ax3.plot(t, np.ones((len(t),)) * thr_delta2, color=[1, 0.6, 0.6])
ax3.plot(t, nrem_idx * thr_delta1, color=[0.6, 0.6, 0.6])
plt.ylabel('Delta Pow.')
plt.xlim((t[0], t[-1]))
ax4 = plt.subplot(414, sharex=ax3)
ax4.plot(t, th_delta, color='black')
ax4.plot(t, np.ones((len(t),)) * thr_th_delta1, color='red')
plt.ylabel('Theta/Delta')
plt.xlabel('Time (s)')
plt.xlim((t[0], t[-1]))
plt.show(block=False)
# Determine which channel is EEG, EMG
ch_alloc = get_infoparam(os.path.join(ppath, recordings[0], 'info.txt'), 'ch_alloc')[0]
# write config file
if psave:
cfile = os.path.join(ppath, idf + '_nrem.txt')
fid = open(cfile, 'w')
fid.write(('IDF: %s' + os.linesep) % idf)
fid.write(('ch_alloc: %s' + os.linesep) % ch_alloc)
fid.write(('THR_DELTA: %.2f %.2f' + os.linesep) % (thr_delta1, thr_delta2))
fid.write(('THR_MU: %.2f' + os.linesep) % thr_mu)
fid.write(('THR_TH_DELTA: %.2f %.2f' + os.linesep) % (thr_th_delta1, thr_th_delta2))
fid.write(('STD_THDELTA: %.2f' + os.linesep) % std_thdelta)
fid.write(('SF: %.2f' + os.linesep) % sf)
fid.write(('ALPHA: %.2f' + os.linesep) % alpha)
if xemg:
fid.write(('XEMG: %d' + os.linesep) % 1)
else:
fid.write(('XEMG: %d' + os.linesep) % 0)
fid.close()
print('wrote file %s' % cfile)
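# Hedged usage sketch: fit the NREM detection thresholds described above on a few
# recordings and save them for later closed-loop use; 'ppath' and the recording
# names are placeholders.
def _example_recursive_sleepstate_nrem(ppath):
    recordings = ['mouse1_010121n1', 'mouse1_020121n1']   # hypothetical recordings
    # psave=True writes the fitted thresholds to <mouse-id>_nrem.txt,
    # which can be reloaded with load_sleep_params()
    recursive_sleepstate_nrem(ppath, recordings, sf=0.3, alpha=0.3, sdt=2.5, psave=True)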
def rem_online_analysis(ppath, recordings, backup='', single_mode=False, fig_file='', overlap=0):
"""
analyze results from closed-loop experiments
:param ppath: base folder
:param recordings: list of strings, recording names
:param backup: string, potential second backup folder with recordings
:param single_mode: boolean, if True, average across all REM periods (irrespective of mouse)
and plot each single REM period as dot
:param overlap: float between 0 and 100; specifies the percentage by which the online detected REM period has to
overlap with the real (annotated) REM period to be further considered for analysis;
if overlap == 0, then any overlap counts, i.e. this parameter has no influence
:return: df, pd.DataFrame, with control and experimental REM durations as data columns
"""
if type(recordings) != list:
recordings = [recordings]
overlap = overlap / 100.0
paths = dict()
for rec in recordings:
if os.path.isdir(os.path.join(ppath, rec)):
paths[rec] = ppath
else:
paths[rec] = backup
mice = dict()
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mice:
mice[idf] = 1
mice = list(mice.keys())
if len(mice) == 1:
single_mode=True
dur_exp = {m:[] for m in mice}
dur_ctr = {m:[] for m in mice}
for rec in recordings:
idf = re.split('_', rec)[0]
M,S = load_stateidx(paths[rec], rec)
sr = get_snr(paths[rec], rec)
nbin = int(np.round(sr)*2.5)
dt = (1.0/sr)*nbin
laser = load_laser(paths[rec], rec)
rem_trig = so.loadmat(os.path.join(paths[rec], rec, 'rem_trig_%s.mat'%rec), squeeze_me=True)['rem_trig']
laser = downsample_vec(laser, nbin)
laser[np.where(laser>0)] = 1
rem_trig = downsample_vec(rem_trig, nbin)
rem_trig[np.where(rem_trig>0)] = 1
laser_idx = np.where(laser==1)[0]
rem_idx = np.where(rem_trig==1)[0]
# REM sequences from offline analysis (assumed to be the
# "ground truth")
seq = get_sequences(np.where(M==1)[0])
for s in seq:
# check true REM sequences overlapping with online detected sequences
isect = np.intersect1d(s, rem_idx)
#print(len(isect)/ len(s))
# test if real REM period s overlaps with online detected REM periods and,
# if yes, make sure that the overlap is at least overlap *100 percent
if len(np.intersect1d(s, rem_idx)) > 0 and float(len(isect)) / len(s) >= overlap:
drn = (s[-1]-s[0]+1)*dt
# does the sequence overlap with laser?
if len(np.intersect1d(isect, laser_idx))>0:
dur_exp[idf].append(drn)
else:
dur_ctr[idf].append(drn)
data = {'exp':[], 'ctr':[]}
# if single_mode put all REM periods together,
# otherwise average across REM periods for each mouse
if len(mice) == 1 or single_mode==True:
for m in mice:
data['exp'] += dur_exp[m]
data['ctr'] += dur_ctr[m]
else:
for idf in dur_ctr:
dur_ctr[idf] = np.array(dur_ctr[idf]).mean()
dur_exp[idf] = np.array(dur_exp[idf]).mean()
data['exp'] = np.array(list(dur_exp.values()))
data['ctr'] = np.array(list(dur_ctr.values()))
df = pd.DataFrame({'ctr':pd.Series(data['ctr']), 'exp' : pd.Series(data['exp'])})
# plot everything
if not single_mode:
plt.ion()
plt.figure()
ax = plt.axes([0.2, 0.15, 0.3, 0.7])
df_mean = df.mean()
plt.bar([1], [df_mean['ctr']], color='grey', label='W/o Laser')
plt.bar([2], [df_mean['exp']], color='blue', label='With laser')
plt.xticks([1,2])
box_off(ax)
#ax.set_xticklabels(['ctr', 'exp'], rotation=30)
plt.ylabel('REM duration (s)')
for (a,b) in zip(df['ctr'], df['exp']):
plt.plot([1,2], [a,b], color='black')
plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=1, frameon=False)
else:
plt.figure()
ax = plt.axes([0.2, 0.15, 0.3, 0.7])
df_mean = df.mean()
plt.bar([1], [df_mean['ctr']], color='grey')
plt.bar([2], [df_mean['exp']], color='blue')
plt.xticks([1,2])
box_off(ax)
#ax.set_xticklabels(['ctr', 'exp'], rotation=30)
plt.ylabel('REM duration (s)')
a = df['ctr']
b = df['exp']
plt.plot(np.ones((len(a),)), a, '.', color='black', label='W/o Laser')
plt.plot(2*np.ones((len(b),)), b, '.', color='black', label='With laser')
plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=1, frameon=False)
plt.show()
if len(fig_file) > 0:
save_figure(fig_file)
return df
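# Hedged usage sketch: compare the durations of online-detected REM periods with and
# without laser as computed above; here only REM periods overlapping at least 50% with
# the online detection are kept. 'ppath' and 'recordings' are placeholders.
def _example_rem_online_analysis(ppath, recordings):
    df = rem_online_analysis(ppath, recordings, single_mode=False, overlap=50)
    # df has columns 'ctr' (w/o laser) and 'exp' (with laser); one row per mouse,
    # or one row per REM period if single_mode is used
    return df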
def online_homeostasis(ppath, recordings, backup='', mode=0, single_mode=False, pplot=True, overlap=0, ma_thr=0):
"""
Further analysis of data obtained from closed loop stimulation
Assume the sleep structure looks like this
R R R R W W N N N N N W W N N N N R R R R R
REM_pre -- inter REM ---- REM_post
REM_pre is the duration of the first REM period, inter-REM is everything between REM_pre and the
next REM period REM_post.
The function calculates the inter REM duration after REM periods with laser and after REM periods w/o laser
:param ppath: base folder
:param recordings: list of recording, or file listing
:param backup: backup folder for $ppath
:param mode: mode == 0, calculate complete inter REM duration
mode == 2, only calculate duration of wake in inter REM periods
mode == 3, only calculate duration of NREM in inter REM periods
:param single_mode: consider each single recording, instead of mice
:param overlap: percentage (number between 0 and 100). Defines the percentage
how much a true (offline annotated) REM period should overlap with laser
to be considered as REM sleep with laser.
Of note, REM periods w/o laser have to have 0 overlap with laser.
All remaining REM periods are discarded.
:param pplot: if True, plot figure; errorbars show 95% confidence intervals,
calculated using bootstrapping
:param ma_thr: microarousal threshold; wake periods shorter than $ma_thr seconds are counted as NREM
:return: df, pandas DataFrame with columns
REM iREM laser
REM - REM duration
iREM - duration of the inter-REM interval following that REM period
laser - 'y' or 'n'; depending on whether laser was on during the REM sleep period (for "REM") or during the
preceding REM sleep period (for "iREM")
if single_mode == False, df additionally contains the column 'mouse' with the mouse ID
"""
if type(recordings) != list:
recordings = [recordings]
if overlap > 0:
overlap = overlap / 100
paths = dict()
for rec in recordings:
if os.path.isdir(os.path.join(ppath, rec)):
paths[rec] = ppath
else:
paths[rec] = backup
mice = dict()
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mice:
mice[idf] = 1
mice = list(mice.keys())
if len(mice) == 1:
single_mode=True
remdur_exp = {m:[] for m in mice}
remdur_ctr = {m:[] for m in mice}
itdur_exp = {m:[] for m in mice}
itdur_ctr = {m:[] for m in mice}
for rec in recordings:
idf = re.split('_', rec)[0]
M = load_stateidx(paths[rec], rec)[0]
sr = get_snr(paths[rec], rec)
nbin = int(np.round(sr)*2.5)
dt = (1.0/sr)*nbin
if ma_thr>0:
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt <= ma_thr:
M[s] = 3
laser = load_laser(paths[rec], rec)
rem_trig = so.loadmat(os.path.join(paths[rec], rec, 'rem_trig_%s.mat' % rec), squeeze_me=True)['rem_trig']
laser = downsample_vec(laser, nbin)
laser[np.where(laser>0)] = 1
rem_trig = downsample_vec(rem_trig, nbin)
rem_trig[np.where(rem_trig>0)] = 1
laser_idx = np.where(laser==1)[0]
rem_idx = np.where(rem_trig==1)[0]
# REM sequences from offline analysis (assumed to be the
# "ground truth")
seq = get_sequences(np.where(M==1)[0])
for (p,q) in zip(seq[0:-1], seq[1:]):
# check if true REM sequences do overlap with online detected sequences
# and only continue working with those:
if len(np.intersect1d(p, rem_idx)) > 0:
drn = (p[-1]-p[0]+1)*dt
it_M = M[p[-1]+1:q[0]]
if mode == 0:
it_drn = len(it_M)*dt
elif mode == 2:
it_drn = len(np.where(it_M==2)[0]) * dt
else:
it_drn = len(np.where(it_M == 3)[0]) * dt
# does the true REM sequence overlap with laser?
# by setting overlap to a value > 0, you can
# set a percentage how much the REM period should overlap with laser
# NEW 08/26/21
if len(np.intersect1d(p, laser_idx)) / len(p) > overlap:
remdur_exp[idf].append(drn)
itdur_exp[idf].append(it_drn)
elif len(np.intersect1d(p, laser_idx)) == 0:
remdur_ctr[idf].append(drn)
itdur_ctr[idf].append(it_drn)
else:
pass
# if single_mode put all REM periods together,
# otherwise average across REM periods for each mouse
if len(mice) == 1 or single_mode==True:
data = {'itexp':[], 'itctr':[], 'remexp':[], 'remctr':[]}
for m in mice:
data['itexp'] += itdur_exp[m]
data['itctr'] += itdur_ctr[m]
data['remexp'] += remdur_exp[m]
data['remctr'] += remdur_ctr[m]
df = pd.DataFrame({'REM': data['remexp']+data['remctr'], 'iREM':data['itexp']+data['itctr'], 'laser': ['y']*len(data['remexp']) + ['n']*len(data['remctr'])})
else:
for idf in mice:
itdur_ctr[idf] = np.array(itdur_ctr[idf]).mean()
itdur_exp[idf] = np.array(itdur_exp[idf]).mean()
remdur_ctr[idf] = np.array(remdur_ctr[idf]).mean()
remdur_exp[idf] = np.array(remdur_exp[idf]).mean()
data = {}
for s in ['itexp', 'itctr', 'remexp', 'remctr']:
data[s] = np.zeros((len(mice),))
i = 0
for m in mice:
data['itexp'][i] = itdur_exp[m]
data['itctr'][i] = itdur_ctr[m]
data['remexp'][i] = remdur_exp[m]
data['remctr'][i] = remdur_ctr[m]
i += 1
df = pd.DataFrame({'REM': np.concatenate((data['remexp'], data['remctr'])),
'iREM': np.concatenate((data['itexp'], data['itctr'])),
'laser': ['y']*len(mice) + ['n']*len(mice),
'mouse': mice+mice})
if pplot and not single_mode:
dfm = pd.melt(df, id_vars=['laser', 'mouse'], var_name='state')
sns.set_style('whitegrid')
plt.ion()
plt.figure()
sns.barplot(data=dfm, hue='laser', x='state', y='value', palette=['blue', 'gray'])
sns.swarmplot(data=dfm, hue='laser', x='state', y='value', dodge=True, color='black')
sns.despine()
plt.ylabel('Duration (s)')
if pplot and single_mode:
dfm = pd.melt(df, id_vars=['laser'], var_name='state')
plt.ion()
plt.figure()
sns.set(style="whitegrid")
#sns.swarmplot(data=df[['itctr', 'itexp']], color='black')
#sns.barplot(data=df[['itctr', 'itexp']], palette=['gray', 'blue'], errcolor='black')
sns.barplot(data=dfm, hue='laser', x='state', y='value', palette=['blue', 'gray'])
sns.swarmplot(data=dfm, hue='laser', x='state', y='value', dodge=True, color='black')
sns.despine()
plt.ylabel('Duration (s)')
return df
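# Hedged usage sketch: inter-REM durations following laser vs. non-laser REM periods
# using the function above; mode=0 counts the complete inter-REM interval and wake
# bouts shorter than 20 s are treated as NREM. 'ppath' and 'recordings' are placeholders.
def _example_online_homeostasis(ppath, recordings):
    df = online_homeostasis(ppath, recordings, mode=0, ma_thr=20, pplot=True)
    # df columns: REM, iREM, laser ('y'/'n'); plus 'mouse' when averaging across mice
    return df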
### FUNCTIONS USED BY SLEEP_STATE #####################################################
def get_sequences(idx, ibreak=1) :
"""
get_sequences(idx, ibreak=1)
idx - np.vector of indices
@RETURN:
seq - list of np.vectors
"""
diff = idx[1:] - idx[0:-1]
breaks = np.nonzero(diff>ibreak)[0]
breaks = np.append(breaks, len(idx)-1)
seq = []
iold = 0
for i in breaks:
r = list(range(iold, i+1))
seq.append(idx[r])
iold = i+1
return seq
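# Minimal worked example for get_sequences(): split an index vector into runs of
# consecutive indices (gaps larger than ibreak start a new sequence).
def _example_get_sequences():
    idx = np.array([2, 3, 4, 10, 11, 20])
    seq = get_sequences(idx)
    # seq == [array([2, 3, 4]), array([10, 11]), array([20])]
    return seq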
def threshold_crossing(data, th, ilen, ibreak, m):
"""
seq = threshold_crossing(data, th, ilen, ibreak, m)
"""
if m>=0:
idx = np.where(data>=th)[0]
else:
idx = np.where(data<=th)[0]
# gather sequences
j = 0
seq = []
while (j <= len(idx)-1):
s = [idx[j]]
for k in range(j+1,len(idx)):
if (idx[k] - idx[k-1]-1) <= ibreak:
# add j to sequence
s.append(idx[k])
else:
break
if (s[-1] - s[0]+1) >= ilen and not(s[0] in [i[1] for i in seq]):
seq.append((s[0], s[-1]))
if j == len(idx)-1:
break
j=k
return seq
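# Minimal worked example for threshold_crossing(): find stretches where the signal
# stays >= th for at least ilen bins, tolerating gaps of at most ibreak missing bins.
def _example_threshold_crossing():
    data = np.array([0, 2, 2, 0, 2, 2, 0, 0, 2, 0])
    seq = threshold_crossing(data, th=1.0, ilen=3, ibreak=1, m=1)
    # seq == [(1, 5)]: start and end index of the only qualifying stretch
    return seq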
def closest_precessor(seq, i):
"""
find the preceding element in seq which is closest to i
helper function for sleep_state
"""
tmp = seq - i
d = np.where(tmp < 0)[0]
if len(d) > 0:
id = seq[d[-1]]
else:
id = 0
return id
def write_remidx(M, K, ppath, name, mode=1) :
"""
write_remidx(M, K, ppath, name, mode=1)
write the state sequence M and the annotation flags K to the remidx file of recording $name;
if mode == 0, write to remidx_<name>.txt, otherwise to remidx_<name>_corr.txt
"""
if mode == 0 :
outfile = os.path.join(ppath, name, 'remidx_' + name + '.txt')
else :
outfile = os.path.join(ppath, name, 'remidx_' + name + '_corr.txt')
f = open(outfile, 'w')
s = ["%d\t%d\n" % (i,j) for (i,j) in zip(M[0,:],K)]
f.writelines(s)
f.close()
#######################################################################################
### MANIPULATING FIGURES ##############################################################
def set_fontsize(fs):
import matplotlib
matplotlib.rcParams.update({'font.size': fs})
def set_fontarial():
"""
set Arial as default font
"""
import matplotlib
matplotlib.rcParams['font.sans-serif'] = "Arial"
def save_figure(fig_file):
# alternative way of setting nice fonts:
#matplotlib.rcParams['pdf.fonttype'] = 42
#matplotlib.rcParams['ps.fonttype'] = 42
#matplotlib.pylab.savefig(fig_file, dpi=300)
#matplotlib.rcParams['text.usetex'] = False
#matplotlib.rcParams['text.usetex'] = True
plt.savefig(fig_file, bbox_inches="tight", dpi=200)
#matplotlib.rcParams['text.usetex'] = False
def box_off(ax):
"""
similar to Matlab's box off
"""
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
#######################################################################################
def sleep_state(ppath, name, th_delta_std=1, mu_std=0, sf=1, sf_delta=3, pwrite=0,
pplot=True, pemg=True, vmax=2.5, pspec_norm=False, use_idx=[]):
"""
automatic sleep state detection based on
delta, theta, sigma, gamma and EMG power.
New: use also sigma band: that's very helpful to classify pre-REM periods
as NREM; otherwise they tend to be classified as wake.
Gamma peaks nicely pick up during microarousals.
My strategy is the following:
I smooth delta band a lot to avoid strong fragmentation of sleep; but to
still pick up microarousals I use the gamma power.
spectrogram data has to be calculated before using calculate_spectrum
Each bin in the spectrogram gets assigned one of four states:
1-REM
2-Wake
3-NREM
0-undef
:param ppath: base folder
:param name: recording name
:param th_delta_std: threshold for theta/delta band is calculated as mean(theta/delta) + th_delta_std*std(theta/delta)
:param mu_std: threshold for EMG power is calculated as mean(EMG) + mu_std*std(EMG)
:param sf: smoothing factor for gamma and sigma power
:param sf_delta: smoothing factor for delta power
:param pwrite: if True, save sleep classification to file remidx_$name.txt
:param pplot: if True, plot figures
:param pemg: if True, use EMG as EMG, otherwise use EEG gamma power instead
:param vmax: float, set maximum of color range of EEG heatmap.
:param pspec_norm: boolean, if True, normalize the EEG spectrogram by dividing each frequency band by its mean; only affects
plotting, no effect on sleep state calculation
:param use_idx: list, if not empty, use only given indices to calculate sleep state
:return:
"""
PRE_WAKE_REM = 30.0
# Minimum Duration and Break in
# high theta/delta, high emg, high delta, high sigma and gamma sequences
#
# duration[i,0] is the minimum duration of sequence of state i
# duration[i,1] is maximal break duration allowed in a sequence of state i
duration = np.zeros((5,2))
# high theta/delta
duration[0,:] = [5,15]
# high emg
duration[1,:] = [0, 5]
# high delta
duration[2,:] = [10, 10]
# high sigma
duration[3,:] = [10, 10]
# gamma
duration[4,:] = [0, 5]
# Frequency Bands/Ranges for delta, theta, and, gamma
r_delta = [0.5, 4]
r_sigma = [12, 20]
r_theta = [5,12]
# EMG band
r_mu = [50, 500]
if not pemg:
r_mu = [250, 500]
# high gamma power
r_gamma = [100, 150]
#load EEG and EMG spectrum, calculated by calculate_spectrum
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'))
if pemg:
Q = so.loadmat(os.path.join(ppath, name, 'msp_' + name + '.mat'))
else:
Q = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'))
SPEEG = np.squeeze(P['SP'])
if pemg == 1:
SPEMG = np.squeeze(Q['mSP'])
else:
SPEMG = np.squeeze(P['SP'])
if use_idx == []:
use_idx = range(0, SPEEG.shape[1])
freq = np.squeeze(P['freq'])
t = np.squeeze(P['t'])
dt = float(np.squeeze(P['dt']))
N = len(t)
duration = np.divide(duration,dt)
# get indices for frequency bands
i_delta = np.where((freq >= r_delta[0]) & (freq <= r_delta[1]))[0]
i_theta = np.where((freq >= r_theta[0]) & (freq <= r_theta[1]))[0]
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
i_sigma = np.where((freq >= r_sigma[0]) & (freq <= r_sigma[1]))[0]
i_gamma = np.where((freq >= r_gamma[0]) & (freq <= r_gamma[1]))[0]
p_delta = smooth_data( SPEEG[i_delta,:].mean(axis=0), sf_delta )
p_theta = smooth_data( SPEEG[i_theta,:].mean(axis=0), 0 )
# now filtering for EMG to pick up microarousals
p_mu = smooth_data( SPEMG[i_mu,:].mean(axis=0), sf )
p_sigma = smooth_data( SPEEG[i_sigma,:].mean(axis=0), sf )
p_gamma = smooth_data( SPEEG[i_gamma,:].mean(axis=0), 0 )
th_delta = np.divide(p_theta, p_delta)
#th_delta = smooth_data(th_delta, 2);
seq = {}
seq['high_theta'] = threshold_crossing(th_delta, np.nanmean(th_delta[use_idx])+th_delta_std*np.nanstd(th_delta[use_idx]),
duration[0,1], duration[0,1], 1)
seq['high_emg'] = threshold_crossing(p_mu, np.nanmean(p_mu[use_idx])+mu_std*np.nanstd(p_mu[use_idx]),
duration[1,0], duration[1,1], 1)
seq['high_delta'] = threshold_crossing(p_delta, np.nanmean(p_delta[use_idx]), duration[2,0], duration[2,1], 1)
seq['high_sigma'] = threshold_crossing(p_sigma, np.nanmean(p_sigma[use_idx]), duration[3,0], duration[3,1], 1)
seq['high_gamma'] = threshold_crossing(p_gamma, np.nanmean(p_gamma[use_idx]), duration[4,0], duration[4,1], 1)
# Sleep-State Rules
idx = {}
for k in seq:
tmp = [list(range(i,j+1)) for (i,j) in seq[k]]
# no idea why this works to flatten a list
# idx[k] = sum(tmp, [])
# alternative that I understand:
if len(tmp) == 0:
idx[k] = np.array([])
else:
idx[k] = np.array(reduce(lambda x,y: x+y, tmp))
idx['low_emg'] = np.setdiff1d(np.arange(0,N), np.array(idx['high_emg']))
idx['low_delta'] = np.setdiff1d(np.arange(0,N), np.array(idx['high_delta']))
idx['low_theta'] = np.setdiff1d(np.arange(0,N), np.array(idx['high_theta']))
#REM Sleep: thdel up, emg down, delta down
a = np.intersect1d(idx['high_theta'], idx['low_delta'])
# non high_emg phases
b = np.setdiff1d(a, idx['high_emg'])
rem = get_sequences(b, duration[0,1])
rem_idx = reduce(lambda x,y: np.concatenate((x,y)), rem)
# SWS Sleep
# delta high, no theta, no emg
a = np.setdiff1d(idx['high_delta'], idx['high_emg']) # no emg activation
b = np.setdiff1d(a, idx['high_theta']) # no theta;
sws = get_sequences(b)
sws_idx = reduce(lambda x,y: np.concatenate((x,y)), sws)
#print a
# Wake
# low delta + high emg and not rem
a = np.unique(np.union1d(idx['low_delta'], idx['high_emg']))
b = np.setdiff1d(a, rem_idx)
wake = get_sequences(b)
wake_idx = reduce(lambda x,y: np.concatenate((x,y)), wake)
# sequences with low delta, high sigma and low emg are NREM
a = np.intersect1d(np.intersect1d(idx['high_sigma'], idx['low_delta']), idx['low_emg'])
a = np.setdiff1d(a, rem_idx)
sws_idx = np.unique(np.union1d(a, sws_idx))
wake_idx = np.setdiff1d(wake_idx, a)
#NREM sequences with high gamma are wake
a = np.intersect1d(sws_idx, idx['high_gamma'])
sws_idx = np.setdiff1d(sws_idx, a)
wake_idx = np.unique(np.union1d(wake_idx,a))
# Wake and Theta
wake_motion_idx = np.intersect1d(wake_idx, idx['high_theta'])
# Wake w/o Theta (quiet wake): wake bins that are not in the high-theta set
wake_nomotion_idx = np.setdiff1d(wake_idx, idx['high_theta'])
# Are there overlapping sequences?
a = np.intersect1d(np.intersect1d(rem_idx, wake_idx), sws_idx)
# Are there undefined sequences?
undef_idx = np.setdiff1d(np.setdiff1d(np.setdiff1d(np.arange(0,N), rem_idx), wake_idx), sws_idx)
# Wake wins over SWS
sws_idx = np.setdiff1d(sws_idx, wake_idx)
# Special rules
# if there's a REM sequence directly following a short wake sequence (PRE_WAKE_REM),
# this wake sequence goes to SWS
# NREM to REM transitions are sometimes mistaken as quiet wake periods
for rem_seq in rem:
if len(rem_seq) > 0:
irem_start = rem_seq[0]
# is there wake in the preceding bin?
if irem_start-1 in wake_idx:
# get the closest sws bin in the preceding history
isws_end = closest_precessor(sws_idx, irem_start)
if (irem_start - isws_end)*dt < PRE_WAKE_REM:
new_rem = np.arange(isws_end+1,irem_start)
rem_idx = np.union1d(rem_idx, new_rem)
wake_idx = np.setdiff1d(wake_idx, new_rem)
else:
new_wake = rem_seq
wake_idx = np.union1d(wake_idx, new_wake)
rem_idx = np.setdiff1d(rem_idx, new_wake)
# two different representations for the results:
S = {}
S['rem'] = rem_idx
S['nrem'] = sws_idx
S['wake'] = wake_idx
S['awake'] = wake_motion_idx
S['qwake'] = wake_nomotion_idx
M = np.zeros((N,))
if len(rem_idx) > 0:
M[rem_idx] = 1
if len(wake_idx) > 0:
M[wake_idx] = 2
if len(sws_idx) > 0:
M[sws_idx] = 3
if len(undef_idx) > 0:
M[undef_idx] = 0
# write sleep annotation to file
if pwrite:
outfile = os.path.join(ppath, name, 'remidx_' + name + '.txt')
print("writing annotation to %s" % outfile)
f = open(outfile, 'w')
s = ["%d\t%d\n" % (i,j) for (i,j) in zip(M,np.zeros((N,)))]
f.writelines(s)
f.close()
# nice plotting
plt.ion()
if pplot:
plt.figure(figsize=(18,9))
axes1=plt.axes([0.1, 0.9, 0.8, 0.05])
A = np.zeros((1,len(M)))
A[0,:] = M
cmap = plt.cm.jet
my_map = cmap.from_list('ha', [[0,0,0], [0,1,1],[0.5,0,1], [0.8, 0.8, 0.8]], 4)
#tmp = axes1.imshow(A, vmin=0, vmax=3)
tmp = axes1.pcolorfast(t, [0,1], A, vmin=0, vmax=3)
tmp.set_cmap(my_map)
axes1.axis('tight')
tmp.axes.get_xaxis().set_visible(False)
tmp.axes.get_yaxis().set_visible(False)
box_off(axes1)
# show spectrogram
axes2=plt.axes([0.1, 0.75, 0.8, 0.1], sharex=axes1)
ifreq = np.where(freq <= 30)[0]
med = np.median(SPEEG.max(axis=0))
if pspec_norm:
ifreq = np.where(freq <= 80)[0]
filt = np.ones((6, 1))
filt = filt / np.sum(filt)
SPEEG = scipy.signal.convolve2d(SPEEG, filt, mode='same')
spec_mean = SPEEG.mean(axis=1)
SPEEG = np.divide(SPEEG, np.repeat([spec_mean], SPEEG.shape[1], axis=0).T)
med = np.median(SPEEG.max(axis=0))
axes2.pcolorfast(t, freq[ifreq], SPEEG[ifreq, :], vmax = med*vmax, cmap='jet')
else:
axes2.pcolorfast(t, freq[ifreq], SPEEG[ifreq, :], vmax=med * vmax, cmap='jet')
axes2.axis('tight')
plt.ylabel('Freq (Hz)')
box_off(axes2)
# show delta power
axes3=plt.axes([0.1, 0.6, 0.8, 0.1], sharex=axes2)
axes3.plot(t,p_delta, color='gray')
plt.ylabel('Delta (a.u.)')
plt.xlim((t[0], t[-1]))
seq = get_sequences(S['nrem'])
#for s in seq:
# plt.plot(t[s],p_delta[s], color='red')
s = idx['high_delta']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_delta[s], color='red')
box_off(axes3)
axes4=plt.axes([0.1, 0.45, 0.8, 0.1], sharex=axes3)
axes4.plot(t,p_sigma, color='gray')
plt.ylabel('Sigma (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_sigma']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_sigma[s], color='red')
box_off(axes4)
axes5=plt.axes([0.1, 0.31, 0.8, 0.1], sharex=axes4)
axes5.plot(t,th_delta, color='gray')
plt.ylabel('Th/Delta (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_theta']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],th_delta[s], color='red')
box_off(axes5)
axes6=plt.axes([0.1, 0.17, 0.8, 0.1], sharex=axes5)
axes6.plot(t,p_gamma, color='gray')
plt.ylabel('Gamma (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_gamma']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_gamma[s], color='red')
box_off(axes6)
axes7=plt.axes([0.1, 0.03, 0.8, 0.1], sharex=axes6)
axes7.plot(t,p_mu, color='gray')
plt.xlabel('Time (s)')
plt.ylabel('EMG (a.u.)')
plt.xlim((t[0], t[-1]))
s = idx['high_emg']
seq = get_sequences(s)
for s in seq:
plt.plot(t[s],p_mu[s], color='red')
box_off(axes7)
plt.show()
# 2nd figure showing distribution of different bands
plt.figure(figsize=(20,3))
axes1 = plt.axes([0.05, 0.1, 0.13, 0.8])
plt.hist(p_delta, bins=100)
plt.plot(np.nanmean(p_delta), 10, 'ro')
plt.title('delta')
plt.ylabel('# Occurrences')
box_off(axes1)
axes1 = plt.axes([0.25, 0.1, 0.13, 0.8])
plt.hist(th_delta, bins=100)
plt.plot(np.nanmean(th_delta)+th_delta_std*np.nanstd(th_delta), 10, 'ro')
plt.title('theta/delta')
box_off(axes1)
axes1 = plt.axes([0.45, 0.1, 0.13, 0.8])
plt.hist(p_sigma, bins=100)
plt.plot(np.nanmean(p_sigma), 10, 'ro')
plt.title('sigma')
box_off(axes1)
axes1 = plt.axes([0.65, 0.1, 0.13, 0.8])
plt.hist(p_gamma, bins=100)
plt.plot(np.nanmean(p_gamma), 10, 'ro')
plt.title('gamma')
box_off(axes1)
axes1 = plt.axes([0.85, 0.1, 0.13, 0.8])
plt.hist(p_mu, bins=100)
plt.plot(np.nanmean(p_mu)+np.nanstd(p_mu), 10, 'ro')
plt.title('EMG')
plt.show(block=False)
box_off(axes1)
plt.show()
return M,S
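# Hedged usage sketch for sleep_state(): classify each 2.5 s spectrogram bin of a
# recording; requires that the spectrogram (sp_<name>.mat / msp_<name>.mat) has been
# computed beforehand with calculate_spectrum. 'ppath' and 'name' are placeholders.
def _example_sleep_state(ppath, name):
    M, S = sleep_state(ppath, name, th_delta_std=1, mu_std=0, pwrite=0, pplot=True)
    # M: per-bin state code (1=REM, 2=Wake, 3=NREM, 0=undef); S: dict of index arrays
    return M, S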
def plot_hypnograms(ppath, recordings, tbin=0, unit='h', ma_thr=20, title='', tstart=0, tend=-1):
"""
plot all hypnograms specified in @recordings
:param ppath: base folder
:param recordings: list of recordings
:param tbin: tbin for xticks
:param unit: time unit; h - hour, min - minute, s - second
:param ma_thr: float, wake periods shorter than $ma_thr are considered as microarousals and further converted to NREM
:param tstart: float, start time point (in seconds!) of hypnograms
:param tend: float, last shown time point (in seconds!)
:param title: optional title for figure
"""
recordings = recordings[::-1]
sr = get_snr(ppath, recordings[0])
nbin = int(np.round(sr) * 2.5)
dt_sec = (1.0 / sr) * nbin
istart = int(np.round(tstart/dt_sec))
dt = dt_sec
if unit == 'h':
dt /= 3600
elif unit == 'min':
dt /= 60
rec_len = dict()
irec = 0
ny = (1.0-0.2) / len(recordings)
dy = ny * 0.75
cmap = plt.cm.jet
my_map = cmap.from_list('brs', [[0, 0, 0], [0, 1, 1], [0.5, 0, 1], [0.8, 0.8, 0.8]], 4)
plt.ion()
plt.figure(figsize=(9,4))
axes = []
for rec in recordings:
M,K = load_stateidx(ppath, rec)
#kcut = np.where(K<0)[0]
#M = M[kcut]
#M[kcut] = 0
if tend == -1:
iend = len(M)
else:
iend = int(tend/dt_sec)
M = M[istart:iend]
if ma_thr>0:
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt_sec <= ma_thr:
M[s] = 3
rec_len[rec] = len(M)*dt
t = np.arange(0, len(M))*dt
ax = plt.axes([0.05, ny*irec+0.15, 0.75, dy])
tmp = ax.pcolorfast(t, [0, 1], np.array([M]), vmin=0, vmax=3, cmap=my_map)
box_off(ax)
ax.axis('tight')
tmp.axes.get_yaxis().set_visible(False)
if irec > 0:
tmp.axes.get_xaxis().set_visible(False)
if irec == 0:
plt.xlabel('Time (%s)' % unit)
irec += 1
axes.append(ax)
if len(title) > 0:
plt.title(title)
max_dur = max(rec_len.values())
if tbin > 0:
xtick = np.arange(0, max_dur, tbin)
for (ax, rec) in zip(axes, recordings):
ax.set_xlim([0, max_dur])
if tbin > 0:
ax.set_xticks(xtick)
ax.text(max_dur+max_dur*0.01, 0.5, rec)
plt.show()
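# Hedged usage sketch for plot_hypnograms(): stack the hypnograms of two hypothetical
# recordings, with wake bouts < 20 s shown as NREM and x-ticks every hour.
def _example_plot_hypnograms(ppath):
    recordings = ['mouse1_010121n1', 'mouse2_010121n1']   # placeholder recording names
    plot_hypnograms(ppath, recordings, tbin=1, unit='h', ma_thr=20)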
def plot_swa(ppath, name, delta_win, alpha, band=[0.5, 4.5], swa_yrange=[]):
"""
plot slow wave (delta) activity during NREM
The top plot shows the hypnogram.
The middle plot shows the delta power (irrespective of brain state) as a line plot.
The bottom plot shows, for consecutive $delta_win seconds long bins, the
median delta power (SWA) during NREM, if the ratio of NREM during the
corresponding bin is >= $alpha.
Example call:
dm=plot_swa(ppath, name, 30, 0.5, swa_yrange=[0, 0.012])
:param ppath, name: basefolder, recording name
:param delta_win: plot the median SWA value for each consecutive $delta_win seconds long window
:param alpha: only windows in which the ratio of NREM is larger than alpha (value between 0 and 1) are used
:param swa_yrange: tuple, minimum and maximum value of yrange for SWA
:return df: pd.DataFrame with SWA time points and corresponding median SWA values
"""
r_delta = band
sr = get_snr(ppath, name)
nbin = int(np.round(2.5*sr))
dt = nbin*(1.0/sr)
M,_ = load_stateidx(ppath, name)
t = np.arange(0, len(M))*dt
P = so.loadmat(os.path.join(ppath, name, 'sp_%s.mat' % name), squeeze_me=True)
SP = P['SP']
freq = P['freq']
df = freq[1]-freq[0]
idelta = np.where((freq>=r_delta[0]) & (freq<=r_delta[1]))[0]
pow_delta = SP[idelta,:].sum(axis=0)*df
# get NREM sequences contributing points for fitting
iwin = int(delta_win/dt)
#seq = get_sequences(nrem_idx, ibreak=int((delta_win/dt)*0.1))
delta_med = []
for j in range(0, len(M)-iwin, iwin):
s = range(j, j+iwin)
sc = j+int(iwin/2)
Mcut = M[s]
if (1.0*len(np.where(Mcut==3)[0])) / len(s) >= alpha:
i = np.where(Mcut==3)[0]
i = i+s[0]
a = np.median(pow_delta[i])
delta_med.append((t[sc],a))
df = pd.DataFrame(columns=['time', 'pow'], data=delta_med)
# generate figure
# show brainstate
plt.ion()
plt.figure(figsize=(10, 4))
axes_brs = plt.axes([0.1, 0.85, 0.8, 0.1])
cmap = plt.cm.jet
my_map = cmap.from_list('brs', [[0, 0, 0], [0, 1, 1], [0.6, 0, 1], [0.8, 0.8, 0.8]], 4)
tmp = axes_brs.pcolorfast(t, [0, 1], np.array([M]), vmin=0, vmax=3)
tmp.set_cmap(my_map)
axes_brs.axis('tight')
axes_brs.axes.get_xaxis().set_visible(False)
axes_brs.axes.get_yaxis().set_visible(False)
axes_brs.spines["top"].set_visible(False)
axes_brs.spines["right"].set_visible(False)
axes_brs.spines["bottom"].set_visible(False)
axes_brs.spines["left"].set_visible(False)
# plot delta power as function of time
c = 1000**2
axes_tdelta = plt.axes([0.1, 0.55, 0.8, 0.2], sharex=axes_brs)
plt.plot(t, pow_delta/c, 'k')
box_off(axes_tdelta)
axes_tdelta.axes.get_xaxis().set_visible(False)
axes_tdelta.spines["bottom"].set_visible(False)
plt.ylabel(r'SWA (mV$\mathrm{^2}$)')
# plot delta power medians
axes_delta = plt.axes([0.1, 0.12, 0.8, 0.35], sharex=axes_brs)
for (s,delta) in delta_med:
plt.plot(s, delta/c, 'ko')
print(t)
plt.xlim((t[0], t[-1]))
box_off(axes_delta)
plt.xlabel('Time (s)')
plt.ylabel(r'NREM SWA (mV$\mathrm{^2}$)')
if swa_yrange == []:
ymax = df['pow'].max()/c
plt.ylim([0, ymax+0.1*ymax])
else:
plt.ylim(swa_yrange)
plt.show()
return df
def laser_triggered_eeg(ppath, name, pre, post, f_max, pnorm=2, pplot=False, psave=False, tstart=0, tend=-1,
peeg2=False, vm=2.5, prune_trials=True, mu=[10, 200], trig_state=0, harmcs=0, iplt_level=1):
"""
calculate laser triggered, averaged EEG and EMG spectrum
:param ppath: base folder containing mouse recordings
:param name: recording
:param pre: time before laser
:param post: time after laser
:param f_max: calculate/plot frequencies up to frequency f_max
:param pnorm: normalization,
pnorm = 0, no normalization
pnorm = 1, normalize each frequency band by its average power
pnorm = 2, normalize each frequency band by the average power
during the preceding baseline period
:param vm: float to set saturation level of colormap
:param pplot: plot figure yes=True, no=False
:param psave: save the figure, yes=True, no = False
:param tstart: float, starting time point. Only laser trials after tstart will be considered
:param tend: float, only laser trials up to tend will be considered; if tend==-1, use whole recording
:param peeg2: if True, use EEG channel 2
:param prune_trials: if True, throw out trials with EEG or EMG artifacts
:param mu: tuple; range used for EMG amplitude calculation
:param trig_state: int; if > 0, only use trials where brain is at laser onset in brainstate trig_state
1=REM, 2=Wake, 3=NREM
:param harmcs: if >0, interpolate all frequencies corresponding to multiples of harmcs by the average power
of the two neighboring frequencies.
:param iplt_level: options - 1 or 2. If 1 only take one neighboring frequency above and below the harmonic;
if 2, take 2 neighbors above and below, respectively
"""
def _interpolate_harmonics(SP, freq, f_max, harmcs, iplt_level):
df = freq[2]-freq[1]
for h in np.arange(harmcs, f_max, harmcs):
i = np.argmin(np.abs(freq - h))
if np.abs(freq[i] - h) < df and h != 60:
if iplt_level == 2:
SP[i,:] = (SP[i-2:i,:] + SP[i+1:i+3,:]).mean(axis=0) * 0.5
else:
SP[i,:] = (SP[i-1,:] + SP[i+1,:]) * 0.5
return SP
SR = get_snr(ppath, name)
NBIN = np.round(2.5*SR)
lsr = load_laser(ppath, name)
idxs, idxe = laser_start_end(lsr)
laser_dur = np.mean((idxe-idxs)/SR)
print('%s: Average laser duration: %f; Number of trials %d' % (name, laser_dur, len(idxs)))
# downsample EEG time to spectrogram time
idxs = [int(i/NBIN) for i in idxs]
idxe = [int(i/NBIN) for i in idxe]
#load EEG and EMG
P = so.loadmat(os.path.join(ppath, name, 'sp_' + name + '.mat'))
Q = so.loadmat(os.path.join(ppath, name, 'msp_' + name + '.mat'))
if not peeg2:
SPEEG = np.squeeze(P['SP'])
else:
SPEEG = np.squeeze(P['SP2'])
SPEMG = np.squeeze(Q['mSP'])
freq = np.squeeze(P['freq'])
t = np.squeeze(P['t'])
dt = float(np.squeeze(P['dt']))
ifreq = np.where(freq<=f_max)[0]
ipre = int(np.round(pre/dt))
ipost = int(np.round(post/dt))
speeg_mean = SPEEG.mean(axis=1)
spemg_mean = SPEMG.mean(axis=1)
# interpolate frequencies corresponding to harmonics of $harmcs
if harmcs > 0:
SPEEG = _interpolate_harmonics(SPEEG, freq, f_max, harmcs, iplt_level)
SPEMG = _interpolate_harmonics(SPEMG, freq, f_max, harmcs, iplt_level)
if tend > -1:
i = np.where((np.array(idxs)*dt >= tstart) & (np.array(idxs)*dt <= tend))[0]
else:
i = np.where(np.array(idxs)*dt >= tstart)[0]
idxs = [idxs[j] for j in i]
idxe = [idxe[j] for j in i]
skips = []
skipe = []
if prune_trials:
for (i,j) in zip(idxs, idxe):
A = SPEEG[0,i-ipre:i+ipost+1] / speeg_mean[0]
B = SPEMG[0,i-ipre:i+ipost+1] / spemg_mean[0]
k = np.where(A >= np.median(A)*50)[0]
l = np.where(B >= np.median(B)*500)[0]
if len(k) > 0 or len(l) > 0:
skips.append(i)
skipe.append(j)
print("%s: kicking out %d trials" % (name, len(skips)))
idxs_new = []
idxe_new = []
for (i,j) in zip(idxs, idxe):
if not i in skips:
idxs_new.append(i)
idxe_new.append(j)
idxs = idxs_new
idxe = idxe_new
# select trials where brain state is right before laser in trig_state
if trig_state > 0:
idxs_new = []
idxe_new = []
M = load_stateidx(ppath, name)[0]
for (i,j) in zip(idxs, idxe):
if i < len(M) and M[i] == trig_state:
idxs_new.append(i)
idxe_new.append(j)
idxs = idxs_new
idxe = idxe_new
# Spectrogram for EEG and EMG normalized by average power in each frequency band
if pnorm == 1:
SPEEG = np.divide(SPEEG, np.repeat(speeg_mean, len(t)).reshape(len(speeg_mean), len(t)))
SPEMG = np.divide(SPEMG, np.repeat(spemg_mean, len(t)).reshape(len(spemg_mean), len(t)))
speeg_parts = []
spemg_parts = []
for (i,j) in zip(idxs, idxe):
if i>=ipre and j+ipost < len(t):
eeg_part = SPEEG[ifreq,i-ipre:i+ipost+1]
speeg_parts.append(eeg_part)
spemg_parts.append(SPEMG[ifreq,i-ipre:i+ipost+1])
EEGLsr = np.array(speeg_parts).mean(axis=0)
EMGLsr = np.array(spemg_parts).mean(axis=0)
# smooth spectrogram
nfilt = 3
filt = np.ones((nfilt,nfilt))
filt = np.divide(filt, filt.sum())
EEGLsr = scipy.signal.convolve2d(EEGLsr, filt, boundary='symm', mode='same')
if pnorm == 2:
for i in range(EEGLsr.shape[0]):
EEGLsr[i,:] = np.divide(EEGLsr[i,:], np.sum(np.abs(EEGLsr[i,0:ipre]))/(1.0*ipre))
EMGLsr[i,:] = np.divide(EMGLsr[i,:], np.sum(np.abs(EMGLsr[i,0:ipre]))/(1.0*ipre))
# get time axis
dt = (1.0/SR)*NBIN
t = np.linspace(-ipre*dt, ipost*dt, ipre+ipost+1)
f = freq[ifreq]
if pplot:
# get rid of boxes around matplotlib plots
def box_off(ax):
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
plt.ion()
plt.figure(figsize=(10,8))
ax = plt.axes([0.1, 0.55, 0.4, 0.35])
plt.pcolormesh(t,f,EEGLsr, vmin=0, vmax=np.median(EEGLsr)*vm, cmap='jet')
plt.plot([0,0], [0,f[-1]], color=(1,1,1))
plt.plot([laser_dur,laser_dur], [0,f[-1]], color=(1,1,1))
plt.axis('tight')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
box_off(ax)
plt.title('EEG', fontsize=12)
cbar = plt.colorbar()
if pnorm >0:
cbar.set_label('Rel. Power')
else:
cbar.set_label('Power uV^2s')
ax = plt.axes([0.62, 0.55, 0.35, 0.35])
ilsr = np.where((t>=0) & (t<=120))[0]
plt.plot(f,EEGLsr[:,0:ipre].mean(axis=1), color='gray', label='baseline', lw=2)
plt.plot(f,EEGLsr[:,ilsr].mean(axis=1), color='blue', label='laser', lw=2)
box_off(ax)
plt.xlabel('Freq. (Hz)')
plt.ylabel('Power (uV^2)')
#plt.legend(loc=0)
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, borderaxespad=0.)
ax = plt.axes([0.1, 0.1, 0.4, 0.35])
plt.pcolormesh(t,f,EMGLsr, cmap='jet')
plt.plot([0,0], [0,f[-1]], color=(1,1,1))
plt.plot([laser_dur,laser_dur], [0,f[-1]], color=(1,1,1))
plt.axis('tight')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
box_off(ax)
plt.title('EMG', fontsize=12)
cbar = plt.colorbar()
if pnorm >0:
cbar.set_label('Rel. Power')
else:
cbar.set_label('Power (uV^2s)')
ax = plt.axes([0.62, 0.1, 0.35, 0.35])
mf = np.where((f>=mu[0]) & (f <= mu[1]))[0]
df = f[1]-f[0]
# amplitude is square root of (integral over each frequency)
avg_emg = np.sqrt(EMGLsr[mf,:].sum(axis=0)*df)
m = np.max(avg_emg)*1.5
plt.plot([0,0], [0,np.max(avg_emg)*1.5], color=(0,0,0))
plt.plot([laser_dur,laser_dur], [0,np.max(avg_emg)*1.5], color=(0,0,0))
plt.xlim((t[0], t[-1]))
plt.ylim((0,m))
plt.plot(t,avg_emg, color='black', lw=2)
box_off(ax)
plt.xlabel('Time (s)')
plt.ylabel('EMG ampl. (uV)')
plt.show()
if psave:
img_file = os.path.join(ppath, name, 'fig_'+name+'_spec.png')
save_figure(img_file)
return EEGLsr, EMGLsr, freq[ifreq], t
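# Hedged usage sketch for laser_triggered_eeg(): average the EEG/EMG spectrogram
# from 60 s before to 120 s after laser onset, normalized to the pre-laser baseline
# (pnorm=2) and restricted to trials starting in NREM (trig_state=3).
# 'ppath' and 'name' are placeholders.
def _example_laser_triggered_eeg(ppath, name):
    EEG, EMG, f, t = laser_triggered_eeg(ppath, name, pre=60, post=120, f_max=30,
                                         pnorm=2, pplot=True, trig_state=3)
    return EEG, EMG, f, t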
def laser_triggered_eeg_avg(ppath, recordings, pre, post, f_max, laser_dur, pnorm=1, pplot=1, tstart=0, tend=-1,
vm=[], cb_ticks=[], mu=[10, 100], trig_state=0, harmcs=0, iplt_level=1, peeg2=False, fig_file=''):
"""
calculate average spectrogram for all recordings listed in @recordings; for averaging take
mouse identity into account
:param ppath: base folder
:param recordings: list of recordings
:param pre: time before laser onset
:param post: time after laser onset
:param f_max: maximum frequency shown for EEG spectrogram
:param laser_dur: duration of laser stimulation
:param pnorm: normalization,
pnorm = 0, no normalization
pnorm = 1, normalize each frequency band by its average power
pnorm = 2, normalize each frequency band by the average power
during the preceding baseline period
:param pplot: pplot==0 - no figure;
pplot==1 - conventional figure;
pplot==2 - pretty figure showing EEG spectrogram
along with EMG amplitude
note: the errorbar for the EMG amplitude is the S.E.M.
:param tstart: only consider laser trials with laser onset after tstart seconds
:param tend: only consider laser trials with laser onset before tend seconds
:param vm: saturation of heatmap for EEG spectrogram
:param cb_ticks: ticks for colorbar (only applies for pplot==2)
:param mu: frequencies for EMG amplitude calculation
:param trig_state: if > 0, only use trials where brain is at laser onset in brainstate trig_state
1=REM, 2=Wake, 3=NREM
:param peeg2: if True, use EEG2 instead of EEG
:param harmcs: if >0, interpolate all frequencies corresponding to multiples of harmcs by the average power
of the two neighboring frequencies.
:param iplt_level: options - 1 or 2. If 1 only take one neighboring frequency above and below the harmonic;
if 2, take 2 neighbors above and below, respectively
:param fig_file: if specified, save figure to given file
:return:
t, f, EEGSpec, EMGSpec, EEGLsr
t - time axis
f - frequency axis
EEGSpec - dict with mouse id -> 2D np.array(frequency x time)
EMGSpec - dict with mouse id -> 2D np.array(frequency x time)
EEGLsr - 2D np.array(frequency x time)
"""
EEGSpec = {}
EMGSpec = {}
mice = []
for rec in recordings:
idf = re.split('_', rec)[0]
if not(idf in mice):
mice.append(idf)
EEGSpec[idf] = []
EMGSpec[idf] = []
for rec in recordings:
idf = re.split('_', rec)[0]
EEG, EMG, f, t = laser_triggered_eeg(ppath, rec, pre, post, f_max, mu=mu, pnorm=pnorm, pplot=False,
psave=False, tstart=tstart, tend=tend, trig_state=trig_state,
peeg2=peeg2, harmcs=harmcs, iplt_level=iplt_level)
EEGSpec[idf].append(EEG)
EMGSpec[idf].append(EMG)
for idf in mice:
EEGSpec[idf] = np.array(EEGSpec[idf]).mean(axis=0)
EMGSpec[idf] = np.array(EMGSpec[idf]).mean(axis=0)
EEGLsr = np.array([EEGSpec[k] for k in mice]).mean(axis=0)
EMGLsr = np.array([EMGSpec[k] for k in mice]).mean(axis=0)
mf = np.where((f >= mu[0]) & (f <= mu[1]))[0]
if harmcs > 0:
harm_freq = np.arange(0, f.max(), harmcs)
for h in harm_freq:
mf = np.setdiff1d(mf, mf[np.where(f[mf]==h)[0]])
df = f[1] - f[0]
EMGAmpl = np.zeros((len(mice), EEGLsr.shape[1]))
i=0
for idf in mice:
# amplitude is square root of (integral over each frequency)
if harmcs == 0:
EMGAmpl[i,:] = np.sqrt(EMGSpec[idf][mf,:].sum(axis=0)*df)
else:
tmp = 0
for qf in mf:
tmp += EMGSpec[idf][qf,:] * (f[qf] - f[qf-1])
EMGAmpl[i,:] = np.sqrt(tmp)
i += 1
avg_emg = EMGAmpl.mean(axis=0)
sem_emg = EMGAmpl.std(axis=0) / np.sqrt(len(mice))
if pplot==1:
plt.ion()
plt.figure(figsize=(12,10))
ax = plt.axes([0.1, 0.55, 0.4, 0.4])
if len(vm) == 2:
plt.pcolormesh(t,f,EEGLsr, cmap='jet', vmin=vm[0], vmax=vm[1])
else:
plt.pcolormesh(t, f, EEGLsr, cmap='jet')
plt.plot([0,0], [0,f[-1]], color=(1,1,1))
plt.plot([laser_dur,laser_dur], [0,f[-1]], color=(1,1,1))
plt.axis('tight')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
box_off(ax)
plt.title('EEG')
cbar = plt.colorbar()
if pnorm >0:
cbar.set_label('Rel. Power')
else:
cbar.set_label('Power uV^2s')
ax = plt.axes([0.6, 0.55, 0.3, 0.4])
ipre = np.where(t<0)[0]
ilsr = np.where((t>=0) & (t<=120))[0]
plt.plot(f,EEGLsr[:,ipre].mean(axis=1), color='gray', label='baseline', lw=2)
plt.plot(f,EEGLsr[:,ilsr].mean(axis=1), color='blue', label='laser', lw=2)
box_off(ax)
plt.xlabel('Freq. (Hz)')
plt.ylabel('Power (uV^2)')
#plt.legend(loc=0)
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, borderaxespad=0.)
ax = plt.axes([0.1, 0.05, 0.4, 0.4])
plt.pcolormesh(t,f,EMGLsr, cmap='jet')
plt.plot([0,0], [0,f[-1]], color=(1,1,1))
plt.plot([laser_dur,laser_dur], [0,f[-1]], color=(1,1,1))
plt.axis('tight')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
box_off(ax)
plt.title('EMG')
cbar = plt.colorbar()
if pnorm >0:
cbar.set_label('Rel. Power')
else:
cbar.set_label('Power uV^2s')
ax = plt.axes([0.6, 0.05, 0.3, 0.4])
m = np.max(avg_emg)*1.5
plt.plot([0,0], [0,np.max(avg_emg)*1.5], color=(0,0,0))
plt.plot([laser_dur,laser_dur], [0,np.max(avg_emg)*1.5], color=(0,0,0))
plt.xlim((t[0], t[-1]))
plt.ylim((0,m))
plt.plot(t,avg_emg, color='black', lw=2)
plt.fill_between(t, avg_emg-sem_emg, avg_emg+sem_emg)
box_off(ax)
plt.xlabel('Time (s)')
plt.ylabel('EMG ampl. (uV)')
plt.show()
elif pplot==2:
# pretty figure
plt.figure()
if len(vm) > 0:
cb_ticks = vm
# plot EEG spectrogram
axes_cbar = plt.axes([0.8, 0.75, 0.1, 0.2])
ax = plt.axes([0.1, 0.55, 0.75, 0.4])
if len(vm) == 2:
im=ax.pcolorfast(t, f, EEGLsr, cmap='jet', vmin=vm[0], vmax=vm[1])
else:
im = ax.pcolorfast(t, f, EEGLsr, cmap='jet')
plt.plot([0,0], [0,f[-1]], color=(1,1,1))
plt.plot([laser_dur,laser_dur], [0,f[-1]], color=(1,1,1))
plt.axis('tight')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
box_off(ax)
# colorbar for EEG spectrogram
cb = plt.colorbar(im, ax=axes_cbar, pad=0.0, aspect=10.0, orientation='vertical')
if pnorm > 0:
cb.set_label('Rel. Power')
else:
cb.set_label('Power (uV^2s)')
cb.ax.xaxis.set_ticks_position("bottom")
cb.ax.xaxis.set_label_position('top')
if len(cb_ticks) > 0:
cb.set_ticks(cb_ticks)
axes_cbar.set_alpha(0.0)
axes_cbar.spines["top"].set_visible(False)
axes_cbar.spines["right"].set_visible(False)
axes_cbar.spines["bottom"].set_visible(False)
axes_cbar.spines["left"].set_visible(False)
axes_cbar.axes.get_xaxis().set_visible(False)
axes_cbar.axes.get_yaxis().set_visible(False)
# EMG amplitude
ax = plt.axes([0.1, 0.1, 0.75, 0.3])
m = np.max(avg_emg) * 1.5
ax.add_patch(patches.Rectangle((0, 0), laser_dur, np.max(avg_emg)*1.5, facecolor=[0.6, 0.6, 1], edgecolor=[0.6, 0.6, 1]))
plt.xlim((t[0], t[-1]))
plt.ylim((0, m))
plt.fill_between(t, avg_emg-sem_emg, avg_emg+sem_emg, color='gray', zorder=2)
plt.plot(t, avg_emg, color='black', lw=2)
box_off(ax)
plt.xlabel('Time (s)')
plt.ylabel(r'EMG ampl. ($\mathrm{\mu V}$)')
plt.show()
if len(fig_file) > 0:
save_figure(fig_file)
return t, f, EEGSpec, EMGSpec, EEGLsr
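# Hedged usage sketch for laser_triggered_eeg_avg(): mouse-wise averaged laser-triggered
# spectrograms for a 120 s laser stimulus; vm bounds the color range of the EEG heatmap
# and pplot=2 produces the "pretty" figure variant. All arguments are placeholders.
def _example_laser_triggered_eeg_avg(ppath, recordings):
    t, f, EEGSpec, EMGSpec, EEGLsr = laser_triggered_eeg_avg(ppath, recordings, pre=60,
        post=240, f_max=30, laser_dur=120, pnorm=2, pplot=2, vm=[0, 2])
    return t, f, EEGLsr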
def laser_brainstate(ppath, recordings, pre, post, pplot=True, fig_file='', start_time=0, end_time=-1,
ma_thr=0, edge=0, sf=0, cond=0, single_mode=False, ci=95, backup='', csv_file=''):
"""
calculate laser triggered probability of REM, Wake, NREM
ppath - base folder holding all recording
recordings - list of recording
pre - time before laser onset
post - time after laser onset
@Optional:
pplot - pplot==True: plot figure
fig_file - specify filename including ending, if you wish to save figure
start_time - in [s], only consider laser onsets starting after $start_time
end_time - in [s], only consider laser onsets starting before $end_time
sf - smoothing factor for Gaussian kernel; if sf=0, no filtering
edge - only use if $sf > 0: to avoid smoothing artifacts, set edge to a value > 0, e.g. 20
ma_thr - if > 0, smooth out microarousals with duration < $ma_thr
cond - cond==0: consider all trials; cond==[1,2,3] only plot trials,
where mouse was in REM, Wake, or NREM as laser turned on
single_mode - if True, plot every single mouse
backup - optional backup folder; if specified each single recording folder can be either on $ppath or $backup;
if it's on both folders, the version in ppath is used
ci - string; possible values: 'sem', 'sd', or value between 0 and 100, corresponding
to the bootstrapped confidence interval. The default is ci=95
csv_file - if filename (without or including full file path) is provided,
save pd.DataFrame df (see @Return) to csv file
@Return:
df_timecourse: pd.DataFrame with columns: mouse, time, perc, state.
df: pd.DataFrame with columns mouse_id, REM, NREM, Wake, Lsr
Lsr has three values: 0 - before laser, 1 - during laser, 2 - after laser
if laser was on for laser_dur s, then
df[df['Lsr'] == 1]['REM'] is the average % of REM sleep during laser stimulation for each mouse,
df[df['Lsr'] == 0]['REM'] is the average % of REM sleep
during the laser_dur s long time interval preceding laser onset, and
df[df['Lsr'] == 2]['REM'] is the average during the time interval of duration laser_dur that
directly follows laser stimulation
"""
if type(recordings) != list:
recordings = [recordings]
rec_paths = dict()
for rec in recordings:
if os.path.isdir(os.path.join(ppath, rec)):
rec_paths[rec] = ppath
else:
rec_paths[rec] = backup
pre += edge
post += edge
BrainstateDict = {}
mouse_order = []
for rec in recordings:
idf = re.split('_', rec)[0]
BrainstateDict[idf] = []
if not idf in mouse_order:
mouse_order.append(idf)
nmice = len(BrainstateDict)
for rec in recordings:
ppath = rec_paths[rec]
SR = get_snr(ppath, rec)
NBIN = np.round(2.5*SR)
dt = NBIN * 1.0/SR
istart_time = int(np.round(start_time / dt))
M = load_stateidx(ppath, rec)[0]
if end_time == -1:
iend_time = len(M)
else:
iend_time = int(np.round(end_time / dt))
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt <= ma_thr:
M[s] = 3
(idxs, idxe) = laser_start_end(load_laser(ppath, rec))
idf = re.split('_', rec)[0]
ipre = int(np.round(pre/dt))
ipost = int(np.round(post/dt))
idxs = [int(i/NBIN) for i in idxs]
idxe = [int(i/NBIN) for i in idxe]
laser_dur = np.mean((np.array(idxe) - np.array(idxs))) * dt
for (i,j) in zip(idxs, idxe):
if i>=ipre and i+ipost<=len(M)-1 and i>istart_time and i < iend_time:
bs = M[i-ipre:i+ipost+1]
BrainstateDict[idf].append(bs)
# I assume here that every recording has same dt
t = np.linspace(-ipre*dt, ipost*dt, ipre+ipost+1)
# first time point where the laser was fully on (during the complete bin).
izero = np.where(t>0)[0][0]
# the first time bin overlapping with laser is then
izero -= 1
# @BS: mouse x time x state
BS = np.zeros((nmice, len(t), 3))
Trials = []
imouse = 0
for mouse in mouse_order:
if cond==0:
M = np.array(BrainstateDict[mouse])
Trials.append(M)
for state in range(1,4):
C = np.zeros(M.shape)
C[np.where(M==state)] = 1
BS[imouse,:,state-1] = C.mean(axis=0)
if cond>0:
M = BrainstateDict[mouse]
Msel = []
for trial in M:
if trial[izero] == cond:
Msel.append(trial)
M = np.array(Msel)
Trials.append(M)
for state in range(1,4):
C = np.zeros(M.shape)
C[np.where(M==state)] = 1
BS[imouse,:,state-1] = C.mean(axis=0)
imouse += 1
# flatten Trials
Trials = reduce(lambda x,y: np.concatenate((x,y), axis=0), Trials)
BS = BS*100
if sf > 0:
for state in [2, 1, 0]:
for i in range(nmice):
BS[i, :, state] = smooth_data(BS[i, :, state], sf)
df_timecourse = pd.DataFrame()
state_map = {1: 'REM', 2: 'Wake', 3: 'NREM'}
for s in state_map:
df = nparray2df(BS[:, :, s - 1], mouse_order, t, 'perc', 'mouse', 'time')
df['state'] = state_map[s]
df_timecourse = pd.concat([df_timecourse, df])
nmice = imouse
if pplot:
state_label = {0:'REM', 1:'Wake', 2:'NREM'}
it = np.where((t >= -pre + edge) & (t <= post - edge))[0]
plt.ion()
if not single_mode:
plt.figure()
ax = plt.axes([0.15, 0.15, 0.6, 0.7])
colors = [[0, 1, 1 ],[0.5, 0, 1],[0.6, 0.6, 0.6]]
if ci == 'sem':
for state in [2,1,0]:
tmp = BS[:, :, state].mean(axis=0)
plt.plot(t[it], tmp[it], color=colors[state], lw=3, label=state_label[state])
if nmice > 1:
smp = BS[:,:,state].std(axis=0) / np.sqrt(nmice)
plt.fill_between(t[it], tmp[it]-smp[it], tmp[it]+smp[it], color=colors[state], alpha=0.4, zorder=3)
plt.xlim([-pre+edge, post-edge])
plt.ylim([0,100])
ax.add_patch(patches.Rectangle((0,0), laser_dur, 100, facecolor=[0.6, 0.6, 1], edgecolor=[0.6, 0.6, 1]))
box_off(ax)
plt.xlabel('Time (s)')
plt.ylabel('Probability')
#plt.legend(bbox_to_anchor=(0., 1.02, 0.5, .102), loc=3, ncol=3, borderaxespad=0.)
plt.draw()
else:
bs_colors = {'REM': [0, 1, 1], 'Wake': [0.5, 0, 1], 'NREM': [0.6, 0.6, 0.6]}
dfm = df_timecourse.groupby(['mouse', 'state', 'time']).mean().reset_index()
for s in [3, 2, 1]:
sns.lineplot(data=dfm[dfm.state == state_map[s]], ci=ci, x='time', y='perc',
color=bs_colors[state_map[s]], err_kws={'alpha': 0.8, 'zorder': 3})
plt.xlim([-pre+edge, post-edge])
plt.ylim([0,100])
ax.add_patch(patches.Rectangle((0,0), laser_dur, 100, facecolor=[0.6, 0.6, 1], edgecolor=[0.6, 0.6, 1]))
box_off(ax)
plt.xlabel('Time (s)')
plt.ylabel('Probability')
else:
plt.figure(figsize=(7,7))
clrs = sns.color_palette("husl", nmice)
for state in [2,1,0]:
ax = plt.subplot(3, 1, 3 - state)
for i in range(nmice):
plt.plot(t[it], BS[i,it,state], color=clrs[i], label=mouse_order[i])
ax.add_patch(patches.Rectangle((0, 0), laser_dur, 100, facecolor=[0.6, 0.6, 1], edgecolor=[0.6, 0.6, 1], alpha=0.8))
plt.xlim((t[it][0], t[it][-1]))
plt.ylim((0,100))
plt.ylabel('% ' + state_label[state])
if state==0:
plt.xlabel('Time (s)')
else:
ax.set_xticklabels([])
if state==2:
ax.legend(mouse_order, bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=len(mouse_order),
frameon=False)
box_off(ax)
# figure showing all trials
plt.figure(figsize=(4,6))
set_fontarial()
plt.ion()
ax = plt.axes([0.15, 0.1, 0.8, 0.8])
cmap = plt.cm.jet
my_map = cmap.from_list('ha', [[0,1,1],[0.5,0,1], [0.6, 0.6, 0.6]], 3)
x = list(range(Trials.shape[0]))
plt.pcolormesh(t,np.array(x), np.flipud(Trials), cmap=my_map, vmin=1, vmax=3)
plt.plot([0,0], [0, len(x)-1], color='white')
plt.plot([laser_dur,laser_dur], [0, len(x)-1], color='white')
ax.axis('tight')
plt.draw()
plt.xlabel('Time (s)')
plt.ylabel('Trial No.')
box_off(ax)
plt.show()
if len(fig_file)>0:
plt.savefig(fig_file)
# compile dataframe with all baseline and laser values
ilsr = np.where((t>=0) & (t<=laser_dur))[0]
ibase = np.where((t>=-laser_dur) & (t<0))[0]
iafter = np.where((t>=laser_dur) & (t<laser_dur*2))[0]
df = pd.DataFrame(columns = ['Mouse', 'REM', 'NREM', 'Wake', 'Lsr'])
mice = mouse_order + mouse_order + mouse_order
lsr = np.concatenate((np.ones((nmice,), dtype='int'), np.zeros((nmice,), dtype='int'), np.ones((nmice,), dtype='int')*2))
df['Mouse'] = mice
df['Lsr'] = lsr
df['REM'] = np.concatenate((BS[:,ilsr,0].mean(axis=1), BS[:,ibase,0].mean(axis=1), BS[:,iafter,0].mean(axis=1)))
df['NREM'] = np.concatenate((BS[:,ilsr,2].mean(axis=1), BS[:,ibase,2].mean(axis=1), BS[:,iafter,2].mean(axis=1)))
df['Wake'] = np.concatenate((BS[:,ilsr,1].mean(axis=1), BS[:,ibase,1].mean(axis=1), BS[:,iafter,1].mean(axis=1)))
if len(csv_file) > 0:
df.to_csv(csv_file, index=False)
return df_timecourse, df, Trials
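# Hedged usage sketch for laser_brainstate(): time course of REM/Wake/NREM probability
# around laser onset with 95% bootstrapped confidence intervals; microarousals < 20 s
# are treated as NREM. 'ppath' and 'recordings' are placeholders.
def _example_laser_brainstate(ppath, recordings):
    df_timecourse, df, trials = laser_brainstate(ppath, recordings, pre=120, post=240,
                                                 ma_thr=20, sf=0, ci=95)
    return df_timecourse, df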
def laser_brainstate_bootstrap(ppath, recordings, pre, post, edge=0, sf=0, nboots=1000, alpha=0.05, backup='',
start_time=0, ma_thr=20, bootstrap_mode=0, fig_file=''):
"""
Align brain state with laser stimulation and calculate two-sided 1-$alpha confidence intervals using
bootstrapping.
:param ppath: base folder
:param recordings: list of recordings
:param pre: time before laser
:param post: time after laser onset
:param edge: add $edge seconds at beginning and end (that are not shown in the plot) to avoid filtering artifacts
:param sf: smoothing factor for Gaussian filter; better do not use
:param nboots: int, how many times the whole data set is resampled for boot-strapping
:param alpha: plot shows 1-$alpha confidence interval
:param backup: optional backup folder where recordings are stored
:param start_time: start time of recording used for analysis
:param ma_thr: wake periods shorter than $ma_thr seconds are treated as NREM (microarousals)
:param bootstrap_mode: default=0
bootstrap_mode == 0: Take inter-mouse variance and inter-trial variance (of each mouse) into account.
That is, bootstrapping re-models the variance expected when re-doing the same
experimental design (same mouse number and total trial number).
To account for potentially different number of trials per mouse, resample the data
during each iteration the following way: Assume that there are n laser trials from m mice;
randomly select (with replacement) ntrials mice; then select from each mouse randomly one trial.
bootstrap_mode == 1: Only take inter-trial variance (of each mouse) into account. That is,
bootstrapping models the variance expected when redoing the experiment with exactly the same mice.
:param fig_file: if a file name is specified, the figure will be saved
:return: P - p-values for REM, Wake, NREM
Mod - by how much the percentage of REM, Wake, NREM is increased compared to baseline
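Example call (base folder and recording names below are placeholders; adjust to your data):
P, Mod = laser_brainstate_bootstrap(ppath, ['M1_010121n1', 'M2_010121n1'],
pre=120, post=240, nboots=10000, bootstrap_mode=0)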
"""
pre += edge
post += edge
rec_paths = dict()
for rec in recordings:
if os.path.isdir(os.path.join(ppath, rec)):
rec_paths[rec] = ppath
else:
rec_paths[rec] = backup
# dict: mouse_id --> laser trials, R W N sequence
BrainstateDict = {}
for rec in recordings:
idf = re.split('_', rec)[0]
BrainstateDict[idf] = []
mice = list(BrainstateDict.keys())
nmice = len(BrainstateDict)
for rec in recordings:
ppath = rec_paths[rec]
SR = get_snr(ppath, rec)
NBIN = np.round(2.5 * SR)
dt = NBIN * 1 / SR
istart_time = int(np.round(start_time / dt))
M = load_stateidx(ppath, rec)[0]
seq = get_sequences(np.where(M == 2)[0])
for s in seq:
if len(s) * dt <= ma_thr:
M[s] = 3
(idxs, idxe) = laser_start_end(load_laser(ppath, rec))
idf = re.split('_', rec)[0]
#SR = get_snr(ppath, rec)
#NBIN = np.round(2.5 * SR)
ipre = int(np.round(pre / dt))
ipost = int(np.round(post / dt))
idxs = [int(i / NBIN) for i in idxs]
idxe = [int(i / NBIN) for i in idxe]
laser_dur = np.mean((np.array(idxe) - np.array(idxs))) * dt
for (i, j) in zip(idxs, idxe):
if i >= ipre and j + ipost <= len(M) - 1 and i > istart_time:
bs = M[i - ipre:i + ipost + 1]
BrainstateDict[idf].append(bs)
for mouse in mice:
BrainstateDict[mouse] = np.array(BrainstateDict[mouse])
# I assume here that every recording has same dt
t = np.linspace(-ipre*dt, ipost*dt, ipre+ipost+1)
Trials = dict()
for mouse in BrainstateDict:
Trials[mouse] = np.zeros((BrainstateDict[mouse].shape[0], len(t), 3))
# total number of trials:
ntrials = 0
for mouse in BrainstateDict:
M = np.array(BrainstateDict[mouse])
for state in range(1, 4):
C = np.zeros(M.shape)
C[np.where(M == state)] = 100.
Trials[mouse][:,:,state-1] = C
ntrials += Trials[mouse].shape[0]
Prob = np.zeros((nboots, len(t), 3))
if bootstrap_mode == 1:
for b in range(nboots):
# average brain state percentage for each mouse during iteration b
mouse_mean_state = np.zeros((nmice, len(t), 3))
i = 0
for mouse in mice:
mmouse = Trials[mouse].shape[0]
iselect = rand.randint(0, mmouse, (mmouse,))
for s in [1,2,3]:
#bBS[s][offset:offset+mmouse,:] = Trials[mouse][iselect,:,s-1]
mouse_mean_state[i,:,s-1] = Trials[mouse][iselect,:,s-1].mean(axis=0)
i += 1
for s in [1,2,3]:
Prob[b,:,s-1] = mouse_mean_state[:,:,s-1].mean(axis=0)
else:
mx_iter = np.zeros((ntrials, len(t), 3))
for b in range(nboots):
# for each iteration, randomly select (with replacement) ntrials mice
irand_mice = rand.randint(0, nmice, ntrials)
# average brain state percentage for each mouse during iteration b
# mouse_mean_state = np.zeros((nmice, len(t), 3))
i = 0
# loop over the ntrials randomly selected mice
for j in irand_mice:
mouse = mice[j]
# mmouse = number of trials available for the current mouse
mmouse = Trials[mouse].shape[0]
# select one random trial from the current mouse
iselect = rand.randint(0, mmouse)
for s in [1, 2, 3]:
mx_iter[i,:,s-1] = Trials[mouse][iselect,:,s-1]
i += 1
# mx_iter is the resampled data set for bootstrap iteration b
# now we calculate the statistic we're interested in, which is the mean
for s in [1, 2, 3]:
Prob[b,:,s-1] = mx_iter[:,:,s-1].mean(axis=0)
# simple average for each brainstate across mice (w/o bootstrapping)
Prob_mean = np.zeros((nmice, len(t), 3))
for s in [1,2,3]:
i = 0
for mouse in mice:
Prob_mean[i,:,s-1] = Trials[mouse][:,:,s-1].mean(axis=0)
i += 1
usProb = Prob.copy()
Prob = np.sort(Prob, axis=0)
Bounds = np.zeros((2, len(t), 3))
a = int((nboots * alpha) / 2.0)
for s in [1,2,3]:
Bounds[0,:,s-1] = Prob[a,:,s-1]
Bounds[1,:,s-1] = Prob[-a,:, s-1]
# smooth_data
if sf > 0:
for s in range(3):
Bounds[0, :, s] = smooth_data(Bounds[0, :, s], sf)
Bounds[1, :, s] = smooth_data(Bounds[1, :, s], sf)
for i in range(nmice):
for s in range(3):
Prob_mean[i, :, s] = smooth_data(Prob_mean[i,:,s], sf)
# plot figure
colors = np.array([[0, 1, 1], [0.5, 0, 1], [0.6, 0.6, 0.6]])
br_states = {1:'REM', 2:'Wake', 3:'NREM'}
#colors = np.array([[55,255,255], [153,255,153],[153,153,153]])/255.
it = np.where((t>=-pre+edge) & (t<=post-edge))[0]
plt.ion()
plt.figure()
ax = plt.axes([0.15, 0.15, 0.6, 0.7])
for s in [3,2,1]:
ax.fill_between(t[it], Bounds[0,it,s-1], Bounds[1,it,s-1], color=colors[s-1,:], alpha=0.8, zorder=3, edgecolor=None)
ax.plot(t[it], Prob_mean[:, it, s-1].mean(axis=0), color=colors[s-1,:], label=br_states[s])
ax.add_patch(patches.Rectangle((0, 0), laser_dur, 100, facecolor=[0.6, 0.6, 1], edgecolor=[0.6, 0.6, 1]))
plt.xlim([-pre+edge, post-edge])
plt.ylim([0,100])
plt.xlabel('Time (s)')
plt.ylabel('Brain state (%)')
plt.legend(bbox_to_anchor = (1.0, 0.7, 1., .102), loc = 3, mode = 'expand', ncol = 1, frameon = False)
box_off(ax)
plt.draw()
# statistics
# OLD VERSION
# ibase = np.where((t>=-laser_dur) & (t<0))[0]
# ilsr = np.where((t>=0) & (t<laser_dur))[0]
# P = np.zeros((3,))
# Mod = np.zeros((3,))
# for istate in [1,2,3]:
# basel = usProb[:,ibase,istate-1].mean(axis=1)
# laser = usProb[:,ilsr, istate-1].mean(axis=1)
# d = laser - basel
# if np.mean(d) >= 0:
# # now we want all values be larger than 0
# p = len(np.where(d>0)[0]) / (1.0*nboots)
# sig = 1 - p
# if sig == 0:
# sig = 1.0/nboots
# Mod[istate-1] = (np.mean(laser) / np.mean(basel) - 1) * 100
# else:
# p = len(np.where(d<0)[0]) / (1.0*nboots)
# sig = 1 - p
# if sig == 0:
# sig = 1.0/nboots
# Mod[istate-1] = -(1 - np.mean(laser) / np.mean(basel)) * 100
# P[istate-1] = sig
# NEW VERSION
ibase = np.where((t>=-laser_dur) & (t<0))[0]
ilsr = np.where((t>=0) & (t<laser_dur))[0]
P = np.zeros((3,))
Mod = np.zeros((3,))
for istate in [1,2,3]:
basel = usProb[:,ibase,istate-1].mean(axis=1)
laser = usProb[:,ilsr, istate-1].mean(axis=1)
d = laser - basel
p = 2 * np.min([len(np.where(d > 0)[0]) / nboots, len(np.where(d <= 0)[0]) / nboots])
if np.mean(d) >= 0:
# now we want all values be larger than 0
#p = len(np.where(d>0)[0]) / (1.0*nboots)
sig = p
if sig == 0:
sig = 1.0/nboots
Mod[istate-1] = (np.mean(laser) / np.mean(basel) - 1) * 100
else:
# p = len(np.where(d<0)[0]) / (1.0*nboots)
sig = p
if sig == 0:
sig = 1.0/nboots
Mod[istate-1] = -(1 - np.mean(laser) / np.mean(basel)) * 100
P[istate-1] = sig
labels = {1:'REM', 2:'Wake', 3:'NREM'}
for s in [1,2,3]:
print('%s is changed by %f perc.; P = %f, bootstrap' % (labels[s], Mod[s-1], P[s-1]))
print("n = %d mice" % len(mice))
if len(fig_file) > 0:
plt.savefig(fig_file, bbox_inches="tight")
return P, Mod
def _despine_axes(ax):
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
def sleep_example(ppath, name, tlegend, tstart, tend, fmax=30, fig_file='', vm=[], ma_thr=10,
fontsize=12, cb_ticks=[], emg_ticks=[], r_mu = [10, 100],
fw_color=True, pemg_ampl=False, raw_ex = [], eegemg_legend=[], eegemg_max=[]):
"""
plot sleep example
:param ppath: base folder
:param name: recording name
:param tstart: start (in seconds) of shown example interval
:param tend: end of example interval
:param tlegend: length of time legend
:param fmax: maximum frequency shown for EEG spectrogram
:param fig_file: file name where figure will be saved
:param vm: saturation of EEG spectrogram
:param fontsize: fontsize
:param cb_ticks: ticks for colorbar
:param emg_ticks: ticks for EMG amplitude axis (uV)
:param r_mu: range of frequencies for EMG amplitude
:param fw_color: if True, use standard color scheme for brainstate (gray - NREM, violet - Wake, cyan - REM);
otherwise use Shinjae's color scheme
:param pemg_ampl: if True, plot EMG amplitude; otherwise plot raw EMG traces
:param raw_ex: list of tuples; e.g. if you wish to show 2 raw examples of length t s at time point i and j s,
set raw_ex = [(i,t), (j,t)].
If raw_ex == [], no raw traces are plotted
The raw examples are labeled by gray rectangles
:param eegemg_legend: list with 2 floats: scale bar (in micro Volts) for EEG and EMG raw example
:param eegemg_max: list of 2 floats, the y range (ylim) for EEG and EMG raw examples (in micro Volts)
goes from -eegemg_max[0] to eegemg_max[0] (for EEG) and
from -eegemg_max[1] to eegemg_max[1] (for EMG)
Example call including EEG/EMG raw traces:
sleepy.sleep_example(ppath, name2, 300, 1000, 4000, raw_ex=[(2140, 5), (3000, 5)],
eegemg_legend=[200, 200], eegemg_max=[200, 200],
fig_file='/Users/tortugar/Desktop/example.png')
"""
set_fontarial()
set_fontsize(fontsize)
# True, if laser exists, otherwise set to False
plaser = True
sr = get_snr(ppath, name)
nbin = np.round(2.5 * sr)
dt = nbin * 1 / sr
ddt = 1.0/sr
istart = int(np.round(tstart/dt))
iend = int(np.round(tend/dt))
dur = (iend-istart+1)*dt
istart_emg = int(istart*nbin)
iend_emg = int((iend+1)*nbin)
M,K = load_stateidx(ppath, name)
#kcut = np.where(K>=0)[0]
#M = M[kcut]
if tend==-1:
iend = len(M)
M = M[istart:iend]
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt <= ma_thr:
M[s] = 3
t = np.arange(0, len(M))*dt
t_emg = np.arange(0, iend_emg-istart_emg)*ddt
P = so.loadmat(os.path.join(ppath, name, 'sp_%s.mat' % name), squeeze_me=True)
SPEEG = P['SP']
# calculate median for choosing right saturation for heatmap
med = np.median(SPEEG.max(axis=0))
if len(vm) == 0:
vm = [0, med*2.5]
#t = np.squeeze(P['t'])
freq = P['freq']
if pemg_ampl:
P = so.loadmat(os.path.join(ppath, name, 'msp_%s.mat' % name), squeeze_me=True)
SPEMG = P['mSP']#/1000000.0
else:
emg = so.loadmat(os.path.join(ppath, name, 'EMG.mat'), squeeze_me=True)['EMG']
# load laser
if not os.path.isfile(os.path.join(ppath, name, 'laser_%s.mat' % name)):
plaser = False
if plaser:
laser = load_laser(ppath, name)
idxs, idxe = laser_start_end(laser, SR=sr)
idxs = [int(i / nbin) for i in idxs]
idxe = [int(i / nbin) for i in idxe]
# laser
if plaser:
laser_start = []
laser_end = []
for (i,j) in zip(idxs, idxe):
if i>=istart and j <= iend:
laser_start.append(i-istart)
laser_end.append(j-istart)
# create figure
plt.ion()
plt.figure(figsize=(8,4))
# axis in the background to draw laser patches
axes_back = plt.axes([0.1, .4, 0.8, 0.52])
_despine_axes(axes_back)
if plaser:
for (i,j) in zip(laser_start, laser_end):
axes_back.add_patch(patches.Rectangle((i*dt, 0), (j-i+1)*dt, 1, facecolor=[0.6, 0.6, 1], edgecolor=[0.6, 0.6, 1]))
axes_back.text(laser_end[0] * dt + dur * 0.01, 0.94, 'Laser', color=[0.6, 0.6, 1])
plt.ylim((0,1))
plt.xlim([t[0], t[-1]])
# show brainstate
axes_brs = plt.axes([0.1, 0.4, 0.8, 0.05])
cmap = plt.cm.jet
if fw_color:
my_map = cmap.from_list('brs', [[0, 0, 0], [0, 1, 1], [0.6, 0, 1], [0.8, 0.8, 0.8]], 4)
else:
my_map = cmap.from_list('brs', [[0, 0, 0], [153 / 255.0, 76 / 255.0, 9 / 255.0],
[120 / 255.0, 120 / 255.0, 120 / 255.0], [1, 0.75, 0]], 4)
tmp = axes_brs.pcolorfast(t, [0, 1], np.array([M]), vmin=0, vmax=3)
tmp.set_cmap(my_map)
axes_brs.axis('tight')
_despine_axes(axes_brs)
axes_legend = plt.axes([0.1, 0.33, 0.8, 0.05])
plt.ylim((0,1.1))
plt.xlim([t[0], t[-1]])
plt.plot([0, tlegend], [1, 1], color='black', lw=1)
plt.text(tlegend/4.0, 0.1, str(tlegend) + ' s')
_despine_axes(axes_legend)
# show spectrogram
ifreq = np.where(freq <= fmax)[0]
# axes for colorbar
axes_cbar = plt.axes([0.82, 0.68, 0.1, 0.2])
# axes for EEG spectrogram
axes_spec = plt.axes([0.1, 0.68, 0.8, 0.2], sharex=axes_brs)
im = axes_spec.pcolorfast(t, freq[ifreq], SPEEG[ifreq, istart:iend], cmap='jet', vmin=vm[0], vmax=vm[1])
axes_spec.axis('tight')
axes_spec.set_xticklabels([])
axes_spec.set_xticks([])
axes_spec.spines["bottom"].set_visible(False)
plt.ylabel('Freq (Hz)')
box_off(axes_spec)
plt.xlim([t[0], t[-1]])
# colorbar for EEG spectrogram
cb = plt.colorbar(im, ax=axes_cbar, pad=0.0, aspect=10.0)
cb.set_label('Power ($\mathrm{\mu}$V$^2$s)')
if len(cb_ticks) > 0:
cb.set_ticks(cb_ticks)
axes_cbar.set_alpha(0.0)
_despine_axes(axes_cbar)
# show EMG
axes_emg = plt.axes([0.1, 0.5, 0.8, 0.1], sharex=axes_spec)
if pemg_ampl:
i_mu = np.where((freq >= r_mu[0]) & (freq <= r_mu[1]))[0]
p_mu = np.sqrt(SPEMG[i_mu, :].sum(axis=0) * (freq[1] - freq[0])) #* 1000.0 # back to muV
axes_emg.plot(t, p_mu[istart:iend], color='black')
# * 1000: to go from mV to uV
if len(emg_ticks) > 0:
axes_emg.set_yticks(emg_ticks)
plt.ylabel('Ampl. ' + '$\mathrm{(\mu V)}$')
plt.xlim((t[0], t[-1] + 1))
else:
axes_emg.plot(t_emg, emg[istart_emg:iend_emg], color='black', lw=0.2)
plt.xlim((t_emg[0], t_emg[-1] + 1))
box_off(axes_emg)
axes_emg.patch.set_alpha(0.0)
axes_emg.spines["bottom"].set_visible(False)
# axis for raw data example
if len(raw_ex) > 0:
axes_raw_ex = plt.axes([0.1, .39, 0.8, 0.51])
axes_raw_ex.patch.set_alpha(0)
_despine_axes(axes_raw_ex)
for (ta, tlen) in raw_ex:
ta = ta-tstart
axes_raw_ex.add_patch(patches.Rectangle((ta, 0), tlen, 1, fill=False, edgecolor=[0.4, 0.4, 0.4], lw=0.3))
plt.ylim((0,1))
plt.xlim([t[0], t[-1]])
eeg = so.loadmat(os.path.join(ppath, name, 'EEG.mat'), squeeze_me=True)['EEG']
emg = so.loadmat(os.path.join(ppath, name, 'EMG.mat'), squeeze_me=True)['EMG']
# axes to label EEG EMG
ax_eeg_label = plt.axes([0.04, 0.18, 0.05, 0.1])
ax_eeg_label.set_xlim([0, 1])
ax_eeg_label.set_ylim([0, 1])
ax_eeg_label.text(0, 0.5, 'EEG', verticalalignment='center')
_despine_axes(ax_eeg_label)
ax_emg_label = plt.axes([0.04, 0.05, 0.05, 0.1])
ax_emg_label.set_xlim([0, 1])
ax_emg_label.set_ylim([0, 1])
ax_emg_label.text(0, 0.5, 'EMG', verticalalignment='center')
_despine_axes(ax_emg_label)
# axes for legend
ax_eeg_legend = plt.axes([0.92, 0.05, 0.05, 0.1])
ax_emg_legend = plt.axes([0.92, 0.18, 0.05, 0.1])
ax_eeg_legend.set_xlim([0, 1])
ax_emg_legend.set_xlim([0, 1])
ax_eeg_legend.set_ylim([-eegemg_max[0], eegemg_max[0]])
ax_emg_legend.set_ylim([-eegemg_max[1], eegemg_max[1]])
ax_eeg_legend.plot([0., 0.], [-eegemg_legend[0]/2, eegemg_legend[0]/2], color='black')
ax_emg_legend.plot([0., 0.], [-eegemg_legend[1]/2, eegemg_legend[1]/2], color='black')
ax_eeg_legend.text(0.1, -eegemg_legend[0]/2, str(eegemg_legend[0]/1000) + 'mV', rotation=90, fontsize=8)
ax_emg_legend.text(0.1, -eegemg_legend[1]/2, str(eegemg_legend[1]/1000) + 'mV', rotation=90, fontsize=8)
_despine_axes(ax_eeg_legend)
_despine_axes(ax_emg_legend)
nraw_ex = len(raw_ex)
raw_axes_eeg = []
raw_axes_emg = []
len_x = 0.8/nraw_ex-0.02
start_x = np.linspace(0.1+len_x, 0.9, nraw_ex) - len_x
for i in range(nraw_ex):
a = plt.axes([start_x[i], 0.05, (0.8/nraw_ex)-0.02, .1])
raw_axes_emg.append(a)
a = plt.axes([start_x[i], 0.18, (0.8/nraw_ex)-0.02, .1])
raw_axes_eeg.append(a)
for (ax, i) in zip(raw_axes_eeg, range(len(raw_ex))):
ta, tlen = raw_ex[i]
idx = range(int(ta*sr), int(ta*sr+tlen*sr))
t_eeg = np.arange(0, len(idx))*ddt
ax.plot(t_eeg, eeg[idx], color='k', lw=0.5)
ax.set_xlim([t_eeg[0], t_eeg[-1]])
_despine_axes(ax)
ax.set_ylim([-eegemg_max[0], eegemg_max[0]])
for (ax, i) in zip(raw_axes_emg, range(len(raw_ex))):
ta, tlen = raw_ex[i]
idx = range(int(ta*sr), int(ta*sr+tlen*sr))
t_eeg = np.arange(0, len(idx))*ddt
ax.plot(t_eeg, emg[idx], color='k', lw=0.5)
ax.set_xlim([t_eeg[0], t_eeg[-1]])
_despine_axes(ax)
ax.set_ylim([-eegemg_max[1], eegemg_max[1]])
axes_raw_time = plt.axes([0.1, 0.03, len_x, 0.02])
plt.plot([0, tlen/10], [0,0], color='black', lw=0.8)
plt.ylim([-1,1])
plt.xlim([t_eeg[0], t_eeg[-1]])
_despine_axes(axes_raw_time)
if len(fig_file) > 0:
save_figure(fig_file)
plt.show()
def sleep_stats(ppath, recordings, ma_thr=10.0, tstart=0, tend=-1, pplot=True, csv_file=''):
"""
Calculate average percentage of each brain state,
average duration, and average frequency of episodes;
plot histograms for REM, NREM, and Wake durations
@PARAMETERS:
ppath - base folder
recordings - single string specifying recording or list of recordings
@OPTIONAL:
ma_thr - threshold for wake periods to be considered as microarousals
tstart - only consider recorded data starting from time tstart, default 0s
tend - only consider data recorded up to tend s, default -1, i.e. everything till the end
pplot - generate plot in the end; True or False
csv_file - file where data should be saved as csv file (e.g. csv_file = '/home/Users/Franz/Documents/my_data.csv')
@RETURN:
ndarray of percentages (# mice x [REM,Wake,NREM])
ndarray of state durations
ndarray of transition frequency / hour
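Example call (base folder and recording names below are placeholders):
PercMx, DurMx, FreqMx, df = sleep_stats(ppath, ['M1_010121n1', 'M2_010121n1'],
ma_thr=10, csv_file='sleep_stats.csv')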
"""
if type(recordings) != list:
recordings = [recordings]
Percentage = {}
Duration = {}
Frequency = {}
mice = []
for rec in recordings:
idf = re.split('_', os.path.split(rec)[-1])[0]
if not idf in mice:
mice.append(idf)
Percentage[idf] = {1:[], 2:[], 3:[]}
Duration[idf] = {1:[], 2:[], 3:[]}
Frequency[idf] = {1:[], 2:[], 3:[]}
nmice = len(Frequency)
for rec in recordings:
idf = re.split('_', os.path.split(rec)[-1])[0]
SR = get_snr(ppath, rec)
NBIN = np.round(2.5*SR)
dt = NBIN * 1/SR
# load brain state
M, K = load_stateidx(ppath, rec)
kcut = np.where(K >= 0)[0]
M = M[kcut]
istart = int(np.round((1.0 * tstart) / dt))
if tend==-1:
iend = len(M)-1
else:
iend = int(np.round((1.0*tend) / dt))
M[np.where(M==5)] = 2
# polish out microarousals
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if np.round(len(s)*dt) <= ma_thr:
M[s] = 3
midx = np.arange(istart,iend+1)
Mcut = M[midx]
nm = len(Mcut)*1.0
# get percentage of each state
for s in [1,2,3]:
Percentage[idf][s].append(len(np.where(Mcut==s)[0]) / nm)
# get frequency of each state
for s in [1,2,3]:
Frequency[idf][s].append( len(get_sequences(np.where(Mcut==s)[0])) * (3600. / (nm*dt)) )
# get average duration for each state
for s in [1,2,3]:
seq = get_sequences(np.where(Mcut==s)[0])
Duration[idf][s] += [len(i)*dt for i in seq]
PercMx = np.zeros((nmice,3))
i=0
for k in mice:
for s in [1,2,3]:
PercMx[i,s-1] = np.array(Percentage[k][s]).mean()
i += 1
PercMx *= 100
FreqMx = np.zeros((nmice,3))
i = 0
for k in mice:
for s in [1,2,3]:
FreqMx[i,s-1] = np.array(Frequency[k][s]).mean()
i += 1
DurMx = np.zeros((nmice,3))
i = 0
for k in mice:
for s in [1,2,3]:
DurMx[i,s-1] = np.array(Duration[k][s]).mean()
i += 1
DurHist = {1:[], 2:[], 3:[]}
for s in [1,2,3]:
DurHist[s] = np.squeeze(np.array(reduce(lambda x,y: x+y, [Duration[k][s] for k in Duration])))
if pplot:
clrs = sns.color_palette("husl", nmice)
plt.ion()
# plot bars summarizing results - Figure 1
plt.figure(figsize=(10, 5))
ax = plt.axes([0.1, 0.15, 0.2, 0.8])
plt.bar([1,2,3], PercMx.mean(axis=0), align='center', color='gray', fill=False)
plt.xticks([1,2,3], ['REM', 'Wake', 'NREM'], rotation=60)
for i in range(nmice):
plt.plot([1,2,3], PercMx[i,:], 'o', label=mice[i], color=clrs[i])
plt.ylabel('Percentage (%)')
plt.legend(fontsize=9)
plt.xlim([0.2, 3.8])
box_off(ax)
ax = plt.axes([0.4, 0.15, 0.2, 0.8])
plt.bar([1,2,3], DurMx.mean(axis=0), align='center', color='gray', fill=False)
plt.xticks([1,2,3], ['REM', 'Wake', 'NREM'], rotation=60)
for i in range(nmice):
plt.plot([1, 2, 3], DurMx[i, :], 'o', label=mice[i], color=clrs[i])
plt.ylabel('Duration (s)')
plt.xlim([0.2, 3.8])
box_off(ax)
ax = plt.axes([0.7, 0.15, 0.2, 0.8])
plt.bar([1,2,3], FreqMx.mean(axis=0), align='center', color='gray', fill=False)
plt.xticks([1,2,3], ['REM', 'Wake', 'NREM'], rotation=60)
for i in range(nmice):
plt.plot([1, 2, 3], FreqMx[i, :], 'o', label=mice[i], color=clrs[i])
plt.ylabel('Frequency (1/h)')
plt.xlim([0.2, 3.8])
box_off(ax)
plt.show(block=False)
# plot histograms - Figure 2
plt.figure(figsize=(5, 10))
ax = plt.axes([0.2,0.1, 0.7, 0.2])
h, edges = np.histogram(DurHist[1], bins=40, range=(0, 300), density=True)
binWidth = edges[1] - edges[0]
plt.bar(edges[0:-1], h*binWidth, width=5)
plt.xlim((edges[0], edges[-1]))
plt.xlabel('Duration (s)')
plt.ylabel('Freq. REM')
box_off(ax)
ax = plt.axes([0.2,0.4, 0.7, 0.2])
h, edges = np.histogram(DurHist[2], bins=40, range=(0, 1200), density=True)
binWidth = edges[1] - edges[0]
plt.bar(edges[0:-1], h*binWidth, width=20)
plt.xlim((edges[0], edges[-1]))
plt.xlabel('Duration (s)')
plt.ylabel('Freq. Wake')
box_off(ax)
ax = plt.axes([0.2,0.7, 0.7, 0.2])
h, edges = np.histogram(DurHist[3], bins=40, range=(0, 1200), density=True)
binWidth = edges[1] - edges[0]
plt.bar(edges[0:-1], h*binWidth, width=20)
plt.xlim((edges[0], edges[-1]))
plt.xlabel('Duration (s)')
plt.ylabel('Freq. NREM')
box_off(ax)
plt.show()
mouse_list = [[m]*3 for m in mice]
mouse_list = sum(mouse_list, [])
state_list = ['REM', 'Wake', 'NREM']*nmice
df = pd.DataFrame({'mouse':mouse_list, 'state':state_list, 'Perc':PercMx.flatten(), 'Dur':DurMx.flatten(), 'Freq':FreqMx.flatten()})
if len(csv_file) > 0:
df.to_csv(csv_file, index=False)
return PercMx, DurMx, FreqMx, df
def sleep_timecourse_list(ppath, recordings, tbin, n, tstart=0, tend=-1, ma_thr=-1, pplot=True, single_mode=False, csv_file=''):
"""
simplified version of sleep_timecourse
plot sleep timecourse for a list of recordings
The function does not distinguish between control and experimental mice.
It computes/plots how the percentage, frequency (1/h), and episode duration
of each brain state evolve over time.
See also sleep_timecourse
@Parameters:
ppath Base folder with recordings
recordings list of recordings as e.g. generated by &load_recordings
tbin duration of single time bin in seconds
n number of time bins
@Optional:
tstart start time of first bin in seconds
tend end time of last bin; end of recording if tend==-1
ma_thr set microarousals (wake periods <= ma_thr seconds) to NREM
if ma_thr==-1, don't do anything
pplot plot figures summarizing results
single_mode if True, plot each single mouse
csv_file string, if non-empty, write data into file $csv_file (should end in '.csv')
to load csv file: data = pd.read_csv($csv_file.csv, header=[0,1,2])
@Return:
TimeMx, DurMx, FreqMx, df
TimeMx, DurMx, FreqMx: Dict[state][time_bin x mouse_id]
df is a pandas DataFrame of the format
Perc DUR
REM Wake NREM REM Wake etc.
bin1 ... binn bin1 ... binn bin1 ... binn bin1 ... binn etc.
mouse1
.
.
.
mousen
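Example call (base folder and recording names below are placeholders; 12 one-hour bins):
TimeMx, DurMx, FreqMx, df = sleep_timecourse_list(ppath, ['M1_010121n1', 'M2_010121n1'],
tbin=3600, n=12, ma_thr=20)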
"""
if type(recordings) != list:
recordings = [recordings]
Mice = {}
mouse_order = []
for rec in recordings:
idf = re.split('_', os.path.split(rec)[-1])[0]
if not idf in mouse_order:
mouse_order.append(idf)
Mice[idf] = 1
Mice = list(Mice.keys())
TimeCourse = {}
FreqCourse = {}
DurCourse = {}
for rec in recordings:
idf = re.split('_', rec)[0]
SR = get_snr(ppath, rec)
NBIN = np.round(2.5*SR)
# time bin in Fourier time
dt = NBIN * 1/SR
M,K = load_stateidx(ppath, rec)
kcut = np.where(K>=0)[0]
#kidx = np.setdiff1d(np.arange(0, M.shape[0]), kcut)
M = M[kcut]
M[np.where(M==5)] = 2
# polish out microarousals
if ma_thr>0:
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt <= ma_thr:
M[s] = 3
if tend==-1:
iend = len(M)-1
else:
iend = int(np.round((1.0*tend) / dt))
M = M[0:iend+1]
istart = int(np.round((1.0*tstart) / dt))
ibin = int(np.round(tbin / dt))
# how brain state percentage changes over time
perc_time = []
for i in range(n):
#midx = np.arange(istart+i*ibin, istart+(i+1)*ibin)
#midx = np.setdiff1d(midx, kcut)
#M_cut = M[np.arange(istart+i*ibin, istart+(i+1)*ibin)]
M_cut = M[(istart+i*ibin):(istart+(i+1)*ibin)]
perc = []
for s in [1,2,3]:
perc.append( len(np.where(M_cut==s)[0]) / (1.0*len(M_cut)) )
perc_time.append(perc)
perc_vec = np.zeros((n,3))
for i in range(3):
perc_vec[:,i] = np.array([v[i] for v in perc_time])
TimeCourse[rec] = perc_vec
# how frequency of sleep stage changes over time
freq_time = []
for i in range(n):
#midx = np.arange(istart+i*ibin, istart+(i+1)*ibin)
#midx = np.setdiff1d(midx, kcut)
#M_cut = M[np.arange(istart+i*ibin, istart+(i+1)*ibin)]
M_cut = M[(istart+i*ibin):(istart+(i+1)*ibin)]
freq = []
for s in [1,2,3]:
tmp = len(get_sequences(np.where(M_cut==s)[0])) * (3600. / (len(M_cut)*dt))
freq.append(tmp)
freq_time.append(freq)
freq_vec = np.zeros((n,3))
for i in range(3):
freq_vec[:,i] = np.array([v[i] for v in freq_time])
FreqCourse[rec] = freq_vec
# how duration of sleep stage changes over time
dur_time = []
for i in range(n):
#midx = np.arange(istart+i*ibin, istart+(i+1)*ibin)
#midx = np.setdiff1d(midx, kcut)
M_cut = M[(istart+i*ibin):(istart+(i+1)*ibin)]
dur = []
for s in [1,2,3]:
tmp = get_sequences(np.where(M_cut==s)[0])
tmp = np.array([len(j)*dt for j in tmp]).mean()
dur.append(tmp)
dur_time.append(dur)
dur_vec = np.zeros((n,3))
for i in range(3):
dur_vec[:,i] = np.array([v[i] for v in dur_time])
DurCourse[rec] = dur_vec
# collect all recordings belonging to a Control mouse
TimeCourseMouse = {}
DurCourseMouse = {}
FreqCourseMouse = {}
# Dict[mouse_id][time_bin x br_state]
for mouse in Mice:
TimeCourseMouse[mouse] = []
DurCourseMouse[mouse] = []
FreqCourseMouse[mouse] = []
for rec in recordings:
idf = re.split('_', rec)[0]
TimeCourseMouse[idf].append(TimeCourse[rec])
DurCourseMouse[idf].append(DurCourse[rec])
FreqCourseMouse[idf].append(FreqCourse[rec])
mx = np.zeros((n, len(Mice)))
TimeMx = {1:mx, 2:mx.copy(), 3:mx.copy()}
mx = np.zeros((n, len(Mice)))
DurMx = {1:mx, 2:mx.copy(), 3:mx.copy()}
mx = np.zeros((n, len(Mice)))
FreqMx = {1:mx, 2:mx.copy(), 3:mx.copy()}
# Dict[R|W|N][time_bin x mouse_id]
i = 0
for k in mouse_order:
for s in range(1,4):
tmp = np.array(TimeCourseMouse[k]).mean(axis=0)
TimeMx[s][:,i] = tmp[:,s-1]
tmp = np.array(DurCourseMouse[k]).mean(axis=0)
DurMx[s][:,i] = tmp[:,s-1]
tmp = np.array(FreqCourseMouse[k]).mean(axis=0)
FreqMx[s][:,i] = tmp[:,s-1]
i += 1
if pplot:
clrs = sns.color_palette("husl", len(mouse_order))
label = {1:'REM', 2:'Wake', 3:'NREM'}
tlabel = np.linspace(istart*dt, istart*dt+n*ibin*dt, n+1)
t = np.linspace(istart*dt, istart*dt+n*ibin*dt, n+1)[0:-1] + (ibin*dt/2.0)
t /= 3600.0
tlabel /= 3600.0
# plot percentage of brain state as function of time
plt.ion()
plt.figure()
for s in range(1,4):
ax = plt.axes([0.1, (s-1)*0.3+0.1, 0.8, 0.2])
if not single_mode:
plt.errorbar(t, np.nanmean(TimeMx[s],axis=1), yerr = np.nanstd(TimeMx[s],axis=1), color='gray', fmt = 'o', linestyle='-', linewidth=2, elinewidth=2)
else:
for i in range(len(mouse_order)):
plt.plot(t, TimeMx[s][:,i], 'o-', linewidth=1.5, color=clrs[i])
if s==3:
ax.legend(mouse_order, bbox_to_anchor = (0., 1.0, 1., .102), loc=3, mode='expand', ncol=len(mouse_order), frameon=False)
box_off(ax)
plt.xlim((-0.5, n-0.5))
if s==1:
#plt.ylim([0, 0.2])
pass
else:
plt.ylim([0, 1.0])
plt.ylabel('Perc ' + label[s] + '(%)')
plt.xticks(tlabel)
plt.xlim([tlabel[0], tlabel[-1]])
if s == 1:
plt.xlabel('Time (h)')
plt.draw()
# plot duration as function of time
plt.figure()
for s in range(1,4):
ax = plt.axes([0.1, (s-1)*0.3+0.1, 0.8, 0.2])
if not single_mode:
plt.errorbar(t, np.nanmean(DurMx[s],axis=1), yerr = np.nanstd(DurMx[s],axis=1), color='gray', fmt = 'o', linestyle='-', linewidth=2, elinewidth=2)
else:
for i in range(len(mouse_order)):
plt.plot(t, DurMx[s][:,i], 'o-', linewidth=1.5, color=clrs[i])
if s==3:
ax.legend(mouse_order, bbox_to_anchor = (0., 1.0, 1., .102), loc=3, mode='expand', ncol=len(mouse_order), frameon=False)
box_off(ax)
plt.xlim((-0.5, n-0.5))
plt.ylabel('Dur ' + label[s] + '(s)')
plt.xticks(tlabel)
plt.xlim([tlabel[0], tlabel[-1]])
if s==1:
plt.xlabel('Time (h)')
plt.draw()
# plot frequency as function of time
plt.figure()
for s in range(1,4):
ax = plt.axes([0.1, (s-1)*0.3+0.1, 0.8, 0.2])
if not single_mode:
plt.errorbar(t, np.nanmean(FreqMx[s],axis=1), yerr = np.nanstd(FreqMx[s],axis=1), color='gray', fmt = 'o', linestyle='-', linewidth=2, elinewidth=2)
else:
for i in range(len(mouse_order)):
plt.plot(t, FreqMx[s][:,i], 'o-', linewidth=1.5, color=clrs[i])
if s==3:
ax.legend(mouse_order, bbox_to_anchor = (0., 1.0, 1., .102), loc=3, mode='expand', ncol=len(mouse_order), frameon=False)
box_off(ax)
plt.xlim((-0.5, n-0.5))
plt.ylabel('Freq ' + label[s] + '(1/h)')
plt.xticks(tlabel)
plt.xlim([tlabel[0], tlabel[-1]])
if s==1:
plt.xlabel('Time (h)')
plt.draw()
# write data into dataframe and csv file
bins = ['bin' + str(i+1) for i in range(n)]
columns = pd.MultiIndex.from_product([['Perc', 'Dur', 'Freq'], ['REM', 'Wake', 'NREM'], bins],
names=['stats', 'state', 'bin'])
D = np.concatenate((TimeMx[1].T, TimeMx[2].T, TimeMx[3].T, DurMx[1].T, DurMx[2].T, DurMx[3].T, FreqMx[1].T, FreqMx[2].T, FreqMx[3].T), axis=1)
df = pd.DataFrame(D, index=mouse_order, columns=columns)
if len(csv_file) > 0:
df.to_csv(csv_file, index=False)
return TimeMx, DurMx, FreqMx, df
def ma_timecourse_list(ppath, recordings, tbin, n, tstart=0, tend=-1, ma_thr=20, pplot=True, single_mode=False, csv_file=''):
"""
Calculate percentage, duration, and frequency of microarousals
:param ppath: base folder
:param recordings: single recording or list of recordings
:param tbin: time bin in seconds
:param n: number of time bins
:param tstart: start time in recording(s) for analysis
:param tend: end time for analysis
:param ma_thr: microarousal threshold; any wake period shorter than $ma_thr will be considered as microarousal
:param pplot: if True, plot figure
:param single_mode: if True, plot each single mouse with different color
:param csv_file: string, if non-empty, write data into file "csv_file";
file name should end with ".csv"
:return: TimeMx, DurMx, FreqMx, df
TimeMx, DurMx, FreqMx: np.arrays with shape "time bins x mice"
df: DataFrame with columns: mouse, perc, dur, freq, bin
For example, to get the first time bins of perc, dur, freq of mouse M1 type
df[(df.mouse == 'M1') & (df.bin == 't0')]
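Example call (base folder and recording names below are placeholders):
TimeMx, DurMx, FreqMx, df = ma_timecourse_list(ppath, ['M1_010121n1', 'M2_010121n1'],
tbin=1800, n=24, ma_thr=20)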
"""
if type(recordings) != list:
recordings = [recordings]
mouse_order = []
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mouse_order:
mouse_order.append(idf)
TimeCourse = {}
FreqCourse = {}
DurCourse = {}
for rec in recordings:
SR = get_snr(ppath, rec)
NBIN = np.round(2.5 * SR)
# time bin in Fourier time
dt = NBIN * 1 / SR
M, K = load_stateidx(ppath, rec)
kcut = np.where(K >= 0)[0]
# kidx = np.setdiff1d(np.arange(0, M.shape[0]), kcut)
M = M[kcut]
Mnew = np.zeros(M.shape)
Mnew[np.where(M == 5)] = 1
# polish out microarousals
if ma_thr > 0:
seq = get_sequences(np.where(M == 2)[0])
for s in seq:
if len(s) * dt <= ma_thr:
Mnew[s] = 1
M = Mnew
if tend == -1:
iend = len(M) - 1
else:
iend = int(np.round((1.0 * tend) / dt))
M = M[0:iend + 1]
istart = int(np.round((1.0 * tstart) / dt))
ibin = int(np.round(tbin / dt))
# how brain state percentage changes over time
perc_time = []
for i in range(n):
midx = np.arange(istart + i * ibin, istart + (i + 1) * ibin)
# midx = np.setdiff1d(midx, kcut)
M_cut = M[midx]
perc = len(np.where(M_cut == 1)[0]) / (1.0 * len(M_cut))
perc_time.append(perc)
TimeCourse[rec] = np.array(perc_time)
# how frequency of sleep stage changes over time
freq_time = []
for i in range(n):
midx = np.arange(istart + i * ibin, istart + (i + 1) * ibin)
# midx = np.setdiff1d(midx, kcut)
M_cut = M[midx]
s = 1
freq = len(get_sequences(np.where(M_cut == s)[0])) * (3600. / (len(M_cut) * dt))
freq_time.append(freq)
FreqCourse[rec] = np.array(freq_time)
# how duration of microarousals changes over time
dur_time = []
for i in range(n):
midx = np.arange(istart + i * ibin, istart + (i + 1) * ibin)
# midx = np.setdiff1d(midx, kcut)
M_cut = M[midx]
s = 1
tmp = get_sequences(np.where(M_cut == s)[0])
dur = np.array([len(j) * dt for j in tmp]).mean()
dur_time.append(dur)
DurCourse[rec] = np.array(dur_time)
# collect all recordings belonging to a Control mouse
TimeCourseMouse = {}
DurCourseMouse = {}
FreqCourseMouse = {}
# Dict[mouse_id][time_bin x br_state]
for mouse in mouse_order:
TimeCourseMouse[mouse] = []
DurCourseMouse[mouse] = []
FreqCourseMouse[mouse] = []
for rec in recordings:
idf = re.split('_', rec)[0]
TimeCourseMouse[idf].append(TimeCourse[rec])
DurCourseMouse[idf].append(DurCourse[rec])
FreqCourseMouse[idf].append(FreqCourse[rec])
# np.array(time x mouse_id)
TimeMx = np.zeros((n, len(mouse_order)))
DurMx = np.zeros((n, len(mouse_order)))
FreqMx = np.zeros((n, len(mouse_order)))
i = 0
for k in mouse_order:
tmp = np.array(TimeCourseMouse[k]).mean(axis=0)
TimeMx[:,i] = tmp
tmp = np.array(DurCourseMouse[k]).mean(axis=0)
DurMx[:,i] = tmp
tmp = np.array(FreqCourseMouse[k]).mean(axis=0)
FreqMx[:,i] = tmp
i += 1
# plotting
if pplot:
plot_dict = {0:TimeMx, 1:DurMx, 2:FreqMx}
clrs = sns.color_palette("husl", len(mouse_order))
ylabel = {0:'Perc (%)', 1:'Dur (s)', 2:'Freq ($h^{-1}$)'}
tlabel = np.linspace(istart*dt, istart*dt+n*ibin*dt, n+1)
t = np.linspace(istart*dt, istart*dt+n*ibin*dt, n+1)[0:-1] + (ibin*dt/2.0)
t /= 3600.0
tlabel /= 3600.0
# plot percentage of brain state as function of time
plt.ion()
plt.figure()
for s in range(0, 3):
ax = plt.axes([0.15, s*0.3+0.1, 0.8, 0.2])
if not single_mode:
plt.errorbar(t, np.nanmean(plot_dict[s],axis=1), yerr = np.nanstd(plot_dict[s],axis=1), color='gray', fmt = 'o', linestyle='-', linewidth=2, elinewidth=2)
else:
for i in range(len(mouse_order)):
plt.plot(t, plot_dict[s][:,i], 'o-', linewidth=1.5, color=clrs[i])
if s==2:
ax.legend(mouse_order, bbox_to_anchor = (0., 1.0, 1., .102), loc=3, mode='expand', ncol=len(mouse_order), frameon=False)
box_off(ax)
plt.xlim((-0.5, n-0.5))
plt.ylabel(ylabel[s])
plt.xticks(tlabel)
plt.xlim([tlabel[0], tlabel[-1]])
if s == 0:
plt.xlabel('Time (h)')
plt.draw()
bins = [['t' + str(i)]*len(mouse_order) for i in range(n)]
bins = sum(bins, [])
cols = ['mouse', 'perc', 'dur', 'freq', 'bin']
mice = mouse_order*n
df = pd.DataFrame(columns=cols)
df['mouse'] = mice
df['bin'] = bins
df['perc'] = TimeMx.flatten()
df['dur'] = DurMx.flatten()
df['freq'] = FreqMx.flatten()
if len(csv_file) > 0:
df.to_csv(csv_file, index=False)
return TimeMx, DurMx, FreqMx, df
def transition_timecourse_list(ppath, recordings, tbin, n, tdown=10, tstart=0, tend=-1, ma_thr=-1, pplot=True, single_mode=False, csv_file=''):
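"""
Compute how the transition probabilities between brain states (REM, Wake, NREM) evolve over time.
For each mouse and each of the $n time bins (each $tbin s long), the hypnogram is downsampled
by a factor corresponding to $tdown s and the transition matrix is estimated using complete_transition_matrix().
:param ma_thr: wake periods <= $ma_thr s are treated as microarousals
:return: pd.DataFrame with columns ['mouse', 'time', 'prob', 'trans'];
'trans' is one of 'RR', 'RW', 'RN', 'WR', 'WW', 'WN', 'NR', 'NW', 'NN'
"""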
if type(recordings) != list:
recordings = [recordings]
mouse_order = []
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mouse_order:
mouse_order.append(idf)
Recordings = {idf:[] for idf in mouse_order}
SR = get_snr(ppath, recordings[0])
NBIN = np.round(2.5 * SR)
# time bin in Fourier time
dt = NBIN * 1 / SR
istart = int(np.round((1.0 * tstart) / dt))
ibin = int(np.round(tbin / dt))
idown = int(tdown/dt)
for rec in recordings:
idf = re.split('_', rec)[0]
M = load_stateidx(ppath, rec)[0]
# polish out microarousals
if ma_thr > 0:
seq = get_sequences(np.where(M == 2)[0])
for s in seq:
if len(s) * dt <= ma_thr:
M[s] = 3
if tend == -1:
iend = len(M) - 1
else:
iend = int(np.round((1.0 * tend) / dt))
M = M[0:iend + 1]
# how brain state percentage changes over time
Recordings[idf].append(M)
MX = {idf:[] for idf in mouse_order}
for i in range(n):
for idf in mouse_order:
recs = Recordings[idf]
midx = np.arange(istart + i * ibin, istart + (i + 1) * ibin)
recs = [downsample_states(rec[midx], idown) for rec in recs]
recs = np.array(recs, dtype='int')
#recs = downsample_states(recs, idown)
pmx = complete_transition_matrix(recs, np.array(range(recs.shape[1])))
MX[idf].append(pmx)
#transform MX to a DataFrame
trans_map = {'11':'RR', '12':'RW', '13':'RN',
'21':'WR', '22':'WW', '23':'WN',
'31':'NR', '32':'NW', '33':'NN'}
data = []
for i in range(n):
for si in [1,2,3]:
for sj in [1,2,3]:
for idf in mouse_order:
trans = trans_map[str(si)+str(sj)]
data += [[idf, 't'+str(i), MX[idf][i][si-1, sj-1], trans]]
df = pd.DataFrame(data=data, columns=['mouse', 'time', 'prob', 'trans'])
return df
def sleep_through_days(ppath, recordings, tstart=0, tend=-1, stats=0, xticks=[], ma_thr=20, min_dur=[0,0,0], single_mode=True, csv_file = ''):
"""
Follow sleep quantity (percentage, bout duration, or frequency / hour) over multiple days
:param ppath: base folder
:param recordings: list of lists of recordings, for example [[F1_010118n1, F2_010118n1], [F1_010218n1, F1_010218n1]]
specifies the recordings of F1 and F2 for two days
:param tstart: float, quantification of sleep starts at $tstart s
:param tend: float, quantification of sleep ends at $tend s
:param stats: Measured sleep variable (statistics):
0 - percentage, 1 - episode duration, 2 - episode frequency, 3 - latency to first occurrence of REM, Wake, and NREM
:param xticks: list of string, specifying the xticks
:param ma_thr: float, wake periods shorter than $ma_thr are considered as microarousals and further converted to NREM
:param min_dur: list with 3 floats, specifying the minimum duration of the first REM, Wake, and NREM period,
only relevant if $stats == 3
:param single_mode: if True, plot each single mouse in different color
:param csv_file: string, save pd.DataFrame to file; the actual file name will be $csv_file + '_stats' + $stats + '.csv' and
will be saved in the folder $ppath
:return: np.array, mice x [REM,Wake,NREM] x days AND pd.DataFrame
the pd.DataFrame has the following format:
state REM Wake NREM
day Day1 ... Dayn Day1 ... Dayn Day1 ... Dayn
mouse1
.
.
.
mousen
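Example call (two days; recording names are the placeholders used above):
DayResults, df = sleep_through_days(ppath,
[['F1_010118n1', 'F2_010118n1'], ['F1_010218n1', 'F2_010218n1']], stats=0)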
"""
states = {1:'REM', 2:'Wake', 3:'NREM'}
stats_label = {0:'(%)', 1:'Dur (s)', 2: 'Freq. (1/h)', 3: 'Lat. (min)'}
mice_per_day = {}
iday = 0
for day in recordings:
mice = []
for rec in day:
idf = re.split('_', os.path.split(rec)[-1])[0]
if not idf in mice:
mice.append(idf)
mice_per_day[iday] = mice
iday += 1
ndays = len(mice_per_day)
nmice = len(mice_per_day[0])
for i in range(ndays):
for j in range(i+1, ndays):
if mice_per_day[i] != mice_per_day[j]:
print("ERROR: mice on day %d and %d not consistent" % (i+1, j+1))
return
#DayResults: mice x [R|W|N] x days
DayResults = np.zeros((nmice, 3, ndays))
for day in range(ndays):
if stats<=2:
res = sleep_stats(ppath, recordings[day], tstart=tstart, tend=tend, pplot=False, ma_thr=ma_thr)[stats]
else:
res = np.zeros((nmice, 3))
for s in range(1,4):
res[:,s-1] = state_onset(ppath, recordings[day], s, min_dur=min_dur[s-1], tstart=tstart, tend=tend, pplot=False)
DayResults[:,:,day] = res
plt.ion()
clrs = sns.color_palette("husl", nmice)
plt.figure(figsize=(10,6))
for s in range(1, 4):
ax = plt.axes([0.1, (s - 1) * 0.3 + 0.1, 0.8, 0.2])
if single_mode:
for i in range(nmice):
plt.plot(list(range(1,ndays+1)), DayResults[i,s-1,:], 'o-', color=clrs[i], label=mice[i])
else:
plt.errorbar(list(range(1, ndays+1)), DayResults[:, s-1, :].mean(axis=0), yerr=DayResults[:, s-1, :].std(axis=0),
color='gray', label='avg', linewidth=2)
if s == 1:
if len(xticks) == 0:
plt.xticks(list(range(1,ndays+1)))
else:
plt.xticks(list(range(1, ndays + 1)), xticks)
else:
plt.xticks(list(range(1, ndays + 1)))
ax.set_xticklabels([])
if s == 3:
ax.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=nmice,
frameon=False)
box_off(ax)
if s == 1:
plt.xlabel('Day')
plt.ylabel(states[s] + ' ' + stats_label[stats])
if len(xticks) > 0:
col = xticks
else:
col = ['Day' + str(i+1) for i in range(ndays)]
# put data into pandas dataframe
columns = pd.MultiIndex.from_product([['REM', 'Wake', 'NREM'], col], names=['state', 'day'])
D = np.concatenate((DayResults[:,0,:], DayResults[:,1,:], DayResults[:,2,:]), axis=1)
df = pd.DataFrame(D, index=mice, columns=columns)
if len(csv_file) > 0:
csv_file += '_stats' + str(stats) + '.csv'
df.to_csv(os.path.join(ppath, csv_file), index=False)
return DayResults, df
def sleep_timecourse(ppath, trace_file, tbin, n, tstart=0, tend=-1, pplot=True, stats='perc', csv_file='', ma_thr=0):
"""
plot how percentage of REM,Wake,NREM changes over time;
compares control with experimental data; experimental recordings can have different "doses"
a simpler version is sleep_timecourse_list
@Parameters
trace_file - text file, specifies control and experimental recordings,
the syntax for the file is the same as required for load_dose_recordings
Example: with one control and two experimental groups (1 or 2 third column)
# Comments
#Mouse Recording dose
C B1_01012020n1
C B2_01012020n1
# no dose value for controls
E B1_01022020n1 1
E B2_01022020n1 1
E B1_01032020n1 2
E B2_01032020n1 2
tbin - size of time bin in seconds
n - number of time bins
@Optional:
tstart - beginning of recording (time <tstart is thrown away)
tend - end of recording (time >tend is thrown away)
pplot - plot figure if True
stats - statistics;
stats = 'perc': compute percentage of each brain state in each time bin;
stats = 'freq': compute frequency of each state for each time bin
stats = 'dur': compute average duration of each state sequence
for each time bin
@Return:
TimeMxCtr - Dict[R|W|N][time_bin x mouse_id]
TimeMxExp - Dict[R|W|N][dose][time_bin x mouse_id]
df - pandas.DataFrame with columns ['mouse', 'dose', 'state', 'time', $stats]
How to run 2way anova with repeated measures?
to determine the effects of different doses on REM:
# extract all REM values from DataFrame
df_rem = df[df.state == 'REM']
the within factors are 'time' and 'dose'; the dependent variable is 'perc'
using pingouin the anova can be calculated using
pg.rm_anova(data=df_rem, dv='perc', within=['time', 'dose'], subject='mouse', correction=True)
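Example call (base folder and trace file name below are placeholders):
TimeMxCtr, TimeMxExp, df = sleep_timecourse(ppath, 'dose_recordings.txt',
tbin=3600, n=12, stats='perc')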
"""
(ctr_rec, exp_rec) = load_dose_recordings(ppath, trace_file)
Recordings = []
Recordings += ctr_rec
for k in exp_rec.keys():
Recordings += exp_rec[k]
CMice = []
for mouse in ctr_rec:
idf = re.split('_', mouse)[0]
if not idf in CMice:
CMice.append(idf)
EMice = {}
for d in exp_rec:
mice = exp_rec[d]
EMice[d] = []
for mouse in mice:
idf = re.split('_', mouse)[0]
if not idf in EMice[d]:
EMice[d].append(idf)
TimeCourse = {}
for rec in Recordings:
idf = re.split('_', rec)[0]
SR = get_snr(ppath, rec)
NBIN = np.round(2.5*SR)
# time bin in Fourier time
dt = NBIN * 1/SR
M = load_stateidx(ppath, rec)[0]
M[np.where(M==5)] = 2
if ma_thr > 0:
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt <= ma_thr:
M[s] = 3
if tend==-1:
iend = len(M)-1
else:
iend = int(np.round((1.0*tend) / dt))
M = M[0:iend+1]
istart = int(np.round((1.0*tstart) / dt))
ibin = int(np.round(tbin / dt))
perc_time = []
for i in range(n):
# return something even if istart+(i+1)*ibin >= len(M)
M_cut = M[istart+i*ibin:istart+(i+1)*ibin]
#midx = np.arange(istart + i * ibin, istart + (i + 1) * ibin)
perc = []
for s in [1,2,3]:
if stats == 'perc':
perc.append( 100 * len(np.where(M_cut==s)[0]) / (1.0*len(M_cut)) )
elif stats == 'freq':
tmp = len(get_sequences(np.where(M_cut==s)[0])) * (3600. / (len(M_cut)*dt))
perc.append(tmp)
else:
tmp = get_sequences(np.where(M_cut==s)[0])
tmp = np.array([len(j)*dt for j in tmp]).mean()
perc.append(tmp)
perc_time.append(perc)
# number of time bins x [REM|Wake|NREM]
perc_vec = np.zeros((n,3))
for i in range(3):
# for each time bin we have a list of 3 elements for each state.
# take from each of these triplets the i-th state, forming a column vector
perc_vec[:,i] = np.array([v[i] for v in perc_time])
TimeCourse[rec] = perc_vec
# define data frame containing all data
#bins = ['t' + str(i) for i in range(n)]
cols = ['mouse', 'dose', 'state', 'time', stats]
df = pd.DataFrame(columns=cols)
state_map = {1: 'REM', 2:'Wake', 3:'NREM'}
# collect all recordings belonging to a Control mouse
TimeCourseCtr = {}
# Dict[mouse_id][time_bin x br_state]
for mouse in CMice:
TimeCourseCtr[mouse] = []
for rec in Recordings:
idf = re.split('_', rec)[0]
if rec in ctr_rec:
TimeCourseCtr[idf].append(TimeCourse[rec])
mx = np.zeros((n, len(CMice)))
TimeMxCtr = {1:mx, 2:mx.copy(), 3:mx.copy()}
# Dict[R|W|N][time_bin x mouse_id]
i = 0
for k in TimeCourseCtr:
for s in range(1,4):
# [time_bin x br_state]
tmp = np.array(TimeCourseCtr[k]).mean(axis=0)
TimeMxCtr[s][:,i] = tmp[:,s-1]
#for j in range(n):
# df = df.append(pd.Series([k, '0', state_map[s], 't'+str(j), tmp[j,s-1]], index=cols), ignore_index=True)
for j in range(n):
for r in range(len(TimeCourseCtr[k])):
# df.append() was removed in pandas 2.0; use pd.concat instead
df = pd.concat([df, pd.DataFrame([[k, '0', state_map[s], 't'+str(j), TimeCourseCtr[k][r][j,s-1]]], columns=cols)], ignore_index=True)
i += 1
# collect all recording belonging to one Exp mouse with a specific dose
TimeCourseExp = {}
# Dict[dose][mouse_id][time_bin x br_state]
for d in EMice:
TimeCourseExp[d]={}
for mouse in EMice[d]:
TimeCourseExp[d][mouse] = []
for rec in Recordings:
idf = re.split('_', rec)[0]
for d in exp_rec:
if rec in exp_rec[d]:
TimeCourseExp[d][idf].append(TimeCourse[rec])
# dummy dictionary to initialize TimeMxExp
# Dict[R|W|N][dose][time_bin x mouse_id]
TimeMxExp = {1:{}, 2:{}, 3:{}}
for s in [1,2,3]:
TimeMxExp[s] = {}
for d in EMice:
TimeMxExp[s][d] = np.zeros((n, len(EMice[d])))
for d in TimeCourseExp:
i = 0
for k in TimeCourseExp[d]:
print(k)
tmp = np.array(TimeCourseExp[d][k]).mean(axis=0)
for s in [1,2,3]:
# [time_bin x br_state] for mouse k
#tmp = sum(TimeCourseExp[d][k]) / (1.0*len(TimeCourseExp[d][k]))
TimeMxExp[s][d][:,i] = tmp[:,s-1]
#for j in range(n):
# df = df.append(pd.Series([k, d, state_map[s], 't'+str(j), tmp[j, s-1]], index=cols), ignore_index=True)
for j in range(n):
for r in range(len(TimeCourseExp[d][k])):
# df.append() was removed in pandas 2.0; use pd.concat instead
df = pd.concat([df, pd.DataFrame([[k, d, state_map[s], 't'+str(j), TimeCourseExp[d][k][r][j,s-1]]], columns=cols)], ignore_index=True)
i += 1
if pplot:
tlabel = np.linspace(istart*dt, istart*dt+n*ibin*dt, n+1)
t = np.linspace(istart*dt, istart*dt+n*ibin*dt, n+1)[0:-1] + (ibin*dt/2.0)
t /= 3600.0
tlabel /= 3600.0
plt.ion()
plt.figure()
ndose = len(EMice)
ax = plt.axes([0.1, 0.7, 0.8, 0.2])
plt.errorbar(t, TimeMxCtr[1].mean(axis=1), yerr = TimeMxCtr[1].std(axis=1), color='gray', fmt = 'o', linestyle='-', linewidth=2, elinewidth=2)
box_off(ax)
plt.xlim([t[0], t[-1]])
#plt.yticks([0, 0.1, 0.2])
plt.xticks(tlabel)
if stats=='perc':
plt.ylabel('% REM')
elif stats == 'freq':
plt.ylabel('Freq. REM (1/h)')
else:
plt.ylabel('Dur. REM (s)')
i = 1
for d in TimeMxExp[1]:
c = 1 - 1.0/ndose*i
plt.errorbar(t, TimeMxExp[1][d].mean(axis=1), yerr = TimeMxExp[1][d].std(axis=1), color=[c, c, 1], fmt = 'o', linestyle='-', linewidth=2, elinewidth=2)
i += 1
ax = plt.axes([0.1, 0.4, 0.8, 0.2])
plt.errorbar(t, TimeMxCtr[2].mean(axis=1), yerr = TimeMxCtr[2].std(axis=1), color='gray', fmt = 'o', linestyle='-', linewidth=2, elinewidth=2)
box_off(ax)
plt.xlim([t[0], t[-1]])
#plt.yticks([0, 0.1, 0.2])
plt.xticks(tlabel)
if stats=='perc':
plt.ylabel('% Wake')
elif stats == 'freq':
plt.ylabel('Freq. Wake (1/h)')
else:
plt.ylabel('Dur. Wake (s)')
i = 1
for d in TimeMxExp[2]:
c = 1 - 1.0/ndose*i
plt.errorbar(t, TimeMxExp[2][d].mean(axis=1), yerr = TimeMxExp[2][d].std(axis=1), color=[c, c, 1], fmt = 'o', linestyle='-', linewidth=2, elinewidth=2)
i += 1
ax = plt.axes([0.1, 0.1, 0.8, 0.2])
plt.errorbar(t, TimeMxCtr[3].mean(axis=1), yerr = TimeMxCtr[3].std(axis=1), color='gray', fmt = 'o', linestyle='-', linewidth=2, elinewidth=2)
box_off(ax)
plt.xlim([t[0], t[-1]])
#plt.yticks([0, 0.1, 0.2])
plt.xticks(tlabel)
if stats=='perc':
plt.ylabel('% NREM')
elif stats == 'freq':
plt.ylabel('Freq. NREM (1/h)')
else:
plt.ylabel('Dur. NREM (s)')
plt.xlabel('Time (h)')
plt.show()
i = 1
for d in TimeMxExp[3]:
c = 1 - 1.0/ndose*i
plt.errorbar(t, TimeMxExp[3][d].mean(axis=1), yerr = TimeMxExp[3][d].std(axis=1), color=[c, c, 1], fmt = 'o', linestyle='-', linewidth=2, elinewidth=2, label=d)
i += 1
plt.legend()
if len(csv_file) > 0:
df.to_csv(os.path.join(csv_file), index=False)
return TimeMxCtr, TimeMxExp, df
def state_onset(ppath, recordings, istate, min_dur, iseq=0, ma_thr=10, tstart=0, tend=-1, pplot=True):
"""
calculate time point of first occurrence of state $istate in @recordings
:param ppath: base folder
:param recordings: list of recordings
:param istate: 1 = REM, 2 = Wake, 3 = NREM
:param min_dur: minimum duration in [s] to be counted as first occurrence
:param iseq: calculate the $iseq-th occurrence of state $istate
:param ma_thr: microarousal threshold
:param tstart: float, quantification of sleep starts at $tstart s
:param tend: float, quantification of sleep ends at $tend s
:return: np.array, latency (in minutes) for each mouse. If one mouse contributes several recordings,
the maximum latency across its recordings is used
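Example call (base folder and recording names below are placeholders; latency to the first REM period >= 60 s):
values = state_onset(ppath, ['M1_010121n1', 'M2_010121n1'], istate=1, min_dur=60)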
"""
if type(recordings) != list:
recordings = [recordings]
# get all mice in recordings
mice = []
dt = 2.5
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mice:
mice.append(idf)
latency = {m:[] for m in mice}
for rec in recordings:
SR = get_snr(ppath, rec)
# Number of EEG bins / brainstate bin
NBIN = np.round(2.5 * SR)
# Precise time bin duration of each brain state:
dt = NBIN * 1.0 / SR
idf = re.split('_', rec)[0]
M,K = load_stateidx(ppath, rec)
# flatten out microarousals
if istate == 3 and ma_thr > 0:
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt <= ma_thr:
M[s] = 3
kcut = np.where(K>=0)[0]
M = M[kcut]
istart = int(np.round(tstart/dt))
iend = int(np.round(tend/dt))
if tend == -1:
iend = len(M)
M = M[istart:iend]
seq = get_sequences(np.where(M==istate)[0])
seq = [s for s in seq if len(s) * dt >= min_dur]
#ifirst = seq[seq[iseq][0]]
ifirst = seq[iseq][0]
latency[idf].append(ifirst*dt)
for m in mice:
latency[m] = np.nanmax(np.array(latency[m]))
values = np.array([latency[m]/60. for m in mice])
clrs = sns.color_palette("husl", len(mice))
if pplot:
# print latencies
for m in mice:
print("%s - %.2f min" % (m, latency[m] / 60.))
plt.ion()
plt.figure()
ax = plt.subplot(111)
set_fontsize(14)
#plt.bar(range(0, len(values)), values, color='gray')
for i in range(len(mice)):
plt.plot(i, values[i], 'o', color=clrs[i], label=mice[i])
#plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=len(mice), frameon=False)
plt.xticks(list(range(0, len(values))), mice)
plt.ylabel('Onset Latency (min)')
box_off(ax)
plt.show()
return values
def sleep_spectrum(ppath, recordings, istate=1, pmode=1, fres=1/3, ma_thr=20.0, f_max=30, pplot=True, sig_type='EEG', mu=[10, 100],
tstart=0, tend=-1, sthres=np.inf, peeg2=False, pnorm=False, single_mode=False, conv=1.0, fig_file='', laser_color='blue', ci='sd'):
"""
calculate power spectrum for brain state $istate for the given recordings.
The function first calculates for each mouse the powerspectrum for each
istate sequence, and then averages across all sequences.
Note: If recordings with different sampling rates are combined, set f_max to a
frequency value, which exists for all recordings.
@Param:
ppath - folder containing all recordings
recordings - single recording (string) or list of recordings
@Optional:
istate - state for which to calculate power spectrum; 1=REM, 2=Wake, 3=NREM
fres - resolution of frequency axis; i.e. fres = F[i] - F[i-1]
ma_thr - short wake periods <= $ma_thr are considered as sleep
f_max - maximal frequency, if f_max==-1: f_max is maximally possible frequency
pplot - if True, plot figure showing result
pmode - mode:
pmode == 1, compare state during laser with baseline outside laser interval
pmode == 0, just plot power spectrum for state istate and don't care about laser
pmode == 2, compare periods of state if they overlap with laser and if the laser precedes the state
with state periods w/o laser. That is, we are looking here at "laser-induced" periods; the period itself
can be longer than the laser stimulation (as long as it follows laser onset).
tstart - use EEG starting from time point tstart [seconds]
sig_type - string, if 'EMG' calculate EMG amplitude (from the EMG spectrum). E.g.,
sleepy.sleep_spectrum(ppath, E, istate=2, f_max=30, sig_type='EMG')
mu - tuple, lower and upper range for EMG frequencies used for amplitude calculation
tend - use data up to tend [seconds], if tend == -1, use data till end
sthres - maximum length of bout duration of state $istate used for calculation. If bout duration > $sthres, only
use the bout up to $sthres seconds after bout onset.
peeg2 - if True, use EEG2 channel for spectrum analysis
pnorm - if True, normalize powerspectrum by dividing each frequency bin by its average power
over the whole EEG recording
single_mode - if True, plot each single mouse
fig_file - if specified save to given file
ci - parameter for seaborn.lineplot, if ci=='sd' plot standard deviation, for int values plot
confidence interval (e.g. ci=95 will plot the 95% confidence interval). Only works if there
is more than one mouse!
errorbars: if there are multiple mice, errorbars are computed across mice; if there are multiple
recordings of ONE mouse, errorbars are computed across recordings;
if there is just one recording, no errorbars are shown
@Return:
Pow - Dict[No laser = 0|Laser = 1][array], where array: mice x frequencies, if more than one mouse;
otherwise, array: recordings x frequencies
F - Frequencies
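Example call (base folder and recording names below are placeholders; NREM spectrum, laser vs. no laser):
Pow, F, df = sleep_spectrum(ppath, ['M1_010121n1', 'M2_010121n1'], istate=3, pmode=1, f_max=20, ci=95)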
"""
if type(recordings) != list:
recordings = [recordings]
Mice = {}
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in Mice:
Mice[idf] = Mouse(idf, rec, 'E')
else:
Mice[idf].add(rec)
mouse_order = []
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mouse_order:
mouse_order.append(idf)
# Spectra: Dict[mouse_id][laser_on|laser_off][list of powerspectrum_arrays]
Spectra = {}
Ids = list(Mice.keys())
for i in Ids:
Spectra[i] = {0:[], 1:[]}
for idf in mouse_order:
for rec in Mice[idf].recordings:
# load EEG
if sig_type =='EEG':
if not peeg2:
EEG = np.squeeze(so.loadmat(os.path.join(ppath, rec, 'EEG.mat'))['EEG']).astype('float')*conv
else:
EEG = np.squeeze(so.loadmat(os.path.join(ppath, rec, 'EEG2.mat'))['EEG2']).astype('float')*conv
elif sig_type == 'EMG':
if not peeg2:
EEG = np.squeeze(so.loadmat(os.path.join(ppath, rec, 'EMG.mat'))['EMG']).astype('float')*conv
else:
EEG = np.squeeze(so.loadmat(os.path.join(ppath, rec, 'EMG2.mat'))['EMG2']).astype('float')*conv
else:
pass
# load brain state
M,K = load_stateidx(ppath, rec)
# set brain states where K<0 to zero;
# this way they are effectively discarded
M[K<0] = 0
sr = get_snr(ppath, rec)
# calculate time window
#twin = int(np.round(sr * (1/fres))) * (1/sr)
twin = sr * (1/fres) * (1/sr)
# number of time bins for each time bin in spectrogram
nbin = int(np.round(sr) * 2.5)
# duration of time bin in spectrogram / brainstate
dt = nbin * 1/sr
nwin = np.round(twin*sr)
istart = int(np.round(tstart/dt))
if tend==-1:
iend = M.shape[0]
else:
iend = int(np.round(tend/dt))
istart_eeg = istart*nbin
iend_eeg = (iend-1)*nbin+1
M[np.where(M==5)]=2
# flatten out microarousals
seq = get_sequences(np.where(M==2)[0])
for s in seq:
if len(s)*dt <= ma_thr:
M[s] = 3
# get all sequences of state $istate
M = M[istart:iend]
seq = get_sequences(np.where(M==istate)[0])
EEG = EEG[istart_eeg:iend_eeg]
if pnorm:
pow_norm = power_spectrum(EEG, nwin, 1 / sr)[0]
if pmode == 1 or pmode == 2:
laser = load_laser(ppath, rec)[istart_eeg:iend_eeg]
(idxs, idxe) = laser_start_end(laser, SR=sr)
# downsample EEG time to spectrogram time
idxs = [int(i/nbin) for i in idxs]
idxe = [int(i/nbin) for i in idxe]
laser_idx = []
for (i,j) in zip(idxs, idxe):
laser_idx += list(range(i,j+1))
laser_idx = np.array(laser_idx)
if pmode == 1 or pmode == 2:
# first analyze frequencies not overlapping with laser
seq_nolsr = []
for s in seq:
s = np.setdiff1d(s, laser_idx)
if len(s) > 0:
q = get_sequences(s)
seq_nolsr += q
for s in seq_nolsr:
if len(s)*nbin >= nwin:
drn = (s[-1]-s[0])*dt
if drn > sthres:
# b is the end of segment used for power spectrum calculation;
# that is, the last index (in raw EEG) of the segment
b = (s[0] + int(np.round(sthres/dt)))*nbin
else:
b = int((s[-1]+1)*nbin)
sup = list(range(int(s[0]*nbin), b))
if sup[-1]>len(EEG):
sup = list(range(int(s[0]*nbin), len(EEG)))
if len(sup) >= nwin:
Pow, F = power_spectrum(EEG[sup], nwin, 1/sr)
if pnorm:
Pow = np.divide(Pow, pow_norm)
Spectra[idf][0].append(Pow)
# now analyze sequences overlapping with laser
seq_lsr = []
for s in seq:
if pmode == 1:
s = np.intersect1d(s, laser_idx)
if len(s) > 0:
q = get_sequences(s)
seq_lsr += q
if pmode == 2:
r = np.intersect1d(s, laser_idx)
if len(r) > 0 and s[0] in laser_idx:
seq_lsr += [s]
for s in seq_lsr:
# should not be necessary any more...
#if pmode == 1:
# s = np.intersect1d(s, laser_idx)
if len(s)*nbin >= nwin:
# calculate power spectrum
# upsample indices
# brain state time 0 1 2
# EEG time 0-999 1000-1999 2000-2999
drn = (s[-1]-s[0])*dt
if drn > sthres:
b = (s[0] + int(np.round(sthres/dt)))*nbin
else:
b = int((s[-1]+1)*nbin)
sup = list(range(int(s[0]*nbin), b))
if sup[-1]>len(EEG):
sup = list(range(int(s[0]*nbin), len(EEG)))
# changed line on 02/08/2019
if len(sup) >= nwin:
Pow, F = power_spectrum(EEG[sup], nwin, 1/sr)
if pnorm:
Pow = np.divide(Pow, pow_norm)
Spectra[idf][1].append(Pow)
# don't care about laser
if pmode == 0:
for s in seq:
if len(s)*nbin >= nwin:
drn = (s[-1]-s[0])*dt
if drn > sthres:
b = (s[0] + int(np.round(sthres/dt)))*nbin
else:
b = int((s[-1]+1)*nbin)
sup = list(range(int(s[0]*nbin), b))
if sup[-1]>len(EEG):
sup = list(range(int(s[0]*nbin), len(EEG)))
# changed line on 02/08/2019
if len(sup) >= nwin:
Pow, F = power_spectrum(EEG[sup], nwin, 1/sr)
if pnorm:
Pow = np.divide(Pow, pow_norm)
Spectra[idf][0].append(Pow)
mF = F.copy()
if sig_type == 'EEG':
if f_max > -1:
ifreq = np.where(F<=f_max)[0]
F = F[ifreq]
else:
f_max = F[-1]
else:
f_max = F[-1]
ifreq = range(0, F.shape[0])
Pow = {0:[], 1:[]}
if len(Ids)==1:
# only one mouse
#Pow[0] = np.array(Spectra[Ids[0]][0])
#Pow[1] = np.array(Spectra[Ids[0]][1])
Pow[0] = np.array([s[ifreq] for s in Spectra[Ids[0]][0]])
Pow[1] = np.array([s[ifreq] for s in Spectra[Ids[0]][1]])
else:
# several mice
Pow[0] = np.zeros((len(Ids),len(F)))
Pow[1] = np.zeros((len(Ids),len(F)))
i = 0
for m in Ids:
#Pow[0][i,:] = np.array(Spectra[m][0]).mean(axis=0)
tmp = [s[ifreq] for s in Spectra[m][0]]
Pow[0][i,:] = np.array(tmp).mean(axis=0)
if pmode == 1 or pmode == 2:
#Pow[1][i,:] = np.array(Spectra[m][1]).mean(axis=0)
tmp = [s[ifreq] for s in Spectra[m][1]]
Pow[1][i,:] = np.array(tmp).mean(axis=0)
i += 1
if pplot:
plt.ion()
plt.figure()
if sig_type == 'EEG':
ax = plt.axes([0.2, 0.15, 0.6, 0.7])
n = Pow[0].shape[0]
clrs = sns.color_palette("husl", len(mouse_order))
if pmode==1 or pmode==2:
if not single_mode:
a = Pow[1].mean(axis=0) - Pow[1].std(axis=0) / np.sqrt(n)
b = Pow[1].mean(axis=0) + Pow[1].std(axis=0) / np.sqrt(n)
plt.fill_between(F, a, b, alpha=0.5, color=laser_color)
plt.plot(F, Pow[1].mean(axis=0), color=laser_color, lw=2, label='With laser')
else:
for i in range(len(mouse_order)):
plt.plot(F, Pow[1][i,:], '--', color=clrs[i])
if not single_mode:
a = Pow[0].mean(axis=0)-Pow[0].std(axis=0)/np.sqrt(n)
b = Pow[0].mean(axis=0)+Pow[0].std(axis=0)/np.sqrt(n)
plt.fill_between(F, a, b, alpha=0.5, color='gray')
plt.plot(F, Pow[0].mean(axis=0), color='gray', lw=2, alpha=0.5, label='W/o laser')
else:
for i in range(len(mouse_order)):
plt.plot(F, Pow[0][i, :], label=mouse_order[i], color=clrs[i])
plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', ncol=len(mouse_order), frameon=False)
if pmode>=1 and not single_mode:
plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', frameon=False)
box_off(ax)
plt.xlim([0, f_max])
plt.xlabel('Freq. (Hz)')
plt.ylabel('Power ($\mathrm{\mu V^2}$)')
plt.show()
else:
# plot EMG amplitude
nmice = len(mouse_order)
clrs = sns.color_palette("husl", nmice)
# plot EMG
Ampl = {0:[], 1:[]}
# range of frequencies
mfreq = np.where((mF >= mu[0]) & (mF <= mu[1]))[0]
df = mF[1] - mF[0]
if pmode>=1:
for i in [0, 1]:
Ampl[i] = np.sqrt(Pow[i][:,mfreq].sum(axis=1)*df)
else:
Ampl[0] = np.sqrt(Pow[0][:,mfreq].sum(axis=1)*df)
if pmode>=1:
ax = plt.axes([0.2, 0.15, 0.4, 0.7])
ax.bar([0], Ampl[0].mean(), color='gray', label='w/o laser')
ax.bar([1], Ampl[1].mean(), color='blue', label='laser')
plt.legend(bbox_to_anchor=(0., 1.0, 1., .102), loc=3, mode='expand', frameon=False)
if len(Ids) == 1:
i=0
plt.plot([0,1], [np.mean(Ampl[0]), np.mean(Ampl[1])], color=clrs[i], label=mouse_order[i])
else:
for i in range(nmice):
plt.plot([0,1], [Ampl[0][i], Ampl[1][i]], color=clrs[i], label=mouse_order[i])
box_off(ax)
plt.ylabel('EMG Ampl. ($\mathrm{\mu V}$)')
ax.set_xticks([0,1])
ax.set_xticklabels(['', ''])
# some basic stats
#[tstats, p] = stats.ttest_rel(Ampl[0], Ampl[1])
#print("Stats for EMG amplitude: t-statistics: %.3f, p-value: %.3f" % (tstats, p))
if len(fig_file) > 0:
save_figure(fig_file)
# Use seaborn to plot powerspectra with confidence intervals across mice:
# At some point I will make this the standard code
vals = []
if len(mouse_order) > 0:
mi = 0
for m in mouse_order:
for i in range(len(F)):
if len(mouse_order) > 1:
vals.append([0, m, Pow[0][mi][i], F[i]])
else:
vals.append([0, m, Pow[0][:,i].mean(), F[i]])
if pmode >= 1:
if len(mouse_order) > 1:
vals.append([1, m, Pow[1][mi][i], F[i]])
else:
vals.append([1, m, Pow[1][:,i].mean(), F[i]])
mi += 1
df = pd.DataFrame(columns=['Lsr', 'Idf', 'Pow', 'Freq'], data=vals)
if pplot:
plt.figure()
ax = plt.subplot(111)
if pmode >= 1:
sns.lineplot(x='Freq', y='Pow', hue='Lsr', data=df, ci=ci, palette={0:'gray', 1:'blue'})
else:
sns.lineplot(x='Freq', y='Pow', data=df, ci=ci, palette=['gray'])
box_off(ax)
plt.xlim([0, f_max])
return Pow, F, df
def set_awake(M, MSP, freq, mu=[10, 100]):
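    """
    Relabel wake bins with high muscle tone as 'active wake' (state 5).
    For all wake bins (M==2), compute the EMG amplitude within the $mu frequency
    band from the EMG spectrogram $MSP; wake bins whose amplitude exceeds
    mean + 1 std of the wake amplitude distribution are set to state 5.
    :param M: hypnogram (1=REM, 2=Wake, 3=NREM)
    :param MSP: EMG spectrogram (frequencies x time bins), aligned with $M
    :param freq: vector of frequencies corresponding to the rows of $MSP
    :param mu: frequency band used for the EMG amplitude calculation
    :return: hypnogram with high-EMG wake bins set to 5
    """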
imu = np.where((freq>=mu[0]) & (freq<=mu[1]))[0]
df = freq[1]-freq[0]
widx = np.where(M==2)[0]
ampl = np.sqrt(MSP[imu, :].sum(axis=0)*df)
wampl = ampl[widx]
thr = wampl.mean() + 1*wampl.std()
awk_idx = widx[np.where(wampl>thr)[0]]
#qwk_idx = np.setdiff1d(widx, awk_idx)
M[awk_idx] = 5
return M
def sleep_spectrum_simple(ppath, recordings, istate=1, tstart=0, tend=-1, fmax=-1,
mu=[10,100], ci='sd', pmode=1, pnorm = False, pplot=True,
harmcs=0, harmcs_mode='iplt', iplt_level=0, peeg2=False,
pemg2=False, exclusive_mode=0, csv_files=[]):
"""
    calculate the EEG power spectrum using the pre-calculated spectrogram saved in ppath/sp_"name".mat
:param ppath: base folder
:param recordings: list of recordings
:param istate: brain state for which power spectrum is computed.
1-REM, 2-Wake, 3-NREM, 5-"active wake"
:param tstart: use EEG/EMG starting from time point tstart [seconds]
:param tend: use EEG/EMG up to time point tend [seconds]; if tend=-1, use EEG/EMG till the end
:param fmax: maximum frequency shown on x-axis
    :param ci: 'sd' | int between 0 and 100 specifying the confidence interval
:param pmode: mode:
pmode == 0, just plot power spectrum for state istate and don't care about laser
pmode == 1, compare state during laser with baseline outside laser interval
:param pnorm: if True, normalize spectrogram by dividing each frequency band by its average power
:param pplot: if True, plot figure
    # What to do with episodes partially overlapping with laser
:param exclusive_mode: if > 0, apply some exception for episodes of state $istate,
that overlap with laser. Say there's a REM period that only partially overlaps with laser.
If $exclusive_mode == 1, then do not use the part w/o laser for the 'no laser' condition;
This can be relevant for closed-loop stimulation: The laser turns on after the
spontaneous onset of REM sleep. So, the initial part of REM sleep would be interpreted
as 'no laser', potentially inducing a bias, because at the very beginning the REM spectrum
looks different than later on.
If $exclusive_mode == 2, then add the part w/o laser to the 'with laser' condition.
    If $exclusive_mode == 0, then interpret the part w/o laser as 'w/o laser'.
# interpolating/discarding harmonics
:param harmcs, harmcs_mode, iplt_level: if $harmcs > 0 and $harmcs_mode == 'emg',
remove all harmonics of base frequency $harmcs, from the frequencies used
for EMG amplitude calculation; do nothing for harmonics in EEG
if $harmcs > 0 and $harmcs_mode == 'iplt', interpolate all harmonics by substituting the power
at the harmonic by a sum of the neighboring frequencies. If $iplt_level == 1, only
take one neighboring frequency below and above the harmonic,
if $iplt_level == 2, use the two neighboring frequencies above and below for the
interpolation
    :param peeg2: if True, use EEG2.mat instead of EEG.mat for the EEG power spectrum calculation
    :param pemg2: if True, use EMG2 for the EMG amplitude calculation
:param csv_files: if two file names are provided, the results for EEG power spectrum
and EMG amplitude are saved to the csv files. The EEG powerspectrum is
saved to the first file.
:return (ps_mx, freq, df, df_amp)
ps_mx: dict: 0|1 -> np.array(no. mice x frequencies)
freq: vector with frequencies
df: DataFrame with EEG powerspectrum; columns: 'Idf', 'Freq', 'Pow', 'Lsr'
df_amp: DataFrame with EMG amplitude; columns: 'Idf', 'Amp', 'Lsr'
"""
def _interpolate_harmonics(SP, freq, f_max, harmcs, iplt_level):
df = freq[2]-freq[1]
for h in np.arange(harmcs, f_max, harmcs):
i = np.argmin(np.abs(freq - h))
if np.abs(freq[i] - h) < df and h != 60:
if iplt_level == 2:
SP[i,:] = (SP[i-2:i,:] + SP[i+1:i+3,:]).mean(axis=0) * 0.5
elif iplt_level == 1:
SP[i,:] = (SP[i-1,:] + SP[i+1,:]) * 0.5
else:
pass
return SP
# def _interpolate_harmonics(SP, freq, f_max, harmcs, iplt_level):
# df = freq[2]-freq[1]
# for h in np.arange(harmcs, f_max, harmcs):
# i = np.argmin(np.abs(freq - h))
# if np.abs(freq[i] - h) < df and h != 60:
# SP[i,:] = (SP[i-iplt_level:i,:] + SP[i+1:i+1+iplt_level,:]).mean(axis=0) * 0.5
# return SP
mice = []
for rec in recordings:
idf = re.split('_', rec)[0]
if not idf in mice:
mice.append(idf)
ps_mice = {0: {m:[] for m in mice}, 1: {m:[] for m in mice}}
amp_mice = {0: {m:0 for m in mice}, 1: {m:0 for m in mice}}
count_mice = {0: {m:0 for m in mice}, 1: {m:0 for m in mice}}
data = []
for rec in recordings:
print(rec)
emg_loaded = False
# load brain state
idf = re.split('_', rec)[0]
M = load_stateidx(ppath, rec)[0]
sr = get_snr(ppath, rec)
# number of time bins for each time bin in spectrogram
nbin = int(np.round(sr) * 2.5)
dt = nbin * (1/sr)
# determine start and end of time frame used for calculation
istart = int(np.round(tstart / dt))
if tend > -1:
iend = int(np.round(tend / dt))
else:
iend = len(M)
istart_eeg = istart*nbin
#iend_eeg = (iend-1)*nbin+1
iend_eeg = iend*nbin
M = M[istart:iend]
if istate == 5:
tmp = so.loadmat(os.path.join(ppath, rec, 'msp_%s.mat' % rec), squeeze_me=True)
if not pemg2:
MSP = tmp['mSP'][:,istart:iend]
freq_emg = tmp['freq']
else:
MSP = tmp['mSP2'][:,istart:iend]
freq_emg = tmp['freq']
emg_loaded = True
            # M and MSP are already restricted to [istart:iend] above
            M = set_awake(M, MSP, freq_emg, mu=mu)
if type(istate) == int:
idx = np.where(M==istate)[0]
else:
idx = np.array([], dtype='int')
for s in istate:
idx = np.concatenate((idx, np.where(M==s)[0]))
# load laser
if pmode == 1:
lsr = load_laser(ppath, rec)
idxs, idxe = laser_start_end(lsr[istart_eeg:iend_eeg])
# downsample EEG time to spectrogram time
idxs = [int(i/nbin) for i in idxs]
idxe = [int(i/nbin) for i in idxe]
laser_idx = []
for (i,j) in zip(idxs, idxe):
laser_idx += range(i,j+1)
laser_idx = np.array(laser_idx)
idx_lsr = np.intersect1d(idx, laser_idx)
idx_nolsr = np.setdiff1d(idx, laser_idx)
if exclusive_mode > 0 and exclusive_mode < 3:
#rm_idx = []
rem_seq = get_sequences(np.where(M==1)[0])
for s in rem_seq:
d = np.intersect1d(s, idx_lsr)
if len(d) > 0:
# that's the part of the REM period with laser
# that does not overlap with laser
drm = np.setdiff1d(s, d)
idx_nolsr = np.setdiff1d(idx_nolsr, drm)
if exclusive_mode == 2:
idx_lsr = np.union1d(idx_lsr, drm)
if exclusive_mode == 3:
rem_trig = so.loadmat(os.path.join(ppath, rec, 'rem_trig_%s.mat'%rec), squeeze_me=True)['rem_trig']
rem_trig = downsample_vec(rem_trig, nbin)
rem_trig[np.where(rem_trig>0)] = 1
trig_idx = np.where(rem_trig==1)[0]
idx_lsr = np.intersect1d(trig_idx, idx_lsr)
idx_nolsr = np.intersect1d(trig_idx, idx_nolsr)
######################################################################
# load EEG spectrogram
tmp = so.loadmat(os.path.join(ppath, rec, 'sp_%s.mat' % rec), squeeze_me=True)
if not peeg2:
SP = tmp['SP'][:,istart:iend]
else:
SP = tmp['SP2'][:, istart:iend]
if pnorm:
sp_mean = np.mean(SP, axis=1)
SP = np.divide(SP, np.tile(sp_mean, (SP.shape[1], 1)).T)
freq = tmp['freq']
df = freq[1]-freq[0]
if fmax > -1:
ifreq = np.where(freq <= fmax)[0]
freq = freq[ifreq]
SP = SP[ifreq,:]
# load EMG spectrogram
if not emg_loaded:
tmp = so.loadmat(os.path.join(ppath, rec, 'msp_%s.mat' % rec), squeeze_me=True)
if not pemg2:
MSP = tmp['mSP'][:,istart:iend]
freq_emg = tmp['freq']
else:
MSP = tmp['mSP2'][:,istart:iend]
freq_emg = tmp['freq']
imu = np.where((freq_emg>=mu[0]) & (freq_emg<=mu[-1]))[0]
if harmcs > 0 and harmcs_mode == 'iplt':
SP = _interpolate_harmonics(SP, freq, fmax, harmcs, iplt_level)
MSP = _interpolate_harmonics(MSP, freq, fmax, harmcs, iplt_level)
if harmcs > 0 and harmcs_mode == 'emg':
harm_freq = np.arange(0, freq_emg.max(), harmcs)
for h in harm_freq:
imu = np.setdiff1d(imu, imu[np.where(np.round(freq_emg[imu], decimals=1)==h)[0]])
tmp = 0
for i in imu:
tmp += MSP[i,:] * (freq_emg[i]-freq_emg[i-1])
emg_ampl = np.sqrt(tmp)
else:
emg_ampl = np.sqrt(MSP[imu,:].sum(axis=0)*df)
###################################################
if pmode == 1:
count_mice[0][idf] += len(idx_nolsr)
count_mice[1][idf] += len(idx_lsr)
ps_lsr = SP[:,idx_lsr].sum(axis=1)
ps_nolsr = SP[:,idx_nolsr].sum(axis=1)
ps_mice[1][idf].append(ps_lsr)
ps_mice[0][idf].append(ps_nolsr)
amp_mice[1][idf] += emg_ampl[idx_lsr].sum()
amp_mice[0][idf] += emg_ampl[idx_nolsr].sum()
else:
count_mice[0][idf] += len(idx)
ps_nolsr = SP[:,idx].sum(axis=1)
ps_mice[0][idf].append(ps_nolsr)
amp_mice[0][idf] += emg_ampl[idx].sum()
lsr_cond = []
if pmode == 0:
lsr_cond = [0]
else:
lsr_cond = [0,1]
ps_mx = {0:[], 1:[]}
amp_mx = {0:[], 1:[]}
for l in lsr_cond:
mx = np.zeros((len(mice), len(freq)))
amp = np.zeros((len(mice),))
for (i,idf) in zip(range(len(mice)), mice):
mx[i,:] = np.array(ps_mice[l][idf]).sum(axis=0) / count_mice[l][idf]
amp[i] = amp_mice[l][idf] / count_mice[l][idf]
ps_mx[l] = mx
amp_mx[l] = amp
# transform data arrays to pandas dataframe
data_nolsr = list(np.reshape(ps_mx[0], (len(mice)*len(freq),)))
amp_freq = list(freq)*len(mice)
amp_idf = reduce(lambda x,y: x+y, [[b]*len(freq) for b in mice])
if pmode == 1:
data_lsr = list(np.reshape(ps_mx[1], (len(mice)*len(freq),)))
list_lsr = ['yes']*len(freq)*len(mice) + ['no']*len(freq)*len(mice)
data = [[a,b,c,d] for (a,b,c,d) in zip(amp_idf*2, amp_freq*2, data_lsr+data_nolsr, list_lsr)]
else:
list_lsr = ['no']*len(freq)*len(mice)
data = [[a,b,c,d] for (a,b,c,d) in zip(amp_idf, amp_freq, data_nolsr, list_lsr)]
df = pd.DataFrame(columns=['Idf', 'Freq', 'Pow', 'Lsr'], data=data)
df_amp = pd.DataFrame(columns=['Idf', 'Amp', 'Lsr'])
if pmode == 1:
df_amp['Idf'] = mice*2
df_amp['Amp'] = list(amp_mx[0]) + list(amp_mx[1])
df_amp['Lsr'] = ['no'] * len(mice) + ['yes'] * len(mice)
else:
df_amp['Idf'] = mice
df_amp['Amp'] = list(amp_mx[0])
df_amp['Lsr'] = ['no'] * len(mice)
# plot figure
if pplot:
plt.ion()
plt.figure()
sns.set_style('ticks')
#sns.lineplot(data=df, x='Freq', y='Pow', hue='Lsr', ci=ci, palette={'yes':'blue', 'no':'gray'})
if ci == 'sem':
c = np.nanstd(ps_mx[0], axis=0) / np.sqrt(ps_mx[0].shape[0])
m = np.nanmean(ps_mx[0], axis=0)
plt.plot(freq, m, color='gray')
plt.fill_between(freq, m-c, m+c, color='gray', alpha=0.2)
c = np.nanstd(ps_mx[1], axis=0) / np.sqrt(ps_mx[1].shape[0])
m = np.nanmean(ps_mx[1], axis=0)
plt.plot(freq, m, color='blue')
plt.fill_between(freq, m-c, m+c, color='blue', alpha=0.2)
else:
sns.lineplot(data=df, x='Freq', y='Pow', hue='Lsr', ci=ci, palette={'yes': 'blue', 'no': 'gray'})
sns.despine()
plt.xlim([freq[0], freq[-1]])
plt.xlabel('Freq. (Hz)')
if not pnorm:
plt.ylabel('Spectral density ($\mathrm{\mu V^2/Hz}$)')
else:
plt.ylabel('Norm. power')
plt.show()
plt.figure()
plt.axes([0.1, 0.1, 0.4, 0.8])
if ci == 'sem':
ci = 68
sns.barplot(data=df_amp, x='Lsr', y='Amp', palette={'yes':'blue', 'no':'gray'}, ci=ci)
#sns.swarmplot(data=df_amp, x='Lsr', y='Amp', hue='Idf', palette='husl')
sns.lineplot(data=df_amp, x='Lsr', y='Amp', hue='Idf', palette='husl')
plt.ylabel('Amp ($\mathrm{\mu V}$)')
sns.despine()
plt.show()
if len(csv_files) > 0:
df.to_csv(csv_files[0], index=False)
        df_amp.to_csv(csv_files[1], index=False)
return ps_mx, freq, df, df_amp
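# Usage sketch for sleep_spectrum_simple(); the folder and recording names below
# are placeholders (assumptions), not part of the original code base:
def _example_sleep_spectrum_simple(ppath='/data/sleep_recordings'):
    # compute the NREM (istate=3) EEG spectrum and EMG amplitude, comparing
    # laser vs. no-laser bins, and write both results to csv files
    recordings = ['M1_010121n1', 'M2_010121n1']
    ps_mx, freq, df, df_amp = sleep_spectrum_simple(ppath, recordings, istate=3,
                                                    pmode=1, ci='sd', pnorm=False,
                                                    pplot=False,
                                                    csv_files=['nrem_spec.csv', 'nrem_emg.csv'])
    return ps_mx, freq, df, df_amp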
def _detect_troughs(signal, thr):
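    """
    Helper for phasic_rem(): return the indices of local minima ('troughs') of
    $signal that lie below the threshold $thr; a sample i is a trough if
    signal[i-1] > signal[i], signal[i] <= signal[i+1], and signal[i] < thr.
    """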
lidx = np.where(signal[0:-2] > signal[1:-1])[0]
ridx = np.where(signal[1:-1] <= signal[2:])[0]
thidx = np.where(signal[1:-1] < thr)[0]
sidx = np.intersect1d(lidx, np.intersect1d(ridx, thidx))+1
return sidx
def phasic_rem(ppath, name, min_dur=2.5, pplot=False, plaser=False, nfilt=11):
"""
Detect phasic REM episodes using the algorithm described in
<NAME> et al. 2021, which comes from
https://www.nature.com/articles/nn.2894 Mizuseki et al. 2011
Parameters
----------
    ppath : str
        base recording folder
    name : str
        name of the recording
    min_dur : float, optional
        minimum duration [s] of a REM episode to be considered. The default is 2.5.
    pplot : bool, optional
        if True, plot a summary figure of the detection. The default is False.
    plaser : bool, optional
        if True, only use REM states w/o laser to set thresholds for the algorithm
    nfilt : int, optional
        length of the box filter used to smooth the inter-trough intervals. The default is 11.
Returns
-------
phrem : dict
dict: start index of each REM episode in hypnogram --> all sequences of phasic REM episodes;
note that phREM sequences are represented as indices in the raw EEG
"""
from scipy.signal import hilbert
M = load_stateidx(ppath, name)[0]
EEG = so.loadmat(os.path.join(ppath, name, 'EEG.mat'), squeeze_me=True)['EEG']
neeg = EEG.shape[0]
seq = get_sequences(np.where(M == 1)[0])
rem_idx = []
for s in seq:
rem_idx += list(s)
sr = get_snr(ppath, name)
nbin = int(np.round(2.5*sr))
sdt = nbin*(1/sr)
laser_idx_bs = []
if plaser:
# get laser indices as list
lsr = load_laser(ppath, name)
idxs, idxe = laser_start_end(lsr)
idxs = [int(i/nbin) for i in idxs]
idxe = [int(i/nbin) for i in idxe]
for (si,sj) in zip(idxs, idxe):
laser_idx_bs += list(range(si,sj+1))
w1 = 5.0 / (sr/2)
w2 = 12.0 / (sr/2)
filt = np.ones((nfilt,))
filt = filt / filt.sum()
trdiff_list = []
tridx_list = []
rem_eeg = np.array([])
eeg_seq = {}
sdiff_seq = {}
tridx_seq = {}
# Collect for each REM sequence the smoothed inter-trough intervals
# and EEG amplitudes as well as the indices of the troughs.
seq = [s for s in seq if len(s)*sdt>=min_dur]
for s in seq:
ta = s[0]*nbin
#[0 1 2 3]
#[0-2500 2500-5000 5000-7500 7500 9999]
tb = (s[-1]+1)*nbin
tb = np.min((tb, neeg))
eeg_idx = np.arange(ta, tb)
eeg = EEG[eeg_idx]
if len(eeg)*(1/sr) <= min_dur:
continue
eegh = my_bpfilter(eeg, w1, w2)
res = hilbert(eegh)
instantaneous_phase = np.angle(res)
amp = np.abs(res)
# trough indices
tridx = _detect_troughs(instantaneous_phase, -3)
        # Alternative that does not seem to work that well:
#tridx = np.where(np.diff(np.sign(np.diff(eegh))))[0]+1
# differences between troughs
trdiff = np.diff(tridx)
# smoothed trough differences
sdiff_seq[s[0]] = np.convolve(trdiff, filt, 'same')
# dict of trough differences for each REM period
tridx_seq[s[0]] = tridx
eeg_seq[s[0]] = amp
rem_idx = []
for s in seq:
rem_idx += list(s)
if plaser:
rem_idx = np.setdiff1d(rem_idx, laser_idx_bs)
seq = get_sequences(rem_idx)
seq = [s for s in seq if len(s)*sdt>=min_dur]
# collect again smoothed inter-trough differences and amplitude;
# but this time concat the data to one long vector each (@trdiff_sm and rem_eeg)
for s in seq:
ta = s[0]*nbin
#tb = s[-1]*(nbin+1)
tb = (s[-1]+1) * nbin
tb = np.min((tb, neeg))
eeg_idx = np.arange(ta, tb)
eeg = EEG[eeg_idx]
if len(eeg)*(1/sr) <= min_dur:
continue
eegh = my_bpfilter(eeg, w1, w2)
res = hilbert(eegh)
instantaneous_phase = np.angle(res)
amp = np.abs(res)
# trough indices
tridx = _detect_troughs(instantaneous_phase, -3)
# alternative version:
#tridx = np.where(np.diff(np.sign(np.diff(eegh))))[0]+1
# differences between troughs
tridx_list.append(tridx+ta)
trdiff = np.diff(tridx)
trdiff_list += list(trdiff)
rem_eeg = np.concatenate((rem_eeg, amp))
trdiff = np.array(trdiff_list)
trdiff_sm = np.convolve(trdiff, filt, 'same')
# potential candidates for phasic REM:
# the smoothed difference between troughs is less than
# the 10th percentile:
thr1 = np.percentile(trdiff_sm, 10)
# the minimum difference in the candidate phREM is less than
# the 5th percentile
thr2 = np.percentile(trdiff_sm, 5)
# the peak amplitude is larger than the mean of the amplitude
# of the REM EEG.
thr3 = rem_eeg.mean()
phrem = {}
for si in tridx_seq:
offset = nbin*si
tridx = tridx_seq[si]
sdiff = sdiff_seq[si]
eegh = eeg_seq[si]
idx = np.where(sdiff <= thr1)[0]
cand = get_sequences(idx)
#thr4 = np.mean(eegh)
for q in cand:
dur = ( (tridx[q[-1]]-tridx[q[0]]+1)/sr ) * 1000
if dur > 900 and np.min(sdiff[q]) < thr2 and np.mean(eegh[tridx[q[0]]:tridx[q[-1]]+1]) > thr3:
a = tridx[q[0]] + offset
b = tridx[q[-1]] + offset
idx = range(a,b+1)
if si in phrem:
phrem[si].append(idx)
else:
phrem[si] = [idx]
# make plot:
if pplot:
nsr_seg = 1 # before 1
# overlap of consecutive FFT windows
perc_overlap = 0.8
file_sp = os.path.join(ppath, name, 'sp_fine_%s.mat' % name)
if (not os.path.isfile(file_sp)):
freq, t, SP = scipy.signal.spectrogram(EEG, fs=sr, window='hann', nperseg=int(nsr_seg * sr),
noverlap=int(nsr_seg * sr * perc_overlap))
# for nsr_seg=1 and perc_overlap = 0.9,
# t = [0.5, 0.6, 0.7 ...]
dt = t[1]-t[0]
# Note: sp_name.mat includes keys: SP, SP2, freq, dt, t
so.savemat(file_sp, {'SP':SP, 'SP2':[], 'dt':dt, 'freq':freq, 't':t})
tmp = so.loadmat(file_sp, squeeze_me=True)
SP = tmp['SP']
freq = tmp['freq']
tbs = tmp['t']
dt = tmp['dt']
plt.figure()
# plot spectrogram
ax = plt.subplot(512)
ifreq = np.where(freq <= 20)[0]
ax.pcolorfast(tbs, freq[ifreq], SP[ifreq,:], vmin=0, vmax=5000, cmap='jet')
plt.ylabel('Freq. (Hz)')
# plot hypnogram
axes_brs = plt.subplot(511, sharex=ax)
cmap = plt.cm.jet
my_map = cmap.from_list('brs', [[0, 0, 0], [0, 1, 1], [0.6, 0, 1], [0.8, 0.8, 0.8]], 4)
tmp = axes_brs.pcolorfast(tbs, [0, 1], np.array([M]), vmin=0, vmax=3)
tmp.set_cmap(my_map)
axes_brs.axis('tight')
_despine_axes(axes_brs)
plt.xlim([tbs[0], tbs[-1]])
# plot gamma power
plt.subplot(513, sharex=ax)
gamma = [50, 90]
df = freq[1] - freq[0]
igamma = np.where((freq >= gamma[0]) & (freq <= gamma[1]))[0]
pow_gamma = SP[igamma,:].sum(axis=0) * df
plt.plot(tbs, pow_gamma)
plt.xlim([tbs[0], tbs[-1]])
plt.ylabel(r'$\gamma$')
# plot theta/delta
#plt.subplot(514, sharex=ax)
# theta = [6, 12]
# delta = [0.5, 4.5]
# itheta = np.where((freq >= theta[0]) & (freq <= theta[1]))[0]
# idelta = np.where((freq >= delta[0]) & (freq <= delta[1]))[0]
# pow_theta = SP[itheta,:].sum(axis=0) * df
# pow_delta = SP[idelta,:].sum(axis=0) * df
# plt.plot(tbs, np.divide(pow_theta, pow_delta))
# plt.xlim([tbs[0], tbs[-1]])
# plot raw EEG; downsample for faster plotting
plt.subplot(515, sharex=axes_brs)
EEGdn = downsample_vec(EEG, 4)
teeg = np.arange(0, len(EEG)) * (1/sr)
teeg_dn = np.arange(0, len(EEGdn)) * ((1/sr)*4)
for tr in tridx_list:
idx = range(tr[0], tr[-1]+1)
idx_dn = [int(i/4) for i in idx]
eeg = EEGdn[idx_dn]
plt.plot(teeg_dn[idx_dn], eeg, 'k')
plt.xlim([0, teeg[-1]])
for si in phrem:
ta = si*nbin
idx_list = phrem[si]
eegh = eeg_seq[si]
sdiff = sdiff_seq[si]
# plot amplitude
plt.plot(teeg[ta:ta+len(eegh)], eegh, 'g')
# plot threshold for amplitude
plt.plot([teeg[ta], teeg[ta+len(eegh)-1]], [thr3, thr3], 'r--')
for idx in idx_list:
a = idx[0]
b = idx[-1]
a = int(a/4)
b = int(b/4)
plt.plot(teeg_dn[range(a,b+1)], EEGdn[a:b+1], 'r')
plt.ylabel('EEG')
        # plot smoothed inter-trough intervals
plt.subplot(514, sharex=ax)
for si in phrem:
ta = si*nbin
tridx = tridx_seq[si] + ta
sdiff = sdiff_seq[si]
plt.plot(teeg[tridx[:-1]], sdiff, 'k')
plt.plot(teeg[[tridx[0], tridx[-1]]], [thr2, thr2], 'r')
plt.plot(teeg[[tridx[0], tridx[-1]]], [thr1, thr1], 'b')
plt.ylabel('ITIs')
return phrem
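# Usage sketch for phasic_rem(); folder and recording name are placeholders,
# not part of the original code base:
def _example_phasic_rem(ppath='/data/sleep_recordings', name='M1_010121n1'):
    # detect phasic REM events and count how many were found per REM episode
    phrem = phasic_rem(ppath, name, min_dur=2.5, pplot=False, plaser=False)
    counts = {rem_start: len(events) for rem_start, events in phrem.items()}
    return counts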
# PLOTTING FUNCTIONALITY #####################################################
def plt_lineplot(df, subject, xcol, ycol, ax=-1, color='blue', xlabel='', ylabel='', lw=1):
subjects = list(df[subject].unique())
data = []
for s in subjects:
x = df[df[subject]==s][xcol]
y = df[df[subject]==s][ycol]
data.append(list(y))
data = np.array(data)
if ax == -1:
plt.figure()
ax = plt.subplot(111)
m = np.nanmean(data, axis=0)
s = np.nanstd(data, axis=0)/np.sqrt(len(subjects))
ax.plot(x, m, color=color, lw=lw)
ax.fill_between(x, m+s, m-s, color=color, alpha=0.2)
if len(ylabel) > 0:
plt.ylabel(ylabel)
else:
plt.ylabel(str(ycol))
if len(xlabel) > 0:
plt.xlabel(xlabel)
else:
plt.xlabel(str(xcol))
def plt_lineplot_byhue(df, subject, xcol, ycol, hue, ax=-1, color='blue', xlabel='', ylabel='', lw=1):
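    # NOTE: the $hue argument is currently accepted but not used below; the
    # function plots the subject-averaged time course exactly like plt_lineplot().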
subjects = list(df[subject].unique())
data = []
for s in subjects:
x = df[df[subject]==s][xcol]
y = df[df[subject]==s][ycol]
data.append(list(y))
data = np.array(data)
if ax == -1:
plt.figure()
ax = plt.subplot(111)
m = np.nanmean(data, axis=0)
s = np.nanstd(data, axis=0)/np.sqrt(len(subjects))
ax.plot(x, m, color=color, lw=lw)
ax.fill_between(x, m+s, m-s, color=color, alpha=0.2)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
### TRANSITION ANALYSIS #########################################################
def transition_analysis(ppath, rec_file, pre, laser_tend, tdown, large_bin,
backup='', stats_mode=0, after_laser=0, tstart=0, tend=-1,
bootstrap_mode=0, paired_stats=True, ma_thr=0, ma_rem_exception=True,
bsl_shading=False, overlap_mode=False, fig_file='', fontsize=12, nboot=1000, seed=-1):
"""
Transition analysis
Example call:
sleepy.transition_analysis(ppath, rec_list, 120, 120, 20, 60, bootstrap_mode=0)
:param ppath: base recording folder
:param rec_file: file OR list of recordings
:param pre: time before laser onset [s]
:param laser_tend: duration of laser stimulation
:param tdown: time bin for brainstate (>= 2.5 s)
:param large_bin: >= tdown; average large_bin/tdown consecutive transition probabilities
:param backup: possible backup base folder, e.g. on an external hard drive
:param stats_mode: int; The transition probability during the baseline period is calculated
either by averaging over individual transition probabilities
during laser and baseline period (stats_mode == 1);
or by calculating a markov matrix on its own (at large_bin resolution)
        for laser and baseline period (stats_mode == 0)
:param after_laser: float, exclude $after_laser seconds after laser offset for
baseline calculation
    :param tstart: only consider laser trials starting after tstart seconds
    :param tend: only consider laser trials starting before tend seconds
:param bootstrap_mode: default=0
bootstrap_mode == 0: Take inter-mouse variance and inter-trial variance (of each mouse) into account.
That is, bootstrapping re-models the variance expected when re-doing the same
experimental design (same mouse number and total trial number).
To account for potentially different number of trials per mouse, resample the data
during each iteration the following way: Assume that there are n laser trials from m mice;
        randomly select (with replacement) ntrial mice; then select from each mouse randomly one trial.
bootstrap_mode == 1: Only take inter-trial variance (of each mouse) into account; That is,
bootstrapping models the variance expected when redoing the experiment
with exactly the same mice.
:param paired_stats, boolean; if True, perform paired test between baseline and laser interval.
:param ma_thr: if > 0, set wake periods < ma_thr to NREM.
:param ma_rem_exception: if True, leave a MA after REM as wake.
:param bsl_shading: if True, plot error shading for red baseline
    :param overlap_mode, if True also include the last fine bin within a large bin for
the transition probability calculation. Say 1 large bin contains 3 time bins:
|b1 b2 b3 | b4 b5 b6
Then if overlap_mode == False, the transition probability for the first
large bin will only include pairs of time bin b1 to b2, b2 to b3, but not b3 to b4.
If overlap_mode == True, then transition probability for the first bin
will include also the 3rd fine bin, so
b1 to b2, b2 to b3, and b3 to b4.
:param fig_file, if file name specified, save figure
:param fontsize, if specified, set fontsize to given value
:param nboot, number of bootstrap iterations
:param seed, if >-1 re-set the random generator using the given seed
:return: dict, transitions id --> 3 x 3 x time bins np.array
"""
if seed > -1:
rand = np.random.RandomState(seed)
print('set seed')
else:
import numpy.random as rand
if type(rec_file) == str:
E = load_recordings(ppath, rec_file)[1]
else:
E = rec_file
post = pre + laser_tend
# set path for each recording: either ppath or backup
rec_paths = {}
mouse_ids = {}
for m in E:
idf = re.split('_', m)[0]
if len(backup) == 0:
rec_paths[m] = ppath
else:
if os.path.isdir(os.path.join(ppath, m)):
rec_paths[m] = ppath
else:
rec_paths[m] = backup
if not idf in mouse_ids:
mouse_ids[idf] = 1
mouse_ids = list(mouse_ids.keys())
# Dict: Mouse_id --> all trials of this mouse
MouseMx = {idf:[] for idf in mouse_ids}
for m in E:
        trials = _whole_mx(rec_paths[m], m, pre, post, tdown, tstart=tstart, tend=tend, ma_thr=ma_thr, ma_rem_exception=ma_rem_exception)
idf = re.split('_', m)[0]
MouseMx[idf] += trials
ntdown = len(trials[0])
for idf in mouse_ids:
MouseMx[idf] = np.array(MouseMx[idf])
# dict mouse_id --> number of trials
num_trials = {k:len(MouseMx[k]) for k in MouseMx}
# number of all trials
ntrials = sum(list(num_trials.values()))
# number of mice
nmice = len(mouse_ids)
# Markov Computation & Bootstrap
# time axis:
t = np.linspace(-pre, post-large_bin, int((pre+post)/large_bin))
# to make sure that the bar for block 0 to $large_bin is centered around $large_bin/2
# such that the bars left end touches 0 s
t += large_bin/2.0
dt = t[1]-t[0]
if tdown == large_bin:
# the first bin that includes 0s (i.e. that touches the laser),
# should be centered around 0s
t += dt/2
tfine = np.linspace(-pre, post-tdown, int((pre+post)/tdown))
ilsr_fine = np.where((tfine>=0) & (tfine<laser_tend))[0]
tmp = np.where((tfine >= 0) & (tfine < (laser_tend + after_laser)))[0]
ibase_fine = np.setdiff1d(np.arange(0, len(tfine)), tmp)
### indices during and outside laser stimulation
# indices of large bins during laser
ilsr = np.where((t>=0) & (t<laser_tend))[0]
tmp = np.where((t>=0) & (t<(laser_tend+after_laser)))[0]
# indices of large bins outsize laser (and $after_laser)
ibase = np.setdiff1d(np.arange(0, len(t)), tmp)
# number of large time bins
nseq = len(t)
# states
M = dict()
Base = dict()
Laser = dict()
states = {1:'R', 2:'W', 3:'N'}
for si in range(1,4):
for sj in range(1,4):
id = states[si] + states[sj]
M[id] = np.zeros((nboot, nseq))
Base[id] = np.zeros((nboot,))
Laser[id] = np.zeros((nboot,))
# that's the matrix used for computation in each bootstrap iteration
if bootstrap_mode == 1:
# each mouse contributes the same number of trials
mouse_trials = int(np.mean(list(num_trials.values())))
MXsel = np.zeros((mouse_trials*len(mouse_ids), ntdown))
else:
MXsel = np.zeros((ntrials, ntdown))
for b in range(nboot):
if bootstrap_mode == 1:
i = 0
for idf in mouse_ids:
num = num_trials[idf]
itrials = rand.randint(0, num, (mouse_trials,))
sel = MouseMx[idf][itrials,:]
MXsel[i*mouse_trials:(i+1)*mouse_trials,:] = sel
i += 1
else:
irand_mice = rand.randint(0, nmice, ntrials)
i=0
for j in irand_mice:
idf = mouse_ids[j]
itrial = rand.randint(0, num_trials[idf])
MXsel[i,:] = MouseMx[idf][itrial,:]
i+=1
# calculate average laser and baseline transition probability to have two values to compare
# for statistics
if stats_mode == 0:
baseline = complete_transition_matrix(MXsel, ibase_fine)
lsr = complete_transition_matrix(MXsel, ilsr_fine)
        # calculate actual transition probabilities
if not int(large_bin/tdown) == 1:
MXb = build_markov_matrix_blocks(MXsel, tdown, large_bin, overlap_mode=overlap_mode)
else:
MXb = build_markov_matrix_seq(MXsel)
for si in states:
for sj in states:
id = states[si] + states[sj]
M[id][b,:] = np.squeeze(MXb[si-1, sj-1,:])
if stats_mode == 0:
Base[id][b] = baseline[si-1, sj-1]
Laser[id][b] = lsr[si-1, sj-1]
if stats_mode == 1:
for si in states:
for sj in states:
id = states[si] + states[sj]
Base[id] = np.mean(M[id][:,ibase], axis=1)
Laser[id] = np.mean(M[id][:, ilsr], axis=1)
# plotting
# M_us stores the non-sorted transition probability matrix M
M_us = {}
for si in states.keys():
for sj in states.keys():
id = states[si] + states[sj]
M_us[id] = M[id].copy()
alpha = 0.05
plt.ion()
set_fontsize(fontsize)
plt.figure(figsize=(8,6))
for si in states.keys():
for sj in states.keys():
id = states[si] + states[sj]
pi = (si - 1) * 3 + sj
ax = plt.subplot(int('33'+str(pi)))
ax.add_patch(patches.Rectangle((0, 0), laser_tend, 1, facecolor=[0.6, 0.6, 1], edgecolor=[0.6, 0.6, 1]))
plt.xlim([t[0]-dt, t[-1]])
box_off(ax)
P = M[id]
for i in range(P.shape[1]):
P[:,i].sort()
Bounds = np.zeros((2, P.shape[1]))
Bounds[0,:] = -1.0 * P[int(nboot*(alpha/2)),:] + np.nanmean(P, axis=0)
Bounds[1,:] = P[-int(nboot * (alpha / 2)),:] - np.nanmean(P, axis=0)
baseline = Base[id]
baseline.sort()
Bounds_baseline = np.zeros((2,))
basel_mean = np.nanmean(baseline)
Bounds_baseline[0] = -baseline[int(nboot*(alpha / 2))] + basel_mean
Bounds_baseline[1] = baseline[-int(nboot*(alpha / 2))] - basel_mean
aa = (basel_mean-Bounds_baseline[0]) * np.ones((nseq,))
bb = (basel_mean+Bounds_baseline[1]) * np.ones((nseq,))
ax.bar(t, np.nanmean(P, axis=0), yerr=Bounds, width=large_bin-large_bin*0.05, color='gray')
if bsl_shading:
plt.fill_between(t, aa, bb, alpha=0.5, lw=0, color='red', zorder=2)
else:
plt.plot(t, basel_mean * np.ones((nseq,)), 'r')
# set title
if si == 1 and sj == 1:
plt.ylabel('REM')
plt.title('REM', fontsize=fontsize)
if si == 1 and sj == 2:
plt.title('Wake', fontsize=fontsize)
if si == 1 and sj == 3:
plt.title('NREM', fontsize=fontsize)
if si == 2 and sj == 1:
plt.ylabel('Wake')
if si == 3 and sj == 1:
plt.ylabel('NREM')
if si == 3 and sj == 1:
plt.ylim([0, 0.5])
ax.set_xticks([-pre, 0, laser_tend, laser_tend+pre])
if si != 3:
ax.set_xticklabels([])
if si==3:
plt.xlabel('Time (s)')
# Statistics summary
Mod = np.zeros((3,3))
Sig = np.zeros((3,3))
for si in states:
for sj in states:
id = states[si] + states[sj]
# probabilities during laser stimulation
laser = Laser[id]
# probabilities outside laser stimulation
basel = Base[id]
Mod[si-1, sj-1] = np.nanmean(laser) / np.nanmean(basel)
if paired_stats:
d = laser - basel
else:
irand = random.sample(range(laser.shape[0]), laser.shape[0])
d = laser[irand] - basel
# "2 *" to make the test two sided. The different d can be either >0 or <0;
# see also http://qed.econ.queensu.ca/working_papers/papers/qed_wp_1127.pdf
if len(d) == np.where(d==0)[0].shape[0]:
p = np.nan
(cil, cih) = (np.nan, np.nan)
md = np.nanmean(d)
else:
p = 2 * np.min([len(np.where(d > 0)[0]) / (1.0 * len(d)), len(np.where(d <= 0)[0]) / (1.0 * len(d))])
(cil, cih) = np.percentile(d, (2.5, 97.5))
md = d.mean()
s = '='
if Mod[si-1, sj-1] > 1:
val = p
if val == 1:
s = '>'
val = 1 - 1.0 / nboot
if val == 0:
s = '<'
val = 1.0 / nboot
Sig[si-1, sj-1] = val
# division by 2.0 to make the test two-sided!
if val < alpha:#/2.0:
print('%s -> %s: Trans. prob. is INCREASED by a factor of %.4f; P %s %f, MD = %f, CI(%f, %f)'
% (states[si], states[sj], Mod[si-1, sj-1], s, val, md, cil, cih))
else:
print('%s -> %s: Trans. prob. is increased by a factor of %.4f; P %s %f, MD = %f, CI(%f, %f)'
% (states[si], states[sj], Mod[si-1, sj-1], s, val, md, cil, cih))
else:
val = p
if val == 1:
s = '>'
val = 1 - 1.0 / nboot
if val == 0:
s = '<'
val = 1.0 / nboot
Sig[si-1, sj-1] = val
# division by 2.0 to make the test two-sided!
if val < alpha:#/2.0:
print('%s -> %s: Trans. prob. is DECREASED by a factor of %.4f; P %s %f, MD = %f, CI(%f, %f)'
% (states[si], states[sj], Mod[si - 1, sj - 1], s, val, md, cil, cih))
else:
print('%s -> %s: Trans. prob. is decreased by a factor of %.4f; P %s %f, MD = %f, CI(%f, %F)'
% (states[si], states[sj], Mod[si - 1, sj - 1], s, val, md, cil, cih))
############################################################################################################
if len(fig_file) > 0:
save_figure(fig_file)
return M_us, Laser, Base
def build_markov_matrix_blocks(MX, tdown, large_bin, overlap_mode=False):
"""
    pMX = build_markov_matrix_blocks(MX, tdown, large_bin)
    build a sequence of Markov matrices; build one matrix for each large bin (block) of
    duration $large_bin by using smaller bins of duration $tdown;
    i.e. $tdown <= $large_bin
:param MX, np.array, with time bins on fine (tdown) time scale
:param tdown: fine time scale
:param large_bin: coarse time scale
    :param overlap_mode: if True, also include the last fine time bin, bordering the next large bin
:return: pMX, 3x3xtime np.array, series of markov matrices; time is the third dimension
"""
nbins = MX.shape[1] # number of time bins on fine time scale
ndown = int(large_bin/tdown) # number of fine bins in large bin
nstep = int(nbins/ndown) # number of large time bins
nrows = MX.shape[0]
pMX = np.zeros((3, 3, nstep))
for s in range(0, nstep):
if not overlap_mode:
mx = MX[:, s*ndown:(s+1)*ndown]
else:
mx = MX[:, s*ndown:((s+1)*ndown+1)]
pmx = np.zeros((3,3))
c = np.zeros((3,))
for i in range(0, nrows):
seq = mx[i,:]
m = mx.shape[1]
for j in range(0, m-1):
pmx[int(seq[j])-1, int(seq[j+1])-1] += 1
c[int(seq[j])-1] += 1
for i in range(3):
pmx[i,:] = pmx[i,:] / c[i]
pMX[:,:, s] = pmx
return pMX
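# Tiny worked example for build_markov_matrix_blocks() with made-up hypnogram
# sequences (1=REM, 2=Wake, 3=NREM): two trials with six 2.5 s bins, grouped
# into large bins of 7.5 s (3 fine bins each), yield one 3x3 matrix per block.
def _example_build_markov_matrix_blocks():
    MX = np.array([[1, 1, 2, 2, 3, 3],
                   [2, 3, 3, 3, 1, 1]])
    pMX = build_markov_matrix_blocks(MX, tdown=2.5, large_bin=7.5)
    return pMX  # shape (3, 3, 2): one transition matrix per large bin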
def transition_markov_strength(ppath, rec_file, laser_tend, tdown, dur, bootstrap_mode=0,
backup='', pstationary=False, stats_lastpoint=True, paired_stats=True, nboot=1000, ma_thr=0):
"""
Cumulative transition probabilities:
    How likely is it that a specific brain state transition happens within a given time interval?
    The function compares the cumulative probabilities during the baseline interval preceding
    laser onset (set internally to $laser_tend + $tdown seconds) with those during the laser
    stimulation interval (of duration $laser_tend).
The brainstate is downsampled to time bins of duration $tdown.
See also function &quantify_transition()
NOTE: The CDF for a transition from si -> sj within time interval X is defined as P(si -> sj, t <= X)
    I define the CDF for the discrete time point tdown as P(si -> sj, t <= tdown)
    and count whether a brain state change happened between bin [-tdown, 0[ and bin [0, tdown].
    In other words, the brain state right before the laser serves as si in P(si -> sj, t <= X).
Example call:
    sleepy.transition_markov_strength(ppath, recs, 120, 20, np.arange(0, 121, 20))
:param ppath: base folder with sleep recordings
:param rec_file: file OR list of recordings
:param laser_tend: duration of laser stimulation
:param tdown: downsample brainstate sequence to $tdown time bins; same as parameter tdown in sleepy.transition_analysis()
:param dur: list/vector with cumulative (=increasing) time intervals for each the cum. probabilities are computed
:param bootstrap_mode:
bootstrap_mode == 0: Take inter-mouse variance and inter-trial variance (of each mouse) into account.
That is, bootstrapping re-models the variance expected when re-doing the same
experimental design (same mouse number and total trial number).
To account for potentially different number of trials per mouse, resample the data
during each iteration the following way: Assume that there are n laser trials from m mice;
randomly select (with replacment) ntrial mice; then select from each mouse randomly one trial.
bootstrap_mode == 1: Only take inter-trial variance (of each mouse) into account; That is,
bootstrapping models the variance expected when redoing the experiment
with exactly the same mice.
If unsure, use bootstrap_mode = 0
:param backup: if a recording is not located in $ppath, the function looks for it in folder $backup
:param pstationary: if True, we assume that the laser induced changes in transition probabilities
        are stationary; i.e. they are constant across the laser stimulation interval.
    :param stats_lastpoint: if True, test whether the last time point of the laser interval (at laser_tend) is significantly different
from the corresponding time point in the baseline interval; if False, compare the averages of the cumulative distributions
to each other.
:param paired_stats: if True, treat baseline interval and following laser interval as paired statistics.
If False, treat baseline and laser interval as independent.
:param nboot: number of bootstrap iterations; >1000 recommended; but for a quick check just set to ~100
:param ma_thr: if > 0, set wake periods < $ma_thr to NREM.
:return:
"""
    # the `random` and `numpy.random` modules are used below for the bootstrap
    # resampling; import them locally in case they are not already imported at
    # module level (the module header is not shown here)
    import random
    import numpy.random as rand
    alpha = 0.05
fontsize = 12
# Note: We increase the preceding baseline period by one bin, to account for the
# fact that for the laser period, we also use one bin right before the laser as starting point.
pre = laser_tend + tdown
if type(rec_file) == str:
E = load_recordings(ppath, rec_file)[1]
else:
E = rec_file
post = pre + laser_tend
# set path for each recording: either ppath or backup
rec_paths = {}
mouse_ids = {}
for m in E:
idf = re.split('_', m)[0]
if len(backup) == 0:
rec_paths[m] = ppath
else:
if os.path.isdir(os.path.join(ppath, m)):
rec_paths[m] = ppath
else:
rec_paths[m] = backup
if not idf in mouse_ids:
mouse_ids[idf] = 1
mouse_ids = list(mouse_ids.keys())
# Dict: Mouse_id --> all trials of this mouse
MouseMx = {idf:[] for idf in mouse_ids}
for m in E:
trials = _whole_mx(rec_paths[m], m, pre, post, tdown, tstart=0, tend=-1, ma_thr=ma_thr)
idf = re.split('_', m)[0]
MouseMx[idf] += trials
ntdown = len(trials[0])
for idf in mouse_ids:
MouseMx[idf] = np.array(MouseMx[idf])
# dict mouse_id --> number of trials
num_trials = {k:len(MouseMx[k]) for k in MouseMx}
# number of all trials
ntrials = sum(list(num_trials.values()))
# number of mice
nmice = len(mouse_ids)
# states
cum_base = dict()
cum_laser = dict()
bounds_bsl = dict()
bounds_lsr = dict()
states = {1:'R', 2:'W', 3:'N'}
for si in range(1,4):
for sj in range(1,4):
id = states[si] + states[sj]
cum_base[id] = np.zeros((nboot,len(dur)))
cum_laser[id] = np.zeros((nboot,len(dur)))
bounds_bsl[id] = np.zeros((2, len(dur)))
bounds_lsr[id] = np.zeros((2,len(dur)))
# that's the matrix used for computation in each bootstrap iteration
if bootstrap_mode == 1:
# each mouse contributes the same number of trials
mouse_trials = int(np.mean(list(num_trials.values())))
MXsel = np.zeros((mouse_trials*len(mouse_ids), ntdown), dtype='int')
else:
MXsel = np.zeros((ntrials, ntdown), dtype='int')
for b in range(nboot):
if (b % 100) == 0:
print('done with iteration %d' % b)
if bootstrap_mode == 1:
i = 0
for idf in mouse_ids:
num = num_trials[idf]
itrials = rand.randint(0, num, (mouse_trials,))
sel = MouseMx[idf][itrials,:]
MXsel[i*mouse_trials:(i+1)*mouse_trials,:] = sel
i += 1
else:
irand_mice = rand.randint(0, nmice, ntrials)
i=0
for j in irand_mice:
idf = mouse_ids[j]
itrial = rand.randint(0, num_trials[idf])
MXsel[i,:] = MouseMx[idf][itrial,:]
i+=1
base_boot = np.zeros((3,3, len(dur)))
lsr_boot = np.zeros((3,3, len(dur)))
k=0
for d in dur:
base_boot[:,:,k] = quantify_transition(MXsel.astype('int'), pre, laser_tend, tdown, False, d, pstationary=pstationary)
lsr_boot[:,:,k] = quantify_transition(MXsel.astype('int'), pre, laser_tend, tdown, True, d, pstationary=pstationary)
k+=1
for si in states:
for sj in states:
id = states[si] + states[sj]
cum_base[id][b,:] = base_boot[si-1, sj-1,:]
cum_laser[id][b,:] = lsr_boot[si-1, sj-1,:]
# bounds_bsl[id][0,:] is the lower bounds
for si in states:
for sj in states:
id = states[si] + states[sj]
bounds_bsl[id][0,:] = np.sort(cum_base[id], axis=0)[int(nboot*(alpha / 2)),:]
bounds_bsl[id][1,:] = np.sort(cum_base[id], axis=0)[-int(nboot * (alpha / 2)), :]
bounds_lsr[id][0,:] = np.sort(cum_laser[id], axis=0)[int(nboot*(alpha / 2)),:]
bounds_lsr[id][1,:] = np.sort(cum_laser[id], axis=0)[-int(nboot * (alpha / 2)), :]
plt.ion()
plt.figure(figsize=(8,6))
for si in states:
for sj in states:
id = states[si] + states[sj]
pi = (si - 1) * 3 + sj
ax = plt.subplot(int('33'+str(pi)))
plt.plot(dur, np.nanmean(cum_laser[id], axis=0), color='blue')
plt.plot(dur, np.nanmean(cum_base[id], axis=0), color='gray')
plt.fill_between(dur, bounds_bsl[id][0,:], bounds_bsl[id][1,:], alpha=0.4, zorder=3, color='gray')
plt.fill_between(dur, bounds_lsr[id][0,:], bounds_lsr[id][1,:], alpha=0.4, zorder=3, color='blue')
plt.ylim([0, 1])
plt.xlim([dur[0], dur[-1]])
box_off(ax)
# set title
if si == 1 and sj == 1:
plt.ylabel('REM')
plt.title('REM', fontsize=fontsize)
if si == 1 and sj == 2:
plt.title('Wake', fontsize=fontsize)
if si == 1 and sj == 3:
plt.title('NREM', fontsize=fontsize)
if si == 2 and sj == 1:
plt.ylabel('Wake')
if si == 3 and sj == 1:
plt.ylabel('NREM')
if si != 3:
ax.set_xticklabels([])
if sj != 1:
ax.set_yticklabels([])
if si==3:
plt.xlabel('Time (s)')
# stats
P = {}
Mod = {}
data = []
S = {}
irand = random.sample(range(nboot), nboot)
for si in states:
for sj in states:
id_trans = states[si] + states[sj]
# old version
#d = cum_base[id_trans].mean(axis=1) - cum_laser[id_trans].mean(axis=1)
#d = d.mean(axis=1)
if stats_lastpoint:
# here, we test whether the probability to observe transition id_trans during a time interval of duration
                # $laser_tend was significantly changed by laser.
a = cum_base[id_trans][:,-1]
b = cum_laser[id_trans][:,-1]
else:
#a = np.nanmean(cum_base[id_trans], axis=1)
#b = np.nanmean(cum_laser[id_trans], axis=1)
#NEW:
a = np.nansum(cum_base[id_trans], axis=1) * tdown
b = np.nansum(cum_laser[id_trans], axis=1) * tdown
if not paired_stats:
d = b-a[irand]
else:
d = b-a
p = 2 * np.min([len(np.where(d > 0)[0]) / (1.0 * len(d)), len(np.where(d <= 0)[0]) / (1.0 * len(d))])
#NEW:
(cil, cih) = np.percentile(d, (2.5, 97.5))
md = d.mean()
# if np.mean(d) >= 0:
# # now we want all values be larger than 0
# p = len(np.where(d>0)[0]) / (1.0*nboot)
# sig = 1 - p
# if sig == 0:
# sig = 1.0/nboot
# else:
# p = len(np.where(d<0)[0]) / (1.0*nboot)
# sig = 1 - p
# if sig == 0:
# sig = 1.0/nboot
if p == 1 or p == 0:
p = 1.0 / nboot
P[id_trans] = p
if p < alpha:
S[id_trans] = 'yes'
else:
S[id_trans] = 'no'
Mod[id_trans] = np.nanmean(b) / np.nanmean(a)
print('Transition %s was changed by a factor of %f; MD = %f CI(%f, %f)' % (id_trans, Mod[id_trans], md, cil, cih))
data.append([id_trans, P[id_trans], Mod[id_trans], S[id_trans], md, cil, cih])
df = pd.DataFrame(data=data, columns=['trans', 'p-value', 'change', 'sig', 'md', 'cil', 'cih'])
print(df)
return df
def quantify_transition(MX, pre, laser_tend, tdown, plaser, win, pstationary=False):
"""
Calculate probability to observe a transition for a given pair of brain states
with time interval $win. P( si -> sj, t <= $win )
:param MX: Matrix with all laser stimulation trials
:param pre: Duration of baseline interval preceding laser onset
:param laser_tend: duration of laser stimulation interval
:param tdown: duration of downsampled brain state bins
:param plaser: if True, compute cum. probabilities for laser interval;
otherwise for baseline
:param win: time interval [s] for which we test whether there was a given brain state transition
:param pstationary: boolean; if True, we assume that the transition probabilities are constant during
laser stimulation
:return: pmx, np.array(3 x 3)
pmx[i,j] holds the cumulative probability of a transition from state i to j
"""
ipre = int(np.round(pre / tdown))
ipost = int(np.round((pre+laser_tend) / tdown)) #HERE
nwin = int(np.round(win / tdown))
nrows = MX.shape[0]
pmx = np.zeros((3, 3))
c = np.zeros((3,))
idx = [0]
if plaser:
for i in range(nrows):
# I'm starting with one bin before the laser!
seq = MX[i,ipre-1:ipost]
if pstationary:
idx = range(0, len(seq)-nwin)
for j in idx:
p = seq[j:j+nwin+1]
si = p[0]
c[si-1] += 1
res = np.where(p != si)[0]
if len(res) == 0:
pmx[si-1, si-1] += 1
else:
sj = p[res[0]]
pmx[si-1, sj-1] += 1
# no laser
else:
for i in range(nrows):
seq = MX[i,0:ipre+1]
if pstationary:
idx = range(0, len(seq)-nwin)
for j in idx:
p = seq[j:j+nwin+1]
si = p[0]
c[si-1] += 1
res = np.where(p != si)[0]
if len(res) == 0:
pmx[si-1, si-1] += 1
else:
sj = p[res[0]]
pmx[si-1, sj-1] += 1
#for i in range(nrows):
# seq = MX[i,ipost:]
# for j in range(0, len(seq)-nwin):
# p = seq[j:j+nwin+1]
# si = p[0]
# c[si-1] += 1
# res = np.where(p != si)[0]
# if len(res) == 0:
# pmx[si-1, si-1] += 1
# else:
# sj = p[res[0]]
# pmx[si-1, sj-1] += 1
for i in range(0, 3):
pmx[i,:] = pmx[i,:] / c[i]
return pmx
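# Small worked example for quantify_transition(): three made-up laser trials
# with a 10 s baseline and a 10 s laser interval, discretized in 2.5 s bins;
# the cumulative transition probabilities are evaluated for a 5 s window.
def _example_quantify_transition():
    MX = np.array([[1, 1, 1, 1, 2, 2, 2, 2],
                   [2, 2, 2, 2, 3, 3, 3, 3],
                   [3, 3, 3, 3, 3, 1, 1, 1]])
    p_base = quantify_transition(MX, pre=10, laser_tend=10, tdown=2.5,
                                 plaser=False, win=5)
    p_lsr = quantify_transition(MX, pre=10, laser_tend=10, tdown=2.5,
                                plaser=True, win=5)
    return p_base, p_lsr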
def build_markov_matrix_seq(MX):
"""
build Markov matrix from hypnogram sequences.
Parameters
----------
MX : 2D np.array
rows are trials (e.g. laser stimulation trials), columns refer to time bins
Returns
-------
pMX : 3 x 3 np.array
pMX[i,j] is the probability to transition from state i to j
"""
nseq = MX.shape[1]
nrows = MX.shape[0]
MX[np.where(MX==4)] = 3
pMX = np.zeros((3,3,nseq))
C = np.zeros((3,nseq))
for i in range (0, nrows):
seq = MX[i,:]
for j in range (0, nseq-1):
pMX[int(seq[j])-1,int(seq[j+1])-1,j] += 1
C[int(seq[j]-1),j] +=1
for t in range(0, nseq):
for s in range(3):
pMX[s,:,t] = pMX[s,:,t] / C[s,t]
return pMX
def complete_transition_matrix(MX, idx):
idx_list = get_sequences(idx)
nrows = MX.shape[0]
pmx = np.zeros((3, 3))
c = np.zeros((3,))
#cwr = 0
for idx in idx_list:
for i in range(0, nrows):
seq = MX[i, idx]
for j in range(0, len(seq)-1):
pmx[int(seq[j])-1, int(seq[j+1])-1] += 1
c[int(seq[j])-1] += 1
#if seq[j] == 2 and seq[j+1] == 1:
# cwr += 1
#print ('We found %d W2R transitions' % cwr)
for i in range(3):
pmx[i, :] = pmx[i, :] / c[i]
return pmx
def _whole_mx(ppath, name, pre, post, tdown, ptie_break=True, tstart=0, tend=-1,
ma_thr=0, ma_rem_exception=True):
"""
get all laser trials (brain state sequences) discretized in $tdown second bins
Note: the first ipre = int(pre/tdown) columns are before the laser.
The first bin with laser corresponds to column ipre (starting counting with 0).
:param tdown: time bin for downsampled brain state annotation; ideally a multiple of the time bin
    for the original annotation as saved in remidx_*.txt (typically 2.5 s)
@Return:
List of all laser stimulation trials of recording ppath/name
"""
SR = get_snr(ppath, name)
# Number of EEG bins / brainstate bin
NBIN = np.round(2.5*SR)
# Precise time bin duration of each brain state:
dt = NBIN * 1.0/SR
# ds - number how often dt fits into coarse brain state bin tdown:
ds = int(np.round(tdown/dt))
NBIN *= ds
ipre = int(np.round(pre/tdown))
ipost = int(np.round(post/tdown))
# load brain state
M = load_stateidx(ppath, name)[0]
# NEED CORRECTION!! - corrected on 8/9/21
if ma_thr > 0:
wake_seq = get_sequences(np.where(M==2)[0])
for seq in wake_seq:
if np.round(len(seq)*dt) <= ma_thr:
if ma_rem_exception:
if (seq[0]>1) and (M[seq[0] - 1] != 1):
M[seq] = 3
else:
M[seq] = 3
    if tend == -1:
        iend = M.shape[0] - 1
    else:
        iend = int(np.round(tend / dt))
    istart = int(np.round(tstart / dt))
# downsample brain states
M = downsample_states(M, ds, ptie_break)
len_rec = len(M)
(idxs, idxe) = laser_start_end(load_laser(ppath, name))
idxs = [s for s in idxs if s >= istart*(NBIN/ds) and s<= iend*(NBIN/ds)]
idxs = [int(i/NBIN) for i in idxs]
trials = []
for s in idxs:
        # i.e. column ipre of the trial is the first bin overlapping with the laser
if s>=ipre-1 and s+ipost < len_rec:
trials.append(M[s-ipre:s+ipost])
return trials
def downsample_states(M, nbin, ptie_break=True):
"""
    downsample brain state sequence by replacing $nbin consecutive bins by the most frequent state
ptie_break - tie break rule:
if 1, wake wins over NREM which wins over REM (Wake>NREM>REM) in case of tie
"""
    n = int(np.floor(len(M) / (1.0*nbin)))
Mds = np.zeros((n,))
for i in range(n):
m = M[i*nbin:(i+1)*nbin]
S = np.array([len(np.where(m==s)[0]) for s in [1,2,3]])
if not ptie_break:
Mds[i] = np.argmax(S)+1
else:
tmp = S[[1,2,0]]
ii = np.argmax(tmp)
ii = [1,2,0][ii]
Mds[i] = ii+1
return Mds
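# Minimal illustration of downsample_states() with a made-up hypnogram:
# three fine bins are collapsed into one coarse bin; in case of a tie the
# rule Wake > NREM > REM decides (ptie_break=True).
def _example_downsample_states():
    M = np.array([1, 2, 2, 3, 3, 1, 1, 2, 3])
    # blocks: [1,2,2] -> 2 (Wake), [3,3,1] -> 3 (NREM), [1,2,3] -> 2 (tie, Wake wins)
    return downsample_states(M, 3, ptie_break=True)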
### END Transition Analysis ####################################
def infraslow_rhythm(ppath, recordings, ma_thr=20, min_dur = 180,
band=[10,15], state=3, win=64, pplot=True, pflipx=True, pnorm='mean',
spec_norm=True, spec_filt=False, box=[1,4],
tstart=0, tend=-1, peeg2=False):
"""
calculate powerspectrum of EEG spectrogram to identify oscillations in sleep activity within different frequency bands;
    only continuous NREM periods are considered for the analysis.
@PARAMETERS:
ppath - base folder of recordings
recordings - single recording name or list of recordings
@OPTIONAL:
    ma_thr - microarousal threshold; wake periods <= $ma_thr are transferred to NREM
min_dur - minimal duration [s] of a NREM period
band - frequency band used for calculation
win - window (number of indices) for FFT calculation
pplot - if True, plot window showing result
pflipx - if True, plot wavelength instead of frequency on x-axis
pnorm - string, if pnorm == 'mean', normalize spectrum by the mean power,
if pnorm == 'area', normalize by area under spectrogram
if pnorm == 'no', perform no normalization
@RETURN:
SpecMx, f - ndarray [mice x frequencies], vector [frequencies]
"""
    # for downward compatibility
if type(pnorm) == bool:
pnorm = 'mean'
#import scipy.linalg as LA
print('Greetings from infraslow_rhythm')
min_dur = np.max([win*2.5, min_dur])
if type(recordings) != list:
recordings = [recordings]
Spec = {}
for rec in recordings:
idf = re.split('_', rec)[0]
Spec[idf] = []
mice = list(Spec.keys())
for rec in recordings:
idf = re.split('_', rec)[0]
# sampling rate and time bin for spectrogram
SR = get_snr(ppath, rec)
NBIN = np.round(2.5*SR)
dt = NBIN * 1/SR
dt = 2.5
istart = int(np.round(tstart/dt))
if tend > -1:
iend = int(np.round(tend/dt))
# load sleep state
M = load_stateidx(ppath, rec)[0]
if tend == -1:
iend = M.shape[0]
M = M[istart:iend]
# load frequency band
P = so.loadmat(os.path.join(ppath, rec, 'sp_' + rec + '.mat'))
if not peeg2:
SP = np.squeeze(P['SP'])[:,istart:iend]
else:
SP = np.squeeze(P['SP2'])[:, istart:iend]
freq = np.squeeze(P['freq'])
ifreq = np.where((freq>=band[0]) & (freq<=band[1]))[0]
mmin = np.min((SP.shape[1], len(M)))
M=M[0:mmin]
if spec_filt:
filt = np.ones(box)
filt = np.divide(filt, filt.sum())
SP = scipy.signal.convolve2d(SP, filt, boundary='symm', mode='same')
if spec_norm:
sp_mean = SP.mean(axis=1)
SP = np.divide(SP, np.tile(sp_mean, (SP.shape[1], 1)).T)
pow_band = SP[ifreq,:].mean(axis=0)
else:
pow_band = SP[ifreq,:].sum(axis=0) * (freq[2]-freq[1])
nidx = np.where(M==3)[0]
pow_band = pow_band / np.mean(pow_band[nidx])
seq = get_sequences(np.where(M==state)[0], int(np.round(ma_thr/dt))+1)
seq = [list(range(s[0], s[-1]+1)) for s in seq]
seq = [s for s in seq if len(s)*dt >= min_dur]
for s in seq:
y,f = power_spectrum(pow_band[s], win, dt)
Spec[idf].append(y)
# Transform %Spec to ndarray
SpecMx = np.zeros((len(Spec), len(f)))
i=0
data = []
for idf in Spec:
SpecMx[i,:] = np.array(Spec[idf]).mean(axis=0)
if len(pnorm) > 0:
if pnorm == 'mean':
SpecMx[i,:] = SpecMx[i,:]/SpecMx[i,:].mean()
elif pnorm == 'area':
SpecMx[i,:] = SpecMx[i,:]/(SpecMx[i,:].sum()*(f[1]-f[0]))
else:
# no normalization
pass
data += zip([idf]*len(f), SpecMx[i,:], f)
i += 1
if pplot:
plt.figure()
ax = plt.axes([0.1, 0.1, 0.8, 0.8])
if pflipx:
            x = 1.0 / f[1:]
y = SpecMx[:,1:]
if len(mice) <= 1:
ax.plot(x, y.mean(axis=0), color='gray', lw=2)
else:
ax.errorbar(x, y.mean(axis=0), yerr=y.std(axis=0), color='gray', fmt='-o')
else:
y = SpecMx
if len(mice) <= 1:
ax.plot(f, y.mean(axis=0), color='gray', lw=2)
else:
ax.errorbar(f, y.mean(axis=0), yerr=y.std(axis=0), color='gray', fmt='-o')
box_off(ax)
if pflipx:
plt.xlabel('Wavelength (s)')
else:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Power (uV^2)')
plt.show()
df = pd.DataFrame(data=data, columns=['mouse', 'pow', 'freq'])
return SpecMx, f, df, Spec
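# Usage sketch for infraslow_rhythm(); the folder and recording names are
# placeholders, not part of the original code base:
def _example_infraslow_rhythm(ppath='/data/sleep_recordings'):
    # quantify the infraslow modulation of sigma power (10-15 Hz) during NREM
    recordings = ['M1_010121n1', 'M2_010121n1']
    SpecMx, f, df, Spec = infraslow_rhythm(ppath, recordings, band=[10, 15],
                                           state=3, win=64, pnorm='mean',
                                           pplot=False)
    return SpecMx, f, df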
def ma_rhythm(ppath, recordings, ma_thr=20.0, min_dur = 160, band=[10,15],
state=3, win=64, pplot=True, pflipx=True, pnorm=False):
"""
calculate powerspectrum of EEG spectrogram to identify oscillations in sleep activity within different frequency bands;
    only continuous NREM periods are considered for the analysis.
@PARAMETERS:
ppath - base folder of recordings
recordings - single recording name or list of recordings
@OPTIONAL:
    ma_thr - microarousal threshold; wake periods <= $ma_thr are transferred to NREM
min_dur - minimal duration [s] of a NREM period
band - frequency band used for calculation
win - window (number of indices) for FFT calculation
pplot - if True, plot window showing result
pflipx - if True, plot wavelength instead of frequency on x-axis
pnorm - if True, normalize spectrum (for each mouse) by its total power
@RETURN:
SpecMx, f - ndarray [mice x frequencies], vector [frequencies]
"""
import scipy.linalg as LA
min_dur = np.max([win*2.5, min_dur])
if type(recordings) != list:
recordings = [recordings]
Spec = {}
for rec in recordings:
idf = re.split('_', rec)[0]
Spec[idf] = []
mice = list(Spec.keys())
for rec in recordings:
idf = re.split('_', rec)[0]
# sampling rate and time bin for spectrogram
#SR = get_snr(ppath, rec)
#NBIN = np.round(2.5*SR)
#dt = NBIN * 1/SR
dt = 2.5
# load sleep state
M = load_stateidx(ppath, "", ann_name=rec)[0]
Mseq = M.copy()
Mseq[np.where(M != 2)] = 0
Mseq[np.where(M == 2)] = 1
seq = get_sequences(np.where(M==state)[0], ibreak=int(np.round(ma_thr/dt))+1)
seq = [range(s[0], s[-1]+1) for s in seq]
# load frequency band
#P = so.loadmat(os.path.join(ppath, rec, 'sp_' + rec + '.mat'));
#SP = np.squeeze(P['SP'])
#freq = np.squeeze(P['freq'])
#ifreq = np.where((freq>=band[0]) & (freq<=band[1]))
#pow_band = SP[ifreq,:].mean(axis=0)
seq = [s for s in seq if len(s)*dt >= min_dur]
for s in seq:
y,f = power_spectrum(Mseq[s], win, dt)
#y = y.mean(axis=0)
Spec[idf].append(y)
# Transform %Spec to ndarray
SpecMx = np.zeros((len(Spec), len(f)))
i=0
for idf in Spec:
SpecMx[i,:] = np.array(Spec[idf]).mean(axis=0)
if pnorm==True:
SpecMx[i,:] = SpecMx[i,:]/LA.norm(SpecMx[i,:])
i += 1
if pplot:
plt.figure()
ax = plt.axes([0.1, 0.1, 0.8, 0.8])
x = f[1:]
if pflipx == True: x = 1.0/f[1:]
y = SpecMx[:,1:]
if len(mice) <= 1:
ax.plot(x, y.mean(axis=0), color='gray', lw=2)
else:
ax.errorbar(x, y.mean(axis=0), yerr=y.std(axis=0), color='gray', fmt='-o')
box_off(ax)
if pflipx == True:
plt.xlabel('Wavelength (s)')
else:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Power (uV^2)')
plt.show()
return SpecMx, f
### pandas support functions ##################################################
def nparray2df(mx, rows, cols, mx_label='', row_label='', column_label=''):
"""
transform a 2D np.array to pandas DataFrame.
:param mx: 2D np.array
:param rows: list or vector of row labels
:param cols: list or vector of columns labels
:param mx_label, row_label, column_label: label (string) for values in matrix,
name for rows and columns
"""
nx = mx.shape[0]
ny = mx.shape[1]
vals = []
for i in range(nx):
for j in range(ny):
vals.append([mx[i,j], rows[i], cols[j]])
columns = ['val', 'row', 'col']
if len(row_label) > 0:
columns[1] = row_label
if len(column_label) > 0:
columns[2] = column_label
if len(mx_label) > 0:
columns[0] = mx_label
    df = pd.DataFrame(columns=columns, data=vals)
    return df
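# Quick illustration of nparray2df() with a 2 x 3 matrix (labels are arbitrary):
def _example_nparray2df():
    mx = np.array([[0.1, 0.2, 0.3],
                   [0.4, 0.5, 0.6]])
    return nparray2df(mx, rows=['mouse1', 'mouse2'], cols=[1.0, 2.0, 3.0],
                      mx_label='pow', row_label='mouse', column_label='freq')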
import numpy as np
import argparse
from pathlib import Path
import re
import glob
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True, style="white", context="talk", font_scale=1)
PALETTE = sns.color_palette("Set1")
name_dict = {
"Gradients 0": "Gradient 1",
"Gradients 1": "Gradient 2",
"Gradients 2": "Gradient 3",
"Gradients (0, 1)": "Gradients 1,2",
"Gradients (1, 2)": "Gradients 2,3",
"Gradients (2, 0)": "Gradients 1,3",
"Gradients (0, 1, 2)": "Gradients 1,2,3",
"Gradients 0": "Gradient 1",
"Gradients 1": "Gradient 2",
"Gradients 2": "Gradient 3",
"Gradients (0, 1)": "Gradients 1,2",
"Gradients (1, 2)": "Gradients 2,3",
"Gradients (2, 0)": "Gradients 1,3",
"Gradients (0, 1, 2)": "Gradients 1,2,3",
"Experts Resting vs. Experts Compassion": ["EXP", "res", "EXP", "com"],
"Experts Resting vs. Experts Open Monitoring": ["EXP", "res", "EXP", "o m"],
"Experts Open Monitoring vs. Experts Compassion": ["EXP", "o m", "EXP", "com"],
"Experts Resting vs. Experts Meditating": ["EXP", "res", "EXP", "med"],
"Novices Resting vs. Novices Compassion": ["NOV", "res", "NOV", "com"],
"Novices Resting vs. Novices Open Monitoring": ["NOV", "res", "NOV", "o m"],
"Novices Open Monitoring vs. Novices Compassion": ["NOV", "o m", "NOV", "com"],
"Novices Resting vs. Novices Meditating": ["NOV", "res", "NOV", "med"],
"Experts Resting vs. Novices Resting": ["EXP", "res", "NOV", "res"],
"Experts Compassion vs. Novices Compassion": ["EXP", "com", "NOV", "com"],
"Experts Open Monitoring vs. Novices Open Monitoring": ["EXP", "o m", "NOV", "o m"],
"Experts Meditating vs. Novices Meditating": ["EXP", "med", "NOV", "med"],
"Experts All vs. Novices All": ["EXP", "all", "NOV", "all"],
"Experts Resting vs. Novices Compassion": ["EXP", "res", "NOV", "com"],
"Experts Resting vs. Novices Open Monitoring": ["EXP", "res", "NOV", "o m"],
"Experts Compassion vs. Novices Resting": ["EXP", "com", "NOV", "res"],
"Experts Compassion vs. Novices Open Monitoring": ["EXP", "com", "NOV", "o m"],
"Experts Open Monitoring vs. Novices Resting": ["EXP", "o m", "NOV", "res"],
"Experts Open Monitoring vs. Novices Compassion": ["EXP", "o m", "NOV", "com"],
"Resting vs. Compassion": ["ALL", "res", "ALL", "com"],
"Resting vs. Open Monitoring": ["ALL", "res", "ALL", "o m"],
"Compassion vs. Open Monitoring": ["ALL", "com", "ALL", "o m"],
"Resting vs. Meditating": ["ALL", "res", "ALL", "med"],
}
label_dict = {
"EXP": "EXP",
"NOV": "NOV",
"ALL": "ALL",
"o m": "open",
"med": "med ",
"res": "rest",
"com": "comp",
"all": "all ",
}
def make_heatmap(source_dir, save_path):
files = glob.glob(str(Path(source_dir) / "2-*.csv"))
pvalues = pd.read_csv(files[0], index_col=0)
pvalues.columns = [name_dict[v].split(" ")[-1] for v in pvalues.columns]
index = [
["2", "X" if name_dict[v][0] == name_dict[v][2] else "", ""]
+ [label_dict[vv] for vv in name_dict[v]]
for v in pvalues.index
]
fmt_index = [
"{:^1s} | {:^1s} | {:^1s} | {:^3s} {:<3s}, {:^3s} {:<3s}".format(*v)
for v in index
]
pvalues.index = fmt_index
# Add pvalues from k-sample tests
k_sample_paths = ["6-*.csv", "3E-*.csv", "3N-*.csv"]
files = [glob.glob(str(Path(source_dir) / path))[0] for path in k_sample_paths]
kpvals = np.vstack([pd.read_csv(f, index_col=0).values for f in files])
# Scale
kpvals = np.asarray(kpvals) * 7
kpvals[1:, :] = kpvals[1:, :] * 2
df = pd.DataFrame(kpvals, columns=pvalues.columns)
df.index = [
"6 | X | X | All states, traits",
"3 | X | | EXP states ",
"3 | X | | NOV states ",
]
df[df > 1] = 1
pvalues = | pd.concat([df, pvalues]) | pandas.concat |
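
# --- Illustrative sketch (added for clarity; not part of the original source).
# It mirrors the correction applied to the k-sample p-values above: multiply by
# the number of comparisons (7, with an extra factor of 2 for the 3-sample rows)
# and clip at 1. The numbers and column names are invented.
import numpy as np
import pandas as pd

_raw = np.array([[0.004, 0.2], [0.01, 0.5], [0.03, 0.001]])
_adj = _raw * 7
_adj[1:, :] = _adj[1:, :] * 2
_adj_df = pd.DataFrame(np.minimum(_adj, 1.0), columns=['test A', 'test B'])
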
'''
__author__=<NAME>
MIT License
Copyright (c) 2020 crewml
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import pandas as pd
import logging
from crewml.common import DATA_DIR
from sklearn.preprocessing import LabelEncoder
from category_encoders import TargetEncoder
class FlightFeatureGenerator:
'''
This class creates new features for the flights based
on following criteria
    1. All flights that depart and arrive within a base city
    are marked as "1" with the new feature BASE_FL
2. All flights with CRS_ELAPSED_TIME <=900 grouped together with
new feature GROUP=1,2,3, etc.
'''
def __init__(self, pairing_month,
feature_file,
feature_gen_file,
fa_bases,
fa_non_bases):
self.logger = logging.getLogger(__name__)
self.pairing_month = pairing_month
self.feature_file = feature_file
self.fa_bases = fa_bases
self.fa_non_bases = fa_non_bases
self.feature_gen_file = feature_gen_file
self.flights_df = pd.read_csv(
DATA_DIR+self.pairing_month+"/"+self.feature_file)
self.flights_df.drop(self.flights_df.filter(
regex="Unname"), axis=1, inplace=True)
self.flights_df.dropna()
self.flights_df['ORIGIN_UTC'] = pd.to_datetime(
self.flights_df['ORIGIN_UTC'])
self.flights_df['DEST_UTC'] = pd.to_datetime(
self.flights_df['DEST_UTC'])
self.group_id = 1
self.final_df = pd.DataFrame()
def process(self):
self.timestamp2_epoch()
#self.time2_int()
# self.generate_base_indicator()
# self.generate_pairing_group()
self.label_encode_categories()
# self.target_encode_categories()
# self.onehot_encode_categories()
# self.origin_dest_diff()
# self.combine_origin_dest()
self.convert_fl_date()
self.flights_df.to_csv(
DATA_DIR+self.pairing_month+"/"+self.feature_gen_file)
def generate_base_indicator(self):
self.flights_df['BASE_FL'] = self.flights_df.apply(
lambda x: "1" if x['ORIGIN'] and x['DEST'] in self.fa_bases else '0', axis=1)
def origin_dest_diff(self):
self.flights_df["DEST_ARR_DIFF"] = self.flights_df['DEST_UTC'] - \
self.flights_df['ORIGIN_UTC']
def combine_origin_dest(self):
'''
Create a new feature=DEST_ARR_DIFF ^ (ORIGIN+DEST)
Returns
-------
None.
'''
self.flights_df["ORIGIN"] = self.flights_df["ORIGIN"].astype(int)
self.flights_df["DEST"] = self.flights_df["DEST"].astype(int)
temp = self.flights_df["ORIGIN"]+self.flights_df["DEST"]
self.flights_df["DEST_ARR_DIFF_SQR"] = \
self.flights_df["DEST_ARR_DIFF"] * (
self.flights_df["ORIGIN"]-self.flights_df["DEST"])
def generate_pairing_group(self):
origin_dest = list(
zip(self.flights_df.ORIGIN, self.flights_df.DEST))
# use set to remove duplicates
b2b = [x for x in origin_dest if x[0]
in self.fa_bases and x[1] in self.fa_bases]
b2b = list(set(b2b))
for i in b2b:
df = self.group_flights(i)
            if len(df) == 0:
continue
df = df.sort_values('ORIGIN_UTC')
total_elapsed_time = 0
fl_ids = []
self.flights_df['GROUP'] = ""
for index, row in df.iterrows():
total_elapsed_time += row["CRS_ELAPSED_TIME"]
fl_ids.append(int(row["FL_ID"]))
if total_elapsed_time <= 900 or len(fl_ids) % 2 == 0:
continue
else:
self.flights_df.loc[self.flights_df.FL_ID.isin(fl_ids),
'GROUP'] = str(self.group_id)
self.final_df = self.final_df.append(
self.flights_df[self.flights_df['FL_ID'].isin(fl_ids)])
self.flights_df = \
self.flights_df[~self.flights_df.FL_ID.isin(fl_ids)]
total_elapsed_time = 0
fl_ids.clear()
self.group_id += 1
self.final_df = self.final_df.append(self.flights_df)
nb2nb = [x for x in origin_dest if x[0]
in self.fa_non_bases and x[1] in self.fa_non_bases]
nb2nb = list(set(nb2nb))
b2nb = [x for x in origin_dest if x[0]
in self.fa_bases and x[1] in self.fa_non_bases]
b2nb = list(set(b2nb))
nb2b = [x for x in origin_dest if x[0]
in self.fa_non_bases and x[1] in self.fa_bases]
nb2b = list(set(nb2b))
self.flights_df = self.final_df
'''
    fl_pairs contains all combinations of tuples like ('CVG', 'MSP'),
    ('EWR', 'DTW'), etc. for B2B flights, and similarly for NB2NB flights.
    This method extracts B2B, B2NB, NB2B, and NB2NB flights from the
    flight list based on fl_pairs.
'''
def group_flights(self, fl_pairs):
df = | pd.DataFrame() | pandas.DataFrame |
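
# --- Illustrative sketch (added for clarity; not part of the original source).
# It demonstrates the BASE_FL rule described in FlightFeatureGenerator: a flight
# is flagged "1" only when both ORIGIN and DEST are flight-attendant bases.
# The airport codes and base set are invented.
import pandas as pd

_fa_bases = {'ATL', 'DTW', 'MSP'}
_fl = pd.DataFrame({'ORIGIN': ['ATL', 'ATL', 'CVG'],
                    'DEST': ['DTW', 'CVG', 'MSP']})
_fl['BASE_FL'] = _fl.apply(
    lambda x: '1' if (x['ORIGIN'] in _fa_bases and
                      x['DEST'] in _fa_bases) else '0', axis=1)
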
#!/usr/bin/env python3
import h5py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pprint import pprint
def main():
show_h5()
# show_evts()
# test_bool()
write_h5()
def show_h5():
"""
simple LEGEND data viewer function.
shows the group structure, attributes, units, wfs, etc.
can also do a lot of this from the cmd line with:
$ h5ls -vlr [file]
"""
hf = h5py.File("daq_testdata2.h5")
for path, dset in h5iter(hf):
print(path)
# skip bool columns for now (see test_bool below)
if any(x in path for x in ["inverted", "muveto"]):
print(" skipping bool dataset:", path)
continue
dt = dset.dtype
ds = dset.size
at = dset.attrs
ldtype = get_legend_dtype(dset.attrs['datatype'])
print(f" type: {dt} size: {ds}")
for name in dset.attrs:
print(f" attr: {name} value: {dset.attrs[name]}")
for d in ldtype:
print(f" {d} : {ldtype[d]}")
print("")
def h5iter(g, prefix=''):
"""
simple iterator to expose groups and attributes of
everything in a given hdf5 file. can also start at [prefix]-level.
"""
for key in g.keys():
item = g[key]
path = '{}/{}'.format(prefix, key)
if isinstance(item, h5py.Dataset): # test for dataset
yield (path, item)
elif isinstance(item, h5py.Group): # test for group (goes down a level)
            yield from h5iter(item, path)
def get_legend_dtype(dtstr):
"""
the 'datatype' attribute is specific to LEGEND HDF5 files.
reference: http://legend-exp.org/legend-data-format-specs/dev/
right now, this fills an output dict by string splitting.
there is probably a better way to read the Julia-formatted string.
also, think about changing enums to numpy named tuples for faster lookups.
"""
dt = {}
# TODO: oliver gave me these, we should update the function below
# to use them instead of all that messy string splitting
# datatype_regexp = r"""^(([A-Za-z_]*)(<([0-9,]*)>)?)(\{(.*)\})?$"""
# arraydims_regexp = r"""^<([0-9,]*)>$"""
sp1 = dtstr.split("{")[0]
dt["format"] = sp1 # scalar, array, struct, or table
if "array" in dt["format"]:
dt["ndim"] = int(sp1.split("<")[1].split(">")[0])
sp2 = dtstr.split("{")[1:]
if "enum" in sp2:
dt["dtype"] = "enum"
sp3 = sp2[1].split(",")
for tmp in sp3:
tmp = tmp.rstrip("}").split("=")
dt[int(tmp[1])] = tmp[0]
else:
dt["dtype"] = sp2[0].rstrip("}")
# pprint(dt)
return dt
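
# --- Illustrative sketch (added for clarity; not part of the original source).
# A call to get_legend_dtype() with an assumed LEGEND 'datatype' attribute
# string; real files may carry different attribute values.
_example_dtype = get_legend_dtype("array<1>{real}")
# -> {'format': 'array<1>', 'ndim': 1, 'dtype': 'real'}
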
def show_evts():
"""
look at events (make a small pandas dataframe).
this is specific to Oliver's test file.
*** Let's worry about making this fast LATER. ***
"""
hf = h5py.File("daq_testdata2.h5")
data = hf['daqdata']
nevt = data['evtno'].size
cols = list(data.keys())
# handle single-valued stuff
tmp = []
for c in cols:
# skip bool columns for now (see test_bool below)
if any(x in c for x in ["inverted", "muveto"]):
continue
# waveform data is handled below
if not isinstance(data[c], h5py.Dataset):
continue
# for now, slice into the whole array and turn into pd.Series
ser = | pd.Series(data[c][...], name=c) | pandas.Series |
#!/usr/bin/python
from xml.dom.minidom import parse
import numpy as np
import zipfile
import tempfile
import sys
if sys.version_info.major == 3:
import urllib.request as request
else:
import urllib2 as request
import io
import os.path
from impactutils.extern.openquake.geodetic import geodetic_distance
import pandas as pd
from .dataset import DataSetException,DataSetWarning
GEONAME_URL = 'http://download.geonames.org/export/dump/cities1000.zip'
def _fetchGeoNames():
"""
Internal method to retrieve a cities1000.txt file from GeoNames.
:returns:
Path to local cities1000.txt file.
"""
fh = request.urlopen(GEONAME_URL)
data = fh.read()
fh.close()
f = io.BytesIO(data)
myzip = zipfile.ZipFile(f)
fdir = tempfile.mkdtemp()
myzip.extract('cities1000.txt',fdir)
myzip.close()
return os.path.join(fdir,'cities1000.txt')
class Cities(object):
"""
Handles loading and searching for cities.
"""
REQFIELDS = ['name','lat','lon'] #class variable
def __init__(self,dataframe):
"""Construct a Cities object from a pandas DataFrame.
:param dataframe:
pandas DataFrame, where each row represents a city.
Columns include:
- name Name of the city (required).
- lat Latitude of city (required).
- lon Longitude of city (required).
- pop Population of city (optional).
- iscap Boolean indicating capital status (optional).
- placement String indicating where city label
should be placed relative to city coordinates,
one of: E,W,N,S,NE,SE,SW,NW (optional).
-xoff Longitude offset for label relative to city coordinates (optional).
-yoff Latitude offset for label relative to city coordinates (optional).
:raises DataSetException:
When any of required columns are missing.
:returns:
Cities instance.
"""
if len(set(dataframe.columns).intersection(set(self.REQFIELDS))) < 3:
raise DataSetException('Missing some of required keys: %s' % self.REQFIELDS)
self._dataframe = dataframe.copy()
#"magic" methods
def __len__(self):
"""Return the number of cities in the Cities object.
:returns:
Number of cities in the Cities object.
"""
return len(self._dataframe)
def __repr__(self):
"""
Return the string to represent the Cities instance.
:returns:
String representing Cities instance.
"""
return str(self._dataframe)
@classmethod
def loadFromGeoNames(cls,cityfile=None):
"""Load a cities data set from a GeoNames cities1000.txt file or by downloading
it from GeoNames, then loading it.
:param cityfile:
Path to cities1000.txt file from GeoNames, or None (file will be downloaded).
:returns:
Cities instance.
"""
CAPFLAG1 = 'PPLC'
CAPFLAG2 = 'PPLA'
delete_folder = False
if cityfile is None:
cityfile = _fetchGeoNames()
delete_folder = True
mydict = {'name':[],
'ccode':[],
'lat':[],
'lon':[],
'iscap':[],
'pop':[]}
f = open(cityfile,'rb')
for line in f.readlines():
line = line.decode('utf-8')
parts = line.split('\t')
tname = parts[2].strip()
if not tname:
continue
myvals = np.array([ord(c) for c in tname])
if len((myvals > 127).nonzero()[0]):
continue
mydict['name'].append(tname)
mydict['ccode'].append(parts[8].strip())
mydict['lat'].append(float(parts[4].strip()))
mydict['lon'].append(float(parts[5].strip()))
capfield = parts[7].strip()
iscap = (capfield == CAPFLAG1) or (capfield == CAPFLAG2)
mydict['iscap'].append(iscap)
mydict['pop'].append(int(parts[14].strip()))
f.close()
if delete_folder:
fdir,bname = os.path.split(cityfile)
os.remove(cityfile)
os.rmdir(fdir)
df = | pd.DataFrame.from_dict(mydict) | pandas.DataFrame.from_dict |
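
# --- Illustrative sketch (added for clarity; not part of the original source).
# It builds the minimal DataFrame that Cities() accepts: 'name', 'lat' and 'lon'
# are required, the remaining columns are optional. The city data are invented.
import pandas as pd

_city_df = pd.DataFrame({'name': ['Springfield', 'Shelbyville'],
                         'lat': [39.8, 39.4],
                         'lon': [-89.6, -88.8],
                         'pop': [116000, 4700],
                         'iscap': [True, False]})
# Cities(_city_df) would wrap this frame; len(Cities(_city_df)) == 2
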
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Biota Technology.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import division
from unittest import TestCase, main
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sourcetracker._sourcetracker import (intersect_and_sort_samples,
collapse_source_data,
subsample_dataframe,
validate_gibbs_input,
validate_gibbs_parameters,
collate_gibbs_results,
get_samples,
generate_environment_assignments,
cumulative_proportions,
single_sink_feature_table,
ConditionalProbability,
gibbs_sampler, gibbs)
from sourcetracker._plot import plot_heatmap
class TestValidateGibbsInput(TestCase):
def setUp(self):
self.index = ['s%s' % i for i in range(5)]
self.columns = ['f%s' % i for i in range(4)]
def test_no_errors_(self):
# A table where nothing is wrong, no changes expected.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
exp_sources = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
obs = validate_gibbs_input(sources)
pd.util.testing.assert_frame_equal(obs, sources)
# Sources and sinks.
sinks = pd.DataFrame(data, index=self.index, columns=self.columns)
exp_sinks = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
pd.util.testing.assert_frame_equal(obs_sinks, exp_sinks)
def test_float_data(self):
# Data is float, expect rounding.
data = np.random.uniform(0, 1, size=20).reshape(5, 4)
sources = pd.DataFrame(data, index=self.index, columns=self.columns)
exp_sources = pd.DataFrame(np.zeros(20).reshape(5, 4).astype(np.int32),
index=self.index, columns=self.columns)
obs_sources = validate_gibbs_input(sources)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
data = np.random.uniform(0, 1, size=20).reshape(5, 4) + 1.
sources = pd.DataFrame(data, index=self.index, columns=self.columns)
exp_sources = pd.DataFrame(np.ones(20).reshape(5, 4).astype(np.int32),
index=self.index, columns=self.columns)
obs_sources = validate_gibbs_input(sources)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
# Sources and sinks.
data = np.random.uniform(0, 1, size=20).reshape(5, 4) + 5
sinks = pd.DataFrame(data,
index=self.index,
columns=self.columns)
exp_sinks = \
pd.DataFrame(5 * np.ones(20).reshape(5, 4).astype(np.int32),
index=self.index,
columns=self.columns)
obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
| pd.util.testing.assert_frame_equal(obs_sources, exp_sources) | pandas.util.testing.assert_frame_equal |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def hello():
return 'hello world'
def sample_from_finite_probability_space(finite_prob_space):
"""
Produces a random outcome from a given finite probability space.
Input
-----
- finite_prob_space: finite probability space encoded as a
dictionary
Output
------
- random outcome, which is one of the keys in the
finite_probability_space dictionary's set of keys
(remember: these keys form the sample space)
"""
# first produce a list of pairs of the form (outcome, outcome probability)
outcome_probability_pairs = list(finite_prob_space.items())
# convert the pairs into two lists "outcomes" and "outcome_probabilities":
# - outcomes: list of outcomes
# - outcome_probabilities: i-th element is the probability of the i-th
# outcome in the "outcomes" list
# (note that this step is needed because NumPy wants these lists
# separately)
outcomes, outcome_probabilities = zip(*outcome_probability_pairs)
# use NumPy to randomly sample
random_outcome = np.random.choice(outcomes, p=outcome_probabilities)
return random_outcome
def flip_fair_coin():
"""
Returns a fair coin flip.
Output
------
- either the string 'heads' or 'tails'
"""
finite_prob_space = {'heads': 0.5, 'tails': 0.5}
return sample_from_finite_probability_space(finite_prob_space)
def flip_fair_coins(number_of_coins):
"""
Returns a list of fair coin flip results.
Input
-----
- number_of_coins: number of coin flips
Output
------
- list of length <number_of_coins> consisting of strings 'heads'/'tails'
"""
finite_prob_space = {'heads': 0.5, 'tails': 0.5}
return [sample_from_finite_probability_space(finite_prob_space)
for i in range(number_of_coins)]
def plot_discrete_histogram(array, frequency=False, figsize=(5, 4)):
"""
Plots a discrete histogram given a 1D array of values.
Input
-----
- array: 1D array consisting of data
- frequency: boolean (True => plot frequencies, False => plot counts)
- figsize: tuple (width, height) of how large to make the plotted figure
"""
array_as_series = pd.Series(array)
counts = array_as_series.value_counts().sort_index()
if frequency:
counts /= counts.sum()
plt.figure(figsize=figsize)
plt.xlabel('Value')
if frequency:
plt.ylabel('Frequency')
else:
plt.ylabel('Count')
axis = counts.plot(kind='bar')
figure = axis.get_figure()
figure.autofmt_xdate() # rotates x-axis labels to be more readable
plt.tight_layout() # tidy up and remove some margins
def print_prob_table_array(probabilities, outcomes):
"""
Prints a probability table that is stored as a 1D array.
Input
-----
- probabilities: a 1D array of nonnegative entries that add to 1
- outcomes: list of labels; i-th label is for the i-th entry in
<probabilities>
"""
if len(probabilities) != len(outcomes):
raise Exception("The number of outcomes and number of probabilities "
+ "must match.")
print(pd.Series(probabilities, outcomes))
def print_joint_prob_table_dict(dicts_in_dict):
"""
Prints a joint probability table that is stored using the dictionaries
within a dictionary representation.
Input
-----
- dicts_in_dict: joint probability table stored as dictionaries within a
dictionary
"""
print(pd.DataFrame(dicts_in_dict).T)
def print_joint_prob_table_array(array, row_labels, col_labels):
"""
Prints a joint probability table that is stored using the 2D array
representation.
Input
-----
- array: 2D array for the joint probability table (doesn't have label info)
- row_labels: list of labels; i-th label is for the i-th row in <array>
- col_labels: list of labels; i-th label is for the i-th column in <array>
"""
if len(array.shape) != 2:
raise Exception("The array specified must be two-dimensional.")
print( | pd.DataFrame(array, row_labels, col_labels) | pandas.DataFrame |
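
# --- Illustrative sketch (added for clarity; not part of the original source).
# Sampling from a small finite probability space with the helper defined above;
# the outcome labels and probabilities are invented.
_fair_die = {'one': 1/6, 'two': 1/6, 'three': 1/6,
             'four': 1/6, 'five': 1/6, 'six': 1/6}
_roll = sample_from_finite_probability_space(_fair_die)
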
# Copyright (C) 2022 National Center for Atmospheric Research and National Oceanic and Atmospheric Administration
# SPDX-License-Identifier: Apache-2.0
#
#Code to create plots for surface observations
import os
import monetio as mio
import monet as monet
import seaborn as sns
from monet.util.tools import calc_8hr_rolling_max, calc_24hr_ave
import xarray as xr
import pandas as pd
import numpy as np
import cartopy.crs as ccrs
import matplotlib as mpl
import matplotlib.pyplot as plt
from numpy import corrcoef
sns.set_context('paper')
from monet.plots.taylordiagram import TaylorDiagram as td
from matplotlib.colors import ListedColormap
from monet.util.tools import get_epa_region_bounds as get_epa_bounds
import math
from ..plots import savefig
# from util import write_ncf
def make_24hr_regulatory(df, col=None):
"""Calculates 24-hour averages
Parameters
----------
df : dataframe
Model/obs pair of hourly data
col : str
Column label of observation variable to apply the calculation
Returns
-------
dataframe
dataframe with applied calculation
"""
return calc_24hr_ave(df, col)
def make_8hr_regulatory(df, col=None):
"""Calculates 8-hour rolling average daily
Parameters
----------
df : dataframe
Model/obs pair of hourly data
col : str
Column label of observation variable to apply the calculation
Returns
-------
dataframe
dataframe with applied calculation
"""
return calc_8hr_rolling_max(df, col, window=8)
def calc_default_colors(p_index):
"""List of default colors, lines, and markers to use if user does not
specify them in the input yaml file.
Parameters
----------
p_index : integer
Number of pairs in analysis class
Returns
-------
list
List of dictionaries containing default colors, lines, and
markers to use for plotting for the number of pairs in analysis class
"""
x = [dict(color='b', linestyle='--',marker='x'),
dict(color='g', linestyle='-.',marker='o'),
dict(color='r', linestyle=':',marker='v'),
dict(color='c', linestyle='--',marker='^'),
dict(color='m', linestyle='-.',marker='s')]
#Repeat these 5 instances over and over if more than 5 lines.
return x[p_index % 5]
def new_color_map():
"""Creates new color map for difference plots
Returns
-------
colormap
Orange and blue color map
"""
top = mpl.cm.get_cmap('Blues_r', 128)
bottom = mpl.cm.get_cmap('Oranges', 128)
newcolors = np.vstack((top(np.linspace(0, 1, 128)),
bottom(np.linspace(0, 1, 128))))
return ListedColormap(newcolors, name='OrangeBlue')
def map_projection(f):
"""Defines map projection. This needs updating to make it more generic.
Parameters
----------
f : class
model class
Returns
-------
cartopy projection
projection to be used by cartopy in plotting
"""
import cartopy.crs as ccrs
if f.model.lower() == 'cmaq':
proj = ccrs.LambertConformal(
central_longitude=f.obj.XCENT, central_latitude=f.obj.YCENT)
elif f.model.lower() == 'wrfchem' or f.model.lower() == 'rapchem':
if f.obj.MAP_PROJ == 1:
proj = ccrs.LambertConformal(
central_longitude=f.obj.CEN_LON, central_latitude=f.obj.CEN_LAT)
        elif f.obj.MAP_PROJ == 6:
#Plate Carree is the equirectangular or equidistant cylindrical
proj = ccrs.PlateCarree(
central_longitude=f.obj.CEN_LON)
else:
raise NotImplementedError('WRFChem projection not supported. Please add to surfplots.py')
#Need to add the projections you want to use for the other models here.
elif f.model.lower() == 'rrfs':
proj = ccrs.LambertConformal(
central_longitude=f.obj.cen_lon, central_latitude=f.obj.cen_lat)
elif f.model.lower() in ['cesm_fv','cesm_se']:
proj = ccrs.PlateCarree()
elif f.model.lower() == 'random':
proj = ccrs.PlateCarree()
else: #Let's change this tomorrow to just plot as lambert conformal if nothing provided.
raise NotImplementedError('Projection not defined for new model. Please add to surfplots.py')
return proj
def make_spatial_bias(df, column_o=None, label_o=None, column_m=None,
label_m=None, ylabel = None, vdiff=None,
outname = 'plot',
domain_type=None, domain_name=None, fig_dict=None,
text_dict=None,debug=False):
"""Creates surface spatial bias plot.
Parameters
----------
df : dataframe
model/obs pair data to plot
column_o : str
Column label of observation variable to plot
label_o : str
Name of observation variable to use in plot title
column_m : str
Column label of model variable to plot
label_m : str
Name of model variable to use in plot title
ylabel : str
Title of colorbar axis
vdiff : real number
Min and max value to use on colorbar axis
outname : str
file location and name of plot (do not include .png)
domain_type : str
Domain type specified in input yaml file
domain_name : str
Domain name specified in input yaml file
fig_dict : dictionary
Dictionary containing information about figure
text_dict : dictionary
Dictionary containing information about text
debug : boolean
Whether to plot interactively (True) or not (False). Flag for
submitting jobs to supercomputer turn off interactive mode.
Returns
-------
plot
surface bias plot
"""
if debug == False:
plt.ioff()
def_map = dict(states=True,figsize=[10, 5])
if fig_dict is not None:
map_kwargs = {**def_map, **fig_dict}
else:
map_kwargs = def_map
#If not specified use the PlateCarree projection
if 'crs' not in map_kwargs:
map_kwargs['crs'] = ccrs.PlateCarree()
#set default text size
def_text = dict(fontsize=20)
if text_dict is not None:
text_kwargs = {**def_text, **text_dict}
else:
text_kwargs = def_text
# set ylabel to column if not specified.
if ylabel is None:
ylabel = column_o
#Take the mean for each siteid
df_mean=df.groupby(['siteid'],as_index=False).mean()
#Specify val_max = vdiff. the sp_scatter_bias plot in MONET only uses the val_max value
#and then uses -1*val_max value for the minimum.
ax = monet.plots.sp_scatter_bias(
df_mean, col1=column_o, col2=column_m, map_kwargs=map_kwargs,val_max=vdiff,
cmap=new_color_map(), edgecolor='k',linewidth=.8)
if domain_type == 'all':
latmin= 25.0
lonmin=-130.0
latmax= 50.0
lonmax=-60.0
plt.title(domain_name + ': ' + label_m + ' - ' + label_o,fontweight='bold',**text_kwargs)
elif domain_type == 'epa_region' and domain_name is not None:
latmin,lonmin,latmax,lonmax,acro = get_epa_bounds(index=None,acronym=domain_name)
plt.title('EPA Region ' + domain_name + ': ' + label_m + ' - ' + label_o,fontweight='bold',**text_kwargs)
else:
latmin= math.floor(min(df.latitude))
lonmin= math.floor(min(df.longitude))
latmax= math.ceil(max(df.latitude))
lonmax= math.ceil(max(df.longitude))
plt.title(domain_name + ': ' + label_m + ' - ' + label_o,fontweight='bold',**text_kwargs)
if 'extent' not in map_kwargs:
map_kwargs['extent'] = [lonmin,lonmax,latmin,latmax]
ax.axes.set_extent(map_kwargs['extent'],crs=ccrs.PlateCarree())
#Update colorbar
f = plt.gcf()
model_ax = f.get_axes()[0]
cax = f.get_axes()[1]
#get the position of the plot axis and use this to rescale nicely the color bar to the height of the plot.
position_m = model_ax.get_position()
position_c = cax.get_position()
cax.set_position([position_c.x0, position_m.y0, position_c.x1 - position_c.x0, (position_m.y1-position_m.y0)*1.1])
cax.set_ylabel(ylabel,fontweight='bold',**text_kwargs)
cax.tick_params(labelsize=text_kwargs['fontsize']*0.8,length=10.0,width=2.0,grid_linewidth=2.0)
#plt.tight_layout(pad=0)
savefig(outname + '.png', loc=4, logo_height=120)
def make_timeseries(df, column=None, label=None, ax=None, avg_window=None, ylabel=None,
vmin = None, vmax = None,
domain_type=None, domain_name=None,
plot_dict=None, fig_dict=None, text_dict=None,debug=False):
"""Creates timeseries plot.
Parameters
----------
df : dataframe
model/obs pair data to plot
column : str
Column label of variable to plot
label : str
Name of variable to use in plot legend
ax : ax
matplotlib ax from previous occurrence so can overlay obs and model
results on the same plot
avg_window : rule
Pandas resampling rule (e.g., 'H', 'D')
ylabel : str
Title of y-axis
vmin : real number
Min value to use on y-axis
vmax : real number
Max value to use on y-axis
domain_type : str
Domain type specified in input yaml file
domain_name : str
Domain name specified in input yaml file
plot_dict : dictionary
Dictionary containing information about plotting for each pair
(e.g., color, linestyle, markerstyle)
fig_dict : dictionary
Dictionary containing information about figure
text_dict : dictionary
Dictionary containing information about text
debug : boolean
Whether to plot interactively (True) or not (False). Flag for
submitting jobs to supercomputer turn off interactive mode.
Returns
-------
ax
matplotlib ax such that driver.py can iterate to overlay multiple models on the
same plot
"""
if debug == False:
plt.ioff()
#First define items for all plots
#set default text size
def_text = dict(fontsize=14)
if text_dict is not None:
text_kwargs = {**def_text, **text_dict}
else:
text_kwargs = def_text
# set ylabel to column if not specified.
if ylabel is None:
ylabel = column
if label is not None:
plot_dict['label'] = label
if vmin is not None and vmax is not None:
plot_dict['ylim'] = [vmin,vmax]
#scale the fontsize for the x and y labels by the text_kwargs
plot_dict['fontsize'] = text_kwargs['fontsize']*0.8
#Then, if no plot has been created yet, create a plot and plot the obs.
if ax is None:
#First define the colors for the observations.
obs_dict = dict(color='k', linestyle='-',marker='*', linewidth=1.2, markersize=6.)
if plot_dict is not None:
#Whatever is not defined in the yaml file is filled in with the obs_dict here.
plot_kwargs = {**obs_dict, **plot_dict}
else:
plot_kwargs = obs_dict
# create the figure
if fig_dict is not None:
f,ax = plt.subplots(**fig_dict)
else:
f,ax = plt.subplots(figsize=(10,6))
# plot the line
if avg_window is None:
ax = df[column].plot(ax=ax, **plot_kwargs)
else:
ax = df[column].resample(avg_window).mean().plot(ax=ax, legend=True, **plot_kwargs)
# If plot has been created add to the current axes.
else:
# this means that an axis handle already exists and use it to plot the model output.
if avg_window is None:
ax = df[column].plot(ax=ax, legend=True, **plot_dict)
else:
ax = df[column].resample(avg_window).mean().plot(ax=ax, legend=True, **plot_dict)
#Set parameters for all plots
ax.set_ylabel(ylabel,fontweight='bold',**text_kwargs)
ax.set_xlabel(df.index.name,fontweight='bold',**text_kwargs)
ax.legend(frameon=False,fontsize=text_kwargs['fontsize']*0.8)
ax.tick_params(axis='both',length=10.0,direction='inout')
ax.tick_params(axis='both',which='minor',length=5.0,direction='out')
ax.legend(frameon=False,fontsize=text_kwargs['fontsize']*0.8,
bbox_to_anchor=(1.0, 0.9), loc='center left')
if domain_type is not None and domain_name is not None:
if domain_type == 'epa_region':
ax.set_title('EPA Region ' + domain_name,fontweight='bold',**text_kwargs)
else:
ax.set_title(domain_name,fontweight='bold',**text_kwargs)
return ax
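
# --- Illustrative sketch (added for clarity; not part of the original source).
# It shows the intended overlay pattern for make_timeseries(): the ax returned
# for the observations is passed back in for each model. The paired dataframe,
# column names and labels are assumptions for demonstration only.
def _example_timeseries_overlay(paired_df):
    ax = make_timeseries(paired_df, column='obs_o3', label='AirNow',
                         ylabel='O3 (ppbv)', plot_dict=dict(color='k'))
    ax = make_timeseries(paired_df, column='mod_o3', label='CMAQ', ax=ax,
                         ylabel='O3 (ppbv)', plot_dict=calc_default_colors(0))
    return ax
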
def make_taylor(df, column_o=None, label_o='Obs', column_m=None, label_m='Model',
dia=None, ylabel=None, ty_scale=1.5,
domain_type=None, domain_name=None,
plot_dict=None, fig_dict=None, text_dict=None,debug=False):
"""Creates taylor plot. Note sometimes model values are off the scale
on this plot. This will be fixed soon.
Parameters
----------
df : dataframe
model/obs pair data to plot
column_o : str
Column label of observational variable to plot
label_o : str
Name of observational variable to use in plot legend
column_m : str
Column label of model variable to plot
label_m : str
Name of model variable to use in plot legend
dia : dia
matplotlib ax from previous occurrence so can overlay obs and model
results on the same plot
ylabel : str
Title of x-axis
ty_scale : real
Scale to apply to taylor plot to control the plotting range
domain_type : str
Domain type specified in input yaml file
domain_name : str
Domain name specified in input yaml file
plot_dict : dictionary
Dictionary containing information about plotting for each pair
(e.g., color, linestyle, markerstyle)
fig_dict : dictionary
Dictionary containing information about figure
text_dict : dictionary
Dictionary containing information about text
debug : boolean
Whether to plot interactively (True) or not (False). Flag for
submitting jobs to supercomputer turn off interactive mode.
Returns
-------
class
Taylor diagram class defined in MONET
"""
#First define items for all plots
if debug == False:
plt.ioff()
#set default text size
def_text = dict(fontsize=14.0)
if text_dict is not None:
text_kwargs = {**def_text, **text_dict}
else:
text_kwargs = def_text
# set ylabel to column if not specified.
if ylabel is None:
ylabel = column_o
#Then, if no plot has been created yet, create a plot and plot the first pair.
if dia is None:
# create the figure
if fig_dict is not None:
f = plt.figure(**fig_dict)
else:
f = plt.figure(figsize=(12,10))
sns.set_style('ticks')
# plot the line
dia = td(df[column_o].std(), scale=ty_scale, fig=f,
rect=111, label=label_o)
plt.grid(linewidth=1, alpha=.5)
cc = corrcoef(df[column_o].values, df[column_m].values)[0, 1]
dia.add_sample(df[column_m].std(), cc, zorder=9, label=label_m, **plot_dict)
# If plot has been created add to the current axes.
else:
# this means that an axis handle already exists and use it to plot another model
cc = corrcoef(df[column_o].values, df[column_m].values)[0, 1]
dia.add_sample(df[column_m].std(), cc, zorder=9, label=label_m, **plot_dict)
#Set parameters for all plots
contours = dia.add_contours(colors='0.5')
plt.clabel(contours, inline=1, fontsize=text_kwargs['fontsize']*0.8)
plt.grid(alpha=.5)
plt.legend(frameon=False,fontsize=text_kwargs['fontsize']*0.8,
bbox_to_anchor=(0.75, 0.93), loc='center left')
if domain_type is not None and domain_name is not None:
if domain_type == 'epa_region':
plt.title('EPA Region ' + domain_name,fontweight='bold',**text_kwargs)
else:
plt.title(domain_name,fontweight='bold',**text_kwargs)
ax = plt.gca()
ax.axis["left"].label.set_text('Standard Deviation: '+ylabel)
ax.axis["top"].label.set_text('Correlation')
ax.axis["left"].label.set_fontsize(text_kwargs['fontsize'])
ax.axis["top"].label.set_fontsize(text_kwargs['fontsize'])
ax.axis["left"].label.set_fontweight('bold')
ax.axis["top"].label.set_fontweight('bold')
ax.axis["top"].major_ticklabels.set_fontsize(text_kwargs['fontsize']*0.8)
ax.axis["left"].major_ticklabels.set_fontsize(text_kwargs['fontsize']*0.8)
ax.axis["right"].major_ticklabels.set_fontsize(text_kwargs['fontsize']*0.8)
return dia
def make_spatial_overlay(df, vmodel, column_o=None, label_o=None, column_m=None,
label_m=None, ylabel = None, vmin=None,
vmax = None, nlevels = None, proj = None, outname = 'plot',
domain_type=None, domain_name=None, fig_dict=None,
text_dict=None,debug=False):
"""Creates spatial overlay plot.
Parameters
----------
df : dataframe
model/obs pair data to plot
vmodel: dataarray
slice of model data to plot
column_o : str
Column label of observation variable to plot
label_o : str
Name of observation variable to use in plot title
column_m : str
Column label of model variable to plot
label_m : str
Name of model variable to use in plot title
ylabel : str
Title of colorbar axis
vmin : real number
Min value to use on colorbar axis
vmax : real number
Max value to use on colorbar axis
nlevels: integer
Number of levels used in colorbar axis
proj: cartopy projection
cartopy projection to use in plot
outname : str
file location and name of plot (do not include .png)
domain_type : str
Domain type specified in input yaml file
domain_name : str
Domain name specified in input yaml file
fig_dict : dictionary
Dictionary containing information about figure
text_dict : dictionary
Dictionary containing information about text
debug : boolean
Whether to plot interactively (True) or not (False). Flag for
submitting jobs to supercomputer turn off interactive mode.
Returns
-------
plot
spatial overlay plot
"""
if debug == False:
plt.ioff()
def_map = dict(states=True,figsize=[15, 8])
if fig_dict is not None:
map_kwargs = {**def_map, **fig_dict}
else:
map_kwargs = def_map
#set default text size
def_text = dict(fontsize=20)
if text_dict is not None:
text_kwargs = {**def_text, **text_dict}
else:
text_kwargs = def_text
# set ylabel to column if not specified.
if ylabel is None:
ylabel = column_o
#Take the mean for each siteid
df_mean=df.groupby(['siteid'],as_index=False).mean()
#Take the mean over time for the model output
vmodel_mean = vmodel[column_m].mean(dim='time').squeeze()
#Determine the domain
if domain_type == 'all':
latmin= 25.0
lonmin=-130.0
latmax= 50.0
lonmax=-60.0
title_add = domain_name + ': '
elif domain_type == 'epa_region' and domain_name is not None:
latmin,lonmin,latmax,lonmax,acro = get_epa_bounds(index=None,acronym=domain_name)
title_add = 'EPA Region ' + domain_name + ': '
else:
latmin= math.floor(min(df.latitude))
lonmin= math.floor(min(df.longitude))
latmax= math.ceil(max(df.latitude))
lonmax= math.ceil(max(df.longitude))
title_add = domain_name + ': '
#Map the model output first.
cbar_kwargs = dict(aspect=15,shrink=.8)
#Add options that this could be included in the fig_kwargs in yaml file too.
if 'extent' not in map_kwargs:
map_kwargs['extent'] = [lonmin,lonmax,latmin,latmax]
if 'crs' not in map_kwargs:
map_kwargs['crs'] = proj
#With pcolormesh, a Warning shows because nearest interpolation may not work for non-monotonically increasing regions.
#Because I do not want to pull in the edges of the lat lon for every model I switch to contourf.
#First determine colorbar, so can use the same for both contourf and scatter
    if vmin is None and vmax is None:
vmin = np.min((vmodel_mean.quantile(0.01), df_mean[column_o].quantile(0.01)))
vmax = np.max((vmodel_mean.quantile(0.99), df_mean[column_o].quantile(0.99)))
    if nlevels is None:
nlevels = 21
clevel = np.linspace(vmin,vmax,nlevels)
cmap = mpl.cm.get_cmap('Spectral_r',nlevels-1)
norm = mpl.colors.BoundaryNorm(clevel, ncolors=cmap.N, clip=False)
# For unstructured grid, we need a more advanced plotting code
    # Call an external function (Plot_2D)
if vmodel.attrs.get('mio_has_unstructured_grid',False):
from .Plot_2D import Plot_2D
fig = plt.figure( figsize=fig_dict['figsize'] )
ax = fig.add_subplot(1,1,1,projection=proj)
p2d = Plot_2D( vmodel_mean, scrip_file=vmodel.mio_scrip_file, cmap=cmap, #colorticks=clevel, colorlabels=clevel,
cmin=vmin, cmax=vmax, lon_range=[lonmin,lonmax], lat_range=[latmin,latmax],
ax=ax, state=fig_dict['states'] )
else:
#I add extend='both' here because the colorbar is setup to plot the values outside the range
ax = vmodel_mean.monet.quick_contourf(cbar_kwargs=cbar_kwargs, figsize=map_kwargs['figsize'], map_kws=map_kwargs,
robust=True, norm=norm, cmap=cmap, levels=clevel, extend='both')
plt.gcf().canvas.draw()
plt.tight_layout(pad=0)
plt.title(title_add + label_o + ' overlaid on ' + label_m,fontweight='bold',**text_kwargs)
ax.axes.scatter(df_mean.longitude.values, df_mean.latitude.values,s=30,c=df_mean[column_o],
transform=ccrs.PlateCarree(), edgecolor='b', linewidth=.50, norm=norm,
cmap=cmap)
ax.axes.set_extent(map_kwargs['extent'],crs=ccrs.PlateCarree())
#Uncomment these lines if you update above just to verify colorbars are identical.
#Also specify plot above scatter = ax.axes.scatter etc.
#cbar = ax.figure.get_axes()[1]
#plt.colorbar(scatter,ax=ax)
#Update colorbar
# Call below only for structured grid cases
if not vmodel.attrs.get('mio_has_unstructured_grid',False):
f = plt.gcf()
model_ax = f.get_axes()[0]
cax = f.get_axes()[1]
#get the position of the plot axis and use this to rescale nicely the color bar to the height of the plot.
position_m = model_ax.get_position()
position_c = cax.get_position()
cax.set_position([position_c.x0, position_m.y0, position_c.x1 - position_c.x0, (position_m.y1-position_m.y0)*1.1])
cax.set_ylabel(ylabel,fontweight='bold',**text_kwargs)
cax.tick_params(labelsize=text_kwargs['fontsize']*0.8,length=10.0,width=2.0,grid_linewidth=2.0)
#plt.tight_layout(pad=0)
savefig(outname + '.png', loc=4, logo_height=100, dpi=150)
return ax
def calculate_boxplot(df, column=None, label=None, plot_dict=None, comb_bx = None, label_bx = None):
"""Combines data into acceptable format for box-plot
Parameters
----------
df : dataframe
Model/obs pair object
column : str
Column label of variable to plot
label : str
Name of variable to use in plot legend
comb_bx: dataframe
dataframe containing information to create box-plot from previous
occurrence so can overlay multiple model results on plot
label_bx: list
list of string labels to use in box-plot from previous occurrence so
can overlay multiple model results on plot
Returns
-------
dataframe, list
dataframe containing information to create box-plot
list of string labels to use in box-plot
"""
if comb_bx is None and label_bx is None:
comb_bx = | pd.DataFrame() | pandas.DataFrame |
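
# --- Illustrative sketch (added for clarity; not part of the original source).
# It mimics what calculate_boxplot() accumulates: one column per obs/model label
# so several models can be drawn in a single box-plot. Data and labels invented.
import pandas as pd

_comb_bx = pd.DataFrame()
_label_bx = []
for _lab, _vals in [('AirNow', [30., 35., 32.]), ('CMAQ', [28., 40., 31.])]:
    _comb_bx[_lab] = pd.Series(_vals)
    _label_bx.append(_lab)
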
import csv
import itertools
import math
import re
from pathlib import Path
from typing import *
import pandas
from loguru import logger
NumericType = Union[int, float]
IterableValues = Union[List[NumericType], pandas.Series]
NUMERIC_REGEX = re.compile(r"^.?(?P<number>[\d]+)")
def _coerce_to_series(item: Any) -> pandas.Series:
if not isinstance(item, pandas.Series):
item = | pandas.Series(item) | pandas.Series |
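
# --- Illustrative sketch (added for clarity; not part of the original source).
# How the helpers above behave on toy input: NUMERIC_REGEX pulls the leading
# digits after at most one prefix character, and _coerce_to_series is expected
# to hand back a pandas.Series (assuming the truncated body returns `item`).
_m = NUMERIC_REGEX.match("x42abc")
_leading_digits = _m.group("number") if _m else None  # -> "42"
# _coerce_to_series([1, 2, 3])  # -> pandas.Series([1, 2, 3])
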
#!/usr/bin/python3
# coding: utf-8
import sys
import os.path
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# get_ipython().run_line_magic('matplotlib', 'inline')
# plt.close('all')
# dpi = 300
# figsize = (1920 / dpi, 1080 / dpi)
from plotHitMissUnkRate import plotHitMissUnkRate
def getExamplesDf(path):
assert os.path.isfile(path), "file '%s' not found." % path
df = pd.read_csv(filepath_or_buffer=path, header=None)
df['id'] = df.index
df['class'] = df[22]
return df
def getOriginalMatchesDf(path):
assert os.path.isfile(path), "file '%s' not found." % path
df = pd.read_table(filepath_or_buffer=path, header=None)
df.columns = ['id', 'class', 'label']
df = df[df['id'].str.startswith('Ex:')]
def cleanLabel(text):
label = text.replace('Classe MINAS:', '').strip()
if label == 'Unk':
return '-'
if label.startswith('C '):
return label.replace('C ', '')
return label
return pd.DataFrame({
'id': df['id'].apply(lambda x: x.replace('Ex:', '').strip()).astype(int) - 1,
'label': df['label'].apply(cleanLabel),
})
def getMatchesDf(path):
assert os.path.isfile(path), "file '%s' not found." % path
df = pd.read_csv(filepath_or_buffer=path)
df['id'] = df['#pointId'].astype(int)
return df
def merge(exDf, maDf):
def checkCols(df, cols):
return pd.Series(cols).isin(df.columns).all()
assert checkCols(exDf, ['id', 'class'])
assert checkCols(maDf, ['id', 'label'])
return | pd.merge(exDf[['id', 'class']], maDf[['id', 'label']], on='id', how='left') | pandas.merge |
import io
import os
import time
import re
import string
from PIL import Image, ImageFilter
import requests
import numpy as np
import pandas as pd
from scipy.fftpack import fft
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
# import sklearn.metrics as sm
from keras.preprocessing import image
from keras.applications.inception_v3 \
import decode_predictions, preprocess_input
from keras.applications.inception_v3 import InceptionV3
class Unicorn():
def __init__(self, weights_path):
self.cnn_features = True
self.target_size = (299, 299)
self.alpha_fill = '#ffffff'
self.prep_func = preprocess_input
self.scale_features = True
self.n_clusters = 4
self.n_pca_comps = 10
self.model = InceptionV3(weights=weights_path)
def load_image(self, img_path):
''' load image given path and convert to an array
'''
img = image.load_img(img_path, target_size=self.target_size)
x = image.img_to_array(img)
return self.prep_func(x)
def load_image_from_web(self, image_url):
''' load an image from a provided hyperlink
'''
# get image
response = requests.get(image_url)
with Image.open(io.BytesIO(response.content)) as img:
# fill transparency if needed
if img.mode in ('RGBA', 'LA'):
img = self.strip_alpha_channel(img)
# convert to jpeg
            if img.format != 'JPEG':  # PIL reports formats in upper case
img = img.convert('RGB')
img.save('target_img.jpg')
def validate_url(self, url):
''' takes input string and returns True if string is
a url.
'''
url_validator = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return bool(url_validator.match(url))
def featurize_image(self, image_array):
''' Returns binary array with ones where the model predicts that
the image contains an instance of one of the target classes
(specified by wordnet id)
'''
predictions = self.model.predict(image_array)
return predictions
def strip_alpha_channel(self, image):
''' Strip the alpha channel of an image and fill with fill color
'''
background = Image.new(image.mode[:-1], image.size, self.alpha_fill)
background.paste(image, image.split()[-1])
return background
def fft_images(self, image_paths):
''' Returns the fft transform of images from paths provided as a list
'''
num_images = len(image_paths)
feature_data = pd.DataFrame()
for i, image_path in enumerate(image_paths):
try:
if self.validate_url(image_path):
filename = 'target_img.jpg'
self.load_image_from_web(image_path)
else:
filename = image_path
if i % 10 == 0:
print('processing image {}/{}'.format(i + 1, num_images))
X = np.array([self.load_image(filename)])
# # # flatten and apply fft
image_features = fft(X.flatten())
if filename == 'target_img.jpg':
os.remove('target_img.jpg')
feature_data = feature_data.append(
pd.Series(image_features),
ignore_index=True)
# feature_data = feature_data.append(
# pd.Series(image_features.flatten()),
# ignore_index=True)
except Exception as e:
print(e)
feature_data = feature_data.append(
pd.Series([np.nan]),
ignore_index=True)
feature_data = feature_data.set_index(
pd.Series(
[i.split('/')[-1] for i in image_paths]
)
)
return feature_data
def get_net_features(self, image_paths):
''' Returns features of images (defaults to inception V3:imagenet wts)
from paths provided as a list
'''
# from keras.applications.inception_v3 import InceptionV3
# self.model = InceptionV3(weights='imagenet', include_top=False)
num_images = len(image_paths)
feature_data = pd.DataFrame()
for i, image_path in enumerate(image_paths):
try:
if self.validate_url(image_path):
filename = 'target_img.jpg'
self.load_image_from_web(image_path)
else:
filename = image_path
if i % 10 == 0:
print('processing image {}/{}'.format(i + 1, num_images))
X = np.array([self.load_image(filename)])
# # # flatten and get
image_features = self.featurize_image(X)
if filename == 'target_img.jpg':
os.remove('target_img.jpg')
feature_data = feature_data.append(
pd.Series(image_features.flatten()),
ignore_index=True)
except Exception as e:
print(e)
feature_data = feature_data.append(
pd.Series([0]),
ignore_index=True)
feature_data = feature_data.set_index(
pd.Series(
[i.split('/')[-1] for i in image_paths]
)
)
feature_data = feature_data.dropna(how='any')
return feature_data
    def haar_wavelet_features(self):
pass
def pca_image_features(self, feature_data):
''' Runs PCA on images in dataframe where rows are images
and columns are features.
'''
if self.scale_features:
# # # standardize features? eg for PCA
feature_data = pd.DataFrame(
data=StandardScaler().fit_transform(feature_data)
)
# # # apply PCA feature matrix
pca = PCA(n_components=self.n_pca_comps)
pca_features = pca.fit_transform(
feature_data
)
pca_feature_data = pd.DataFrame(
data=pca_features,
columns=['pc' + str(i) for i in range(0, self.n_pca_comps)])
        pca_feature_data = pca_feature_data.set_index(feature_data.index)
return pca_feature_data
def calc_distance(self, feature_data):
''' Calculate pairwise feature distance between images in a dataframe
where rows are images and columns are features
'''
from scipy.spatial.distance import squareform, pdist
pwise_dist_df = pd.DataFrame(
squareform(
pdist(feature_data)
),
columns=feature_data.index,
index=feature_data.index
)
return pwise_dist_df
def run_kmeans(self, feature_data, target_data):
model = KMeans(n_clusters=self.n_clusters)
model.fit(target_data)
output_data = pd.concat(
{'label': pd.Series(feature_data.index),
'pred_class': pd.Series(model.labels_)},
axis=1
)
return output_data
def run_knn(self, target_data):
nbrs = NearestNeighbors(
n_neighbors=2,
algorithm='ball_tree'
).fit(target_data)
distances, indices = nbrs.kneighbors(target_data)
output_data = pd.concat(
{'indices_0': pd.Series(target_data.index[indices[:, 0]]),
'indices_1': | pd.Series(target_data.index[indices[:, 1]]) | pandas.Series |
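
# --- Illustrative sketch (added for clarity; not part of the original source).
# It reproduces the pairwise-distance idea used in Unicorn.calc_distance() on a
# tiny invented feature matrix: squareform(pdist(...)) gives a symmetric
# image-by-image distance matrix.
import pandas as pd
from scipy.spatial.distance import pdist, squareform

_feats = pd.DataFrame([[0.0, 0.0], [3.0, 4.0]], index=['img_a.jpg', 'img_b.jpg'])
_dists = pd.DataFrame(squareform(pdist(_feats)),
                      index=_feats.index, columns=_feats.index)
# _dists.loc['img_a.jpg', 'img_b.jpg'] == 5.0
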
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from datetime import time
from os.path import abspath, dirname, join
from unittest import TestCase
import typing
import re
import functools
import itertools
import pathlib
from collections import abc
import pytest
import numpy as np
import pandas as pd
import pandas.testing as tm
from pandas import Timedelta, read_csv
from parameterized import parameterized
import pytz
from pytz import UTC
from toolz import concat
from exchange_calendars import get_calendar
from exchange_calendars.calendar_utils import (
ExchangeCalendarDispatcher,
_default_calendar_aliases,
_default_calendar_factories,
)
from exchange_calendars.errors import (
CalendarNameCollision,
InvalidCalendarName,
NoSessionsError,
)
from exchange_calendars.exchange_calendar import ExchangeCalendar, days_at_time
from .test_utils import T
class FakeCalendar(ExchangeCalendar):
name = "DMY"
tz = "Asia/Ulaanbaatar"
open_times = ((None, time(11, 13)),)
close_times = ((None, time(11, 49)),)
class CalendarRegistrationTestCase(TestCase):
def setup_method(self, method):
self.dummy_cal_type = FakeCalendar
self.dispatcher = ExchangeCalendarDispatcher({}, {}, {})
def teardown_method(self, method):
self.dispatcher.clear_calendars()
def test_register_calendar(self):
# Build a fake calendar
dummy_cal = self.dummy_cal_type()
# Try to register and retrieve the calendar
self.dispatcher.register_calendar("DMY", dummy_cal)
retr_cal = self.dispatcher.get_calendar("DMY")
self.assertEqual(dummy_cal, retr_cal)
# Try to register again, expecting a name collision
with self.assertRaises(CalendarNameCollision):
self.dispatcher.register_calendar("DMY", dummy_cal)
# Deregister the calendar and ensure that it is removed
self.dispatcher.deregister_calendar("DMY")
with self.assertRaises(InvalidCalendarName):
self.dispatcher.get_calendar("DMY")
def test_register_calendar_type(self):
self.dispatcher.register_calendar_type("DMY", self.dummy_cal_type)
retr_cal = self.dispatcher.get_calendar("DMY")
self.assertEqual(self.dummy_cal_type, type(retr_cal))
def test_both_places_are_checked(self):
dummy_cal = self.dummy_cal_type()
# if instance is registered, can't register type with same name
self.dispatcher.register_calendar("DMY", dummy_cal)
with self.assertRaises(CalendarNameCollision):
self.dispatcher.register_calendar_type("DMY", type(dummy_cal))
self.dispatcher.deregister_calendar("DMY")
# if type is registered, can't register instance with same name
self.dispatcher.register_calendar_type("DMY", type(dummy_cal))
with self.assertRaises(CalendarNameCollision):
self.dispatcher.register_calendar("DMY", dummy_cal)
def test_force_registration(self):
self.dispatcher.register_calendar("DMY", self.dummy_cal_type())
first_dummy = self.dispatcher.get_calendar("DMY")
# force-register a new instance
self.dispatcher.register_calendar("DMY", self.dummy_cal_type(), force=True)
second_dummy = self.dispatcher.get_calendar("DMY")
self.assertNotEqual(first_dummy, second_dummy)
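
# --- Illustrative sketch (added for clarity; not part of the original test
# suite). It condenses the register/lookup/deregister flow exercised above into
# one helper using the same dispatcher API; it is never called during test
# collection.
def _example_dispatcher_roundtrip():
    dispatcher = ExchangeCalendarDispatcher({}, {}, {})
    dispatcher.register_calendar("DMY", FakeCalendar())
    cal = dispatcher.get_calendar("DMY")
    dispatcher.deregister_calendar("DMY")
    return cal
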
class DefaultsTestCase(TestCase):
def test_default_calendars(self):
dispatcher = ExchangeCalendarDispatcher(
calendars={},
calendar_factories=_default_calendar_factories,
aliases=_default_calendar_aliases,
)
# These are ordered aliases first, so that we can deregister the
# canonical factories when we're done with them, and we'll be done with
# them after they've been used by all aliases and by canonical name.
for name in concat([_default_calendar_aliases, _default_calendar_factories]):
self.assertIsNotNone(
dispatcher.get_calendar(name), "get_calendar(%r) returned None" % name
)
dispatcher.deregister_calendar(name)
class DaysAtTimeTestCase(TestCase):
@parameterized.expand(
[
# NYSE standard day
(
"2016-07-19",
0,
time(9, 31),
pytz.timezone("America/New_York"),
"2016-07-19 9:31",
),
# CME standard day
(
"2016-07-19",
-1,
time(17, 1),
pytz.timezone("America/Chicago"),
"2016-07-18 17:01",
),
# CME day after DST start
(
"2004-04-05",
-1,
time(17, 1),
pytz.timezone("America/Chicago"),
"2004-04-04 17:01",
),
# ICE day after DST start
(
"1990-04-02",
-1,
time(19, 1),
pytz.timezone("America/Chicago"),
"1990-04-01 19:01",
),
]
)
def test_days_at_time(self, day, day_offset, time_offset, tz, expected):
days = pd.DatetimeIndex([pd.Timestamp(day, tz=tz)])
result = days_at_time(days, time_offset, tz, day_offset)[0]
expected = pd.Timestamp(expected, tz=tz).tz_convert(UTC)
self.assertEqual(result, expected)
class ExchangeCalendarTestBase(object):
# Override in subclasses.
answer_key_filename = None
calendar_class = None
# Affects test_start_bound. Should be set to earliest date for which
# calendar can be instantiated, or None if no start bound.
START_BOUND: pd.Timestamp | None = None
# Affects test_end_bound. Should be set to latest date for which
# calendar can be instantiated, or None if no end bound.
END_BOUND: pd.Timestamp | None = None
# Affects tests that care about the empty periods between sessions. Should
# be set to False for 24/7 calendars.
GAPS_BETWEEN_SESSIONS = True
# Affects tests that care about early closes. Should be set to False for
# calendars that don't have any early closes.
HAVE_EARLY_CLOSES = True
# Affects tests that care about late opens. Since most do not, defaulting
# to False.
HAVE_LATE_OPENS = False
# Affects test_for_breaks. True if one or more calendar sessions has a
# break.
HAVE_BREAKS = False
# Affects test_session_has_break.
SESSION_WITH_BREAK = None # None if no session has a break
SESSION_WITHOUT_BREAK = T("2011-06-15") # None if all sessions have breaks
# Affects test_sanity_check_session_lengths. Should be set to the largest
# number of hours that ever appear in a single session.
MAX_SESSION_HOURS = 0
# Affects test_minute_index_to_session_labels.
# Change these if the start/end dates of your test suite don't contain the
# defaults.
MINUTE_INDEX_TO_SESSION_LABELS_START = pd.Timestamp("2011-01-04", tz=UTC)
MINUTE_INDEX_TO_SESSION_LABELS_END = pd.Timestamp("2011-04-04", tz=UTC)
# Affects tests around daylight savings. If possible, should contain two
# dates that are not both in the same daylight savings regime.
DAYLIGHT_SAVINGS_DATES = ["2004-04-05", "2004-11-01"]
# Affects test_start_end. Change these if your calendar start/end
# dates between 2010-01-03 and 2010-01-10 don't match the defaults.
TEST_START_END_FIRST = pd.Timestamp("2010-01-03", tz=UTC)
TEST_START_END_LAST = pd.Timestamp("2010-01-10", tz=UTC)
TEST_START_END_EXPECTED_FIRST = pd.Timestamp("2010-01-04", tz=UTC)
TEST_START_END_EXPECTED_LAST = pd.Timestamp("2010-01-08", tz=UTC)
@staticmethod
def load_answer_key(filename):
"""
Load a CSV from tests/resources/{filename}.csv
"""
fullpath = join(
dirname(abspath(__file__)),
"./resources",
filename + ".csv",
)
return read_csv(
fullpath,
index_col=0,
# NOTE: Merely passing parse_dates=True doesn't cause pandas to set
# the dtype correctly, and passing all reasonable inputs to the
# dtype kwarg cause read_csv to barf.
parse_dates=[0, 1, 2],
date_parser=lambda x: pd.Timestamp(x, tz=UTC),
)
@classmethod
def setup_class(cls):
cls.answers = cls.load_answer_key(cls.answer_key_filename)
cls.start_date = cls.answers.index[0]
cls.end_date = cls.answers.index[-1]
cls.calendar = cls.calendar_class(cls.start_date, cls.end_date)
cls.one_minute = pd.Timedelta(1, "T")
cls.one_hour = pd.Timedelta(1, "H")
cls.one_day = pd.Timedelta(1, "D")
cls.today = pd.Timestamp.now(tz="UTC").floor("D")
@classmethod
def teardown_class(cls):
cls.calendar = None
cls.answers = None
def test_bound_start(self):
if self.START_BOUND is not None:
cal = self.calendar_class(self.START_BOUND, self.today)
self.assertIsInstance(cal, ExchangeCalendar)
start = self.START_BOUND - pd.DateOffset(days=1)
with pytest.raises(ValueError, match=re.escape(f"{start}")):
self.calendar_class(start, self.today)
else:
# verify no bound imposed
cal = self.calendar_class(pd.Timestamp("1902-01-01", tz="UTC"), self.today)
self.assertIsInstance(cal, ExchangeCalendar)
def test_bound_end(self):
if self.END_BOUND is not None:
cal = self.calendar_class(self.today, self.END_BOUND)
self.assertIsInstance(cal, ExchangeCalendar)
end = self.END_BOUND + pd.DateOffset(days=1)
with pytest.raises(ValueError, match=re.escape(f"{end}")):
self.calendar_class(self.today, end)
else:
# verify no bound imposed
cal = self.calendar_class(self.today, pd.Timestamp("2050-01-01", tz="UTC"))
self.assertIsInstance(cal, ExchangeCalendar)
def test_sanity_check_session_lengths(self):
# make sure that no session is longer than self.MAX_SESSION_HOURS hours
for session in self.calendar.all_sessions:
o, c = self.calendar.open_and_close_for_session(session)
delta = c - o
self.assertLessEqual(delta.total_seconds() / 3600, self.MAX_SESSION_HOURS)
def test_calculated_against_csv(self):
tm.assert_index_equal(self.calendar.schedule.index, self.answers.index)
def test_adhoc_holidays_specification(self):
"""adhoc holidays should be tz-naive (#33, #39)."""
dti = pd.DatetimeIndex(self.calendar.adhoc_holidays)
assert dti.tz is None
def test_is_open_on_minute(self):
one_minute = pd.Timedelta(minutes=1)
m = self.calendar.is_open_on_minute
for market_minute in self.answers.market_open[1:]:
market_minute_utc = market_minute
# The exchange should be classified as open on its first minute
self.assertTrue(m(market_minute_utc, _parse=False))
if self.GAPS_BETWEEN_SESSIONS:
# Decrement minute by one, to minute where the market was not
# open
pre_market = market_minute_utc - one_minute
self.assertFalse(m(pre_market, _parse=False))
for market_minute in self.answers.market_close[:-1]:
close_minute_utc = market_minute
# should be open on its last minute
self.assertTrue(m(close_minute_utc, _parse=False))
if self.GAPS_BETWEEN_SESSIONS:
# increment minute by one minute, should be closed
post_market = close_minute_utc + one_minute
self.assertFalse(m(post_market, _parse=False))
def _verify_minute(
self,
calendar,
minute,
next_open_answer,
prev_open_answer,
next_close_answer,
prev_close_answer,
):
next_open = calendar.next_open(minute, _parse=False)
self.assertEqual(next_open, next_open_answer)
prev_open = self.calendar.previous_open(minute, _parse=False)
self.assertEqual(prev_open, prev_open_answer)
next_close = self.calendar.next_close(minute, _parse=False)
self.assertEqual(next_close, next_close_answer)
prev_close = self.calendar.previous_close(minute, _parse=False)
self.assertEqual(prev_close, prev_close_answer)
def test_next_prev_open_close(self):
# for each session, check:
# - the minute before the open (if gaps exist between sessions)
# - the first minute of the session
# - the second minute of the session
# - the minute before the close
# - the last minute of the session
# - the first minute after the close (if gaps exist between sessions)
opens = self.answers.market_open.iloc[1:-2]
closes = self.answers.market_close.iloc[1:-2]
previous_opens = self.answers.market_open.iloc[:-1]
previous_closes = self.answers.market_close.iloc[:-1]
next_opens = self.answers.market_open.iloc[2:]
next_closes = self.answers.market_close.iloc[2:]
for (
open_minute,
close_minute,
previous_open,
previous_close,
next_open,
next_close,
) in zip(
opens, closes, previous_opens, previous_closes, next_opens, next_closes
):
minute_before_open = open_minute - self.one_minute
# minute before open
if self.GAPS_BETWEEN_SESSIONS:
self._verify_minute(
self.calendar,
minute_before_open,
open_minute,
previous_open,
close_minute,
previous_close,
)
# open minute
self._verify_minute(
self.calendar,
open_minute,
next_open,
previous_open,
close_minute,
previous_close,
)
# second minute of session
self._verify_minute(
self.calendar,
open_minute + self.one_minute,
next_open,
open_minute,
close_minute,
previous_close,
)
# minute before the close
self._verify_minute(
self.calendar,
close_minute - self.one_minute,
next_open,
open_minute,
close_minute,
previous_close,
)
# the close
self._verify_minute(
self.calendar,
close_minute,
next_open,
open_minute,
next_close,
previous_close,
)
# minute after the close
if self.GAPS_BETWEEN_SESSIONS:
self._verify_minute(
self.calendar,
close_minute + self.one_minute,
next_open,
open_minute,
next_close,
close_minute,
)
def test_next_prev_minute(self):
all_minutes = self.calendar.all_minutes
# test 20,000 minutes because it takes too long to do the rest.
for idx, minute in enumerate(all_minutes[1:20000]):
self.assertEqual(
all_minutes[idx + 2], self.calendar.next_minute(minute, _parse=False)
)
self.assertEqual(
all_minutes[idx], self.calendar.previous_minute(minute, _parse=False)
)
# test a couple of non-market minutes
if self.GAPS_BETWEEN_SESSIONS:
for open_minute in self.answers.market_open[1:]:
hour_before_open = open_minute - self.one_hour
self.assertEqual(
open_minute,
self.calendar.next_minute(hour_before_open, _parse=False),
)
for close_minute in self.answers.market_close[1:]:
hour_after_close = close_minute + self.one_hour
self.assertEqual(
close_minute,
self.calendar.previous_minute(hour_after_close, _parse=False),
)
def test_date_to_session_label(self):
m = self.calendar.date_to_session_label
sessions = self.answers.index[:30] # first 30 sessions
# test for error if request session prior to first calendar session.
date = self.answers.index[0] - self.one_day
error_msg = (
"Cannot get a session label prior to the first calendar"
f" session ('{self.answers.index[0]}'). Consider passing"
" `direction` as 'next'."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(date, "previous", _parse=False)
# direction as "previous"
dates = pd.date_range(sessions[0], sessions[-1], freq="D")
last_session = None
for date in dates:
session_label = m(date, "previous", _parse=False)
if date in sessions:
assert session_label == date
last_session = session_label
else:
assert session_label == last_session
# direction as "next"
last_session = None
for date in dates.sort_values(ascending=False):
session_label = m(date, "next", _parse=False)
if date in sessions:
assert session_label == date
last_session = session_label
else:
assert session_label == last_session
# test for error if request session after last calendar session.
date = self.answers.index[-1] + self.one_day
error_msg = (
"Cannot get a session label later than the last calendar"
f" session ('{self.answers.index[-1]}'). Consider passing"
" `direction` as 'previous'."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(date, "next", _parse=False)
if self.GAPS_BETWEEN_SESSIONS:
not_sessions = dates[~dates.isin(sessions)][:5]
for not_session in not_sessions:
error_msg = (
f"`date` '{not_session}' does not represent a session. Consider"
" passing a `direction`."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(not_session, "none", _parse=False)
# test default behaviour
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(not_session, _parse=False)
# non-valid direction (can only be thrown if gaps between sessions)
error_msg = (
"'not a direction' is not a valid `direction`. Valid `direction`"
' values are "next", "previous" and "none".'
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(not_session, "not a direction", _parse=False)
def test_minute_to_session_label(self):
m = self.calendar.minute_to_session_label
# minute is prior to first session's open
minute_before_first_open = self.answers.iloc[0].market_open - self.one_minute
session_label = self.answers.index[0]
minutes_that_resolve_to_this_session = [
m(minute_before_first_open, _parse=False),
m(minute_before_first_open, direction="next", _parse=False),
]
unique_session_labels = set(minutes_that_resolve_to_this_session)
self.assertTrue(len(unique_session_labels) == 1)
self.assertIn(session_label, unique_session_labels)
with self.assertRaises(ValueError):
m(minute_before_first_open, direction="previous", _parse=False)
with self.assertRaises(ValueError):
m(minute_before_first_open, direction="none", _parse=False)
# minute is between first session's open and last session's close
for idx, (session_label, open_minute, close_minute, _, _) in enumerate(
self.answers.iloc[1:-2].itertuples(name=None)
):
hour_into_session = open_minute + self.one_hour
minute_before_session = open_minute - self.one_minute
minute_after_session = close_minute + self.one_minute
next_session_label = self.answers.index[idx + 2]
previous_session_label = self.answers.index[idx]
# verify that minutes inside a session resolve correctly
minutes_that_resolve_to_this_session = [
m(open_minute, _parse=False),
m(open_minute, direction="next", _parse=False),
m(open_minute, direction="previous", _parse=False),
m(open_minute, direction="none", _parse=False),
m(hour_into_session, _parse=False),
m(hour_into_session, direction="next", _parse=False),
m(hour_into_session, direction="previous", _parse=False),
m(hour_into_session, direction="none", _parse=False),
m(close_minute),
m(close_minute, direction="next", _parse=False),
m(close_minute, direction="previous", _parse=False),
m(close_minute, direction="none", _parse=False),
session_label,
]
if self.GAPS_BETWEEN_SESSIONS:
minutes_that_resolve_to_this_session.append(
m(minute_before_session, _parse=False)
)
minutes_that_resolve_to_this_session.append(
m(minute_before_session, direction="next", _parse=False)
)
minutes_that_resolve_to_this_session.append(
m(minute_after_session, direction="previous", _parse=False)
)
self.assertTrue(
all(
x == minutes_that_resolve_to_this_session[0]
for x in minutes_that_resolve_to_this_session
)
)
minutes_that_resolve_to_next_session = [
m(minute_after_session, _parse=False),
m(minute_after_session, direction="next", _parse=False),
next_session_label,
]
self.assertTrue(
all(
x == minutes_that_resolve_to_next_session[0]
for x in minutes_that_resolve_to_next_session
)
)
self.assertEqual(
m(minute_before_session, direction="previous", _parse=False),
previous_session_label,
)
if self.GAPS_BETWEEN_SESSIONS:
# Make sure we use the cache correctly
minutes_that_resolve_to_different_sessions = [
m(minute_after_session, direction="next", _parse=False),
m(minute_after_session, direction="previous", _parse=False),
m(minute_after_session, direction="next", _parse=False),
]
self.assertEqual(
minutes_that_resolve_to_different_sessions,
[next_session_label, session_label, next_session_label],
)
# make sure that exceptions are raised at the right time
with self.assertRaises(ValueError):
m(open_minute, "asdf", _parse=False)
if self.GAPS_BETWEEN_SESSIONS:
with self.assertRaises(ValueError):
m(minute_before_session, direction="none", _parse=False)
# minute is later than last session's close
minute_after_last_close = self.answers.iloc[-1].market_close + self.one_minute
session_label = self.answers.index[-1]
minute_that_resolves_to_session_label = m(
minute_after_last_close, direction="previous", _parse=False
)
self.assertEqual(session_label, minute_that_resolves_to_session_label)
with self.assertRaises(ValueError):
m(minute_after_last_close, _parse=False)
with self.assertRaises(ValueError):
m(minute_after_last_close, direction="next", _parse=False)
with self.assertRaises(ValueError):
m(minute_after_last_close, direction="none", _parse=False)
@parameterized.expand(
[
(1, 0),
(2, 0),
(2, 1),
]
)
def test_minute_index_to_session_labels(self, interval, offset):
minutes = self.calendar.minutes_for_sessions_in_range(
self.MINUTE_INDEX_TO_SESSION_LABELS_START,
self.MINUTE_INDEX_TO_SESSION_LABELS_END,
)
minutes = minutes[range(offset, len(minutes), interval)]
np.testing.assert_array_equal(
pd.DatetimeIndex(minutes.map(self.calendar.minute_to_session_label)),
self.calendar.minute_index_to_session_labels(minutes),
)
def test_next_prev_session(self):
session_labels = self.answers.index[1:-2]
max_idx = len(session_labels) - 1
# the very first session
first_session_label = self.answers.index[0]
with self.assertRaises(ValueError):
self.calendar.previous_session_label(first_session_label, _parse=False)
# all the sessions in the middle
for idx, session_label in enumerate(session_labels):
if idx < max_idx:
self.assertEqual(
self.calendar.next_session_label(session_label, _parse=False),
session_labels[idx + 1],
)
if idx > 0:
self.assertEqual(
self.calendar.previous_session_label(session_label, _parse=False),
session_labels[idx - 1],
)
# the very last session
last_session_label = self.answers.index[-1]
with self.assertRaises(ValueError):
self.calendar.next_session_label(last_session_label, _parse=False)
@staticmethod
def _find_full_session(calendar):
for session_label in calendar.schedule.index:
if session_label not in calendar.early_closes:
return session_label
return None
def test_minutes_for_period(self):
# full session
# find a session that isn't an early close. start from the first
# session, should be quick.
full_session_label = self._find_full_session(self.calendar)
if full_session_label is None:
raise ValueError("Cannot find a full session to test!")
minutes = self.calendar.minutes_for_session(full_session_label)
_open, _close = self.calendar.open_and_close_for_session(full_session_label)
_break_start, _break_end = self.calendar.break_start_and_end_for_session(
full_session_label
)
if not pd.isnull(_break_start):
constructed_minutes = np.concatenate(
[
pd.date_range(start=_open, end=_break_start, freq="min"),
pd.date_range(start=_break_end, end=_close, freq="min"),
]
)
else:
constructed_minutes = pd.date_range(start=_open, end=_close, freq="min")
np.testing.assert_array_equal(
minutes,
constructed_minutes,
)
# early close period
if self.HAVE_EARLY_CLOSES:
early_close_session_label = self.calendar.early_closes[0]
minutes_for_early_close = self.calendar.minutes_for_session(
early_close_session_label
)
_open, _close = self.calendar.open_and_close_for_session(
early_close_session_label
)
np.testing.assert_array_equal(
minutes_for_early_close,
pd.date_range(start=_open, end=_close, freq="min"),
)
# late open period
if self.HAVE_LATE_OPENS:
late_open_session_label = self.calendar.late_opens[0]
minutes_for_late_open = self.calendar.minutes_for_session(
late_open_session_label
)
_open, _close = self.calendar.open_and_close_for_session(
late_open_session_label
)
np.testing.assert_array_equal(
minutes_for_late_open,
pd.date_range(start=_open, end=_close, freq="min"),
)
def test_sessions_in_range(self):
# pick two sessions
session_count = len(self.calendar.schedule.index)
first_idx = session_count // 3
second_idx = 2 * first_idx
first_session_label = self.calendar.schedule.index[first_idx]
second_session_label = self.calendar.schedule.index[second_idx]
answer_key = self.calendar.schedule.index[first_idx : second_idx + 1]
rtrn = self.calendar.sessions_in_range(
first_session_label, second_session_label, _parse=False
)
np.testing.assert_array_equal(answer_key, rtrn)
def get_session_block(self):
"""
Get an "interesting" range of three sessions in a row. By default this
tries to find and return a (full session, early close session, full
session) block.
"""
if not self.HAVE_EARLY_CLOSES:
# If we don't have any early closes, just return a "random" chunk
# of three sessions.
return self.calendar.all_sessions[10:13]
shortened_session = self.calendar.early_closes[0]
shortened_session_idx = self.calendar.schedule.index.get_loc(shortened_session)
session_before = self.calendar.schedule.index[shortened_session_idx - 1]
session_after = self.calendar.schedule.index[shortened_session_idx + 1]
return [session_before, shortened_session, session_after]
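# Illustrative sketch (assumes the calendar under test has early closes):
# for an XNYS-like calendar the block returned above would be, e.g.,
# [normal session, early-close session, normal session] around a holiday
# half-day, which the minutes/range tests below then exercise:
#
#     sessions = self.get_session_block()
#     assert len(sessions) == 3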
def test_minutes_in_range(self):
sessions = self.get_session_block()
first_open, first_close = self.calendar.open_and_close_for_session(sessions[0])
minute_before_first_open = first_open - self.one_minute
middle_open, middle_close = self.calendar.open_and_close_for_session(
sessions[1]
)
last_open, last_close = self.calendar.open_and_close_for_session(sessions[-1])
minute_after_last_close = last_close + self.one_minute
# get all the minutes between first_open and last_close
minutes1 = self.calendar.minutes_in_range(first_open, last_close, _parse=False)
minutes2 = self.calendar.minutes_in_range(
minute_before_first_open, minute_after_last_close, _parse=False
)
if self.GAPS_BETWEEN_SESSIONS:
np.testing.assert_array_equal(minutes1, minutes2)
else:
# if no gaps, then minutes2 should have 2 extra minutes
np.testing.assert_array_equal(minutes1, minutes2[1:-1])
# manually construct the minutes
(
first_break_start,
first_break_end,
) = self.calendar.break_start_and_end_for_session(sessions[0])
(
middle_break_start,
middle_break_end,
) = self.calendar.break_start_and_end_for_session(sessions[1])
(
last_break_start,
last_break_end,
) = self.calendar.break_start_and_end_for_session(sessions[-1])
intervals = [
(first_open, first_break_start, first_break_end, first_close),
(middle_open, middle_break_start, middle_break_end, middle_close),
(last_open, last_break_start, last_break_end, last_close),
]
all_minutes = []
for _open, _break_start, _break_end, _close in intervals:
if pd.isnull(_break_start):
all_minutes.append(
pd.date_range(start=_open, end=_close, freq="min"),
)
else:
all_minutes.append(
pd.date_range(start=_open, end=_break_start, freq="min"),
)
all_minutes.append(
pd.date_range(start=_break_end, end=_close, freq="min"),
)
all_minutes = np.concatenate(all_minutes)
np.testing.assert_array_equal(all_minutes, minutes1)
def test_minutes_for_sessions_in_range(self):
sessions = self.get_session_block()
minutes = self.calendar.minutes_for_sessions_in_range(sessions[0], sessions[-1])
# do it manually
session0_minutes = self.calendar.minutes_for_session(sessions[0])
session1_minutes = self.calendar.minutes_for_session(sessions[1])
session2_minutes = self.calendar.minutes_for_session(sessions[2])
concatenated_minutes = np.concatenate(
[session0_minutes.values, session1_minutes.values, session2_minutes.values]
)
np.testing.assert_array_equal(concatenated_minutes, minutes.values)
def test_sessions_window(self):
sessions = self.get_session_block()
np.testing.assert_array_equal(
self.calendar.sessions_window(sessions[0], len(sessions) - 1, _parse=False),
self.calendar.sessions_in_range(sessions[0], sessions[-1], _parse=False),
)
np.testing.assert_array_equal(
self.calendar.sessions_window(
sessions[-1], -1 * (len(sessions) - 1), _parse=False
),
self.calendar.sessions_in_range(sessions[0], sessions[-1], _parse=False),
)
def test_session_distance(self):
sessions = self.get_session_block()
forward_distance = self.calendar.session_distance(
sessions[0],
sessions[-1],
_parse=False,
)
self.assertEqual(forward_distance, len(sessions))
backward_distance = self.calendar.session_distance(
sessions[-1],
sessions[0],
_parse=False,
)
self.assertEqual(backward_distance, -len(sessions))
one_day_distance = self.calendar.session_distance(
sessions[0],
sessions[0],
_parse=False,
)
self.assertEqual(one_day_distance, 1)
def test_open_and_close_for_session(self):
for session_label, open_answer, close_answer, _, _ in self.answers.itertuples(
name=None
):
found_open, found_close = self.calendar.open_and_close_for_session(
session_label, _parse=False
)
# Test that the methods for just session open and close produce the
# same values as the method for getting both.
alt_open = self.calendar.session_open(session_label, _parse=False)
self.assertEqual(alt_open, found_open)
alt_close = self.calendar.session_close(session_label, _parse=False)
self.assertEqual(alt_close, found_close)
self.assertEqual(open_answer, found_open)
self.assertEqual(close_answer, found_close)
def test_session_opens_in_range(self):
found_opens = self.calendar.session_opens_in_range(
self.answers.index[0],
self.answers.index[-1],
_parse=False,
)
found_opens.index.freq = None
tm.assert_series_equal(found_opens, self.answers["market_open"])
def test_session_closes_in_range(self):
found_closes = self.calendar.session_closes_in_range(
self.answers.index[0],
self.answers.index[-1],
_parse=False,
)
found_closes.index.freq = None
tm.assert_series_equal(found_closes, self.answers["market_close"])
def test_daylight_savings(self):
# 2004 daylight savings switches:
# Sunday 2004-04-04 and Sunday 2004-10-31
# make sure there's no weirdness around calculating the next day's
# session's open time.
m = dict(self.calendar.open_times)
m[pd.Timestamp.min] = m.pop(None)
open_times = pd.Series(m)
for date in self.DAYLIGHT_SAVINGS_DATES:
next_day = pd.Timestamp(date, tz=UTC)
open_date = next_day + Timedelta(days=self.calendar.open_offset)
the_open = self.calendar.schedule.loc[next_day].market_open
localized_open = the_open.tz_localize(UTC).tz_convert(self.calendar.tz)
self.assertEqual(
(open_date.year, open_date.month, open_date.day),
(localized_open.year, localized_open.month, localized_open.day),
)
open_ix = open_times.index.searchsorted(pd.Timestamp(date), side="right")
if open_ix == len(open_times):
open_ix -= 1
self.assertEqual(open_times.iloc[open_ix].hour, localized_open.hour)
self.assertEqual(open_times.iloc[open_ix].minute, localized_open.minute)
def test_start_end(self):
"""
Check ExchangeCalendar with defined start/end dates.
"""
calendar = self.calendar_class(
start=self.TEST_START_END_FIRST,
end=self.TEST_START_END_LAST,
)
self.assertEqual(
calendar.first_trading_session,
self.TEST_START_END_EXPECTED_FIRST,
)
self.assertEqual(
calendar.last_trading_session,
self.TEST_START_END_EXPECTED_LAST,
)
def test_has_breaks(self):
has_breaks = self.calendar.has_breaks()
self.assertEqual(has_breaks, self.HAVE_BREAKS)
def test_session_has_break(self):
if self.SESSION_WITHOUT_BREAK is not None:
self.assertFalse(
self.calendar.session_has_break(self.SESSION_WITHOUT_BREAK)
)
if self.SESSION_WITH_BREAK is not None:
self.assertTrue(self.calendar.session_has_break(self.SESSION_WITH_BREAK))
# TODO remove this class when all calendars migrated. No longer required as
# `minute_index_to_session_labels` comprehensively tested under new suite.
class OpenDetectionTestCase(TestCase):
# This is an extra set of unit tests that were added during a rewrite of
# `minute_index_to_session_labels` to ensure that the existing
# calendar-generic test suite correctly covered edge cases around
# non-market minutes.
def test_detect_non_market_minutes(self):
cal = get_calendar("NYSE")
# NOTE: This test is here instead of being on the base class for all
# calendars because some of our calendars are 24/7, which means there
# aren't any non-market minutes to find.
day0 = cal.minutes_for_sessions_in_range(
pd.Timestamp("2013-07-03", tz=UTC),
pd.Timestamp("2013-07-03", tz=UTC),
)
for minute in day0:
self.assertTrue(cal.is_open_on_minute(minute))
day1 = cal.minutes_for_sessions_in_range(
pd.Timestamp("2013-07-05", tz=UTC),
pd.Timestamp("2013-07-05", tz=UTC),
)
for minute in day1:
self.assertTrue(cal.is_open_on_minute(minute))
def NYSE_timestamp(s):
return pd.Timestamp(s, tz="America/New_York").tz_convert(UTC)
non_market = [
# After close.
NYSE_timestamp("2013-07-03 16:01"),
# Holiday.
NYSE_timestamp("2013-07-04 10:00"),
# Before open.
NYSE_timestamp("2013-07-05 9:29"),
]
for minute in non_market:
self.assertFalse(cal.is_open_on_minute(minute), minute)
input_ = pd.to_datetime(
np.hstack([day0.values, minute.asm8, day1.values]),
utc=True,
)
with self.assertRaises(ValueError) as e:
cal.minute_index_to_session_labels(input_)
exc_str = str(e.exception)
self.assertIn("First Bad Minute: {}".format(minute), exc_str)
# TODO remove this class when all calendars migrated. No longer required as
# this case is handled by new test base internally.
class NoDSTExchangeCalendarTestBase(ExchangeCalendarTestBase):
def test_daylight_savings(self):
"""
Several countries in Africa / Asia do not observe DST
so we need to skip over this test for those markets
"""
pass
def get_csv(name: str) -> pd.DataFrame:
"""Get csv file as DataFrame for given calendar `name`."""
filename = name.replace("/", "-").lower() + ".csv"
path = pathlib.Path(__file__).parent.joinpath("resources", filename)
df = pd.read_csv(
path,
index_col=0,
parse_dates=[0, 1, 2, 3, 4],
infer_datetime_format=True,
)
df.index = df.index.tz_localize("UTC")
for col in df:
df[col] = df[col].dt.tz_localize("UTC")
return df
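# A minimal sketch of the csv layout `get_csv` expects (illustrative values,
# not a real resource file): the index column holds session dates and the
# four time columns are parsed as dates and then localised to UTC above.
#
#     ,market_open,market_close,break_start,break_end
#     2021-01-04,2021-01-04 14:30,2021-01-04 21:00,,
#     2021-01-05,2021-01-05 14:30,2021-01-05 21:00,,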
class Answers:
"""Inputs and expected output for testing a given calendar and side.
Inputs and expected outputs are provided by public instance methods and
properties. These either read directly from the corresponding .csv file
or are evaluated from the .csv file contents. NB Properties / methods
MUST NOT make evaluations by way of repeating the code of the
ExchangeCalendar method they are intended to test!
Parameters
----------
calendar_name
Canonical name of calendar for which answer info is required. For
example, 'XNYS'.
side {'both', 'left', 'right', 'neither'}
Side of sessions to treat as trading minutes.
"""
ONE_MIN = pd.Timedelta(1, "T")
TWO_MIN = pd.Timedelta(2, "T")
ONE_DAY = pd.Timedelta(1, "D")
LEFT_SIDES = ["left", "both"]
RIGHT_SIDES = ["right", "both"]
def __init__(
self,
calendar_name: str,
side: str,
):
self._name = calendar_name.upper()
self._side = side
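# Illustrative construction (a sketch; 'XNYS' as per the docstring example,
# side values as documented above):
#
#     answers = Answers("XNYS", side="left")
#     answers.sessions       # session labels from the csv
#     answers.first_minutes  # first trading minute of each session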
# --- Exposed constructor arguments ---
@property
def name(self) -> str:
"""Name of corresponding calendar."""
return self._name
@property
def side(self) -> str:
"""Side of calendar for which answers valid."""
return self._side
# --- Properties read (indirectly) from csv file ---
@functools.lru_cache(maxsize=4)
def _answers(self) -> pd.DataFrame:
return get_csv(self.name)
@property
def answers(self) -> pd.DataFrame:
"""Answers as correspoding csv."""
return self._answers()
@property
def sessions(self) -> pd.DatetimeIndex:
"""Session labels."""
return self.answers.index
@property
def opens(self) -> pd.Series:
"""Market open time for each session."""
return self.answers.market_open
@property
def closes(self) -> pd.Series:
"""Market close time for each session."""
return self.answers.market_close
@property
def break_starts(self) -> pd.Series:
"""Break start time for each session."""
return self.answers.break_start
@property
def break_ends(self) -> pd.Series:
"""Break end time for each session."""
return self.answers.break_end
# --- get and helper methods ---
def get_next_session(self, session: pd.Timestamp) -> pd.Timestamp:
"""Get session that immediately follows `session`."""
assert (
session != self.last_session
), "Cannot get session later than last answers' session."
idx = self.sessions.get_loc(session) + 1
return self.sessions[idx]
def session_has_break(self, session: pd.Timestamp) -> bool:
"""Query if `session` has a break."""
return session in self.sessions_with_break
@staticmethod
def get_sessions_sample(sessions: pd.DatetimeIndex):
"""Return sample of given `sessions`.
Sample includes:
All sessions within first two years of `sessions`.
All sessions within last two years of `sessions`.
All sessions falling:
within first 3 days of any month.
from 28th of any month.
from 14th through 16th of any month.
"""
if sessions.empty:
return sessions
mask = (
(sessions < sessions[0] + pd.DateOffset(years=2))
| (sessions > sessions[-1] - pd.DateOffset(years=2))
| (sessions.day <= 3)
| (sessions.day >= 28)
| (14 <= sessions.day) & (sessions.day <= 16)
)
return sessions[mask]
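# Worked example of the mask above (dates illustrative): a session dated
# 2015-06-15 is sampled via the 14th-through-16th rule even if it falls in
# the middle years of a long calendar, whereas 2015-06-10 is sampled only
# if it lies within two years of either end of `sessions`. A quick sanity
# check (a sketch, `answers` being any Answers instance):
#
#     sample = Answers.get_sessions_sample(answers.sessions)
#     assert sample.isin(answers.sessions).all()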
def get_sessions_minutes(
self, start: pd.Timestamp, end: pd.Timestamp | int = 1
) -> pd.DatetimeIndex:
"""Get trading minutes for 1 or more consecutive sessions.
Parameters
----------
start
Session from which to get trading minutes.
end
Session through which to get trading minutes. Can be passed as:
pd.Timestamp: return will include trading minutes for the `end`
session.
int: number of consecutive sessions, inclusive of `start`, for
which trading minutes are required. Default is 1, such that
by default trading minutes are returned for only the `start`
session.
"""
idx = self.sessions.get_loc(start)
stop = idx + end if isinstance(end, int) else self.sessions.get_loc(end) + 1
indexer = slice(idx, stop)
dtis = []
for first, last, last_am, first_pm in zip(
self.first_minutes[indexer],
self.last_minutes[indexer],
self.last_am_minutes[indexer],
self.first_pm_minutes[indexer],
):
if pd.isna(last_am):
dtis.append(pd.date_range(first, last, freq="T"))
else:
dtis.append(pd.date_range(first, last_am, freq="T"))
dtis.append(pd.date_range(first_pm, last, freq="T"))
return dtis[0].union_many(dtis[1:])
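# Illustrative calls (a sketch, not exercised here; `session` and
# `next_session` are placeholder labels): an int `end` counts sessions from
# `start`, while a Timestamp `end` is inclusive.
#
#     answers.get_sessions_minutes(session)                 # minutes of `session` only
#     answers.get_sessions_minutes(session, 2)               # `session` and the next
#     answers.get_sessions_minutes(session, next_session)    # same two sessions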
# --- Evaluated general calendar properties ---
@functools.lru_cache(maxsize=4)
def _has_a_session_with_break(self) -> bool:
return self.break_starts.notna().any()
@property
def has_a_session_with_break(self) -> bool:
"""Does any session of answers have a break."""
return self._has_a_session_with_break()
@property
def has_a_session_without_break(self) -> bool:
"""Does any session of answers not have a break."""
return self.break_starts.isna().any()
# --- Evaluated properties for first and last sessions ---
@property
def first_session(self) -> pd.Timestamp:
"""First session covered by answers."""
return self.sessions[0]
@property
def last_session(self) -> pd.Timestamp:
"""Last session covered by answers."""
return self.sessions[-1]
@property
def sessions_range(self) -> tuple[pd.Timestamp, pd.Timestamp]:
"""First and last sessions covered by answers."""
return self.first_session, self.last_session
@property
def first_session_open(self) -> pd.Timestamp:
"""Open time of first session covered by answers."""
return self.opens[0]
@property
def last_session_close(self) -> pd.Timestamp:
"""Close time of last session covered by answers."""
return self.closes[-1]
@property
def first_trading_minute(self) -> pd.Timestamp:
open_ = self.first_session_open
return open_ if self.side in self.LEFT_SIDES else open_ + self.ONE_MIN
@property
def last_trading_minute(self) -> pd.Timestamp:
close = self.last_session_close
return close if self.side in self.RIGHT_SIDES else close - self.ONE_MIN
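# Sketch of the side semantics encoded by the two properties above (values
# per the class docstring):
#
#     side="left"    -> open is a trading minute, close is not
#     side="right"   -> close is a trading minute, open is not
#     side="both"    -> open and close are both trading minutes
#     side="neither" -> neither open nor close is a trading minute
#
# hence first_trading_minute is the open (left/both) or open + 1 min, and
# last_trading_minute is the close (right/both) or close - 1 min.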
@property
def trading_minutes_range(self) -> tuple[pd.Timestamp, pd.Timestamp]:
"""First and last trading minutes covered by answers."""
return self.first_trading_minute, self.last_trading_minute
# --- out-of-bounds properties ---
@property
def minute_too_early(self) -> pd.Timestamp:
"""Minute earlier than first trading minute."""
return self.first_trading_minute - self.ONE_MIN
@property
def minute_too_late(self) -> pd.Timestamp:
"""Minute later than last trading minute."""
return self.last_trading_minute + self.ONE_MIN
@property
def session_too_early(self) -> pd.Timestamp:
"""Date earlier than first session."""
return self.first_session - self.ONE_DAY
@property
def session_too_late(self) -> pd.Timestamp:
"""Date later than last session."""
return self.last_session + self.ONE_DAY
# --- Evaluated properties covering every session. ---
@functools.lru_cache(maxsize=4)
def _first_minutes(self) -> pd.Series:
if self.side in self.LEFT_SIDES:
minutes = self.opens.copy()
else:
minutes = self.opens + self.ONE_MIN
minutes.name = "first_minutes"
return minutes
@property
def first_minutes(self) -> pd.Series:
"""First trading minute of each session (UTC)."""
return self._first_minutes()
@property
def first_minutes_plus_one(self) -> pd.Series:
"""First trading minute of each session plus one minute."""
return self.first_minutes + self.ONE_MIN
@property
def first_minutes_less_one(self) -> pd.Series:
"""First trading minute of each session less one minute."""
return self.first_minutes - self.ONE_MIN
@functools.lru_cache(maxsize=4)
def _last_minutes(self) -> pd.Series:
if self.side in self.RIGHT_SIDES:
minutes = self.closes.copy()
else:
minutes = self.closes - self.ONE_MIN
minutes.name = "last_minutes"
return minutes
@property
def last_minutes(self) -> pd.Series:
"""Last trading minute of each session."""
return self._last_minutes()
@property
def last_minutes_plus_one(self) -> pd.Series:
"""Last trading minute of each session plus one minute."""
return self.last_minutes + self.ONE_MIN
@property
def last_minutes_less_one(self) -> pd.Series:
"""Last trading minute of each session less one minute."""
return self.last_minutes - self.ONE_MIN
@functools.lru_cache(maxsize=4)
def _last_am_minutes(self) -> pd.Series:
if self.side in self.RIGHT_SIDES:
minutes = self.break_starts.copy()
else:
minutes = self.break_starts - self.ONE_MIN
minutes.name = "last_am_minutes"
return minutes
@property
def last_am_minutes(self) -> pd.Series:
"""Last pre-break trading minute of each session.
NaT if session does not have a break.
"""
return self._last_am_minutes()
@property
def last_am_minutes_plus_one(self) -> pd.Series:
"""Last pre-break trading minute of each session plus one minute."""
return self.last_am_minutes + self.ONE_MIN
@property
def last_am_minutes_less_one(self) -> pd.Series:
"""Last pre-break trading minute of each session less one minute."""
return self.last_am_minutes - self.ONE_MIN
@functools.lru_cache(maxsize=4)
def _first_pm_minutes(self) -> pd.Series:
if self.side in self.LEFT_SIDES:
minutes = self.break_ends.copy()
else:
minutes = self.break_ends + self.ONE_MIN
minutes.name = "first_pm_minutes"
return minutes
@property
def first_pm_minutes(self) -> pd.Series:
"""First post-break trading minute of each session.
NaT if session does not have a break.
"""
return self._first_pm_minutes()
@property
def first_pm_minutes_plus_one(self) -> pd.Series:
"""First post-break trading minute of each session plus one minute."""
return self.first_pm_minutes + self.ONE_MIN
@property
def first_pm_minutes_less_one(self) -> pd.Series:
"""First post-break trading minute of each session less one minute."""
return self.first_pm_minutes - self.ONE_MIN
# --- Evaluated session sets and ranges that meet a specific condition ---
@property
def _mask_breaks(self) -> pd.Series:
return self.break_starts.notna()
@functools.lru_cache(maxsize=4)
def _sessions_with_break(self) -> pd.DatetimeIndex:
return self.sessions[self._mask_breaks]
@property
def sessions_with_break(self) -> pd.DatetimeIndex:
return self._sessions_with_break()
@functools.lru_cache(maxsize=4)
def _sessions_without_break(self) -> pd.DatetimeIndex:
return self.sessions[~self._mask_breaks]
@property
def sessions_without_break(self) -> pd.DatetimeIndex:
return self._sessions_without_break()
@property
def sessions_without_break_run(self) -> pd.DatetimeIndex:
"""Longest run of consecutive sessions without a break."""
s = self.break_starts.isna()
if s.empty:
return pd.DatetimeIndex([], tz="UTC")
trues_grouped = (~s).cumsum()[s]
group_sizes = trues_grouped.value_counts()
max_run_size = group_sizes.max()
max_run_group_id = group_sizes[group_sizes == max_run_size].index[0]
run_without_break = trues_grouped[trues_grouped == max_run_group_id].index
return run_without_break
@property
def sessions_without_break_range(self) -> tuple[pd.Timestamp, pd.Timestamp] | None:
"""Longest session range that does not include a session with a break.
Returns None if all sessions have a break.
"""
sessions = self.sessions_without_break_run
if sessions.empty:
return None
return sessions[0], sessions[-1]
@property
def _mask_sessions_without_gap_after(self) -> pd.Series:
if self.side == "neither":
# will always have a gap after if neither open nor close is a trading
# minute (assuming sessions cannot overlap)
return pd.Series(False, index=self.sessions)
elif self.side == "both":
# a trading minute cannot be a minute of more than one session.
assert not (self.closes == self.opens.shift(-1)).any()
# there will be no gap if next open is one minute after previous close
closes_plus_min = self.closes + pd.Timedelta(1, "T")
return self.opens.shift(-1) == closes_plus_min
else:
return self.opens.shift(-1) == self.closes
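# Worked example of the gap-after logic above (times illustrative): with
# side="both", a 16:00 close followed by a 16:01 next open leaves no
# non-trading minute between the sessions; with side="left" or "right"
# the sessions are gapless only when the next open equals the close.
#
#     closes:          ... 2021-01-04 16:00
#     opens.shift(-1): ... 2021-01-04 16:01   -> no gap when side == "both"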
@property
def _mask_sessions_without_gap_before(self) -> pd.Series:
if self.side == "neither":
# will always have a gap before if neither open nor close is a trading
# minute (assuming sessions cannot overlap)
return pd.Series(False, index=self.sessions)
elif self.side == "both":
# a trading minute cannot be a minute of more than one session.
assert not (self.closes == self.opens.shift(-1)).any()
# there will be no gap if previous close is one minute before next open
opens_minus_one = self.opens - pd.Timedelta(1, "T")
return self.closes.shift(1) == opens_minus_one
else:
return self.closes.shift(1) == self.opens
@functools.lru_cache(maxsize=4)
def _sessions_without_gap_after(self) -> pd.DatetimeIndex:
mask = self._mask_sessions_without_gap_after
return self.sessions[mask][:-1]
@property
def sessions_without_gap_after(self) -> pd.DatetimeIndex:
"""Sessions not followed by a non-trading minute.
Rather, sessions immediately followed by first trading minute of
next session.
"""
return self._sessions_without_gap_after()
@functools.lru_cache(maxsize=4)
def _sessions_with_gap_after(self) -> pd.DatetimeIndex:
mask = self._mask_sessions_without_gap_after
return self.sessions[~mask][:-1]
@property
def sessions_with_gap_after(self) -> pd.DatetimeIndex:
"""Sessions followed by a non-trading minute."""
return self._sessions_with_gap_after()
@functools.lru_cache(maxsize=4)
def _sessions_without_gap_before(self) -> pd.DatetimeIndex:
mask = self._mask_sessions_without_gap_before
return self.sessions[mask][1:]
@property
def sessions_without_gap_before(self) -> pd.DatetimeIndex:
"""Sessions not preceeded by a non-trading minute.
Rather, sessions immediately preceeded by last trading minute of
previous session.
"""
return self._sessions_without_gap_before()
@functools.lru_cache(maxsize=4)
def _sessions_with_gap_before(self) -> pd.DatetimeIndex:
mask = self._mask_sessions_without_gap_before
return self.sessions[~mask][1:]
@property
def sessions_with_gap_before(self) -> pd.DatetimeIndex:
"""Sessions preceeded by a non-trading minute."""
return self._sessions_with_gap_before()
# times are changing...
@functools.lru_cache(maxsize=16)
def _get_sessions_with_times_different_to_next_session(
self,
column: str, # typing.Literal["opens", "closes", "break_starts", "break_ends"]
) -> list[pd.DatetimeIndex]:
"""For a given answers column, get session labels where time differs
from time of next session.
Where `column` is a break time ("break_starts" or "break_ends"), return
will not include sessions when next session has a different `has_break`
status. For example, if session_0 has a break and session_1 does not have
a break, or vice versa, then session_0 will not be included in the return. For
sessions followed by a session with a different `has_break` status, see
`_get_sessions_with_has_break_different_to_next_session`.
Returns
-------
list of pd.DatetimeIndex
[0] sessions with earlier next session
[1] sessions with later next session
"""
# column takes string to allow lru_cache (Series not hashable)
is_break_col = column[0] == "b"
column_ = getattr(self, column)
if is_break_col:
if column_.isna().all():
return [pd.DatetimeIndex([], tz="UTC")] * 4
column_ = column_.fillna(method="ffill").fillna(method="bfill")
diff = (column_.shift(-1) - column_)[:-1]
remainder = diff % pd.Timedelta(hours=24)
mask = remainder != pd.Timedelta(0)
sessions = self.sessions[:-1][mask]
next_session_earlier_mask = remainder[mask] > pd.Timedelta(hours=12)
next_session_earlier = sessions[next_session_earlier_mask]
next_session_later = sessions[~next_session_earlier_mask]
if is_break_col:
mask = next_session_earlier.isin(self.sessions_without_break)
next_session_earlier = next_session_earlier.drop(next_session_earlier[mask])
mask = next_session_later.isin(self.sessions_without_break)
next_session_later = next_session_later.drop(next_session_later[mask])
return [next_session_earlier, next_session_later]
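# Sketch of the modulo-24h comparison above (times illustrative): taking the
# difference of consecutive times modulo one day isolates the change in the
# time-of-day component. An unchanged 14:30 open gives remainder 0 (masked
# out); a shift to 13:30 gives a remainder of 23h (> 12h, so "next session
# earlier"); a shift to 15:30 gives 1h (<= 12h, so "next session later").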
@property
def _sessions_with_opens_different_to_next_session(
self,
) -> list[pd.DatetimeIndex]:
return self._get_sessions_with_times_different_to_next_session("opens")
@property
def _sessions_with_closes_different_to_next_session(
self,
) -> list[pd.DatetimeIndex]:
return self._get_sessions_with_times_different_to_next_session("closes")
@property
def _sessions_with_break_start_different_to_next_session(
self,
) -> list[pd.DatetimeIndex]:
return self._get_sessions_with_times_different_to_next_session("break_starts")
@property
def _sessions_with_break_end_different_to_next_session(
self,
) -> list[pd.DatetimeIndex]:
return self._get_sessions_with_times_different_to_next_session("break_ends")
@property
def sessions_next_open_earlier(self) -> pd.DatetimeIndex:
return self._sessions_with_opens_different_to_next_session[0]
@property
def sessions_next_open_later(self) -> pd.DatetimeIndex:
return self._sessions_with_opens_different_to_next_session[1]
@property
def sessions_next_open_different(self) -> pd.DatetimeIndex:
return self.sessions_next_open_earlier.union(self.sessions_next_open_later)
@property
def sessions_next_close_earlier(self) -> pd.DatetimeIndex:
return self._sessions_with_closes_different_to_next_session[0]
@property
def sessions_next_close_later(self) -> pd.DatetimeIndex:
return self._sessions_with_closes_different_to_next_session[1]
@property
def sessions_next_close_different(self) -> pd.DatetimeIndex:
return self.sessions_next_close_earlier.union(self.sessions_next_close_later)
@property
def sessions_next_break_start_earlier(self) -> pd.DatetimeIndex:
return self._sessions_with_break_start_different_to_next_session[0]
@property
def sessions_next_break_start_later(self) -> pd.DatetimeIndex:
return self._sessions_with_break_start_different_to_next_session[1]
@property
def sessions_next_break_start_different(self) -> pd.DatetimeIndex:
earlier = self.sessions_next_break_start_earlier
later = self.sessions_next_break_start_later
return earlier.union(later)
@property
def sessions_next_break_end_earlier(self) -> pd.DatetimeIndex:
return self._sessions_with_break_end_different_to_next_session[0]
@property
def sessions_next_break_end_later(self) -> pd.DatetimeIndex:
return self._sessions_with_break_end_different_to_next_session[1]
@property
def sessions_next_break_end_different(self) -> pd.DatetimeIndex:
earlier = self.sessions_next_break_end_earlier
later = self.sessions_next_break_end_later
return earlier.union(later)
@functools.lru_cache(maxsize=4)
def _get_sessions_with_has_break_different_to_next_session(
self,
) -> tuple[pd.DatetimeIndex, pd.DatetimeIndex]:
"""Get sessions with 'has_break' different to next session.
Returns
-------
tuple[pd.DatetimeIndex, pd.DatetimeIndex]
[0] Sessions that have a break and are immediately followed by
a session which does not have a break.
[1] Sessions that do not have a break and are immediately
followed by a session which does have a break.
"""
mask = (self.break_starts.notna() & self.break_starts.shift(-1).isna())[:-1]
sessions_with_break_next_session_without_break = self.sessions[:-1][mask]
mask = (self.break_starts.isna() & self.break_starts.shift(-1).notna())[:-1]
sessions_without_break_next_session_with_break = self.sessions[:-1][mask]
return (
sessions_with_break_next_session_without_break,
sessions_without_break_next_session_with_break,
)
@property
def sessions_with_break_next_session_without_break(self) -> pd.DatetimeIndex:
return self._get_sessions_with_has_break_different_to_next_session()[0]
@property
def sessions_without_break_next_session_with_break(self) -> pd.DatetimeIndex:
return self._get_sessions_with_has_break_different_to_next_session()[1]
@functools.lru_cache(maxsize=4)
def _sessions_next_time_different(self) -> pd.DatetimeIndex:
return self.sessions_next_open_different.union_many(
[
self.sessions_next_close_different,
self.sessions_next_break_start_different,
self.sessions_next_break_end_different,
self.sessions_with_break_next_session_without_break,
self.sessions_without_break_next_session_with_break,
]
)
@property
def sessions_next_time_different(self) -> pd.DatetimeIndex:
"""Sessions where next session has a different time for any column.
Includes sessions where next session has a different `has_break`
status.
"""
return self._sessions_next_time_different()
# session blocks...
def _create_changing_times_session_block(
self, session: pd.Timestamp
) -> pd.DatetimeIndex:
"""Create block of sessions with changing times.
Given a `session` known to have at least one time (open, close,
break_start or break_end) different from the next session, returns
a block of consecutive sessions ending with the first session after
`session` that has the same times as the session that immediately
precedes it (i.e. the last two sessions of the block will have the
same times), or the last calendar session.
"""
start_idx = self.sessions.get_loc(session)
end_idx = start_idx + 1
while self.sessions[end_idx] in self.sessions_next_time_different:
end_idx += 1
end_idx += 2 # +1 to include session with same times, +1 to serve as end index
return self.sessions[start_idx:end_idx]
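# Illustrative walk-through (session labels hypothetical): if sessions s0,
# s1 and s2 each differ from their successor while s3 matches s4, then for
# `session`=s0 the loop above stops at s3 and the returned block is s0..s4,
# whose last two sessions share the same times.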
def _get_normal_session_block(self) -> pd.DatetimeIndex:
"""Block of 3 sessions with unchanged timings."""
start_idx = len(self.sessions) // 3
end_idx = start_idx + 21
for i in range(start_idx, end_idx):
times_1 = self.answers.iloc[i].dt.time
times_2 = self.answers.iloc[i + 1].dt.time
times_3 = self.answers.iloc[i + 2].dt.time
one_and_two_equal = (times_1 == times_2) | (times_1.isna() & times_2.isna())
one_and_three_equal = (times_1 == times_3) | (
times_1.isna() & times_3.isna()
)
if (one_and_two_equal & one_and_three_equal).all():
break
assert i < (end_idx - 1), "Unable to evaluate a normal session block!"
return self.sessions[i : i + 3]
def _get_session_block(
self, from_session_of: pd.DatetimeIndex, to_session_of: pd.DatetimeIndex
) -> pd.DatetimeIndex:
"""Get session block with bounds defined by sessions of given indexes.
Block will start with middle session of `from_session_of`.
Block will run to the nearest subsequent session of `to_session_of`
(or `self.last_session` if this comes first). Block will end with
the session that immediately follows this session.
"""
i = len(from_session_of) // 2
start_session = from_session_of[i]
start_idx = self.sessions.get_loc(start_session)
end_idx = start_idx + 1
end_session = self.sessions[end_idx]
while end_session not in to_session_of and end_session != self.last_session:
end_idx += 1
end_session = self.sessions[end_idx]
return self.sessions[start_idx : end_idx + 2]
@functools.lru_cache(maxsize=4)
def _session_blocks(self) -> dict[str, pd.DatetimeIndex]:
blocks = {}
blocks["normal"] = self._get_normal_session_block()
blocks["first_three"] = self.sessions[:3]
blocks["last_three"] = self.sessions[-3:]
# blocks here include where:
# session 1 has at least one different time from session 0
# session 0 has a break and session 1 does not (and vice versa)
sessions_indexes = (
("next_open_earlier", self.sessions_next_open_earlier),
("next_open_later", self.sessions_next_open_later),
("next_close_earlier", self.sessions_next_close_earlier),
("next_close_later", self.sessions_next_close_later),
("next_break_start_earlier", self.sessions_next_break_start_earlier),
("next_break_start_later", self.sessions_next_break_start_later),
("next_break_end_earlier", self.sessions_next_break_end_earlier),
("next_break_end_later", self.sessions_next_break_end_later),
(
"with_break_to_without_break",
self.sessions_with_break_next_session_without_break,
),
(
"without_break_to_with_break",
self.sessions_without_break_next_session_with_break,
),
)
for name, index in sessions_indexes:
if index.empty:
blocks[name] = pd.DatetimeIndex([], tz="UTC")
else:
session = index[0]
blocks[name] = self._create_changing_times_session_block(session)
# blocks here move from session with gap to session without gap and vice versa
if (not self.sessions_with_gap_after.empty) and (
not self.sessions_without_gap_after.empty
):
without_gap_to_with_gap = self._get_session_block(
self.sessions_without_gap_after, self.sessions_with_gap_after
)
with_gap_to_without_gap = self._get_session_block(
self.sessions_with_gap_after, self.sessions_without_gap_after
)
else:
without_gap_to_with_gap = pd.DatetimeIndex([], tz="UTC")
with_gap_to_without_gap = pd.DatetimeIndex([], tz="UTC")
blocks["without_gap_to_with_gap"] = without_gap_to_with_gap
blocks["with_gap_to_without_gap"] = with_gap_to_without_gap
# blocks that adjoin or contain a non_session date
follows_non_session = pd.DatetimeIndex([], tz="UTC")
preceeds_non_session = pd.DatetimeIndex([], tz="UTC")
contains_non_session = pd.DatetimeIndex([], tz="UTC")
if len(self.non_sessions) > 1:
diff = self.non_sessions[1:] - self.non_sessions[:-1]
mask = diff != pd.Timedelta(
1, "D"
) # non_session dates followed by a session
valid_non_sessions = self.non_sessions[:-1][mask]
if len(valid_non_sessions) > 1:
slce = self.sessions.slice_indexer(
valid_non_sessions[0], valid_non_sessions[1]
)
sessions_between_non_sessions = self.sessions[slce]
block_length = min(2, len(sessions_between_non_sessions))
follows_non_session = sessions_between_non_sessions[:block_length]
preceeds_non_session = sessions_between_non_sessions[-block_length:]
# take session before and session after non-session
contains_non_session = self.sessions[slce.stop - 1 : slce.stop + 1]
blocks["follows_non_session"] = follows_non_session
blocks["preceeds_non_session"] = preceeds_non_session
blocks["contains_non_session"] = contains_non_session
return blocks
@property
def session_blocks(self) -> dict[str, pd.DatetimeIndex]:
"""Dictionary of session blocks of a particular behaviour.
A block comprises either a single session or multiple contiguous
sessions.
Keys:
"normal" - three sessions with unchanging timings.
"first_three" - answers' first three sessions.
"last_three" - answers's last three sessions.
"next_open_earlier" - session 1 open is earlier than session 0
open.
"next_open_later" - session 1 open is later than session 0
open.
"next_close_earlier" - session 1 close is earlier than session
0 close.
"next_close_later" - session 1 close is later than session 0
close.
"next_break_start_earlier" - session 1 break_start is earlier
than session 0 break_start.
"next_break_start_later" - session 1 break_start is later than
session 0 break_start.
"next_break_end_earlier" - session 1 break_end is earlier than
session 0 break_end.
"next_break_end_later" - session 1 break_end is later than
session 0 break_end.
"with_break_to_without_break" - session 0 has a break, session
1 does not have a break.
"without_break_to_with_break" - session 0 does not have a
break, session 1 does have a break.
"without_gap_to_with_gap" - session 0 is not followed by a
gap, session -2 is followed by a gap, session -1 is
preceeded by a gap.
"with_gap_to_without_gap" - session 0 is followed by a gap,
session -2 is not followed by a gap, session -1 is not
preceeded by a gap.
"follows_non_session" - one or two sessions where session 0
is preceeded by a date that is a non-session.
"follows_non_session" - one or two sessions where session -1
is followed by a date that is a non-session.
"contains_non_session" = two sessions with at least one
non-session date in between.
If no such session block exists for any key then value will take an
empty DatetimeIndex (UTC).
"""
return self._session_blocks()
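# Illustrative iteration over the blocks (a sketch; which blocks are
# non-empty depends on the calendar's answers, `answers` being any Answers
# instance):
#
#     for name, block in answers.session_block_generator():
#         minutes = answers.session_block_minutes[name]
#         # `block` is a DatetimeIndex of 1+ contiguous sessions and
#         # `minutes` the corresponding trading minutes.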
def session_block_generator(self) -> abc.Iterator[tuple[str, pd.DatetimeIndex]]:
"""Generator of session blocks of a particular behaviour."""
for name, block in self.session_blocks.items():
if not block.empty:
yield (name, block)
@functools.lru_cache(maxsize=4)
def _session_block_minutes(self) -> dict[str, pd.DatetimeIndex]:
d = {}
for name, block in self.session_blocks.items():
if block.empty:
d[name] = pd.DatetimeIndex([], tz="UTC")
continue
d[name] = self.get_sessions_minutes(block[0], len(block))
return d
@property
def session_block_minutes(self) -> dict[str, pd.DatetimeIndex]:
"""Trading minutes for each `session_block`.
Key:
Session block name as documented to `session_blocks`.
Value:
Trading minutes of corresponding session block.
"""
return self._session_block_minutes()
@property
def sessions_sample(self) -> pd.DatetimeIndex:
"""Sample of normal and unusual sessions.
Sample comprises set of sessions of all `session_blocks` (see
`session_blocks` doc). In this way sample includes at least one
sample of every identified unique circumstance.
"""
dtis = list(self.session_blocks.values())
return dtis[0].union_many(dtis[1:])
# non-sessions...
@functools.lru_cache(maxsize=4)
def _non_sessions(self) -> pd.DatetimeIndex:
all_dates = pd.date_range(
start=self.first_session, end=self.last_session, freq="D"
)
return all_dates.difference(self.sessions)
@property
def non_sessions(self) -> pd.DatetimeIndex:
"""Dates (UTC midnight) within answers range that are not sessions."""
return self._non_sessions()
@property
def sessions_range_defined_by_non_sessions(
self,
) -> tuple[tuple[pd.Timestamp, pd.Timestamp], pd.DatetimeIndex] | None:
"""Range containing sessions, with bounds defined by non-session dates.
Returns
-------
tuple[tuple[pd.Timestamp, pd.Timestamp], pd.DatetimeIndex]:
[0] tuple[pd.Timestamp, pd.Timestamp]:
[0] range start as non-session date.
[1] range end as non-session date.
[1] pd.DatetimeIndex:
Sessions in range.
"""
non_sessions = self.non_sessions
if len(non_sessions) <= 1:
return None
limit = len(self.non_sessions) - 2
i = 0
start, end = non_sessions[i], non_sessions[i + 1]
while (end - start) < pd.Timedelta(4, "D"):
i += 1
start, end = non_sessions[i], non_sessions[i + 1]
if i == limit:
# Unable to evaluate range from consecutive non-sessions
# that covers >= 3 sessions. Just go with max range...
start, end = non_sessions[0], non_sessions[-1]
slice_start, slice_end = self.sessions.searchsorted((start, end))
return (start, end), self.sessions[slice_start:slice_end]
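# Worked example (dates illustrative): for a Mon-Fri calendar the first pair
# of consecutive non-sessions at least 4 days apart is (Sun 2021-01-03,
# Sat 2021-01-09), so the property would return that pair together with the
# five sessions Mon 2021-01-04 through Fri 2021-01-08 that lie between them.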
@property
def non_sessions_run(self) -> pd.DatetimeIndex:
"""Longest run of non_sessions."""
ser = self.sessions.to_series()
diff = ser.shift(-1) - ser
max_diff = diff.max()
if max_diff == pd.Timedelta(1, "D"):
return pd.DatetimeIndex([])
session_before_run = diff[diff == max_diff].index[-1]
run = pd.date_range(
start=session_before_run + pd.Timedelta(1, "D"),
periods=(max_diff // pd.Timedelta(1, "D")) - 1,
freq="D",
)
assert run.isin(self.non_sessions).all()
assert run[0] > self.first_session
assert run[-1] < self.last_session
return run
@property
def non_sessions_range(self) -> tuple[pd.Timestamp, pd.Timestamp] | None:
"""Longest range covering a period without a session."""
non_sessions_run = self.non_sessions_run
if non_sessions_run.empty:
return None
else:
return self.non_sessions_run[0], self.non_sessions_run[-1]
# --- Evaluated sets of minutes ---
@functools.lru_cache(maxsize=4)
def _evaluate_trading_and_break_minutes(self) -> tuple[tuple, tuple]:
sessions = self.sessions_sample
first_mins = self.first_minutes[sessions]
first_mins_plus_one = first_mins + self.ONE_MIN
last_mins = self.last_minutes[sessions]
last_mins_less_one = last_mins - self.ONE_MIN
trading_mins = []
break_mins = []
for session, mins_ in zip(
sessions,
zip(first_mins, first_mins_plus_one, last_mins, last_mins_less_one),
):
trading_mins.append((mins_, session))
if self.has_a_session_with_break:
last_am_mins = self.last_am_minutes[sessions]
last_am_mins = last_am_mins[last_am_mins.notna()]
first_pm_mins = self.first_pm_minutes[last_am_mins.index]
last_am_mins_less_one = last_am_mins - self.ONE_MIN
last_am_mins_plus_one = last_am_mins + self.ONE_MIN
last_am_mins_plus_two = last_am_mins + self.TWO_MIN
first_pm_mins_plus_one = first_pm_mins + self.ONE_MIN
first_pm_mins_less_one = first_pm_mins - self.ONE_MIN
first_pm_mins_less_two = first_pm_mins - self.TWO_MIN
for session, mins_ in zip(
last_am_mins.index,
zip(
last_am_mins,
last_am_mins_less_one,
first_pm_mins,
first_pm_mins_plus_one,
),
):
trading_mins.append((mins_, session))
for session, mins_ in zip(
last_am_mins.index,
zip(
last_am_mins_plus_one,
last_am_mins_plus_two,
first_pm_mins_less_one,
first_pm_mins_less_two,
),
):
break_mins.append((mins_, session))
return (tuple(trading_mins), tuple(break_mins))
@property
def trading_minutes(self) -> tuple[tuple[tuple[pd.Timestamp], pd.Timestamp]]:
"""Edge trading minutes of `sessions_sample`.
Returns
-------
tuple of tuple[tuple[trading_minutes], session]
tuple[trading_minutes] includes:
first two trading minutes of a session.
last two trading minutes of a session.
If breaks:
last two trading minutes of session's am subsession.
first two trading minutes of session's pm subsession.
session
Session of trading_minutes
"""
return self._evaluate_trading_and_break_minutes()[0]
def trading_minutes_only(self) -> abc.Iterator[pd.Timestamp]:
"""Generator of trading minutes of `self.trading_minutes`."""
for mins, _ in self.trading_minutes:
for minute in mins:
yield minute
@property
def trading_minute(self) -> pd.Timestamp:
"""A single trading minute."""
return self.trading_minutes[0][0][0]
@property
def break_minutes(self) -> tuple[tuple[tuple[pd.Timestamp], pd.Timestamp]]:
"""Sample of break minutes of `sessions_sample`.
Returns
-------
tuple of tuple[tuple[break_minutes], session]
tuple[break_minutes]:
first two minutes of a break.
last two minutes of a break.
session
Session of break_minutes
"""
return self._evaluate_trading_and_break_minutes()[1]
def break_minutes_only(self) -> abc.Iterator[pd.Timestamp]:
"""Generator of break minutes of `self.break_minutes`."""
for mins, _ in self.break_minutes:
for minute in mins:
yield minute
@functools.lru_cache(maxsize=4)
def _non_trading_minutes(
self,
) -> tuple[tuple[tuple[pd.Timestamp], pd.Timestamp, pd.Timestamp]]:
non_trading_mins = []
sessions = self.sessions_sample
sessions = prev_sessions = sessions[sessions.isin(self.sessions_with_gap_after)]
next_sessions = self.sessions[self.sessions.get_indexer(sessions) + 1]
last_mins_plus_one = self.last_minutes[sessions] + self.ONE_MIN
first_mins_less_one = self.first_minutes[next_sessions] - self.ONE_MIN
for prev_session, next_session, mins_ in zip(
prev_sessions, next_sessions, zip(last_mins_plus_one, first_mins_less_one)
):
non_trading_mins.append((mins_, prev_session, next_session))
return tuple(non_trading_mins)
@property
def non_trading_minutes(
self,
) -> tuple[tuple[tuple[pd.Timestamp], pd.Timestamp, pd.Timestamp]]:
"""non_trading_minutes that edge `sessions_sample`.
NB. Does not include break minutes.
Returns
-------
tuple of tuple[tuple[non-trading minute], previous session, next session]
tuple[non-trading minute]
Two non-trading minutes.
[0] first non-trading minute to follow a session.
[1] last non-trading minute prior to the next session.
previous session
                Session that precedes non-trading minutes.
next session
Session that follows non-trading minutes.
See Also
--------
break_minutes
"""
return self._non_trading_minutes()
def non_trading_minutes_only(self) -> abc.Iterator[pd.Timestamp]:
"""Generator of non-trading minutes of `self.non_trading_minutes`."""
for mins, _, _ in self.non_trading_minutes:
for minute in mins:
yield minute
# --- method-specific inputs/outputs ---
def prev_next_open_close_minutes(
self,
) -> abc.Iterator[
tuple[
pd.Timestamp,
tuple[
pd.Timestamp | None,
pd.Timestamp | None,
pd.Timestamp | None,
pd.Timestamp | None,
],
]
]:
"""Generator of test parameters for prev/next_open/close methods.
Inputs include following minutes of each session:
open
one minute prior to open (not included for first session)
one minute after open
close
one minute before close
one minute after close (not included for last session)
NB Assumed that minutes prior to first open and after last close
will be handled via parse_timestamp.
Yields
------
2-tuple:
            [0] Input minute, as pd.Timestamp
[1] 4 tuple of expected output of corresponding method:
[0] previous_open as pd.Timestamp | None
[1] previous_close as pd.Timestamp | None
[2] next_open as pd.Timestamp | None
[3] next_close as pd.Timestamp | None
NB None indicates that corresponding method is expected to
raise a ValueError for this input.
"""
close_is_next_open_bv = self.closes == self.opens.shift(-1)
open_was_prev_close_bv = self.opens == self.closes.shift(+1)
close_is_next_open = close_is_next_open_bv[0]
# minutes for session 0
minute = self.opens[0]
yield (minute, (None, None, self.opens[1], self.closes[0]))
minute = minute + self.ONE_MIN
yield (minute, (self.opens[0], None, self.opens[1], self.closes[0]))
minute = self.closes[0]
next_open = self.opens[2] if close_is_next_open else self.opens[1]
yield (minute, (self.opens[0], None, next_open, self.closes[1]))
minute += self.ONE_MIN
prev_open = self.opens[1] if close_is_next_open else self.opens[0]
yield (minute, (prev_open, self.closes[0], next_open, self.closes[1]))
minute = self.closes[0] - self.ONE_MIN
yield (minute, (self.opens[0], None, self.opens[1], self.closes[0]))
# minutes for sessions over [1:-1] except for -1 close and 'close + one_min'
opens = self.opens[1:-1]
closes = self.closes[1:-1]
prev_opens = self.opens[:-2]
prev_closes = self.closes[:-2]
next_opens = self.opens[2:]
next_closes = self.closes[2:]
opens_after_next = self.opens[3:]
# add dummy row to equal lengths (won't be used)
_ = pd.Series(pd.Timestamp("2200-01-01", tz="UTC"))
opens_after_next = opens_after_next.append(_)
stop = closes[-1]
for (
open_,
close,
prev_open,
prev_close,
next_open,
next_close,
open_after_next,
close_is_next_open,
open_was_prev_close,
) in zip(
opens,
closes,
prev_opens,
prev_closes,
next_opens,
next_closes,
opens_after_next,
close_is_next_open_bv[1:-2],
open_was_prev_close_bv[1:-2],
):
if not open_was_prev_close:
# only include open minutes if not otherwise duplicating
# evaluations already made for prior close.
yield (open_, (prev_open, prev_close, next_open, close))
yield (open_ - self.ONE_MIN, (prev_open, prev_close, open_, close))
yield (open_ + self.ONE_MIN, (open_, prev_close, next_open, close))
yield (close - self.ONE_MIN, (open_, prev_close, next_open, close))
if close != stop:
next_open_ = open_after_next if close_is_next_open else next_open
yield (close, (open_, prev_close, next_open_, next_close))
open_ = next_open if close_is_next_open else open_
yield (close + self.ONE_MIN, (open_, close, next_open_, next_close))
# close and 'close + one_min' for session -2
minute = self.closes[-2]
next_open = None if close_is_next_open_bv[-2] else self.opens[-1]
yield (minute, (self.opens[-2], self.closes[-3], next_open, self.closes[-1]))
minute += self.ONE_MIN
prev_open = self.opens[-1] if close_is_next_open_bv[-2] else self.opens[-2]
yield (minute, (prev_open, self.closes[-2], next_open, self.closes[-1]))
# minutes for session -1
if not open_was_prev_close_bv[-1]:
open_ = self.opens[-1]
prev_open = self.opens[-2]
prev_close = self.closes[-2]
next_open = None
close = self.closes[-1]
yield (open_, (prev_open, prev_close, next_open, close))
yield (open_ - self.ONE_MIN, (prev_open, prev_close, open_, close))
yield (open_ + self.ONE_MIN, (open_, prev_close, next_open, close))
minute = self.closes[-1]
next_open = self.opens[2] if close_is_next_open_bv[-1] else self.opens[1]
yield (minute, (self.opens[-1], self.closes[-2], None, None))
minute -= self.ONE_MIN
yield (minute, (self.opens[-1], self.closes[-2], None, self.closes[-1]))
# dunder
def __repr__(self) -> str:
return f"<Answers: calendar {self.name}, side {self.side}>"
def no_parsing(f: typing.Callable):
"""Wrap a method under test so that it skips input parsing."""
return lambda *args, **kwargs: f(*args, _parse=False, **kwargs)
class ExchangeCalendarTestBaseNew:
"""Test base for an ExchangeCalendar.
Notes
-----
=== Fixtures ===
    In accordance with the pytest framework, whilst methods are required to
have `self` as their first argument, no method should use `self`.
All required inputs should come by way of fixtures received to the
test method's arguments.
Methods that are directly or indirectly dependent on the evaluation of
trading minutes should be tested against the parameterized
`all_calendars_with_answers` fixture. This fixture will execute the
test against multiple calendar instances, one for each viable `side`.
The following methods directly evaluate trading minutes:
all_minutes
_last_minute_nanos()
_last_am_minute_nanos()
_first_minute_nanos()
_first_pm_minute_nanos()
NB this list does not include methods that indirectly evaluate methods
by way of calling (directly or indirectly) one of the above methods.
Methods that are not dependent on the evaluation of trading minutes
    should be tested against only the `default_calendar_with_answers`
or `default_calendar` fixture.
Calendar instances provided by fixtures should be used exclusively to
call the method being tested. NO TEST INPUT OR EXPECTED OUTPUT SHOULD
BE EVALUATED BY WAY OF CALLING A CALENDAR METHOD. Rather, test
inputs and expected output should be taken directly, or evaluated from,
properties/methods of the corresponding Answers fixture.
Subclasses are required to override a limited number of fixtures and
may be required to override others. Refer to the block comments.
"""
# subclass must override the following fixtures
@pytest.fixture(scope="class")
def calendar_cls(self) -> abc.Iterator[typing.Type[ExchangeCalendar]]:
"""ExchangeCalendar class to be tested.
Examples:
XNYSExchangeCalendar
AlwaysOpenCalendar
"""
raise NotImplementedError("fixture must be implemented on subclass")
@pytest.fixture
def max_session_hours(self) -> abc.Iterator[int | float]:
"""Largest number of hours that can comprise a single session.
Examples:
8
6.5
"""
raise NotImplementedError("fixture must be implemented on subclass")
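    # A minimal subclass sketch (hypothetical test class; the calendar class
    # and hours value are only examples, as per the fixture docstrings above):
    #
    #     class TestXNYSCalendar(ExchangeCalendarTestBaseNew):
    #         @pytest.fixture(scope="class")
    #         def calendar_cls(self):
    #             yield XNYSExchangeCalendar
    #
    #         @pytest.fixture
    #         def max_session_hours(self):
    #             yield 6.5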
# if subclass has a 24h session then subclass must override this fixture.
    # Define on the subclass as here, the only difference being that
    # ["left", "right"] is passed to the decorator's 'params' arg (24h
    # calendars cannot have a side defined as 'both' or 'neither').
@pytest.fixture(scope="class", params=["both", "left", "right", "neither"])
def all_calendars_with_answers(
self, request, calendars, answers
) -> abc.Iterator[tuple[ExchangeCalendar, Answers]]:
"""Parameterized calendars and answers for each side."""
yield (calendars[request.param], answers[request.param])
# subclass should override the following fixtures in the event that the
# default defined here does not apply.
@pytest.fixture
def start_bound(self) -> abc.Iterator[pd.Timestamp | None]:
"""Earliest date for which calendar can be instantiated, or None if
there is no start bound."""
yield None
@pytest.fixture
def end_bound(self) -> abc.Iterator[pd.Timestamp | None]:
"""Latest date for which calendar can be instantiated, or None if
there is no end bound."""
yield None
# Subclass can optionally override the following fixtures. By overriding
# a fixture the associated test will be executed with input as yielded
    # by the fixture. Where fixtures are not overridden the associated tests
# will be skipped.
@pytest.fixture
def regular_holidays_sample(self) -> abc.Iterator[list[str]]:
"""Sample of known regular calendar holidays. Empty list if no holidays.
`test_regular_holidays_sample` will check that each date does not
represent a calendar session.
Example return:
["2020-12-25", "2021-01-01", ...]
"""
yield []
@pytest.fixture
def adhoc_holidays_sample(self) -> abc.Iterator[list[str]]:
"""Sample of adhoc calendar holidays. Empty list if no adhoc holidays.
`test_adhoc_holidays_sample` will check that each date does not
represent a calendar session.
Example return:
["2015-04-17", "2021-09-12", ...]
"""
yield []
@pytest.fixture
def non_holidays_sample(self) -> abc.Iterator[list[str]]:
"""Sample of known dates that are not holidays.
`test_non_holidays_sample` will check that each date represents a
calendar session.
        Subclass should use this fixture if it wishes to test edge cases, for
        example where a session is an exception to a rule, or where a session
        precedes/follows a holiday that is an exception to a rule.
Example return:
["2019-12-27", "2020-01-02", ...]
"""
yield []
@pytest.fixture
def late_opens_sample(self) -> abc.Iterator[list[str]]:
"""Sample of known calendar sessions with late opens.
`test_late_opens_sample` will check that each date represents a
session with a late open.
Example returns:
["2022-01-03", "2022-04-22", ...]
"""
yield []
@pytest.fixture
def early_closes_sample(self) -> abc.Iterator[list[str]]:
"""Sample of known calendar sessions with early closes.
`test_early_closes_sample` will check that each date represents a
session with an early close.
Example returns:
["2019-12-24", "2019-12-31", ...]
"""
yield []
@pytest.fixture
def early_closes_sample_time(self) -> abc.Iterator[pd.Timedelta | None]:
"""Local close time of sessions of `early_closes_sample` fixture.
`test_early_closes_sample_time` will check all sessions of
`early_closes_sample` have this close time.
Only override fixture if:
            - `early_closes_sample` is overridden by subclass
- ALL sessions of `early_closes_sample` have the same local
close time (if sessions of `early_closes_sample` have
different local close times then the subclass should
instead check close times with a test defined on the
subclass).
Example returns:
pd.Timedelta(14, "H") # 14:00 local time
pd.Timedelta(hours=13, minutes=15) # 13:15 local time
"""
yield None
@pytest.fixture
def non_early_closes_sample(self) -> abc.Iterator[list[str]]:
"""Sample of known calendar sessions with normal close times.
`test_non_early_closes_sample` will check each date does not
represent a calendar session with an early close.
Subclass should use this fixture to test edge cases, for example
where an otherwise early close is an exception to a rule.
Example return:
["2022-12-23", "2022-12-30]
"""
yield []
@pytest.fixture
def non_early_closes_sample_time(self) -> abc.Iterator[pd.Timedelta | None]:
"""Local close time of sessions of `non_early_closes_sample` fixture.
`test_non_early_closes_sample_time` will check all sessions of
`non_early_closes_sample` have this close time.
Only override fixture if:
            - `non_early_closes_sample` is overridden by subclass.
- ALL sessions of `non_early_closes_sample` have the same local
close time (if sessions of `non_early_closes_sample` have
different local close times then the subclass should
instead check close times with a test defined on the
subclass).
Example returns:
pd.Timedelta(17, "H") # 17:00 local time
pd.Timedelta(hours=16, minutes=30) # 16:30 local time
"""
yield None
    # --- NO FIXTURE BELOW THIS LINE SHOULD BE OVERRIDDEN ON A SUBCLASS ---
def test_testbase_integrity(self):
"""Ensure integrity of TestBase.
        Raises error if a reserved fixture is overridden by the subclass.
"""
cls = self.__class__
for fixture in [
"test_testbase_integrity",
"name",
"has_24h_session",
"default_side",
"sides",
"answers",
"default_answers",
"calendars",
"default_calendar",
"calendars_with_answers",
"default_calendar_with_answers",
"one_minute",
"today",
"all_directions",
"valid_overrides",
"non_valid_overrides",
"daylight_savings_dates",
"late_opens",
"early_closes",
]:
if getattr(cls, fixture) != getattr(ExchangeCalendarTestBaseNew, fixture):
                raise RuntimeError(f"fixture '{fixture}' should not be overridden!")
# Base class fixtures
@pytest.fixture(scope="class")
def name(self, calendar_cls) -> abc.Iterator[str]:
"""Calendar name."""
yield calendar_cls.name
@pytest.fixture(scope="class")
def has_24h_session(self, name) -> abc.Iterator[bool]:
df = get_csv(name)
yield (df.market_close == df.market_open.shift(-1)).any()
@pytest.fixture(scope="class")
def default_side(self, has_24h_session) -> abc.Iterator[str]:
"""Default calendar side."""
if has_24h_session:
yield "left"
else:
yield "both"
@pytest.fixture(scope="class")
def sides(self, has_24h_session) -> abc.Iterator[list[str]]:
"""All valid sides options for calendar."""
if has_24h_session:
yield ["left", "right"]
else:
yield ["both", "left", "right", "neither"]
# Calendars and answers
@pytest.fixture(scope="class")
def answers(self, name, sides) -> abc.Iterator[dict[str, Answers]]:
"""Dict of answers, key as side, value as corresoponding answers."""
yield {side: Answers(name, side) for side in sides}
@pytest.fixture(scope="class")
def default_answers(self, answers, default_side) -> abc.Iterator[Answers]:
yield answers[default_side]
@pytest.fixture(scope="class")
def calendars(
self, calendar_cls, default_answers, sides
) -> abc.Iterator[dict[str, ExchangeCalendar]]:
"""Dict of calendars, key as side, value as corresoponding calendar."""
start = default_answers.first_session
end = default_answers.last_session
yield {side: calendar_cls(start, end, side) for side in sides}
@pytest.fixture(scope="class")
def default_calendar(
self, calendars, default_side
) -> abc.Iterator[ExchangeCalendar]:
yield calendars[default_side]
@pytest.fixture(scope="class")
def calendars_with_answers(
self, calendars, answers, sides
) -> abc.Iterator[dict[str, tuple[ExchangeCalendar, Answers]]]:
"""Dict of calendars and answers, key as side."""
yield {side: (calendars[side], answers[side]) for side in sides}
@pytest.fixture(scope="class")
def default_calendar_with_answers(
self, calendars_with_answers, default_side
) -> abc.Iterator[tuple[ExchangeCalendar, Answers]]:
yield calendars_with_answers[default_side]
# General use fixtures.
@pytest.fixture(scope="class")
def one_minute(self) -> abc.Iterator[pd.Timedelta]:
yield pd.Timedelta(1, "T")
@pytest.fixture(scope="class")
    def today(self) -> abc.Iterator[pd.Timestamp]:
yield pd.Timestamp.now(tz="UTC").floor("D")
@pytest.fixture(scope="class", params=["next", "previous", "none"])
def all_directions(self, request) -> abc.Iterator[str]:
"""Parameterised fixture of direction to go if minute is not a trading minute"""
yield request.param
@pytest.fixture(scope="class")
def valid_overrides(self) -> abc.Iterator[list[str]]:
"""Names of methods that can be overriden by a subclass."""
yield [
"name",
"bound_start",
"bound_end",
"_bound_start_error_msg",
"_bound_end_error_msg",
"default_start",
"default_end",
"tz",
"open_times",
"break_start_times",
"break_end_times",
"close_times",
"weekmask",
"open_offset",
"close_offset",
"regular_holidays",
"adhoc_holidays",
"special_opens",
"special_opens_adhoc",
"special_closes",
"special_closes_adhoc",
"special_weekmasks",
"special_offsets",
"special_offsets_adhoc",
]
@pytest.fixture(scope="class")
def non_valid_overrides(self, valid_overrides) -> abc.Iterator[list[str]]:
"""Names of methods that cannot be overriden by a subclass."""
yield [
name
for name in dir(ExchangeCalendar)
if name not in valid_overrides
and not name.startswith("__")
and not name == "_abc_impl"
]
@pytest.fixture(scope="class")
def daylight_savings_dates(
self, default_calendar
) -> abc.Iterator[list[pd.Timestamp]]:
"""All dates in a specific year that mark the first day of a new
time regime.
        Yields empty list if timezone's UTC offset does not change.
Notes
-----
        NB Any test that employs this fixture assumes the accuracy of the
default calendar's `tz` property.
"""
cal = default_calendar
year = cal.last_session.year - 1
days = pd.date_range(str(year), str(year + 1), freq="D")
tzinfo = pytz.timezone(cal.tz.zone)
prev_offset = tzinfo.utcoffset(days[0])
dates = []
for day in days[1:]:
try:
offset = tzinfo.utcoffset(day)
except pytz.NonExistentTimeError:
offset = tzinfo.utcoffset(day + pd.Timedelta(1, "H"))
if offset != prev_offset:
dates.append(day)
if len(dates) == 2:
break
prev_offset = offset
yield dates
@pytest.fixture(scope="class")
def late_opens(
self, default_calendar_with_answers
) -> abc.Iterator[pd.DatetimeIndex]:
"""Calendar sessions with a late open.
Late opens evaluated as those that are later than the prevailing
open time as defined by `default_calendar.open_times`.
Notes
-----
        NB Any test that employs this fixture ASSUMES the accuracy of the
following calendar properties:
`open_times`
`tz`
"""
cal, ans = default_calendar_with_answers
d = dict(cal.open_times)
d[pd.Timestamp.min] = d.pop(None)
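        # `open_times` maps the date from which each open time applies; the
        # None key (i.e. applies from the calendar start) is re-keyed to
        # Timestamp.min so the series below can be sorted and sliced by date.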
s = pd.Series(d).sort_index(ascending=False)
date_to = pd.Timestamp.max
dtis: list[pd.DatetimeIndex] = []
# For each period over which a distinct open time prevails...
for date_from, time_ in s.iteritems():
opens = ans.opens.tz_convert(None)[date_from:date_to] # index to tz-naive
sessions = opens.index
td = pd.Timedelta(hours=time_.hour, minutes=time_.minute)
# Evaluate session opens as if were all normal open time.
normal_opens = sessions + pd.Timedelta(cal.open_offset, "D") + td
normal_opens_utc = normal_opens.tz_localize(cal.tz).tz_convert("UTC")
# Append those sessions with opens (according to answers) later than
# what would be normal.
dtis.append(sessions[opens > normal_opens_utc])
if date_from != pd.Timestamp.min:
date_to = date_from - pd.Timedelta(1, "D")
late_opens = dtis[0].union_many(dtis[1:]).tz_localize("UTC")
yield late_opens
@pytest.fixture(scope="class")
def early_closes(
self, default_calendar_with_answers
) -> abc.Iterator[pd.DatetimeIndex]:
"""Calendar sessions with a late open.
Early closes evaluated as those that are earlier than the
prevailing close time as defined by `default_calendar.close_times`.
Notes
-----
        NB Any test that employs this fixture ASSUMES the accuracy of the
following calendar properties:
`close_times`
`tz`
"""
cal, ans = default_calendar_with_answers
d = dict(cal.close_times)
d[pd.Timestamp.min] = d.pop(None)
s = pd.Series(d).sort_index(ascending=False)
date_to = pd.Timestamp.max
dtis: list[pd.DatetimeIndex] = []
for date_from, time_ in s.iteritems():
closes = ans.closes.tz_convert(None)[date_from:date_to] # index to tz-naive
sessions = closes.index
td = pd.Timedelta(hours=time_.hour, minutes=time_.minute)
normal_closes = sessions + pd.Timedelta(cal.close_offset, "D") + td
normal_closes_utc = normal_closes.tz_localize(cal.tz).tz_convert("UTC")
dtis.append(sessions[closes < normal_closes_utc])
if date_from != pd.Timestamp.min:
date_to = date_from - pd.Timedelta(1, "D")
early_closes = dtis[0].union_many(dtis[1:]).tz_localize("UTC")
yield early_closes
# --- TESTS ---
# Tests for calendar definition and construction methods.
def test_base_integrity(self, calendar_cls, non_valid_overrides):
cls = calendar_cls
for name in non_valid_overrides:
assert getattr(cls, name) == getattr(ExchangeCalendar, name)
def test_calculated_against_csv(self, default_calendar_with_answers):
calendar, ans = default_calendar_with_answers
tm.assert_index_equal(calendar.schedule.index, ans.sessions)
def test_start_end(self, default_answers, calendar_cls):
ans = default_answers
sessions = ans.session_blocks["normal"]
start, end = sessions[0], sessions[-1]
cal = calendar_cls(start, end)
assert cal.first_session == start
assert cal.last_session == end
if len(ans.non_sessions) > 1:
# start and end as non-sessions
(start, end), sessions = ans.sessions_range_defined_by_non_sessions
cal = calendar_cls(start, end)
assert cal.first_session == sessions[0]
assert cal.last_session == sessions[-1]
def test_invalid_input(self, calendar_cls, sides, default_answers, name):
ans = default_answers
invalid_side = "both" if "both" not in sides else "invalid_side"
error_msg = f"`side` must be in {sides} although received as {invalid_side}."
with pytest.raises(ValueError, match=re.escape(error_msg)):
calendar_cls(side=invalid_side)
start = ans.sessions[1]
end_same_as_start = ans.sessions[1]
error_msg = (
"`start` must be earlier than `end` although `start` parsed as"
f" '{start}' and `end` as '{end_same_as_start}'."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
calendar_cls(start=start, end=end_same_as_start)
end_before_start = ans.sessions[0]
error_msg = (
"`start` must be earlier than `end` although `start` parsed as"
f" '{start}' and `end` as '{end_before_start}'."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
calendar_cls(start=start, end=end_before_start)
if len(ans.non_sessions) > 1:
start, end = ans.non_sessions_range
error_msg = (
f"The requested ExchangeCalendar, {name.upper()}, cannot be created as"
f" there would be no sessions between the requested `start` ('{start}')"
f" and `end` ('{end}') dates."
)
with pytest.raises(NoSessionsError, match=re.escape(error_msg)):
calendar_cls(start=start, end=end)
def test_bound_start(self, calendar_cls, start_bound, today):
if start_bound is not None:
cal = calendar_cls(start_bound, today)
assert isinstance(cal, ExchangeCalendar)
start = start_bound - pd.DateOffset(days=1)
with pytest.raises(ValueError, match=re.escape(f"{start}")):
calendar_cls(start, today)
else:
# verify no bound imposed
cal = calendar_cls(pd.Timestamp("1902-01-01", tz="UTC"), today)
assert isinstance(cal, ExchangeCalendar)
def test_bound_end(self, calendar_cls, end_bound, today):
if end_bound is not None:
cal = calendar_cls(today, end_bound)
assert isinstance(cal, ExchangeCalendar)
end = end_bound + pd.DateOffset(days=1)
with pytest.raises(ValueError, match=re.escape(f"{end}")):
calendar_cls(today, end)
else:
# verify no bound imposed
cal = calendar_cls(today, pd.Timestamp("2050-01-01", tz="UTC"))
assert isinstance(cal, ExchangeCalendar)
def test_sanity_check_session_lengths(self, default_calendar, max_session_hours):
cal = default_calendar
cal_max_secs = (cal.market_closes_nanos - cal.market_opens_nanos).max()
assert cal_max_secs / 3600000000000 <= max_session_hours
def test_adhoc_holidays_specification(self, default_calendar):
"""adhoc holidays should be tz-naive (#33, #39)."""
dti = pd.DatetimeIndex(default_calendar.adhoc_holidays)
assert dti.tz is None
def test_daylight_savings(self, default_calendar, daylight_savings_dates):
# make sure there's no weirdness around calculating the next day's
# session's open time.
if not daylight_savings_dates:
pytest.skip()
cal = default_calendar
d = dict(cal.open_times)
d[pd.Timestamp.min] = d.pop(None)
open_times = pd.Series(d)
for date in daylight_savings_dates:
# where `next day` is first session of new daylight savings regime
next_day = cal.date_to_session_label(T(date), "next")
open_date = next_day + Timedelta(days=cal.open_offset)
the_open = cal.schedule.loc[next_day].market_open
localized_open = the_open.tz_localize(UTC).tz_convert(cal.tz)
assert open_date.year == localized_open.year
assert open_date.month == localized_open.month
assert open_date.day == localized_open.day
open_ix = open_times.index.searchsorted(date, side="right")
if open_ix == len(open_times):
open_ix -= 1
open_time = open_times.iloc[open_ix]
assert open_time.hour == localized_open.hour
assert open_time.minute == localized_open.minute
# Tests for properties covering all sessions.
def test_all_sessions(self, default_calendar_with_answers):
cal, ans = default_calendar_with_answers
ans_sessions = ans.sessions
cal_sessions = cal.all_sessions
tm.assert_index_equal(ans_sessions, cal_sessions)
def test_opens_closes_break_starts_ends(self, default_calendar_with_answers):
"""Test `opens`, `closes, `break_starts` and `break_ends` properties."""
cal, ans = default_calendar_with_answers
for prop in (
"opens",
"closes",
"break_starts",
"break_ends",
):
ans_series = getattr(ans, prop).dt.tz_convert(None)
cal_series = getattr(cal, prop)
tm.assert_series_equal(ans_series, cal_series, check_freq=False)
def test_minutes_properties(self, all_calendars_with_answers):
"""Test minute properties.
Tests following calendar properties:
all_first_minutes
all_last_minutes
all_last_am_minutes
all_first_pm_minutes
"""
cal, ans = all_calendars_with_answers
for prop in (
"first_minutes",
"last_minutes",
"last_am_minutes",
"first_pm_minutes",
):
ans_minutes = getattr(ans, prop)
cal_minutes = getattr(cal, "all_" + prop)
tm.assert_series_equal(ans_minutes, cal_minutes, check_freq=False)
# Tests for properties covering all minutes.
def test_all_minutes(self, all_calendars_with_answers, one_minute):
"""Test trading minutes at sessions' bounds."""
calendar, ans = all_calendars_with_answers
side = ans.side
mins = calendar.all_minutes
assert isinstance(mins, pd.DatetimeIndex)
assert not mins.empty
mins_plus_1 = mins + one_minute
mins_less_1 = mins - one_minute
if side in ["left", "neither"]:
# Test that close and break_start not in mins,
# but are in mins_plus_1 (unless no gap after)
# do not test here for sessions with no gap after as for "left" these
# sessions' close IS a trading minute as it's the same as next session's
# open.
# NB For "neither" all sessions will have gap after.
closes = ans.closes[ans.sessions_with_gap_after]
# closes should not be in minutes
assert not mins.isin(closes).any()
# all closes should be in minutes plus 1
# for speed, use only subset of mins that are of interest
mins_plus_1_on_close = mins_plus_1[mins_plus_1.isin(closes)]
assert closes.isin(mins_plus_1_on_close).all()
# as noted above, if no gap after then close should be a trading minute
# as will be first minute of next session.
closes = ans.closes[ans.sessions_without_gap_after]
mins_on_close = mins[mins.isin(closes)]
assert closes.isin(mins_on_close).all()
if ans.has_a_session_with_break:
# break start should not be in minutes
assert not mins.isin(ans.break_starts).any()
# break start should be in minutes plus 1
break_starts = ans.break_starts[ans.sessions_with_break]
mins_plus_1_on_start = mins_plus_1[mins_plus_1.isin(break_starts)]
assert break_starts.isin(mins_plus_1_on_start).all()
if side in ["left", "both"]:
# Test that open and break_end are in mins,
# but not in mins_plus_1 (unless no gap before)
mins_on_open = mins[mins.isin(ans.opens)]
assert ans.opens.isin(mins_on_open).all()
opens = ans.opens[ans.sessions_with_gap_before]
assert not mins_plus_1.isin(opens).any()
opens = ans.opens[ans.sessions_without_gap_before]
mins_plus_1_on_open = mins_plus_1[mins_plus_1.isin(opens)]
assert opens.isin(mins_plus_1_on_open).all()
if ans.has_a_session_with_break:
break_ends = ans.break_ends[ans.sessions_with_break]
mins_on_end = mins[mins.isin(ans.break_ends)]
assert break_ends.isin(mins_on_end).all()
if side in ["right", "neither"]:
# Test that open and break_end are not in mins,
# but are in mins_less_1 (unless no gap before)
opens = ans.opens[ans.sessions_with_gap_before]
assert not mins.isin(opens).any()
mins_less_1_on_open = mins_less_1[mins_less_1.isin(opens)]
assert opens.isin(mins_less_1_on_open).all()
opens = ans.opens[ans.sessions_without_gap_before]
mins_on_open = mins[mins.isin(opens)]
assert opens.isin(mins_on_open).all()
if ans.has_a_session_with_break:
assert not mins.isin(ans.break_ends).any()
break_ends = ans.break_ends[ans.sessions_with_break]
mins_less_1_on_end = mins_less_1[mins_less_1.isin(break_ends)]
assert break_ends.isin(mins_less_1_on_end).all()
if side in ["right", "both"]:
# Test that close and break_start are in mins,
# but not in mins_less_1 (unless no gap after)
mins_on_close = mins[mins.isin(ans.closes)]
assert ans.closes.isin(mins_on_close).all()
closes = ans.closes[ans.sessions_with_gap_after]
assert not mins_less_1.isin(closes).any()
closes = ans.closes[ans.sessions_without_gap_after]
mins_less_1_on_close = mins_less_1[mins_less_1.isin(closes)]
assert closes.isin(mins_less_1_on_close).all()
if ans.has_a_session_with_break:
break_starts = ans.break_starts[ans.sessions_with_break]
mins_on_start = mins[mins.isin(ans.break_starts)]
assert break_starts.isin(mins_on_start).all()
# Tests for calendar properties.
def test_calendar_bounds_properties(self, all_calendars_with_answers):
"""Test calendar properties that define a calendar bound.
Tests following calendar properties:
first_session
last_session
first_session_open
last_session_close
first_trading_minute
last_trading_minute
"""
cal, ans = all_calendars_with_answers
assert ans.first_session == cal.first_session
assert ans.last_session == cal.last_session
assert ans.first_session_open.tz_convert(None) == cal.first_session_open
assert ans.last_session_close.tz_convert(None) == cal.last_session_close
assert ans.first_trading_minute == cal.first_trading_minute
assert ans.last_trading_minute == cal.last_trading_minute
def test_has_breaks(self, default_calendar_with_answers):
cal, ans = default_calendar_with_answers
f = no_parsing(cal.has_breaks)
has_a_break = ans.has_a_session_with_break
assert f() == has_a_break
if ans.has_a_session_without_break:
assert not f(*ans.sessions_without_break_range)
if has_a_break:
# i.e. mixed, some sessions have a break, some don't
block = ans.session_blocks["with_break_to_without_break"]
if not block.empty:
# guard against starting with no breaks, then an introduction
# of breaks to every session after a certain date
# (i.e. there would be no with_break_to_without_break)
assert f(block[0], block[-1])
block = ans.session_blocks["without_break_to_with_break"]
if not block.empty:
# ...guard against opposite case (e.g. XKRX)
assert f(block[0], block[-1])
else:
# in which case all sessions must have a break. Make sure...
assert cal.break_starts.notna().all()
def test_regular_holidays_sample(self, default_calendar, regular_holidays_sample):
"""Test that calendar-specific sample of holidays are not sessions."""
if not regular_holidays_sample:
pytest.skip()
for holiday in regular_holidays_sample:
assert T(holiday) not in default_calendar.all_sessions
def test_adhoc_holidays_sample(self, default_calendar, adhoc_holidays_sample):
"""Test that calendar-specific sample of holidays are not sessions."""
if not adhoc_holidays_sample:
pytest.skip()
for holiday in adhoc_holidays_sample:
assert T(holiday) not in default_calendar.all_sessions
def test_non_holidays_sample(self, default_calendar, non_holidays_sample):
"""Test that calendar-specific sample of non-holidays are sessions."""
if not non_holidays_sample:
pytest.skip()
for date in non_holidays_sample:
assert T(date) in default_calendar.all_sessions
# NOTE: As of Oct 21 no calendar tests late opens (indeed, believe that no
# calendar defines late opens). Test commented out to prevent skip tests littering
# output. REINSTATE TEST IF any calendar defines and test late opens.
# def test_late_opens_sample(self, default_calendar, late_opens_sample):
# """Test calendar-specific sample of sessions are included to late opens."""
# if not late_opens_sample:
# pytest.skip()
# for date in late_opens_sample:
# assert T(date) in default_calendar.late_opens
def test_early_closes_sample(self, default_calendar, early_closes_sample):
"""Test calendar-specific sample of sessions are included to early closes."""
if not early_closes_sample:
pytest.skip()
for date in early_closes_sample:
assert T(date) in default_calendar.early_closes
def test_early_closes_sample_time(
self, default_calendar, early_closes_sample, early_closes_sample_time
):
"""Test close time of calendar-specific sample of early closing sessions.
Notes
-----
TEST RELIES ON ACCURACY OF CALENDAR PROPERTIES `closes`, `tz` and
`close_offset`.
"""
if early_closes_sample_time is None:
pytest.skip()
cal, tz = default_calendar, default_calendar.tz
offset = pd.Timedelta(cal.close_offset, "D") + early_closes_sample_time
for date in early_closes_sample:
early_close = cal.closes[date].tz_localize(UTC).tz_convert(tz)
expected = pd.Timestamp(date, tz=tz) + offset
assert early_close == expected
def test_non_early_closes_sample(self, default_calendar, non_early_closes_sample):
"""Test calendar-specific sample of sessions are not early closes."""
if not non_early_closes_sample:
pytest.skip()
for date in non_early_closes_sample:
assert T(date) not in default_calendar.early_closes
def test_non_early_closes_sample_time(
self, default_calendar, non_early_closes_sample, non_early_closes_sample_time
):
"""Test close time of calendar-specific sample of sessions with normal closes.
Notes
-----
TEST RELIES ON ACCURACY OF CALENDAR PROPERTIES `closes`, `tz` and
`close_offset`.
"""
if non_early_closes_sample_time is None:
pytest.skip()
cal, tz = default_calendar, default_calendar.tz
offset = pd.Timedelta(cal.close_offset, "D") + non_early_closes_sample_time
for date in non_early_closes_sample:
close = cal.closes[date].tz_localize(UTC).tz_convert(tz)
expected_close = pd.Timestamp(date, tz=tz) + offset
assert close == expected_close
def test_late_opens(self, default_calendar, late_opens):
"""Test late opens.
Notes
-----
TEST RELIES ON ACCURACY OF CALENDAR PROPERTIES `open_times` and
`tz`. See `late_opens` fixture.
"""
tm.assert_index_equal(late_opens, default_calendar.late_opens)
def test_early_closes(self, default_calendar, early_closes):
"""Test early closes.
Notes
-----
TEST RELIES ON ACCURACY OF CALENDAR PROPERTIES `close_times` and
`tz`. See `early_closes` fixture.
"""
tm.assert_index_equal(early_closes, default_calendar.early_closes)
# Tests for methods that interrogate a given session.
def test_session_open_close_break_start_end(self, default_calendar_with_answers):
"""Test methods that get session open, close, break_start, break_end.
Tests following calendar methods:
session_open
session_close
open_and_close_for_session
session_break_start
session_break_end
break_start_and_end_for_session
"""
# considered sufficient to limit test to sessions of session blocks.
cal, ans = default_calendar_with_answers
for _, block in ans.session_block_generator():
for session in block:
ans_open = ans.opens[session]
ans_close = ans.closes[session]
assert cal.session_open(session, _parse=False) == ans_open
assert cal.session_close(session, _parse=False) == ans_close
assert cal.open_and_close_for_session(session, _parse=False) == (
ans_open,
ans_close,
)
break_start = cal.session_break_start(session, _parse=False)
break_end = cal.session_break_end(session, _parse=False)
break_start_and_end = cal.break_start_and_end_for_session(
session, _parse=False
)
ans_break_start = ans.break_starts[session]
ans_break_end = ans.break_ends[session]
if pd.isna(ans_break_start):
assert pd.isna(break_start) and pd.isna(break_end)
assert pd.isna(break_start_and_end[0])
assert pd.isna(break_start_and_end[1])
else:
assert break_start == ans_break_start
assert break_end == ans_break_end
assert break_start_and_end[0] == ans_break_start
assert break_start_and_end[1] == ans_break_end
def test_session_minute_methods(self, all_calendars_with_answers):
"""Test methods that get a minute bound of a session or subsession.
Tests following calendar methods:
session_first_minute
session_last_minute
session_last_am_minute
session_first_pm_minute
session_first_and_last_minute
"""
# considered sufficient to limit test to sessions of session blocks.
cal, ans = all_calendars_with_answers
for _, block in ans.session_block_generator():
for session in block:
ans_first_minute = ans.first_minutes[session]
ans_last_minute = ans.last_minutes[session]
assert (
cal.session_first_minute(session, _parse=False) == ans_first_minute
)
assert cal.session_last_minute(session, _parse=False) == ans_last_minute
assert cal.session_first_and_last_minute(session, _parse=False) == (
ans_first_minute,
ans_last_minute,
)
last_am_minute = cal.session_last_am_minute(session, _parse=False)
first_pm_minute = cal.session_first_pm_minute(session, _parse=False)
ans_last_am_minute = ans.last_am_minutes[session]
ans_first_pm_minute = ans.first_pm_minutes[session]
if pd.isna(ans_last_am_minute):
assert pd.isna(last_am_minute) and pd.isna(first_pm_minute)
else:
assert last_am_minute == ans_last_am_minute
assert first_pm_minute == ans_first_pm_minute
def test_session_has_break(self, default_calendar_with_answers):
cal, ans = default_calendar_with_answers
f = no_parsing(cal.session_has_break)
# test every 10th session...
for session in ans.sessions_with_break[::10]:
assert f(session)
for session in ans.sessions_without_break[::10]:
assert not f(session)
def test_next_prev_session(self, default_calendar_with_answers):
cal, ans = default_calendar_with_answers
f_prev = no_parsing(cal.previous_session_label)
f_next = no_parsing(cal.next_session_label)
# NB non-sessions handled by methods via parse_session
# first session
with pytest.raises(ValueError):
f_prev(ans.first_session)
        # middle sessions (and f_prev for last session)
for session, next_session in zip(ans.sessions[:-1], ans.sessions[1:]):
assert f_next(session) == next_session
assert f_prev(next_session) == session
# last session
with pytest.raises(ValueError):
f_next(ans.last_session)
def test_minutes_for_session(self, all_calendars_with_answers):
cal, ans = all_calendars_with_answers
f = no_parsing(cal.minutes_for_session)
# Limit test to every session of each session block.
for _, block in ans.session_block_generator():
for session in block:
tm.assert_index_equal(f(session), ans.get_sessions_minutes(session))
# Tests for methods that interrogate a date.
def test_is_session(self, default_calendar_with_answers):
cal, ans = default_calendar_with_answers
f = no_parsing(cal.is_session)
for session in ans.sessions:
assert f(session)
for session in ans.non_sessions:
assert not f(session)
def test_date_to_session_label(self, default_calendar_with_answers):
cal, ans = default_calendar_with_answers
f = no_parsing(cal.date_to_session_label)
# test for error if request session prior to first calendar session.
error_msg = (
"Cannot get a session label prior to the first calendar"
f" session ('{ans.first_session}'). Consider passing"
" `direction` as 'next'."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
f(ans.session_too_early, "previous")
sessions = ans.sessions
# direction as "previous"
dates = pd.date_range(sessions[0], sessions[-1], freq="D")
date_is_session = dates.isin(sessions)
last_session = None
for date, is_session in zip(dates, date_is_session):
session_label = f(date, "previous")
if is_session:
assert session_label == date
last_session = session_label
else:
assert session_label == last_session
# direction as "next"
last_session = None
for date, is_session in zip(
dates.sort_values(ascending=False), date_is_session[::-1]
):
session_label = f(date, "next")
if date in sessions:
assert session_label == date
last_session = session_label
else:
assert session_label == last_session
# test for error if request session after last calendar session.
error_msg = (
"Cannot get a session label later than the last calendar"
f" session ('{ans.last_session}'). Consider passing"
" `direction` as 'previous'."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
f(ans.session_too_late, "next")
# test for non_sessions without direction
if not ans.non_sessions.empty:
for non_session in ans.non_sessions[0 : None : len(ans.non_sessions) // 9]:
error_msg = (
f"`date` '{non_session}' does not represent a session. Consider"
" passing a `direction`."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
f(non_session, "none")
# test default behaviour
with pytest.raises(ValueError, match=re.escape(error_msg)):
f(non_session)
# non-valid direction (only raised if pass a date that is not a session)
error_msg = (
"'not a direction' is not a valid `direction`. Valid `direction`"
' values are "next", "previous" and "none".'
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
f(non_session, "not a direction")
# Tests for methods that interrogate a given minute (trading or non-trading)
def test_is_trading_minute(self, all_calendars_with_answers):
calendar, ans = all_calendars_with_answers
f = no_parsing(calendar.is_trading_minute)
for non_trading_min in ans.non_trading_minutes_only():
assert f(non_trading_min) is False
for trading_min in ans.trading_minutes_only():
assert f(trading_min) is True
for break_min in ans.break_minutes_only():
assert f(break_min) is False
def test_is_break_minute(self, all_calendars_with_answers):
calendar, ans = all_calendars_with_answers
f = no_parsing(calendar.is_break_minute)
for non_trading_min in ans.non_trading_minutes_only():
assert f(non_trading_min) is False
for trading_min in ans.trading_minutes_only():
assert f(trading_min) is False
for break_min in ans.break_minutes_only():
assert f(break_min) is True
def test_is_open_on_minute(self, all_calendars_with_answers):
calendar, ans = all_calendars_with_answers
f = no_parsing(calendar.is_open_on_minute)
# minimal test as is_open_on_minute delegates evaluation to is_trading_minute
# and is_break_minute, both of which are comprehensively tested.
for non_trading_min in itertools.islice(ans.non_trading_minutes_only(), 50):
assert f(non_trading_min) is False
for trading_min in itertools.islice(ans.trading_minutes_only(), 50):
assert f(trading_min) is True
for break_min in ans.break_minutes_only():
rtrn = f(break_min, ignore_breaks=True)
assert rtrn is True
rtrn = f(break_min)
assert rtrn is False
def test_prev_next_open_close(self, default_calendar_with_answers):
"""Test methods that return previous/next open/close.
Tests methods:
previous_open
previous_close
next_open
next_close
"""
cal, ans = default_calendar_with_answers
generator = ans.prev_next_open_close_minutes()
for minute, (prev_open, prev_close, next_open, next_close) in generator:
if prev_open is None:
with pytest.raises(ValueError):
cal.previous_open(minute, _parse=False)
else:
assert cal.previous_open(minute, _parse=False) == prev_open
if prev_close is None:
with pytest.raises(ValueError):
cal.previous_close(minute, _parse=False)
else:
assert cal.previous_close(minute, _parse=False) == prev_close
if next_open is None:
with pytest.raises(ValueError):
cal.next_open(minute, _parse=False)
else:
assert cal.next_open(minute, _parse=False) == next_open
if next_close is None:
with pytest.raises(ValueError):
cal.next_close(minute, _parse=False)
else:
assert cal.next_close(minute, _parse=False) == next_close
def test_prev_next_minute(self, all_calendars_with_answers, one_minute):
"""Test methods that return previous/next minute.
Test focuses on and inside of edge cases.
Tests methods:
next_minute
previous_minute
"""
cal, ans = all_calendars_with_answers
f_next = no_parsing(cal.next_minute)
f_prev = no_parsing(cal.previous_minute)
# minutes of first session
first_min = ans.first_minutes[0]
first_min_plus_one = ans.first_minutes_plus_one[0]
first_min_less_one = ans.first_minutes_less_one[0]
last_min = ans.last_minutes[0]
last_min_plus_one = ans.last_minutes_plus_one[0]
last_min_less_one = ans.last_minutes_less_one[0]
with pytest.raises(ValueError):
f_prev(first_min)
# minutes earlier than first_trading_minute assumed handled via parse_timestamp
assert f_next(first_min) == first_min_plus_one
assert f_next(first_min_plus_one) == first_min_plus_one + one_minute
assert f_prev(first_min_plus_one) == first_min
assert f_prev(last_min) == last_min_less_one
assert f_prev(last_min_less_one) == last_min_less_one - one_minute
assert f_next(last_min_less_one) == last_min
assert f_prev(last_min_plus_one) == last_min
prev_last_min = last_min
for (
first_min,
first_min_plus_one,
first_min_less_one,
last_min,
last_min_plus_one,
last_min_less_one,
gap_before,
) in zip(
ans.first_minutes[1:],
ans.first_minutes_plus_one[1:],
ans.first_minutes_less_one[1:],
ans.last_minutes[1:],
ans.last_minutes_plus_one[1:],
ans.last_minutes_less_one[1:],
~ans._mask_sessions_without_gap_before[1:],
):
assert f_next(prev_last_min) == first_min
assert f_prev(first_min) == prev_last_min
assert f_next(first_min) == first_min_plus_one
assert f_prev(first_min_plus_one) == first_min
assert f_next(first_min_less_one) == first_min
assert f_prev(last_min) == last_min_less_one
assert f_next(last_min_less_one) == last_min
assert f_prev(last_min_plus_one) == last_min
if gap_before:
assert f_next(prev_last_min + one_minute) == first_min
assert f_prev(first_min_less_one) == prev_last_min
else:
                assert f_next(prev_last_min + one_minute) == first_min_plus_one
prev_last_min = last_min
with pytest.raises(ValueError):
f_next(last_min)
# minutes later than last_trading_minute assumed handled via parse_timestamp
if ans.has_a_session_with_break:
for (
last_am_min,
last_am_min_less_one,
last_am_min_plus_one,
first_pm_min,
first_pm_min_less_one,
first_pm_min_plus_one,
) in zip(
ans.last_am_minutes,
ans.last_am_minutes_less_one,
ans.last_am_minutes_plus_one,
ans.first_pm_minutes,
ans.first_pm_minutes_less_one,
ans.first_pm_minutes_plus_one,
):
if pd.isna(last_am_min):
continue
assert f_next(last_am_min_less_one) == last_am_min
assert f_next(last_am_min) == first_pm_min
assert f_prev(last_am_min) == last_am_min_less_one
assert f_next(last_am_min_plus_one) == first_pm_min
assert f_prev(last_am_min_plus_one) == last_am_min
assert f_prev(first_pm_min_less_one) == last_am_min
assert f_next(first_pm_min_less_one) == first_pm_min
assert f_prev(first_pm_min) == last_am_min
assert f_next(first_pm_min) == first_pm_min_plus_one
assert f_prev(first_pm_min_plus_one) == first_pm_min
def test_minute_to_session_label(self, all_calendars_with_answers, all_directions):
direction = all_directions
calendar, ans = all_calendars_with_answers
f = no_parsing(calendar.minute_to_session_label)
for non_trading_mins, prev_session, next_session in ans.non_trading_minutes:
for non_trading_min in non_trading_mins:
if direction == "none":
with pytest.raises(ValueError):
f(non_trading_min, direction)
else:
session = f(non_trading_min, direction)
if direction == "next":
assert session == next_session
else:
assert session == prev_session
for trading_minutes, session in ans.trading_minutes:
for trading_minute in trading_minutes:
rtrn = f(trading_minute, direction)
assert rtrn == session
if ans.has_a_session_with_break:
for break_minutes, session in ans.break_minutes[:15]:
for break_minute in break_minutes:
rtrn = f(break_minute, direction)
assert rtrn == session
oob_minute = ans.minute_too_early
if direction in ["previous", "none"]:
error_msg = (
f"Received `minute` as '{oob_minute}' although this is earlier than"
f" the calendar's first trading minute ({ans.first_trading_minute})"
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
f(oob_minute, direction)
else:
session = f(oob_minute, direction)
assert session == ans.first_session
oob_minute = ans.minute_too_late
if direction in ["next", "none"]:
error_msg = (
f"Received `minute` as '{oob_minute}' although this is later"
f" than the calendar's last trading minute ({ans.last_trading_minute})"
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
f(oob_minute, direction)
else:
session = f(oob_minute, direction)
assert session == ans.last_session
# Tests for methods that evaluate or interrogate a range of minutes.
def test_minutes_in_range(self, all_calendars_with_answers, one_minute):
cal, ans = all_calendars_with_answers
f = no_parsing(cal.minutes_in_range)
block_minutes = ans.session_block_minutes
for name, block in ans.session_block_generator():
ans_dti = block_minutes[name]
from_ = ans.first_minutes[block[0]]
to = ans.last_minutes[block[-1]]
cal_dti = f(from_, to)
tm.assert_index_equal(ans_dti, cal_dti)
# test consequence of getting range from one minute before/after the
# block's first/last trading minute.
if name in ["first_three", "last_three"]:
continue
cal_dti = f(from_ - one_minute, to + one_minute)
start_idx = 1 if block[0] in ans.sessions_without_gap_before else 0
end_idx = -1 if block[-1] in ans.sessions_without_gap_after else None
tm.assert_index_equal(ans_dti, cal_dti[start_idx:end_idx])
# intra-session
from_ = ans.first_minutes[ans.first_session] + pd.Timedelta(15, "T")
to = ans.first_minutes[ans.first_session] + pd.Timedelta(45, "T")
expected = pd.date_range(from_, to, freq="T")
rtrn = f(from_, to)
tm.assert_index_equal(expected, rtrn)
# inter-session
if not ans.sessions_with_gap_after.empty:
session = ans.sessions_with_gap_after[0]
next_session = ans.get_next_session(session)
from_ = ans.last_minutes[session] + one_minute
to = ans.first_minutes[next_session] - one_minute
assert f(from_, to).empty
def test_minutes_window(self, all_calendars_with_answers):
cal, ans = all_calendars_with_answers
f = no_parsing(cal.minutes_window)
block_minutes = ans.session_block_minutes
for name, block in ans.session_block_generator():
start = ans.first_minutes[block[0]]
ans_dti = block_minutes[name]
count = len(ans_dti) - 1
cal_dti = f(start, count)
            tm.assert_index_equal(ans_dti, cal_dti)
from __future__ import annotations
from datetime import timedelta
import operator
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Callable,
Hashable,
List,
cast,
)
import warnings
import numpy as np
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._typing import Dtype
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.util._exceptions import rewrite_exception
from pandas.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
is_float,
is_integer,
is_scalar,
is_signed_integer_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCTimedeltaIndex
from pandas.core import ops
import pandas.core.common as com
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
from pandas.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
)
from pandas.core.ops.common import unpack_zerodim_and_defer
if TYPE_CHECKING:
from pandas import Index
_empty_range = range(0)
class RangeIndex(NumericIndex):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by DataFrame and Series when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), range, or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
dtype : np.int64
Unused, accepted for homogeneity with other index types.
copy : bool, default False
Unused, accepted for homogeneity with other index types.
name : object, optional
Name to be stored in the index.
Attributes
----------
start
stop
step
Methods
-------
from_range
See Also
--------
Index : The base pandas Index type.
Int64Index : Index of int64 data.
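    Examples
    --------
    Illustrative constructions (standard pandas behaviour):
    >>> list(pd.RangeIndex(5))
    [0, 1, 2, 3, 4]
    >>> list(pd.RangeIndex(-2, 4))
    [-2, -1, 0, 1, 2, 3]
    >>> list(pd.RangeIndex(0, 10, 2))
    [0, 2, 4, 6, 8]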
"""
_typ = "rangeindex"
_engine_type = libindex.Int64Engine
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
_can_hold_na = False
_range: range
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
start=None,
stop=None,
step=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
) -> RangeIndex:
cls._validate_dtype(dtype)
name = maybe_extract_name(name, start, cls)
# RangeIndex
if isinstance(start, RangeIndex):
return start.copy(name=name)
elif isinstance(start, range):
return cls._simple_new(start, name=name)
# validate the arguments
if com.all_none(start, stop, step):
raise TypeError("RangeIndex(...) must be called with integers")
start = ensure_python_int(start) if start is not None else 0
if stop is None:
start, stop = 0, start
else:
stop = ensure_python_int(stop)
step = ensure_python_int(step) if step is not None else 1
if step == 0:
raise ValueError("Step must not be zero")
rng = range(start, stop, step)
return cls._simple_new(rng, name=name)
@classmethod
def from_range(
cls, data: range, name=None, dtype: Dtype | None = None
) -> RangeIndex:
"""
Create RangeIndex from a range object.
Returns
-------
RangeIndex
"""
if not isinstance(data, range):
raise TypeError(
f"{cls.__name__}(...) must be called with object coercible to a "
f"range, {repr(data)} was passed"
)
cls._validate_dtype(dtype)
return cls._simple_new(data, name=name)
@classmethod
def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex:
result = object.__new__(cls)
assert isinstance(values, range)
result._range = values
result._name = name
result._cache = {}
result._reset_identity()
return result
# --------------------------------------------------------------------
@cache_readonly
def _constructor(self) -> type[Int64Index]:
""" return the class to use for construction """
return Int64Index
@cache_readonly
def _data(self) -> np.ndarray:
"""
An int array that for performance reasons is created only when needed.
The constructed array is saved in ``_cache``.
"""
return np.arange(self.start, self.stop, self.step, dtype=np.int64)
@cache_readonly
def _cached_int64index(self) -> Int64Index:
return Int64Index._simple_new(self._data, name=self.name)
@property
def _int64index(self) -> Int64Index:
# wrap _cached_int64index so we can be sure its name matches self.name
res = self._cached_int64index
res._name = self._name
return res
def _get_data_as_items(self):
""" return a list of tuples of start, stop, step """
rng = self._range
return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]
def __reduce__(self):
d = self._get_attributes_dict()
d.update(dict(self._get_data_as_items()))
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Rendering Methods
def _format_attrs(self):
"""
Return a list of tuples of the (attr, formatted_value)
"""
attrs = self._get_data_as_items()
if self.name is not None:
attrs.append(("name", ibase.default_pprint(self.name)))
return attrs
def _format_data(self, name=None):
# we are formatting thru the attributes
return None
def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]:
if not len(self._range):
return header
first_val_str = str(self._range[0])
last_val_str = str(self._range[-1])
max_length = max(len(first_val_str), len(last_val_str))
return header + [f"{x:<{max_length}}" for x in self._range]
# --------------------------------------------------------------------
_deprecation_message = (
"RangeIndex.{} is deprecated and will be "
"removed in a future version. Use RangeIndex.{} "
"instead"
)
@property
def start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
"""
# GH 25710
return self._range.start
@property
def _start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``start`` instead.
"""
warnings.warn(
self._deprecation_message.format("_start", "start"),
FutureWarning,
stacklevel=2,
)
return self.start
@property
def stop(self) -> int:
"""
The value of the `stop` parameter.
"""
return self._range.stop
@property
def _stop(self) -> int:
"""
The value of the `stop` parameter.
.. deprecated:: 0.25.0
Use ``stop`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_stop", "stop"),
FutureWarning,
stacklevel=2,
)
return self.stop
@property
def step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
"""
# GH 25710
return self._range.step
@property
def _step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``step`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_step", "step"),
FutureWarning,
stacklevel=2,
)
return self.step
@cache_readonly
def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
rng = self._range
return getsizeof(rng) + sum(
getsizeof(getattr(rng, attr_name))
for attr_name in ["start", "stop", "step"]
)
def memory_usage(self, deep: bool = False) -> int:
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self.nbytes
@property
def dtype(self) -> np.dtype:
return np.dtype(np.int64)
@property
def is_unique(self) -> bool:
""" return if the index has unique values """
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
return self._range.step > 0 or len(self) <= 1
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
return self._range.step < 0 or len(self) <= 1
def __contains__(self, key: Any) -> bool:
hash(key)
try:
key = ensure_python_int(key)
except TypeError:
return False
return key in self._range
@property
def inferred_type(self) -> str:
return "integer"
# --------------------------------------------------------------------
# Indexing Methods
@doc(Int64Index.get_loc)
def get_loc(self, key, method=None, tolerance=None):
if method is None and tolerance is None:
if is_integer(key) or (is_float(key) and key.is_integer()):
new_key = int(key)
try:
return self._range.index(new_key)
except ValueError as err:
raise KeyError(key) from err
raise KeyError(key)
return super().get_loc(key, method=method, tolerance=tolerance)
def _get_indexer(
self,
target: Index,
method: str | None = None,
limit: int | None = None,
tolerance=None,
) -> np.ndarray:
# -> np.ndarray[np.intp]
if com.any_not_none(method, tolerance, limit):
return super()._get_indexer(
target, method=method, tolerance=tolerance, limit=limit
)
if self.step > 0:
start, stop, step = self.start, self.stop, self.step
else:
# GH 28678: work on reversed range for simplicity
reverse = self._range[::-1]
start, stop, step = reverse.start, reverse.stop, reverse.step
if not is_signed_integer_dtype(target):
# checks/conversions/roundings are delegated to general method
return super()._get_indexer(target, method=method, tolerance=tolerance)
target_array = np.asarray(target)
locs = target_array - start
valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
locs[~valid] = -1
locs[valid] = locs[valid] / step
if step != self.step:
# We reversed this range: transform to original locs
locs[valid] = len(self) - 1 - locs[valid]
return ensure_platform_int(locs)
# --------------------------------------------------------------------
def repeat(self, repeats, axis=None) -> Int64Index:
return self._int64index.repeat(repeats, axis=axis)
def delete(self, loc) -> Int64Index: # type: ignore[override]
return self._int64index.delete(loc)
def take(
self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs
) -> Int64Index:
with rewrite_exception("Int64Index", type(self).__name__):
return self._int64index.take(
indices,
axis=axis,
allow_fill=allow_fill,
fill_value=fill_value,
**kwargs,
)
def tolist(self) -> list[int]:
return list(self._range)
@doc(Int64Index.__iter__)
def __iter__(self):
yield from self._range
@doc(Int64Index._shallow_copy)
def _shallow_copy(self, values, name: Hashable = no_default):
name = self.name if name is no_default else name
if values.dtype.kind == "f":
return Float64Index(values, name=name)
return Int64Index._simple_new(values, name=name)
def _view(self: RangeIndex) -> RangeIndex:
result = type(self)._simple_new(self._range, name=self._name)
result._cache = self._cache
return result
@doc(Int64Index.copy)
def copy(
self,
name: Hashable = None,
deep: bool = False,
dtype: Dtype | None = None,
names=None,
):
name = self._validate_names(name=name, names=names, deep=deep)[0]
new_index = self._rename(name=name)
if dtype:
warnings.warn(
"parameter dtype is deprecated and will be removed in a future "
"version. Use the astype method instead.",
FutureWarning,
stacklevel=2,
)
new_index = new_index.astype(dtype)
return new_index
def _minmax(self, meth: str):
no_steps = len(self) - 1
if no_steps == -1:
return np.nan
elif (meth == "min" and self.step > 0) or (meth == "max" and self.step < 0):
return self.start
return self.start + self.step * no_steps
def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The minimum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return self._minmax("min")
def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The maximum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
return self._minmax("max")
def argsort(self, *args, **kwargs) -> np.ndarray:
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
np.ndarray[np.intp]
See Also
--------
numpy.ndarray.argsort
"""
ascending = kwargs.pop("ascending", True) # EA compat
nv.validate_argsort(args, kwargs)
if self._range.step > 0:
result = np.arange(len(self), dtype=np.intp)
else:
result = np.arange(len(self) - 1, -1, -1, dtype=np.intp)
if not ascending:
result = result[::-1]
return result
def factorize(
self, sort: bool = False, na_sentinel: int | None = -1
) -> tuple[np.ndarray, RangeIndex]:
codes = np.arange(len(self), dtype=np.intp)
uniques = self
if sort and self.step < 0:
codes = codes[::-1]
uniques = uniques[::-1]
return codes, uniques
def equals(self, other: object) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if isinstance(other, RangeIndex):
return self._range == other._range
return super().equals(other)
# --------------------------------------------------------------------
# Set Operations
def _intersection(self, other: Index, sort=False):
if not isinstance(other, RangeIndex):
# Int64Index
return super()._intersection(other, sort=sort)
if not len(self) or not len(other):
return self._simple_new(_empty_range)
first = self._range[::-1] if self.step < 0 else self._range
second = other._range[::-1] if other.step < 0 else other._range
# check whether intervals intersect
# deals with in- and decreasing ranges
int_low = max(first.start, second.start)
int_high = min(first.stop, second.stop)
if int_high <= int_low:
return self._simple_new(_empty_range)
# Method hint: linear Diophantine equation
# solve intersection problem
# performance hint: for identical step sizes, could use
# cheaper alternative
gcd, s, _ = self._extended_gcd(first.step, second.step)
# check whether element sets intersect
if (first.start - second.start) % gcd:
return self._simple_new(_empty_range)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
tmp_start = first.start + (second.start - first.start) * first.step // gcd * s
new_step = first.step * second.step // gcd
new_range = range(tmp_start, int_high, new_step)
new_index = self._simple_new(new_range)
# adjust index to limiting interval
new_start = new_index._min_fitting_element(int_low)
new_range = range(new_start, new_index.stop, new_index.step)
new_index = self._simple_new(new_range)
if (self.step < 0 and other.step < 0) is not (new_index.step < 0):
new_index = new_index[::-1]
if sort is None:
new_index = new_index.sort_values()
return new_index
def _min_fitting_element(self, lower_limit: int) -> int:
"""Returns the smallest element greater than or equal to the limit"""
no_steps = -(-(lower_limit - self.start) // abs(self.step))
return self.start + abs(self.step) * no_steps
def _max_fitting_element(self, upper_limit: int) -> int:
"""Returns the largest element smaller than or equal to the limit"""
no_steps = (upper_limit - self.start) // abs(self.step)
return self.start + abs(self.step) * no_steps
def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]:
"""
        Extended Euclidean algorithm to solve Bezout's identity:
        a*x + b*y = gcd(a, b)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t
"""
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t
def _union(self, other: Index, sort):
"""
        Form the union of two Index objects and sort if possible
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort resulting index. ``sort=None`` returns a
monotonically increasing ``RangeIndex`` if possible or a sorted
``Int64Index`` if not. ``sort=False`` always returns an
unsorted ``Int64Index``
.. versionadded:: 0.25.0
Returns
-------
union : Index
"""
if isinstance(other, RangeIndex) and sort is None:
start_s, step_s = self.start, self.step
end_s = self.start + self.step * (len(self) - 1)
start_o, step_o = other.start, other.step
end_o = other.start + other.step * (len(other) - 1)
if self.step < 0:
start_s, step_s, end_s = end_s, -step_s, start_s
if other.step < 0:
start_o, step_o, end_o = end_o, -step_o, start_o
if len(self) == 1 and len(other) == 1:
step_s = step_o = abs(self.start - other.start)
elif len(self) == 1:
step_s = step_o
elif len(other) == 1:
step_o = step_s
start_r = min(start_s, start_o)
end_r = max(end_s, end_o)
if step_o == step_s:
if (
(start_s - start_o) % step_s == 0
and (start_s - end_o) <= step_s
and (start_o - end_s) <= step_s
):
return type(self)(start_r, end_r + step_s, step_s)
if (
(step_s % 2 == 0)
and (abs(start_s - start_o) <= step_s / 2)
and (abs(end_s - end_o) <= step_s / 2)
):
return type(self)(start_r, end_r + step_s / 2, step_s / 2)
elif step_o % step_s == 0:
if (
(start_o - start_s) % step_s == 0
and (start_o + step_s >= start_s)
and (end_o - step_s <= end_s)
):
return type(self)(start_r, end_r + step_s, step_s)
elif step_s % step_o == 0:
if (
(start_s - start_o) % step_o == 0
and (start_s + step_o >= start_o)
and (end_s - step_o <= end_o)
):
return type(self)(start_r, end_r + step_o, step_o)
return self._int64index._union(other, sort=sort)
def _difference(self, other, sort=None):
# optimized set operation if we have another RangeIndex
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name = self._convert_can_do_setop(other)
if not isinstance(other, RangeIndex):
return super()._difference(other, sort=sort)
res_name = ops.get_op_result_name(self, other)
first = self._range[::-1] if self.step < 0 else self._range
overlap = self.intersection(other)
if overlap.step < 0:
overlap = overlap[::-1]
if len(overlap) == 0:
return self.rename(name=res_name)
if len(overlap) == len(self):
return self[:0].rename(res_name)
if not isinstance(overlap, RangeIndex):
# We won't end up with RangeIndex, so fall back
return super()._difference(other, sort=sort)
if overlap.step != first.step:
# In some cases we might be able to get a RangeIndex back,
# but not worth the effort.
return super()._difference(other, sort=sort)
if overlap[0] == first.start:
# The difference is everything after the intersection
new_rng = range(overlap[-1] + first.step, first.stop, first.step)
elif overlap[-1] == first[-1]:
# The difference is everything before the intersection
new_rng = range(first.start, overlap[0], first.step)
else:
# The difference is not range-like
return super()._difference(other, sort=sort)
new_index = type(self)._simple_new(new_rng, name=res_name)
if first is not self._range:
new_index = new_index[::-1]
return new_index
def symmetric_difference(self, other, result_name: Hashable = None, sort=None):
if not isinstance(other, RangeIndex) or sort is not None:
return super().symmetric_difference(other, result_name, sort)
left = self.difference(other)
right = other.difference(self)
result = left.union(right)
if result_name is not None:
result = result.rename(result_name)
return result
# --------------------------------------------------------------------
def _concat(self, indexes: list[Index], name: Hashable) -> Index:
"""
Overriding parent method for the case of all RangeIndex instances.
When all members of "indexes" are of type RangeIndex: result will be
RangeIndex if possible, Int64Index otherwise. E.g.:
indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5])
"""
if not all(isinstance(x, RangeIndex) for x in indexes):
return super()._concat(indexes, name)
elif len(indexes) == 1:
return indexes[0]
rng_indexes = cast(List[RangeIndex], indexes)
start = step = next_ = None
# Filter the empty indexes
non_empty_indexes = [obj for obj in rng_indexes if len(obj)]
for obj in non_empty_indexes:
rng = obj._range
if start is None:
# This is set by the first non-empty index
start = rng.start
if step is None and len(rng) > 1:
step = rng.step
elif step is None:
# First non-empty index had only one element
if rng.start == start:
values = np.concatenate([x._values for x in rng_indexes])
result = Int64Index(values)
return result.rename(name)
step = rng.start - start
non_consecutive = (step != rng.step and len(rng) > 1) or (
next_ is not None and rng.start != next_
)
if non_consecutive:
result = Int64Index(np.concatenate([x._values for x in rng_indexes]))
return result.rename(name)
if step is not None:
next_ = rng[-1] + step
if non_empty_indexes:
# Get the stop value from "next" or alternatively
# from the last non-empty index
stop = non_empty_indexes[-1].stop if next_ is None else next_
return RangeIndex(start, stop, step).rename(name)
# Here all "indexes" had 0 length, i.e. were empty.
# In this case return an empty range index.
return RangeIndex(0, 0).rename(name)
def __len__(self) -> int:
"""
return the length of the RangeIndex
"""
return len(self._range)
@property
def size(self) -> int:
return len(self)
def __getitem__(self, key):
"""
Conserve RangeIndex type for scalar and slice keys.
"""
if isinstance(key, slice):
new_range = self._range[key]
return self._simple_new(new_range, name=self._name)
elif is_integer(key):
new_key = int(key)
try:
return self._range[new_key]
except IndexError as err:
raise IndexError(
f"index {key} is out of bounds for axis 0 with size {len(self)}"
) from err
elif is_scalar(key):
raise IndexError(
"only integers, slices (`:`), "
"ellipsis (`...`), numpy.newaxis (`None`) "
"and integer or boolean "
"arrays are valid indices"
)
# fall back to Int64Index
return super().__getitem__(key)
def _getitem_slice(self: RangeIndex, slobj: slice) -> RangeIndex:
"""
Fastpath for __getitem__ when we know we have a slice.
"""
res = self._range[slobj]
return type(self)._simple_new(res, name=self._name)
@unpack_zerodim_and_defer("__floordiv__")
def __floordiv__(self, other):
if is_integer(other) and other != 0:
if len(self) == 0 or self.start % other == 0 and self.step % other == 0:
start = self.start // other
step = self.step // other
stop = start + len(self) * step
new_range = range(start, stop, step or 1)
return self._simple_new(new_range, name=self.name)
if len(self) == 1:
start = self.start // other
new_range = range(start, start + 1, 1)
return self._simple_new(new_range, name=self.name)
return self._int64index // other
# --------------------------------------------------------------------
# Reductions
def all(self, *args, **kwargs) -> bool:
return 0 not in self._range
def any(self, *args, **kwargs) -> bool:
return any(self._range)
# --------------------------------------------------------------------
def _cmp_method(self, other, op):
if isinstance(other, RangeIndex) and self._range == other._range:
# Both are immutable so if ._range attr. are equal, shortcut is possible
return super()._cmp_method(self, op)
return super()._cmp_method(other, op)
def _arith_method(self, other, op):
"""
Parameters
----------
other : Any
op : callable that accepts 2 params
perform the binary op
"""
if isinstance(other, ABCTimedeltaIndex):
# Defer to TimedeltaIndex implementation
return NotImplemented
elif isinstance(other, (timedelta, np.timedelta64)):
# GH#19333 is_integer evaluated True on timedelta64,
# so we need to catch these explicitly
return op(self._int64index, other)
elif is_timedelta64_dtype(other):
# Must be an np.ndarray; GH#22390
return op(self._int64index, other)
if op in [
operator.pow,
ops.rpow,
operator.mod,
ops.rmod,
ops.rfloordiv,
divmod,
ops.rdivmod,
]:
return op(self._int64index, other)
step: Callable | None = None
if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]:
step = op
# TODO: if other is a RangeIndex we may have more efficient options
other = extract_array(other, extract_numpy=True, extract_range=True)
attrs = self._get_attributes_dict()
left, right = self, other
try:
# apply if we have an override
if step:
with np.errstate(all="ignore"):
rstep = step(left.step, right)
# we don't have a representable op
# so return a base index
if not is_integer(rstep) or not rstep:
raise ValueError
else:
rstep = left.step
with np.errstate(all="ignore"):
rstart = op(left.start, right)
rstop = op(left.stop, right)
result = type(self)(rstart, rstop, rstep, **attrs)
# for compat with numpy / Int64Index
# even if we can represent as a RangeIndex, return
# as a Float64Index if we have float-like descriptors
if not all( | is_integer(x) | pandas.core.dtypes.common.is_integer |
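# --- Added illustration (not part of pandas): the intersection strategy used by
# RangeIndex._intersection above, rewritten against plain ``range`` objects.
# The helper names below are my own; both inputs are assumed to be increasing
# (step > 0), which the method arranges by reversing decreasing ranges first.
def _ext_gcd(a: int, b: int):
    """Return (g, s, t) with a*s + b*t == g == gcd(a, b)."""
    s, old_s = 0, 1
    t, old_t = 1, 0
    r, old_r = b, a
    while r:
        quotient = old_r // r
        old_r, r = r, old_r - quotient * r
        old_s, s = s, old_s - quotient * s
        old_t, t = t, old_t - quotient * t
    return old_r, old_s, old_t
def range_intersection(first: range, second: range) -> range:
    low, high = max(first.start, second.start), min(first.stop, second.stop)
    if high <= low:
        return range(0)
    g, s, _ = _ext_gcd(first.step, second.step)
    if (first.start - second.start) % g:
        return range(0)  # the two arithmetic progressions never meet
    tmp_start = first.start + (second.start - first.start) * first.step // g * s
    step = first.step * second.step // g
    # smallest progression element >= low (cf. _min_fitting_element above)
    start = tmp_start + step * -(-(low - tmp_start) // step)
    return range(start, high, step)
assert list(range_intersection(range(0, 30, 4), range(6, 30, 6))) == [12, 24]
assert list(range_intersection(range(0, 10, 2), range(1, 10, 2))) == []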
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
"""
Unitary tests for bigfish.stack.utils module.
"""
import os
import pytest
import tempfile
import bigfish.stack as stack
import numpy as np
import pandas as pd
from bigfish.stack.utils import fit_recipe
from bigfish.stack.utils import get_path_from_recipe
from bigfish.stack.utils import get_nb_element_per_dimension
from bigfish.stack.utils import count_nb_fov
# TODO add test for bigfish.stack.load_and_save_url
# TODO add test for bigfish.stack.check_hash
# TODO add test for bigfish.stack.compute_hash
# ### Test sanity check functions ###
def test_check_parameter():
# define a function with different parameters to check
def foo(a, b, c, d, e, f, g, h):
stack.check_parameter(a=(list, type(None)),
b=str,
c=int,
d=float,
e=np.ndarray,
f=bool,
g=(pd.DataFrame, pd.Series),
h=pd.DataFrame)
return True
# test the consistency of the check function when it works...
assert foo(a=[], b="bar", c=5, d=2.5, e=np.array([3, 6, 9]),
f=True, g=pd.DataFrame(), h=pd.DataFrame())
assert foo(a=None, b="", c=10, d=2.0, e=np.array([3, 6, 9]),
f=False, g=pd.Series(), h=pd.DataFrame())
# ... and when it should raise an error
with pytest.raises(TypeError):
foo(a=(), b="bar", c=5, d=2.5, e=np.array([3, 6, 9]),
f=True, g= | pd.DataFrame() | pandas.DataFrame |
"""
Local data query and preprocessing, intended for writing via zipline ingest.
Reads local data.
1. Time columns in the metadata carry tz=UTC.
2. The DataFrame datetime index has tz=None.
Note: only A-share stocks are selected. The stock universe must stay consistent between `ingest` and `fundamental`.
"""
import re
import warnings
from concurrent.futures.thread import ThreadPoolExecutor
from functools import lru_cache, partial
from trading_calendars import get_calendar
import numpy as np
import pandas as pd
from cnswd.mongodb import get_db
from cnswd.setting.constants import MAX_WORKER
from cnswd.utils import sanitize_dates
import akshare as ak
warnings.filterwarnings('ignore')
WY_DAILY_COL_MAPS = {
'日期': 'date',
'股票代码': 'symbol',
'收盘价': 'close',
'最高价': 'high',
'最低价': 'low',
'开盘价': 'open',
'前收盘': 'prev_close',
'涨跌幅': 'change_pct',
'换手率': 'turnover',
'成交量': 'volume',
'成交金额': 'amount',
'总市值': 'total_cap',
'流通市值': 'market_cap',
}
WY_ADJUSTMENT_COLS = {
'股票代码': 'symbol',
'分红年度': 'date',
'送股(每10股)': 's_ratio',
'转增(每10股)': 'z_ratio',
'派息(每10股)': 'amount',
'公告日期': 'declared_date',
'股权登记日': 'record_date',
'除权除息日': 'ex_date',
'红股上市日': 'pay_date'
}
def encode_index_code(x, offset=1000000):
i = int(x) + offset
return str(i).zfill(7)
def decode_index_code(x, offset=1000000):
i = int(x) - offset
return str(i).zfill(6)
def get_exchange(code):
"""股票所在交易所编码"""
# https://www.iso20022.org/10383/iso-10383-market-identifier-codes
if len(code) == 7:
return '指数'
if code.startswith('688'):
return "上交所科创板"
elif code.startswith('002'):
return "深交所中小板"
elif code.startswith('6'):
return "上交所"
elif code.startswith('3'):
return "深交所创业板"
elif code.startswith('0'):
return "深交所主板"
elif code.startswith('2'):
return "深证B股"
elif code.startswith('9'):
return "上海B股"
else:
raise ValueError(f'股票代码:{code}错误')
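# --- Added illustration (example values are my own, not from the original
# module): index codes are shifted by the 1_000_000 offset and zero-padded to
# 7 characters, and get_exchange() dispatches on code length and prefix. The
# Chinese strings below are the module's own return values.
assert encode_index_code("000300") == "1000300"
assert decode_index_code("1000300") == "000300"
assert get_exchange("1000300") == "指数"         # 7-digit code -> index
assert get_exchange("688001") == "上交所科创板"   # prefix 688 -> STAR Market
assert get_exchange("300750") == "深交所创业板"   # prefix 3 -> ChiNext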
def _select_only_a(df, code_col):
"""选择A股数据
Arguments:
df {DataFrame} -- 数据框
code_col {str} -- 代表股票代码的列名称
Returns:
DataFrame -- 筛选出来的a股数据
"""
cond1 = df[code_col].str.startswith('2')
cond2 = df[code_col].str.startswith('9')
df = df.loc[~(cond1 | cond2), :]
return df
def _gen_index_metadata(db, code):
collection = db[code]
name = collection.find_one(projection={
'_id': 0,
'名称': 1,
},
sort=[('日期', -1)])
if name is None:
return pd.DataFrame()
first = collection.find_one(projection={
'_id': 0,
'日期': 1,
},
sort=[('日期', 1)])
last = collection.find_one(projection={
'_id': 0,
'日期': 1,
},
sort=[('日期', -1)])
start_date = pd.Timestamp(first['日期'], tz='UTC')
end_date = pd.Timestamp(last['日期'], tz='UTC')
return pd.DataFrame(
{
'symbol': encode_index_code(code),
'exchange': '指数',
            'asset_name': name['名称'],  # short name
'start_date': start_date,
'end_date': end_date,
'first_traded': start_date,
            # also applies to minute-level data
'last_traded': end_date,
'auto_close_date': end_date + pd.Timedelta(days=1),
},
index=[0])
def gen_index_metadata():
db = get_db('wy_index_daily')
codes = db.list_collection_names()
dfs = [_gen_index_metadata(db, code) for code in codes]
return pd.concat(dfs)
def _stock_first_and_last(code, db=None):
"""
    First and last trading dates in the daily bar data
Examples
--------
>>> _stock_first_and_last('000333')
symbol asset_name first_traded last_traded
0 000333 美的集团 2020-04-02 00:00:00+00:00 2020-04-04 00:00:00+00:00
"""
if db is None:
db = get_db('wy_stock_daily')
if code not in db.list_collection_names():
return | pd.DataFrame() | pandas.DataFrame |
from directional import *
import pandas as pd
import numpy as np
demo_sin_cos_matrix = pd.read_csv("sample_data/sin-cos.csv")
demo_sin_cos_mean = pd.read_csv("sample_data/sin-cos-mean.csv")
demo_angle_matrix = pd.read_csv("sample_data/degrees.csv")
demo_radian_matrix = pd.read_csv("sample_data/radians.csv")
demo_radian_mean = | pd.read_csv("sample_data/radians-mean.csv") | pandas.read_csv |
import pandas as pd
from pandas.io.json import json_normalize
def venues_explore(client,lat,lng, limit=100, verbose=0, sort='popular', radius=2000, offset=1, day='any',query=''):
    '''Function to get n places using Foursquare's explore endpoint, where n is the limit passed when calling the function.
    Returns a pandas DataFrame with name, city, country, latitude, longitude, address and main category as columns.
    Arguments: *client, *lat, *lng, limit (defaults to 100), radius (defaults to 2000), verbose (defaults to 0), offset (defaults to 1), day (defaults to any), sort (defaults to popular), query (defaults to empty)'''
# create a dataframe
df_a = pd.DataFrame(columns=['Name',
'City',
'Latitude',
'Longitude',
'Category',
'Address'])
ll=lat+','+lng
if offset<=50:
for i_offset in range(0,offset):
#get venues using client https://github.com/mLewisLogic/foursquare
venues = client.venues.explore(params={'ll':ll,
'limit':limit,
'intent' : 'browse',
'sort':sort,
'radius':radius,
'offset':i_offset,
'day':day,
'query':query
})
venues=venues['groups'][0]['items']
df_venues = pd.DataFrame.from_dict(venues)
df_venues['venue'][0]
#print('limit', limit, 'sort', sort, 'radius', radius)
for i, value in df_venues['venue'].items():
if verbose==1:
print('i', i, 'name', value['name'])
venueName=value['name']
try:
venueCity=value['location']['city']
except:
venueCity=''
venueCountry=value['location']['country']
venueLat=value['location']['lat']
venueLng=value['location']['lng']
venueCountry=value['location']['country']
try:
venueAddress=value['location']['address']
except:
venueAddress=''
venueCategory=value['categories'][0]['name']
df_a=df_a.append([{'Name':venueName,
'City':venueCity,
'Country':venueCountry,
'Latitude':venueLat,
'Longitude':venueLng,
'Category':venueCategory,
'Address':venueAddress
}])
else:
print('ERROR: offset value per Foursquare API is up to 50. Please use a lower value.')
return df_a.reset_index()
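# Added usage sketch for venues_explore (illustration only; see docstring).
def _example_explore(client):
    '''Illustrative usage sketch added for exposition; not part of the original
    module. `client` must be an authenticated foursquare.Foursquare instance
    (see https://github.com/mLewisLogic/foursquare); the coordinates and query
    below are placeholder values.'''
    df_cafes = venues_explore(client, lat='45.4642', lng='9.1900',
                              limit=50, radius=1000, query='coffee')
    return df_cafes[['Name', 'Category', 'Address']]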
def venues_explore_near(client,near, limit=100, verbose=0, sort='popular', radius=100000, offset=1, day='any',query=''):
    '''Function to get n places using Foursquare's explore endpoint, where n is the limit passed when calling the function.
    Returns a pandas DataFrame with name, city, country, latitude, longitude, address and main category as columns.
    The "near" argument searches within the bounds of the geocode for a string naming a place in the world.
    Arguments: *client, *near, limit (defaults to 100), radius (defaults to 100000, the maximum per the API docs), verbose (defaults to 0), offset (defaults to 1), day (defaults to any), sort (defaults to popular), query (defaults to empty)'''
# create a dataframe
df_a = pd.DataFrame(columns=['Name',
'City',
'Latitude',
'Longitude',
'Category',
'Address'])
if offset<=50:
for i_offset in range(0,offset):
#get venues using client https://github.com/mLewisLogic/foursquare
venues = client.venues.explore(params={'near':near,
'limit':limit,
'intent' : 'browse',
'sort':sort,
'radius':radius,
'offset':i_offset,
'day':day,
'query':query
})
venues=venues['groups'][0]['items']
df_venues = pd.DataFrame.from_dict(venues)
df_venues['venue'][0]
#print('limit', limit, 'sort', sort, 'radius', radius)
for i, value in df_venues['venue'].items():
if verbose==1:
print('i', i, 'name', value['name'])
venueName=value['name']
try:
venueCity=value['location']['city']
except:
venueCity=''
venueCountry=value['location']['country']
venueLat=value['location']['lat']
venueLng=value['location']['lng']
venueCountry=value['location']['country']
try:
venueAddress=value['location']['address']
except:
venueAddress=''
venueCategory=value['categories'][0]['name']
df_a=df_a.append([{'Name':venueName,
'City':venueCity,
'Country':venueCountry,
'Latitude':venueLat,
'Longitude':venueLng,
'Category':venueCategory,
'Address':venueAddress
}])
else:
print('ERROR: offset value according to Foursquare API is up to 50. Please use a lower value.')
return df_a.reset_index()
def get_categories():
    '''Function to get a pandas DataFrame of all Foursquare categories as listed in https://developer.foursquare.com/docs/resources/categories
    It uses json_normalize to unpack the nested information and returns a DataFrame with main, sub and sub-sub category names and IDs'''
df1 = pd.read_json('https://api.foursquare.com/v2/venues/categories?v=20170211&oauth_token=QEJ4AQPTMMNB413HGNZ5YDMJSHTOHZHMLZCAQCCLXIX41OMP&includeSupportedCC=true')
df1=df1.iloc[0,1]
df1 = json_normalize(df1)
#json_normalize(df1.iloc[0,0])
i=0
df_size=df1.shape[0]
df_cat=pd.DataFrame()
for i in range(i,df_size):
#print('print',df1.iloc[i,0])
#normalize subcategories
new_cats=json_normalize(df1.iloc[i,0])
#get new df size
new_size=new_cats.shape[0]
#print('new_size',new_size)
#new vars
i_sub=0
new_sub_cat=pd.DataFrame() #new df for sub sub cats
#iterate to get sub sub categories
for i_sub in range(i_sub,new_size):
sub_cats= | json_normalize(new_cats.iloc[i_sub,0]) | pandas.io.json.json_normalize |
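# Added illustration: the json_normalize pattern that get_categories relies on,
# run on toy data (the real Foursquare payload is fetched from the URL in the
# docstring and is much larger). Each row keeps a nested 'categories' list that
# is normalized one level at a time.
from pandas.io.json import json_normalize  # same import the module above uses
toy = [
    {"id": "1", "name": "Food", "categories": [
        {"id": "1-1", "name": "Cafe", "categories": []},
        {"id": "1-2", "name": "Pizza", "categories": []},
    ]},
]
df_main = json_normalize(toy)
df_sub = json_normalize(df_main.iloc[0]["categories"])
print(df_sub[["id", "name"]])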
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension.base import BaseOpsUtil
def make_data():
return list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100]
@pytest.fixture(
params=[
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype):
return integer_array(make_data(), dtype=dtype)
@pytest.fixture
def data_missing(dtype):
return integer_array([np.nan, 1], dtype=dtype)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
"""Parametrized fixture giving 'data' and 'data_missing'"""
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize(
"dtype, expected",
[
(Int8Dtype(), "Int8Dtype()"),
(Int16Dtype(), "Int16Dtype()"),
(Int32Dtype(), "Int32Dtype()"),
(Int64Dtype(), "Int64Dtype()"),
(UInt8Dtype(), "UInt8Dtype()"),
(UInt16Dtype(), "UInt16Dtype()"),
(UInt32Dtype(), "UInt32Dtype()"),
(UInt64Dtype(), "UInt64Dtype()"),
],
)
def test_repr_dtype(dtype, expected):
assert repr(dtype) == expected
def test_repr_array():
result = repr(integer_array([1, None, 3]))
expected = "<IntegerArray>\n[1, <NA>, 3]\nLength: 3, dtype: Int64"
assert result == expected
def test_repr_array_long():
data = integer_array([1, 2, None] * 1000)
expected = (
"<IntegerArray>\n"
"[ 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>, 1,\n"
" ...\n"
" <NA>, 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>]\n"
"Length: 3000, dtype: Int64"
)
result = repr(data)
assert result == expected
class TestConstructors:
def test_uses_pandas_na(self):
a = pd.array([1, None], dtype=pd.Int64Dtype())
assert a[1] is pd.NA
def test_from_dtype_from_float(self, data):
# construct from our dtype & string dtype
dtype = data.dtype
# from float
expected = pd.Series(data)
result = pd.Series(
data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)
)
tm.assert_series_equal(result, expected)
# from int / list
expected = pd.Series(data)
result = pd.Series(np.array(data).tolist(), dtype=str(dtype))
tm.assert_series_equal(result, expected)
# from int / array
expected = pd.Series(data).dropna().reset_index(drop=True)
dropped = np.array(data.dropna()).astype(np.dtype((dtype.type)))
result = pd.Series(dropped, dtype=str(dtype))
tm.assert_series_equal(result, expected)
class TestArithmeticOps(BaseOpsUtil):
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
def _check_op(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
result = op(s, other)
# compute expected
mask = s.isna()
# if s is a DataFrame, squeeze to a Series
# for comparison
if isinstance(s, pd.DataFrame):
result = result.squeeze()
s = s.squeeze()
mask = mask.squeeze()
# other array is an Integer
if isinstance(other, IntegerArray):
omask = getattr(other, "mask", None)
mask = getattr(other, "data", other)
if omask is not None:
mask |= omask
# 1 ** na is na, so need to unmask those
if op_name == "__pow__":
mask = np.where(~s.isna() & (s == 1), False, mask)
elif op_name == "__rpow__":
other_is_one = other == 1
if isinstance(other_is_one, pd.Series):
other_is_one = other_is_one.fillna(False)
mask = np.where(other_is_one, False, mask)
# float result type or float op
if (
is_float_dtype(other)
or is_float(other)
or op_name in ["__rtruediv__", "__truediv__", "__rdiv__", "__div__"]
):
rs = s.astype("float")
expected = op(rs, other)
self._check_op_float(result, expected, mask, s, op_name, other)
# integer result type
else:
rs = pd.Series(s.values._data, name=s.name)
expected = op(rs, other)
self._check_op_integer(result, expected, mask, s, op_name, other)
def _check_op_float(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in float dtypes
expected[mask] = np.nan
if "floordiv" in op_name:
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
mask2 = np.isinf(expected) & np.isnan(result)
expected[mask2] = np.nan
tm.assert_series_equal(result, expected)
def _check_op_integer(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in integer dtypes
# to compare properly, we convert the expected
# to float, mask to nans and convert infs
# if we have uints then we process as uints
# then convert to float
        # and we ultimately want to create an IntegerArray
# for comparisons
fill_value = 0
# mod/rmod turn floating 0 into NaN while
# integer works as expected (no nan)
if op_name in ["__mod__", "__rmod__"]:
if is_scalar(other):
if other == 0:
expected[s.values == 0] = 0
else:
expected = expected.fillna(0)
else:
expected[
(s.values == 0).fillna(False)
& ((expected == 0).fillna(False) | expected.isna())
] = 0
try:
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
except ValueError:
expected = expected.astype(float)
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
expected[mask] = pd.NA
# assert that the expected astype is ok
# (skip for unsigned as they have wrap around)
if not s.dtype.is_unsigned_integer:
original = pd.Series(original)
# we need to fill with 0's to emulate what an astype('int') does
# (truncation) for certain ops
if op_name in ["__rtruediv__", "__rdiv__"]:
mask |= original.isna()
original = original.fillna(0).astype("int")
original = original.astype("float")
original[mask] = np.nan
tm.assert_series_equal(original, expected.astype("float"))
# assert our expected result
tm.assert_series_equal(result, expected)
def test_arith_integer_array(self, data, all_arithmetic_operators):
# we operate with a rhs of an integer array
op = all_arithmetic_operators
s = pd.Series(data)
rhs = pd.Series([1] * len(data), dtype=data.dtype)
rhs.iloc[-1] = np.nan
self._check_op(s, op, rhs)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# scalar
op = all_arithmetic_operators
s = pd.Series(data)
self._check_op(s, op, 1, exc=TypeError)
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op = all_arithmetic_operators
df = pd.DataFrame({"A": data})
self._check_op(df, op, 1, exc=TypeError)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op = all_arithmetic_operators
s = pd.Series(data)
other = np.ones(len(s), dtype=s.dtype.type)
self._check_op(s, op, other, exc=TypeError)
def test_arith_coerce_scalar(self, data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
other = 0.01
self._check_op(s, op, other)
@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(self, all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# if that is equal to an integer
op = self.get_op_from_name(all_arithmetic_operators)
s = pd.Series([1, 2, 3], dtype="Int64")
result = op(s, other)
assert result.dtype is np.dtype("float")
def test_arith_len_mismatch(self, all_arithmetic_operators):
# operating with a list-like with non-matching length raises
op = self.get_op_from_name(all_arithmetic_operators)
other = np.array([1.0])
s = pd.Series([1, 2, 3], dtype="Int64")
with pytest.raises(ValueError, match="Lengths must match"):
op(s, other)
@pytest.mark.parametrize("other", [0, 0.5])
def test_arith_zero_dim_ndarray(self, other):
arr = integer_array([1, None, 2])
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
opa = getattr(data, op)
# invalid scalars
msg = (
r"(:?can only perform ops with numeric values)"
r"|(:?IntegerArray cannot perform the operation mod)"
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
msg = (
"can only perform ops with numeric values|"
"cannot perform .* with this index type: DatetimeArray|"
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# 2d
result = opa(pd.DataFrame({"A": s}))
assert result is NotImplemented
msg = r"can only perform ops with 1-d structures"
with pytest.raises(NotImplementedError, match=msg):
opa(np.arange(len(s)).reshape(-1, len(s)))
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(self, zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = np.array([np.nan, np.inf, -np.inf, np.nan])
if negative:
expected *= -1
tm.assert_numpy_array_equal(result, expected)
def test_pow_scalar(self):
a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
result = a ** 0
expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** 1
expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** pd.NA
expected = pd.array([None, None, 1, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** np.nan
expected = np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
result = 0 ** a
expected = pd.array([1, 0, None, 0], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = 1 ** a
expected = pd.array([1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = pd.NA ** a
expected = pd.array([1, None, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = np.nan ** a
expected = np.array([1, np.nan, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
def test_pow_array(self):
a = integer_array([0, 0, 0, 1, 1, 1, None, None, None])
b = integer_array([0, 1, None, 0, 1, None, 0, 1, None])
result = a ** b
expected = integer_array([1, 0, None, 1, 1, 1, 1, None, None])
tm.assert_extension_array_equal(result, expected)
def test_rpow_one_to_na(self):
# https://github.com/pandas-dev/pandas/issues/22022
# https://github.com/pandas-dev/pandas/issues/29997
arr = integer_array([np.nan, np.nan])
result = np.array([1.0, 2.0]) ** arr
expected = np.array([1.0, np.nan])
tm.assert_numpy_array_equal(result, expected)
class TestComparisonOps(BaseOpsUtil):
def _compare_other(self, data, op_name, other):
op = self.get_op_from_name(op_name)
# array
result = pd.Series(op(data, other))
expected = pd.Series(op(data._data, other), dtype="boolean")
# fill the nan locations
expected[data._mask] = pd.NA
tm.assert_series_equal(result, expected)
# series
s = pd.Series(data)
result = op(s, other)
expected = op(pd.Series(data._data), other)
# fill the nan locations
expected[data._mask] = pd.NA
expected = expected.astype("boolean")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1])
def test_scalar(self, other, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([1, 0, None], dtype="Int64")
result = op(a, other)
if other is pd.NA:
expected = pd.array([None, None, None], dtype="boolean")
else:
values = op(a._data, other)
expected = pd.arrays.BooleanArray(values, a._mask, copy=True)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(a, pd.array([1, 0, None], dtype="Int64"))
def test_array(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([0, 1, 2, None, None, None], dtype="Int64")
b = pd.array([0, 1, None, 0, 1, None], dtype="Int64")
result = op(a, b)
values = op(a._data, b._data)
mask = a._mask | b._mask
expected = pd.arrays.BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(
a, pd.array([0, 1, 2, None, None, None], dtype="Int64")
)
tm.assert_extension_array_equal(
b, pd.array([0, 1, None, 0, 1, None], dtype="Int64")
)
def test_compare_with_booleanarray(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([True, False, None] * 3, dtype="boolean")
b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Int64")
other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean")
expected = op(a, other)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
def test_no_shared_mask(self, data):
result = data + 1
assert np.shares_memory(result._mask, data._mask) is False
def test_compare_to_string(self, any_nullable_int_dtype):
# GH 28930
s = pd.Series([1, None], dtype=any_nullable_int_dtype)
result = s == "a"
expected = pd.Series([False, pd.NA], dtype="boolean")
self.assert_series_equal(result, expected)
def test_compare_to_int(self, any_nullable_int_dtype, all_compare_operators):
# GH 28930
s1 = pd.Series([1, None, 3], dtype=any_nullable_int_dtype)
s2 = pd.Series([1, None, 3], dtype="float")
method = getattr(s1, all_compare_operators)
result = method(2)
method = getattr(s2, all_compare_operators)
expected = method(2).astype("boolean")
expected[s2.isna()] = pd.NA
self.assert_series_equal(result, expected)
class TestCasting:
@pytest.mark.parametrize("dropna", [True, False])
def test_construct_index(self, all_data, dropna):
# ensure that we do not coerce to Float64Index, rather
# keep as Index
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Index(integer_array(other, dtype=all_data.dtype))
expected = pd.Index(other, dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_astype_index(self, all_data, dropna):
# as an int/uint index to Index
all_data = all_data[:10]
if dropna:
other = all_data[~all_data.isna()]
else:
other = all_data
dtype = all_data.dtype
idx = pd.Index(np.array(other))
assert isinstance(idx, ABCIndexClass)
result = idx.astype(dtype)
expected = idx.astype(object).astype(dtype)
tm.assert_index_equal(result, expected)
def test_astype(self, all_data):
all_data = all_data[:10]
ints = all_data[~all_data.isna()]
mixed = all_data
dtype = Int8Dtype()
# coerce to same type - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype)
expected = pd.Series(ints)
tm.assert_series_equal(result, expected)
# coerce to same other - ints
s = pd.Series(ints)
result = s.astype(dtype)
expected = pd.Series(ints, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype.numpy_dtype)
expected = pd.Series(ints._data.astype(all_data.dtype.numpy_dtype))
tm.assert_series_equal(result, expected)
# coerce to same type - mixed
s = pd.Series(mixed)
result = s.astype(all_data.dtype)
expected = pd.Series(mixed)
tm.assert_series_equal(result, expected)
# coerce to same other - mixed
s = pd.Series(mixed)
result = s.astype(dtype)
expected = pd.Series(mixed, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - mixed
s = pd.Series(mixed)
msg = r"cannot convert to .*-dtype NumPy array with missing values.*"
with pytest.raises(ValueError, match=msg):
s.astype(all_data.dtype.numpy_dtype)
# coerce to object
s = pd.Series(mixed)
result = s.astype("object")
expected = pd.Series(np.asarray(mixed))
tm.assert_series_equal(result, expected)
def test_astype_to_larger_numpy(self):
a = pd.array([1, 2], dtype="Int32")
result = a.astype("int64")
expected = np.array([1, 2], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
a = pd.array([1, 2], dtype="UInt32")
result = a.astype("uint64")
expected = np.array([1, 2], dtype="uint64")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [Int8Dtype(), "Int8", UInt32Dtype(), "UInt32"])
def test_astype_specific_casting(self, dtype):
s = pd.Series([1, 2, 3], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3], dtype=dtype)
tm.assert_series_equal(result, expected)
s = pd.Series([1, 2, 3, None], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3, None], dtype=dtype)
tm.assert_series_equal(result, expected)
def test_construct_cast_invalid(self, dtype):
msg = "cannot safely"
arr = [1.2, 2.3, 3.7]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
arr = [1.2, 2.3, 3.7, np.nan]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
@pytest.mark.parametrize("in_series", [True, False])
def test_to_numpy_na_nan(self, in_series):
a = pd.array([0, 1, None], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype="float64", na_value=np.nan)
expected = np.array([0.0, 1.0, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="int64", na_value=-1)
expected = np.array([0, 1, -1], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="bool", na_value=False)
expected = np.array([False, True, False], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("in_series", [True, False])
@pytest.mark.parametrize("dtype", ["int32", "int64", "bool"])
def test_to_numpy_dtype(self, dtype, in_series):
a = pd.array([0, 1], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype=dtype)
expected = np.array([0, 1], dtype=dtype)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float64", "int64", "bool"])
def test_to_numpy_na_raises(self, dtype):
a = pd.array([0, 1, None], dtype="Int64")
with pytest.raises(ValueError, match=dtype):
a.to_numpy(dtype=dtype)
def test_astype_str(self):
a = pd.array([1, 2, None], dtype="Int64")
expected = np.array(["1", "2", "<NA>"], dtype=object)
tm.assert_numpy_array_equal(a.astype(str), expected)
tm.assert_numpy_array_equal(a.astype("str"), expected)
def test_astype_boolean(self):
# https://github.com/pandas-dev/pandas/issues/31102
a = pd.array([1, 0, -1, 2, None], dtype="Int64")
result = a.astype("boolean")
expected = pd.array([True, False, True, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_frame_repr(data_missing):
df = pd.DataFrame({"A": data_missing})
result = repr(df)
expected = " A\n0 <NA>\n1 1"
assert result == expected
def test_conversions(data_missing):
# astype to object series
df = pd.DataFrame({"A": data_missing})
result = df["A"].astype("object")
expected = pd.Series(np.array([np.nan, 1], dtype=object), name="A")
tm.assert_series_equal(result, expected)
# convert to object ndarray
# we assert that we are exactly equal
# including type conversions of scalars
result = df["A"].astype("object").values
expected = np.array([pd.NA, 1], dtype=object)
tm.assert_numpy_array_equal(result, expected)
for r, e in zip(result, expected):
if pd.isnull(r):
assert pd.isnull(e)
elif is_integer(r):
assert r == e
assert is_integer(e)
else:
assert r == e
assert type(r) == type(e)
def test_integer_array_constructor():
values = np.array([1, 2, 3, 4], dtype="int64")
mask = np.array([False, False, False, True], dtype="bool")
result = IntegerArray(values, mask)
expected = integer_array([1, 2, 3, np.nan], dtype="int64")
tm.assert_extension_array_equal(result, expected)
msg = r".* should be .* numpy array. Use the 'integer_array' function instead"
with pytest.raises(TypeError, match=msg):
IntegerArray(values.tolist(), mask)
with pytest.raises(TypeError, match=msg):
IntegerArray(values, mask.tolist())
with pytest.raises(TypeError, match=msg):
IntegerArray(values.astype(float), mask)
msg = r"__init__\(\) missing 1 required positional argument: 'mask'"
with pytest.raises(TypeError, match=msg):
IntegerArray(values)
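def test_values_mask_layout_sketch():
    # Added as an illustrative sketch, not part of the original suite: shows the
    # values/mask pair that IntegerArray wraps, how the masked slot surfaces as
    # pd.NA, and that reductions skip it. Relies only on the imports at the top
    # of this module.
    values = np.array([1, 2, 3, 4], dtype="int64")
    mask = np.array([False, False, False, True], dtype="bool")
    arr = IntegerArray(values, mask)
    assert arr[3] is pd.NA
    assert pd.Series(arr).sum() == 6  # the masked slot is ignored
    tm.assert_extension_array_equal(arr, integer_array([1, 2, 3, np.nan], dtype="int64"))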
@pytest.mark.parametrize(
"a, b",
[
([1, None], [1, np.nan]),
([None], [np.nan]),
([None, np.nan], [np.nan, np.nan]),
([np.nan, np.nan], [np.nan, np.nan]),
],
)
def test_integer_array_constructor_none_is_nan(a, b):
result = integer_array(a)
expected = integer_array(b)
tm.assert_extension_array_equal(result, expected)
def test_integer_array_constructor_copy():
values = np.array([1, 2, 3, 4], dtype="int64")
mask = np.array([False, False, False, True], dtype="bool")
result = IntegerArray(values, mask)
assert result._data is values
assert result._mask is mask
result = IntegerArray(values, mask, copy=True)
assert result._data is not values
assert result._mask is not mask
@pytest.mark.parametrize(
"values",
[
["foo", "bar"],
["1", "2"],
"foo",
1,
1.0,
pd.date_range("20130101", periods=2),
np.array(["foo"]),
[[1, 2], [3, 4]],
[np.nan, {"a": 1}],
],
)
def test_to_integer_array_error(values):
# error in converting existing arrays to IntegerArrays
msg = (
r"(:?.* cannot be converted to an IntegerDtype)"
r"|(:?values must be a 1D list-like)"
)
with pytest.raises(TypeError, match=msg):
integer_array(values)
def test_to_integer_array_inferred_dtype():
# if values has dtype -> respect it
result = integer_array(np.array([1, 2], dtype="int8"))
assert result.dtype == Int8Dtype()
result = integer_array(np.array([1, 2], dtype="int32"))
assert result.dtype == Int32Dtype()
# if values have no dtype -> always int64
result = integer_array([1, 2])
assert result.dtype == Int64Dtype()
def test_to_integer_array_dtype_keyword():
result = integer_array([1, 2], dtype="int8")
assert result.dtype == Int8Dtype()
# if values has dtype -> override it
result = integer_array(np.array([1, 2], dtype="int8"), dtype="int32")
assert result.dtype == Int32Dtype()
def test_to_integer_array_float():
result = integer_array([1.0, 2.0])
expected = integer_array([1, 2])
tm.assert_extension_array_equal(result, expected)
with pytest.raises(TypeError, match="cannot safely cast non-equivalent"):
integer_array([1.5, 2.0])
# for float dtypes, the itemsize is not preserved
result = integer_array(np.array([1.0, 2.0], dtype="float32"))
assert result.dtype == Int64Dtype()
@pytest.mark.parametrize(
"bool_values, int_values, target_dtype, expected_dtype",
[
([False, True], [0, 1], Int64Dtype(), Int64Dtype()),
([False, True], [0, 1], "Int64", Int64Dtype()),
([False, True, np.nan], [0, 1, np.nan], Int64Dtype(), Int64Dtype()),
],
)
def test_to_integer_array_bool(bool_values, int_values, target_dtype, expected_dtype):
result = integer_array(bool_values, dtype=target_dtype)
assert result.dtype == expected_dtype
expected = integer_array(int_values, dtype=target_dtype)
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"values, to_dtype, result_dtype",
[
(np.array([1], dtype="int64"), None, Int64Dtype),
(np.array([1, np.nan]), None, Int64Dtype),
(np.array([1, np.nan]), "int8", Int8Dtype),
],
)
def test_to_integer_array(values, to_dtype, result_dtype):
# convert existing arrays to IntegerArrays
result = integer_array(values, dtype=to_dtype)
assert result.dtype == result_dtype()
expected = integer_array(values, dtype=result_dtype())
tm.assert_extension_array_equal(result, expected)
def test_cross_type_arithmetic():
df = pd.DataFrame(
{
"A": pd.Series([1, 2, np.nan], dtype="Int64"),
"B": pd.Series([1, np.nan, 3], dtype="UInt8"),
"C": [1, 2, 3],
}
)
result = df.A + df.C
expected = pd.Series([2, 4, np.nan], dtype="Int64")
tm.assert_series_equal(result, expected)
result = (df.A + df.C) * 3 == 12
expected = pd.Series([False, True, None], dtype="boolean")
tm.assert_series_equal(result, expected)
result = df.A + df.B
expected = pd.Series([2, np.nan, np.nan], dtype="Int64")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("op", ["sum", "min", "max", "prod"])
def test_preserve_dtypes(op):
# TODO(#22346): preserve Int64 dtype
# for ops that enable (mean would actually work here
# but generally it is a float return value)
df = pd.DataFrame(
{
"A": ["a", "b", "b"],
"B": [1, None, 3],
"C": integer_array([1, None, 3], dtype="Int64"),
}
)
# op
result = getattr(df.C, op)()
assert isinstance(result, int)
# groupby
result = getattr(df.groupby("A"), op)()
expected = pd.DataFrame(
{"B": np.array([1.0, 3.0]), "C": integer_array([1, 3], dtype="Int64")},
index=pd.Index(["a", "b"], name="A"),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("op", ["mean"])
def test_reduce_to_float(op):
# some reduce ops always return float, even if the result
# is a rounded number
df = pd.DataFrame(
{
"A": ["a", "b", "b"],
"B": [1, None, 3],
"C": integer_array([1, None, 3], dtype="Int64"),
}
)
# op
result = getattr(df.C, op)()
assert isinstance(result, float)
# groupby
result = getattr(df.groupby("A"), op)()
expected = pd.DataFrame(
{"B": np.array([1.0, 3.0]), "C": integer_array([1, 3], dtype="Int64")},
index=pd.Index(["a", "b"], name="A"),
)
tm.assert_frame_equal(result, expected)
def test_astype_nansafe():
# see gh-22343
arr = integer_array([np.nan, 1, 2], dtype="Int8")
msg = "cannot convert to 'uint32'-dtype NumPy array with missing values."
with pytest.raises(ValueError, match=msg):
arr.astype("uint32")
@pytest.mark.parametrize("ufunc", [np.abs, np.sign])
# np.sign emits a warning with nans, <https://github.com/numpy/numpy/issues/15127>
@pytest.mark.filterwarnings("ignore:invalid value encountered in sign")
def test_ufuncs_single_int(ufunc):
a = integer_array([1, 2, -3, np.nan])
result = ufunc(a)
expected = integer_array(ufunc(a.astype(float)))
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
result = ufunc(s)
expected = pd.Series(integer_array(ufunc(a.astype(float))))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.log, np.exp, np.sin, np.cos, np.sqrt])
def test_ufuncs_single_float(ufunc):
a = integer_array([1, 2, -3, np.nan])
with np.errstate(invalid="ignore"):
result = ufunc(a)
expected = ufunc(a.astype(float))
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(a)
with np.errstate(invalid="ignore"):
result = ufunc(s)
expected = ufunc(s.astype(float))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.add, np.subtract])
def test_ufuncs_binary_int(ufunc):
# two IntegerArrays
a = integer_array([1, 2, -3, np.nan])
result = ufunc(a, a)
expected = integer_array(ufunc(a.astype(float), a.astype(float)))
tm.assert_extension_array_equal(result, expected)
# IntegerArray with numpy array
arr = np.array([1, 2, 3, 4])
result = ufunc(a, arr)
expected = integer_array(ufunc(a.astype(float), arr))
tm.assert_extension_array_equal(result, expected)
result = ufunc(arr, a)
expected = integer_array(ufunc(arr, a.astype(float)))
tm.assert_extension_array_equal(result, expected)
# IntegerArray with scalar
result = ufunc(a, 1)
expected = integer_array(ufunc(a.astype(float), 1))
tm.assert_extension_array_equal(result, expected)
result = ufunc(1, a)
expected = integer_array(ufunc(1, a.astype(float)))
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("values", [[0, 1], [0, None]])
def test_ufunc_reduce_raises(values):
a = integer_array(values)
msg = r"The 'reduce' method is not supported."
with pytest.raises(NotImplementedError, match=msg):
np.add.reduce(a)
@td.skip_if_no("pyarrow", min_version="0.15.0")
def test_arrow_array(data):
# protocol added in 0.15.0
import pyarrow as pa
arr = pa.array(data)
expected = np.array(data, dtype=object)
expected[data.isna()] = None
expected = pa.array(expected, type=data.dtype.name.lower(), from_pandas=True)
assert arr.equals(expected)
@td.skip_if_no("pyarrow", min_version="0.16.0")
def test_arrow_roundtrip(data):
# roundtrip possible from arrow 0.16.0
import pyarrow as pa
df = pd.DataFrame({"a": data})
table = pa.table(df)
assert table.field("a").type == str(data.dtype.numpy_dtype)
result = table.to_pandas()
tm.assert_frame_equal(result, df)
@td.skip_if_no("pyarrow", min_version="0.16.0")
def test_arrow_from_arrow_uint():
# https://github.com/pandas-dev/pandas/issues/31896
# possible mismatch in types
import pyarrow as pa
dtype = pd.UInt32Dtype()
result = dtype.__from_arrow__(pa.array([1, 2, 3, 4, None], type="int64"))
expected = pd.array([1, 2, 3, 4, None], dtype="UInt32")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"pandasmethname, kwargs",
[
("var", {"ddof": 0}),
("var", {"ddof": 1}),
("kurtosis", {}),
("skew", {}),
("sem", {}),
],
)
def test_stat_method(pandasmethname, kwargs):
s = pd.Series(data=[1, 2, 3, 4, 5, 6, np.nan, np.nan], dtype="Int64")
pandasmeth = getattr(s, pandasmethname)
result = pandasmeth(**kwargs)
s2 = pd.Series(data=[1, 2, 3, 4, 5, 6], dtype="Int64")
pandasmeth = getattr(s2, pandasmethname)
expected = pandasmeth(**kwargs)
assert expected == result
def test_value_counts_na():
arr = pd.array([1, 2, 1, pd.NA], dtype="Int64")
result = arr.value_counts(dropna=False)
expected = pd.Series([2, 1, 1], index=[1, 2, pd.NA], dtype="Int64")
tm.assert_series_equal(result, expected)
result = arr.value_counts(dropna=True)
expected = pd.Series([2, 1], index=[1, 2], dtype="Int64")
# This chart exports the activity series in pandas format
import pandas as pd
import datetime
import numpy as np
act = GC.activity()
dd = {}
for k, v in act.items():
dd[k] = np.array(v)
df = pd.DataFrame(dd)
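# Note: GC is assumed to be the API object provided by the host charting
# environment, with GC.activity() returning a dict of equal-length per-sample
# series, so df has one row per recorded sample. A quick follow-up check:
# df.describe()  # summary statistics for every recorded channel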
import pandas as pd
import os
import pickle
import logging
from tqdm import tqdm
import sys
from flashtext import KeywordProcessor
import joblib
import multiprocessing
import numpy as np
import urllib.request
import zipfile
import hashlib
import json
from .flags import flags
logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)-5.5s] [%(name)-12.12s]: %(message)s')
log = logging.getLogger(__name__)
class Geocode():
def __init__(self, min_population_cutoff=30000, large_city_population_cutoff=200000, location_types=None):
self.kp = None
self.geo_data = None
self.min_population_cutoff = min_population_cutoff
self.large_city_population_cutoff = large_city_population_cutoff
self.geo_data_field_names = ['name', 'official_name', 'country_code', 'longitude', 'latitude', 'geoname_id', 'location_type', 'population']
self.default_location_types = ['city', 'place', 'country', 'admin1', 'admin2', 'admin3', 'admin4', 'admin5', 'admin6', 'admin_other', 'continent', 'region']
self.location_types = self._get_location_types(location_types)
self.argument_hash = self.get_arguments_hash()
def load(self, recompute=False):
if recompute or not (os.path.isfile(self.geonames_pickle_path) and os.path.isfile(self.keyword_processor_pickle_path)):
# geonames data
log.info('Recomputing pickle files...')
if os.path.isfile(self.geonames_pickle_path) and not recompute:
log.info('Pickled geonames file is already present!')
else:
self.create_geonames_pickle()
# keyword processor
if os.path.isfile(self.keyword_processor_pickle_path) and not recompute:
log.info('Pickled keyword processor file is already present!')
else:
self.create_keyword_processor_pickle()
# load data into memory
self.kp = self.get_keyword_processor_pickle()
self.geo_data = self.get_geonames_pickle()
def get_geonames_data(self):
geonames_data_path = self.get_cache_path('allCountries.txt')
if not os.path.isfile(geonames_data_path):
# download file
url = 'https://download.geonames.org/export/dump/allCountries.zip'
log.info(f'Downloading data from {url}')
geonames_data_path_zip = self.get_cache_path('allCountries.zip')
urllib.request.urlretrieve(url, geonames_data_path_zip)
log.info(f'... done')
log.info('Extracting data...')
# extract
with zipfile.ZipFile(geonames_data_path_zip, 'r') as f:
f.extractall(self.data_dir)
log.info('...done')
# remove zip file
os.remove(geonames_data_path_zip)
log.info(f'Reading data from {geonames_data_path}...')
dtypes = {'name': str, 'latitude': float, 'longitude': float, 'country_code': str, 'population': int, 'feature_code': str, 'alternatenames': str, 'geoname_id': str}
geonames_columns = ['geoname_id', 'name', 'asciiname', 'alternatenames', 'latitude', 'longitude', 'feature_class', 'feature_code', 'country_code', 'cc2', 'admin1', 'admin2', 'admin3', 'admin4', 'population', 'elevation', 'dem', 'timezone', 'modification_date']
df = pd.read_csv(geonames_data_path, names=geonames_columns, sep='\t', dtype=dtypes, usecols=dtypes.keys())
# remove data file
os.remove(geonames_data_path)
return df
def get_feature_names_data(self):
feature_code_path = self.get_cache_path('featureCodes_en.txt')
if not os.path.isfile(feature_code_path):
# download file
url = 'https://download.geonames.org/export/dump/featureCodes_en.txt'
log.info(f'Downloading data from {url}')
urllib.request.urlretrieve(url, feature_code_path)
log.info(f'... done')
log.info(f'Reading data from {feature_code_path}...')
df_features = pd.read_csv(feature_code_path, sep='\t', names=['feature_code', 'description-short', 'description-long'])
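# Hedged usage sketch (illustrative only, not part of the excerpt above): once
# load() has built the pickled geonames table and the flashtext KeywordProcessor,
# lookups go through self.kp. Any helper names beyond load() are assumptions.
# gc = Geocode(min_population_cutoff=30000)
# gc.load()  # downloads/extracts geonames data on first run, then loads pickles
# gc.kp.extract_keywords('weekend trip to Zurich')  # flashtext keyword lookup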
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
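# Example (sketch): get_upcast_box(pd.Index, pd.Series([1])) returns Series,
# since a Series operand outranks an Index box; if neither operand is a
# DataFrame, Series, or Index, the original box is returned unchanged.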
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
scalar):
box = box_with_array
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
if box is pd.DataFrame and names[1] == 'Venkman':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
tdi = pd.TimedeltaIndex([NaT, Timedelta('1s')])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
# TODO: this was taken from tests.series.test_operators; de-duplicate
def test_timedelta64_operations_with_DateOffset(self):
# GH#10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(PerformanceWarning):
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3),
timedelta(minutes=5, seconds=6),
timedelta(hours=2, minutes=5, seconds=3)])
tm.assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
tm.assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box):
# GH#18849
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box):
# GH#18824, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box_with_array)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@version: 1.4.0
@file: GSP_main.py
@time: 2021/1/26 10:50
@functions: graph signal processing main script
@update: support Yeo-ICN definition
@update: support ICN-level brain activity and connectivity strength saving
"""
import numpy as np
import glob
import os
import time
import matplotlib.pyplot as plt
from pygsp import graphs, filters, plotting
from GSP_utilities import surrogate_BOLD_create, save_variable, load_variable
import pandas as pd
from dppd import dppd
dp, X = dppd()
# 1. path locations and parameters
start = time.time()
deriv_path = '/home/amax/data/cye/MScohort_BIDS_clean/derivatives'
connectome_path = os.path.join(deriv_path, 'mrtrix')
xcpengine_path = os.path.join(deriv_path, 'xcpengine')
network_assign_path = 'CAB-NP_v1.1_Labels-ReorderedbyNetworks_Yeo.csv'
num_BOLD_timepoints = 180
num_rand = 100 # number of surrogates
functional_type = 'BOLD'
tract_type = 'meanlength' # one of the following: invlength, invnodevol, level-participant_connectome, meanlength
ICN_type = 'Yeo' # one of the following: 'Yeo', 'Cole'
normalize_type = 'both' # 'W': normalize W; 'L': normalize Laplacian (Preti method); 'both': normalize both W and Laplacian
# 2. read network assignment for hcpmmp
network_assign_csv = pd.read_csv(network_assign_path)
network_assign_csv = dp(network_assign_csv).mutate(NETWORK=X.Yeo_NETWORK).pd
network_assign_csv = dp(network_assign_csv).mutate(NETWORKKEY=X.Yeo_NETWORKKEY).pd
num_network_df = dp(network_assign_csv).summarise((X.NETWORKKEY, np.max, 'hp_max')).pd
num_network = num_network_df.iloc[0,0]
network_rowindex_ls = []
for network_i in range(1,num_network+1):
df_network = dp(network_assign_csv).filter_by(X.NETWORKKEY == network_i).pd
network_rowindex_ls.append(df_network.index.values)
network_unique_df = dp(network_assign_csv).distinct('NETWORKKEY').pd
network_unique_df = network_unique_df.sort_values(by='NETWORKKEY',ascending = True)
network_unique_df = dp(network_unique_df).filter_by(-X.NETWORK.isin(['Undefine'])).pd # remove undefined ICN
network_unique_df = network_unique_df.reset_index()
# 3. define group of interests
cohort1 = 'ms'
cohort2 = 'nc'
cohort3 = 'nmo'
cohort4 = 'cis'
cohort1_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort1 + '*'))
cohort2_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort2 + '*'))
cohort3_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort3 + '*'))
cohort4_connectome_ls = glob.glob(os.path.join(connectome_path, 'sub-' + cohort4 + '*'))
cohort_connectome_ls = cohort1_connectome_ls + cohort2_connectome_ls + cohort3_connectome_ls + cohort4_connectome_ls
cohort_connectome_ls.sort()
cohort1_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort1 + '*'))
cohort2_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort2 + '*'))
cohort3_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort3 + '*'))
cohort4_fmri_ls = glob.glob(os.path.join(xcpengine_path, 'sub-' + cohort4 + '*'))
cohort_fmri_ls = cohort1_fmri_ls + cohort2_fmri_ls + cohort3_fmri_ls + cohort4_fmri_ls
cohort_name_ls = [os.path.basename(item) for item in cohort_connectome_ls]
remove_name_ls = ['sub-nc011','sub-nc039', 'sub-nmo002', 'sub-nmo019', 'sub-cis002','sub-cis015', 'sub-ms015'] # problematic cases
cohort_name_ls = list(set(cohort_name_ls) - set(remove_name_ls)) # remove problematic cases
for i in remove_name_ls: # remove problematic cases
cohort_connectome_ls = [x for x in cohort_connectome_ls if i not in x]
cohort_fmri_ls = [x for x in cohort_fmri_ls if i not in x]
cohort_name_ls.sort()
cohort_connectome_ls.sort()
cohort_fmri_ls.sort()
if len(cohort_connectome_ls) != len(cohort_fmri_ls):
print('Numbers of connectome and xcpengine results do not match')
# 4. create a dataframe to store individual filepath
path_dict = {'subname':cohort_name_ls, 'mrtrix_path': cohort_connectome_ls, 'xcp_path':cohort_fmri_ls}
path_df = pd.DataFrame(path_dict, columns=['subname','mrtrix_path','xcp_path'])
path_df = dp(path_df).mutate(connectome_path=X.mrtrix_path + '/connectome/' + X.subname +'_parc-hcpmmp1_' + tract_type + '.csv').pd
path_df = dp(path_df).mutate(BOLD_series_path=X.xcp_path + '/fcon/hcpmmp/hcpmmp.1D').pd
path_df = dp(path_df).mutate(fmri_map_path=X.xcp_path + '/roiquant/hcpmmp/' + X.subname +'_hcpmmp_mean.csv').pd
print('finished step 4')
# 5. load individual connectome as ndarray
num_parcels = len(network_assign_csv)
num_sub = len(path_df)
path_df_nc = dp(path_df).filter_by(X.subname.str.contains('nc')).pd
num_nc = len(path_df_nc)
nc_idx = path_df_nc.index
connectome_array = np.zeros(shape=(num_parcels, num_parcels, num_sub))
for sub_idx in range(len(path_df)):
individual_connectome = np.genfromtxt(path_df.loc[sub_idx, 'connectome_path'], delimiter=',')
connectome_array[:,:,sub_idx] = individual_connectome
# 6. load individual BOLD series and fill missing part according to /fcon/hcpmmp/missing.txt
BOLD_series_3D = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
for sub_idx in range(len(path_df)):
BOLD_series = np.genfromtxt(path_df.loc[sub_idx, 'BOLD_series_path'])
BOLD_series = BOLD_series.T
missing_path = os.path.join(path_df.loc[sub_idx, 'xcp_path'], 'fcon', 'hcpmmp', 'hcpmmp_missing.txt')
if os.path.exists(missing_path):
missing_parcel_id = np.genfromtxt(missing_path, dtype=int)
if missing_parcel_id.size == 1: # only one parcel missing
if BOLD_series[missing_parcel_id-1,:].sum() != 0:
print("missing parcel not match for subject {}".format(sub_idx))
network_key = network_assign_csv.loc[missing_parcel_id-1,'NETWORKKEY']
network_parcel_idx = network_rowindex_ls[network_key-1]
BOLD_series[missing_parcel_id-1,:] = np.mean(BOLD_series[network_parcel_idx,:])
else: # multiple parcels missing
for missing_idx in missing_parcel_id:
network_key = network_assign_csv.loc[missing_idx-1,'NETWORKKEY']
network_parcel_idx = network_rowindex_ls[network_key-1]
BOLD_series[missing_idx-1,:] = np.mean(BOLD_series[network_parcel_idx,:])
BOLD_series_3D[:,:,sub_idx] = BOLD_series
print('finished loading individual BOLD series and filling missing part')
# 7. load fmri parametric map and fill missing part according to /fcon/hcpmmp/missing.txt
fmri_paramap = np.zeros(shape=(num_parcels, num_sub))
paramap_str = 'mean_alffZ'
for sub_idx in range(len(path_df)):
fmri_map = pd.read_csv(path_df.loc[sub_idx, 'fmri_map_path'],index_col=0)
fmri_map = fmri_map.loc[:,paramap_str]
missing_path = os.path.join(path_df.loc[sub_idx, 'xcp_path'], 'fcon', 'hcpmmp', 'hcpmmp_missing.txt')
if os.path.exists(missing_path):
missing_parcel_id = np.genfromtxt(missing_path, dtype=int)
if missing_parcel_id.size == 1: # only one parcel missing
if not np.isnan(fmri_map[missing_parcel_id]):
print("missing parcel not match for subject {}".format(sub_idx))
network_key = network_assign_csv.loc[missing_parcel_id-1,'NETWORKKEY']
network_parcel_idx = network_rowindex_ls[network_key-1]
fmri_map[int(missing_parcel_id)] = np.mean(fmri_map[network_parcel_idx])
fmri_map = fmri_map.to_numpy()
else: # multiple parcels missing
network_key = network_assign_csv.loc[missing_parcel_id-1,'NETWORKKEY']
network_rowindex_ls = np.array(network_rowindex_ls, dtype=object)
network_parcel_idx = network_rowindex_ls[network_key-1]
for parcel_i in range(missing_parcel_id.size):
fmri_map[int(missing_parcel_id[parcel_i])] = np.mean(fmri_map[network_parcel_idx[parcel_i]])
fmri_map = fmri_map.to_numpy()
fmri_paramap[:,sub_idx] = fmri_map
print('finished loading fmri parametric map and fill missing part')
# 8. load connectome and functional signal and do GSP
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
func_sig = BOLD_series_3D
s_head_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
s_rand_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub, num_rand))
else:
raise ValueError('undefined functional signal')
G_U_cohort = np.zeros(shape=(num_parcels, num_parcels, num_sub))
for sub_idx in range(len(path_df)):
W = np.genfromtxt(path_df.loc[sub_idx, 'connectome_path'], delimiter=',')
# Symmetric Normalization of adjacency matrix
D = np.diag(np.sum(W,1)) #degree
D_power = np.power(D, (-1/2))
D_power[np.isinf(D_power)] = 0
Wsymm = D_power @ W @ D_power
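# The block above is symmetric normalization, Wsymm = D^(-1/2) @ W @ D^(-1/2),
# with isolated nodes (zero degree) handled by setting their D^(-1/2) entries
# to 0 instead of inf.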
#The eigenvector matrix G.U is used to define the Graph Fourier Transform of the graph signal S
if normalize_type == 'W':
G = graphs.Graph(Wsymm)
G.compute_fourier_basis()
G_U_cohort[:,:,sub_idx] = G.U
U = G.U
elif normalize_type == 'L':
G = graphs.Graph(W, lap_type = 'normalized')
G.compute_fourier_basis()
G_U_cohort[:,:,sub_idx] = G.U
U = G.U
elif normalize_type == 'both':
Wsymm = np.triu(Wsymm) + np.triu(Wsymm).T - np.diag(np.triu(Wsymm).diagonal()) # force symmetric
G = graphs.Graph(Wsymm, lap_type = 'normalized')
G.compute_fourier_basis()
G_U_cohort[:,:,sub_idx] = G.U
U = G.U
# L = np.eye(len(Wsymm)) - Wsymm
# lamda, U = np.linalg.eig(L)
# U = U[:, np.argsort(lamda)]
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
s_head = U.T @ func_sig[:,:,sub_idx]
s_head_cohort[:,:,sub_idx] = s_head
# calcualte surrogate for individual
s_rand_cohort[:,:,sub_idx,:] = surrogate_BOLD_create(U, func_sig[:,:,sub_idx], num_rand)
print('finished Graph Fourier Transform')
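# For reference: s_head = U.T @ s is the graph Fourier transform of the BOLD
# signal (one spectral coefficient per eigenvector and timepoint); since the
# eigenvector basis U is orthonormal, the signal is recovered as s = U @ s_head.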
# save_variable(G_U_cohort, 'G_U_cohort.pkl')
# save_variable(s_head_cohort, 's_head_cohort.pkl')
# save_variable(s_rand_cohort, 's_rand_cohort.pkl')
# G_U_cohort = load_variable('G_U_cohort.pkl')
# s_head_cohort = load_variable('s_head_cohort.pkl')
# s_rand_cohort = load_variable('s_rand_cohort.pkl')
# 8.5(optional). plot Sihag2020 plot
# take nc001 as example
nc001_idx = path_df.subname[path_df.subname == 'sub-nc001'].index.tolist()[0]
s_low = G_U_cohort[:,0:4, nc001_idx] @ s_head_cohort[0:4,:,nc001_idx]
s_high = G_U_cohort[:,-55:-51, nc001_idx] @ s_head_cohort[-55:-51,:,nc001_idx]
np.savetxt("nc001_s_low_both.csv", s_low, delimiter=",")
np.savetxt("nc001_s_high_both.csv", s_high, delimiter=",")
# 9. calculate the median-split threshold
NC_index = [cohort_name_ls.index(x) for x in cohort_name_ls if 'nc' in x]
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
s_head_NC = s_head_cohort[:,:,NC_index]
s_head_NC_square = np.power(s_head_NC, 2)
#s_head_NC_square = np.power(s_head_NC_square, 1/2)
s_head_NC_square_mean = np.mean(s_head_NC_square, (1,2)) # average for each timepoint and each subject
s_head_NC_AUCTOT = np.trapz(s_head_NC_square_mean)
i=0
AUC=0
while AUC < s_head_NC_AUCTOT/2:
AUC = np.trapz(s_head_NC_square_mean[:i])
i = i + 1
cutoff = i-1
print('finished calculating the median-split threshold')
print('cutoff = {}'.format(cutoff))
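# The cutoff is the graph-frequency index that splits the NC group's spectral
# energy in half (median split of the area under the mean squared GFT
# coefficients), in the spirit of the Preti-style analysis referenced in the
# normalize_type comment above.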
# 10. calculate decoupling index for empirical data
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
s_aligned_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
s_liberal_cohort = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_sub))
for sub_idx in range(len(path_df)):
s_aligned_cohort[:,:,sub_idx] = G_U_cohort[:,0:cutoff, sub_idx] @ s_head_cohort[0:cutoff,:,sub_idx]
s_liberal_cohort[:,:,sub_idx] = G_U_cohort[:,cutoff-1:-1, sub_idx] @ s_head_cohort[cutoff-1:-1,:,sub_idx]
s_aligned_individual = np.linalg.norm(s_aligned_cohort, ord=2, axis=1)
s_liberal_individual = np.linalg.norm(s_liberal_cohort, ord=2, axis=1)
s_deCoupIdx_individual = s_liberal_individual / s_aligned_individual
s_aligned = np.mean(s_aligned_individual[:,nc_idx], axis=1)
s_liberal = np.mean(s_liberal_individual[:,nc_idx], axis=1)
s_deCoupIdx_node = s_liberal/s_aligned # only for NC
print('finished calculating decoupling index for empirical data')
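# s_deCoupIdx is the structural decoupling index: per node, the L2 norm over
# time of the high-frequency ("liberal") component divided by that of the
# low-frequency ("aligned") component; values above 1 indicate activity that
# is weakly coupled to the structural connectome.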
# 11. calculate decoupling index for surrogate data only for NC
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
s_aligned_cohort_rand = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_nc, num_rand))
s_liberal_cohort_rand = np.zeros(shape=(num_parcels, num_BOLD_timepoints, num_nc, num_rand))
for i, sub_idx in enumerate(nc_idx):
for rand_idx in range(num_rand):
s_aligned_cohort_rand[:,:,i,rand_idx] = G_U_cohort[:,0:cutoff, sub_idx] @ s_rand_cohort[0:cutoff,:,sub_idx,rand_idx]
s_liberal_cohort_rand[:,:,i,rand_idx] = G_U_cohort[:,cutoff-1:-1, sub_idx] @ s_rand_cohort[cutoff-1:-1,:,sub_idx,rand_idx]
# norm for BOLD timepoints
s_aligned_norm_rand = np.linalg.norm(s_aligned_cohort_rand, ord=2, axis=1)
s_liberal_norm_rand = np.linalg.norm(s_liberal_cohort_rand, ord=2, axis=1)
# average for cohorts
s_aligned_rand = np.mean(s_aligned_norm_rand, axis=1)
s_liberal_rand = np.mean(s_liberal_norm_rand, axis=1)
# decoupling index
s_deCoupIdx_node_rand = s_liberal_rand/s_aligned_rand
print('finished calculating decoupling index for surrogate data')
# 12. network-level harmonics for empirical and surrogate data
s_aligned_network = np.zeros(shape=(num_network))
s_liberal_network = np.zeros(shape=(num_network))
s_aligned_network_individual = np.zeros(shape=(num_network, num_sub))
s_liberal_network_individual = np.zeros(shape=(num_network, num_sub))
s_aligned_network_rand = np.zeros(shape=(num_network, num_rand))
s_liberal_network_rand = np.zeros(shape=(num_network, num_rand))
for i in range(num_network):
s_aligned_network[i] = np.mean(s_aligned[network_rowindex_ls[i]])
s_liberal_network[i] = np.mean(s_liberal[network_rowindex_ls[i]])
s_aligned_network_individual[i,:] = np.mean(s_aligned_individual[network_rowindex_ls[i],:], axis=0)
s_liberal_network_individual[i,:] = np.mean(s_liberal_individual[network_rowindex_ls[i],:], axis=0)
s_aligned_network_rand[i,:] = np.mean(s_aligned_rand[network_rowindex_ls[i],:], axis=0)
s_liberal_network_rand[i,:] = np.mean(s_liberal_rand[network_rowindex_ls[i],:], axis=0)
s_deCoupIdx_network = s_liberal_network/s_aligned_network
s_deCoupIdx_network_individual = s_liberal_network_individual/s_aligned_network_individual
s_deCoupIdx_network_rand = s_liberal_network_rand/s_aligned_network_rand
# 13. brain-level harmonics for empirical and surrogate data
s_aligned_brain = np.mean(s_aligned)
s_liberal_brain = np.mean(s_liberal)
s_deCoupIdx_brain = s_liberal_brain/s_aligned_brain
s_aligned_brain_individual = np.mean(s_aligned_individual, axis=0)
s_liberal_brain_individual = np.mean(s_liberal_individual, axis=0)
s_deCoupIdx_brain_individual = s_liberal_brain_individual/s_aligned_brain_individual
s_aligned_brain_rand = np.mean(s_aligned_rand, axis=0)
s_liberal_brain_rand = np.mean(s_liberal_rand, axis=0)
s_deCoupIdx_brain_rand = s_liberal_brain_rand/s_aligned_brain_rand
print('s_deCoupIdx_brain = {}'.format(s_deCoupIdx_brain))
# 14. significance of surrogate for plot
# node-level
s_deCoupIdx_node_significance = np.logical_or((np.percentile(s_deCoupIdx_node_rand, 5, axis=1) >= s_deCoupIdx_node), (np.percentile(s_deCoupIdx_node_rand, 95, axis=1) <= s_deCoupIdx_node))
s_deCoupIdx_node_significance = s_deCoupIdx_node_significance.astype(int)  # np.int was removed in NumPy 1.24
# network-level
s_deCoupIdx_network_significance = np.logical_or((np.percentile(s_deCoupIdx_network_rand, 5, axis=1) >= s_deCoupIdx_network), (np.percentile(s_deCoupIdx_network_rand, 95, axis=1) <= s_deCoupIdx_network))
s_deCoupIdx_network_significance = s_deCoupIdx_network_significance.astype(int)  # np.int was removed in NumPy 1.24
# brain-level
s_deCoupIdx_brain_significance = np.logical_or((np.percentile(s_deCoupIdx_brain_rand, 5, axis=0) >= s_deCoupIdx_brain), (np.percentile(s_deCoupIdx_brain_rand, 95, axis=0) <= s_deCoupIdx_brain))
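# --- Illustrative sketch (not part of the pipeline): the significance test above simply flags
# empirical values that fall outside the [5th, 95th] percentile band of the surrogate
# distribution (a two-sided test at the 10% level).
def _demo_surrogate_significance(empirical, surrogate, axis=1):
    lo = np.percentile(surrogate, 5, axis=axis)
    hi = np.percentile(surrogate, 95, axis=axis)
    return np.logical_or(lo >= empirical, hi <= empirical).astype(int)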
# 15. save results to csv
if normalize_type == 'W':
normalize_str = '_W'
elif normalize_type == 'L':
normalize_str = '_L'
elif normalize_type == 'both':
normalize_str = '_both'
if functional_type == 'BOLD': # func_sig is BOLD_series_3D
csv_folder = 'BOLD_4D_' + tract_type + '_' + normalize_str
if not os.path.exists(os.path.abspath(csv_folder)):
os.mkdir(os.path.abspath(csv_folder))
# save surrogate (ndarray with num_rand × num_region)
s_deCoupIdx_node_rand_df = pd.DataFrame(data = s_deCoupIdx_node_rand.T, columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_rand_df = pd.DataFrame(data = s_deCoupIdx_network_rand.T, columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_brain_rand_df = pd.DataFrame(data = s_deCoupIdx_brain_rand)
s_deCoupIdx_node_rand_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_node_rand_df.csv'))
s_deCoupIdx_network_rand_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_' + '-network_rand_df.csv'))
s_deCoupIdx_brain_rand_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_brain_rand_df.csv'))
# save surrogate significance (ndarray with 1 × num_region)
s_deCoupIdx_node_significance_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_node_significance, axis=0), columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_significance_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_network_significance, axis=0), columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_node_significance_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_node_significance_df.csv'))
s_deCoupIdx_network_significance_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_' + '-network_significance_df.csv'))
with open(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_brain_significance.txt'), 'w') as output_file:
output_file.write(str(s_deCoupIdx_brain_significance))
# save empirical harmonics for NC cohort (for plot usage, ndarray with 1 × num_region)
s_deCoupIdx_node_empirical_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_node, axis=0), columns = network_assign_csv.loc[:,'LABEL'])
s_deCoupIdx_network_empirical_df = pd.DataFrame(data = np.expand_dims(s_deCoupIdx_network, axis=0), columns = network_unique_df.loc[:,'NETWORK'])
s_deCoupIdx_node_empirical_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_node_empirical_df.csv'))
s_deCoupIdx_network_empirical_df.to_csv(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_' +'-network_empirical_df.csv'))
with open(os.path.join(os.path.abspath(csv_folder), 's_deCoupIdx_brain_empirical.txt'), 'w') as output_file:
output_file.write(str(s_deCoupIdx_brain))
# save subject-level harmonics (ndarray with num_sub × num_region)
s_deCoupIdx_node_individual_df = | pd.DataFrame(data = s_deCoupIdx_individual.T, columns = network_assign_csv.loc[:,'LABEL']) | pandas.DataFrame |
import json
import logging
import os
import pandas as pd
from .TfidfModel import TFIDFModel
logging.basicConfig(format='%(filename)s:%(lineno)d %(message)s')
log = logging.getLogger(__name__)
log.setLevel('INFO')
# print(CONFIG['dataset'])
if 'DATA_DIR' in os.environ.keys():
CONFIG = json.load(open('../config.json'))
data_folder = os.path.join(os.environ['DATA_DIR'], CONFIG['dataset'])
else:
data_folder = os.path.join('/data/tripadvisor_hotel')
class ReviewDB():
def __init__(self, data_folder=data_folder):
self.entity_db_dict = {
"all": EntityDB("all", data_folder)
}
self.tfidf_dict = {
name: TFIDFModel(db) for name, db in self.entity_db_dict.items()
}
self.tfidf_bigram_dict = {
name: TFIDFModel(db, 2) for name, db in self.entity_db_dict.items()
}
def _add_ent(self, entity_id):
log.info("entity_id is: " + entity_id)
db = EntityDB(entity_id, data_folder)
self.entity_db_dict[entity_id] = db
self.tfidf_dict[entity_id] = TFIDFModel(db)
self.tfidf_bigram_dict[entity_id] = TFIDFModel(db, 2)
def _db(self, entity_id):
if not entity_id in self.entity_db_dict:
self._add_ent(entity_id)
return self.entity_db_dict[entity_id]
def get_reviews(self, entity_id, reviews_id):
db = self._db(entity_id)
return db.get_review_from_id(reviews_id)
def get_cluster(self, entity_id, cluster_id):
db = self._db(entity_id)
return db.get_cluster_from_id(cluster_id)
def get_centroids(self, entity_id, cluster_id):
db = self._db(entity_id)
log.info('cluster_id is ' + str(cluster_id))
return db.get_centroids_from_id(cluster_id)
def get_topwords(self, entity_id, cluster_id, k=1):
if not entity_id in self.entity_db_dict:
self._add_ent(entity_id)
if k == 1:
return self.tfidf_dict[entity_id].top_k(cluster_id)
elif k == 2:
return self.tfidf_bigram_dict[entity_id].top_k(cluster_id)
else:
            raise Exception("Invalid value {} for k. There is only support for k=1 and k=2".format(k))
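# --- Illustrative usage sketch (assumes the data folder layout this module expects exists on
# disk; the entity id "all" and cluster_id 0 below are just examples, not guaranteed values).
def _demo_review_db_usage():
    db = ReviewDB()                                      # uses the module-level data_folder
    unigrams = db.get_topwords("all", cluster_id=0)      # top TF-IDF unigrams for cluster 0
    bigrams = db.get_topwords("all", cluster_id=0, k=2)  # top TF-IDF bigrams for cluster 0
    return unigrams, bigrams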
class EntityDB():
def __init__(self, entity_id, data_folder):
if entity_id != "all":
old_data_folder = data_folder
data_folder = os.path.join(os.path.join(data_folder, 'hotel-clusters'), entity_id)
            if not os.path.exists(data_folder):
                raise FileNotFoundError("business_id {0} not found in file {1}".format(entity_id, old_data_folder))
try:
cluster_file=os.path.join(data_folder, 'clusters.csv')
log.info(cluster_file)
clusters_df = | pd.read_csv(cluster_file, index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import os
import textwrap
import string
import unicodedata
import sys
import sqlite3
import easygui
import re
import copy
import json
import xlsxwriter
# import pyanx
MAX_TAM_LABEL = 100  # maximum number of characters in labels
PALETA = {'vermelho':'#e82f4c', 'laranja':'#ea7e16', 'amarelo':'#f5d516', 'verde': '14bd11', 'azul':'#0b67d0', 'roxo':'#6460aa'}
PALE_TAB = {
'laranja' :['#FF6D00','#FF9800','#FFB74D','#FFECB3'],
'verde' :['#00C853','#8BC34A','#AED581','#DCEDC8'],
'azul' :['#2962FF','#2196F3','#64B5F6','#BBDEFB'],
'rosa' :['#7B1FA2','#9C27B0','#BA68C8','#E1BEE7'],
'ciano' :['#00B8D4','#00BCD4','#4DD0E1','#B2EBF2'],
'roxo' :['#6200EA','#673AB7','#9575CD','#D1C4E9'],
'amarelo' :['#FFD600','#FFEB3B','#FFF176','#FFF9C4'],
'vermelho':['#d50000','#f44336','#e57373','#ffcdd2'],
'marrom' :['#5D4037','#795548','#A1887F','#D7CCC8'],
'cinza' :['#455A64','#607D8B','#90A4AE','#CFD8DC']
}
PALE_TAB_CORES = [cor for cor in PALE_TAB.keys()]
TAM_PALETA_CORES = len(PALE_TAB_CORES) - 3  # the last 3 colors are reserved
def definir_cor(nro: int) -> str:
    nro_cor = nro % (len(PALE_TAB) - 3)  # 'vermelho', 'marrom' and 'cinza' are reserved
return (PALE_TAB_CORES[nro_cor])
class estrutura:  # spreadsheet (worksheet) specifications
def __init__(self, nome="", estr=[], pasta="./"):
self.nome = nome
self.estr = estr
self.pasta = pasta
self.nome_rif = ''
def mudar_pasta(self, pasta):
self.pasta = pasta
def xlsx(self):
return self.nome + ".xlsx"
def csv(self):
return 'RIF'+self.nome_rif+'_'+self.nome + ".csv"
def estr_upper(self):
result = []
for elem in self.estr:
result.append(elem.upper())
return result
def nomearq(self):
return os.path.join(self.pasta, self.xlsx())
def nomearqcsv(self):
return os.path.join(self.pasta, self.csv())
def arquivo_existe(self):
if (
self.nome.upper() == "grupos".upper()
or self.nome.upper() == "vinculos".upper()
        ):  # a new empty one is created, since it does not come from COAF
return True
else:
return os.path.isfile(self.nomearq())
def estr_compativel(self, outra_estr=[]):
ok = all(elem.upper() in self.estr_upper() for elem in outra_estr)
if not ok:
print(self.estr)
print(outra_estr)
return ok
def exibir(self):
strestr = ",".join(self.estr)
return self.nome + ": " + strestr
def csv2xlsx(self):
nomecsv = self.nomearqcsv()
df = pd.read_csv(nomecsv, sep=';', header=0, dtype=str, encoding='latin1',index_col=False )
try:
df.to_excel(self.nomearq(),index=False)
except:
print('Erro gerando XLSX de entrada')
def help_estruturas(estruturas):
print("Estruturas esperadas das planilhas:")
for e in estruturas:
print(" " + e.exibir())
class log:
def __init__(self):
self.logs = u""
def gravalog(self, linha):
print(linha)
self.logs += linha + "\n"
def lelog(self):
return self.logs
class nodo:
def __init__(self, id, label, tipo="ENT", tooltip="", fonte="RIF"):
self.id = id
self.tipo = tipo
self.label = label
self.cor = "Silver"
self.sexo = 0
self.m1 = 0
self.m2 = 0
self.situacao = ""
self.dataOperacao = ""
self.texto_tooltip = tooltip
self.fonte = fonte
self.camada = 5 if self.fonte == "RIF" else 5
def todict(self):
return {
"id": self.id,
"tipo": self.tipo,
"sexo": self.sexo,
"label": self.label,
"camada": self.camada,
"situacao": self.situacao,
"cor": self.cor,
"texto_tooltip": self.texto_tooltip,
"m1": self.m1,
"m2": self.m2,
"m3": 0,
"m4": 0,
"m5": 0,
"m6": 0,
"m7": 0,
"m8": 0,
"m9": 0,
"m10": 0,
"m11": 0,
"dataoperacao": self.dataOperacao,
}
class noPF(nodo):
def __init__(self, id, label="", cor="Silver", sexo=0, fonte="RIF"):
nodo.__init__(self, id, label, "PF")
self.cor = cor
self.sexo = sexo
def todict(self):
return nodo.todict(self)
class noPJ(nodo):
def __init__(self, id, label="", cor="Silver", fonte="RIF"):
nodo.__init__(self, id, label, "PJ")
self.cor = cor
self.sexo = 1
class noConta(nodo):
def __init__(self, id, label="CONTA", cor=PALE_TAB['verde'][0]):
nodo.__init__(self, id, label, "CCR")
self.cor = cor
class noGrupo(nodo):
def __init__(self, id, label="GRUPO", cor=PALE_TAB['azul'][0]):
nodo.__init__(self, id, label, "GR")
self.cor = cor
self.fonte = "grupos"
class noComunicacao(nodo):
def __init__(self, id, label="COMUNICACAO", cor=PALE_TAB['marrom'][1], dataOperacao=None):
nodo.__init__(self, id, label, "COM")
self.cor = cor
# self.dataOperacao=dataOperacao
class aresta:
def __init__(self, origem, destino, descricao="", cor="Silver", fonte="RIF"):
self.origem = origem
self.destino = destino
self.descricao = descricao
self.cor = cor
self.fonte = fonte
self.camada = 5 if self.fonte == "RIF" else 5
def todict(self):
return {
"origem": self.origem,
"destino": self.destino,
"cor": self.cor,
"camada": self.camada,
"tipoDescricao": {"0": self.descricao},
}
lg = log()
com = estrutura(
"Comunicacoes",
[
"Indexador",
"idComunicacao",
"NumeroOcorrenciaBC",
"Data_do_Recebimento",
"Data_da_operacao",
"DataFimFato",
"cpfCnpjComunicante",
"nomeComunicante",
"CidadeAgencia",
"UFAgencia",
"NomeAgencia",
"NumeroAgencia",
"informacoesAdicionais",
"CampoA",
"CampoB",
"CampoC",
"CampoD",
"CampoE",
"CodigoSegmento",
],
)
env = estrutura(
"Envolvidos",
[
"Indexador",
"cpfCnpjEnvolvido",
"nomeEnvolvido",
"tipoEnvolvido",
"agenciaEnvolvido",
"contaEnvolvido",
"DataAberturaConta",
"DataAtualizacaoConta",
"bitPepCitado",
"bitPessoaObrigadaCitado",
"intServidorCitado",
],
)
oco = estrutura("Ocorrencias", ["Indexador", "idOcorrencia", "Ocorrencia"])
# opcionais
gru = estrutura("Grupos", ["cpfCnpjEnvolvido", "nome_Envolvido", "Grupo", "Detalhe", "Analise"])
vin = estrutura(
"Vinculos",
[
"cpfCnpjEnvolvido",
"nome_Envolvido",
"cpfCnpjVinculado",
"nome_Vinculado",
"Descricao",
],
)
estruturas = [com, env, oco, gru, vin]
# help_estruturas(estruturas)
def removeAcentos(data):
if data is None:
return u""
# if isinstance(data,str):
# data = unicode(data,'latin-1','ignore')
return "".join(
x for x in unicodedata.normalize("NFKD", data) if x in string.printable
)
def gerar_planilha(arquivo, df, nome, indice=False):
def formatar_cabecalho(cor):
return arquivo.book.add_format(
{
"bold": True,
"text_wrap": True,
"valign": "top",
"fg_color": cor,
"border": 1,
}
)
# Palette URL: http://paletton.com/#uid=43K0I0kw0w0jyC+oRxVy4oIDfjr
PALETA = [
"#5778C0",
"#a4b3b6",
"#FF8D63",
"#FFE700",
"#FFA900",
"#000000",
    ]  # blue, gray, red, yellow, orange, black
COR_PRINCIPAL = PALETA[0]
COR_NEUTRA_CLARA = PALETA[1]
COR_SECUNDARIA = PALETA[2]
COR_TERCIARIA = PALETA[4]
COR_NEUTRA_ESCURA = PALETA[5]
df.style.bar(color=COR_PRINCIPAL)
print("antes " + nome)
df.to_excel(arquivo, sheet_name=nome, index=indice)
print("depois " + nome)
# Write the column headers with the defined format.
# print(df.index.names)
if len(arquivo.sheets) > 6:
cor_basica = COR_SECUNDARIA
elif len(arquivo.sheets) < 3:
cor_basica = COR_PRINCIPAL
else:
cor_basica = COR_NEUTRA_CLARA
if not indice:
for col_num, value in enumerate(df.columns.values):
arquivo.sheets[nome].write(
0, col_num, value, formatar_cabecalho(cor_basica)
)
arquivo.sheets[nome].set_tab_color(cor_basica)
else:
for col_num, value in enumerate(df.index.names):
arquivo.sheets[nome].write(
0, col_num, value, formatar_cabecalho(cor_basica if value != 'Analise' else COR_SECUNDARIA)
)
for col_num, value in enumerate(df.columns.values):
arquivo.sheets[nome].write(
0,
col_num + len(df.index.names),
value,
formatar_cabecalho(COR_NEUTRA_CLARA),
)
arquivo.sheets[nome].set_tab_color(cor_basica)
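# --- Illustrative usage sketch (hypothetical file name and DataFrame): gerar_planilha expects a
# pandas ExcelWriter backed by xlsxwriter, because it uses writer.book / writer.sheets formatting.
def _demo_gerar_planilha():
    df = pd.DataFrame({'cpfCnpjEnvolvido': ['123'], 'Grupo': ['A']})
    writer = pd.ExcelWriter('exemplo_demo.xlsx', engine='xlsxwriter')
    gerar_planilha(writer, df, 'Grupos', indice=False)
    writer.save()  # same call style used elsewhere in this module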
def gerar_planilhaXLS(arquivo, df, nome, indice=False):
df.style.bar(color="#99ccff")
df.to_excel(arquivo, sheet_name=nome, index=indice)
def tipoi2F(umou2=1, linha=None, carJuncao="\r "):
print("linha= ", linha)
descricao = linha[1 if umou2 == 1 else 3]
# if descricao == '': #telefone ou endereco
# descricao = carJuncao.join(node[4:].split('__'))
# else:
# if self.GNX.node[node]['tipo'] !='TEL':
# descricao = Obj.parseCPFouCNPJ(node) + carJuncao + carJuncao.join(textwrap.wrap(descricao,30))
# dicTipo = {'TEL':u'Telefone', 'END':u'Local', 'PF':u'PF', 'PJ':u'PJ', 'PE':u'Edifício', 'ES':u'Edifício', 'CC':u'Conta','INF':u'Armário' }
tipo = linha[7 if umou2 == 1 else 8]
# tipoi2 = dicTipo[tipo]
tipoi2 = u"Escritório"
if tipo in ("TEL", "END", "CC"):
descricao = ""
else:
descricao = carJuncao.join(textwrap.wrap(descricao, 30))
sexo = 1
if tipo == "PF":
# if self.GNX.node[node]['sexo']==1:
if not sexo or sexo == 1:
tipoi2 = u"Profissional (masculino)"
elif sexo == 2:
tipoi2 = u"Profissional (feminino)"
elif tipo == "PJ":
# if node[8:12]!='0001':
# if sexo != 1: #1=matriz
        if sexo % 2 == 0:  # 1 = head office (matriz)
            tipoi2 = u"Apartamento"  # branch of a company (filial)
else:
tipoi2 = u"Escritório"
elif tipo == "PE":
tipoi2 = u"Oficina"
corSituacao = linha[9 if umou2 == 1 else 10]
if linha[4 if umou2 == 1 else 5] == 0:
corSituacao = "Vermelho"
return (tipoi2, descricao, corSituacao)
def to_i2(df, arquivo=None):
dicTiposIngles = {
u"Profissional (masculino)": u"Person",
u"Profissional (feminino)": u"Woman",
u"Escritório": u"Office",
u"Apartamento": u"Workshop",
u"Governo": u"House",
u"Casa": u"House",
u"Loja": u"Office",
u"Oficina": u"Office",
u"Telefone": u"Phone",
u"Local": u"Place",
u"Conta": u"Account",
u"Armário": u"Cabinet",
u"Edifício": u"Office",
}
# chart = Pyanx_macros()
noi2origem = {}
noi2destino = {}
for idc, campos in df.iterrows():
# print('campos= ',campos)
tipo, descricao, corSituacao = tipoi2F(linha=campos, umou2=1, carJuncao=" ")
noi2origem[idc] = chart.add_node(
entity_type=dicTiposIngles.get(tipo, ""),
label=(campos["cpfcnpj1"]) + u"-" + (descricao),
)
tipo, descricao, corSituacao = tipoi2F(linha=campos, umou2=2, carJuncao=" ")
noi2destino[idc] = chart.add_node(
entity_type=dicTiposIngles.get(tipo, ""),
label=(campos["cpfcnpj1"]) + u"-" + (descricao),
)
nomeLigacao = campos["descrição"]
chart.add_edge(noi2origem[idc], noi2destino[idc], removeAcentos(nomeLigacao))
# idc += 1
fstream = chart.createStream(
layout="spring_layout", iterations=0
    )  # does not compute node positions
retorno = fstream.getvalue()
fstream.close()
if arquivo is not None:
f = open(arquivo, "w")
f.write(retorno)
f.close()
return retorno
def soDigitos(texto):
return re.sub("[^0-9]", "", texto)
def estimarFluxoDoDinheiro(tInformacoesAdicionais):
    # takes the text from the InformacoesAdicionais column of Comunicacoes.csv and tries to
    # estimate the amount attached to each CPF/CNPJ. The text usually looks like
    # "R$ 20,8 Mil enviada para Jardim Indústria e Comércio - CNPJ 606769xxx".
    # We first split the text on "R$" and keep the chunks that are followed by a CPF or CNPJ.
    # Returns a dictionary such as
    # {'26106949xx': 'R$420 MIL RECEBIDOS, R$131 MIL POR', '68360088xxx': 'R$22 MIL, RECEBIDAS'}
# lista = re.sub(' +', ' ',tInformacoesAdicionais).upper().split('R$')
t = re.sub(" +", " ", tInformacoesAdicionais).upper()
lista = t.split("R$")
listaComTermoCPFCNPJ = []
for item in lista:
if "CPF" in item or "CNPJ" in item:
listaComTermoCPFCNPJ.append(item.strip())
listaValores = []
valoresDict = {}
for item in listaComTermoCPFCNPJ:
valorPara = ""
cpn = ""
le = item.split(" ")
valor = "R$" + le[0] # + ' ' + le[1] # + ' ' + le[2]
if le[1].upper().rstrip(",").rstrip("S").rstrip(",") in (
"MIL",
"MI",
"RECEBIDO",
"RECEBIDA",
"ENVIADA",
"RETIRADO",
"DEPOSITADO",
"CHEQUE",
):
valor += " " + le[1]
if le[2].upper().rstrip(",").rstrip("S") in (
"MIL",
"MI",
"RECEBIDO",
"RECEBIDA",
"ENVIADA",
"RETIRADO",
"DEPOSITADO",
"CHEQUE",
):
valor += " " + le[2]
if "CPF" in item:
aux1 = item.split("CPF ")
try:
aux2 = aux1[1].split(" ")
cpn = soDigitos(aux2[0])
except:
pass
elif "CNPJ" in item:
aux1 = item.split("CNPJ ")
try:
aux2 = aux1[1].split(" ")
cpn = soDigitos(aux2[0])
except:
pass
if cpn:
listaValores.append(valorPara)
if cpn in valoresDict:
v = valoresDict[cpn]
v.add(valor)
valoresDict[cpn] = v
else:
valoresDict[cpn] = set([valor])
d = {}
for k, v in valoresDict.items():
d[k] = ", ".join(v)
return d
# .def estimaFluxoDoDinheiro(t):
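# --- Illustrative usage sketch for estimarFluxoDoDinheiro (the input text and the CPF/CNPJ
# digits below are made up).
def _demo_estimar_fluxo():
    texto = ('R$ 420 MIL RECEBIDOS do titular CPF 12345678901 e '
             'R$ 22 MIL ENVIADAS para a empresa CNPJ 12345678000199')
    return estimarFluxoDoDinheiro(texto)
    # expected result:
    # {'12345678901': 'R$420 MIL RECEBIDOS', '12345678000199': 'R$22 MIL ENVIADAS'}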
def consolidar_pd(pasta):
"""Processa as planilhas comunicacoes, envolvidos, ocorrencias e grupo em planilhas com agrupamento """
arq = com.nomearq() # Comunicacoes
nome_rif = com.nome_rif
try:
df_com = pd.read_excel(
arq, options={"strings_to_numbers": False}, converters={"Indexador": str}
)
df_com["Indexador"] = pd.to_numeric(df_com["Indexador"], errors="coerce")
df_com["Data_da_operacao"] = pd.to_datetime(df_com["Data_da_operacao"])
if not com.estr_compativel(df_com.columns):
print(com.estr_upper())
mostra_erro("O arquivo " + arq + " contém colunas incompatíveis: ")
raise ("Estrutura incompatível")
lg.gravalog("Arquivo " + arq + " lido.")
except Exception as exc:
print("Erro ao ler o arquivo " + arq + "\n" + str(type(exc)))
arq = env.nomearq() # Envolvidos
try:
df_env = pd.read_excel(
arq, options={"strings_to_numbers": False}, converters={"Indexador": str}
)
df_env["Indexador"] = pd.to_numeric(df_env["Indexador"], errors="coerce")
df_env = df_env[pd.notnull(df_env["Indexador"])]
if not env.estr_compativel(df_env.columns):
print(env.estr_upper())
mostra_erro("O arquivo " + arq + " contém colunas incompatíveis: ")
raise ("Estrutura incompatível")
lg.gravalog("Arquivo " + arq + " lido.")
except Exception as exc:
lg.gravalog("Erro ao ler o arquivo " + arq + "\n" + str(type(exc)))
arq = oco.nomearq() # Ocorrencias
try:
df_oco = pd.read_excel(arq, options={"strings_to_numbers": False})
df_oco["Indexador"] = pd.to_numeric(df_oco["Indexador"], errors="coerce")
df_oco = df_oco[pd.notnull(df_oco["Indexador"])]
dictOco = {}
dictOco2 = {}
for r in df_oco.itertuples(index=False):
if r.Indexador in dictOco:
s = dictOco[r.Indexador]
s += "; " + r.Ocorrencia
dictOco[r.Indexador] = s
else:
dictOco[r.Indexador] = r.Ocorrencia
dictOco2["Indexador"] = []
dictOco2["Ocorrencia"] = []
for k, v in dictOco.items():
dictOco2["Indexador"].append(k)
dictOco2["Ocorrencia"].append(v)
df_oco2 = pd.DataFrame.from_dict(dictOco2)
if not oco.estr_compativel(df_oco.columns):
print(oco.estr_upper())
mostra_erro("O arquivo " + arq + " contém colunas incompatíveis: ")
raise ("Estrutura incompatível")
lg.gravalog("Arquivo " + arq + " lido.")
except Exception as exc:
lg.gravalog("Erro ao ler o arquivo " + arq + "\n" + str(type(exc)))
arq = gru.nomearq() # Grupos/detalhes
if not os.path.isfile(arq): # criar arquivo vazio
consolidado = pd.ExcelWriter(
arq,
engine="xlsxwriter",
options={"strings_to_numbers": False},
datetime_format="dd/mm/yyyy",
date_format="dd/mm/yyyy",
)
gerar_planilha(
consolidado, pd.DataFrame(columns=gru.estr), gru.nome, indice=False
)
consolidado.save()
lg.gravalog(
"O arquivo "
+ arq
+ " não foi encontrado. Um novo foi criado com as colunas "
+ gru.exibir()
)
try:
df_gru = pd.read_excel(arq, options={"strings_to_numbers": False})
df_gru = df_gru.fillna("-")
if not gru.estr_compativel(df_gru.columns):
print(gru.estr_upper())
mostra_erro("O arquivo " + arq + " contém colunas incompatíveis: ")
raise ("Estrutura incompatível")
lg.gravalog("Arquivo " + arq + " lido.")
except Exception as exc:
lg.gravalog("Erro ao ler o arquivo " + arq + "\n" + str(type(exc)))
arq = vin.nomearq() # Vinculos
if not os.path.isfile(arq): # criar arquivo vazio
consolidado = pd.ExcelWriter(
arq,
engine="xlsxwriter",
options={"strings_to_numbers": False},
datetime_format="dd/mm/yyyy",
date_format="dd/mm/yyyy",
)
gerar_planilha(
consolidado, pd.DataFrame(columns=vin.estr), vin.nome, indice=False
)
consolidado.save()
lg.gravalog(
"O arquivo "
+ arq
+ " não foi encontrado. Um novo foi criado com as colunas "
+ vin.exibir()
)
try:
df_vin = pd.read_excel(arq, options={"strings_to_numbers": False})
if not vin.estr_compativel(df_vin.columns):
print(vin.estr_upper())
mostra_erro("O arquivo " + arq + " contém colunas incompatíveis: ")
raise ("Estrutura incompatível")
lg.gravalog("Arquivo " + arq + " lido.")
except Exception as exc:
lg.gravalog("Erro ao ler o arquivo " + arq + "\n" + str(type(exc)))
nenhumgrupo = len(df_gru["Grupo"].unique())==0
if nenhumgrupo:
grupos_selecionados = None
else:
        grupos_selecionados = gui_grupos(df_gru["Grupo"].unique())  # user selection
    if grupos_selecionados is None:
        grupos_selecionados = df_gru["Grupo"].unique()  # none selected = use all
print("Consolidando")
if nome_rif == '':
nome_rif = os.path.basename(pasta)
arq = os.path.join(pasta, "RIF_consolidados"+"_"+nome_rif+".xlsx")
porGrupo = len(df_gru["Grupo"].unique()) > 1
try:
for df in [df_com, df_env, df_gru]:
            df.dropna(how='all', inplace=True)  # drop rows that are completely empty
#print("antes merge")
df_consolida = pd.merge(df_com, df_env, how="left", on="Indexador")
df_indexador = df_env.groupby(['Indexador'], as_index=False).agg({'cpfCnpjEnvolvido': '#'.join})
        df_indexador.loc[:,'IndexadorTXT'] = df_indexador.loc[:,'Indexador'].fillna(0).astype(np.int64).astype(str)  # np.str was removed in NumPy 1.24
df_indexador.rename(columns={'cpfCnpjEnvolvido': 'cpfCnpjEnvolvido_todos'}, inplace=True)
df_consolida = | pd.merge(df_consolida, df_oco2, how="left", on="Indexador") | pandas.merge |
"""
Utilities that help with the building of tensorflow keras models
"""
import io
from muti import chu, genu
import tensorflow as tf
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.io as pio
from plotly.subplots import make_subplots
import warnings
import os
import math
import multiprocessing
def polynomial_decay_learning_rate(step: int, learning_rate_start: float, learning_rate_final: float,
decay_steps: int, power: float):
"""
Manual implementation of polynomial decay for learning rate
:param step: which step we're on
:param learning_rate_start: learning rate for epoch 0
:param learning_rate_final: learning rate for epoch decay_steps
:param decay_steps: epoch at which learning rate stops changing
:param power: exponent
:return:
"""
if step <= decay_steps:
delta = float(learning_rate_start - learning_rate_final)
lr = delta * (1.0 - float(step) / float(decay_steps)) ** power + learning_rate_final
return lr
return learning_rate_final
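# --- Illustrative usage sketch (hyperparameter values below are arbitrary): wiring the decay
# function above into a tf.keras LearningRateScheduler callback.
def polynomial_decay_callback(learning_rate_start=0.01, learning_rate_final=0.001,
                              decay_steps=20, power=1.0):
    return tf.keras.callbacks.LearningRateScheduler(
        lambda epoch, lr: polynomial_decay_learning_rate(
            epoch, learning_rate_start, learning_rate_final, decay_steps, power))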
def get_pred(yh, column=None, wts=None):
"""
Returns an array of predicted values from a keras predict method. If column is None, then this
assumes the output has one column and it returns a flattened array.
If column is an int, it returns that column from the prediction matrix.
If column is a list of int, it returns the column sums
:param yh: keras model prediction
:param column: which column(s) to return, int or list of int
:param wts: array of weights. if yh is n x p, wts has length p. nd.array if specified
:return: prediction array
:rtype nd.array
"""
if wts is not None:
yh = yh * wts
if column is None:
return np.array(yh).flatten()
if not isinstance(column, list):
return yh[:, column]
# sum up columns
return np.sum(yh[:, column], axis=1)
def model_predictions(df: pd.DataFrame, specs: list, in_place = True, log_odds=False):
"""
find the predicted values for a keras model
    :param df: data frame to run the model over
    :param specs: specifications of model. list elements
[0] - location
[1] - features_dict
[2] - target of model
[3] - column(s)
[4] - output name
:param log_odds: if true, take log-odds of result
:return:
"""
modl = tf.keras.models.load_model(specs[0])
ds = get_tf_dataset(specs[1], specs[2], df, 1000, 1)
yh = get_pred(modl.predict(ds), specs[3])
if log_odds:
i = yh == 1.0
yh[i] = .999999
i = yh == 0.0
yh[i] = 0.000001
yh = np.log(yh / (1.0 - yh))
if in_place:
df[specs[4]] = yh
return
else:
return yh
def plot_history(history: dict, groups=['loss'], metric='loss', first_epoch=0, title=None, plot_dir=None, in_browser=False):
"""
plot the history of metrics from a keras model tf build
:param history: history returned from keras fit
:param groups: groups to plot
:param metric: metric to plot
:param first_epoch: first element to plot
:param title: title for plot
:param plot_dir: directory to plot to
:param in_browser: if True display in browser
:return:
"""
fig = []
for g in groups:
        x = np.arange(first_epoch, len(history[g]))
        y = history[g][first_epoch:]
fig += [go.Scatter(x=x, y=y, name=g)]
if title is None:
title = 'TensorFlow Model Build<br>' + metric
layout = go.Layout(title=title,
xaxis=dict(title='Epoch'),
yaxis=dict(title=metric))
figx = go.Figure(fig, layout=layout)
if in_browser:
pio.renderers.default = 'browser'
figx.show()
if plot_dir is not None:
os.makedirs(plot_dir, exist_ok=True)
plot_file = plot_dir + metric + '.png'
figx.write_image(plot_file)
plot_file = plot_dir + metric + '.html'
figx.write_html(plot_file)
def build_column(feature_name: str, feature_params: list, out_path=None, print_details=True):
"""
Returns a tensorflow feature columns and, optionally, the vocabulary for categorical and
embedded features. Optionally creates files of the vocabularies for use in TensorBoard.
:param feature_name: name of the feature
:param feature_params:
Element 0: type of feature ('cts'/'spl', 'cat', 'emb').
Element 1: ('cat', 'emb') vocabulary list (list of levels)
Element 2: ('cat', 'emb') default index. If None, 0 is used
Element 3: ('emb') embedding dimension
:param out_path: path to write files containing levels of 'cat' and 'emb' variables
:param print_details: print info about each feature
:return: tf feature column and (for 'cat' and 'emb') a list of levels (vocabulary)
"""
if feature_params[0] == 'cts' or feature_params[0] == 'spl':
if print_details:
print('col {0} is numeric'.format(feature_name))
return tf.feature_column.numeric_column(feature_name)
# categorical and embedded features
if feature_params[0] in ['cat', 'emb']:
vocab = feature_params[1]
# save vocabulary for TensorBoard
if out_path is not None:
if out_path[-1] != '/':
out_path += '/'
if not os.path.isdir(out_path):
os.makedirs(out_path)
f = open(out_path + feature_name + '.txt', 'w')
f.write('label\tId\n')
for j, s in enumerate(vocab):
f.write(str(s) + '\t' + str(j) + '\n')
f.close()
dv = [j for j in range(len(vocab)) if vocab[j] == feature_params[2]][0]
col_cat = tf.feature_column.categorical_column_with_vocabulary_list(feature_name, vocab,
default_value=dv)
# go with 1-hot encoding
if feature_params[0] == 'cat':
col_ind = tf.feature_column.indicator_column(col_cat)
if print_details:
print('col {0} is categorical with {1} levels'.format(feature_name, len(vocab)))
return col_ind
# for embedded features, the third element of feature_params input is the dimension of the
# embedding
levels = feature_params[3]
col_emb = tf.feature_column.embedding_column(col_cat, levels)
if print_details:
print('col {0} is embedded with {1} levels'.format(feature_name, levels))
return col_emb
def build_model_cols(feature_dict: dict, out_vocab_dir=None, print_details=True):
"""
Builds inputs needed to specify a tf.keras.Model. The tf_cols_* are TensorFlow feature_columns. The
inputs_* are dictionaries of tf.keras.Inputs. The tf_cols_* are used to specify keras.DenseFeatures methods and
the inputs_* are the inputs to those layers.
:param feature_dict: dictionary of features to build columns for. The key is the feature name. The entry is a list:
feature type (str) 'cts'/'spl', 'cat', 'emb'
list of unique levels for 'cat' and 'emb'
embedding dimension for 'emb'
:param out_vocab_dir: directory to write out unique levels
:return: 4 lists:
- tf_cols_cts: tf.feature_column defining each continuous feature
- inputs_cts: list of tf.keras.Inputs for each continuous column
- tf_cols_cat: tf.feature_column defining each categorical ('cat','emb') feature
- inputs_cat: list of tf.keras.Inputs for each categorical ('cat', 'emb') column
The tf_cols_* are used in tf.keras.layers.DenseFeatures
the inputs_* are used to define the inputs to those tensors
"""
tf_cols_cts = []
tf_cols_cat = []
inputs_cts = {}
inputs_cat = {}
for feature in feature_dict.keys():
if feature_dict[feature][0] == 'cts' or feature_dict[feature][0] == 'spl':
feat = build_column(feature, feature_dict[feature], print_details=print_details)
tf_cols_cts += [feat]
inputs_cts[feature] = tf.keras.Input(shape=(1,), name=feature)
else:
feat = build_column(feature, feature_dict[feature], out_vocab_dir, print_details=print_details)
tf_cols_cat += [feat]
inputs_cat[feature] = tf.keras.Input(shape=(1,), name=feature, dtype=tf.string)
return tf_cols_cts, inputs_cts, tf_cols_cat, inputs_cat
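# --- Illustrative sketch (hypothetical model head; the real builds that use this helper may
# differ): wiring the outputs of build_model_cols into a functional keras model with
# DenseFeatures layers.
def _demo_build_functional_model(feature_dict):
    cols_cts, in_cts, cols_cat, in_cat = build_model_cols(feature_dict, print_details=False)
    inputs = {**in_cts, **in_cat}
    pieces = []
    if cols_cts:
        pieces.append(tf.keras.layers.DenseFeatures(cols_cts)(inputs))
    if cols_cat:
        pieces.append(tf.keras.layers.DenseFeatures(cols_cat)(inputs))
    x = tf.keras.layers.concatenate(pieces) if len(pieces) > 1 else pieces[0]
    x = tf.keras.layers.Dense(16, activation='relu')(x)
    output = tf.keras.layers.Dense(1, activation='sigmoid')(x)
    return tf.keras.Model(inputs=inputs, outputs=output)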
def get_tf_dataset(feature_dict: dict, target: str, df: pd.DataFrame, batch_size: int, repeats=0):
"""
build a tf dataset from a pandas DataFrame
:param feature_dict: dictionary whose keys are the features
:param target: target var
:param df: pandas DataFrame to work on
:param batch_size: Batch size
:param repeats: how many repeats of the dataset (None = infinite)
:return: tf dataset
"""
buffer_size = df.shape[0]
tf_ds = tf.data.Dataset.from_tensor_slices((dict(df[feature_dict.keys()]), df[target]))
# tf_ds = tf_ds.batch(batch_size, drop_remainder=True, deterministic=False, num_parallel_calls=tf.data.AUTOTUNE).repeat().prefetch(buffer_size)
if repeats == 0:
tf_ds = tf_ds.shuffle(reshuffle_each_iteration=True, buffer_size=buffer_size)
tf_ds = tf_ds.batch(batch_size, drop_remainder=True, deterministic=False, num_parallel_calls=tf.data.AUTOTUNE)
tf_ds = tf_ds.prefetch(buffer_size=buffer_size)
tf_ds = tf_ds.cache()
else:
tf_ds = tf_ds.batch(batch_size, deterministic=False, num_parallel_calls=tf.data.AUTOTUNE).repeat(repeats).prefetch(buffer_size)
return tf_ds
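# --- Illustrative usage sketch (toy DataFrame; the column names and feature_dict entries are
# hypothetical but follow the format documented above).
def _demo_get_tf_dataset():
    df = pd.DataFrame({'x1': np.random.rand(100),
                       'x2': np.random.choice(['a', 'b'], 100),
                       'y': np.random.randint(0, 2, 100)})
    feature_dict = {'x1': ['cts'], 'x2': ['cat', ['a', 'b'], 'a']}
    return get_tf_dataset(feature_dict, 'y', df, batch_size=32, repeats=1)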
def incr_build(model, by_var, start_list, add_list, get_data_fn, sample_size, feature_dict, target_var,
batch_size, epochs_list, global_valid_df_in,
model_dir=None, plot=False, verbose=0, output_size = 1, **kwargs):
"""
This function builds a sequence of models. The get_data_fn takes a list of values as contained in
start_list and add_list and returns data subset to those values. The initial model is built on the
values of start_list and then evaluated on the data subset to the first value of add_list.
At the next step, the data in the first element of add_list is added to the start_list data, the model
is updated and the evaluation is conducted on the second element of add_list
:param model: input model structure
:type model: tf keras model
:param start_list: list of (general) time periods for model build for the first model build
:type start_list: list
:param add_list: list of out-of-time periods to evaluate
:type add_list: list
:param get_data_fn: function to get a pandas DataFrame of data to work on
:type get_data_fn: function
:param sample_size: size of pandas DataFrames to get
:type sample_size: int
:param feature_dict: dictionary of features in the model
:type feature_dict: dict
:param target_var: target variable of model build
:type target_var: str
:param batch_size: size of batches for model build
:type batch_size: int
:param epochs_list: list (length 2) of epochs for model fit; entry 0 is initial model, entry 1 is subsequent
models
:type epochs_list: list
:param global_valid_df_in: DataFrame that includes all the segments in add_list -- for validation
:type global_valid_df_in: pandas DataFrame
:param model_dir: directory to save models
:type model_dir: str
:param plot: if True, plot history
:type plot: bool
:param verbose: print verobisity for keras.fit (0 = quiet, 1 = normal level, 2=talkative)
:type verbose int
:param output_size: the number of columns returned by keras model predict
:type output_size: int
:return: lists of out-of-sample values:
add_list
rmse root mean squared error
corr correlation
"""
if model_dir is not None:
if model_dir[-1] != '/':
model_dir += '/'
if os.path.isdir(model_dir):
os.system('rm -r ' + model_dir)
os.makedirs(model_dir)
build_list = start_list
epochs = epochs_list[0]
segs = []
global_valid_df = global_valid_df_in.copy()
# validation data
if output_size == 1:
global_valid_df['model_dnn_inc'] = np.full((global_valid_df.shape[0]), 0.0)
else:
for c in range(output_size):
global_valid_df['model_dnn_inc' + str(c)] = np.full((global_valid_df.shape[0]), 0.0)
global_valid_ds = get_tf_dataset(feature_dict, target_var, global_valid_df, 10000, 1)
for j, valid in enumerate(add_list):
segs += [valid]
model_df = get_data_fn(build_list, sample_size, **kwargs)
steps_per_epoch = int(model_df.shape[0] / batch_size)
model_ds = get_tf_dataset(feature_dict, target_var, model_df, batch_size=batch_size)
valid_df = get_data_fn([valid], sample_size, **kwargs)
valid_ds = get_tf_dataset(feature_dict, target_var, valid_df, batch_size=batch_size, repeats=1)
print('Data sizes for out-of-sample value {0}: build {1}, validate {2}'.format(valid, model_df.shape[0],
valid_df.shape[0]))
history = model.fit(model_ds, epochs=epochs, steps_per_epoch=steps_per_epoch,
validation_data=valid_ds, verbose=verbose)
gyh = model.predict(global_valid_ds)
i = global_valid_df[by_var] == valid
if output_size == 1:
global_valid_df.loc[i, 'model_dnn_inc'] = gyh[i]
else:
for c in range(output_size):
global_valid_df.loc[i, 'model_dnn_inc' + str(c)] = gyh[i][:,c]
build_list += [valid] # NOTE Accumulates
# build_list = [valid] # NOTE Accumulates NOT
if model_dir is not None:
out_m = model_dir + "before_" + valid + '.h5'
model.save(out_m, overwrite=True, save_format='h5')
if plot:
title = 'model loss\n' + 'Training up to ' + valid
plot_history(history, ['loss', 'val_loss'], 'loss', title=title)
epochs = epochs_list[1]
return segs, global_valid_df
def _marginal_cts(model: tf.keras.Model, column, features_dict: dict, sample_df: pd.DataFrame,
target: str, num_grp: int, num_sample: int, title: str,
sub_titles: str, cols: list):
"""
Build a Marginal Effects plot for a continuous feature
:param model: model
:param column: column(s) of model output, either an int or list of ints
:param features_dict: features in the model
:param sample_df: DataFrame operating on
:param target: target feature
:param num_grp: # of groups model output is sliced into
:param num_sample: # of obs to take from sample_df to build graph
:param title: title for graph
:param sub_titles: titles for subplots
:param cols: colors to use: list of str
:return: plotly_fig and importance metric
"""
sub_titles[6] = 'Box Plots'
# 't' is top spacing, 'b' is bottom, 'None' means there is no graph in that cell. We make
# 2 x 7 -- eliminating the (2,7) graph and putting the RHS graph in the (1,7) position
fig = make_subplots(rows=2, cols=num_grp + 1, subplot_titles=sub_titles,
row_heights=[1, .5],
specs=[[{'t': 0.07, 'b': -.1}, {'t': 0.07, 'b': -.10}, {'t': 0.07, 'b': -.10},
{'t': 0.07, 'b': -.10}, {'t': 0.07, 'b': -.10}, {'t': 0.07, 'b': -.10},
{'t': 0.35, 'b': -0.35}],
[{'t': -0.07}, {'t': -.07}, {'t': -.07}, {'t': -0.07}, {'t': -.07},
{'t': -.07}, None]])
# start with top row graphs
# find ranges by MOG and merge
lows = sample_df.groupby('grp')[target].quantile(.01)
highs = sample_df.groupby('grp')[target].quantile(.99)
both = pd.merge(left=lows, right=highs, left_index=True, right_index=True)
both.rename(columns={target + '_x': 'low', target + '_y': 'high'}, inplace=True)
    # repeat these to accommodate the range of the feature we're going to build next
to_join = pd.concat([both] * 11).sort_index()
# range of the feature
xval = np.arange(11) / 10
xval = np.concatenate([xval] * num_grp)
to_join['steps'] = xval
to_join[target] = to_join['low'] + (to_join['high'] - to_join['low']) * to_join['steps']
# now sample the DataFrame
samps = sample_df.groupby('grp').sample(num_sample, replace=True)
samp_num = pd.Series(np.arange(samps.shape[0]))
samps.index = samp_num
samp_num.name = 'samp_num'
samps = pd.concat([samps, samp_num], axis=1)
# samps['samp_num'] = np.arange(samps.shape[0])
# drop the target column -- we're going to replace it with our grid of values
samps.pop(target)
# join in our grid
score_df = | pd.merge(samps, to_join[target], on='grp') | pandas.merge |
# coding: utf8
"""
Utils to convert AIBL dataset in BIDS
"""
def listdir_nohidden(path):
"""
This method lists all the subdirectories of path except the hidden
    folders.
:param path: path whose subdirectories are needed
:return: list of all the subdirectories of path
"""
from os import listdir
return [result for result in listdir(path) if not result.startswith('.')]
def find_T1_folder(subdirectory, path_to_T1_1):
"""
This method checks if the subdirectory contains a T1 image, and it
    returns the path to it.
:param subdirectory: name of the folder
:return: previous path to arrive to the T1 image
"""
import os
path_to_convert = {'MPRAGE_ADNI_confirmed', 'MPRAGE', 'MPRAGE_ADNI_confirmed_RPT', 'MPRAGE_ADNI_confirmed_REPEATX2', 'MPRAGE_ADNI_confirmed_repeat', 'MPRAGE_ADNI_confirmed_REPEAT',
'MPRAGE_ADNI_conf_REPEAT'}
for j in path_to_convert:
path = []
# if conditions which checks if the subfolder contain a T1 image
if j == subdirectory:
path = os.path.join(path_to_T1_1, subdirectory)
return path
if path == []:
return 'NaN' # there are no more folders which could contain T1 images
def find_T1_folder_nodata(subdirectory, path_to_T1_1):
"""
This method checks if the subdirectory contains a T1 image, and it
returns the path. This method differs from the find_T1_folder since for
these folders the exame_date is not present in the clinical excel file
and we will not check if the exame_date corresponds to the date stored
in the path to the image, but they will be converted anyway
:param subdirectory: name of the folder
:return: previous path to arrive to the T1 image
"""
import os
path_to_convert = {'MPRAGESAGISOp2ND', 'MPRAGE_SAG_ISO_p2_ND', 'MPRAGE_SAG_ISO_p2'}
for j in path_to_convert:
path = []
# if conditions which checks if the subfolder contain a T1 image
if j == subdirectory:
path = os.path.join(path_to_T1_1, subdirectory)
return path
if path == []:
return 'NaN' # there are no more folders which could contain T1 images
def find_correspondance_index(i, csv_file):
"""
This method gives as output the index of the csv file analysed which
correspond to the 'i' subject
:param i: subject_ID
:param csv_file: csv file where all the information are listed
:return: index
"""
index = []
for x in csv_file.RID:
if i == str(x):
index = csv_file.RID[csv_file.RID == x].index.tolist()
return index
def find_correspondance_date(index, csv_file):
"""
The method returns the dates reported in the csv_file for the i-subject
:param index: index corresponding to the subject analysed
:param csv_file: csv file where all the information are listed
:return date
"""
return csv_file.EXAMDATE[index]
def match_data(exame_date, i, csv_file):
"""
This method returns the session_ID. It controls if the dates
corresponding to the image (from the name of the subdirectory)
correspond to one of the dates listed from the csv_file for the subject
analysed. The session_ID is the corresponding session for that patient
in that date. It returns -4 if there are no information.
:param exame_date: date where the image has been taken, it is saved
    from the name of the corresponding subdirectory
:param i: subject_ID
:param csv_file: csv file where all the information are listed
:return session_id of the patient
"""
import re
session_ID = []
index = find_correspondance_index(i, csv_file)
csv_date = find_correspondance_date(index, csv_file)
for xx in index:
if str(csv_date[xx]) != '-4':
# check is the date is not '-4'
m = re.search('([0-9].*)-(.*)-(.*)_(.*)_(.*)_(.*)', str(exame_date)) # string from image directory
p = re.search('(.*)/(.*)/(.*)', str(csv_date[xx])) # string from the date of the csv_file
if (p.group(1) == m.group(2)) & (p.group(2) == m.group(3)) & (p.group(3) == m.group(1)):
session_ID = csv_file.VISCODE[xx]
if session_ID == []:
session_ID = '-4'
return session_ID
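# --- Illustrative sketch (toy inputs; the RID, dates and VISCODE below are made up): match_data
# pairs the scan-folder date (YYYY-MM-DD_HH_MM_SS...) with the zero-padded MM/DD/YYYY EXAMDATE
# of the clinical csv and returns the corresponding VISCODE.
def _demo_match_data():
    import pandas as pd
    csv_file = pd.DataFrame({'RID': [42], 'EXAMDATE': ['06/15/2011'], 'VISCODE': ['m18']})
    return match_data('2011-06-15_10_30_00.0', '42', csv_file)  # -> 'm18'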
def list_of_paths():
"""
It lists all the folders which not contain PET images
"""
return ['.DS_Store', 'localizer', 'Space_3D_T2_FLAIR_sag_p2', 'AXIAL_FLAIR', 'MPRAGE_ADNI_confirmed_REPEATX2', 'Axial_PD-T2_TSE',
'Axial_PD-T2_TSE_repeat', 'MPRAGE_SAG_ISO_p2_ND', 'Axial_PD-T2_TSE_confirmed', 'MPRAGESAGISOp2ND', 'MPRAGE_ADNI_confirmed',
'MPRAGE_ADNI_confirmed_repeat', 'MPRAGE_SAG_ISO_p2', 'MPRAGE', 'MPRAGE_ADNI_confirmed_REPEAT', 'Axial_PD-T2_TSE_confirmed_repeat',
'MPRAGE_ADNI_conf_REPEAT', 'Space_3D_T2_FLAIR_sag_p2_REPEAT', 'MPRAGE_ADNI_confirmed_RPT', 'Brain_256_1.6_zoom_4_x_4_iter',
'Space_3D_T2_FLAIR_sag_REPEAT', 'Axial_PD-T2_TSE_RPTconfirmed', 'Axial_PD-T2_TSE_RPT_confirmed', 'Axial_PD-T2_TSE_confirmed_REPEAT',
'flair_t2_spc_irprep_ns_sag_p2_1mm_iso', 'localiser']
def check_subdirectories_pet(subdirectories, sub, no_pet):
"""
It returns the correct subdirectories for the PET images, they should
belong to the list where there all the possible names of the PET images
:param subdirectories:
:param sub: all the possible subdirectories which need to be checked
:param no pet: list of names of folders which not contain PET images
:return subdirectory which is containing a PET image which needs to be
converted
"""
for j in range(len(sub)):
if (sub[j] not in no_pet) & (sub[j] != '.DS_Store'):
subdirectories.append(sub[j])
subdirectories = list(set(subdirectories))
return subdirectories
def dicom_to_nii(subject, output_path, output_filename, image_path):
"""
    Converts the DICOM images into a NIfTI file using dcm2niix (with dcm2nii
    and FreeSurfer's mri_convert as fallbacks)
:param subject:
:param output_path: where nifti image is stored
:param output_filename: name of the nifti image
:param image_path: where dicom files are stored
:return: Image in a nifti format
"""
import os
import subprocess
from clinica.utils.stream import cprint
from os.path import exists
import shutil
from colorama import Fore
try:
os.makedirs(output_path)
except OSError:
if not os.path.isdir(output_path):
raise
# if image.Is_Dicom:
command = 'dcm2niix -b n -z y -o ' + output_path + ' -f ' + output_filename + ' ' + image_path
subprocess.run(command, shell=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
nifti_file = os.path.join(output_path, output_filename + '.nii.gz')
# Check if conversion worked (output file exists?)
if not exists(nifti_file):
command = 'dcm2nii -a n -d n -e n -i y -g y -p n -m n -r n -x n -o ' + output_path + ' ' + image_path
subprocess.run(command, shell=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
nifti_file_dcm2nii = os.path.join(output_path, 'DE-IDENTIFIED.nii.gz')
if os.path.isfile(nifti_file_dcm2nii):
shutil.move(nifti_file_dcm2nii, nifti_file)
if not exists(nifti_file):
# if the conversion dcm2nii has not worked, freesurfer utils
# mri_convert is used
dicom_image = listdir_nohidden(image_path)
dicom_image = [dcm for dcm in dicom_image if dcm.endswith('.dcm')]
try:
dicom_image = os.path.join(image_path, dicom_image[0])
except IndexError:
cprint(Fore.RED + 'We did not found the dicom files associated with the following directory: '
+ image_path + Fore.RESET)
# it requires the installation of Freesurfer (checked at the beginning)
command = 'mri_convert ' + dicom_image + ' ' + nifti_file
if exists(os.path.expandvars('$FREESURFER_HOME/bin/mri_convert')):
subprocess.run(command, shell=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
else:
cprint('mri_convert (from Freesurfer) not detected. '
+ nifti_file + ' not created...')
if not exists(nifti_file):
cprint(nifti_file + ' should have been created but this did not happen')
return nifti_file
def viscode_to_session(viscode):
"""
Replace the session label 'bl' with 'M00' or capitalize the session
name passed as input.
:param viscode: session name
:return: M00 if is the baseline session or the original session name
capitalized
"""
if viscode == 'bl':
return 'M00'
else:
return viscode.capitalize()
def find_path_to_pet_modality(path_to_dataset, csv_file):
"""
This method creates a Dataframe which contains all the paths to the PET
image of a modality (for example AV45 or PIB)
:param path_to_dataset: path to AIBL dataset
:param csv_file: file which correspond to the modality
:return: A dataframe which contains the path for PET images for a
single modality and subject_ID and session_ID are reported for each
path
"""
import os
import pandas
# TODO
# exclude_subjects = get_exclude_subject(file.txt)
no_pet = list_of_paths()
subjects_ID = listdir_nohidden(path_to_dataset)
# selection of the subjects_ID from the folder downloaded
# this subject must be discarded since it is only a sample and not a patient
if '0151083' in subjects_ID:
del subjects_ID[subjects_ID.index('0151083')]
sub_ID = []
ses_ID = []
path_pet = []
# Iteration through all the subjects_ID
def is_int(x):
for i in x:
if int(i) in list(csv_file.RID):
yield i
# def append_path(image_ID):
for i in is_int(subjects_ID):
# check if the subject is present in the csv_file for the modality selected
subdirectories = []
path_to_pet_1 = os.path.join(path_to_dataset, str(i))
# subdirectory_all = os.listdir(path_to_pet_1)
subdirectory_all = listdir_nohidden(path_to_pet_1)
subdirectories = check_subdirectories_pet(subdirectories, subdirectory_all, no_pet)
# selection only of the folders which contain PET image
for j in range(len(subdirectories)):
path_to_pet_2 = os.path.join(path_to_pet_1, subdirectories[j])
exame_date = listdir_nohidden(path_to_pet_2)
# exame date of the image which is going to be converted
for x in range(len(exame_date)):
# selection of the session_ID matching the data in the csv_file with the one of the image
session_ID = match_data(exame_date[x], i, csv_file)
if session_ID != '-4':
path_to_pet_3 = os.path.join(path_to_pet_2, str(exame_date[x]))
# For the RID 1607 there are two PET images of the flute modality, and we select the first
if i == '1607':
if subdirectories[j] == 'Flute_256_1.6_Zoom_plain_4_x_4_Iter':
image_ID = ['I442930']
else:
image_ID = listdir_nohidden(path_to_pet_3)
else:
image_ID = listdir_nohidden(path_to_pet_3)
for y in range(len(image_ID)):
# final path to find the image we want to convert
path_to_pet = os.path.join(path_to_pet_3, image_ID[y]) #
sub_ID.append(i)
ses_ID.append(session_ID)
path_pet.append(path_to_pet)
data = pandas.DataFrame({'Subjects_ID': sub_ID,
'Session_ID': ses_ID,
'Path_to_pet': path_pet})
# data=final dataframe
return data
def find_path_to_T1_ADNI(file_mri, subjects_ID, path_to_dataset):
"""
This method creates a Dataframe which contains all the paths to the T1
images which are ADNI compliant (as explained in the AIBL website).
    These images differ from the other T1 images of the dataset since the
    exam date is reported in the csv_file.
:param file_mri: in the clinical data there are two files which
describe the parameters of the T1 images (MRI 1.5 T and MRI 3T)
    :param subjects_ID: subjects_id in the downloaded dataset
:param path_to_dataset: path to AIBL dataset
:return: A dataframe which contains the path for T1 images and
subject_ID and session_ID are reported for each path
"""
import os
sub_ID = []
ses_ID = []
path_T1 = []
for i in subjects_ID:
for jj in file_mri:
# it checks all the file_mri
if int(i) in list(jj.RID):
# check if the information of the subject are present in the csv_file
path_to_T1_1 = os.path.join(path_to_dataset, str(i))
# subdirectories = os.listdir(path_to_T1_1)
subdirectories = listdir_nohidden(path_to_T1_1)
for j in range(len(subdirectories)):
# check if the subdirectory can contain a T1 image
path_to_T1_2 = find_T1_folder(subdirectories[j], path_to_T1_1)
if path_to_T1_2 != 'NaN':
exame_date = listdir_nohidden(path_to_T1_2) # this is the string I need to compare with the csv
for x in range(len(exame_date)):
# check if the corresponding session_ID can be found in the csv_file
session_ID = match_data(exame_date[x], i, jj)
if session_ID != '-4':
path_to_T1_3 = os.path.join(path_to_T1_2, str(exame_date[x]))
image_ID = listdir_nohidden(path_to_T1_3)
for y in range(len(image_ID)):
# compute the final path
path_to_T1 = os.path.join(path_to_T1_3, image_ID[y])
sub_ID.append(i)
ses_ID.append(session_ID)
path_T1.append(path_to_T1)
return [sub_ID, ses_ID, path_T1]
def find_path_to_T1_SAG(path_to_dataset, subjects_ID, sub_ID, ses_ID, path_T1):
"""
This method creates a Dataframe which contains all the paths to the T1
images which are not ADNI compliant, they contain the word "SAG" in
their name
:param path_to_dataset: path to AIBL dataset
:param subjects_ID: subjects_id in the dataset dowloaded
:param sub_ID: the previous list (from T1_ADNI) where new subjects ID
will be appended
:param ses_ID: the previous list (from T1_ADNI) where new session ID
will be appended
:param path_T1:the previous list (from T1_ADNI) where new paths will be
appended
:return: it completes the list of all the T1 paths including all the
images where we didn't find the exame-data but we can fix it with a
further analysis
"""
import os
for i in subjects_ID:
subdirectory_for_subject = []
path_to_T1_1 = os.path.join(path_to_dataset, str(i))
# subdirectories = os.listdir(path_to_T1_1)
subdirectories = listdir_nohidden(path_to_T1_1)
for j in range(len(subdirectories)):
# we convert only the images which are in this list and we take only one of them for subject
if subdirectories[j] in ['MPRAGESAGISOp2ND', 'MPRAGE_SAG_ISO_p2_ND', 'MPRAGE_SAG_ISO_p2']:
subdirectory_for_subject.append(subdirectories[j])
if not subdirectory_for_subject:
pass
else:
path_to_T1_2 = os.path.join(path_to_T1_1, subdirectory_for_subject[0])
exame_date = listdir_nohidden(path_to_T1_2)
if i in [342, 557]:
session_ID = 'M54'
else:
session_ID = 'M00'
if (i in sub_ID and session_ID != ses_ID[sub_ID.index(i)]) or (i not in sub_ID):
# if for a subject in the same session we have both this image and the "ADNI" compliant we are converting the second one since the exame-date is more precise
path_to_T1_3 = os.path.join(path_to_T1_2, str(exame_date[0]))
image_ID = listdir_nohidden(path_to_T1_3)
path_to_T1 = os.path.join(path_to_T1_3, image_ID[0])
# we append the result to the list
sub_ID.append(i)
ses_ID.append(session_ID)
path_T1.append(path_to_T1)
return [sub_ID, ses_ID, path_T1]
def find_path_to_T1(path_to_dataset, path_to_csv):
"""
This method creates a DataFrame for the T1 images, where for each of
them the subject ID, the session ID and the path to the image are
reported
:param path_to_dataset: path to AIBL dataset
:param path_to_csv: path to the csv files downloaded
:return: pandas dataframe which contains all the paths for the T1
    images, and the corresponding subject_ID and session_ID
"""
import os
import pandas
import glob
# two csv_files contain information regarding the T1w MRI images
mri_meta = pandas.read_csv(glob.glob(os.path.join(path_to_csv, "aibl_mrimeta_*.csv"))[0])
mri_3meta = pandas.read_csv(glob.glob(os.path.join(path_to_csv, "aibl_mri3meta_*.csv"))[0])
file_mri = [mri_meta, mri_3meta]
subjects_ID = listdir_nohidden(path_to_dataset)
# list of all the folders which correspond to the subject_ID
# all the subjects downloaded are taken into account for the conversion, except this sample
if '0151083' in subjects_ID:
del subjects_ID[subjects_ID.index('0151083')]
[sub_ID, ses_ID, path_T1] = find_path_to_T1_ADNI(file_mri, subjects_ID, path_to_dataset)
[sub_ID, ses_ID, path_T1] = find_path_to_T1_SAG(path_to_dataset, subjects_ID, sub_ID, ses_ID, path_T1)
data = pandas.DataFrame({'Subjects_ID': sub_ID,
'Session_ID': ses_ID,
'Path_to_T1': path_T1})
# data= final dataframe
return data
# Covert the AIBL PET images into the BIDS specification.
# There are three pet modalities: av45, pib, flute. All of them are converted
# in BIDS
def paths_to_bids(path_to_dataset, path_to_csv, bids_dir, modality):
"""
This method converts all the T1 images found in the AIBL dataset
downloaded in BIDS
:param path_to_dataset: path_to_dataset
:param path_to_csv: path to the csv file containing clinical data
:param bids_dir: path to save the AIBL-T1-dataset converted in a
BIDS format
:param modality: string 't1', 'av45', 'flute' or 'pib'
:return: list of all the images that are potentially converted in a
BIDS format and saved in the bids_dir. This does not guarantee
existence
"""
from os.path import join, exists
from numpy import nan
import pandas as pds
from clinica.utils.stream import cprint
from multiprocessing.dummy import Pool
from multiprocessing import cpu_count, Value
import glob
if modality.lower() not in ['t1', 'av45', 'flute', 'pib']:
# This should never be reached
raise RuntimeError(modality.lower()
+ ' is not supported for conversion')
counter = None
def init(args):
""" store the counter for later use """
global counter
counter = args
def create_file(image):
global counter
subject = image.Subjects_ID
session = image.Session_ID
name_of_path = {'t1': 'Path_to_T1',
'av45': 'Path_to_pet',
'flute': 'Path_to_pet',
'pib': 'Path_to_pet'}
        # depending on the dataframe, there are different ways of accessing
        # the image path
image_path = image[name_of_path[modality]]
with counter.get_lock():
counter.value += 1
if image_path is nan:
cprint('No path specified for ' + subject + ' in session '
+ session)
return nan
cprint('[' + modality.upper() + '] Processing subject ' + str(subject)
+ ' - session ' + session + ', ' + str(counter.value) + ' / '
+ str(total))
session = viscode_to_session(session)
# creation of the path
if modality == 't1':
output_path = join(bids_dir, 'sub-AIBL' + subject,
'ses-' + session, 'anat')
output_filename = 'sub-AIBL' + subject + '_ses-' + session + '_T1w'
elif modality in ['flute', 'pib', 'av45']:
output_path = join(bids_dir, 'sub-AIBL' + subject,
'ses-' + session, 'pet')
output_filename = 'sub-AIBL' + subject + '_ses-' + session \
+ '_task-rest_acq-' + modality + '_pet'
# image is saved following BIDS specifications
if exists(join(output_path, output_filename + '.nii.gz')):
cprint('Subject ' + str(subject) + ' - session '
+ session + ' already processed.')
output_image = join(output_path, output_filename + '.nii.gz')
else:
output_image = dicom_to_nii(subject,
output_path,
output_filename,
image_path)
return output_image
# read the dataframe where subject_ID, session_ID and path are stored
if modality == 't1':
images = find_path_to_T1(path_to_dataset, path_to_csv)
else:
path_to_csv_pet_modality = glob.glob(join(
path_to_csv, 'aibl_' + modality + 'meta_*.csv')
)[0]
if not exists(path_to_csv_pet_modality):
raise FileNotFoundError(path_to_csv_pet_modality
+ ' file not found in clinical data folder')
# The latest version of the Flutemetamol CSV file (aibl_flutemeta_01-Jun-2018.csv)
# has an extra column for some rows, while the PET tracer CSV files otherwise
# share the same columns. Restricting usecols to those columns fixes the issue.
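# Note that a multi-character (regex) separator such as ',|;' makes pandas
# fall back to the slower python parsing engine.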
df_pet = pds.read_csv(path_to_csv_pet_modality, sep=',|;', usecols=list(range(0, 36)))
images = find_path_to_pet_modality(path_to_dataset,
df_pet)
images.to_csv(join(bids_dir, modality + '_paths_aibl.tsv'),
index=False, sep='\t', encoding='utf-8')
counter = Value('i', 0)
total = images.shape[0]
# Reshape the input dataframe into a list of rows to hand to the workers
images_list = []
for i in range(total):
images_list.append(images.iloc[i])
# the initializer is used with the counter variable to keep track of how
# many files have been processed
poolrunner = Pool(cpu_count(), initializer=init, initargs=(counter,))
output_file_treated = poolrunner.map(create_file, images_list)
del counter
return output_file_treated
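# Hypothetical usage sketch (directories are placeholders): convert the T1w
# images of a local AIBL download into a BIDS tree; the returned list holds the
# output paths, with NaN entries where no source image path was available.
#
#     converted = paths_to_bids('/data/AIBL', '/data/AIBL/clinical_csv',
#                               '/data/AIBL_BIDS', 't1')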
# -- Methods for the clinical data --
def create_participants_df_AIBL(input_path, clinical_spec_path, clinical_data_dir, delete_non_bids_info=True):
"""
This methods create a participants file for the AIBL dataset where
information regarding the patients are reported
:param input_path: path to the input directory :param
clinical_spec_path: path to the clinical file :param clinical_data_dir:
directory to the clinical data files :param delete_non_bids_info: if
True delete all the rows of the subjects that are not available in the
BIDS dataset :return: a pandas dataframe that contains the participants
data and it is saved in a tsv file
"""
import pandas as pd
import os
from os import path
import re
import numpy as np
import glob
fields_bids = ['participant_id']
fields_dataset = []
prev_location = ''
index_to_drop = []
location_name = 'AIBL location'
if not os.path.exists(clinical_spec_path):
raise FileNotFoundError(clinical_spec_path
+ ' not found in clinical data.')
participants_specs = pd.read_excel(clinical_spec_path,
sheet_name='participant.tsv')
participant_fields_db = participants_specs['AIBL']
field_location = participants_specs[location_name]
participant_fields_bids = participants_specs['BIDS CLINICA']
# Extract the list of the available fields for the dataset (and the corresponding BIDS version)
for i in range(0, len(participant_fields_db)):
if not pd.isnull(participant_fields_db[i]):
fields_bids.append(participant_fields_bids[i])
fields_dataset.append(participant_fields_db[i])
# Init the dataframe that will be saved in the file participant.tsv
participant_df = pd.DataFrame(columns=fields_bids)
csv_files = []
for i in range(0, len(participant_fields_db)):
# If a non-empty field is found
if not pd.isnull(participant_fields_db[i]):
# Extract the file location of the field and read the value from the file
tmp = field_location[i].split('/')
location = tmp[0]
# If a sheet is available
if len(tmp) > 1:
sheet = tmp[1]
else:
sheet = ''
# Check whether the file to open for this field is the same as for the previous field
if location == prev_location and sheet == prev_sheet:
pass
else:
file_ext = os.path.splitext(location)[1]
file_to_read_path = path.join(clinical_data_dir, location)
if file_ext == '.xlsx':
file_to_read = pd.read_excel(glob.glob(file_to_read_path)[0], sheet_name=sheet)
elif file_ext == '.csv':
file_to_read = pd.read_csv(glob.glob(file_to_read_path)[0])
prev_location = location
prev_sheet = sheet
field_col_values = []
# For each field in fields_dataset extract all the column values
for j in range(0, len(file_to_read)):
# Convert the alternative_id_1 to string if it is an integer/float
if participant_fields_bids[i] == 'alternative_id_1' and \
(file_to_read[participant_fields_db[i]].dtype == np.float64 or file_to_read[
participant_fields_db[i]].dtype == np.int64):
if not pd.isnull(file_to_read.at[j, participant_fields_db[i]]):
# value_to_append = str(file_to_read.get_value(j, participant_fields_db[i])).rstrip('.0')
value_to_append = str(file_to_read.at[j, participant_fields_db[i]])
else:
value_to_append = np.NaN
else:
value_to_append = file_to_read.at[j, participant_fields_db[i]]
field_col_values.append(value_to_append)
# Add the extracted column to the participant_df
participant_df[participant_fields_bids[i]] = pd.Series(field_col_values)
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
from datetime import datetime
from distutils.version import LooseVersion
import inspect
import sys
import unittest
from io import StringIO
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from pyspark import StorageLevel
from pyspark.ml.linalg import SparseVector
from pyspark.sql.types import StructType
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.frame import CachedDataFrame
from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.utils import name_like_string
class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=np.random.rand(9),
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
return pdf, psdf
def test_dataframe(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf["a"] + 1, pdf["a"] + 1)
self.assert_eq(psdf.columns, pd.Index(["a", "b"]))
self.assert_eq(psdf[psdf["b"] > 2], pdf[pdf["b"] > 2])
self.assert_eq(-psdf[psdf["b"] > 2], -pdf[pdf["b"] > 2])
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b.mean(), pdf.b.mean())
self.assert_eq(psdf.b.var(), pdf.b.var())
self.assert_eq(psdf.b.std(), pdf.b.std())
pdf, psdf = self.df_pair
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assertEqual(psdf.a.notnull().rename("x").name, "x")
# check ps.DataFrame(ps.Series)
pser = pd.Series([1, 2, 3], name="x", index=np.random.rand(3))
psser = ps.from_pandas(pser)
self.assert_eq(pd.DataFrame(pser), ps.DataFrame(psser))
# check psdf[pd.Index]
pdf, psdf = self.df_pair
column_mask = pdf.columns.isin(["a", "b"])
index_cols = pdf.columns[column_mask]
self.assert_eq(psdf[index_cols], pdf[index_cols])
def _check_extension(self, psdf, pdf):
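# For pandas 1.1.x through 1.2.1, compare values only approximately and check
# that every column kept an extension dtype; other versions are compared exactly.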
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(psdf, pdf, check_exact=False)
for dtype in psdf.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assert_eq(psdf, pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1, 2, None, 4], dtype="Int8"),
"b": pd.Series([1, None, None, 4], dtype="Int16"),
"c": pd.Series([1, 2, None, None], dtype="Int32"),
"d": pd.Series([None, 2, None, 4], dtype="Int64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_astype_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": [1, 2, None, 4],
"b": [1, None, None, 4],
"c": [1, 2, None, None],
"d": [None, 2, None, 4],
}
)
psdf = ps.from_pandas(pdf)
astype = {"a": "Int8", "b": "Int16", "c": "Int32", "d": "Int64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_extension_object_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series(["a", "b", None, "c"], dtype="string"),
"b": pd.Series([True, None, False, True], dtype="boolean"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_astype_extension_object_dtypes(self):
pdf = pd.DataFrame({"a": ["a", "b", None, "c"], "b": [True, None, False, True]})
psdf = ps.from_pandas(pdf)
astype = {"a": "string", "b": "boolean"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_extension_float_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, None, 4.0], dtype="Float32"),
"b": pd.Series([1.0, None, 3.0, 4.0], dtype="Float64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + 1, pdf + 1)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_astype_extension_float_dtypes(self):
pdf = pd.DataFrame({"a": [1.0, 2.0, None, 4.0], "b": [1.0, None, 3.0, 4.0]})
psdf = ps.from_pandas(pdf)
astype = {"a": "Float32", "b": "Float64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psser = ps.Series([4, 5, 6])
self.assertRaises(ValueError, lambda: psdf.insert(0, "y", psser))
self.assertRaisesRegex(
ValueError, "cannot insert b, already exists", lambda: psdf.insert(1, "b", 10)
)
self.assertRaisesRegex(
TypeError,
'"column" should be a scalar value or tuple that contains scalar values',
lambda: psdf.insert(0, list("abc"), psser),
)
self.assertRaisesRegex(
TypeError,
"loc must be int",
lambda: psdf.insert((1,), "b", 10),
)
self.assertRaisesRegex(
NotImplementedError,
"Assigning column name as tuple is only supported for MultiIndex columns for now.",
lambda: psdf.insert(0, ("e",), 10),
)
self.assertRaises(ValueError, lambda: psdf.insert(0, "e", [7, 8, 9, 10]))
self.assertRaises(ValueError, lambda: psdf.insert(0, "f", ps.Series([7, 8])))
self.assertRaises(AssertionError, lambda: psdf.insert(100, "y", psser))
self.assertRaises(AssertionError, lambda: psdf.insert(1, "y", psser, allow_duplicates=True))
#
# DataFrame with MultiIndex as columns
#
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
self.assertRaisesRegex(
ValueError, "cannot insert d, already exists", lambda: psdf.insert(4, "d", 11)
)
self.assertRaisesRegex(
ValueError,
r"cannot insert \('x', 'a', 'b'\), already exists",
lambda: psdf.insert(4, ("x", "a", "b"), 11),
)
self.assertRaisesRegex(
ValueError,
'"column" must have length equal to number of column levels.',
lambda: psdf.insert(4, ("e",), 11),
)
def test_inplace(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["a"] = pdf["a"] + 10
psdf["a"] = psdf["a"] + 10
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
def test_assign_list(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
psdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
with self.assertRaisesRegex(ValueError, "Length of values does not match length of index"):
psdf["z"] = [10, 20, 30, 40, 50, 60, 70, 80]
def test_dataframe_multiindex_columns(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["x"], pdf["x"])
self.assert_eq(psdf["y.z"], pdf["y.z"])
self.assert_eq(psdf["x"]["b"], pdf["x"]["b"])
self.assert_eq(psdf["x"]["b"]["2"], pdf["x"]["b"]["2"])
self.assert_eq(psdf.x, pdf.x)
self.assert_eq(psdf.x.b, pdf.x.b)
self.assert_eq(psdf.x.b["2"], pdf.x.b["2"])
self.assertRaises(KeyError, lambda: psdf["z"])
self.assertRaises(AttributeError, lambda: psdf.z)
self.assert_eq(psdf[("x",)], pdf[("x",)])
self.assert_eq(psdf[("x", "a")], pdf[("x", "a")])
self.assert_eq(psdf[("x", "a", "1")], pdf[("x", "a", "1")])
def test_dataframe_column_level_name(self):
column = pd.Index(["A", "B", "C"], name="X")
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=column, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
def test_dataframe_multiindex_names_level(self):
columns = pd.MultiIndex.from_tuples(
[("X", "A", "Z"), ("X", "B", "Z"), ("Y", "C", "Z"), ("Y", "D", "Z")],
names=["lvl_1", "lvl_2", "lv_3"],
)
pdf = pd.DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]],
columns=columns,
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
psdf1 = ps.from_pandas(pdf)
self.assert_eq(psdf1.columns.names, pdf.columns.names)
self.assertRaises(
AssertionError,
lambda: ps.DataFrame(psdf1._internal.copy(column_label_names=("level",))),
)
self.assert_eq(psdf["X"], pdf["X"])
self.assert_eq(psdf["X"].columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"].to_pandas().columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"]["A"], pdf["X"]["A"])
self.assert_eq(psdf["X"]["A"].columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf["X"]["A"].to_pandas().columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf[("X", "A")], pdf[("X", "A")])
self.assert_eq(psdf[("X", "A")].columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A")].to_pandas().columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A", "Z")], pdf[("X", "A", "Z")])
def test_itertuples(self):
pdf = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
pdf.itertuples(index=False, name="Animal"), psdf.itertuples(index=False, name="Animal")
):
self.assert_eq(ptuple, ktuple)
for ptuple, ktuple in zip(pdf.itertuples(name=None), psdf.itertuples(name=None)):
self.assert_eq(ptuple, ktuple)
pdf.index = pd.MultiIndex.from_arrays(
[[1, 2], ["black", "brown"]], names=("count", "color")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf.columns = pd.MultiIndex.from_arrays(
[["CA", "WA"], ["age", "children"]], names=("origin", "info")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
(pdf + 1).itertuples(name="num"), (psdf + 1).itertuples(name="num")
):
self.assert_eq(ptuple, ktuple)
# DataFrames with a large number of columns (>254)
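# (on some Python/pandas versions itertuples falls back from namedtuples to
# plain tuples beyond 254 columns, so both sides should still match)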
pdf = pd.DataFrame(np.random.random((1, 255)))
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="num"), psdf.itertuples(name="num")):
self.assert_eq(ptuple, ktuple)
def test_iterrows(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
for (pdf_k, pdf_v), (psdf_k, psdf_v) in zip(pdf.iterrows(), psdf.iterrows()):
self.assert_eq(pdf_k, psdf_k)
self.assert_eq(pdf_v, psdf_v)
def test_reset_index(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index().index, pdf.reset_index().index)
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.index.name = "a"
psdf.index.name = "a"
with self.assertRaisesRegex(ValueError, "cannot insert a, already exists"):
psdf.reset_index()
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
# inplace
pser = pdf.a
psser = psdf.a
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf.columns = ["index", "b"]
psdf.columns = ["index", "b"]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
def test_reset_index_with_default_index_types(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
with ps.option_context("compute.default_index_type", "sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed-sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed"):
# the 'distributed' default index differs from pandas', so compare after resetting it.
self.assert_eq(psdf.reset_index().to_pandas().reset_index(drop=True), pdf.reset_index())
def test_reset_index_with_multiindex_columns(self):
index = pd.MultiIndex.from_tuples(
[("bird", "falcon"), ("bird", "parrot"), ("mammal", "lion"), ("mammal", "monkey")],
names=["class", "name"],
)
columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")])
pdf = pd.DataFrame(
[(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")],
index=index,
columns=columns,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(level="class"), pdf.reset_index(level="class"))
self.assert_eq(
psdf.reset_index(level="class", col_level=1),
pdf.reset_index(level="class", col_level=1),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="species"),
pdf.reset_index(level="class", col_level=1, col_fill="species"),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="genus"),
pdf.reset_index(level="class", col_level=1, col_fill="genus"),
)
with self.assertRaisesRegex(IndexError, "Index has only 2 levels, not 3"):
psdf.reset_index(col_level=2)
pdf.index.names = [("x", "class"), ("y", "name")]
psdf.index.names = [("x", "class"), ("y", "name")]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with self.assertRaisesRegex(ValueError, "Item must have length equal to number of levels."):
psdf.reset_index(col_level=1)
def test_index_to_frame_reset_index(self):
def check(psdf, pdf):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
pdf, psdf = self.df_pair
check(psdf.index.to_frame(), pdf.index.to_frame())
check(psdf.index.to_frame(index=False), pdf.index.to_frame(index=False))
check(psdf.index.to_frame(name="a"), pdf.index.to_frame(name="a"))
check(psdf.index.to_frame(index=False, name="a"), pdf.index.to_frame(index=False, name="a"))
check(psdf.index.to_frame(name=("x", "a")), pdf.index.to_frame(name=("x", "a")))
check(
psdf.index.to_frame(index=False, name=("x", "a")),
pdf.index.to_frame(index=False, name=("x", "a")),
)
def test_multiindex_column_access(self):
columns = pd.MultiIndex.from_tuples(
[
("a", "", "", "b"),
("c", "", "d", ""),
("e", "", "f", ""),
("e", "g", "", ""),
("", "", "", "h"),
("i", "", "", ""),
]
)
pdf = pd.DataFrame(
[
(1, "a", "x", 10, 100, 1000),
(2, "b", "y", 20, 200, 2000),
(3, "c", "z", 30, 300, 3000),
],
columns=columns,
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["a"], pdf["a"])
self.assert_eq(psdf["a"]["b"], pdf["a"]["b"])
self.assert_eq(psdf["c"], pdf["c"])
self.assert_eq(psdf["c"]["d"], pdf["c"]["d"])
self.assert_eq(psdf["e"], pdf["e"])
self.assert_eq(psdf["e"][""]["f"], pdf["e"][""]["f"])
self.assert_eq(psdf["e"]["g"], pdf["e"]["g"])
self.assert_eq(psdf[""], pdf[""])
self.assert_eq(psdf[""]["h"], pdf[""]["h"])
self.assert_eq(psdf["i"], pdf["i"])
self.assert_eq(psdf[["a", "e"]], pdf[["a", "e"]])
self.assert_eq(psdf[["e", "a"]], pdf[["e", "a"]])
self.assert_eq(psdf[("a",)], pdf[("a",)])
self.assert_eq(psdf[("e", "g")], pdf[("e", "g")])
# self.assert_eq(psdf[("i",)], pdf[("i",)])
self.assert_eq(psdf[("i", "")], pdf[("i", "")])
self.assertRaises(KeyError, lambda: psdf[("a", "b")])
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df.__repr__()
df["a"] = df["id"]
self.assertEqual(df.__repr__(), df.to_pandas().__repr__())
def test_repr_html_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df._repr_html_()
df["a"] = df["id"]
self.assertEqual(df._repr_html_(), df.to_pandas()._repr_html_())
def test_empty_dataframe(self):
pdf = pd.DataFrame({"a": pd.Series([], dtype="i1"), "b": pd.Series([], dtype="str")})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_all_null_dataframe(self):
pdf = pd.DataFrame(
{
"a": [None, None, None, "a"],
"b": [None, None, None, 1],
"c": [None, None, None] + list(np.arange(1, 2).astype("i1")),
"d": [None, None, None, 1.0],
"e": [None, None, None, True],
"f": [None, None, None] + list(pd.date_range("20130101", periods=1)),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
pdf = pd.DataFrame(
{
"a": pd.Series([None, None, None], dtype="float64"),
"b": pd.Series([None, None, None], dtype="str"),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_nullable_object(self):
pdf = pd.DataFrame(
{
"a": list("abc") + [np.nan, None],
"b": list(range(1, 4)) + [np.nan, None],
"c": list(np.arange(3, 6).astype("i1")) + [np.nan, None],
"d": list(np.arange(4.0, 7.0, dtype="float64")) + [np.nan, None],
"e": [True, False, True, np.nan, None],
"f": list(pd.date_range("20130101", periods=3)) + [np.nan, None],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_assign(self):
pdf, psdf = self.df_pair
psdf["w"] = 1.0
pdf["w"] = 1.0
self.assert_eq(psdf, pdf)
psdf.w = 10.0
pdf.w = 10.0
self.assert_eq(psdf, pdf)
psdf[1] = 1.0
pdf[1] = 1.0
self.assert_eq(psdf, pdf)
psdf = psdf.assign(a=psdf["a"] * 2)
pdf = pdf.assign(a=pdf["a"] * 2)
self.assert_eq(psdf, pdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "w"), ("y", "v")])
pdf.columns = columns
psdf.columns = columns
psdf[("a", "c")] = "def"
pdf[("a", "c")] = "def"
self.assert_eq(psdf, pdf)
psdf = psdf.assign(Z="ZZ")
pdf = pdf.assign(Z="ZZ")
self.assert_eq(psdf, pdf)
psdf["x"] = "ghi"
pdf["x"] = "ghi"
self.assert_eq(psdf, pdf)
def test_head(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.head(2), pdf.head(2))
self.assert_eq(psdf.head(3), pdf.head(3))
self.assert_eq(psdf.head(0), pdf.head(0))
self.assert_eq(psdf.head(-3), pdf.head(-3))
self.assert_eq(psdf.head(-10), pdf.head(-10))
with option_context("compute.ordered_head", True):
self.assert_eq(psdf.head(), pdf.head())
def test_attributes(self):
psdf = self.psdf
self.assertIn("a", dir(psdf))
self.assertNotIn("foo", dir(psdf))
self.assertRaises(AttributeError, lambda: psdf.foo)
psdf = ps.DataFrame({"a b c": [1, 2, 3]})
self.assertNotIn("a b c", dir(psdf))
psdf = ps.DataFrame({"a": [1, 2], 5: [1, 2]})
self.assertIn("a", dir(psdf))
self.assertNotIn(5, dir(psdf))
def test_column_names(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.columns, pdf.columns)
self.assert_eq(psdf[["b", "a"]].columns, pdf[["b", "a"]].columns)
self.assert_eq(psdf["a"].name, pdf["a"].name)
self.assert_eq((psdf["a"] + 1).name, (pdf["a"] + 1).name)
self.assert_eq((psdf.a + psdf.b).name, (pdf.a + pdf.b).name)
self.assert_eq((psdf.a + psdf.b.rename("a")).name, (pdf.a + pdf.b.rename("a")).name)
self.assert_eq((psdf.a + psdf.b.rename()).name, (pdf.a + pdf.b.rename()).name)
self.assert_eq((psdf.a.rename() + psdf.b).name, (pdf.a.rename() + pdf.b).name)
self.assert_eq(
(psdf.a.rename() + psdf.b.rename()).name, (pdf.a.rename() + pdf.b.rename()).name
)
def test_rename_columns(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
psdf.columns = ["x", "y"]
pdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
columns = pdf.columns
columns.name = "lvl_1"
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1"])
self.assert_eq(psdf, pdf)
msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with self.assertRaisesRegex(ValueError, msg):
psdf.columns = [1, 2, 3, 4]
# Multi-index columns
pdf = pd.DataFrame(
{("A", "0"): [1, 2, 2, 3], ("B", "1"): [1, 2, 3, 4]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
columns = pdf.columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
pdf.columns = ["x", "y"]
psdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
columns.names = ["lvl_1", "lvl_2"]
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1", "lvl_2"])
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
def test_rename_dataframe(self):
pdf1 = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
psdf1 = ps.from_pandas(pdf1)
self.assert_eq(
psdf1.rename(columns={"A": "a", "B": "b"}), pdf1.rename(columns={"A": "a", "B": "b"})
)
result_psdf = psdf1.rename(index={1: 10, 2: 20})
result_pdf = pdf1.rename(index={1: 10, 2: 20})
self.assert_eq(result_psdf, result_pdf)
# inplace
pser = result_pdf.A
psser = result_psdf.A
result_psdf.rename(index={10: 100, 20: 200}, inplace=True)
result_pdf.rename(index={10: 100, 20: 200}, inplace=True)
self.assert_eq(result_psdf, result_pdf)
self.assert_eq(psser, pser)
def str_lower(s) -> str:
return str.lower(s)
self.assert_eq(
psdf1.rename(str_lower, axis="columns"), pdf1.rename(str_lower, axis="columns")
)
def mul10(x) -> int:
return x * 10
self.assert_eq(psdf1.rename(mul10, axis="index"), pdf1.rename(mul10, axis="index"))
self.assert_eq(
psdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
pdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
)
idx = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Y", "D")])
pdf2 = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf2.rename(columns=str_lower), pdf2.rename(columns=str_lower))
self.assert_eq(
psdf2.rename(columns=str_lower, level=0), pdf2.rename(columns=str_lower, level=0)
)
self.assert_eq(
psdf2.rename(columns=str_lower, level=1), pdf2.rename(columns=str_lower, level=1)
)
pdf3 = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list("ab"))
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(psdf3.rename(index=str_lower), pdf3.rename(index=str_lower))
self.assert_eq(
psdf3.rename(index=str_lower, level=0), pdf3.rename(index=str_lower, level=0)
)
self.assert_eq(
psdf3.rename(index=str_lower, level=1), pdf3.rename(index=str_lower, level=1)
)
pdf4 = pdf2 + 1
psdf4 = psdf2 + 1
self.assert_eq(psdf4.rename(columns=str_lower), pdf4.rename(columns=str_lower))
pdf5 = pdf3 + 1
psdf5 = psdf3 + 1
self.assert_eq(psdf5.rename(index=str_lower), pdf5.rename(index=str_lower))
msg = "Either `index` or `columns` should be provided."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename()
msg = "`mapper` or `index` or `columns` should be either dict-like or function type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename(mapper=[str_lower], axis=1)
msg = "Mapper dict should have the same value type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename({"A": "a", "B": 2}, axis=1)
msg = r"level should be an integer between \[0, column_labels_level\)"
with self.assertRaisesRegex(ValueError, msg):
psdf2.rename(columns=str_lower, level=2)
def test_rename_axis(self):
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis("index2", axis=axis).sort_index(),
psdf.rename_axis("index2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["index2"], axis=axis).sort_index(),
psdf.rename_axis(["index2"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis("cols2", axis=axis).sort_index(),
psdf.rename_axis("cols2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["cols2"], axis=axis).sort_index(),
psdf.rename_axis(["cols2"], axis=axis).sort_index(),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pdf2.rename_axis("index2", axis="index", inplace=True)
psdf2.rename_axis("index2", axis="index", inplace=True)
self.assert_eq(pdf2.sort_index(), psdf2.sort_index())
self.assertRaises(ValueError, lambda: psdf.rename_axis(["index2", "index3"], axis=0))
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols2", "cols3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.rename_axis(mapper=["index2"], index=["index3"]))
self.assert_eq(
pdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
psdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index2"}, columns={"missing": "cols2"}).sort_index(),
psdf.rename_axis(
index={"missing": "index2"}, columns={"missing": "cols2"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
psdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
psdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
)
self.assertRaises(
ValueError, lambda: psdf.rename_axis(["index3", "index4", "index5"], axis=0)
)
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols3", "cols4", "cols5"], axis=1))
self.assert_eq(
pdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
psdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index3"}, columns={"missing": "cols3"}).sort_index(),
psdf.rename_axis(
index={"missing": "index3"}, columns={"missing": "cols3"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
psdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
def test_dot(self):
psdf = self.psdf
with self.assertRaisesRegex(TypeError, "Unsupported type DataFrame"):
psdf.dot(psdf)
def test_dot_in_column_name(self):
self.assert_eq(
ps.DataFrame(ps.range(1)._internal.spark_frame.selectExpr("1L as `a.b`"))["a.b"],
ps.Series([1], name="a.b"),
)
def test_aggregate(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=["A", "B", "C"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(), # TODO?: fix column order
pdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(),
)
self.assert_eq(
psdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
pdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
)
self.assertRaises(KeyError, lambda: psdf.agg({"A": ["sum", "min"], "X": ["min", "max"]}))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
pdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
pdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.agg({"X": ["sum", "min"], "Y": ["min", "max"]}))
# non-string names
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=[10, 20, 30]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
pdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
)
self.assert_eq(
psdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
pdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", 10), ("X", 20), ("Y", 30)])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
pdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
pdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
)
pdf = pd.DataFrame(
[datetime(2019, 2, 2, 0, 0, 0, 0), datetime(2019, 2, 3, 0, 0, 0, 0)],
columns=["timestamp"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.timestamp.min(), pdf.timestamp.min())
self.assert_eq(psdf.timestamp.max(), pdf.timestamp.max())
self.assertRaises(ValueError, lambda: psdf.agg(("sum", "min")))
def test_droplevel(self):
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
pdf.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
psdf = ps.from_pandas(pdf)
self.assertRaises(ValueError, lambda: psdf.droplevel(["a", "b"]))
self.assertRaises(ValueError, lambda: psdf.droplevel([1, 1, 1, 1, 1]))
self.assertRaises(IndexError, lambda: psdf.droplevel(2))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a"}))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a": 1}))
self.assertRaises(ValueError, lambda: psdf.droplevel(["level_1", "level_2"], axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(2, axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1"}, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1": 1}, axis=1))
self.assert_eq(pdf.droplevel("a"), psdf.droplevel("a"))
self.assert_eq(pdf.droplevel(["a"]), psdf.droplevel(["a"]))
self.assert_eq(pdf.droplevel(("a",)), psdf.droplevel(("a",)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel("level_1", axis=1), psdf.droplevel("level_1", axis=1))
self.assert_eq(pdf.droplevel(["level_1"], axis=1), psdf.droplevel(["level_1"], axis=1))
self.assert_eq(pdf.droplevel(("level_1",), axis=1), psdf.droplevel(("level_1",), axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
self.assert_eq(pdf.droplevel(-1, axis=1), psdf.droplevel(-1, axis=1))
# Tupled names
pdf.columns.names = [("level", 1), ("level", 2)]
pdf.index.names = [("a", 10), ("x", 20)]
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.droplevel("a"))
self.assertRaises(KeyError, lambda: psdf.droplevel(("a", 10)))
self.assert_eq(pdf.droplevel([("a", 10)]), psdf.droplevel([("a", 10)]))
self.assert_eq(
pdf.droplevel([("level", 1)], axis=1), psdf.droplevel([("level", 1)], axis=1)
)
# non-string names
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis([10.0, 20.0])
)
pdf.columns = pd.MultiIndex.from_tuples([("c", "e"), ("d", "f")], names=[100.0, 200.0])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.droplevel(10.0), psdf.droplevel(10.0))
self.assert_eq(pdf.droplevel([10.0]), psdf.droplevel([10.0]))
self.assert_eq(pdf.droplevel((10.0,)), psdf.droplevel((10.0,)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel(100.0, axis=1), psdf.droplevel(100.0, axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
def test_drop(self):
pdf = pd.DataFrame({"x": [1, 2], "y": [3, 4], "z": [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
# Assert 'labels' or 'columns' parameter is set
expected_error_message = "Need to specify at least one of 'labels' or 'columns'"
with self.assertRaisesRegex(ValueError, expected_error_message):
psdf.drop()
#
# Drop columns
#
# Assert using a str for 'labels' works
self.assert_eq(psdf.drop("x", axis=1), pdf.drop("x", axis=1))
self.assert_eq((psdf + 1).drop("x", axis=1), (pdf + 1).drop("x", axis=1))
# Assert using a list for 'labels' works
self.assert_eq(psdf.drop(["y", "z"], axis=1), pdf.drop(["y", "z"], axis=1))
self.assert_eq(psdf.drop(["x", "y", "z"], axis=1), pdf.drop(["x", "y", "z"], axis=1))
# Assert using 'columns' instead of 'labels' produces the same results
self.assert_eq(psdf.drop(columns="x"), pdf.drop(columns="x"))
self.assert_eq(psdf.drop(columns=["y", "z"]), pdf.drop(columns=["y", "z"]))
self.assert_eq(psdf.drop(columns=["x", "y", "z"]), pdf.drop(columns=["x", "y", "z"]))
self.assert_eq(psdf.drop(columns=[]), pdf.drop(columns=[]))
columns = pd.MultiIndex.from_tuples([(1, "x"), (1, "y"), (2, "z")])
pdf.columns = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(columns=1), pdf.drop(columns=1))
self.assert_eq(psdf.drop(columns=(1, "x")), pdf.drop(columns=(1, "x")))
self.assert_eq(psdf.drop(columns=[(1, "x"), 2]), pdf.drop(columns=[(1, "x"), 2]))
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
self.assertRaises(KeyError, lambda: psdf.drop(columns=3))
self.assertRaises(KeyError, lambda: psdf.drop(columns=(1, "z")))
pdf.index = pd.MultiIndex.from_tuples([("i", 0), ("j", 1)])
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
# non-string names
pdf = pd.DataFrame({10: [1, 2], 20: [3, 4], 30: [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(10, axis=1), pdf.drop(10, axis=1))
self.assert_eq(psdf.drop([20, 30], axis=1), pdf.drop([20, 30], axis=1))
#
# Drop rows
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
# Given labels (and axis = 0)
self.assert_eq(psdf.drop(labels="A", axis=0), pdf.drop(labels="A", axis=0))
self.assert_eq(psdf.drop(labels="A"), pdf.drop(labels="A"))
self.assert_eq((psdf + 1).drop(labels="A"), (pdf + 1).drop(labels="A"))
self.assert_eq(psdf.drop(labels=["A", "C"], axis=0), pdf.drop(labels=["A", "C"], axis=0))
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
# Given index
self.assert_eq(psdf.drop(index="A"), pdf.drop(index="A"))
self.assert_eq(psdf.drop(index=["A", "C"]), pdf.drop(index=["A", "C"]))
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
self.assert_eq(psdf.drop(index=[]), pdf.drop(index=[]))
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
# Non-string names
pdf.index = [10, 20, 30]
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(labels=10, axis=0), pdf.drop(labels=10, axis=0))
self.assert_eq(psdf.drop(labels=[10, 30], axis=0), pdf.drop(labels=[10, 30], axis=0))
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
# MultiIndex
pdf.index = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assertRaises(NotImplementedError, lambda: psdf.drop(labels=[("a", "x")]))
#
# Drop rows and columns
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(index="A", columns="X"), pdf.drop(index="A", columns="X"))
self.assert_eq(
psdf.drop(index=["A", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=[], columns=["X", "Z"]),
pdf.drop(index=[], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=[]),
pdf.drop(index=["A", "B", "C"], columns=[]),
)
self.assert_eq(
psdf.drop(index=[], columns=[]),
pdf.drop(index=[], columns=[]),
)
self.assertRaises(
ValueError,
lambda: psdf.drop(labels="A", axis=0, columns="X"),
)
def _test_dropna(self, pdf, axis):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(psdf.dropna(axis=axis, subset=["x"]), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(psdf.dropna(axis=axis, subset="x"), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"]), pdf.dropna(axis=axis, subset=["y", "z"])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"], how="all"),
pdf.dropna(axis=axis, subset=["y", "z"], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
pdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pser = pdf2[pdf2.columns[0]]
psser = psdf2[psdf2.columns[0]]
pdf2.dropna(inplace=True, axis=axis)
psdf2.dropna(inplace=True, axis=axis)
self.assert_eq(psdf2, pdf2)
self.assert_eq(psser, pser)
# multi-index
columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
if axis == 0:
pdf.columns = columns
else:
pdf.index = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "x")]), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=("a", "x")), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
)
def test_dropna_axis_index(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self._test_dropna(pdf, axis=0)
# empty
pdf = pd.DataFrame(index=np.random.rand(6))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(), pdf.dropna())
self.assert_eq(psdf.dropna(how="all"), pdf.dropna(how="all"))
self.assert_eq(psdf.dropna(thresh=0), pdf.dropna(thresh=0))
self.assert_eq(psdf.dropna(thresh=1), pdf.dropna(thresh=1))
with self.assertRaisesRegex(ValueError, "No axis named foo"):
psdf.dropna(axis="foo")
self.assertRaises(KeyError, lambda: psdf.dropna(subset="1"))
with self.assertRaisesRegex(ValueError, "invalid how option: 1"):
psdf.dropna(how=1)
with self.assertRaisesRegex(TypeError, "must specify how or thresh"):
psdf.dropna(how=None)
def test_dropna_axis_column(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=[str(r) for r in np.random.rand(6)],
).T
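# transposed so that 'x', 'y' and 'z' become row labels and the missing values
# lie along axis=1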
self._test_dropna(pdf, axis=1)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
ValueError, "The length of each subset must be the same as the index size."
):
psdf.dropna(subset=(["x", "y"]), axis=1)
# empty
pdf = pd.DataFrame({"x": [], "y": [], "z": []})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=1), pdf.dropna(axis=1))
self.assert_eq(psdf.dropna(axis=1, how="all"), pdf.dropna(axis=1, how="all"))
self.assert_eq(psdf.dropna(axis=1, thresh=0), pdf.dropna(axis=1, thresh=0))
self.assert_eq(psdf.dropna(axis=1, thresh=1), pdf.dropna(axis=1, thresh=1))
def test_dtype(self):
pdf = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
# multi-index columns
columns = pd.MultiIndex.from_tuples(zip(list("xxxyyz"), list("abcdef")))
pdf.columns = columns
psdf.columns = columns
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
def test_fillna(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({"x": -1, "y": -2, "z": -5}), pdf.fillna({"x": -1, "y": -2, "z": -5})
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
pdf = pdf.set_index(["x", "y"])
psdf = ps.from_pandas(pdf)
# check multi index
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
pser = pdf.z
psser = psdf.z
pdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
psdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
s_nan = pd.Series([-1, -2, -5], index=["x", "y", "z"], dtype=int)
self.assert_eq(psdf.fillna(s_nan), pdf.fillna(s_nan))
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis=1)
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis="columns")
with self.assertRaisesRegex(ValueError, "limit parameter for value is not support now"):
psdf.fillna(-1, limit=1)
with self.assertRaisesRegex(TypeError, "Unsupported.*DataFrame"):
psdf.fillna(pd.DataFrame({"x": [-1], "y": [-1], "z": [-1]}))
with self.assertRaisesRegex(TypeError, "Unsupported.*int64"):
psdf.fillna({"x": np.int64(-6), "y": np.int64(-4), "z": -5})
with self.assertRaisesRegex(ValueError, "Expecting 'pad', 'ffill', 'backfill' or 'bfill'."):
psdf.fillna(method="xxx")
with self.assertRaisesRegex(
ValueError, "Must specify a fillna 'value' or 'method' parameter."
):
psdf.fillna()
# multi-index columns
pdf = pd.DataFrame(
{
("x", "a"): [np.nan, 2, 3, 4, np.nan, 6],
("x", "b"): [1, 2, np.nan, 4, np.nan, np.nan],
("y", "c"): [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
self.assert_eq(psdf.fillna({"x": -1}), pdf.fillna({"x": -1}))
self.assert_eq(
psdf.fillna({"x": -1, ("x", "b"): -2}), pdf.fillna({"x": -1, ("x", "b"): -2})
)
self.assert_eq(
psdf.fillna({("x", "b"): -2, "x": -1}), pdf.fillna({("x", "b"): -2, "x": -1})
)
# check multi index
pdf = pdf.set_index([("x", "a"), ("x", "b")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
def test_isnull(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")}, index=np.random.rand(6)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.notnull(), pdf.notnull())
self.assert_eq(psdf.isnull(), pdf.isnull())
def test_to_datetime(self):
pdf = pd.DataFrame(
{"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}, index=np.random.rand(2)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pd.to_datetime(pdf), ps.to_datetime(psdf))
def test_nunique(self):
pdf = pd.DataFrame({"A": [1, 2, 3], "B": [np.nan, 3, np.nan]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
# Assert NaNs are dropped by default
self.assert_eq(psdf.nunique(), pdf.nunique())
# Assert including NaN values
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
# Assert approximate counts
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True),
pd.Series([103], index=["A"]),
)
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True, rsd=0.01),
pd.Series([100], index=["A"]),
)
# Assert that axis=1 is not supported yet
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.nunique(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")], names=["1", "2"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.nunique(), pdf.nunique())
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
def test_sort_values(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
for ascending in [True, False]:
for na_position in ["first", "last"]:
self.assert_eq(
psdf.sort_values("a", ascending=ascending, na_position=na_position),
pdf.sort_values("a", ascending=ascending, na_position=na_position),
)
self.assert_eq(psdf.sort_values(["a", "b"]), pdf.sort_values(["a", "b"]))
self.assert_eq(
psdf.sort_values(["a", "b"], ascending=[False, True]),
pdf.sort_values(["a", "b"], ascending=[False, True]),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], ascending=[False]))
self.assert_eq(
psdf.sort_values(["a", "b"], na_position="first"),
pdf.sort_values(["a", "b"], na_position="first"),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], na_position="invalid"))
pserA = pdf.a
psserA = psdf.a
self.assert_eq(psdf.sort_values("b", inplace=True), pdf.sort_values("b", inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# multi-index columns
pdf = pd.DataFrame(
{("X", 10): [1, 2, 3, 4, 5, None, 7], ("X", 20): [7, 6, 5, 4, 3, 2, 1]},
index=np.random.rand(7),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(("X", 20)), pdf.sort_values(("X", 20)))
self.assert_eq(
psdf.sort_values([("X", 20), ("X", 10)]), pdf.sort_values([("X", 20), ("X", 10)])
)
self.assertRaisesRegex(
ValueError,
"For a multi-index, the label must be a tuple with elements",
lambda: psdf.sort_values(["X"]),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 3, 4, 5, None, 7], 20: [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(20), pdf.sort_values(20))
self.assert_eq(psdf.sort_values([20, 10]), pdf.sort_values([20, 10]))
def test_sort_index(self):
pdf = pd.DataFrame(
{"A": [2, 1, np.nan], "B": [np.nan, 0, np.nan]}, index=["b", "a", np.nan]
)
psdf = ps.from_pandas(pdf)
# Assert invalid parameters
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(axis=1))
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(kind="mergesort"))
self.assertRaises(ValueError, lambda: psdf.sort_index(na_position="invalid"))
# Assert default behavior without parameters
self.assert_eq(psdf.sort_index(), pdf.sort_index())
# Assert sorting descending
self.assert_eq(psdf.sort_index(ascending=False), pdf.sort_index(ascending=False))
# Assert sorting NA indices first
self.assert_eq(psdf.sort_index(na_position="first"), pdf.sort_index(na_position="first"))
# Assert sorting descending and NA indices first
self.assert_eq(
psdf.sort_index(ascending=False, na_position="first"),
pdf.sort_index(ascending=False, na_position="first"),
)
# Assert sorting inplace
pserA = pdf.A
psserA = psdf.A
self.assertEqual(psdf.sort_index(inplace=True), pdf.sort_index(inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# Assert multi-indices
pdf = pd.DataFrame(
{"A": range(4), "B": range(4)[::-1]}, index=[["b", "b", "a", "a"], [1, 0, 1, 0]]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_index(level=[1, 0]), pdf.sort_index(level=[1, 0]))
self.assert_eq(psdf.reset_index().sort_index(), pdf.reset_index().sort_index())
# Assert with multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_swaplevel(self):
# MultiIndex with two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
# MultiIndex with more than two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"], ["l", "m", "s", "xs"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(0, 2), psdf.swaplevel(0, 2))
self.assert_eq(pdf.swaplevel(1, 2), psdf.swaplevel(1, 2))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel(-1, -2), psdf.swaplevel(-1, -2))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
self.assert_eq(pdf.swaplevel("number", "size"), psdf.swaplevel("number", "size"))
self.assert_eq(pdf.swaplevel("color", "size"), psdf.swaplevel("color", "size"))
self.assert_eq(
pdf.swaplevel("color", "size", axis="index"),
psdf.swaplevel("color", "size", axis="index"),
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=0), psdf.swaplevel("color", "size", axis=0)
)
pdf = pd.DataFrame(
{
"x1": ["a", "b", "c", "d"],
"x2": ["a", "b", "c", "d"],
"x3": ["a", "b", "c", "d"],
"x4": ["a", "b", "c", "d"],
}
)
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf.columns = pidx
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(axis=1), psdf.swaplevel(axis=1))
self.assert_eq(pdf.swaplevel(0, 1, axis=1), psdf.swaplevel(0, 1, axis=1))
self.assert_eq(pdf.swaplevel(0, 2, axis=1), psdf.swaplevel(0, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 2, axis=1), psdf.swaplevel(1, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 1, axis=1), psdf.swaplevel(1, 1, axis=1))
self.assert_eq(pdf.swaplevel(-1, -2, axis=1), psdf.swaplevel(-1, -2, axis=1))
self.assert_eq(
pdf.swaplevel("number", "color", axis=1), psdf.swaplevel("number", "color", axis=1)
)
self.assert_eq(
pdf.swaplevel("number", "size", axis=1), psdf.swaplevel("number", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=1), psdf.swaplevel("color", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis="columns"),
psdf.swaplevel("color", "size", axis="columns"),
)
# Error conditions
self.assertRaises(AssertionError, lambda: ps.DataFrame([1, 2]).swaplevel())
self.assertRaises(IndexError, lambda: psdf.swaplevel(0, 9, axis=1))
self.assertRaises(KeyError, lambda: psdf.swaplevel("not_number", "color", axis=1))
self.assertRaises(ValueError, lambda: psdf.swaplevel(axis=2))
def test_swapaxes(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["x", "y", "z"], columns=["a", "b", "c"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.swapaxes(0, 1), pdf.swapaxes(0, 1))
self.assert_eq(psdf.swapaxes(1, 0), pdf.swapaxes(1, 0))
self.assert_eq(psdf.swapaxes("index", "columns"), pdf.swapaxes("index", "columns"))
self.assert_eq(psdf.swapaxes("columns", "index"), pdf.swapaxes("columns", "index"))
self.assert_eq((psdf + 1).swapaxes(0, 1), (pdf + 1).swapaxes(0, 1))
self.assertRaises(AssertionError, lambda: psdf.swapaxes(0, 1, copy=False))
self.assertRaises(ValueError, lambda: psdf.swapaxes(0, -1))
def test_nlargest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nlargest(n=5, columns="a"), pdf.nlargest(5, columns="a"))
self.assert_eq(psdf.nlargest(n=5, columns=["a", "b"]), pdf.nlargest(5, columns=["a", "b"]))
def test_nsmallest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nsmallest(n=5, columns="a"), pdf.nsmallest(5, columns="a"))
self.assert_eq(
psdf.nsmallest(n=5, columns=["a", "b"]), pdf.nsmallest(5, columns=["a", "b"])
)
def test_xs(self):
d = {
"num_legs": [4, 4, 2, 2],
"num_wings": [0, 0, 2, 2],
"class": ["mammal", "mammal", "mammal", "bird"],
"animal": ["cat", "dog", "bat", "penguin"],
"locomotion": ["walks", "walks", "flies", "walks"],
}
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "locomotion"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs("mammal"), pdf.xs("mammal"))
self.assert_eq(psdf.xs(("mammal",)), pdf.xs(("mammal",)))
self.assert_eq(psdf.xs(("mammal", "dog", "walks")), pdf.xs(("mammal", "dog", "walks")))
self.assert_eq(
ps.concat([psdf, psdf]).xs(("mammal", "dog", "walks")),
pd.concat([pdf, pdf]).xs(("mammal", "dog", "walks")),
)
self.assert_eq(psdf.xs("cat", level=1), pdf.xs("cat", level=1))
self.assert_eq(psdf.xs("flies", level=2), pdf.xs("flies", level=2))
self.assert_eq(psdf.xs("mammal", level=-3), pdf.xs("mammal", level=-3))
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.xs("num_wings", axis=1)
with self.assertRaises(KeyError):
psdf.xs(("mammal", "dog", "walk"))
msg = r"'Key length \(4\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
psdf.xs(("mammal", "dog", "walks", "foo"))
msg = "'key' should be a scalar value or tuple that contains scalar values"
with self.assertRaisesRegex(TypeError, msg):
psdf.xs(["mammal", "dog", "walks", "foo"])
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=-4))
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=3))
self.assertRaises(KeyError, lambda: psdf.xs(("dog", "walks"), level=1))
# non-string names
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "num_legs", "num_wings"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs(("mammal", "dog", 4)), pdf.xs(("mammal", "dog", 4)))
self.assert_eq(psdf.xs(2, level=2), pdf.xs(2, level=2))
self.assert_eq((psdf + "a").xs(("mammal", "dog", 4)), (pdf + "a").xs(("mammal", "dog", 4)))
self.assert_eq((psdf + "a").xs(2, level=2), (pdf + "a").xs(2, level=2))
def test_missing(self):
psdf = self.psdf
missing_functions = inspect.getmembers(_MissingPandasLikeDataFrame, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)()
missing_properties = inspect.getmembers(
_MissingPandasLikeDataFrame, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)
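# Note: _MissingPandasLikeDataFrame registers a stub for every pandas DataFrame
# member that pandas-on-Spark does not (yet) provide; calling or accessing such
# a member raises PandasNotImplementedError with a message naming it, which is
# what the regexes above assert.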
def test_to_numpy(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.to_numpy(), pdf.values)
def test_to_pandas(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.to_pandas(), pdf)
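# Note: to_numpy() and to_pandas() both collect the entire distributed frame to
# the driver, so they are only appropriate when the data fits in driver memory;
# the tests above deliberately use tiny frames.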
def test_isin(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.isin([4, "six"]), pdf.isin([4, "six"]))
# pandas seems to have a bug when an np.array is passed as the parameter, so compare against the list form
self.assert_eq(psdf.isin(np.array([4, "six"])), pdf.isin([4, "six"]))
self.assert_eq(
psdf.isin({"a": [2, 8], "c": ["three", "one"]}),
pdf.isin({"a": [2, 8], "c": ["three", "one"]}),
)
self.assert_eq(
psdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
pdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
)
msg = "'DataFrame' object has no attribute {'e'}"
with self.assertRaisesRegex(AttributeError, msg):
psdf.isin({"e": [5, 7], "a": [1, 6]})
msg = "DataFrame and Series are not supported"
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.isin(pdf)
msg = "Values should be iterable, Series, DataFrame or dict."
with self.assertRaisesRegex(TypeError, msg):
psdf.isin(1)
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, None, 9, 4, None, 4],
"c": [None, 5, None, 3, 2, 1],
},
)
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), pdf.isin([4, 3, 1, 1, None]))
else:
expected = pd.DataFrame(
{
"a": [True, False, True, True, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, True, False, True],
}
)
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), expected)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
psdf.isin({"b": [4, 3, 1, 1, None]}), pdf.isin({"b": [4, 3, 1, 1, None]})
)
else:
expected = pd.DataFrame(
{
"a": [False, False, False, False, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, False, False, False],
}
)
self.assert_eq(psdf.isin({"b": [4, 3, 1, 1, None]}), expected)
def test_merge(self):
left_pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"value": [1, 2, 3, 5, 6, 7],
"x": list("abcdef"),
},
columns=["lkey", "value", "x"],
)
right_pdf = pd.DataFrame(
{
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [4, 5, 6, 7, 8, 9],
"y": list("efghij"),
},
columns=["rkey", "value", "y"],
)
right_ps = pd.Series(list("defghi"), name="x", index=[5, 6, 7, 8, 9, 10])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
right_psser = ps.from_pandas(right_ps)
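# Helper: row order is not guaranteed for distributed DataFrames, so the
# pandas-on-Spark result is converted to pandas and both sides are sorted by
# all columns and reindexed before being compared.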
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, on=("value",)))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
# MultiIndex
check(
lambda left, right: left.merge(
right, left_on=["lkey", "value"], right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.set_index(["lkey", "value"]).merge(
right, left_index=True, right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.merge(
right.set_index(["rkey", "value"]), left_on=["lkey", "value"], right_index=True
)
)
# TODO: when both left_index=True and right_index=True with multi-index
# check(lambda left, right: left.set_index(['lkey', 'value']).merge(
# right.set_index(['rkey', 'value']), left_index=True, right_index=True))
# join types
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, on="value", how=how))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey", how=how))
# suffix
check(
lambda left, right: left.merge(
right, left_on="lkey", right_on="rkey", suffixes=["_left", "_right"]
)
)
# Test Series on the right
check(lambda left, right: left.merge(right), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x"), right_psser, right_ps
)
check(
lambda left, right: left.set_index("x").merge(right, left_index=True, right_on="x"),
right_psser,
right_ps,
)
# Test join types with Series
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, how=how), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x", how=how),
right_psser,
right_ps,
)
# suffix with Series
check(
lambda left, right: left.merge(
right,
suffixes=["_left", "_right"],
how="outer",
left_index=True,
right_index=True,
),
right_psser,
right_ps,
)
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([(10, "lkey"), (10, "value"), (20, "x")])
left_pdf.columns = left_columns
left_psdf.columns = left_columns
right_columns = pd.MultiIndex.from_tuples([(10, "rkey"), (10, "value"), (30, "y")])
right_pdf.columns = right_columns
right_psdf.columns = right_columns
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[(10, "value")]))
check(
lambda left, right: (left.set_index((10, "lkey")).merge(right.set_index((10, "rkey"))))
)
check(
lambda left, right: (
left.set_index((10, "lkey")).merge(
right.set_index((10, "rkey")), left_index=True, right_index=True
)
)
)
# TODO: when both left_index=True and right_index=True with multi-index columns
# check(lambda left, right: left.merge(right,
# left_on=[('a', 'lkey')], right_on=[('a', 'rkey')]))
# check(lambda left, right: (left.set_index(('a', 'lkey'))
# .merge(right, left_index=True, right_on=[('a', 'rkey')])))
# non-string names
left_pdf.columns = [10, 100, 1000]
left_psdf.columns = [10, 100, 1000]
right_pdf.columns = [20, 100, 2000]
right_psdf.columns = [20, 100, 2000]
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[100]))
check(lambda left, right: (left.set_index(10).merge(right.set_index(20))))
check(
lambda left, right: (
left.set_index(10).merge(right.set_index(20), left_index=True, right_index=True)
)
)
def test_merge_same_anchor(self):
pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [1, 1, 3, 5, 6, 7],
"x": list("abcdef"),
"y": list("efghij"),
},
columns=["lkey", "rkey", "value", "x", "y"],
)
psdf = ps.from_pandas(pdf)
left_pdf = pdf[["lkey", "value", "x"]]
right_pdf = pdf[["rkey", "value", "y"]]
left_psdf = psdf[["lkey", "value", "x"]]
right_psdf = psdf[["rkey", "value", "y"]]
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
def test_merge_retains_indices(self):
left_pdf = pd.DataFrame({"A": [0, 1]})
right_pdf = pd.DataFrame({"B": [1, 2]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_index=True),
left_pdf.merge(right_pdf, left_index=True, right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_index=True),
left_pdf.merge(right_pdf, left_on="A", right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_on="B"),
left_pdf.merge(right_pdf, left_index=True, right_on="B"),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_on="B"),
left_pdf.merge(right_pdf, left_on="A", right_on="B"),
)
def test_merge_how_parameter(self):
left_pdf = pd.DataFrame({"A": [1, 2]})
right_pdf = pd.DataFrame({"B": ["x", "y"]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True)
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True)
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="left")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="left")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="right")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="right")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="outer")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="outer")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
def test_merge_raises(self):
left = ps.DataFrame(
{"value": [1, 2, 3, 5, 6], "x": list("abcde")},
columns=["value", "x"],
index=["foo", "bar", "baz", "foo", "bar"],
)
right = ps.DataFrame(
{"value": [4, 5, 6, 7, 8], "y": list("fghij")},
columns=["value", "y"],
index=["baz", "foo", "bar", "baz", "foo"],
)
with self.assertRaisesRegex(ValueError, "No common columns to perform merge on"):
left[["x"]].merge(right[["y"]])
with self.assertRaisesRegex(ValueError, "not a combination of both"):
left.merge(right, on="value", left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_index=True)
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_on="y")
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_index=True)
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on="value", right_on=["value", "y"])
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on=["value", "x"], right_on="value")
with self.assertRaisesRegex(ValueError, "['inner', 'left', 'right', 'full', 'outer']"):
left.merge(right, left_index=True, right_index=True, how="foo")
with self.assertRaisesRegex(KeyError, "id"):
left.merge(right, on="id")
def test_append(self):
pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"))
psdf = ps.from_pandas(pdf)
other_pdf = pd.DataFrame([[3, 4], [5, 6]], columns=list("BC"), index=[2, 3])
other_psdf = ps.from_pandas(other_pdf)
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
self.assert_eq(psdf.append(psdf, ignore_index=True), pdf.append(pdf, ignore_index=True))
# Assert DataFrames with non-matching columns
self.assert_eq(psdf.append(other_psdf), pdf.append(other_pdf))
# Assert appending a Series fails
msg = "DataFrames.append() does not support appending Series to DataFrames"
with self.assertRaises(TypeError, msg=msg):
psdf.append(psdf["A"])
# Assert using the sort parameter raises an exception
msg = "The 'sort' parameter is currently not supported"
with self.assertRaises(NotImplementedError, msg=msg):
psdf.append(psdf, sort=True)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
psdf.append(other_psdf, verify_integrity=True),
pdf.append(other_pdf, verify_integrity=True),
)
msg = "Indices have overlapping values"
with self.assertRaises(ValueError, msg=msg):
psdf.append(psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
psdf.append(psdf, ignore_index=True, verify_integrity=True),
pdf.append(pdf, ignore_index=True, verify_integrity=True),
)
# Assert appending multi-index DataFrames
multi_index_pdf = pd.DataFrame(
[[1, 2], [3, 4]], columns=list("AB"), index=[[2, 3], [4, 5]]
)
multi_index_psdf = ps.from_pandas(multi_index_pdf)
other_multi_index_pdf = pd.DataFrame(
[[5, 6], [7, 8]], columns=list("AB"), index=[[2, 3], [6, 7]]
)
other_multi_index_psdf = ps.from_pandas(other_multi_index_pdf)
self.assert_eq(
multi_index_psdf.append(multi_index_psdf), multi_index_pdf.append(multi_index_pdf)
)
# Assert DataFrames with non-matching columns
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf),
multi_index_pdf.append(other_multi_index_pdf),
)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf, verify_integrity=True),
multi_index_pdf.append(other_multi_index_pdf, verify_integrity=True),
)
with self.assertRaises(ValueError, msg=msg):
multi_index_psdf.append(multi_index_psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
multi_index_psdf.append(multi_index_psdf, ignore_index=True, verify_integrity=True),
multi_index_pdf.append(multi_index_pdf, ignore_index=True, verify_integrity=True),
)
# Assert trying to append DataFrames with different index levels
msg = "Both DataFrames have to have the same number of index levels"
with self.assertRaises(ValueError, msg=msg):
psdf.append(multi_index_psdf)
# Skip index level check when ignore_index=True
self.assert_eq(
psdf.append(multi_index_psdf, ignore_index=True),
pdf.append(multi_index_pdf, ignore_index=True),
)
columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
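# Note (not asserted anywhere in this suite): DataFrame.append is deprecated in
# newer pandas releases in favor of pd.concat, so these checks are tied to
# pandas versions that still provide it.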
def test_clip(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
# Assert list-like values are not accepted for 'lower' and 'upper'
msg = "List-like value are not supported for 'lower' and 'upper' at the moment"
with self.assertRaises(TypeError, msg=msg):
psdf.clip(lower=[1])
with self.assertRaises(TypeError, msg=msg):
psdf.clip(upper=[1])
# Assert no lower or upper
self.assert_eq(psdf.clip(), pdf.clip())
# Assert lower only
self.assert_eq(psdf.clip(1), pdf.clip(1))
# Assert upper only
self.assert_eq(psdf.clip(upper=3), pdf.clip(upper=3))
# Assert lower and upper
self.assert_eq(psdf.clip(1, 3), pdf.clip(1, 3))
pdf["clip"] = pdf.A.clip(lower=1, upper=3)
psdf["clip"] = psdf.A.clip(lower=1, upper=3)
self.assert_eq(psdf, pdf)
# Assert behavior on string values
str_psdf = ps.DataFrame({"A": ["a", "b", "c"]}, index=np.random.rand(3))
self.assert_eq(str_psdf.clip(1, 3), str_psdf)
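# Note: as asserted just above, clip() leaves non-numeric (string) columns
# untouched instead of raising, so clipping a purely string frame returns it
# unchanged.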
def test_binary_operators(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf + psdf.copy(), pdf + pdf.copy())
self.assert_eq(psdf + psdf.loc[:, ["A", "B"]], pdf + pdf.loc[:, ["A", "B"]])
self.assert_eq(psdf.loc[:, ["A", "B"]] + psdf, pdf.loc[:, ["A", "B"]] + pdf)
self.assertRaisesRegex(
ValueError,
"it comes from a different dataframe",
lambda: ps.range(10).add(ps.range(10)),
)
self.assertRaisesRegex(
TypeError,
"add with a sequence is currently not supported",
lambda: ps.range(10).add(ps.range(10).id),
)
psdf_other = psdf.copy()
psdf_other.columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
self.assertRaisesRegex(
ValueError,
"cannot join with no overlapping index names",
lambda: psdf.add(psdf_other),
)
def test_binary_operator_add(self):
# Positive
pdf = pd.DataFrame({"a": ["x"], "b": ["y"], "c": [1], "d": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] + psdf["b"], pdf["a"] + pdf["b"])
self.assert_eq(psdf["c"] + psdf["d"], pdf["c"] + pdf["d"])
# Negative
ks_err_msg = "Addition can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + 1)
def test_binary_operator_sub(self):
# Positive
pdf = pd.DataFrame({"a": [2], "b": [1]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] - psdf["b"], pdf["a"] - pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Subtraction can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" - psdf["b"])
ks_err_msg = "Subtraction can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - 1)
psdf = ps.DataFrame({"a": ["x"], "b": ["y"]})
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
def test_binary_operator_truediv(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] / psdf["b"], pdf["a"] / pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "True division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" / psdf["b"])
ks_err_msg = "True division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] / psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 / psdf["a"])
def test_binary_operator_floordiv(self):
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Floor division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] // psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 // psdf["a"])
ks_err_msg = "Floor division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" // psdf["b"])
def test_binary_operator_mod(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] % psdf["b"], pdf["a"] % pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Modulo can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % "literal")
ks_err_msg = "Modulo can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] % psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 % psdf["a"])
def test_binary_operator_multiply(self):
# Positive
pdf = pd.DataFrame({"a": ["x", "y"], "b": [1, 2], "c": [3, 4]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["b"] * psdf["c"], pdf["b"] * pdf["c"])
self.assert_eq(psdf["c"] * psdf["b"], pdf["c"] * pdf["b"])
self.assert_eq(psdf["a"] * psdf["b"], pdf["a"] * pdf["b"])
self.assert_eq(psdf["b"] * psdf["a"], pdf["b"] * pdf["a"])
self.assert_eq(psdf["a"] * 2, pdf["a"] * 2)
self.assert_eq(psdf["b"] * 2, pdf["b"] * 2)
self.assert_eq(2 * psdf["a"], 2 * pdf["a"])
self.assert_eq(2 * psdf["b"], 2 * pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [2]})
ks_err_msg = "Multiplication can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * 0.1)
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 0.1 * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["a"])
def test_sample(self):
pdf = pd.DataFrame({"A": [0, 2, 4]})
psdf = ps.from_pandas(pdf)
# Make sure these calls run; the results cannot be checked because sampling is non-deterministic.
psdf.sample(frac=0.1)
psdf.sample(frac=0.2, replace=True)
psdf.sample(frac=0.2, random_state=5)
psdf["A"].sample(frac=0.2)
psdf["A"].sample(frac=0.2, replace=True)
psdf["A"].sample(frac=0.2, random_state=5)
with self.assertRaises(ValueError):
psdf.sample()
with self.assertRaises(NotImplementedError):
psdf.sample(n=1)
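# Illustrative sketch (commented out; assumes an active Spark session): only
# fraction-based sampling is supported, and the exact rows returned are not
# reproducible against pandas, which is why the test only checks that the
# calls run.
#
#   psdf.sample(frac=0.2)                  # ~20% of rows, without replacement
#   psdf.sample(frac=0.2, replace=True)    # with replacement
#   psdf.sample(frac=0.2, random_state=5)  # seeded, but rows may still differ from pandas
#   # psdf.sample(n=1) raises NotImplementedError: only `frac` is supported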
def test_add_prefix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
def test_add_suffix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
def test_join(self):
# check basic functionality
pdf1 = pd.DataFrame(
{"key": ["K0", "K1", "K2", "K3"], "A": ["A0", "A1", "A2", "A3"]}, columns=["key", "A"]
)
pdf2 = pd.DataFrame(
{"key": ["K0", "K1", "K2"], "B": ["B0", "B1", "B2"]}, columns=["key", "B"]
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# join with duplicated columns in Series
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
ks1 = ps.Series(["A1", "A5"], index=[1, 2], name="A")
psdf1.join(ks1, how="outer")
# join with duplicated columns in DataFrame
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
psdf1.join(psdf2, how="outer")
# check `on` parameter
join_pdf = pdf1.join(pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index("key").join(
pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index("key").join(
psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index columns
columns1 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "A")])
columns2 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "B")])
pdf1.columns = columns1
pdf2.columns = columns2
psdf1.columns = columns1
psdf2.columns = columns2
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# check `on` parameter
join_pdf = pdf1.join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index(("x", "key")).join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index(("x", "key")).join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index
midx1 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c"), ("z", "d")], names=["index1", "index2"]
)
midx2 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"]
)
pdf1.index = midx1
pdf2.index = midx2
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, on=["index1", "index2"], rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, on=["index1", "index2"], rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
with self.assertRaisesRegex(
ValueError, r'len\(left_on\) must equal the number of levels in the index of "right"'
):
psdf1.join(psdf2, on=["index1"], rsuffix="_right")
def test_replace(self):
pdf = pd.DataFrame(
{
"name": ["Ironman", "Captain America", "Thor", "Hulk"],
"weapon": ["Mark-45", "Shield", "Mjolnir", "Smash"],
},
index=np.random.rand(4),
)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only for method='pad"
):
psdf.replace(method="bfill")
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only when limit=None"
):
psdf.replace(limit=10)
with self.assertRaisesRegex(
NotImplementedError, "replace currently doesn't supports regex"
):
psdf.replace(regex="")
with self.assertRaisesRegex(ValueError, "Length of to_replace and value must be same"):
psdf.replace(to_replace=["Ironman"], value=["Spiderman", "Doctor Strange"])
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace("Ironman", lambda x: "Spiderman")
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace(lambda x: "Ironman", "Spiderman")
self.assert_eq(psdf.replace("Ironman", "Spiderman"), pdf.replace("Ironman", "Spiderman"))
self.assert_eq(
psdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
pdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
)
self.assert_eq(
psdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
pdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
)
# inplace
pser = pdf.name
psser = psdf.name
pdf.replace("Ironman", "Spiderman", inplace=True)
psdf.replace("Ironman", "Spiderman", inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf = pd.DataFrame(
{"A": [0, 1, 2, 3, np.nan], "B": [5, 6, 7, 8, np.nan], "C": ["a", "b", "c", "d", None]},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
pdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
)
self.assert_eq(
psdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(psdf.replace({"C": ["a", None]}, "e"), pdf.replace({"C": ["a", None]}, "e"))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
pdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
)
self.assert_eq(
psdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("Y", "C"): ["a", None]}, "e"),
pdf.replace({("Y", "C"): ["a", None]}, "e"),
)
def test_update(self):
# check basic functionality
def get_data(left_columns=None, right_columns=None):
left_pdf = pd.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", np.nan, np.nan]}, columns=["A", "B"]
)
right_pdf = pd.DataFrame(
{"B": ["x", np.nan, "y", np.nan], "C": ["100", "200", "300", "400"]},
columns=["B", "C"],
)
left_psdf = ps.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", None, None]}, columns=["A", "B"]
)
right_psdf = ps.DataFrame(
{"B": ["x", None, "y", None], "C": ["100", "200", "300", "400"]}, columns=["B", "C"]
)
if left_columns is not None:
left_pdf.columns = left_columns
left_psdf.columns = left_columns
if right_columns is not None:
right_pdf.columns = right_columns
right_psdf.columns = right_columns
return left_psdf, left_pdf, right_psdf, right_pdf
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
pser = left_pdf.B
psser = left_psdf.B
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
self.assert_eq(psser.sort_index(), pser.sort_index())
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
with self.assertRaises(NotImplementedError):
left_psdf.update(right_psdf, join="right")
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
right_columns = pd.MultiIndex.from_tuples([("X", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
right_columns = pd.MultiIndex.from_tuples([("Y", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
def test_pivot_table_dtypes(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
# Skip the column-name comparison by resetting the index
res_df = psdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
exp_df = pdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
self.assert_eq(res_df, exp_df)
# Results don't have the same column names
# Todo: self.assert_eq(psdf.pivot_table(columns="a", values="b").dtypes,
# pdf.pivot_table(columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes, pdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes)
def test_pivot_table(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [10, 20, 20, 40, 20, 40],
"c": [1, 2, 9, 4, 7, 4],
"d": [-1, -2, -3, -4, -5, -6],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
# Checking if both DataFrames have the same results
self.assert_eq(
psdf.pivot_table(columns="a", values="b").sort_index(),
pdf.pivot_table(columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", fill_value=999
).sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b", fill_value=999).sort_index(),
almost=True,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "e"), ("z", "c"), ("w", "d")]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pivot_table(columns=("x", "a"), values=("x", "b")).sort_index(),
pdf.pivot_table(columns=[("x", "a")], values=[("x", "b")]).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b"), ("y", "e")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e"), ("w", "d")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e"), ("w", "d")],
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")],
columns=("x", "a"),
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
almost=True,
)
def test_pivot_table_and_index(self):
# https://github.com/databricks/koalas/issues/805
pdf = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
psdf = ps.from_pandas(pdf)
ptable = pdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
ktable = psdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
self.assert_eq(ktable, ptable)
self.assert_eq(ktable.index, ptable.index)
self.assert_eq(repr(ktable.index), repr(ptable.index))
def test_stack(self):
pdf_single_level_cols = pd.DataFrame(
[[0, 1], [2, 3]], index=["cat", "dog"], columns=["weight", "height"]
)
psdf_single_level_cols = ps.from_pandas(pdf_single_level_cols)
self.assert_eq(
psdf_single_level_cols.stack().sort_index(), pdf_single_level_cols.stack().sort_index()
)
multicol1 = pd.MultiIndex.from_tuples(
[("weight", "kg"), ("weight", "pounds")], names=["x", "y"]
)
pdf_multi_level_cols1 = pd.DataFrame(
[[1, 2], [2, 4]], index=["cat", "dog"], columns=multicol1
)
psdf_multi_level_cols1 = ps.from_pandas(pdf_multi_level_cols1)
self.assert_eq(
psdf_multi_level_cols1.stack().sort_index(), pdf_multi_level_cols1.stack().sort_index()
)
multicol2 = pd.MultiIndex.from_tuples([("weight", "kg"), ("height", "m")])
pdf_multi_level_cols2 = pd.DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=["cat", "dog"], columns=multicol2
)
psdf_multi_level_cols2 = ps.from_pandas(pdf_multi_level_cols2)
self.assert_eq(
psdf_multi_level_cols2.stack().sort_index(), pdf_multi_level_cols2.stack().sort_index()
)
pdf = pd.DataFrame(
{
("y", "c"): [True, True],
("x", "b"): [False, False],
("x", "c"): [True, False],
("y", "a"): [False, True],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.stack().sort_index(), pdf.stack().sort_index())
self.assert_eq(psdf[[]].stack().sort_index(), pdf[[]].stack().sort_index(), almost=True)
def test_unstack(self):
pdf = pd.DataFrame(
np.random.randn(3, 3),
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.unstack().sort_index(), pdf.unstack().sort_index(), almost=True)
self.assert_eq(
psdf.unstack().unstack().sort_index(), pdf.unstack().unstack().sort_index(), almost=True
)
def test_pivot_errors(self):
psdf = ps.range(10)
with self.assertRaisesRegex(ValueError, "columns should be set"):
psdf.pivot(index="id")
with self.assertRaisesRegex(ValueError, "values should be set"):
psdf.pivot(index="id", columns="id")
def test_pivot_table_errors(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.pivot_table(index=["c"], columns="a", values=5))
msg = "index should be a None or a list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index="c", columns="a", values="b")
msg = "pivot_table doesn't support aggfunc as dict and without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"})
msg = "columns should be one column name."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns=["a"], values=["b"], aggfunc={"b": "mean", "e": "sum"})
msg = "Columns in aggfunc must be the same as values."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", aggfunc={"b": "mean", "e": "sum"}
)
msg = "values can't be a list without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"])
msg = "Wrong columns A."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["c"], columns="A", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
)
msg = "values should be one column or list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values=(["b"], ["c"]))
msg = "aggfunc must be a dict mapping from column name to aggregate functions"
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values="b", aggfunc={"a": lambda x: sum(x)})
psdf = ps.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(
index=["C"], columns="A", values=["B", "E"], aggfunc={"B": "mean", "E": "sum"}
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index=["C"], columns="A", values="B", aggfunc={"B": "mean"})
def test_transpose(self):
# TODO: what happens with a random index?
pdf1 = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]}, columns=["col1", "col2"])
psdf1 = ps.from_pandas(pdf1)
pdf2 = pd.DataFrame(
data={"score": [9, 8], "kids": [0, 0], "age": [12, 22]},
columns=["score", "kids", "age"],
)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
with option_context("compute.max_rows", None):
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
pdf3 = pd.DataFrame(
{
("cg1", "a"): [1, 2, 3],
("cg1", "b"): [4, 5, 6],
("cg2", "c"): [7, 8, 9],
("cg3", "d"): [9, 9, 9],
},
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
with option_context("compute.max_rows", None):
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
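# Note: transpose() is an expensive operation on distributed data, and the
# implementation consults the "compute.max_rows" option; the option_context
# blocks above re-run the same checks with that limit removed (None) so both
# code paths are covered.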
def _test_cummin(self, pdf, psdf):
self.assert_eq(pdf.cummin(), psdf.cummin())
self.assert_eq(pdf.cummin(skipna=False), psdf.cummin(skipna=False))
self.assert_eq(pdf.cummin().sum(), psdf.cummin().sum())
def test_cummin(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def test_cummin_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def _test_cummax(self, pdf, psdf):
self.assert_eq(pdf.cummax(), psdf.cummax())
self.assert_eq(pdf.cummax(skipna=False), psdf.cummax(skipna=False))
self.assert_eq(pdf.cummax().sum(), psdf.cummax().sum())
def test_cummax(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def test_cummax_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def _test_cumsum(self, pdf, psdf):
self.assert_eq(pdf.cumsum(), psdf.cumsum())
self.assert_eq(pdf.cumsum(skipna=False), psdf.cumsum(skipna=False))
self.assert_eq(pdf.cumsum().sum(), psdf.cumsum().sum())
def test_cumsum(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
def test_cumsum_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
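# The cumprod helper below compares with almost=True: cumulative products of
# floating-point values can differ in the last bits between pandas and the
# Spark-side computation, so exact equality would be too strict.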
def _test_cumprod(self, pdf, psdf):
self.assert_eq(pdf.cumprod(), psdf.cumprod(), almost=True)
self.assert_eq(pdf.cumprod(skipna=False), psdf.cumprod(skipna=False), almost=True)
self.assert_eq(pdf.cumprod().sum(), psdf.cumprod().sum(), almost=True)
def test_cumprod(self):
pdf = pd.DataFrame(
[[2.0, 1.0, 1], [5, None, 2], [1.0, -1.0, -3], [2.0, 0, 4], [4.0, 9.0, 5]],
columns=list("ABC"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
def test_cumprod_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.rand(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
def test_drop_duplicates(self):
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
# inplace is False
for keep in ["first", "last", False]:
with self.subTest(keep=keep):
self.assert_eq(
pdf.drop_duplicates(keep=keep).sort_index(),
psdf.drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates("a", keep=keep).sort_index(),
psdf.drop_duplicates("a", keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
psdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
)
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
# inplace is False
for keep in ["first", "last", False]:
with self.subTest("multi-index columns", keep=keep):
self.assert_eq(
pdf.drop_duplicates(keep=keep).sort_index(),
psdf.drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates(("x", "a"), keep=keep).sort_index(),
psdf.drop_duplicates(("x", "a"), keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(),
psdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(),
)
# inplace is True
subset_list = [None, "a", ["a", "b"]]
for subset in subset_list:
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
pser = pdf.a
psser = psdf.a
pdf.drop_duplicates(subset=subset, inplace=True)
psdf.drop_duplicates(subset=subset, inplace=True)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser.sort_index(), pser.sort_index())
# multi-index columns, inplace is True
subset_list = [None, ("x", "a"), [("x", "a"), ("y", "b")]]
for subset in subset_list:
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
pser = pdf[("x", "a")]
psser = psdf[("x", "a")]
pdf.drop_duplicates(subset=subset, inplace=True)
psdf.drop_duplicates(subset=subset, inplace=True)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser.sort_index(), pser.sort_index())
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 2, 2, 3], 20: ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
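        # `keep` below still holds the last value from the loop above (keep=False).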
self.assert_eq(
pdf.drop_duplicates(10, keep=keep).sort_index(),
psdf.drop_duplicates(10, keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates([10, 20], keep=keep).sort_index(),
psdf.drop_duplicates([10, 20], keep=keep).sort_index(),
)
def test_reindex(self):
index = pd.Index(["A", "B", "C", "D", "E"])
columns = pd.Index(["numbers"])
pdf = pd.DataFrame([1.0, 2.0, 3.0, 4.0, None], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
columns2 = pd.Index(["numbers", "2", "3"], name="cols2")
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
columns = pd.Index(["numbers"], name="cols")
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
pdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(),
psdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(),
psdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(index=["A", "B"]).sort_index(), psdf.reindex(index=["A", "B"]).sort_index()
)
self.assert_eq(
pdf.reindex(index=["A", "B", "2", "3"]).sort_index(),
psdf.reindex(index=["A", "B", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(),
psdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(),
)
self.assert_eq(
pdf.reindex(columns=["numbers"]).sort_index(),
psdf.reindex(columns=["numbers"]).sort_index(),
)
self.assert_eq(
pdf.reindex(columns=["numbers"], copy=True).sort_index(),
psdf.reindex(columns=["numbers"], copy=True).sort_index(),
)
# Using float as fill_value to avoid int64/32 clash
self.assert_eq(
pdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(),
psdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(),
)
columns2 = pd.Index(["numbers", "2", "3"])
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
columns2 = pd.Index(["numbers", "2", "3"], name="cols2")
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
# Reindexing single Index on single Index
pindex2 = pd.Index(["A", "C", "D", "E", "0"], name="index2")
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
pindex2 = pd.DataFrame({"index2": ["A", "C", "D", "E", "0"]}).set_index("index2").index
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
# Reindexing MultiIndex on single Index
pindex = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("F", "G")], names=["name1", "name2"]
)
kindex = ps.from_pandas(pindex)
self.assert_eq(
pdf.reindex(index=pindex, fill_value=0.0).sort_index(),
psdf.reindex(index=kindex, fill_value=0.0).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["numbers", "2", "3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["numbers", "2", "3"], axis=2))
self.assertRaises(TypeError, lambda: psdf.reindex(columns="numbers"))
self.assertRaises(TypeError, lambda: psdf.reindex(index=["A", "B", "C"], axis=1))
self.assertRaises(TypeError, lambda: psdf.reindex(index=123))
# Reindexing MultiIndex on MultiIndex
pdf = pd.DataFrame({"numbers": [1.0, 2.0, None]}, index=pindex)
psdf = ps.from_pandas(pdf)
pindex2 = pd.MultiIndex.from_tuples(
[("A", "G"), ("C", "D"), ("I", "J")], names=["name1", "name2"]
)
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
pindex2 = (
pd.DataFrame({"index_level_1": ["A", "C", "I"], "index_level_2": ["G", "D", "J"]})
.set_index(["index_level_1", "index_level_2"])
.index
)
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", "numbers")], names=["cols1", "cols2"])
pdf.columns = columns
psdf.columns = columns
# Reindexing MultiIndex index on MultiIndex columns and MultiIndex index
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
index = pd.Index(["A", "B", "C", "D", "E"])
pdf = pd.DataFrame(data=[1.0, 2.0, 3.0, 4.0, None], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
pindex2 = pd.Index(["A", "C", "D", "E", "0"], name="index2")
kindex2 = ps.from_pandas(pindex2)
# Reindexing single Index on MultiIndex columns and single Index
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(
columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=fill_value
).sort_index(),
psdf.reindex(
columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=fill_value
).sort_index(),
)
columns2 = pd.MultiIndex.from_tuples(
[("X", "numbers"), ("Y", "2"), ("Y", "3")], names=["cols3", "cols4"]
)
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["X"]))
self.assertRaises(ValueError, lambda: psdf.reindex(columns=[("X",)]))
def test_reindex_like(self):
data = [[1.0, 2.0], [3.0, None], [None, 4.0]]
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame(data=data, index=index, columns=columns)
psdf = ps.from_pandas(pdf)
# Reindexing single Index on single Index
data2 = [[5.0, None], [6.0, 7.0], [8.0, None]]
index2 = pd.Index(["A", "C", "D"], name="index2")
columns2 = pd.Index(["numbers", "F"], name="cols2")
pdf2 = pd.DataFrame(data=data2, index=index2, columns=columns2)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
pdf2 = pd.DataFrame({"index_level_1": ["A", "C", "I"]})
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2.set_index(["index_level_1"])).sort_index(),
psdf.reindex_like(psdf2.set_index(["index_level_1"])).sort_index(),
)
# Reindexing MultiIndex on single Index
index2 = pd.MultiIndex.from_tuples(
[("A", "G"), ("C", "D"), ("I", "J")], names=["name3", "name4"]
)
pdf2 = pd.DataFrame(data=data2, index=index2)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex_like(index2))
self.assertRaises(AssertionError, lambda: psdf2.reindex_like(psdf))
# Reindexing MultiIndex on MultiIndex
columns2 = pd.MultiIndex.from_tuples(
[("numbers", "third"), ("values", "second")], names=["cols3", "cols4"]
)
pdf2.columns = columns2
psdf2.columns = columns2
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["name1", "name2"]
)
pdf = pd.DataFrame(data=data, index=index, columns=columns)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
def test_melt(self):
pdf = pd.DataFrame(
{"A": [1, 3, 5], "B": [2, 4, 6], "C": [7, 8, 9]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.melt().sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars="A").sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars="A").sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A", "B"]).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=["A", "B"]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=("A", "B")).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=("A", "B")).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A"], value_vars=["C"])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=["A"], value_vars=["C"]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A"], value_vars=["B"], var_name="myVarname", value_name="myValname")
.sort_values(["myVarname", "myValname"])
.reset_index(drop=True),
pdf.melt(
id_vars=["A"], value_vars=["B"], var_name="myVarname", value_name="myValname"
).sort_values(["myVarname", "myValname"]),
)
self.assert_eq(
psdf.melt(value_vars=("A", "B"))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(value_vars=("A", "B")).sort_values(["variable", "value"]),
)
self.assertRaises(KeyError, lambda: psdf.melt(id_vars="Z"))
self.assertRaises(KeyError, lambda: psdf.melt(value_vars="Z"))
# multi-index columns
TEN = 10.0
TWELVE = 20.0
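        # Note: only the float values (10.0 and 20.0) matter for the checks below;
        # the constant names are just labels for the first column level.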
columns = pd.MultiIndex.from_tuples([(TEN, "A"), (TEN, "B"), (TWELVE, "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.melt().sort_values(["variable_0", "variable_1", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable_0", "variable_1", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[(TEN, "A")])
.sort_values(["variable_0", "variable_1", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[(TEN, "A")])
.sort_values(["variable_0", "variable_1", "value"])
.rename(columns=name_like_string),
)
self.assert_eq(
psdf.melt(id_vars=[(TEN, "A")], value_vars=[(TWELVE, "C")])
.sort_values(["variable_0", "variable_1", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[(TEN, "A")], value_vars=[(TWELVE, "C")])
.sort_values(["variable_0", "variable_1", "value"])
.rename(columns=name_like_string),
)
self.assert_eq(
psdf.melt(
id_vars=[(TEN, "A")],
value_vars=[(TEN, "B")],
var_name=["myV1", "myV2"],
value_name="myValname",
)
.sort_values(["myV1", "myV2", "myValname"])
.reset_index(drop=True),
pdf.melt(
id_vars=[(TEN, "A")],
value_vars=[(TEN, "B")],
var_name=["myV1", "myV2"],
value_name="myValname",
)
.sort_values(["myV1", "myV2", "myValname"])
.rename(columns=name_like_string),
)
columns.names = ["v0", "v1"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.melt().sort_values(["v0", "v1", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["v0", "v1", "value"]),
)
self.assertRaises(ValueError, lambda: psdf.melt(id_vars=(TEN, "A")))
self.assertRaises(ValueError, lambda: psdf.melt(value_vars=(TEN, "A")))
self.assertRaises(KeyError, lambda: psdf.melt(id_vars=[TEN]))
self.assertRaises(KeyError, lambda: psdf.melt(id_vars=[(TWELVE, "A")]))
self.assertRaises(KeyError, lambda: psdf.melt(value_vars=[TWELVE]))
self.assertRaises(KeyError, lambda: psdf.melt(value_vars=[(TWELVE, "A")]))
# non-string names
pdf.columns = [10.0, 20.0, 30.0]
psdf.columns = [10.0, 20.0, 30.0]
self.assert_eq(
psdf.melt().sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=10.0).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=10.0).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[10.0, 20.0])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[10.0, 20.0]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=(10.0, 20.0))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=(10.0, 20.0)).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[10.0], value_vars=[30.0])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[10.0], value_vars=[30.0]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(value_vars=(10.0, 20.0))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(value_vars=(10.0, 20.0)).sort_values(["variable", "value"]),
)
def test_all(self):
pdf = pd.DataFrame(
{
"col1": [False, False, False],
"col2": [True, False, False],
"col3": [0, 0, 1],
"col4": [0, 1, 2],
"col5": [False, False, None],
"col6": [True, False, None],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.all(), pdf.all())
columns = pd.MultiIndex.from_tuples(
[
("a", "col1"),
("a", "col2"),
("a", "col3"),
("b", "col4"),
("b", "col5"),
("c", "col6"),
]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.all(), pdf.all())
columns.names = ["X", "Y"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.all(), pdf.all())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.all(axis=1)
def test_any(self):
pdf = pd.DataFrame(
{
"col1": [False, False, False],
"col2": [True, False, False],
"col3": [0, 0, 1],
"col4": [0, 1, 2],
"col5": [False, False, None],
"col6": [True, False, None],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.any(), pdf.any())
columns = pd.MultiIndex.from_tuples(
[
("a", "col1"),
("a", "col2"),
("a", "col3"),
("b", "col4"),
("b", "col5"),
("c", "col6"),
]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.any(), pdf.any())
columns.names = ["X", "Y"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.any(), pdf.any())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.any(axis=1)
def test_rank(self):
pdf = pd.DataFrame(
data={"col1": [1, 2, 3, 1], "col2": [3, 4, 3, 1]},
columns=["col1", "col2"],
index=np.random.rand(4),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.rank().sort_index(), psdf.rank().sort_index())
self.assert_eq(pdf.rank().sum(), psdf.rank().sum())
self.assert_eq(
pdf.rank(ascending=False).sort_index(), psdf.rank(ascending=False).sort_index()
)
self.assert_eq(pdf.rank(method="min").sort_index(), psdf.rank(method="min").sort_index())
self.assert_eq(pdf.rank(method="max").sort_index(), psdf.rank(method="max").sort_index())
self.assert_eq(
pdf.rank(method="first").sort_index(), psdf.rank(method="first").sort_index()
)
self.assert_eq(
pdf.rank(method="dense").sort_index(), psdf.rank(method="dense").sort_index()
)
msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'"
with self.assertRaisesRegex(ValueError, msg):
psdf.rank(method="nothing")
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "col1"), ("y", "col2")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.rank().sort_index(), psdf.rank().sort_index())
def test_round(self):
pdf = pd.DataFrame(
{
"A": [0.028208, 0.038683, 0.877076],
"B": [0.992815, 0.645646, 0.149370],
"C": [0.173891, 0.577595, 0.491027],
},
columns=["A", "B", "C"],
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
pser = pd.Series([1, 0, 2], index=["A", "B", "C"])
psser = ps.Series([1, 0, 2], index=["A", "B", "C"])
self.assert_eq(pdf.round(2), psdf.round(2))
self.assert_eq(pdf.round({"A": 1, "C": 2}), psdf.round({"A": 1, "C": 2}))
self.assert_eq(pdf.round({"A": 1, "D": 2}), psdf.round({"A": 1, "D": 2}))
self.assert_eq(pdf.round(pser), psdf.round(psser))
msg = "decimals must be an integer, a dict-like or a Series"
with self.assertRaisesRegex(TypeError, msg):
psdf.round(1.5)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
pser = pd.Series([1, 0, 2], index=columns)
psser = ps.Series([1, 0, 2], index=columns)
self.assert_eq(pdf.round(2), psdf.round(2))
self.assert_eq(
pdf.round({("X", "A"): 1, ("Y", "C"): 2}), psdf.round({("X", "A"): 1, ("Y", "C"): 2})
)
self.assert_eq(pdf.round({("X", "A"): 1, "Y": 2}), psdf.round({("X", "A"): 1, "Y": 2}))
self.assert_eq(pdf.round(pser), psdf.round(psser))
# non-string names
pdf = pd.DataFrame(
{
10: [0.028208, 0.038683, 0.877076],
20: [0.992815, 0.645646, 0.149370],
30: [0.173891, 0.577595, 0.491027],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.round({10: 1, 30: 2}), psdf.round({10: 1, 30: 2}))
def test_shift(self):
pdf = pd.DataFrame(
{
"Col1": [10, 20, 15, 30, 45],
"Col2": [13, 23, 18, 33, 48],
"Col3": [17, 27, 22, 37, 52],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.shift(3), psdf.shift(3))
self.assert_eq(pdf.shift().shift(-1), psdf.shift().shift(-1))
self.assert_eq(pdf.shift().sum().astype(int), psdf.shift().sum())
        # Build the expected result by hand, since pandas 0.23 does not support the `fill_value` argument.
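        # pdf1 below is shift(periods=3) with the three leading rows filled with 0.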
pdf1 = pd.DataFrame(
{"Col1": [0, 0, 0, 10, 20], "Col2": [0, 0, 0, 13, 23], "Col3": [0, 0, 0, 17, 27]},
index=pdf.index,
)
self.assert_eq(pdf1, psdf.shift(periods=3, fill_value=0))
msg = "should be an int"
with self.assertRaisesRegex(TypeError, msg):
psdf.shift(1.5)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "Col1"), ("x", "Col2"), ("y", "Col3")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.shift(3), psdf.shift(3))
self.assert_eq(pdf.shift().shift(-1), psdf.shift().shift(-1))
def test_diff(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.diff(), psdf.diff())
self.assert_eq(pdf.diff().diff(-1), psdf.diff().diff(-1))
self.assert_eq(pdf.diff().sum().astype(int), psdf.diff().sum())
msg = "should be an int"
with self.assertRaisesRegex(TypeError, msg):
psdf.diff(1.5)
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.diff(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "Col1"), ("x", "Col2"), ("y", "Col3")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.diff(), psdf.diff())
def test_duplicated(self):
pdf = pd.DataFrame(
{"a": [1, 1, 2, 3], "b": [1, 1, 1, 4], "c": [1, 1, 1, 5]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(keep="last").sort_index(),
psdf.duplicated(keep="last").sort_index(),
)
self.assert_eq(
pdf.duplicated(keep=False).sort_index(),
psdf.duplicated(keep=False).sort_index(),
)
self.assert_eq(
pdf.duplicated(subset="b").sort_index(),
psdf.duplicated(subset="b").sort_index(),
)
self.assert_eq(
pdf.duplicated(subset=["b"]).sort_index(),
psdf.duplicated(subset=["b"]).sort_index(),
)
with self.assertRaisesRegex(ValueError, "'keep' only supports 'first', 'last' and False"):
psdf.duplicated(keep="false")
with self.assertRaisesRegex(KeyError, "'d'"):
psdf.duplicated(subset=["d"])
pdf.index.name = "x"
psdf.index.name = "x"
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
# multi-index
self.assert_eq(
pdf.set_index("a", append=True).duplicated().sort_index(),
psdf.set_index("a", append=True).duplicated().sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).duplicated(keep=False).sort_index(),
psdf.set_index("a", append=True).duplicated(keep=False).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).duplicated(subset=["b"]).sort_index(),
psdf.set_index("a", append=True).duplicated(subset=["b"]).sort_index(),
)
        # multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(subset=("x", "b")).sort_index(),
psdf.duplicated(subset=("x", "b")).sort_index(),
)
self.assert_eq(
pdf.duplicated(subset=[("x", "b")]).sort_index(),
psdf.duplicated(subset=[("x", "b")]).sort_index(),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 1, 2, 3], 20: [1, 1, 1, 4], 30: [1, 1, 1, 5]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(subset=10).sort_index(),
psdf.duplicated(subset=10).sort_index(),
)
def test_ffill(self):
idx = np.random.rand(6)
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=idx,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.ffill(), pdf.ffill())
self.assert_eq(psdf.ffill(limit=1), pdf.ffill(limit=1))
pser = pdf.y
psser = psdf.y
psdf.ffill(inplace=True)
pdf.ffill(inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
self.assert_eq(psser[idx[2]], pser[idx[2]])
def test_bfill(self):
idx = np.random.rand(6)
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=idx,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.bfill(), pdf.bfill())
self.assert_eq(psdf.bfill(limit=1), pdf.bfill(limit=1))
pser = pdf.x
psser = psdf.x
psdf.bfill(inplace=True)
pdf.bfill(inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
self.assert_eq(psser[idx[0]], pser[idx[0]])
def test_filter(self):
pdf = pd.DataFrame(
{
"aa": ["aa", "bd", "bc", "ab", "ce"],
"ba": [1, 2, 3, 4, 5],
"cb": [1.0, 2.0, 3.0, 4.0, 5.0],
"db": [1.0, np.nan, 3.0, np.nan, 5.0],
}
)
pdf = pdf.set_index("aa")
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
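        # With compute.isin_limit set to 0 the items list exceeds the limit, so the
        # filter presumably takes the join-based path instead of Column.isin.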
with option_context("compute.isin_limit", 0):
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
self.assert_eq(
psdf.filter(items=["ba", "db"], axis=1).sort_index(),
pdf.filter(items=["ba", "db"], axis=1).sort_index(),
)
self.assert_eq(psdf.filter(like="b", axis="index"), pdf.filter(like="b", axis="index"))
self.assert_eq(psdf.filter(like="c", axis="columns"), pdf.filter(like="c", axis="columns"))
self.assert_eq(
psdf.filter(regex="b.*", axis="index"), pdf.filter(regex="b.*", axis="index")
)
self.assert_eq(
psdf.filter(regex="b.*", axis="columns"), pdf.filter(regex="b.*", axis="columns")
)
pdf = pdf.set_index("ba", append=True)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.filter(items=[("aa", 1), ("bd", 2)], axis=0).sort_index(),
pdf.filter(items=[("aa", 1), ("bd", 2)], axis=0).sort_index(),
)
with self.assertRaisesRegex(TypeError, "Unsupported type list"):
psdf.filter(items=[["aa", 1], ("bd", 2)], axis=0)
with self.assertRaisesRegex(ValueError, "The item should not be empty."):
psdf.filter(items=[(), ("bd", 2)], axis=0)
self.assert_eq(psdf.filter(like="b", axis=0), pdf.filter(like="b", axis=0))
self.assert_eq(psdf.filter(regex="b.*", axis=0), pdf.filter(regex="b.*", axis=0))
with self.assertRaisesRegex(ValueError, "items should be a list-like object"):
psdf.filter(items="b")
with self.assertRaisesRegex(ValueError, "No axis named"):
psdf.filter(regex="b.*", axis=123)
with self.assertRaisesRegex(TypeError, "Must pass either `items`, `like`"):
psdf.filter()
with self.assertRaisesRegex(TypeError, "mutually exclusive"):
psdf.filter(regex="b.*", like="aaa")
# multi-index columns
pdf = pd.DataFrame(
{
("x", "aa"): ["aa", "ab", "bc", "bd", "ce"],
("x", "ba"): [1, 2, 3, 4, 5],
("y", "cb"): [1.0, 2.0, 3.0, 4.0, 5.0],
("z", "db"): [1.0, np.nan, 3.0, np.nan, 5.0],
}
)
pdf = pdf.set_index(("x", "aa"))
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
self.assert_eq(
psdf.filter(items=[("x", "ba"), ("z", "db")], axis=1).sort_index(),
pdf.filter(items=[("x", "ba"), ("z", "db")], axis=1).sort_index(),
)
self.assert_eq(psdf.filter(like="b", axis="index"), pdf.filter(like="b", axis="index"))
self.assert_eq(psdf.filter(like="c", axis="columns"), pdf.filter(like="c", axis="columns"))
self.assert_eq(
psdf.filter(regex="b.*", axis="index"), pdf.filter(regex="b.*", axis="index")
)
self.assert_eq(
psdf.filter(regex="b.*", axis="columns"), pdf.filter(regex="b.*", axis="columns")
)
def test_pipe(self):
psdf = ps.DataFrame(
{"category": ["A", "A", "B"], "col1": [1, 2, 3], "col2": [4, 5, 6]},
columns=["category", "col1", "col2"],
)
self.assertRaisesRegex(
ValueError,
"arg is both the pipe target and a keyword argument",
lambda: psdf.pipe((lambda x: x, "arg"), arg="1"),
)
def test_transform(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
self.assert_eq(
psdf.transform(lambda x, y: x + y, y=2).sort_index(),
pdf.transform(lambda x, y: x + y, y=2).sort_index(),
)
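        # compute.shortcut_limit is (roughly) the row threshold below which the result
        # may be computed with plain pandas; 500 < 600 rows forces the distributed path.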
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
self.assert_eq(
psdf.transform(lambda x, y: x + y, y=1).sort_index(),
pdf.transform(lambda x, y: x + y, y=1).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.transform(1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
def test_apply(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
pdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, b=1).sort_index(),
pdf.apply(lambda x, b: x + b, b=1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
pdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, b=1).sort_index(),
pdf.apply(lambda x, b: x + b, b=1).sort_index(),
)
# returning a Series
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
pdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
pdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.apply(1)
with self.assertRaisesRegex(TypeError, "The given function.*1 or 'column'; however"):
def f1(_) -> ps.DataFrame[int]:
pass
psdf.apply(f1, axis=0)
with self.assertRaisesRegex(TypeError, "The given function.*0 or 'index'; however"):
def f2(_) -> ps.Series[int]:
pass
psdf.apply(f2, axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
# returning a Series
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
def test_apply_with_type(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
def identify1(x) -> ps.DataFrame[int, int]:
return x
# Type hints set the default column names, and we use default index for
        # pandas API on Spark. Here we ignore both differences.
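        # e.g. ps.DataFrame[int, int] only carries dtypes, so the result gets the
        # default column names "c0", "c1" and a fresh default index.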
actual = psdf.apply(identify1, axis=1)
expected = pdf.apply(identify1, axis=1)
self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy()))
def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405
return x
actual = psdf.apply(identify2, axis=1)
expected = pdf.apply(identify2, axis=1)
self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy()))
def test_apply_batch(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf, a: pdf + a, args=(1,)).sort_index(),
(pdf + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf: pdf + 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf, b: pdf + b, b=1).sort_index(),
(pdf + 1).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.pandas_on_spark.apply_batch(1)
with self.assertRaisesRegex(TypeError, "The given function.*frame as its type hints"):
def f2(_) -> ps.Series[int]:
pass
psdf.pandas_on_spark.apply_batch(f2)
with self.assertRaisesRegex(ValueError, "The given function should return a frame"):
psdf.pandas_on_spark.apply_batch(lambda pdf: 1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda x: x + 1).sort_index(), (pdf + 1).sort_index()
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda x: x + 1).sort_index(),
(pdf + 1).sort_index(),
)
def test_apply_batch_with_type(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
def identify1(x) -> ps.DataFrame[int, int]:
return x
# Type hints set the default column names, and we use default index for
        # pandas API on Spark. Here we ignore both differences.
actual = psdf.pandas_on_spark.apply_batch(identify1)
expected = pdf
self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy()))
def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405
return x
actual = psdf.pandas_on_spark.apply_batch(identify2)
expected = pdf
self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy()))
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [[e] for e in [4, 5, 6, 3, 2, 1, 0, 0, 0]]},
index=np.random.rand(9),
)
psdf = ps.from_pandas(pdf)
def identify3(x) -> ps.DataFrame[float, [int, List[int]]]:
return x
actual = psdf.pandas_on_spark.apply_batch(identify3)
actual.columns = ["a", "b"]
self.assert_eq(actual, pdf)
# For NumPy typing, NumPy version should be 1.21+ and Python version should be 3.8+
if sys.version_info >= (3, 8) and LooseVersion(np.__version__) >= LooseVersion("1.21"):
import numpy.typing as ntp
psdf = ps.from_pandas(pdf)
def identify4(
x,
) -> ps.DataFrame[float, [int, ntp.NDArray[int]]]: # type: ignore[name-defined]
return x
actual = psdf.pandas_on_spark.apply_batch(identify4)
actual.columns = ["a", "b"]
self.assert_eq(actual, pdf)
arrays = [[1, 2, 3, 4, 5, 6, 7, 8, 9], ["a", "b", "c", "d", "e", "f", "g", "h", "i"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [[e] for e in [4, 5, 6, 3, 2, 1, 0, 0, 0]]},
index=idx,
)
psdf = ps.from_pandas(pdf)
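        # Note: identify4 is redefined here; the [int, str] index hint now describes
        # the two MultiIndex levels ("number", "color").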
def identify4(x) -> ps.DataFrame[[int, str], [int, List[int]]]:
return x
actual = psdf.pandas_on_spark.apply_batch(identify4)
actual.index.names = ["number", "color"]
actual.columns = ["a", "b"]
self.assert_eq(actual, pdf)
def identify5(
x,
) -> ps.DataFrame[
[("number", int), ("color", str)], [("a", int), ("b", List[int])] # noqa: F405
]:
return x
actual = psdf.pandas_on_spark.apply_batch(identify5)
self.assert_eq(actual, pdf)
def test_transform_batch(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.c + 1).sort_index(),
(pdf.c + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf + a, 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf.c + a, a=1).sort_index(),
(pdf.c + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf + 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.b + 1).sort_index(),
(pdf.b + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf + a, 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf.c + a, a=1).sort_index(),
(pdf.c + 1).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.pandas_on_spark.transform_batch(1)
with self.assertRaisesRegex(ValueError, "The given function should return a frame"):
psdf.pandas_on_spark.transform_batch(lambda pdf: 1)
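        # transform_batch is expected to be length-preserving, so an aggregated
        # result such as pd.Series(1) is rejected below.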
with self.assertRaisesRegex(
ValueError, "transform_batch cannot produce aggregated results"
):
psdf.pandas_on_spark.transform_batch(lambda pdf: pd.Series(1))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda x: x + 1).sort_index(),
(pdf + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda x: x + 1).sort_index(),
(pdf + 1).sort_index(),
)
def test_transform_batch_with_type(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
def identify1(x) -> ps.DataFrame[int, int]:
return x
# Type hints set the default column names, and we use default index for
        # pandas API on Spark. Here we ignore both differences.
actual = psdf.pandas_on_spark.transform_batch(identify1)
expected = pdf
self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy()))
def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405
return x
actual = psdf.pandas_on_spark.transform_batch(identify2)
expected = pdf
self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy()))
def test_transform_batch_same_anchor(self):
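        # "Same anchor" means the transform_batch result is assigned back to the
        # DataFrame it was computed from, so the new column should stay row-aligned
        # without an extra join.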
psdf = ps.range(10)
psdf["d"] = psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.id + 1)
self.assert_eq(
psdf,
pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]),
)
psdf = ps.range(10)
def plus_one(pdf) -> ps.Series[np.int64]:
return pdf.id + 1
psdf["d"] = psdf.pandas_on_spark.transform_batch(plus_one)
self.assert_eq(
psdf,
pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]),
)
psdf = ps.range(10)
def plus_one(ser) -> ps.Series[np.int64]:
return ser + 1
psdf["d"] = psdf.id.pandas_on_spark.transform_batch(plus_one)
self.assert_eq(
psdf,
pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]),
)
def test_empty_timestamp(self):
pdf = pd.DataFrame(
{
"t": [
datetime(2019, 1, 1, 0, 0, 0),
datetime(2019, 1, 2, 0, 0, 0),
datetime(2019, 1, 3, 0, 0, 0),
]
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf[psdf["t"] != psdf["t"]], pdf[pdf["t"] != pdf["t"]])
self.assert_eq(psdf[psdf["t"] != psdf["t"]].dtypes, pdf[pdf["t"] != pdf["t"]].dtypes)
def test_to_spark(self):
psdf = ps.from_pandas(self.pdf)
with self.assertRaisesRegex(ValueError, "'index_col' cannot be overlapped"):
psdf.to_spark(index_col="a")
with self.assertRaisesRegex(ValueError, "length of index columns.*1.*3"):
psdf.to_spark(index_col=["x", "y", "z"])
def test_keys(self):
pdf = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.keys(), pdf.keys())
def test_quantile(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5))
self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75]))
self.assert_eq(psdf.loc[[]].quantile(0.5), pdf.loc[[]].quantile(0.5))
self.assert_eq(
psdf.loc[[]].quantile([0.25, 0.5, 0.75]), pdf.loc[[]].quantile([0.25, 0.5, 0.75])
)
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.quantile(0.5, axis=1)
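        # quantile here is approximate (backed by Spark's approx_percentile), hence
        # the extra `accuracy` parameter validated below.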
with self.assertRaisesRegex(TypeError, "accuracy must be an integer; however"):
psdf.quantile(accuracy="a")
with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"):
psdf.quantile(q="a")
with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"):
psdf.quantile(q=["a"])
with self.assertRaisesRegex(
ValueError, r"percentiles should all be in the interval \[0, 1\]"
):
psdf.quantile(q=[1.1])
self.assert_eq(
psdf.quantile(0.5, numeric_only=False), pdf.quantile(0.5, numeric_only=False)
)
self.assert_eq(
psdf.quantile([0.25, 0.5, 0.75], numeric_only=False),
pdf.quantile([0.25, 0.5, 0.75], numeric_only=False),
)
# multi-index column
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5))
self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75]))
pdf = pd.DataFrame({"x": ["a", "b", "c"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5))
self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75]))
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.quantile(0.5, numeric_only=False)
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.quantile([0.25, 0.5, 0.75], numeric_only=False)
def test_pct_change(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0], "c": [300, 200, 400, 200]},
index=np.random.rand(4),
)
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.pct_change(2), pdf.pct_change(2), check_exact=False)
self.assert_eq(psdf.pct_change().sum(), pdf.pct_change().sum(), check_exact=False)
def test_where(self):
pdf, psdf = self.df_pair
# pandas requires `axis` argument when the `other` is Series.
# `axis` is not fully supported yet in pandas-on-Spark.
self.assert_eq(
psdf.where(psdf > 2, psdf.a + 10, axis=0), pdf.where(pdf > 2, pdf.a + 10, axis=0)
)
with self.assertRaisesRegex(TypeError, "type of cond must be a DataFrame or Series"):
psdf.where(1)
def test_mask(self):
psdf = ps.from_pandas(self.pdf)
with self.assertRaisesRegex(TypeError, "type of cond must be a DataFrame or Series"):
psdf.mask(1)
def test_query(self):
pdf = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2), "C": range(10, 5, -1)})
psdf = ps.from_pandas(pdf)
exprs = ("A > B", "A < C", "C == B")
for expr in exprs:
self.assert_eq(psdf.query(expr), pdf.query(expr))
# test `inplace=True`
for expr in exprs:
dummy_psdf = psdf.copy()
dummy_pdf = pdf.copy()
pser = dummy_pdf.A
psser = dummy_psdf.A
dummy_pdf.query(expr, inplace=True)
dummy_psdf.query(expr, inplace=True)
self.assert_eq(dummy_psdf, dummy_pdf)
self.assert_eq(psser, pser)
# invalid values for `expr`
invalid_exprs = (1, 1.0, (exprs[0],), [exprs[0]])
for expr in invalid_exprs:
with self.assertRaisesRegex(
TypeError,
"expr must be a string to be evaluated, {} given".format(type(expr).__name__),
):
psdf.query(expr)
# invalid values for `inplace`
invalid_inplaces = (1, 0, "True", "False")
for inplace in invalid_inplaces:
with self.assertRaisesRegex(
TypeError,
'For argument "inplace" expected type bool, received type {}.'.format(
type(inplace).__name__
),
):
psdf.query("a < b", inplace=inplace)
        # MultiIndex columns are not supported
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
psdf.columns = columns
with self.assertRaisesRegex(TypeError, "Doesn't support for MultiIndex columns"):
psdf.query("('A', 'Z') > ('B', 'X')")
def test_take(self):
pdf = pd.DataFrame(
{"A": range(0, 50000), "B": range(100000, 0, -2), "C": range(100000, 50000, -1)}
)
psdf = ps.from_pandas(pdf)
# axis=0 (default)
self.assert_eq(psdf.take([1, 2]).sort_index(), pdf.take([1, 2]).sort_index())
self.assert_eq(psdf.take([-1, -2]).sort_index(), pdf.take([-1, -2]).sort_index())
self.assert_eq(
psdf.take(range(100, 110)).sort_index(), pdf.take(range(100, 110)).sort_index()
)
self.assert_eq(
psdf.take(range(-110, -100)).sort_index(), pdf.take(range(-110, -100)).sort_index()
)
self.assert_eq(
psdf.take([10, 100, 1000, 10000]).sort_index(),
pdf.take([10, 100, 1000, 10000]).sort_index(),
)
self.assert_eq(
psdf.take([-10, -100, -1000, -10000]).sort_index(),
pdf.take([-10, -100, -1000, -10000]).sort_index(),
)
# axis=1
self.assert_eq(
psdf.take([1, 2], axis=1).sort_index(), pdf.take([1, 2], axis=1).sort_index()
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index()
)
self.assert_eq(
psdf.take(range(1, 3), axis=1).sort_index(),
pdf.take(range(1, 3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take(range(-1, -3), axis=1).sort_index(),
pdf.take(range(-1, -3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take([2, 1], axis=1).sort_index(),
pdf.take([2, 1], axis=1).sort_index(),
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(),
pdf.take([-1, -2], axis=1).sort_index(),
)
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
psdf.columns = columns
pdf.columns = columns
# MultiIndex columns with axis=0 (default)
self.assert_eq(psdf.take([1, 2]).sort_index(), pdf.take([1, 2]).sort_index())
self.assert_eq(psdf.take([-1, -2]).sort_index(), pdf.take([-1, -2]).sort_index())
self.assert_eq(
psdf.take(range(100, 110)).sort_index(), pdf.take(range(100, 110)).sort_index()
)
self.assert_eq(
psdf.take(range(-110, -100)).sort_index(), pdf.take(range(-110, -100)).sort_index()
)
self.assert_eq(
psdf.take([10, 100, 1000, 10000]).sort_index(),
pdf.take([10, 100, 1000, 10000]).sort_index(),
)
self.assert_eq(
psdf.take([-10, -100, -1000, -10000]).sort_index(),
pdf.take([-10, -100, -1000, -10000]).sort_index(),
)
# axis=1
self.assert_eq(
psdf.take([1, 2], axis=1).sort_index(), pdf.take([1, 2], axis=1).sort_index()
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index()
)
self.assert_eq(
psdf.take(range(1, 3), axis=1).sort_index(),
pdf.take(range(1, 3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take(range(-1, -3), axis=1).sort_index(),
pdf.take(range(-1, -3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take([2, 1], axis=1).sort_index(),
pdf.take([2, 1], axis=1).sort_index(),
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(),
pdf.take([-1, -2], axis=1).sort_index(),
)
# Checking the type of indices.
self.assertRaises(TypeError, lambda: psdf.take(1))
self.assertRaises(TypeError, lambda: psdf.take("1"))
self.assertRaises(TypeError, lambda: psdf.take({1, 2}))
self.assertRaises(TypeError, lambda: psdf.take({1: None, 2: None}))
def test_axes(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.axes, psdf.axes)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.axes, psdf.axes)
def test_udt(self):
sparse_values = {0: 0.1, 1: 1.1}
sparse_vector = SparseVector(len(sparse_values), sparse_values)
pdf = pd.DataFrame({"a": [sparse_vector], "b": [10]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_eval(self):
pdf = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2)})
psdf = ps.from_pandas(pdf)
# operation between columns (returns Series)
self.assert_eq(pdf.eval("A + B"), psdf.eval("A + B"))
self.assert_eq(pdf.eval("A + A"), psdf.eval("A + A"))
# assignment (returns DataFrame)
self.assert_eq(pdf.eval("C = A + B"), psdf.eval("C = A + B"))
self.assert_eq(pdf.eval("A = A + A"), psdf.eval("A = A + A"))
# operation between scalars (returns scalar)
self.assert_eq(pdf.eval("1 + 1"), psdf.eval("1 + 1"))
# complicated operations with assignment
self.assert_eq(
pdf.eval("B = A + B // (100 + 200) * (500 - B) - 10.5"),
psdf.eval("B = A + B // (100 + 200) * (500 - B) - 10.5"),
)
        # inplace=True (only supported for assignment)
pdf.eval("C = A + B", inplace=True)
psdf.eval("C = A + B", inplace=True)
self.assert_eq(pdf, psdf)
pser = pdf.A
psser = psdf.A
pdf.eval("A = B + C", inplace=True)
psdf.eval("A = B + C", inplace=True)
self.assert_eq(pdf, psdf)
self.assert_eq(pser, psser)
        # multi-index columns are not supported
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b"), ("z", "c")])
psdf.columns = columns
self.assertRaises(TypeError, lambda: psdf.eval("x.a + y.b"))
@unittest.skipIf(not have_tabulate, tabulate_requirement_message)
def test_to_markdown(self):
pdf = pd.DataFrame(data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.to_markdown(), psdf.to_markdown())
def test_cache(self):
pdf = pd.DataFrame(
[(0.2, 0.3), (0.0, 0.6), (0.6, 0.0), (0.2, 0.1)], columns=["dogs", "cats"]
)
psdf = ps.from_pandas(pdf)
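        # cache() is expected to use the default deserialized memory-and-disk storage
        # level, which the repr comparison below checks.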
with psdf.spark.cache() as cached_df:
self.assert_eq(isinstance(cached_df, CachedDataFrame), True)
self.assert_eq(
repr(cached_df.spark.storage_level), repr(StorageLevel(True, True, False, True))
)
def test_persist(self):
pdf = pd.DataFrame(
[(0.2, 0.3), (0.0, 0.6), (0.6, 0.0), (0.2, 0.1)], columns=["dogs", "cats"]
)
psdf = ps.from_pandas(pdf)
storage_levels = [
StorageLevel.DISK_ONLY,
StorageLevel.MEMORY_AND_DISK,
StorageLevel.MEMORY_ONLY,
StorageLevel.OFF_HEAP,
]
for storage_level in storage_levels:
with psdf.spark.persist(storage_level) as cached_df:
self.assert_eq(isinstance(cached_df, CachedDataFrame), True)
self.assert_eq(repr(cached_df.spark.storage_level), repr(storage_level))
self.assertRaises(TypeError, lambda: psdf.spark.persist("DISK_ONLY"))
def test_squeeze(self):
axises = [None, 0, 1, "rows", "index", "columns"]
# Multiple columns
pdf = pd.DataFrame([[1, 2], [3, 4]], columns=["a", "b"], index=["x", "y"])
psdf = ps.from_pandas(pdf)
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Multiple columns with MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X")])
pdf.columns = columns
psdf.columns = columns
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with single value
pdf = pd.DataFrame([[1]], columns=["a"], index=["x"])
psdf = ps.from_pandas(pdf)
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with single value with MultiIndex column
columns = pd.MultiIndex.from_tuples([("A", "Z")])
pdf.columns = columns
psdf.columns = columns
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with multiple values
pdf = pd.DataFrame([1, 2, 3, 4], columns=["a"])
psdf = ps.from_pandas(pdf)
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with multiple values with MultiIndex column
pdf.columns = columns
psdf.columns = columns
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
def test_rfloordiv(self):
pdf = pd.DataFrame(
{"angles": [0, 3, 4], "degrees": [360, 180, 360]},
index=["circle", "triangle", "rectangle"],
columns=["angles", "degrees"],
)
psdf = ps.from_pandas(pdf)
expected_result = pdf.rfloordiv(10)
self.assert_eq(psdf.rfloordiv(10), expected_result)
def test_truncate(self):
pdf1 = pd.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[-500, -20, -1, 0, 400, 550, 1000],
)
psdf1 = ps.from_pandas(pdf1)
pdf2 = pd.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[1000, 550, 400, 0, -1, -20, -500],
)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf1.truncate(), pdf1.truncate())
self.assert_eq(psdf1.truncate(before=-20), pdf1.truncate(before=-20))
self.assert_eq(psdf1.truncate(after=400), pdf1.truncate(after=400))
self.assert_eq(psdf1.truncate(copy=False), pdf1.truncate(copy=False))
self.assert_eq(psdf1.truncate(-20, 400, copy=False), pdf1.truncate(-20, 400, copy=False))
# The bug for these tests has been fixed in pandas 1.1.0.
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
self.assert_eq(psdf2.truncate(0, 550), pdf2.truncate(0, 550))
self.assert_eq(psdf2.truncate(0, 550, copy=False), pdf2.truncate(0, 550, copy=False))
else:
expected_psdf = ps.DataFrame(
{"A": ["b", "c", "d"], "B": ["i", "j", "k"], "C": ["p", "q", "r"]},
index=[550, 400, 0],
)
self.assert_eq(psdf2.truncate(0, 550), expected_psdf)
self.assert_eq(psdf2.truncate(0, 550, copy=False), expected_psdf)
# axis = 1
self.assert_eq(psdf1.truncate(axis=1), pdf1.truncate(axis=1))
self.assert_eq(psdf1.truncate(before="B", axis=1), pdf1.truncate(before="B", axis=1))
self.assert_eq(psdf1.truncate(after="A", axis=1), pdf1.truncate(after="A", axis=1))
self.assert_eq(psdf1.truncate(copy=False, axis=1), pdf1.truncate(copy=False, axis=1))
self.assert_eq(psdf2.truncate("B", "C", axis=1), pdf2.truncate("B", "C", axis=1))
self.assert_eq(
psdf1.truncate("B", "C", copy=False, axis=1),
pdf1.truncate("B", "C", copy=False, axis=1),
)
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "Z")])
pdf1.columns = columns
psdf1.columns = columns
pdf2.columns = columns
psdf2.columns = columns
self.assert_eq(psdf1.truncate(), pdf1.truncate())
self.assert_eq(psdf1.truncate(before=-20), pdf1.truncate(before=-20))
self.assert_eq(psdf1.truncate(after=400), pdf1.truncate(after=400))
self.assert_eq(psdf1.truncate(copy=False), pdf1.truncate(copy=False))
self.assert_eq(psdf1.truncate(-20, 400, copy=False), pdf1.truncate(-20, 400, copy=False))
# The bug for these tests has been fixed in pandas 1.1.0.
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
self.assert_eq(psdf2.truncate(0, 550), pdf2.truncate(0, 550))
self.assert_eq(psdf2.truncate(0, 550, copy=False), pdf2.truncate(0, 550, copy=False))
else:
expected_psdf.columns = columns
self.assert_eq(psdf2.truncate(0, 550), expected_psdf)
self.assert_eq(psdf2.truncate(0, 550, copy=False), expected_psdf)
# axis = 1
self.assert_eq(psdf1.truncate(axis=1), pdf1.truncate(axis=1))
self.assert_eq(psdf1.truncate(before="B", axis=1), pdf1.truncate(before="B", axis=1))
self.assert_eq(psdf1.truncate(after="A", axis=1), pdf1.truncate(after="A", axis=1))
self.assert_eq(psdf1.truncate(copy=False, axis=1), pdf1.truncate(copy=False, axis=1))
self.assert_eq(psdf2.truncate("B", "C", axis=1), pdf2.truncate("B", "C", axis=1))
self.assert_eq(
psdf1.truncate("B", "C", copy=False, axis=1),
pdf1.truncate("B", "C", copy=False, axis=1),
)
# Exceptions
psdf = ps.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[-500, 100, 400, 0, -1, 550, -20],
)
msg = "truncate requires a sorted index"
with self.assertRaisesRegex(ValueError, msg):
psdf.truncate()
psdf = ps.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[-500, -20, -1, 0, 400, 550, 1000],
)
msg = "Truncate: -20 must be after 400"
with self.assertRaisesRegex(ValueError, msg):
psdf.truncate(400, -20)
msg = "Truncate: B must be after C"
with self.assertRaisesRegex(ValueError, msg):
psdf.truncate("C", "B", axis=1)
def test_explode(self):
pdf = pd.DataFrame({"A": [[-1.0, np.nan], [0.0, np.inf], [1.0, -np.inf]], "B": 1})
pdf.index.name = "index"
pdf.columns.name = "columns"
psdf = ps.from_pandas(pdf)
expected_result1 = pdf.explode("A")
expected_result2 = pdf.explode("B")
self.assert_eq(psdf.explode("A"), expected_result1, almost=True)
self.assert_eq(psdf.explode("B"), expected_result2)
self.assert_eq(psdf.explode("A").index.name, expected_result1.index.name)
self.assert_eq(psdf.explode("A").columns.name, expected_result1.columns.name)
self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"]))
# MultiIndex
midx = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"]
)
pdf.index = midx
psdf = ps.from_pandas(pdf)
expected_result1 = pdf.explode("A")
expected_result2 = pdf.explode("B")
self.assert_eq(psdf.explode("A"), expected_result1, almost=True)
self.assert_eq(psdf.explode("B"), expected_result2)
self.assert_eq(psdf.explode("A").index.names, expected_result1.index.names)
self.assert_eq(psdf.explode("A").columns.name, expected_result1.columns.name)
self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"]))
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X")], names=["column1", "column2"])
pdf.columns = columns
psdf.columns = columns
expected_result1 = pdf.explode(("A", "Z"))
expected_result2 = pdf.explode(("B", "X"))
expected_result3 = pdf.A.explode("Z")
self.assert_eq(psdf.explode(("A", "Z")), expected_result1, almost=True)
self.assert_eq(psdf.explode(("B", "X")), expected_result2)
self.assert_eq(psdf.explode(("A", "Z")).index.names, expected_result1.index.names)
self.assert_eq(psdf.explode(("A", "Z")).columns.names, expected_result1.columns.names)
self.assert_eq(psdf.A.explode("Z"), expected_result3, almost=True)
self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"]))
self.assertRaises(ValueError, lambda: psdf.explode("A"))
def test_spark_schema(self):
psdf = ps.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
columns=["a", "b", "c", "d", "e", "f"],
)
actual = psdf.spark.schema()
expected = (
StructType()
.add("a", "string", False)
.add("b", "long", False)
.add("c", "byte", False)
.add("d", "double", False)
.add("e", "boolean", False)
.add("f", "timestamp", False)
)
self.assertEqual(actual, expected)
actual = psdf.spark.schema("index")
expected = (
StructType()
.add("index", "long", False)
.add("a", "string", False)
.add("b", "long", False)
.add("c", "byte", False)
.add("d", "double", False)
.add("e", "boolean", False)
.add("f", "timestamp", False)
)
self.assertEqual(actual, expected)
def test_print_schema(self):
psdf = ps.DataFrame(
{"a": list("abc"), "b": list(range(1, 4)), "c": np.arange(3, 6).astype("i1")},
columns=["a", "b", "c"],
)
prev = sys.stdout
try:
out = StringIO()
sys.stdout = out
psdf.spark.print_schema()
actual = out.getvalue().strip()
self.assertTrue("a: string" in actual, actual)
self.assertTrue("b: long" in actual, actual)
self.assertTrue("c: byte" in actual, actual)
out = StringIO()
sys.stdout = out
psdf.spark.print_schema(index_col="index")
actual = out.getvalue().strip()
self.assertTrue("index: long" in actual, actual)
self.assertTrue("a: string" in actual, actual)
self.assertTrue("b: long" in actual, actual)
self.assertTrue("c: byte" in actual, actual)
finally:
sys.stdout = prev
def test_explain_hint(self):
psdf1 = ps.DataFrame(
{"lkey": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 5]},
columns=["lkey", "value"],
)
psdf2 = ps.DataFrame(
{"rkey": ["foo", "bar", "baz", "foo"], "value": [5, 6, 7, 8]},
columns=["rkey", "value"],
)
merged = psdf1.merge(psdf2.spark.hint("broadcast"), left_on="lkey", right_on="rkey")
prev = sys.stdout
try:
out = StringIO()
sys.stdout = out
merged.spark.explain()
actual = out.getvalue().strip()
self.assertTrue("Broadcast" in actual, actual)
finally:
sys.stdout = prev
def test_mad(self):
pdf = pd.DataFrame(
{
"A": [1, 2, None, 4, np.nan],
"B": [-0.1, 0.2, -0.3, np.nan, 0.5],
"C": ["a", "b", "c", "d", "e"],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.mad(), pdf.mad())
self.assert_eq(psdf.mad(axis=1), pdf.mad(axis=1))
with self.assertRaises(ValueError):
psdf.mad(axis=2)
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("A", "Z")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.mad(), pdf.mad())
self.assert_eq(psdf.mad(axis=1), pdf.mad(axis=1))
pdf = pd.DataFrame({"A": [True, True, False, False], "B": [True, False, False, True]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.mad(), pdf.mad())
self.assert_eq(psdf.mad(axis=1), pdf.mad(axis=1))
def test_abs(self):
pdf = pd.DataFrame({"a": [-2, -1, 0, 1]})
psdf = ps.from_pandas(pdf)
self.assert_eq(abs(psdf), abs(pdf))
self.assert_eq(np.abs(psdf), np.abs(pdf))
def test_iteritems(self):
pdf = pd.DataFrame(
{"species": ["bear", "bear", "marsupial"], "population": [1864, 22000, 80000]},
index=["panda", "polar", "koala"],
columns=["species", "population"],
)
psdf = ps.from_pandas(pdf)
for (p_name, p_items), (k_name, k_items) in zip(pdf.iteritems(), psdf.iteritems()):
self.assert_eq(p_name, k_name)
self.assert_eq(p_items, k_items)
def test_tail(self):
pdf = pd.DataFrame({"x": range(1000)})
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.tail(), psdf.tail())
self.assert_eq(pdf.tail(10), psdf.tail(10))
self.assert_eq(pdf.tail(-990), psdf.tail(-990))
self.assert_eq(pdf.tail(0), psdf.tail(0))
self.assert_eq(pdf.tail(-1001), psdf.tail(-1001))
self.assert_eq(pdf.tail(1001), psdf.tail(1001))
self.assert_eq((pdf + 1).tail(), (psdf + 1).tail())
self.assert_eq((pdf + 1).tail(10), (psdf + 1).tail(10))
self.assert_eq((pdf + 1).tail(-990), (psdf + 1).tail(-990))
self.assert_eq((pdf + 1).tail(0), (psdf + 1).tail(0))
self.assert_eq((pdf + 1).tail(-1001), (psdf + 1).tail(-1001))
self.assert_eq((pdf + 1).tail(1001), (psdf + 1).tail(1001))
with self.assertRaisesRegex(TypeError, "bad operand type for unary -: 'str'"):
psdf.tail("10")
def test_last_valid_index(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, None], "b": [1.0, 2.0, 3.0, None], "c": [100, 200, 400, None]},
index=["Q", "W", "E", "R"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.last_valid_index(), psdf.last_valid_index())
self.assert_eq(pdf[[]].last_valid_index(), psdf[[]].last_valid_index())
# MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.last_valid_index(), psdf.last_valid_index())
# Empty DataFrame
pdf = pd.Series([]).to_frame()
psdf = ps.Series([]).to_frame()
self.assert_eq(pdf.last_valid_index(), psdf.last_valid_index())
def test_last(self):
index = pd.date_range("2018-04-09", periods=4, freq="2D")
pdf = pd.DataFrame([1, 2, 3, 4], index=index)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.last("1D"), psdf.last("1D"))
self.assert_eq(pdf.last(DateOffset(days=1)), psdf.last(DateOffset(days=1)))
with self.assertRaisesRegex(TypeError, "'last' only supports a DatetimeIndex"):
ps.DataFrame([1, 2, 3, 4]).last("1D")
def test_first(self):
index = pd.date_range("2018-04-09", periods=4, freq="2D")
pdf = pd.DataFrame([1, 2, 3, 4], index=index)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.first("1D"), psdf.first("1D"))
self.assert_eq(pdf.first(DateOffset(days=1)), psdf.first(DateOffset(days=1)))
with self.assertRaisesRegex(TypeError, "'first' only supports a DatetimeIndex"):
ps.DataFrame([1, 2, 3, 4]).first("1D")
def test_first_valid_index(self):
pdf = pd.DataFrame(
{"a": [None, 2, 3, 2], "b": [None, 2.0, 3.0, 1.0], "c": [None, 200, 400, 200]},
index=["Q", "W", "E", "R"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index())
self.assert_eq(pdf[[]].first_valid_index(), psdf[[]].first_valid_index())
# MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index())
# Empty DataFrame
pdf = pd.Series([]).to_frame()
psdf = ps.Series([]).to_frame()
self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index())
pdf = pd.DataFrame(
{"a": [None, 2, 3, 2], "b": [None, 2.0, 3.0, 1.0], "c": [None, 200, 400, 200]},
index=[
datetime(2021, 1, 1),
datetime(2021, 2, 1),
datetime(2021, 3, 1),
datetime(2021, 4, 1),
],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index())
def test_product(self):
pdf = pd.DataFrame(
{"A": [1, 2, 3, 4, 5], "B": [10, 20, 30, 40, 50], "C": ["a", "b", "c", "d", "e"]}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# Named columns
pdf.columns.name = "Koalas"
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# Named MultiIndex columns
pdf.columns.names = ["Hello", "Koalas"]
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# No numeric columns
pdf = pd.DataFrame({"key": ["a", "b", "c"], "val": ["x", "y", "z"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# No numeric named columns
pdf.columns.name = "Koalas"
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), almost=True)
# No numeric MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), almost=True)
# No numeric named MultiIndex columns
pdf.columns.names = ["Hello", "Koalas"]
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), almost=True)
# All NaN columns
pdf = pd.DataFrame(
{
"A": [np.nan, np.nan, np.nan, np.nan, np.nan],
"B": [10, 20, 30, 40, 50],
"C": ["a", "b", "c", "d", "e"],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False)
# All NaN named columns
pdf.columns.name = "Koalas"
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False)
# All NaN MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False)
# All NaN named MultiIndex columns
pdf.columns.names = ["Hello", "Koalas"]
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False)
def test_from_dict(self):
data = {"row_1": [3, 2, 1, 0], "row_2": [10, 20, 30, 40]}
pdf = pd.DataFrame.from_dict(data)
psdf = ps.DataFrame.from_dict(data)
self.assert_eq(pdf, psdf)
pdf = pd.DataFrame.from_dict(data, dtype="int8")
psdf = ps.DataFrame.from_dict(data, dtype="int8")
self.assert_eq(pdf, psdf)
pdf = pd.DataFrame.from_dict(data, orient="index", columns=["A", "B", "C", "D"])
psdf = ps.DataFrame.from_dict(data, orient="index", columns=["A", "B", "C", "D"])
self.assert_eq(pdf, psdf)
def test_pad(self):
pdf = pd.DataFrame(
{
"A": [None, 3, None, None],
"B": [2, 4, None, 3],
"C": [None, None, None, 1],
"D": [0, 1, 5, 4],
},
columns=["A", "B", "C", "D"],
)
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
self.assert_eq(pdf.pad(), psdf.pad())
# Test `inplace=True`
pdf.pad(inplace=True)
psdf.pad(inplace=True)
self.assert_eq(pdf, psdf)
else:
expected = ps.DataFrame(
{
"A": [None, 3, 3, 3],
"B": [2.0, 4.0, 4.0, 3.0],
"C": [None, None, None, 1],
"D": [0, 1, 5, 4],
},
columns=["A", "B", "C", "D"],
)
self.assert_eq(expected, psdf.pad())
# Test `inplace=True`
psdf.pad(inplace=True)
self.assert_eq(expected, psdf)
def test_backfill(self):
pdf = pd.DataFrame(
{
"A": [None, 3, None, None],
"B": [2, 4, None, 3],
"C": [None, None, None, 1],
"D": [0, 1, 5, 4],
},
columns=["A", "B", "C", "D"],
)
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
self.assert_eq(pdf.backfill(), psdf.backfill())
# Test `inplace=True`
pdf.backfill(inplace=True)
psdf.backfill(inplace=True)
self.assert_eq(pdf, psdf)
else:
expected = ps.DataFrame(
{
"A": [3.0, 3.0, None, None],
"B": [2.0, 4.0, 3.0, 3.0],
"C": [1.0, 1.0, 1.0, 1.0],
"D": [0, 1, 5, 4],
},
columns=["A", "B", "C", "D"],
)
self.assert_eq(expected, psdf.backfill())
# Test `inplace=True`
psdf.backfill(inplace=True)
self.assert_eq(expected, psdf)
def test_align(self):
pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])
psdf1 = ps.from_pandas(pdf1)
for join in ["outer", "inner", "left", "right"]:
for axis in [None, 0, 1]:
psdf_l, psdf_r = psdf1.align(psdf1[["b"]], join=join, axis=axis)
pdf_l, pdf_r = pdf1.align(pdf1[["b"]], join=join, axis=axis)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psdf_r, pdf_r)
psdf_l, psdf_r = psdf1[["a"]].align(psdf1[["b", "a"]], join=join, axis=axis)
pdf_l, pdf_r = pdf1[["a"]].align(pdf1[["b", "a"]], join=join, axis=axis)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psdf_r, pdf_r)
psdf_l, psdf_r = psdf1[["b", "a"]].align(psdf1[["a"]], join=join, axis=axis)
pdf_l, pdf_r = pdf1[["b", "a"]].align(pdf1[["a"]], join=join, axis=axis)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psdf_r, pdf_r)
psdf_l, psdf_r = psdf1.align(psdf1["b"], axis=0)
pdf_l, pdf_r = pdf1.align(pdf1["b"], axis=0)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psdf_r, pdf_r)
psdf_l, psser_b = psdf1[["a"]].align(psdf1["b"], axis=0)
pdf_l, pser_b = pdf1[["a"]].align(pdf1["b"], axis=0)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psser_b, pser_b)
self.assertRaises(ValueError, lambda: psdf1.align(psdf1, join="unknown"))
self.assertRaises(ValueError, lambda: psdf1.align(psdf1["b"]))
self.assertRaises(TypeError, lambda: psdf1.align(["b"]))
self.assertRaises(NotImplementedError, lambda: psdf1.align(psdf1["b"], axis=1))
pdf2 = pd.DataFrame({"a": [4, 5, 6], "d": ["d", "e", "f"]}, index=[10, 11, 12])
psdf2 = ps.from_pandas(pdf2)
for join in ["outer", "inner", "left", "right"]:
psdf_l, psdf_r = psdf1.align(psdf2, join=join, axis=1)
pdf_l, pdf_r = pdf1.align(pdf2, join=join, axis=1)
self.assert_eq(psdf_l.sort_index(), pdf_l.sort_index())
self.assert_eq(psdf_r.sort_index(), pdf_r.sort_index())
def test_between_time(self):
idx = pd.date_range("2018-04-09", periods=4, freq="1D20min")
pdf = pd.DataFrame({"A": [1, 2, 3, 4]}, index=idx)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
pdf.index.name = "ts"
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
# Column label is 'index'
pdf.columns = pd.Index(["index"])
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
# Both index name and column label are 'index'
pdf.index.name = "index"
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
# Index name is 'index', column label is ('X', 'A')
pdf.columns = pd.MultiIndex.from_arrays([["X"], ["A"]])
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
with self.assertRaisesRegex(
NotImplementedError, "between_time currently only works for axis=0"
):
psdf.between_time("0:15", "0:45", axis=1)
psdf = ps.DataFrame({"A": [1, 2, 3, 4]})
with self.assertRaisesRegex(TypeError, "Index must be DatetimeIndex"):
psdf.between_time("0:15", "0:45")
def test_at_time(self):
idx = pd.date_range("2018-04-09", periods=4, freq="1D20min")
pdf = pd.DataFrame({"A": [1, 2, 3, 4]}, index=idx)
psdf = ps.from_pandas(pdf)
psdf.at_time("0:20")
self.assert_eq(
pdf.at_time("0:20").sort_index(),
psdf.at_time("0:20").sort_index(),
)
# Index name is 'ts'
pdf.index.name = "ts"
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.at_time("0:20").sort_index(),
psdf.at_time("0:20").sort_index(),
)
# Index name is 'ts', column label is 'index'
pdf.columns = pd.Index(["index"])
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.at_time("0:40").sort_index(),
psdf.at_time("0:40").sort_index(),
)
# Both index name and column label are 'index'
pdf.index.name = "index"
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.at_time("0:40").sort_index(),
psdf.at_time("0:40").sort_index(),
)
# Index name is 'index', column label is ('X', 'A')
pdf.columns = pd.MultiIndex.from_arrays([["X"], ["A"]])
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.at_time("0:40").sort_index(),
psdf.at_time("0:40").sort_index(),
)
with self.assertRaisesRegex(NotImplementedError, "'asof' argument is not supported"):
psdf.at_time("0:15", asof=True)
with self.assertRaisesRegex(NotImplementedError, "at_time currently only works for axis=0"):
psdf.at_time("0:15", axis=1)
psdf = ps.DataFrame({"A": [1, 2, 3, 4]})
with self.assertRaisesRegex(TypeError, "Index must be DatetimeIndex"):
psdf.at_time("0:15")
def test_astype(self):
psdf = self.psdf
msg = "Only a column name can be used for the key in a dtype mappings argument."
with self.assertRaisesRegex(KeyError, msg):
psdf.astype({"c": float})
def test_describe(self):
pdf, psdf = self.df_pair
# numeric columns
self.assert_eq(psdf.describe(), pdf.describe())
psdf.a += psdf.a
pdf.a += pdf.a
self.assert_eq(psdf.describe(), pdf.describe())
# string columns
psdf = ps.DataFrame({"A": ["a", "b", "b", "c"], "B": ["d", "e", "f", "f"]})
pdf = psdf.to_pandas()
self.assert_eq(psdf.describe(), pdf.describe().astype(str))
psdf.A += psdf.A
pdf.A += pdf.A
self.assert_eq(psdf.describe(), pdf.describe().astype(str))
# timestamp columns
psdf = ps.DataFrame(
{
"A": [
pd.Timestamp("2020-10-20"),
pd.Timestamp("2021-06-02"),
pd.Timestamp("2021-06-02"),
pd.Timestamp("2022-07-11"),
],
"B": [
pd.Timestamp("2021-11-20"),
pd.Timestamp("2023-06-02"),
| pd.Timestamp("2026-07-11") | pandas.Timestamp |
#!/usr/bin/env python
""" fnPersistence, a class which provides a storage layer for meta-data and snv distances from the
findneighbour4 system in mongodb
A component of the findNeighbour4 system for bacterial relatedness monitoring
Copyright (C) 2021 <NAME> <EMAIL>
repo: https://github.com/davidhwyllie/findNeighbour4
This program is free software: you can redistribute it and/or modify
it under the terms of the MIT License as published
by the Free Software Foundation. See <https://opensource.org/licenses/MIT>, and the LICENSE file.
"""
import bson # type: ignore
import datetime
import json
import uuid
import pandas as pd # type: ignore
import logging
import pymongo # type: ignore
import gridfs # type: ignore
import pickle
import psutil # type: ignore
import io
import statistics
import numpy as np
from typing import (
Any,
Dict,
Iterable,
List,
NoReturn,
Optional,
Set,
Tuple,
TypedDict,
Union,
)
Guid2NeighboursFormat1 = List[Union[str, int]]
Guid2NeighboursFormat3 = Union[str]
Guid2NeighboursFormat4 = Dict[str, Union[str, int]]
Guid2NeighboursFormats = Union[
Guid2NeighboursFormat1, Guid2NeighboursFormat3, Guid2NeighboursFormat4
]
class RecentDatabaseMonitoringRet(TypedDict, total=False):
recompression_data: bool
latest_stats: Dict[str, Union[int, np.float64]]
trend_stats: List[Dict[str, Any]]
class NPEncoder(json.JSONEncoder):
"""encodes Numpy types as jsonisable equivalents"""
def default(self, obj: Any) -> Any:
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NPEncoder, self).default(obj)
class fn3persistence:
"""System for persisting results from large numbers of sequences stored in FindNeighbour 3+.
Uses Mongodb.
in the current schema there are the following collections:
-'config', containing configuration information
-'refcompressedseq' contains reference compressed sequences. note that this is a gridfs 'filesystem'. Keys are guids.
-'clusters' contains a graph of inter-guid links. note that this is a gridfs 'filesystem'. Keys are names of clustering algorithms.
-'guid2meta', contains guid -> metadata
-'guid2neighbour', contains links between guids, including snv
Here, individual documents are identified by mongo-assigned unique ids.
Each document contains three keys:
{'guid':'a1234', 'rstat':'s', 'neighbours':{}}
Up to max_neighbours_per_document neighbours can be stored per document.
*max_neighbours_per_document* should be less than 5,000, because there is a max. document size in mongodb.
In debug mode, it is automatically set to 3.
If max_neighbours_per_document neighbours exist in the document, 'rstat' is set to 'f' (full).
If there is a single item only, rstat is set to 's' (single); if there are multiple items, it is set to 'm'.
Indices exist on (i) guid - allowing you to find all the documents containing guid X's neighbours and
(ii) the guid/rstat combination - allowing one to find guid X's most recent document, useful for addition.
This class provides methods to access these entities.
NOTE: regarding sharding, the most important collection is guid2neighbour.
A hashed sharding based on guid should work well when ensuring database scalability.
"""
# code handling startup and shutdown.
def __init__(
self,
connString: str,
dbname: str = "fn3_unittesting",
debug: int = 0,
config_settings: dict = {},
max_neighbours_per_document: int = 100000,
server_monitoring_min_interval_msec: int = 0,
) -> None:
"""Creates a connection to a MongoDb database.
connString : the mongoDb connection string
dbname: the name of the mongoDb database to use.
if debug = 0 or 1, the database is opened or created.
if debug = 2, any existing collections are deleted.
config_settings: only used on db creation; optional dictionary to note items in the database's config collection.
"""
self.logger = logging.getLogger()
self.logger.setLevel(logging.INFO)
# client calling mongostore should trap for connection errors etc
self.connString = connString
self.dbname = dbname
self.debug = debug
self._connect() # will raise ConnectionError if fails
# can check what exists with connection.database_names()
self.expected_collections = [
"server_monitoring",
"guid2meta",
"guid2neighbour",
"config",
"refcompressedseq.chunks",
"refcompressedseq.files",
"clusters.chunks",
"clusters.files",
"msa.chunks",
"msa.files",
"tree.chunks",
"tree.files",
"fnlock",
]
self.expected_clustering_collections = [
"clusters.chunks",
"clusters.files",
"msa.chunks",
"msa.files",
"tree.chunks",
"tree.files",
]
self.storage_technology = "mongodb"
self.using_sqlite = False
self.max_neighbours_per_document = max_neighbours_per_document
self.server_monitoring_min_interval_msec = server_monitoring_min_interval_msec
self.previous_server_monitoring_data: Dict[str, Any] = {}
self.previous_server_monitoring_time = None
# delete any pre-existing data if we are in debug mode.
if debug == 2:
self.logger.warning(
"Debug mode operational [DEBUG={0}]; deleting all data from collections.".format(
debug
)
)
self._delete_existing_clustering_data()
self._delete_existing_data()
self.max_neighbours_per_document = 3 # used for unittests
else:
self.logger.info("Using stored data in mongostore")
# create indices on guid2neighbours; note will do nothing if index already exists
ix1 = pymongo.IndexModel(
[("guid", pymongo.ASCENDING), ("rstat", pymongo.ASCENDING)],
name="by_guid_full",
)
ix2 = pymongo.IndexModel([("rstat", pymongo.ASCENDING)], name="by_rstat")
self.db["guid2neighbour"].create_indexes([ix1, ix2])
# create indices on msa and trees; note will do nothing if index already exists
ix3 = pymongo.IndexModel(
[("filename", pymongo.ASCENDING), ("uploadDate", pymongo.ASCENDING)],
name="filename_date",
)
self.db["msa.files"].create_indexes([ix3])
ix3b = pymongo.IndexModel(
[("filename", pymongo.ASCENDING), ("uploadDate", pymongo.ASCENDING)],
name="filename_date",
)
self.db["tree.files"].create_indexes([ix3b])
# create indices on guid2meta, allowing recovery of valid and invalid specimens rapidly.
ix4 = pymongo.IndexModel(
[('"sequence_meta.DNAQuality.invalid"', pymongo.ASCENDING)],
name="guid_validity",
)
ix5 = pymongo.IndexModel(
[('"sequence_meta.DNAQuality.propACTG"', pymongo.ASCENDING)],
name="guid_quality",
)
ix5b = pymongo.IndexModel(
[('"sequence_meta.DNAQuality.examinationDate"', pymongo.ASCENDING)],
name="examinationDate",
)
# note: if additional metadata is added, such as sequence names etc which might be searched for, then we need to add additional indices here.
self.db["guid2meta"].create_indexes([ix4, ix5, ix5b])
# create index on server_monitoring insert times
ix6 = pymongo.IndexModel([("context|time|time_now", pymongo.ASCENDING)])
self.db["server_monitoring"].create_indexes([ix6])
def delete_server_monitoring_entries(self, before_seconds: int) -> None:
"""deletes server monitoring entries more than before_seconds ago"""
now = datetime.datetime.now()
earliest_allowed = now - datetime.timedelta(seconds=before_seconds)
earliest_allowed_str = str(earliest_allowed.isoformat())
self.db["server_monitoring"].delete_many(
{"context|time|time_now": {"$lt": earliest_allowed_str}}
)
def summarise_stored_items(self) -> Dict[str, Any]:
"""counts how many sequences exist of various types"""
retVal = {}
collections_present = self.db.list_collection_names()
for this_collection in self.expected_collections:
if this_collection in collections_present:
res = self.db.command("collstats", this_collection)
for relevant_metric in [
"totalIndexSize",
"storageSize",
"count",
"avgObjSize",
]:
if relevant_metric in res.keys():
target_key = "dstats|{0}|{1}".format(
this_collection.replace(".", "-"), relevant_metric
)
retVal[target_key] = res[relevant_metric]
return retVal
def connect(self) -> None:
"""test whether the database is connected, and if not, tries to connect.
if the connection fails, raises pymongo.errors.ConnectionFailure"""
if not self.is_connected():
self._connect()
def _connect(self) -> None:
"""connect to the database"""
# try to close any existing session, if it exists
self.closedown()
# open new client
self.client = pymongo.MongoClient(self.connString, retryWrites=True)
self.db = self.client[self.dbname]
# open gridfs systems
self.rcs = gridfs.GridFS(self.db, collection="refcompressedseq")
self.clusters = gridfs.GridFS(self.db, collection="clusters")
self.monitor = gridfs.GridFS(self.db, collection="monitor")
self.msa = gridfs.GridFS(self.db, collection="msa")
self.tree = gridfs.GridFS(self.db, collection="tree")
# enable sharding at database level
# self.client.admin.command('enableSharding', self.dbname)
def is_connected(self) -> bool:
"""Tests whether db is connected cf
http://api.mongodb.com/python/current/api/pymongo/mongo_client.html"""
try:
# The ismaster command is cheap and does not require auth.
self.client.admin.command("ismaster")
# success
return True
except pymongo.errors.ConnectionFailure:
return False
def rotate_log(self) -> None:
"""forces rotation of the mongo log file"""
self.client.admin.command("logRotate")
def raise_error(self, token: str) -> NoReturn:
"""raises a ZeroDivisionError, with token as the message.
useful for unit tests of error logging"""
raise ZeroDivisionError(token)
def _delete_existing_data(self) -> None:
"""deletes existing data from the databases"""
for collection in self.expected_collections:
self.db[collection].delete_many({})
def _delete_existing_clustering_data(self) -> None:
"""deletes any clustering data from the databases"""
for collection in self.expected_clustering_collections:
self.db[collection].delete_many({})
def first_run(self) -> bool:
"""if there is no config entry, it is a first-run situation"""
if self.db.config.find_one({"_id": "config"}) is None:
return True
else:
return False
def __del__(self) -> None:
"""closes any existing session"""
self.closedown()
def closedown(self) -> None:
"""closes any session"""
# client object has already been destroyed on reaching here
pass
# generic routines to handle insertion and read from standard mongodb stores
def _store(self, collection: str, key: str, object: Dict[str, Any]) -> Any:
"""stores key:object in collection. It is assumed object is a dictionary. Updates if appropriate."""
if not isinstance(object, dict):
raise TypeError(" object{0} passed must be a dictionary".format(object))
object["_id"] = key
res = self.db[collection].replace_one({"_id": key}, object, upsert=True)
if res.acknowledged is not True:
raise IOError(
"Mongo {0} did not acknowledge write of data: {1}".format(
self.db, object
)
)
return res
def _load(self, collection: str, key: str) -> Any:
"""loads object from collection[key]"""
return self.db[collection].find_one({"_id": key})
def _load_ids(self, collection: str) -> Set[str]:
"""loads guids from collection"""
retVal: Set[str] = set()
for item in self.db[collection].find({}):
retVal.add(item["_id"])
return retVal
def memory_usage(self) -> Dict[str, Union[int, float]]:
"""returns memory usage by current python3 process
Uses the psutil module, as the resource module is not available in windows.
"""
memdict = psutil.virtual_memory()._asdict()
sm = {"server|mstat|" + k: v for k, v in memdict.items()}
return sm
# methods for the config collection
def config_store(self, key: str, object: Dict[str, Any]) -> Any:
"""stores object into config collection
It is assumed object is a dictionary
"""
# if "excluded" in object.keys():
# del object["excluded"]
return self._store("config", key, object)
def config_read(self, key: str) -> Any:
"""loads object from config.
It is assumed object is a dictionary"""
return self._load("config", key)
# methods for the server and database monitoring
def recent_database_monitoring(
self, max_reported: int = 100
) -> RecentDatabaseMonitoringRet:
"""computes trends in the number of records holding pairs (guid2neighbours) vs. records.
This ratio is a measure of database health. Ratios > 100 indicate the database may become very large, and query slowly"""
db_data = self.recent_server_monitoring(
selection_field="content|activity|whatprocess",
selection_string="dbManager",
max_reported=max_reported,
)
res_df = pd.DataFrame.from_dict(db_data)
retDict: RecentDatabaseMonitoringRet
if len(res_df.index) > 0:
res_df["storage_ratio"] = res_df["dstats|guid2neighbour|count"] / (
1 + res_df["dstats|guid2meta|count"]
)
res_df["context|time|time_now_dt"] = pd.to_datetime(
res_df["context|time|time_now"]
)
res_df["latest_time"] = max(res_df["context|time|time_now_dt"])
res_df["interval_seconds"] = (
res_df["context|time|time_now_dt"] - res_df["latest_time"]
).dt.total_seconds()
desired_cols = set(
[
"_id",
"storage_ratio",
"dstats|guid2neighbour|count",
"dstats|guid2meta|count",
"context|time|time_now",
"interval_seconds",
]
)
available_cols = set(res_df.columns.to_list())
select_cols = desired_cols.intersection(available_cols)
res_df = res_df[list(select_cols)] # select what we want
if len(res_df.index) > 0:
retDict = {
"recompression_data": True,
"latest_stats": {"storage_ratio": res_df.at[0, "storage_ratio"]},
"trend_stats": res_df.to_dict(orient="records"),
}
else:
retDict = {
"recompression_data": False,
"latest_stats": {"storage_ratio": 1},
} # if there's no data, record as 1 (optimal)
# store the ratio as 1 if we can't compute it
if "dstats|guid2meta|count" not in res_df.columns.tolist():
retDict["latest_stats"]["storage_ratio"] = 1
elif res_df.at[0, "dstats|guid2meta|count"] == 0:
retDict["latest_stats"]["storage_ratio"] = 1
return retDict
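# Minimal usage sketch for recent_database_monitoring (assumes a connected
# fn3persistence instance named 'p'; the threshold of 100 follows the guidance
# in the docstring above and is otherwise arbitrary):
#
#   report = p.recent_database_monitoring(max_reported=50)
#   if report["recompression_data"]:
#       ratio = report["latest_stats"]["storage_ratio"]
#       if ratio > 100:
#           logging.warning("guid2neighbour is fragmented (ratio %s); consider repacking", ratio)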
def recent_server_monitoring(
self,
max_reported: int = 100,
selection_field: Optional[str] = None,
selection_string: Optional[str] = None,
) -> List[dict]:
"""returns a list containing recent server monitoring, in reverse order (i.e. tail first).
The _id field is an integer reflecting the order added. Lowest numbers are most recent.
Inputs
max_reported - return this number of lines, at most.
selection_field - if not None, will only return lines containing selection_string
in the 'selection_field' key of the returned dictionary.
selection_string -if selection_field is not None, only returns rows if
selection_string is present in the 'selection_field' key of the
monitoring element. If None, this constraint is ignored.
"""
if not isinstance(max_reported, int):
raise TypeError(
"limit must be an integer, but it is a {0}".format(type(max_reported))
)
if not max_reported >= 0:
raise ValueError("limit must be more than or equal to zero")
if max_reported == 0:
return []
n = 0
retVal = []
if selection_field is None:
formerly_cursor = (
self.db["server_monitoring"]
.find({})
.sort("_id", pymongo.DESCENDING)
.limit(max_reported)
)
else:
formerly_cursor = (
self.db["server_monitoring"]
.find({selection_field: selection_string})
.sort("_id", pymongo.DESCENDING)
.limit(max_reported)
)
for formerly in formerly_cursor:
n += 1
formerly["_id"] = n
retVal.append(formerly)
return retVal
def server_monitoring_store(
self,
message: str = "No message provided",
what: Optional[str] = None,
guid: Optional[str] = None,
content: Dict[str, Any] = {},
) -> bool:
"""stores content, a dictionary, into the server monitoring log"""
now = dict(**content)
if what is not None:
now["content|activity|whatprocess"] = what
if guid is not None:
now["content|activity|guid"] = guid
now["context|info|message"] = message
current_time = datetime.datetime.now()
now["context|time|time_now"] = str(current_time.isoformat())
now["context|time|time_boot"] = datetime.datetime.fromtimestamp(
psutil.boot_time()
).strftime("%Y-%m-%d %H:%M:%S")
# should we write this data? We have the option not to log all messages, to prevent the store getting very full.
write_content = False
if self.previous_server_monitoring_time is None:
write_content = True # yes if this is the first record written.
else:
time_since_last_write = (
current_time - self.previous_server_monitoring_time
)  # yes if at least server_monitoring_min_interval_msec has elapsed since the last write
t = (
1000 * float(time_since_last_write.seconds)
+ float(time_since_last_write.microseconds) / 1000
)
if t >= self.server_monitoring_min_interval_msec:
write_content = True
if write_content:
self.db["server_monitoring"].insert_one(now)
self.previous_server_monitoring_time = current_time
self.previous_server_monitoring_data = now
return True
else:
return False
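# Usage sketch illustrating the write-throttling behaviour above (assumes an
# fn3persistence instance 'p' constructed with server_monitoring_min_interval_msec=2000;
# message contents are hypothetical):
#
#   p.server_monitoring_store(message="insert start", what="dbManager")     # True: first record is always written
#   p.server_monitoring_store(message="insert progress", what="dbManager")  # False if less than 2s have elapsed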
# methods for monitor, which store the contents of an html file
# in a gridFS store.
def monitor_store(self, monitoring_id: str, html: str) -> str:
"""stores the monitor output string html. Overwrites any prior object."""
self.monitor.delete(monitoring_id)
with io.BytesIO(html.encode("utf-8")) as f:
id = self.monitor.put(f, _id=monitoring_id, filename=monitoring_id)
return id
def monitor_read(self, monitoring_id: str) -> Optional[str]:
"""loads stored string (e.g. html object) from the monitor collection."""
try:
res = self.monitor.get(monitoring_id)
except gridfs.errors.NoFile:
return None
if res is None:
return None
else:
return res.read().decode("utf-8")
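# Round-trip sketch for the monitor GridFS store (assumes an fn3persistence
# instance 'p'; the id and html content are hypothetical):
#
#   p.monitor_store("server_status", "<html><body>ok</body></html>")
#   html = p.monitor_read("server_status")   # returns the stored string, or None if absent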
# methods for multisequence alignments
def msa_store(self, msa_token: str, msa: dict) -> Optional[str]:
"""stores the msa object msa under token msa_token."""
if not isinstance(msa, dict):
raise TypeError(
"Can only store dictionary objects, not {0}".format(type(msa))
)
res = self.msa.find_one({"_id": msa_token})
if res is None:
json_repr = json.dumps(msa).encode("utf-8")
with io.BytesIO(json_repr) as f:
self.msa.put(f, _id=msa_token, filename=msa_token)
return msa_token
else:
return None
def msa_read(self, msa_token: str) -> Optional[dict]:
"""loads object from msa collection.
It is assumed object is a dictionary"""
res = self.msa.find_one({"_id": msa_token})
if res is None:
return None
json_repr = json.loads(res.read().decode("utf-8"))
return json_repr
def msa_delete(self, msa_token: str) -> None:
"""deletes the msa with token msa_token"""
self.msa.delete(msa_token)
def msa_stored_ids(self) -> List[str]:
"""returns a list of msa tokens of all objects stored"""
return [stored_msa._id for stored_msa in self.msa.find({})]
def msa_delete_unless_whitelisted(self, whitelist: Iterable[str]) -> None:
"""deletes the msa unless the id is in whitelist"""
to_delete: Set[str] = set()
for id in self.msa_stored_ids():
if id not in whitelist:
to_delete.add(id)
for msa_token in to_delete:
self.msa.delete(msa_token)
# methods for trees
def tree_store(self, tree_token: str, tree: dict) -> Optional[str]:
"""stores the tree object tree under token tree_token."""
if not isinstance(tree, dict):
raise TypeError(
"Can only store dictionary objects, not {0}".format(type(tree))
)
res = self.tree.find_one({"_id": tree_token})
if res is None:
json_repr = json.dumps(tree).encode("utf-8")
with io.BytesIO(json_repr) as f:
self.tree.put(f, _id=tree_token, filename=tree_token)
return tree_token
else:
return None
def tree_read(self, tree_token: str) -> Optional[dict]:
"""loads object from tree collection.
It is assumed object is a dictionary"""
res = self.tree.find_one({"_id": tree_token})
if res is None:
return None
json_repr = json.loads(res.read().decode("utf-8"))
return json_repr
def tree_delete(self, tree_token: str) -> None:
"""deletes the tree with token tree_token"""
self.tree.delete(tree_token)
def tree_stored_ids(self) -> List[str]:
"""returns a list of tree tokens of all objects stored"""
return [stored_tree._id for stored_tree in self.tree.find({})]
def tree_delete_unless_whitelisted(self, whitelist: Iterable[str]) -> None:
"""deletes the tree unless the id is in whitelist"""
to_delete: Set[str] = set()
for id in self.tree_stored_ids():
if id not in whitelist:
to_delete.add(id)
for tree_token in to_delete:
self.tree.delete(tree_token)
# methods for clusters
def cluster_store(self, clustering_key: str, obj: dict) -> str:
"""stores the clustering object obj. retains previous version. To clean these up, call cluster_delete_legacy.
obj: a dictionary to store
clustering_key: the name of the clustering, e.g. TBSNP12-graph
Returns:
current cluster version
Note: does not replace the previous version, but stores a new one.
cf. warning in Mongo docs:
Do not use GridFS if you need to update the content of the entire file atomically.
As an alternative you can store multiple versions of each file and specify the current version of the file in the metadata.
You can update the metadata field that indicates “latest” status in an atomic update after uploading the new version of the file,
and later remove previous versions if needed.
"""
if not isinstance(obj, dict):
raise TypeError(
"Can only store dictionary objects, not {0}".format(type(obj))
)
json_repr = json.dumps(obj, cls=NPEncoder).encode("utf-8")
with io.BytesIO(json_repr) as f:
id = self.clusters.put(f, filename=clustering_key)
return id # this is the current cluster version
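# Versioning usage sketch (assumes an fn3persistence instance 'p'; the clustering
# key and payloads are hypothetical): each cluster_store() call adds a new GridFS
# version, cluster_read() returns the newest one, and cluster_delete_legacy_by_key()
# trims the older versions.
#
#   v1 = p.cluster_store("TBSNP12-graph", {"iteration": 1})
#   v2 = p.cluster_store("TBSNP12-graph", {"iteration": 2})
#   latest = p.cluster_read("TBSNP12-graph")         # {'iteration': 2}
#   p.cluster_delete_legacy_by_key("TBSNP12-graph")  # removes the version holding iteration 1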
def cluster_read(self, clustering_key: str) -> Optional[dict]:
"""loads object from clusters collection corresponding to the most recent version of
the clustering, saved with filename = 'clustering_key'.
"""
cursor = (
self.clusters.find({"filename": clustering_key})
.sort("uploadDate", -1)
.limit(1)
)
for res in cursor:
json_repr = json.loads(res.read().decode("utf-8"))
return json_repr
# nothing there
return None
def cluster_read_update(
self, clustering_key: str, current_cluster_version: bson.objectid.ObjectId
) -> Optional[dict]:
"""loads object from clusters collection corresponding to the most recent version
of the clustering, saved with filename = 'clustering_key'.
it will read only if the current version is different from current_cluster_version; otherwise, it returns None.
It is assumed object is a dictionary"""
latest_version = self.cluster_latest_version(clustering_key)
if latest_version == current_cluster_version:
# no update
return None
else:
return self.cluster_read(clustering_key)
def cluster_latest_version(self, clustering_key: str) -> bson.objectid.ObjectId:
"""returns id of latest version"""
cursor = (
self.clusters.find({"filename": clustering_key})
.sort("uploadDate", -1)
.limit(1)
)
for res in cursor:
return res._id
return None
def cluster_keys(self, clustering_name: Optional[str] = None) -> List[str]:
"""lists clustering keys beginning with clustering_name. If clustering_name is none, all clustering keys are returned."""
cursor = self.clusters.find({})
filenames = set()
retVal = []
for res in cursor:
filenames.add(res.filename)
if (
clustering_name is not None
): # only report keys starting with clustering_name
retVal = [x for x in sorted(filenames) if x.startswith(clustering_name)]
else:
retVal = list(sorted(filenames))
return retVal
def cluster_versions(self, clustering_key: str) -> List[bson.objectid.ObjectId]:
"""lists ids and storage dates corresponding to versions of clustering identifed by clustering_key.
the newest version is first.
"""
cursor = self.clusters.find({"filename": clustering_key}).sort("uploadDate", -1)
retVal = []
for res in cursor:
retVal.append(res._id)
return retVal
def cluster_delete_all(self, clustering_key: str) -> None:
"""delete all clustering objects, including the latest version, stored under clustering_key"""
ids = self.cluster_versions(clustering_key)
for this_id in ids:
self.clusters.delete(this_id)
def cluster_delete_legacy_by_key(self, clustering_key: str) -> None:
"""delete all clustering objects, except latest version, stored with key clustering_key"""
ids = self.cluster_versions(clustering_key)
ids = ids[1:]
for i, this_id in enumerate(ids):
logging.info(
"Removing historical data for {0} {1} / {2}".format(
clustering_key, i, len(ids)
)
)
self.clusters.delete(this_id)
def cluster_delete_legacy(self, clustering_name: str) -> None:
"""delete all clustering objects, except latest version, stored with clustering_name"""
clustering_keys = self.cluster_keys(clustering_name=clustering_name)
for clustering_key in clustering_keys:
self.cluster_delete_legacy_by_key(clustering_key)
def refcompressedseq_store(self, guid: str, obj: Any) -> str:
"""stores the sequence object obj with guid guid.
Raises FileExistsError
if the guid already exists."""
pickled_obj = pickle.dumps(obj, protocol=2)
res = self.db.refcompressedseq.files.find_one({"_id": guid}, {"_id": 1})
if res is not None: # it exists
raise FileExistsError("Attempting to overwrite {0}".format(guid))
id = self.rcs.put(pickled_obj, _id=guid, filename=guid)
# do a functional test to verify write
recovered_obj = self.refcompressedsequence_read(guid)
if not recovered_obj == obj:
raise IOError(
"Integrity check failed on reference compressed item write/read for {0}".format(
guid
)
)
return id
def refcompressedsequence_read(self, guid: str) -> Any:
"""loads object from refcompressedseq collection.
It is assumed object stored is a dictionary"""
res = self.rcs.find_one({"_id": guid})
if res is None:
return None
return pickle.loads(res.read())
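# Round-trip sketch for reference compressed sequences (assumes an fn3persistence
# instance 'p'; the guid and object are hypothetical). refcompressedseq_store()
# re-reads the record and raises IOError if the pickled copy does not match, and
# raises FileExistsError if the guid already exists.
#
#   obj = {"A": {1, 2}, "invalid": 0}
#   p.refcompressedseq_store("guid-0001", obj)
#   assert p.refcompressedsequence_read("guid-0001") == obj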
def refcompressedsequence_read_many(self, guids: Iterable) -> Any:
"""loads objects identified by any of guids from refcompressedseq collection.
It is assumed object stored is a dictionary
returns:
generator, which yields a tuple
(guid, referencecompressedsequence)
raises:
ValueError, if length of guids is > 1000
"""
if len(guids) > 1000:
raise ValueError("Maximum number of samples which can be sought is 1000")
results = self.rcs.find({"_id": {"$in": guids}})
for result in results:
yield (result._id, pickle.loads(result.read()))
def refcompressedsequence_read_all(self, internal_batch_size: int = 1000) -> Any:
"""loads object from refcompressedseq collection.
The objects loaded are guids.
It is assumed object stored is a dictionary
parameters:
internal_batch_size: how many samples are loaded into ram at a time. Default should be fine unless low memory
returns:
generator, which yields a tuple
(guid, referencecompressedsequence)
"""
# sanity check
if internal_batch_size < 1:
raise ValueError("Internal batch size must be >= 1")
all_guids = self.refcompressedsequence_guids()
batches = []
this_batch = []
for i, guid in enumerate(all_guids):
if i % internal_batch_size == 0 and i > 0:
batches.append(this_batch)
this_batch = []
this_batch.append(guid)
if len(this_batch) > 0:
batches.append(this_batch)
for this_batch in batches:
results = self.rcs.find({"_id": {"$in": this_batch}})
for result in results:
yield (result._id, pickle.loads(result.read()))
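# Usage sketch for the batched generator above (assumes an fn3persistence instance
# 'p'; 'process' is a placeholder for caller code). Samples are streamed in batches
# of internal_batch_size guids, so memory use stays bounded on large collections:
#
#   for guid, refcompressed in p.refcompressedsequence_read_all(internal_batch_size=500):
#       process(guid, refcompressed)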
def refcompressedsequence_guids(self) -> Set[str]:
"""loads guids from refcompressedseq collection."""
# altered syntax because the .load() syntax previously used loaded > 16MB data and failed with > 600k samples
res = self.db.refcompressedseq.files.find({}, {"_id": 1})
guids = list()
for item in res:
guids.append(item["_id"])
return set(guids)
# methods for guid2meta
def guid_annotate(self, guid: str, nameSpace: str, annotDict: dict) -> None:
"""adds multiple annotations of guid from a dictionary;
all annotations go into a namespace.
creates the record if it does not exist"""
# check whether there is an existing metadata object for this guid
metadataObj = self.db.guid2meta.find_one({"_id": guid})
if metadataObj is None:
# it doesn't exist. we create a new one.
metadataObj = {"_id": guid, "sequence_meta": {nameSpace: annotDict}}
if (
"sequence_meta" not in metadataObj.keys()
):  # this key is mandatory and is always present
metadataObj["sequence_meta"] = {}
# if the namespace does not exist as a subsidiary of sequence_meta, then we create it
if nameSpace not in metadataObj["sequence_meta"].keys():
metadataObj["sequence_meta"][nameSpace] = {}
# we add any annotations to the existing data
metadataObj["sequence_meta"][nameSpace] = {
**metadataObj["sequence_meta"][nameSpace],
**annotDict,
}
res = self.db.guid2meta.replace_one({"_id": guid}, metadataObj, upsert=True)
if res.acknowledged is not True:
raise IOError(
"Mongo {0} did not acknowledge write of data: {1}".format(
self.db, metadataObj
)
)
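# Usage sketch for guid_annotate (assumes an fn3persistence instance 'p'; the guid
# and annotation values are hypothetical). Annotations are merged into any existing
# annotations held under the same namespace:
#
#   p.guid_annotate("guid-0001", "DNAQuality", {"invalid": 0, "propACTG": 0.91})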
def guids(self) -> Set[str]:
"""returns all registered guids"""
retVal = [x["_id"] for x in self.db.guid2meta.find({}, {"_id": 1})]
return set(retVal)
def guids_added_after_sample(self, guid: str) -> Set[str]:
"""returns all guids added after a sample"""
print("*** SEARCHING FOR ", guid)
this_examination_time = self.guid_examination_time(guid)
if this_examination_time is None:
return None
return self.guids_considered_after(addition_datetime=this_examination_time)
def guids_considered_after(self, addition_datetime: datetime.datetime) -> Set[str]:
"""returns all registered guid added after addition_datetime
addition_datetime: a date of datetime class."""
if not isinstance(addition_datetime, datetime.datetime):
raise TypeError(
"addition_datetime must be a datetime.datetime value. It is {0}. Value = {1}".format(
type(addition_datetime), addition_datetime
)
)
retVal = [
x["_id"]
for x in self.db.guid2meta.find(
{
"sequence_meta.DNAQuality.examinationDate": {
"$gt": addition_datetime
}
},
{"_id": 1},
)
]
return set(retVal)
def _guids_selected_by_validity(self, validity: int) -> Set[str]:
"""returns registered guids, selected on their validity
0 = guid is valid
1 = guid is invalid
"""
if validity not in [0, 1]:
raise ValueError("Validity must be 0 or 1, not {0}".format(validity))
retVal = [
x["_id"]
for x in self.db.guid2meta.find(
{"sequence_meta.DNAQuality.invalid": validity}
)
]
return set(retVal)
def singletons(
self, method: str = "approximate", return_top: int = 1000
) -> pd.DataFrame:
"""returns guids and the number of singleton records, which
(if high numbers are present) indicates repacking is needed.
Inclusion of return_top is important for very large datasets; otherwise the query is slow.
Parameters:
method: either 'approximate' or 'exact'. For ranking samples for repacking, the approximate method is recommended.
return_top: the number of results to return. Set > number of samples to return all records. If method = 'exact', return_top is ignored and all records are returned.
The approximate method is much faster than the exact one if large numbers of singletons are present; it randomly downsamples the guid-neighbour pairs in
the guid2neighbour collection, taking 100k samples only, and the counts the number of singletons in the downsample. This is therefore a method of ranking
samples which may benefit from repacking. This sampling method returns queries in a few milliseconds in testing on large tables.
This method is not deterministic.
In the event of failure to successfully generate a sample of sufficient size (set to 100k at present),
which can happen if there are fewer singleton records than the sample size, then the exact method will be used as a fallback. If fallback of this kind occurs, only return_top samples will be returned.
The exact method computes the exact non-zero number of singletons for each sample. This requires an index scan which can be
surprisingly slow, with the query taking > 30 seconds at a table size of ~ 3 x 10^7. This method is deterministic.
Returns:
a set of sample identifiers ('guids') which contain > min_number_records singleton entries.
"""
# input validation
if not isinstance(return_top, int):
raise TypeError(
"return_top is {0}; this must be an integer not a {1}".format(
return_top, type(return_top)
)
)
if not return_top > 0:
raise TypeError(
"return_top is {0}; this must be a non zero positive integer".format(
return_top
)
)
if method not in ["exact", "approximate"]:
raise ValueError(
"Method must be either 'exact' or 'approximate', not {0}".format(method)
)
# use mongodb pipeline
# shell example: db.guid2neighbour.aggregate([ {$sample: {size: 100000}}, { $match: {rstat:'s'}}, { $sortByCount: "$guid"}, {$limit: 1000} ] )
approximate_pipeline = [
{"$sample": {"size": 100000}},
{"$match": {"rstat": "s"}},
{"$sortByCount": "$guid"},
{"$limit": return_top},
]
exact_pipeline = [{"$match": {"rstat": "s"}}, {"$sortByCount": "$guid"}]
fallback = False
if method == "approximate":
try:
results = self.db.guid2neighbour.aggregate(
approximate_pipeline, allowDiskUse=True
)
except pymongo.errors.OperationFailure:
# occurs if there are very few samples left; a random sample of the required size (in this case 100k) cannot be generated
method = "exact"
logging.info(
"mongoStore.singletons | unable to generate a random sample of the required size, due to a small numbers of singletons. Falling back to an exact method."
)
fallback = True
if method == "exact":
results = self.db.guid2neighbour.aggregate(
exact_pipeline, allowDiskUse=True
)
ret_list = []
for item in results:
ret_list.append(item)
if fallback is True and len(ret_list) > return_top:
logging.info(
"Fallback prcesses in place; restricting to top {0}. Exact search examined {1} samples with singletons".format(
return_top, len(ret_list)
)
)
ret_list = ret_list[:return_top]
ret_df = pd.DataFrame.from_records(ret_list)
ret_df.rename(columns={"_id": "guid"}, inplace=True)
if (
ret_df.empty
): # if empty, the '_id' column is not there, and the rename fails
ret_df = pd.DataFrame(columns=("guid", "count"))
ret_df.set_index("guid", drop=True, inplace=True)
return ret_df
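# Usage sketch for singletons() (assumes an fn3persistence instance 'p'): the
# returned frame is indexed by guid with a 'count' column, so it can be used
# directly to rank samples for repacking.
#
#   to_repack = p.singletons(method="approximate", return_top=100)
#   for guid in to_repack.index:
#       p.guid2neighbour_repack(guid)   # repacking helper referenced in guid2neighbour_add_links' docstring; exact signature assumed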
def guids_valid(self) -> set:
"""return all registered valid guids.
Validity is determined by the contents of the DNAQuality.invalid field, on which there is an index"""
return self._guids_selected_by_validity(0)
def guids_invalid(self) -> set:
"""return all invalid guids
Validity is determined by the contents of the DNAQuality.invalid field, on which there is an index"""
return self._guids_selected_by_validity(1)
def guid_exists(self, guid: str) -> bool:
"""checks the presence of a single guid"""
res = self.db.guid2meta.find_one({"_id": guid})
if res is None:
return False
else:
return True
def guid_valid(self, guid: str) -> int:
"""checks the validity of a single guid
Parameters:
guid: the sequence identifier
Returns
-1 The guid does not exist
0 The guid exists and the sequence is valid
1 The guid exists and the sequence is invalid
-2 The guid exists, but there is no DNAQuality.invalid key"""
res = self.db.guid2meta.find_one({"_id": guid})
if res is None:
return -1
else:
try:
return int(res["sequence_meta"]["DNAQuality"]["invalid"])
except KeyError:
return -2
def guid_examination_time(self, guid: str) -> Optional[datetime.datetime]:
"""returns the examination time for a single guid
Parameters:
guid: the sequence identifier
Returns either
The examination datetime value for this guid OR
None if the guid does not exist, or the sequence_meta.DNAQuality.examinationDate key does not exist.
"""
res = self.db.guid2meta.find_one(
{"_id": guid}, {"sequence_meta.DNAQuality.examinationDate": 1}
)
if res is None:
return None
try:
return res["sequence_meta"]["DNAQuality"]["examinationDate"]
except KeyError:
return None
def guids_considered_after_guid(self, guid: str) -> Set[str]:
"""returns all registered guids added after guid
guid: a sequence identifier"""
addition_datetime = self.guid_examination_time(guid)
if addition_datetime is None:
raise ValueError("guid is not valid: {0}".format(guid))
else:
return self.guids_considered_after(addition_datetime)
def guid_quality_check(
self, guid: str, cutoff: Union[float, int]
) -> Optional[bool]:
"""Checks whether the quality of one guid exceeds the cutoff.
If the guid does not exist, returns None.
If the guid does exist and has quality < cutoff, returns False.
Otherwise, returns True.
"""
# test input
if not type(cutoff) in [float, int]:
raise TypeError(
"Cutoff should be either floating point or integer, but it is %s"
% type(cutoff)
)
if not type(guid) == str:
raise TypeError("The guid passed should be as string, not %s" % str(guid))
# recover record, compare with quality
res = self.db.guid2meta.find_one({"_id": guid}, {"sequence_meta": 1})
if res is None: # no entry for this guid
return None
else:
try:
dnaq = res["sequence_meta"]["DNAQuality"]
except KeyError:
raise KeyError(
"DNA quality is not present in the sequence metadata {0}: {1}".format(
guid, res
)
)
# check the DNA quality metric expected is present
if "propACTG" not in dnaq.keys():
raise KeyError(
"propACTG is not present in DNAQuality namespace of guid {0}: {1}".format(
guid, dnaq
)
)
# report whether it is larger or smaller than cutoff
return dnaq["propACTG"] >= cutoff
def guid2item(
self, guidList: Optional[List[str]], namespace: str, tag: str
) -> Optional[dict]:
"""returns the annotation (such as sequence quality, which is stored as an annotation)
in namespace:tag for all guids in guidlist.
If guidList is None, all items are returned.
An error is raised if namespace and tag is not present in each record.
"""
retDict = {}
if guidList is None:
results = self.db.guid2meta.find({}, {"sequence_meta": 1})
else:
results = self.db.guid2meta.find(
{"_id": {"$in": guidList}}, {"sequence_meta": 1}
)
if results is None: # nothing found
return None
for res in results:
try:
namespace_content = res["sequence_meta"][namespace]
except KeyError:
raise KeyError(
"{2} is not present in the sequence metadata {0}: {1}".format(
guidList, res, namespace
)
)
# check the DNA quality metric expected is present
if tag not in namespace_content.keys():
raise KeyError(
"{2} is not present in {3} namespace of guid {0}: {1}".format(
guidList, namespace_content, tag, namespace
)
)
# return property
retDict[res["_id"]] = namespace_content[tag]
return retDict
def guid2ExaminationDateTime(
self, guidList: Optional[List[str]] = None
) -> Optional[dict]:
"""returns quality scores and examinationDate for all guids in guidlist. If guidList is None, all results are returned."""
return self.guid2item(guidList, "DNAQuality", "examinationDate")
def guid2quality(self, guidList: Optional[List[str]] = None) -> Optional[dict]:
"""returns quality scores for all guids in guidlist (or all samples if guidList is None)
potentially expensive query if guidList is None."""
return self.guid2item(guidList, "DNAQuality", "propACTG")
def guid2propACTG_filtered(self, cutoff: Union[int, float] = 0) -> Dict[str, float]:
"""recover guids which have good quality, > cutoff.
These are in the majority, so we run a table scan to find these.
This query is potentially very inefficient - best avoided.
"""
allresults = self.db.guid2meta.find(
{"sequence_meta.DNAQuality.propACTG": {"$gte": cutoff}},
{"_id": 1, "sequence_meta.DNAQuality.propACTG": 1},
)
retDict = {}
for item in allresults:
retDict[item["_id"]] = item["sequence_meta"]["DNAQuality"]["propACTG"]
return retDict # note: slightly different from previous api
def guid2items(
self, guidList: Optional[List[str]], namespaces: Optional[Set[str]]
) -> Optional[Dict[Any, Dict[str, Any]]]:
"""returns all annotations in namespaces, which is a list, as a pandas dataframe.
If namespaces is None, all namespaces are returned.
If guidList is None, all items are returned.
To do this, a table scan is performed - indices are not used.
"""
retDict = {}
if guidList is None:
results = self.db.guid2meta.find({}, {"sequence_meta": 1})
else:
results = self.db.guid2meta.find(
{"_id": {"$in": guidList}}, {"sequence_meta": 1}
)
if results is None: # nothing found
return None
for res in results:
row = {}
sought_namespaces = set(res["sequence_meta"].keys())
if namespaces is not None: # we only want a subset
sought_namespaces = sought_namespaces.intersection(
namespaces
) # what we want, intersect what we've got
for sought_namespace in sought_namespaces:
for tag in res["sequence_meta"][sought_namespace].keys():
col_name = "{0}:{1}".format(sought_namespace, tag)
row[col_name] = res["sequence_meta"][sought_namespace][tag]
retDict[res["_id"]] = row
return retDict
def guid_annotations(self) -> Optional[Dict[Any, Dict[str, Any]]]:
"""return all annotations of all guids"""
return self.guid2items(None, None) # no restriction by namespace or by guid.
def guid_annotation(self, guid: str) -> Optional[Dict[Any, Dict[str, Any]]]:
"""return all annotations of one guid"""
return self.guid2items([guid], None) # restriction by guid.
def guid2neighbour_add_links(
self,
guid: str,
targetguids: Dict[str, Dict[str, int]],
use_update: bool = False,
) -> Dict[str, int]:
"""adds links between guid and their neighbours ('targetguids')
Parameters:
guid: the 'source' guid for the matches eg 'guid1'
targetguids: what is guid linked to, eg
{
'guid2':{'dist':12},
'guid3':{'dist':2}
}
use_update - currently ignored, always False. Setting True yields NotImplementedError
This stores links in the guid2neighbour collection.
If use_update = True, will update target documents, adding a new link to the targetguid document.
{targetguid -> {previousguid: distance1, previousguid2: distance2}} --> {targetguid -> {previousguid: distance1, previousguid2: distance2, guid: distance}
This approach has many disadvantages
- multiple database accesses: one to find a document to update, and one to update it - may be hundreds or thousands of database connections for each insert operation
- approach is (with Mongodb) not inherently atomic. If failure occurs in the middle of the process, database can be left in an inconsistent state, requiring use of transactions (slower)
- by contrast, the no_update method (default) requires a single insert_many operation, which is much cleaner and is atomic.
- the use_update approach is not implemented
If use_update = False (default), for each new link from targetguid -> guid, a new document will be inserted linking {targetguid -> {guid: distance}}
The function guid2neighbour_repack() reduces the number of documents
required to store the same information.
A minimal metadata record is additionally created for guid in guid2meta if none exists.
Returns:
The number of records written
"""
# find guid2neighbour entry for guid.
to_insert: List[Dict[str, Any]] = []
current_m: Optional[
Dict[str, Any]
] = None  # the record linking guid -> multiple targets (forward link); created when first needed in the loop below
for targetguid in targetguids.keys():
payload = targetguids[targetguid] # a distance
## ADD REVERSE LINK
# NOTE: for the (new) guid --> targetguids, we can write a single record with many entries
# however, the other way round, we have to add multiple singleton records
# targetguid --> guid (reverse link) (singleton)
if (
use_update
): # slower: multiple update operations against the database on insert, not atomic see above
raise NotImplementedError(
"Updating method for adding links is not implemented"
)
else:
# insert a new record, which can be processed by update_many
to_insert.append(
{"guid": targetguid, "rstat": "s", "neighbours": {guid: payload}}
)
# Now add one record containing many target guids
# guid --> {many targetguids } (forward link)
if current_m is not None:
if (
len(current_m["neighbours"].keys())
>= self.max_neighbours_per_document
): # need to make another one
current_m["rstat"] = "f" # full
to_insert.append(current_m)
current_m = None
# if we don't have a current_m, which is a data structure containing what we're going to write to disc,
# either because this is the first pass through the loop, or because our previous current_m was full (see above)
# then we make one
if current_m is None:
current_m = {"guid": guid, "rstat": "m", "neighbours": {}} # make one
# add the element to current_m
current_m["neighbours"][targetguid] = payload
# now we've covered all the elements to write
# do we have a current m with data in it?
if current_m is not None:
if len(current_m["neighbours"].keys()) > 0:
# check if it's full
if (
len(current_m["neighbours"].keys())
>= self.max_neighbours_per_document
):
current_m["rstat"] = "f" # full
# check if it's actually a singleton
if len(current_m["neighbours"].keys()) == 1:
current_m["rstat"] = "s" # just one
# add to to_insert
to_insert.append(current_m)
# when complete, do update
if len(to_insert) > 0:
res = self.db.guid2neighbour.insert_many(to_insert, ordered=False)
if res.acknowledged is not True:
raise IOError(
"Mongo {0} did not acknowledge write of data: {1}".format(
self.db, to_insert
)
)
# check there is a metadata object for the guid
metadataObj = self.db.guid2meta.find_one({"_id": guid})
if metadataObj is None:
# it doesn't exist. we create a new one.
metadataObj = {
"_id": guid,
"created": {"created_at": datetime.datetime.now().isoformat()},
}
res = self.db.guid2meta.insert_one(metadataObj)  # metadataObj already carries the _id
if res.acknowledged is not True:
raise IOError(
"Mongo {0} did not acknowledge write of data: {1}".format(
self.db, metadataObj
)
)
return {"records_written": len(to_insert)}
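# Illustrative sketch (editor's addition): for a call such as
#   store.guid2neighbour_add_links("guid1", {"guid2": {"dist": 12}, "guid3": {"dist": 2}})
# with max_neighbours_per_document >= 2, the documents inserted into guid2neighbour are roughly:
#   {"guid": "guid2", "rstat": "s", "neighbours": {"guid1": {"dist": 12}}}   # reverse link, singleton
#   {"guid": "guid3", "rstat": "s", "neighbours": {"guid1": {"dist": 2}}}    # reverse link, singleton
#   {"guid": "guid1", "rstat": "m", "neighbours": {"guid2": {"dist": 12}, "guid3": {"dist": 2}}}  # forward link
# and the return value is {"records_written": 3}; 'store' is a hypothetical instance of this class.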
def _audit_storage(self, guid: str) -> Tuple[set, pd.DataFrame, dict]:
"""returns a pandas data frame containing all neighbours of guid, as stored in mongo, as well as
a summary of storage statistics and a list of _ids which could be optimised
Parameters:
guid : the identifier of a sample to repack
Returns:
A tuple containing three elements:
to_optimise: a set of mongodb record _ids which are either single, not full (m) or contain duplicate guids and so could be optimised
content_df: a dataframe containing all record _ids, the neighbouring guid, and the snp distance.
audit_stats: a dictionary containing the following:
singletons: number of singleton records
m_records: number of records which are not completely full
f_records: number of full records
f_records_with_duplicates: number of full records containing the same guid twice
neighbouring_guids: the number of neighbouring guids
neighbouring_guids_recordings: number of times a neighbouring guid is recorded. May be larger than neighbouring_guids.
Designed for internal use
"""
## audit the storage of neighbours
# read all occurrences of each neighbour and its distances into a pandas dataframe
# note that it is OK for a neighbour to be present more than once
bytes_per_record: Dict[str, List[int]] = {"s": [], "m": [], "f": []}
contents = []
for res in self.db.guid2neighbour.find({"guid": guid}):
bytes_per_record[res["rstat"]].append(len(str(res)))
location = res.copy()
del location["neighbours"]
for neighbouring_guid in res["neighbours"].keys():
storage_element = location.copy()
storage_element["guid"] = neighbouring_guid
storage_element["dist"] = res["neighbours"][neighbouring_guid]["dist"]
contents.append(storage_element)
content_df = pd.DataFrame.from_records(contents)
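# Editor's sketch (not part of the original module): the remainder of _audit_storage is not
# shown above.  Assuming content_df carries the columns built in the loop ('_id', 'rstat',
# 'guid', 'dist') and that pandas is available as pd, the audit statistics described in the
# docstring could be derived roughly as follows (f_records_with_duplicates omitted for brevity).
def _sketch_audit_stats(content_df):
    """Illustrative only: summarise neighbour-record storage for one sample."""
    per_record = content_df.drop_duplicates("_id")  # one row per stored mongo document
    return {
        "singletons": int((per_record["rstat"] == "s").sum()),
        "m_records": int((per_record["rstat"] == "m").sum()),
        "f_records": int((per_record["rstat"] == "f").sum()),
        "neighbouring_guids": int(content_df["guid"].nunique()),
        "neighbouring_guids_recordings": int(len(content_df)),
    }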
import abc
import asyncio
import concurrent.futures
import datetime
import glob
import json
import logging
import os
import shutil
import socket
import time
from concurrent.futures import ThreadPoolExecutor
from contextlib import suppress
from pathlib import Path
from typing import Any, Callable, Coroutine, Dict, List, Optional, Set, Tuple, Union
import adaptive
import cloudpickle
import jinja2
import pandas as pd
import structlog
import zmq
import zmq.asyncio
import zmq.ssh
from tinydb import Query, TinyDB
from adaptive_scheduler.scheduler import BaseScheduler
from adaptive_scheduler.utils import (
_deserialize,
_progress,
_remove_or_move_files,
_serialize,
load_parallel,
maybe_lst,
)
from adaptive_scheduler.widgets import log_explorer
ctx = zmq.asyncio.Context()
logger = logging.getLogger("adaptive_scheduler.server")
logger.setLevel(logging.INFO)
log = structlog.wrap_logger(logger)
class MaxRestartsReached(Exception):
"""Jobs can fail instantly because of an error in
your Python code, which results in jobs being started indefinitely."""
class _BaseManager(metaclass=abc.ABCMeta):
def __init__(self) -> None:
self.ioloop: Optional[asyncio.events.AbstractEventLoop] = None
self._coro: Optional[Coroutine] = None
self.task: Optional[asyncio.Task] = None
def start(self):
if self.is_started:
raise Exception(f"{self.__class__} is already started!")
self._setup()
self.ioloop = asyncio.get_event_loop()
self._coro = self._manage()
self.task = self.ioloop.create_task(self._coro)
return self
@property
def is_started(self) -> bool:
return self.task is not None
def cancel(self) -> Optional[bool]:
if self.is_started:
return self.task.cancel()
def _setup(self):
"""Is run in the beginning of `self.start`."""
pass
@abc.abstractmethod
async def _manage(self) -> None:
pass
class DatabaseManager(_BaseManager):
"""Database manager.
Parameters
----------
url : str
The url of the database manager, with the format
``tcp://ip_of_this_machine:allowed_port.``. Use `get_allowed_url`
to get a `url` that will work.
scheduler : `~adaptive_scheduler.scheduler.BaseScheduler`
A scheduler instance from `adaptive_scheduler.scheduler`.
db_fname : str
Filename of the database, e.g. 'running.json'.
learners : list of `adaptive.BaseLearner` instances
List of `learners` corresponding to `fnames`.
fnames : list
List of `fnames` corresponding to `learners`.
overwrite_db : bool, default: True
Overwrite the existing database upon starting.
Attributes
----------
failed : list
A list of entries that have failed and have been removed from the database.
"""
def __init__(
self,
url: str,
scheduler: BaseScheduler,
db_fname: str,
learners: List[adaptive.BaseLearner],
fnames: Union[List[str], List[List[str]]],
overwrite_db: bool = True,
):
super().__init__()
self.url = url
self.scheduler = scheduler
self.db_fname = db_fname
self.learners = learners
self.fnames = fnames
self.overwrite_db = overwrite_db
self.defaults = dict(
job_id=None, is_done=False, log_fname=None, job_name=None, output_logs=[]
)
self._last_reply: Union[str, Exception, None] = None
self._last_request: Optional[Tuple[str, ...]] = None
self.failed: List[Dict[str, Any]] = []
def _setup(self) -> None:
if os.path.exists(self.db_fname) and not self.overwrite_db:
return
self.create_empty_db()
def update(self, queue: Optional[Dict[str, Dict[str, str]]] = None) -> None:
"""If the ``job_id`` isn't running anymore, replace it with None."""
if queue is None:
queue = self.scheduler.queue(me_only=True)
with TinyDB(self.db_fname) as db:
failed = [
entry
for entry in db.all()
if (entry["job_id"] is not None) and (entry["job_id"] not in queue)
]
self.failed.extend(failed)
doc_ids = [e.doc_id for e in failed]
db.update({"job_id": None, "job_name": None}, doc_ids=doc_ids)
def n_done(self) -> int:
Entry = Query()
with TinyDB(self.db_fname) as db:
return db.count(Entry.is_done == True) # noqa: E711
def create_empty_db(self) -> None:
"""Create an empty database that keeps track of
``fname -> (job_id, is_done, log_fname, job_name)``.
"""
entries = [dict(fname=fname, **self.defaults) for fname in self.fnames]
if os.path.exists(self.db_fname):
os.remove(self.db_fname)
with TinyDB(self.db_fname) as db:
db.insert_multiple(entries)
def as_dicts(self) -> List[Dict[str, str]]:
with TinyDB(self.db_fname) as db:
return db.all()
def _output_logs(self, job_id: str, job_name: str):
job_id = self.scheduler.sanatize_job_id(job_id)
output_fnames = self.scheduler.output_fnames(job_name)
return [
f.replace(self.scheduler._JOB_ID_VARIABLE, job_id) for f in output_fnames
]
def _start_request(
self, job_id: str, log_fname: str, job_name: str
) -> Optional[str]:
Entry = Query()
with TinyDB(self.db_fname) as db:
if db.contains(Entry.job_id == job_id):
entry = db.get(Entry.job_id == job_id)
fname = entry["fname"] # already running
raise Exception(
f"The job_id {job_id} already exists in the database and "
f"runs {fname}. You might have forgotten to use the "
"`if __name__ == '__main__': ...` idiom in your code. Read the "
"warning in the [mpi4py](https://bit.ly/2HAk0GG) documentation."
)
entry = db.get(
(Entry.job_id == None) & (Entry.is_done == False)
) # noqa: E711
log.debug("choose fname", entry=entry)
if entry is None:
return None
db.update(
{
"job_id": job_id,
"log_fname": log_fname,
"job_name": job_name,
"output_logs": self._output_logs(job_id, job_name),
},
doc_ids=[entry.doc_id],
)
return entry["fname"]
def _stop_request(self, fname: Union[str, List[str]]) -> None:
fname = maybe_lst(fname) # if a BalancingLearner
Entry = Query()
with TinyDB(self.db_fname) as db:
reset = dict(job_id=None, is_done=True, job_name=None)
assert (
db.get(Entry.fname == fname) is not None
) # make sure the entry exists
db.update(reset, Entry.fname == fname)
def _stop_requests(self, fnames: List[Union[str, List[str]]]) -> None:
# Same as `_stop_request` but optimized for processing many `fnames` at once
fnames = {str(maybe_lst(fname)) for fname in fnames}
with TinyDB(self.db_fname) as db:
reset = dict(job_id=None, is_done=True, job_name=None)
doc_ids = [e.doc_id for e in db.all() if str(e["fname"]) in fnames]
db.update(reset, doc_ids=doc_ids)
def _dispatch(self, request: Tuple[str, ...]) -> Union[str, Exception, None]:
request_type, *request_arg = request
log.debug("got a request", request=request)
try:
if request_type == "start":
# workers send us their slurm ID for us to fill in
job_id, log_fname, job_name = request_arg
kwargs = dict(job_id=job_id, log_fname=log_fname, job_name=job_name)
# give the worker a job and send back the fname to the worker
fname = self._start_request(**kwargs)
if fname is None:
raise RuntimeError("No more learners to run in the database.")
learner = next(
l
for l, f in zip(self.learners, self.fnames)
if maybe_lst(f) == fname
)
log.debug("choose a fname", fname=fname, **kwargs)
return learner, fname
elif request_type == "stop":
fname = request_arg[0] # workers send us the fname they were given
log.debug("got a stop request", fname=fname)
self._stop_request(fname) # reset the job_id to None
return None
except Exception as e:
return e
async def _manage(self) -> None:
"""Database manager co-routine.
Returns
-------
coroutine
"""
log.debug("started database")
socket = ctx.socket(zmq.REP)
socket.bind(self.url)
try:
while True:
self._last_request = await socket.recv_serialized(_deserialize)
self._last_reply = self._dispatch(self._last_request)
await socket.send_serialized(self._last_reply, _serialize)
finally:
socket.close()
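# Protocol sketch (editor's addition, illustrative only): workers talk to the DatabaseManager over a
# zmq REQ/REP socket using _serialize/_deserialize. The request tuples handled by _dispatch are:
#   ("start", job_id, log_fname, job_name)  -> reply is (learner, fname), or an Exception on failure
#   ("stop", fname)                         -> reply is None; the corresponding database entry is marked done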
class JobManager(_BaseManager):
"""Job manager.
Parameters
----------
job_names : list
List of unique names used for the jobs with the same length as
`learners`. Note that a job name does not correspond to a specific learner.
database_manager : `DatabaseManager`
A `DatabaseManager` instance.
scheduler : `~adaptive_scheduler.scheduler.BaseScheduler`
A scheduler instance from `adaptive_scheduler.scheduler`.
interval : int, default: 30
Time in seconds between checking and starting jobs.
max_simultaneous_jobs : int, default: 5000
Maximum number of simultaneously running jobs. By default no more than 5000
jobs will be running. Keep in mind that if you do not specify a ``runner.goal``,
jobs will run forever, resulting in the jobs that were not initially started
(because of this `max_simultaneous_jobs` condition) to not ever start.
max_fails_per_job : int, default: 100
Maximum number of times that a job can fail. This is here as a fail switch
because a job might fail instantly because of a bug inside `run_script`.
The job manager will stop when
``n_started > max_fails_per_job * len(job_names)`` is true.
Attributes
----------
n_started : int
Total number of jobs started by the `JobManager`.
"""
def __init__(
self,
job_names: List[str],
database_manager: DatabaseManager,
scheduler: BaseScheduler,
interval: int = 30,
*,
max_simultaneous_jobs: int = 5000,
max_fails_per_job: int = 100,
):
super().__init__()
self.job_names = job_names
self.database_manager = database_manager
self.scheduler = scheduler
self.interval = interval
self.max_simultaneous_jobs = max_simultaneous_jobs
self.max_fails_per_job = max_fails_per_job
self.n_started = 0
@property
def max_job_starts(self) -> int:
"""Equivalent to ``self.max_fails_per_job * len(self.job_names)``"""
return self.max_fails_per_job * len(self.job_names)
def _queued(self, queue) -> Set[str]:
return {
job["job_name"]
for job in queue.values()
if job["job_name"] in self.job_names
}
async def _manage(self) -> None:
with concurrent.futures.ProcessPoolExecutor() as ex:
while True:
try:
running = self.scheduler.queue(me_only=True)
self.database_manager.update(running) # in case some jobs died
queued = self._queued(running) # running `job_name`s
not_queued = set(self.job_names) - queued
n_done = self.database_manager.n_done()
if n_done == len(self.job_names):
# we are finished!
return
else:
n_to_schedule = max(0, len(not_queued) - n_done)
not_queued = set(list(not_queued)[:n_to_schedule])
while not_queued:
# start new jobs
if len(queued) <= self.max_simultaneous_jobs:
job_name = not_queued.pop()
queued.add(job_name)
await self.ioloop.run_in_executor(
ex, self.scheduler.start_job, job_name
)
self.n_started += 1
else:
break
if self.n_started > self.max_job_starts:
raise MaxRestartsReached(
"Too many jobs failed, your Python code probably has a bug."
)
await asyncio.sleep(self.interval)
except concurrent.futures.CancelledError:
log.info("task was cancelled because of a CancelledError")
raise
except MaxRestartsReached as e:
log.exception(
"too many jobs have failed, cancelling the job manager",
n_started=self.n_started,
max_fails_per_job=self.max_fails_per_job,
max_job_starts=self.max_job_starts,
exception=str(e),
)
raise
except Exception as e:
log.exception("got exception when starting a job", exception=str(e))
await asyncio.sleep(5)
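# Worked example (editor's addition): with 10 job names and max_fails_per_job=100,
# max_job_starts == 1000, so the manager raises MaxRestartsReached once more than
# 1000 jobs have been started in total.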
def logs_with_string_or_condition(
error: Union[str, Callable[[List[str]], bool]], database_manager: DatabaseManager
) -> List[Tuple[str, List[str]]]:
"""Get jobs whose log-files contain `error` (when it is a string) or for which
the callable `error` returns True.
Parameters
----------
error : str or callable
String that is searched for or callable that is applied
to the log text. Must take a single argument, a list of
strings, and return True if the job has to be killed, or
False if not.
database_manager : `DatabaseManager`
A `DatabaseManager` instance.
Returns
-------
have_error : list of tuples
A list of ``(job_name, fnames)`` tuples for the jobs whose log-files contain the string (or satisfy the callable).
"""
if isinstance(error, str):
has_error = lambda lines: error in "".join(lines) # noqa: E731
elif callable(error):
has_error = error
else:
raise ValueError("`error` can only be a `str` or `callable`.")
def file_has_error(fname):
if not os.path.exists(fname):
return False
with open(fname) as f:
lines = f.readlines()
return has_error(lines)
have_error = []
for entry in database_manager.as_dicts():
fnames = entry["output_logs"]
if entry["job_id"] is not None and any(file_has_error(f) for f in fnames):
all_fnames = fnames + [entry["log_fname"]]
have_error.append((entry["job_name"], all_fnames))
return have_error
class KillManager(_BaseManager):
"""Kill manager.
Automatically cancel jobs that contain an error (or other condition)
in the log files.
Parameters
----------
scheduler : `~adaptive_scheduler.scheduler.BaseScheduler`
A scheduler instance from `adaptive_scheduler.scheduler`.
database_manager : `DatabaseManager`
A `DatabaseManager` instance.
error : str or callable, default: "srun: error:"
If ``error`` is a string and is found in the log files, the job will
be cancelled and restarted. If it is a callable, it is applied
to the log text. Must take a single argument, a list of
strings, and return True if the job has to be killed, or
False if not.
interval : int, default: 600
Time in seconds between checking for the condition.
max_cancel_tries : int, default: 5
Try maximum `max_cancel_tries` times to cancel a job.
move_to : str, optional
If a job is cancelled the log is either removed (if ``move_to=None``)
or moved to a folder (e.g. if ``move_to='old_logs'``).
"""
def __init__(
self,
scheduler: BaseScheduler,
database_manager: DatabaseManager,
error: Union[str, Callable[[List[str]], bool]] = "srun: error:",
interval: int = 600,
max_cancel_tries: int = 5,
move_to: Optional[str] = None,
):
super().__init__()
self.scheduler = scheduler
self.database_manager = database_manager
self.error = error
self.interval = interval
self.max_cancel_tries = max_cancel_tries
self.move_to = move_to
self.cancelled: List[str] = []
self.deleted: List[str] = []
async def _manage(self) -> None:
while True:
try:
self.database_manager.update()
failed_jobs = logs_with_string_or_condition(
self.error, self.database_manager
)
to_cancel: List[str] = []
to_delete: List[str] = []
for job_name, fnames in failed_jobs:
to_cancel.append(job_name)
to_delete.extend(fnames)
self.scheduler.cancel(
to_cancel, with_progress_bar=False, max_tries=self.max_cancel_tries
)
_remove_or_move_files(
to_delete, with_progress_bar=False, move_to=self.move_to
)
self.cancelled.extend(to_cancel)
self.deleted.extend(to_delete)
await asyncio.sleep(self.interval)
except concurrent.futures.CancelledError:
log.info("task was cancelled because of a CancelledError")
raise
except Exception as e:
log.exception("got exception in kill manager", exception=str(e))
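# Wiring sketch (editor's addition, illustrative only; the exact orchestration used by
# adaptive_scheduler may differ). The three managers are typically built around a shared
# scheduler and database manager, then started:
#   dm = DatabaseManager(url, scheduler, "running.json", learners, fnames)
#   jm = JobManager(job_names, dm, scheduler)
#   km = KillManager(scheduler, dm)
#   for m in (dm, jm, km):
#       m.start()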
def get_allowed_url() -> str:
"""Get an allowed url for the database manager.
Returns
-------
url : str
A url that can be used for the database manager, with the format
``tcp://ip_of_this_machine:allowed_port.``.
"""
ip = socket.gethostbyname(socket.gethostname())
port = zmq.ssh.tunnel.select_random_ports(1)[0]
return f"tcp://{ip}:{port}"
def _make_default_run_script(
url: str,
save_interval: int,
log_interval: int,
goal: Union[Callable[[adaptive.BaseLearner], bool], None] = None,
runner_kwargs: Optional[Dict[str, Any]] = None,
run_script_fname: str = "run_learner.py",
executor_type: str = "mpi4py",
) -> None:
default_runner_kwargs = dict(shutdown_executor=True)
runner_kwargs = dict(default_runner_kwargs, goal=goal, **(runner_kwargs or {}))
serialized_runner_kwargs = cloudpickle.dumps(runner_kwargs)
if executor_type not in ("mpi4py", "ipyparallel", "dask-mpi", "process-pool"):
raise NotImplementedError(
"Use 'ipyparallel', 'dask-mpi', 'mpi4py' or 'process-pool'."
)
if executor_type == "dask-mpi":
try:
import dask_mpi # noqa: F401
except ModuleNotFoundError as e:
msg = "You need to have 'dask-mpi' installed to use `executor_type='dask-mpi'`."
raise Exception(msg) from e
with open(Path(__file__).parent / "run_script.py.j2") as f:
empty = "".join(f.readlines())
template = jinja2.Template(empty).render(
run_script_fname=run_script_fname,
executor_type=executor_type,
url=url,
serialized_runner_kwargs=serialized_runner_kwargs,
save_interval=save_interval,
log_interval=log_interval,
)
with open(run_script_fname, "w") as f:
f.write(template)
def _get_infos(fname: str, only_last: bool = True) -> List[Dict[str, Any]]:
    status_lines: List[Dict[str, Any]] = []
with open(fname) as f:
lines = f.readlines()
for line in reversed(lines):
with suppress(Exception):
info = json.loads(line)
if info["event"] == "current status":
status_lines.append(info)
if only_last:
return status_lines
return status_lines
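# (Editor's note) _get_infos expects a JSON-lines log in which each status record looks roughly like
#   {"event": "current status", "timestamp": "2020-01-01 12:34.56", "elapsed_time": "0:05:12", ...}
# i.e. one JSON object per line; parse_log_files below relies on the 'timestamp' and
# 'elapsed_time' fields. The exact set of extra fields depends on what the run script logs.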
def parse_log_files(
job_names: List[str],
database_manager: DatabaseManager,
scheduler,
only_last: bool = True,
) -> pd.DataFrame:
"""Parse the log-files and convert them to a `~pandas.core.frame.DataFrame`.
This only works if you use `adaptive_scheduler.client_support.log_info`
inside your ``run_script``.
Parameters
----------
job_names : list
List of job names.
database_manager : `DatabaseManager`
A `DatabaseManager` instance.
scheduler : `~adaptive_scheduler.scheduler.BaseScheduler`
A scheduler instance from `adaptive_scheduler.scheduler`.
only_last : bool, default: True
Only use the last printed status message.
Returns
-------
`~pandas.core.frame.DataFrame`
"""
_queue = scheduler.queue()
database_manager.update(_queue)
infos = []
for entry in database_manager.as_dicts():
log_fname = entry["log_fname"]
if log_fname is None or not os.path.exists(log_fname):
continue
for info in _get_infos(log_fname, only_last):
info.pop("event") # this is always "current status"
info["timestamp"] = datetime.datetime.strptime(
info["timestamp"], "%Y-%m-%d %H:%M.%S"
)
info["elapsed_time"] = pd.to_timedelta(info["elapsed_time"])
info.update(entry)
infos.append(info)
for info in infos:
info_from_queue = _queue.get(info["job_id"])
if info_from_queue is None:
continue
info["state"] = info_from_queue["state"]
info["job_name"] = info_from_queue["job_name"]
return pd.DataFrame(infos)
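# Usage sketch (editor's addition): assuming job_names, database_manager and scheduler already
# exist, the parsed status log can be inspected with, e.g.
#   df = parse_log_files(job_names, database_manager, scheduler, only_last=True)
#   df.sort_values("elapsed_time", ascending=False).head()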
# -*- coding: utf-8 -*-
# ================================================================================
# ACUMOS
# ================================================================================
# Copyright © 2017 AT&T Intellectual Property & Tech Mahindra. All rights reserved.
# ================================================================================
# This Acumos software file is distributed by AT&T and Tech Mahindra
# under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ================================================================================
from __future__ import print_function
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from crome import CromeProcessor
float_cols = ["cpu_usage", "cpu_usagemhz", "mem_active", "mem_consumed", "mem_granted", "mem_usage", "net_received", "net_transmitted", "net_usage"]
trim_cols = ["DATETIMEUTC", "DATEUTC", "GLOBAL_CUSTOMER_ID", "SUBSCRIBER_NAME", "VM_ID"]
features = ['VM_mapped', 'day', 'weekday', 'hour', 'minute', 'hist-1D8H', 'hist-1D4H', 'hist-1D2H', 'hist-1D1H', 'hist-1D', 'hist-1D15m', 'hist-1D30m', 'hist-1D45m']
def remove_column_spaces (df):
replace_dict = {}
for colname in df.columns:
replace_dict[colname] = colname.replace(" ", "")
df = df.rename(index=str, columns=replace_dict)
return df
def cols_to_float (df, columns):
for colname in columns:
print (" float: ", colname)
try:
df[colname] = df[colname].apply(lambda x:np.float64(str(x).replace(",","")))
except:
pass
return df
def trim_columns (df, columns):
for colname in columns:
print (" trim: ", colname)
try:
df[colname] = df[colname].str.strip()
except:
pass
return df
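# Example sketch (editor's addition): cols_to_float strips thousands separators before casting,
# and trim_columns strips surrounding whitespace from string columns, e.g.
#   df = pd.DataFrame({"cpu_usage": ["1,234.5", "7"], "VM_ID": [" vm-a ", "vm-b"]})
#   cols_to_float(df, ["cpu_usage"])   # cpu_usage becomes float64: 1234.5, 7.0
#   trim_columns(df, ["VM_ID"])        # VM_ID becomes "vm-a", "vm-b"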
def preprocess (df, target_col, VM_list=[], max_proc=50):
print ("remove spaces")
df = remove_column_spaces(df)
print ("collect VMs")
to_trim = list(trim_cols)
df = trim_columns (df, ['VM_ID'])
to_trim.remove('VM_ID')
if not VM_list or len(VM_list) == 0:
VM_list = sorted(list(set(df['VM_ID']))) # process all VMs
else:
df = df[df['VM_ID'].isin(VM_list)]
vm_map = dict([(val, i) for i, val in enumerate(set(df['VM_ID']))])
df['VM_mapped'] = df['VM_ID'].apply(lambda x: vm_map[x])
print ("convert columns to float:", float_cols)
df = cols_to_float (df, float_cols)
print ("apply trim:", trim_cols)
df = trim_columns (df, trim_cols)
cp = CromeProcessor (target_col, feats=features)
result = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
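# (Editor's note) reader() parses the header only for the first chunk; later chunks skip
# `start` data rows (plus the header line) and are read with header=None, so their columns
# are re-labelled from the first chunk before the pieces are concatenated.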
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
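# (Editor's note) f(i, v) builds row i of an nv x nv grid in which the i-th default NA token
# sits in column i and every other field is empty, so every parsed value should be NaN.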
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
try:
read_table(f, squeeze=True, header=None, engine='c')
except Exception:
pass
else:
raise ValueError('this should not happen')
result = read_table(f, squeeze=True, header=None,
engine='python')
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_iterator(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
| tm.assert_frame_equal(chunks[0], df[1:3]) | pandas.util.testing.assert_frame_equal |
import numpy as np
import pandas as pd
def set_order(df, row):
if | pd.isnull(row['order']) | pandas.isnull |
import os
import pickle
import re
from pathlib import Path
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from numpy import interp
import thoipapy
from thoipapy.utils import make_sure_path_exists
def validate_multiple_predictors_and_subsets_auboc(s, df_set, logging):
logging.info("start create_AUBOC_43databases_figs")
predictors = ["THOIPA_{}_LOO".format(s["set_number"]), "PREDDIMER", "TMDOCK", "LIPS_surface_ranked", "random"] # "LIPS_L*E",
subsets = ["crystal", "NMR", "ETRA"]
for subset in subsets:
df_o_minus_r_mean_df = pd.DataFrame()
AUBOC_list = []
mean_AUBOC_file = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/compare_selected_predictors/data/{s['setname']}.{subset}.4predictors_mean_AUBOC.csv"
mean_AUBOC_barplot_png = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/compare_selected_predictors/figs/{s['setname']}.{subset}.4predictors_mean_AUBOC.png"
BOCURVE_linechart_csv = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/compare_selected_predictors/data/{s['setname']}.{subset}.4predictors_BOCURVE_linechart.csv"
BOCURVE_linechart_png = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/compare_selected_predictors/figs/{s['setname']}.{subset}.4predictors_BOCURVE_linechart.png"
make_sure_path_exists(BOCURVE_linechart_png, isfile=True)
make_sure_path_exists(mean_AUBOC_file, isfile=True)
for predictor in predictors:
bocurve_data_xlsx = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/indiv_validation/bocurve/data/{predictor}/bocurve_data.xlsx"
df_o_minus_r = pd.read_excel(bocurve_data_xlsx, sheet_name="df_o_minus_r", index_col=0)
df_o_minus_r = df_o_minus_r.filter(regex=subset, axis=1)
df_o_minus_r_mean = df_o_minus_r.T.mean()
# apply cutoff (e.g. 5 residues for AUBOC5)
auboc_ser = df_o_minus_r_mean.iloc[:s["n_residues_AUBOC_validation"]]
AUBOC = np.trapz(y=auboc_ser, x=auboc_ser.index)
AUBOC_list.append(AUBOC)
df_o_minus_r_mean_df = | pd.concat([df_o_minus_r_mean_df, df_o_minus_r_mean], axis=1, join="outer") | pandas.concat |
import unittest
from unittest.mock import patch
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from road_data_scraper.steps.metadata import (
create_sensor_metadata_tuples,
direction_string_cleaner,
get_sensor_urls,
get_sites_by_sensor,
name_string_cleaner,
)
class TestMetadata(unittest.TestCase):
def setUp(self):
headers = [
"id",
"name",
"description",
"longitude",
"latitude",
"status",
"direction",
"easting",
"northing",
]
# fmt: off
data_midas = [
[1,"MIDAS","M4/2295A2",-0.520380,51.493012,"Inactive","westbound",502816,178156],
[2,"MIDAS","A1M/2259B",-0.320275,52.535158,"Active","southbound",514029,294356],
[3,"MIDAS","M5/7482B",-2.175138,52.175652,"Active","northbound",388120,253057],
[4,"MIDAS","M3/2173A",-1.392374,50.960359,"Active","westbound",442769,118058],
[5,"MIDAS","M25/5764B",0.283162,51.575617,"Active","clockwise",558308,188775],
]
data_tame = [
[6304,"TAME",30360220,-0.960508,50.986164,"Active","southbound",473059,121266],
[6305,"TAME",30360221,-0.961806,50.985430,"Active","northbound",472969,121183],
[6310,"TAME",30360229,-0.832786,51.298988,"Active","westbound",481472,156187],
[6311,"TAME",30360230,-1.035767,51.262403,"Active","eastbound",467374,151913],
[6312,"TAME",30360231,-1.037151,51.262037,"Active","westbound",467278,151871],
]
data_tmu = [
[7236,"TMU","5607/1",-1.338882,51.100315,"Active","northbound",446387,133654],
[7237,"TMU","5606/2",-1.341841,51.103119,"Active","southbound",446177,133964],
[7238,"TMU","5606/1",-1.341654,51.103190,"Active","southbound",446190,133972],
[7239,"TMU","5601/2",-1.339803,51.173895,"Active","northbound",446249,141836],
[7240,"TMU","5601/1",-1.340046,51.173915,"Active","northbound",446232,141838],
]
# fmt:on
self.sensor_tables = {
"midas": | pd.DataFrame(data_midas, columns=headers) | pandas.DataFrame |
import os
import sys
import pickle
import pandas as pd
import numpy as np
from sklearn.feature_selection import chi2, SelectKBest, f_regression
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.manifold import Isomap, LocallyLinearEmbedding
import settings as project_settings
target_data_folder = project_settings.target_data_folder
features_data_folder = project_settings.features_data_folder
class_folder = project_settings.class_folder
result_folder = project_settings.result_folder
algorithm_len = project_settings.algorithm_len
sys.path.append(class_folder)
from decision_tree_singletarget import DecisionTree_Single
from decision_tree_multitarget import DecisionTree_Multi
from random_forest_singletarget import RandomForestRegressor_Single
from random_forest_multitarget import RandomForestRegressor_Multi
from dnn_model import DNN
from dnn_single_model import DNN_Single
labels =pd.read_csv(f"{target_data_folder}labels.txt",sep=';',index_col=False)
sys_min = sys.float_info.min
def create_data(df,df_perf,labels):
df2 = df_perf.assign(label = labels['x'])
df2 = df2.rename(columns={'1' : 'Precision'})
data = df.join(df2.set_index('label'))
return data
def clean_dataset(df):
assert isinstance(df, pd.DataFrame), "df needs to be a pd.DataFrame"
df.dropna(inplace=True)
indices_to_keep = ~df.isin([np.nan, np.inf, -np.inf]).any(1)
df = df[df.replace([-np.inf], sys.float_info.min).notnull().all(axis=1)]
return df[indices_to_keep].astype(np.float32)
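# Illustrative example (hypothetical values): rows containing NaN or +/-inf are
# dropped and the surviving rows are cast to float32, e.g.
# clean_dataset(pd.DataFrame({"a": [1.0, np.inf], "b": [2.0, 3.0]}))
# keeps only the first row.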
def get_data_for_algorith(algorithm, no_fold):
df_perf = pd.read_csv(f"{target_data_folder}performance_0_I{algorithm}.txt",sep='\t')
df_train = pd.read_csv(f"{features_data_folder}train_{no_fold}_fused.csv",sep='\t', index_col=0)
df_test = | pd.read_csv(f"{features_data_folder}test_{no_fold}_fused.csv",sep='\t', index_col=0) | pandas.read_csv |
"""
August 2020
<NAME>, Data Science Campus
Processes the raw JSON Play Store review file
The API returns nested JSON, with some optional values.
See the following link for the schema:
https://developers.google.com/android-publisher/api-ref/rest/v3/reviews
Access to the API is controlled through oauth2 and you will need to authenticate
to access the console automatically to download reviews. For example:
from google.oauth2 import service_account
from apiclient.discovery import build
try:
credentials = service_account.Credentials.from_service_account_info(credentials_dict)
except Exception as e:
print(e)
return
# Get list of reviews using googleapiclient.discovery
try:
    service = build('androidpublisher', 'v3', credentials=credentials)
response = service.reviews().list(packageName='uk.organisation.appname.production').execute()
except Exception as e:
print(e)
return
if not 'reviews' in response or len(response['reviews']) == 0:
print('No reviews')
else:
print('Reviews detected.')
....
The following code assumes that the reviews have been downloaded and saved to
JSON, and this has been loaded into a variable review_j
all_review_data = process_json(review_j)
# You can save the reviews to CSV file directly
save_reviews(all_review_data, define_csv_file_name())
# Or convert to a Pandas dataframe and manipulate as required
all_reviews = pd.DataFrame(all_review_data)
"""
import pandas as pd
from datetime import datetime
def process_json(review_j):
"""
Processes the nested JSON reviews (converted to dict) to a list of flat dict
Each review is processed to create a flat dict of data
Args:
review_j (dict) - the JSON reviews convert to dict
Returns:
list of dict of extracted review data
"""
    # Set up a list to hold the flattened review records
all_review_data = []
# Iterate through all review entries
for entries in review_j:
if entries == 'reviews':
reviews = review_j[entries]
# for an individual review there could be multiple comments
# Find the author and id and then create an entry for each comment
for review in reviews:
author_name = None
                review_id = None
for review_key in review:
# get the author name and id
if review_key == 'reviewId':
review_id = review[review_key]
if review_key == 'authorName':
author_name = review[review_key]
# Extract all comments
if review_key == 'comments':
# Create a new entry for each user comment
comments = review[review_key]
for comment in comments:
review_data = extract_comments(comment, review_id, author_name)
all_review_data.append(review_data)
print(f"Processed {len(all_review_data)} records")
return all_review_data
def extract_timestamp(last_modified):
    # Extracts the (seconds, nanos) pair from a lastModified timestamp dict
seconds = None
nanos = None
for t in last_modified:
if t == "seconds":
seconds = last_modified[t]
if t == "nanos":
nanos = last_modified[t]
return (seconds, nanos)
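# Illustrative example (hypothetical values): a lastModified dict such as
# {"seconds": 1598918400, "nanos": 0} yields the tuple (1598918400, 0).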
def extract_comments(comment, review_id, author_name):
"""
Extracts a single review from the nested dict to create a flattened
dict.
The JSON schema is outlined in https://developers.google.com/android-publisher/api-ref/rest/v3/reviews#Review
Args:
comment (string) - user comment for this review
review_id (string) - review id for this review
author_name (string) - author name for this review
Returns:
dict of extracted flattened data
"""
review_data = {}
# Some entries may be missing, so add them as None
review_data['review_id'] = review_id
review_data['author_name'] = author_name
review_data["android_os_version"] = None
review_data["app_version_code"] = None
review_data["app_version_name"] = None
review_data["device"] = None
review_data["reviewer_language"] = None
review_data['dev_comment_last_modified_seconds'] = None
review_data['dev_comment_last_modified_nanos'] = None
review_data['dev_comment_text'] = None
# Define the expected keys from the Review schema
    # This is a list of tuples: the first value is the key in the API response
    # and the second is the name to be used in the flattened dict
comments_keys =[
("starRating", "star_rating"),
("reviewerLanguage", "reviewer_language"),
("device", "device"),
("androidOsVersion", "android_os_version"),
("appVersionCode", "app_version_code"),
("appVersionName", "app_version_name"),
("thumbsUpCount", "thumbs_up_count"),
("thumbsDownCount", "thumbs_down_count"),
("originalText", "original_text")
]
metadata_keys = [
("productName", "device_product_name"),
("manufacturer", "device_manufacturer"),
("screenHeightPx", "device_screen_height_px"),
("screenWidthPx", "device_screen_width_px"),
("screenHeightPx","device_screen_height_px"),
("nativePlatform", "device_native_platform"),
("screenDensityDpi", "device_screen_density_dpi"),
("glEsVersion", "device_gles_version"),
("cpuModel", "device_cpu_model"),
("cpuMake", "device_cpu_make"),
("ramMb", "device_ram_mb")
]
# The raw dict is nested, so we will process each nest section
    # separately, e.g., nested sections include comments and metadata
# We move through the dictionary and look for specific keys to either
# extract directly or to identify as a nested section for extraction
    # For each user comment, extract the user comments and the dev comments
for entry in comment:
if entry == 'userComment':
# This the user comments section
user_comment = comment[entry]
for val in user_comment:
#print(val)
if val == 'text':
review_data['user_comment'] = user_comment['text']
elif val == 'lastModified':
# Extract the timestamp
s,n = extract_timestamp(user_comment['lastModified'])
review_data['user_comment_last_modified_seconds'] = s
review_data['user_comment_last_modified_nanos'] = n
elif val == 'deviceMetadata':
for n in metadata_keys:
review_data[n[1]] = extract_values(user_comment, n[0])
else:
# Extract the comment values
for n in comments_keys:
review_data[n[1]] = extract_values(user_comment, n[0])
if entry == 'developerComment':
# Developer comments
dev_comment = comment[entry]
for val in dev_comment:
#print(val)
# Extract the time stamp
if val == 'lastModified':
s,n = extract_timestamp(dev_comment['lastModified'])
review_data['dev_comment_last_modified_seconds'] = s
review_data['dev_comment_last_modified_nanos'] = n
# Extract the text
elif val == 'text':
review_data['dev_comment_text'] = dev_comment['text']
return review_data
def extract_values(obj, key):
"""Pull value of specified key from nested dict section.
Args:
obj (dict) - the dict to search through
key (string) - key to extract
Returns:
        value associated with the key, or None if the key is not found
"""
arr = []
def extract(obj, arr, key):
"""Recursively search for values of key in JSON tree."""
if isinstance(obj, dict):
for k, v in obj.items():
if isinstance(v, (dict, list)):
extract(v, arr, key)
elif k == key:
arr.append(v)
return arr
elif isinstance(obj, list):
for item in obj:
extract(item, arr, key)
return arr
results = extract(obj, arr, key)
if len(results) == 0:
return None
else:
return results[0]
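# Illustrative examples (hypothetical nested dict):
# extract_values({"deviceMetadata": {"ramMb": 4096}}, "ramMb") -> 4096
# extract_values({}, "ramMb") -> None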
def define_csv_file_name():
# defines a timestamped filename for the flattened table data
ts = datetime.now()
file_name = f"google_review_{ts.strftime('%Y%m%d_%H%M%S')}.csv"
return file_name
def save_reviews(all_reviews, file_name):
# Convert to pandas dataframe
df = pd.DataFrame(all_reviews)
# The timestamp can be converted to a date time and added as extra columns:
df['user_comment_ts'] = | pd.to_datetime(df['user_comment_last_modified_seconds'],errors='coerce', unit='s') | pandas.to_datetime |
"""
Analyze results and plot figures
"""
# Imports
#==============#
import pandas as pd
import numpy as np
import scipy
import random
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
import bioinformatics as bioinf
# Plots for HMM method 10-fold cross validation
#===============================================#
fnt='Arial'
ticks_font = {'fontname':fnt, 'size':'18'}
legend_font = {'family':fnt, 'size':'12'}
label_font = {'family':fnt, 'size':'20'}
plt.rcParams['figure.figsize'] = [6,3]
ec = 'black'
legend_label = ['AS', 'SH']
# NCBI dataset
ex = pd.read_csv('results_final/ncbi_kfold.csv')
lw = 0.25
ASs = list(ex.diff_score[:300])
SHs = list(ex.diff_score[300:])
random.shuffle(ASs)
random.shuffle(SHs)
out1 = plt.bar(range(300), ASs, color='#00BFFF', linewidth=lw,
edgecolor='#00BFFF')
out2 = plt.bar(range(300,600), SHs, color='#00FA9A', linewidth=lw,
edgecolor='#00FA9A')
pltout = [x[0] for x in [out1, out2]]
plt.xlabel('Sequence', **label_font)
plt.ylabel('Score difference', **label_font)
plt.xticks(**ticks_font)
plt.yticks([-300,-150,0,150,300], **ticks_font)
plt.xlim([-1,601])
plt.axhline(color='black', linewidth=1)
plt.legend(pltout, legend_label, prop=legend_font,
loc='upper right',frameon=False)
plt.tight_layout()
plt.rcParams['savefig.dpi'] = 300
plt.rcParams['figure.dpi'] = 300
plt.savefig('plots/ncbi_kfold.pdf')
plt.savefig('plots/ncbi_kfold.png',transparent = True)
plt.savefig('plots/ncbi_kfold.svg',format='svg',transparent = True)
plt.close()
# Table of classification/association rules
#===========================================#
from subtype_rules import GH13MSA
ASmsa = 'fasta/GH13_positions_only/AS_cat.fasta'
SHmsa = 'fasta/GH13_positions_only/SH_cat.fasta'
GH13msa = GH13MSA(ASmsa, SHmsa)
GH13msa.get_freq(include_gaps=True)
rules = pd.read_csv('results_final/rules/rules_all.csv', index_col=0)
rules_amino = pd.read_csv('results_final/rules/rules_amino.csv', index_col=0)
rules_type = pd.read_csv('results_final/rules/rules_type.csv', index_col=0)
mcc = list(rules.mcc)
min_mcc = np.percentile(mcc, 98) # mcc > 0.86
rules_mcc = rules[rules.mcc >= min_mcc]
rules_amino_mcc = rules_amino[rules_amino.mcc >= min_mcc] # 32 rules
rules_type_mcc = rules_type[rules_type.mcc >= min_mcc] # 16 rules
positions = sorted(set(rules_mcc.Np_pos)) # 39 positions
rules_mcc.to_csv('results_final/rules/rules_mcc.csv')
rules_amino_mcc.to_csv('results_final/rules/rules_amino_mcc.csv')
rules_type_mcc.to_csv('results_final/rules/rules_type_mcc.csv')
rules_amino_table = rules_amino_mcc.loc[:,['Np_pos','rule', 'closest_subsite',
'dist_subsite','sens', 'spec', 'acc', 'mcc']]
rules_amino_table.columns = ['Position', 'Rule', 'Closest subsite',
'Distance to closest subsite (Å)', 'Sensitivity',
'Specificity', 'Accuracy', 'MCC']
rules_amino_table.to_csv('plots/rules_amino_table.csv')
rules_type_table = rules_type_mcc.loc[:,['Np_pos','rule', 'closest_subsite',
'dist_subsite', 'sens', 'spec', 'acc', 'mcc']]
rules_type_table.columns = ['Position', 'Rule', 'Closest subsite',
'Distance to closest subsite (Å)', 'Sensitivity',
'Specificity', 'Accuracy', 'MCC']
rules_type_table.to_csv('plots/rules_type_table.csv')
# Plot Histogram for MCC of rules
#=================================#
fnt='Arial'
ticks_font = {'fontname':fnt, 'size':'20'}
label_font = {'family':fnt, 'size':'22'}
title_font = {'family':fnt, 'size':'24'}
plt.rcParams['figure.figsize'] = [6,3.5]
plt.rcParams['grid.alpha'] = 0.5
plt.rcParams['axes.axisbelow'] = True
weights = np.zeros_like(mcc) + 1/len(mcc)
plt.hist(mcc, bins=12, rwidth=1, color='royalblue', weights=weights)
plt.xticks(np.arange(-60,101,40)*0.01, **ticks_font)
plt.yticks(np.arange(0,28,5)*0.01, **ticks_font)
plt.xlabel('MCC', **label_font)
plt.ylabel('Relative frequency', **label_font)
plt.tight_layout()
plt.rcParams['savefig.dpi'] = 300
plt.rcParams['figure.dpi'] = 300
plt.savefig('plots/rules_mcc_dist.png',transparent = True)
plt.savefig('plots/rules_mcc_dist.svg',format='svg',transparent = True)
plt.savefig('plots/rules_mcc_dist.pdf')
plt.close()
# Minimum distance between rules' positions and substrate
#============================================================#
fnt='Arial'
ticks_font = {'fontname':fnt, 'size':'20'}
label_font = {'family':fnt, 'size':'22'}
title_font = {'family':fnt, 'size':'24'}
plt.rcParams['figure.figsize'] = [6,3.5]
plt.rcParams['grid.alpha'] = 0.5
plt.rcParams['axes.axisbelow'] = True
#dist58 = np.percentile(rules_mcc.dist_subsite, 58) #4.79Å
rule_dist = list(rules_mcc.dist_subsite)
weights = np.zeros_like(rule_dist) + 1/len(rule_dist)
plt.hist(rule_dist, bins=10, weights=weights, color='royalblue')
plt.xticks(np.arange(0,31,5), **ticks_font)
plt.xlim((0,30))
plt.yticks(np.arange(0,76,25)*0.01, **ticks_font)
plt.xlabel('Distance to substrate (Å)', **label_font)
plt.ylabel('Relative frequency', **label_font)
plt.tight_layout()
plt.savefig('plots/rules_distance_dist.pdf')
plt.close()
# Distribution at 39 important positions
#==========================================#
plt.rcParams['figure.figsize'] = [7,4]
for i in range(len(positions)):
GH13msa.site_plot(site=positions[i], savefig=True,
savepath='plots/position_distribution')
# Aromatic residues within 6Å of substrate (and consensus AS and SH)
#==============================================================================#
GH13msa.get_consensus_sequences()
AS_consensus = list(GH13msa.consensus_AS)
SH_consensus = list(GH13msa.consensus_SH)
Np = bioinf.split_fasta('fasta/GH13_positions_only/consensus.fasta')[1][1]
excel = pd.read_csv('results_final/residue_distances.csv', index_col=0)
closest_subsite = list(excel.iloc[:,0])
distances = list(excel.iloc[:,1])
resid_aro, Np_aro, AS_aro, SH_aro, closest_subsite_aro, dist_aro = [],[],[],[],[],[]
AS_aro_freq, SH_aro_freq, conserved = [], [], []
aro_res = ['F', 'W', 'Y', 'H']
for i in range(len(Np)):
if (Np[i] in aro_res or AS_consensus[i] in aro_res or SH_consensus[i] in aro_res)\
and distances[i]<=6.0:
resid_aro.append(i+1)
Np_aro.append(Np[i])
AS_aro.append(AS_consensus[i])
SH_aro.append(SH_consensus[i])
closest_subsite_aro.append(closest_subsite[i])
dist_aro.append(distances[i])
AS_freq = GH13msa.AS_freq.iloc[[4,6,18,19],i].sum()*100
SH_freq = GH13msa.SH_freq.iloc[[4,6,18,19],i].sum()*100
AS_aro_freq.append(AS_freq)
SH_aro_freq.append(SH_freq)
if AS_freq > 66 and SH_freq < 66:
conserved.append('AS')
elif AS_freq < 66 and SH_freq > 66:
conserved.append('SH')
elif AS_freq > 66 and SH_freq > 66:
conserved.append('AS and SH')
else:
conserved.append('None')
store = pd.DataFrame([resid_aro, Np_aro, AS_aro, SH_aro, AS_aro_freq, SH_aro_freq,
closest_subsite_aro, dist_aro, conserved]).transpose()
store.columns = ['Position', 'GH13 residue', 'AS consensus residue',
'SH consensus residue', 'Frequency of aromatic residues in ASs (%)',
'Frequency of aromatic residues in SHs (%)', 'Closest subsite',
'Distance to closest subsite (Å)', 'Aromatic residues conserved (>66%) in']
store = store.sort_values('Closest subsite')
store.to_csv('results_final/aromatic_residues.csv')
# Pymol commands for viewing aromatic residues on structure
#=============================================================#
pymol_AS = 'select aroAS, '
pymol_both = 'select aroboth, '
for i in range(len(store)):
pos = store.iloc[i,0]
if store.iloc[i,-1]=='AS':
pymol_AS += f'resi {pos} or '
elif store.iloc[i,-1]=='AS and SH':
pymol_both += f'resi {pos} or '
with open('plots/aromatic_pymol.txt', 'w') as pym:
pym.write(pymol_AS[:-4] + '\n\n')
pym.write(pymol_both[:-4] + '\n\n')
# Table of position-specific rules for predicting hydrolysis or transglycosylation
#======================================================#
ex = | pd.read_csv('results_final/ml_rf_pred/position_rules.csv', index_col=0) | pandas.read_csv |
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal
from pytest import importorskip
from woodwork.logical_types import (
Boolean,
Categorical,
Datetime,
Double,
Integer,
NaturalLanguage
)
from evalml.exceptions import ComponentNotYetFittedError
from evalml.pipelines.components import TargetEncoder
importorskip('category_encoders', reason='Skipping test because category_encoders not installed')
def test_init():
parameters = {"cols": None,
"smoothing": 1.0,
"handle_unknown": "value",
"handle_missing": "value"}
encoder = TargetEncoder()
assert encoder.parameters == parameters
def test_parameters():
encoder = TargetEncoder(cols=['a'])
expected_parameters = {"cols": ['a'],
"smoothing": 1.0,
"handle_unknown": "value",
"handle_missing": "value"}
assert encoder.parameters == expected_parameters
def test_categories():
encoder = TargetEncoder()
with pytest.raises(AttributeError, match="'TargetEncoder' object has no attribute"):
encoder.categories
def test_invalid_inputs():
with pytest.raises(ValueError, match="Invalid input 'test' for handle_unknown"):
TargetEncoder(handle_unknown='test')
with pytest.raises(ValueError, match="Invalid input 'test2' for handle_missing"):
TargetEncoder(handle_missing='test2')
with pytest.raises(ValueError, match="Smoothing value needs to be strictly larger than 0"):
TargetEncoder(smoothing=0)
def test_null_values_in_dataframe():
X = pd.DataFrame({'col_1': ["a", "b", "c", "d", np.nan],
'col_2': ["a", "b", "a", "c", "b"],
'col_3': ["a", "a", "a", "a", "a"]})
y = pd.Series([0, 1, 1, 1, 0])
encoder = TargetEncoder(handle_missing='value')
encoder.fit(X, y)
X_t = encoder.transform(X)
X_expected = pd.DataFrame({'col_1': [0.6, 0.6, 0.6, 0.6, 0.6],
'col_2': [0.526894, 0.526894, 0.526894, 0.6, 0.526894],
'col_3': [0.6, 0.6, 0.6, 0.6, 0.6, ]})
assert_frame_equal(X_expected, X_t.to_dataframe())
encoder = TargetEncoder(handle_missing='return_nan')
encoder.fit(X, y)
X_t = encoder.transform(X)
X_expected = pd.DataFrame({'col_1': [0.6, 0.6, 0.6, 0.6, np.nan],
'col_2': [0.526894, 0.526894, 0.526894, 0.6, 0.526894],
'col_3': [0.6, 0.6, 0.6, 0.6, 0.6, ]})
assert_frame_equal(X_expected, X_t.to_dataframe())
encoder = TargetEncoder(handle_missing='error')
with pytest.raises(ValueError, match='Columns to be encoded can not contain null'):
encoder.fit(X, y)
def test_cols():
X = pd.DataFrame({'col_1': [1, 2, 1, 1, 2],
'col_2': ['2', '1', '1', '1', '1'],
'col_3': ["a", "a", "a", "a", "a"]})
X_expected = X.astype({'col_1': 'Int64', 'col_2': 'category', 'col_3': 'category'})
y = pd.Series([0, 1, 1, 1, 0])
encoder = TargetEncoder(cols=[])
encoder.fit(X, y)
X_t = encoder.transform(X)
assert_frame_equal(X_expected, X_t.to_dataframe())
encoder = TargetEncoder(cols=['col_2'])
encoder.fit(X, y)
X_t = encoder.transform(X)
X_expected = pd.DataFrame({'col_1': pd.Series([1, 2, 1, 1, 2], dtype="Int64"),
'col_2': [0.60000, 0.742886, 0.742886, 0.742886, 0.742886],
'col_3': pd.Series(["a", "a", "a", "a", "a"], dtype="category")})
assert_frame_equal(X_expected, X_t.to_dataframe(), check_less_precise=True)
encoder = TargetEncoder(cols=['col_2', 'col_3'])
encoder.fit(X, y)
X_t = encoder.transform(X)
encoder2 = TargetEncoder()
encoder2.fit(X, y)
X_t2 = encoder2.transform(X)
assert_frame_equal(X_t.to_dataframe(), X_t2.to_dataframe())
def test_transform():
X = pd.DataFrame({'col_1': [1, 2, 1, 1, 2],
'col_2': ["r", "t", "s", "t", "t"],
'col_3': ["a", "a", "a", "b", "a"]})
y = pd.Series([0, 1, 1, 1, 0])
encoder = TargetEncoder()
encoder.fit(X, y)
X_t = encoder.transform(X)
X_expected = pd.DataFrame({'col_1': pd.Series([1, 2, 1, 1, 2], dtype="Int64"),
'col_2': [0.6, 0.65872, 0.6, 0.65872, 0.65872],
'col_3': [0.504743, 0.504743, 0.504743, 0.6, 0.504743]})
assert_frame_equal(X_expected, X_t.to_dataframe())
def test_smoothing():
# larger smoothing values should bring the values closer to the global mean
X = pd.DataFrame({'col_1': [1, 2, 1, 1, 2],
'col_2': [2, 1, 1, 1, 1],
'col_3': ["a", "a", "a", "a", "b"]})
y = pd.Series([0, 1, 1, 1, 0])
encoder = TargetEncoder(smoothing=1)
encoder.fit(X, y)
X_t = encoder.transform(X)
X_expected = pd.DataFrame({'col_1': pd.Series([1, 2, 1, 1, 2], dtype="Int64"),
'col_2': pd.Series([2, 1, 1, 1, 1], dtype="Int64"),
'col_3': [0.742886, 0.742886, 0.742886, 0.742886, 0.6]})
assert_frame_equal(X_expected, X_t.to_dataframe())
encoder = TargetEncoder(smoothing=10)
encoder.fit(X, y)
X_t = encoder.transform(X)
X_expected = pd.DataFrame({'col_1': pd.Series([1, 2, 1, 1, 2], dtype="Int64"),
'col_2': pd.Series([2, 1, 1, 1, 1], dtype="Int64"),
'col_3': [0.686166, 0.686166, 0.686166, 0.686166, 0.6]})
assert_frame_equal(X_expected, X_t.to_dataframe())
encoder = TargetEncoder(smoothing=100)
encoder.fit(X, y)
X_t = encoder.transform(X)
X_expected = pd.DataFrame({'col_1': pd.Series([1, 2, 1, 1, 2], dtype="Int64"),
'col_2': | pd.Series([2, 1, 1, 1, 1], dtype="Int64") | pandas.Series |
# transform pairs table to contact counts between binned coordinates
# pos-pos -> bin-bin / point -> pixel
import pandas as pd, numpy as np
import scipy.sparse
import datashader as ds
import datashader.transfer_functions as tf
import pickle as pkl
import xarray as xr
from pkgutil import get_data
from io import StringIO
from . import ref
class GenomeIdeograph:
@staticmethod
def get_lengths(ref_file):
# Get metrics of reference_genome,
# should cover all file-finding troubles.
## Input:
        ## ref_file: reference_abbreviations (file stored in module)
## or lengths file_path(csv format:: chrom, lengths)
## Return:
## chromosome_order; dict, {chromosome:length}
shipped_refs = {
"hg19":"hg19.len.csv",
"hg19.dip":"hg19.dip.len.csv",
"mm10":"mm10.len.csv",
"mm10.dip":"mm10.dip.len.csv"
}
if ref_file in shipped_refs:
            ref_dat = get_data(ref.__name__, shipped_refs[ref_file])
lengths = pd.read_csv(
StringIO(ref_dat.decode()),
index_col=0)
else:
try:
lengths = pd.read_csv(
ref_file,index_col=0)
except FileNotFoundError:
print("ref: neither valid abbrevations nor valid reference file")
chrom_order = dict(zip(lengths.index, range(len(lengths.index))))
lengths = lengths.iloc[:,0].to_dict()
return chrom_order, lengths
def __init__(self, ref_file):
        ## ref_file: reference_abbreviations (file stored in module)
## or lengths file_path(csv format:: chrom, lengths)
## chromosome order is the order of presentation in
## ref_file
        self.chr_order, self.lengths = \
            self.get_lengths(ref_file)
self.chrs = list(self.chr_order.keys())
def breaks(self, binsize:int):
# Get binned reference(int version)
## Return:
## breaks of bins(dict of list)
all_breaks = {}
for chrom in self.lengths:
length = self.lengths[chrom]
breaks = list(range(0, length, binsize))
breaks.append(length) # don't forget the rightmost point
all_breaks[chrom] = breaks
return all_breaks
def bins(self, binsize:int):
# Get binned reference(IntervalIdex version)
## Return:
## intervals of each bin(
## dict of IntervalIndex)
breaks = self.breaks(binsize)
bins = {chrom : pd.IntervalIndex.from_breaks(
breaks[chrom], closed="left",
name=chrom,dtype='interval[int64]')
for chrom in breaks}
return bins
def chr_sort(self,chr_list):
# Sort input chr_id list according to this ideograph
return sorted(chr_list, key = lambda x: self.chr_order[x])
def chr_sort_keys(self):
# key function for sorted
return lambda x: self.chr_order[x]
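# Minimal usage sketch (assumes the shipped "mm10" length table; chromosome ids
# must match those used in the reference csv):
# ideo = GenomeIdeograph("mm10")
# bins_1mb = ideo.bins(1_000_000)           # {chrom: IntervalIndex of 1 Mb bins}
# ordered = ideo.chr_sort(list(ideo.chrs))  # chromosomes in reference order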
def symmetry(X):
# flip lower-triangle-part and add
# it to upper-triangle
# set lower triangle to zeros
# Input:
# X: 2darray, assume square
X = np.tril(X,-1).T + X
X[np.tril_indices(X.shape[0],-1)] = 0
return X
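# Worked example: for X = [[1, 2],
#                          [3, 4]]
# symmetry(X) folds the lower triangle onto the upper one and zeroes it out:
# [[1, 5],
#  [0, 4]]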
def bin_cut(dat:pd.DataFrame, breaks:dict, bins:dict):
# Binnify contacts between a pair of chromosomes(chr_pair)
# Input:
## dat: pairs, assume intra-contacts or
## inter-contacts between two chromosome
## chr_pair: set with 1(for intra) or 2(inter) elements
## breaks: binned reference
## chromosome_name : boundary of each bin in that chromosome}
## (keys should contain all eles in chr_pair)
## bins: binned reference(IntervalIndex version)
## use as index
## chromosome_name : intervals of each bin in that chromosome}
## (keys should contain all eles in chr_pair)
# Output:
## pd.DataFrame with full interval_index
# using first row to infer which chr_pair this
chr1, chr2 = dat.iloc[0,[1,3]]
# binnify
b_dat, xi, yi = np.histogram2d(x=dat["pos1"],y=dat["pos2"],
bins=[breaks[chr1],breaks[chr2]])
# store in sparse matrix
if chr1 == chr2:
# upper-triangle for intra_contacts
b_dat = symmetry(b_dat)
b_dat = pd.DataFrame(
b_dat).astype(pd.SparseDtype(int,0))
# using Interval version of bins as index
b_dat.index, b_dat.columns = \
bins[chr1], bins[chr2]
return b_dat
def tiled_bin_cut(pairs:pd.DataFrame, ref:GenomeIdeograph, binsize:int) -> dict:
pairs_b = {}
for indi, dat in pairs.groupby(["chr1","chr2"]):
pairs_b[indi] = \
bin_cut(dat,ref.breaks(binsize),ref.bins(binsize))
# in-case input pairs isn't upper-triangle
norm_pairs_b = {}
for key in pairs_b:
if frozenset(key) in norm_pairs_b:
norm_pairs_b[frozenset(key)] += \
pairs_b[key]
else:
norm_pairs_b[frozenset(key)] = pairs_b[key]
return norm_pairs_b
def shader_matrix_plot(mat:pd.DataFrame, width:int=500, height:int=500, short_length:int=0):
    # plot binnified pairs matrix so that different samples are aligned in the same coords
    # short_length sets the short edge of the resulting image and keeps the h/w ratio (according to mat.shape)
# useful when mat isn't standard square
x_mat = xr.DataArray(mat.values, coords=[("pos1",mat.index),("pos2",mat.columns)]) # transform to xarray form
if short_length != 0:
expand_r = short_length // min(*mat.shape)
cvs = ds.Canvas(plot_width=mat.shape[1]*expand_r, plot_height=mat.shape[0]*expand_r)
else:
cvs = ds.Canvas(plot_width=width, plot_height=height)
x_mat['_file_obj'] = None # work around for ds bug
return tf.shade(cvs.raster(x_mat))
def write_matrix(mat:scipy.sparse.csc_matrix,file_name:str):
with open(file_name,'wb') as f:
pkl.dump(mat, f)
def read_matrix(file_name:str)->pd.DataFrame:
with open(file_name,'rb') as f:
mat = pkl.load(f)
return | pd.DataFrame.sparse.from_spmatrix(mat) | pandas.DataFrame.sparse.from_spmatrix |
#!/usr/bin/env python
### Up to date as of 10/2019 ###
'''Section 0: Import python libraries
This code has a number of dependencies, listed below.
They can be installed using the virtual environment "slab23"
that is set up using the script 'library/setup3env.sh'.
Additional functions are housed in file 'slab2functions.py'
and imported below.
There are some additional dependencies used by the function file
that do not need to be installed separately.
'''
# stdlib imports
from datetime import datetime
import os.path
import argparse
import numpy as np
from pandas import DataFrame
import pandas as pd
import warnings
import slab2functions as s2f
import math
import mapio.gmt as gmt
from functools import partial
from multiprocess import Pool
import loops as loops
from scipy import ndimage
import psutil
import cProfile
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def main(args):
'''Section 1: Setup
In this section:
(1) Identify necessary input files
(2) Load parameters from '[slab]input.par'
(3) Define optional boxes for PDF/print testing
(4) Define output file names
(5) Gathering optional arguments, setting defaults
(6) Define search ellipsoid parameters
(7) Define Average active source profiles
(8) Define reference model (Slab1.0 and/or slab guides)
(9) Define Trench Locations
(10) Open and modify input dataset
(11) Calculate seismogenic zone thickness
(12) Record variable parameters used for this model
(13) Define search grid
(14) Identify tomography datasets
(15) Initialize arrays for Section 2 '''
print('Start Section 1 of 7: Setup')
print(' Loading inputs...')
''' ------ (1) Identify necessary input files ------ '''
trenches = 'library/misc/trenches_usgs_2017_depths.csv'
agesFile = 'library/misc/interp_age.3.2g.nc'
ageerrorsFile = 'library/misc/interp_ageerror.3.2g.nc'
polygonFile = 'library/misc/slab_polygons.txt'
addFile = 'library/misc/addagain.csv'
parFile = args.parFile
pd.options.mode.chained_assignment = None
warnings.filterwarnings("ignore", message="invalid value encountered in less")
warnings.filterwarnings("ignore", message="invalid value encountered in true_divide")
warnings.filterwarnings("ignore", message="invalid value encountered in greater")
warnings.filterwarnings("ignore", message="invalid value encountered in double_scalars")
''' ------ (2) Load parameters from '[slab]input.par' ------'''
for line in open(parFile):
plist = line.split()
if len(plist)>2:
if plist[0] == 'inFile':
inFile = plist[2]
if plist[0] == 'use_box':
use_box = plist[2]
if plist[0] == 'latmin':
latmin = np.float64(plist[2])
if plist[0] == 'latmax':
latmax = np.float64(plist[2])
if plist[0] == 'lonmin':
lonmin = np.float64(plist[2])
if plist[0] == 'lonmax':
lonmax = np.float64(plist[2])
if plist[0] == 'slab':
slab = plist[2]
if plist[0] == 'grid':
grid = np.float64(plist[2])
if plist[0] == 'radius1':
radius1 = np.float64(plist[2])
if plist[0] == 'radius2':
radius2 = np.float64(plist[2])
if plist[0] == 'sdr':
sdr = np.float64(plist[2])
if plist[0] == 'ddr':
ddr = np.float64(plist[2])
if plist[0] == 'taper':
taper = np.float64(plist[2])
if plist[0] == 'T':
T = np.float64(plist[2])
if plist[0] == 'node':
node = np.float64(plist[2])
if plist[0] == 'filt':
filt = np.float64(plist[2])
if plist[0] == 'maxdist':
maxdist = np.float64(plist[2])
if plist[0] == 'minunc':
minunc = np.float64(plist[2])
if plist[0] == 'mindip':
mindip = np.float64(plist[2])
if plist[0] == 'minstk':
minstk = np.float64(plist[2])
if plist[0] == 'maxthickness':
maxthickness = np.float64(plist[2])
if plist[0] == 'seismo_thick':
seismo_thick = np.float64(plist[2])
if plist[0] == 'dipthresh':
dipthresh = np.float64(plist[2])
if plist[0] == 'fracS':
fracS = np.float64(plist[2])
if plist[0] == 'kdeg':
kdeg = np.float64(plist[2])
if plist[0] == 'knot_no':
knot_no = np.float64(plist[2])
if plist[0] == 'rbfs':
rbfs = np.float64(plist[2])
# loop through to find latest slab input file if specified
polyname = slab
if slab == 'kur' or slab == 'izu':
polyname = 'jap'
if inFile == 'latest':
yearmax = 0
monthmax = 0
for filename in os.listdir('Input'):
if filename.endswith('.csv'):
try:
slabname,datei,instring = filename.split('_')
except:
continue
if slabname == polyname and instring == 'input.csv':
try:
monthi, yeari = datei.split('-')
except:
continue
yeari = int(yeari)
monthi = int(monthi)
if yeari >= yearmax:
yearmax = yeari
inFile = 'Input/%s'%filename
if monthi > monthmax:
monthmax = monthi
inFile = 'Input/%s'%filename
print (' using input file: %s'%inFile)
if slab == 'mue' or slab == 'phi' or slab == 'cot' or slab == 'sul' or slab == 'ryu':
if args.undergrid is None:
if slab == 'mue':
print ('This slab is truncated by the Caribbean (car) slab, argument -u cardepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'cot':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'sul':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'phi':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'ryu':
print ('This slab is truncated by the Kurils-Japan (kur) slab, argument -u kurdepgrid is required')
print ('Exiting .... ')
exit()
else:
undergrid = args.undergrid
''' ------ (4) Define output file names ------ '''
date = datetime.today().strftime('%m.%d.%y')
now = datetime.now()
time = '%s.%s' % (now.hour, now.minute)
folder = '%s_slab2_%s' % (slab, date)
os.system('mkdir Output/%s'%folder)
outFile = 'Output/%s/%s_slab2_res_%s.csv' % (folder, slab, date)
dataFile = 'Output/%s/%s_slab2_dat_%s.csv' % (folder, slab, date)
nodeFile = 'Output/%s/%s_slab2_nod_%s.csv' % (folder, slab, date)
fillFile = 'Output/%s/%s_slab2_fil_%s.csv' % (folder, slab, date)
rempFile = 'Output/%s/%s_slab2_rem_%s.csv' % (folder, slab, date)
clipFile = 'Output/%s/%s_slab2_clp_%s.csv' % (folder, slab, date)
these_params = 'Output/%s/%s_slab2_par_%s.csv' % (folder, slab, date)
datainfo = 'Output/%s/%s_slab2_din_%s.csv' % (folder, slab, date)
nodeinfo = 'Output/%s/%s_slab2_nin_%s.csv' % (folder, slab, date)
suppFile = 'Output/%s/%s_slab2_sup_%s.csv' % (folder, slab, date)
nodexFile = 'Output/%s/%s_slab2_nox_%s.csv' % (folder, slab, date)
nodeuFile = 'Output/%s/%s_slab2_nou_%s.csv' % (folder, slab, date)
depTextFile = 'Output/%s/%s_slab2_dep_%s.txt' % (folder, slab, date)
depGridFile = 'Output/%s/%s_slab2_dep_%s.grd' % (folder, slab, date)
strTextFile = 'Output/%s/%s_slab2_str_%s.txt' % (folder, slab, date)
strGridFile = 'Output/%s/%s_slab2_str_%s.grd' % (folder, slab, date)
dipTextFile = 'Output/%s/%s_slab2_dip_%s.txt' % (folder, slab, date)
dipGridFile = 'Output/%s/%s_slab2_dip_%s.grd' % (folder, slab, date)
uncTextFile = 'Output/%s/%s_slab2_unc_%s.txt' % (folder, slab, date)
uncGridFile = 'Output/%s/%s_slab2_unc_%s.grd' % (folder, slab, date)
thickTextFile = 'Output/%s/%s_slab2_thk_%s.txt' % (folder, slab, date)
thickGridFile = 'Output/%s/%s_slab2_thk_%s.grd' % (folder, slab, date)
savedir = 'Output/%s'%folder
''' ------ (3) Define optional boxes for PDF/print testing ------'''
if args.test is not None:
testlonmin = args.test[0]
testlonmax = args.test[1]
testlatmin = args.test[2]
testlatmax = args.test[3]
if testlonmin < 0:
testlonmin += 360
if testlonmax < 0:
testlonmax += 360
testarea = [testlonmin, testlonmax, testlatmin, testlatmax]
printtest = True
os.system('mkdir Output/PDF%s' % (slab))
os.system('mkdir Output/multitest_%s' % (slab))
f = open(datainfo, 'w+')
f.write('dataID, nodeID, used_or_where_filtered')
f.write('\n')
f.close()
        f = open(nodeinfo, 'w+')
f.write('nodeID, len(df), status, details')
f.write('\n')
f.close()
else:
# an area not in range of any slab polygon
testarea = [220, 230, 15, 20]
printtest = False
''' --- (5) Gathering optional arguments, setting defaults ---'''
if use_box == 'yes':
check = 1
slab = s2f.rectangleIntersectsPolygon(lonmin, lonmax, latmin,
latmax, polygonFile)
if isinstance(slab, str):
slab = slab
else:
try:
slab = slab[0]
except:
print('System exit because box does not intersect slab polygon')
raise SystemExit()
elif use_box == 'no':
check = 0
lon1, lon2, lat1, lat2 = s2f.determine_polygon_extrema(slab,
polygonFile)
lonmin = float(lon1)
lonmax = float(lon2)
latmin = float(lat1)
latmax = float(lat2)
else:
print('use_box in slab2input.par must be "yes" or "no"')
raise SystemExit()
''' ------ (6) Define search ellipsoid parameters ------'''
alen = radius1
blen = radius2
ec = math.sqrt(1-((math.pow(blen, 2))/(math.pow(alen, 2))))
mdist = alen * ec
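    # ec = sqrt(1 - b^2/a^2) is the eccentricity of the search ellipse, so
    # mdist = alen * ec is the linear eccentricity (centre-to-focus distance)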
''' ------ (7) Define Average active source profiles ------'''
# Different because alu is variable E/W
if slab == 'alu':
AA_data = pd.read_csv('library/avprofiles/alu_av5.csv')
global_average = False
elif slab == 'him':
AA_data = pd.read_csv('library/avprofiles/him_av.csv')
global_average = False
elif slab == 'kur' or slab == 'izu':
AA_source = 'library/avprofiles/%s_av.txt' % 'jap'
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
AA_data = AA_data[AA_data.dist < 125]
global_average = False
# Use RF data like AA data to constrain flat slab in Mexico
elif slab == 'cam':
AA_source = 'library/avprofiles/%s_av.txt' % slab
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
RF_data = pd.read_csv('library/avprofiles/cam_RF_av.csv')
AA_data = pd.concat([AA_data,RF_data],sort=True)
global_average = False
else:
global_average = False
        # See if there is an average active source profile for this slab
try:
AA_source = 'library/avprofiles/%s_av.txt' % slab
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
# If there is no profile for this slab, use the global profile
except:
AA_global = | pd.read_csv('library/avprofiles/global_as_av2.csv') | pandas.read_csv |
import re
import numpy as np
import pandas as pd
from dateutil.tz import tzutc
from dateutil.parser import parse as parse_date
from datetime import datetime, timedelta, timezone
from qset.utils.numeric import custom_round
# NOTE: slow
def parse_human_timestamp_re(hts, min_date_str="2000"):
"""
:param hts: Human timestamp: 20180101/2018010112/201801011200/20180101120000/20180101120000123...
:return:
"""
ts_PATTERN = re.compile(r"(\d{4})(\d{2})(\d{2})(\d{2})?(\d{2})?(\d{2})?(\d+)?")
hts = str(hts)
if hts < min_date_str:
raise Exception("Min date test failed")
split = list(ts_PATTERN.match(hts).groups()[: int((len(hts) - 2) / 2)])
# adjust microseconds
if len(split) == 7:
split[-1] = split[-1].ljust(6, "0")[:6]
return datetime(*map(int, split))
def parse_human_timestamp(hts, min_date_str="2000"):
"""
:param hts: Human timestamp: 20180101/2018010112/201801011200/20180101120000/20180101120000123...
:return:
"""
hts = str(hts)
if hts < min_date_str:
raise Exception("Min date test failed")
slices = [
slice(0, 4),
slice(4, 6),
slice(6, 8),
slice(8, 10),
slice(10, 12),
slice(12, 14),
slice(14, None),
][: int((len(hts) - 2) / 2)]
split = [hts[sl] for sl in slices]
# adjust microseconds
if len(split) == 7:
split[-1] = split[-1].ljust(6, "0")[:6]
return datetime(*map(int, split))
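# Illustrative examples of the accepted human-timestamp formats:
# parse_human_timestamp("20180101")          -> datetime(2018, 1, 1, 0, 0)
# parse_human_timestamp("2018010112")        -> datetime(2018, 1, 1, 12, 0)
# parse_human_timestamp("20180101120000123") -> datetime(2018, 1, 1, 12, 0, 0, 123000)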
def cast_hts(dt_obj):
dt_obj = cast_datetime(dt_obj)
return int(dt_obj.strftime("%Y%m%d%H%M%S%f")[:-3])
def cast_datetime(dt_obj, none_invariant=True):
"""
:param dt_obj: datetime-like object: datetime.datetime or str
:return: datetime
NOTE: This is slow
"""
if isinstance(dt_obj, datetime):
return dt_obj
elif isinstance(dt_obj, str) and not is_freq(dt_obj):
try:
return parse_human_timestamp(dt_obj)
except:
pass
# '01.08.2019' type
search = re.search(r"(\d\d)\.(\d\d)\.(\d\d\d\d)", dt_obj)
if search:
d, m, y = search.groups()
return datetime(int(y), int(m), int(d))
# '01.08.20' type
for pat in [
r"^(\d\d)\.(\d\d)\.(\d\d)$",
r"^\d(\d\d)\.(\d\d)\.(\d\d)$",
r"^\d(\d\d)\.(\d\d)\.(\d\d)^\d",
r"^(\d\d)\.(\d\d)\.(\d\d)\d",
]:
search = re.search(pat, dt_obj)
if search:
d, m, y = search.groups()
return datetime(int(y) + 2000, int(m), int(d))
dt = parse_date(dt_obj)
if dt and dt.tzinfo:
dt = dt.astimezone(tzutc()).replace(tzinfo=None)
return dt
elif none_invariant and dt_obj is None:
return None
elif isinstance(dt_obj, (int, float, np.integer)):
try:
return parse_human_timestamp(dt_obj)
except:
pass
try:
return parse_date(str(dt_obj))
except:
pass
try:
return datetime.fromtimestamp(dt_obj, tz=timezone.utc).replace(tzinfo=None)
except:
pass
return datetime.fromtimestamp(dt_obj / 1000, tz=timezone.utc).replace(
tzinfo=None
)
else:
raise Exception("Unknown datetime-like object type")
cast_dt = cast_datetime
def cast_timestamp(dt_obj):
"""
:param dt_obj: naive datetime
:return:
"""
dt_obj = cast_datetime(dt_obj)
# timestamp is always in utc!
return dt_obj.replace(tzinfo=timezone.utc).timestamp()
cast_ts = cast_timestamp
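# Illustrative examples (timestamps are always interpreted as UTC here):
# cast_timestamp(datetime(2018, 1, 1)) -> 1514764800.0
# cast_ts("20180101")                  -> 1514764800.0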
def cast_mts(dt_obj):
return cast_timestamp(dt_obj) * 1000
def cast_str(dt, format=None):
if isinstance(dt, str):
return dt
elif isinstance(dt, datetime):
if not format:
return str(dt)
return dt.strftime(format)
else:
raise Exception("Unsupported type")
def get_strptime_pattern(s):
"""
:param s: str
:return: get strptime pattern
NOTE: be careful with microseconds. It is not handled properly
"""
if len(s) > 20:
raise Exception("Too big string")
return "%Y%m%d%H%M%S%f"[: int(len(s) - 2)]
def cast_datetime_series(s):
"""
:param s: a series of datetime-like objects with the same prototype.
:return: a datetime series
"""
sample = s.iloc[0]
# process hts case
try:
parse_human_timestamp(sample)
except:
pass
else:
sample = str(sample)
pattern = get_strptime_pattern(sample)
s = s.astype(str)
        # 20 is for the full %Y%m%d%H%M%S%f format and 17 is for the same format but with %f given as 3 digits instead of the default 6
if len(sample) > 20:
# crop to microseconds
s = s[:20]
elif len(sample) >= 17:
# add zeros for microseconds
s = s + "0" * (20 - len(sample))
return pd.to_datetime(s, format=pattern)
if isinstance(sample, (int, np.integer)):
# considered as timestamp: 1521193807
int_part = str(sample).split(".")[0]
# '1521193807'
if len(int_part) == 10:
return pd.to_datetime(s, unit="s")
# '1521193807000'
elif len(int_part) == 13:
return pd.to_datetime(s, unit="ms")
# '1521193807000000'
elif len(int_part) == 16:
return pd.to_datetime(s, unit="us")
# '1521193807000000000'
elif len(int_part) == 19:
return pd.to_datetime(s, unit="ns")
    elif isinstance(sample, (float, np.floating)):
# considered as timestamp: 1521193807
int_part = str(sample).split(".")[0]
# '1521193807'
if len(int_part) == 10:
return s.apply(datetime.utcfromtimestamp)
# '1521193807000'
elif len(int_part) == 13:
return (s / 1000).apply(datetime.utcfromtimestamp)
return pd.to_datetime(s, infer_datetime_format=True)
def cast_datetime_many(lst):
# NOTE: THIS CODE INFERS DATETIME FORMAT! In other words, all passed values should have the same format
return cast_datetime_series( | pd.Series(lst) | pandas.Series |
import os
import tqdm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from collections import Counter
from sklearn import model_selection
def load_data():
fp = os.path.dirname(__file__)
# Sensor data
data = pd.read_csv(fp + '/PdM_telemetry.csv.gz')
# Error alarm logs
data = data.merge(
pd.read_csv(fp + '/PdM_errors.csv.gz'),
how='left', on=['datetime', 'machineID'])
# Failure logs
data = data.merge(
pd.read_csv(fp + '/PdM_failures.csv.gz'),
how='left', on=['datetime', 'machineID'])
# Formatting
data.datetime = pd.to_datetime(data.datetime)
return data
def cleaning(df):
# NaN values are encoded to -1
df = df.sort_values('errorID')
df.errorID = df.errorID.factorize()[0]
df = df.sort_values('failure')
df.failure = df.failure.factorize()[0]
df = df.sort_values(['machineID', 'datetime'])
df.errorID = df.errorID.astype('category')
df.failure = df.failure.astype('category')
df.volt = df.volt.astype('float32')
df.rotate = df.rotate.astype('float32')
df.pressure = df.pressure.astype('float32')
df.vibration = df.vibration.astype('float32')
df.datetime = pd.to_datetime(df.datetime)
return df
def load_clean_data():
return cleaning(load_data())
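# Minimal usage sketch (assumes the PdM_*.csv.gz files sit next to this module;
# the output file name is arbitrary):
# raw = load_data()
# r2f = generate_run_to_failure(raw, health_censor_aug=1000, outfn="run_to_failure.csv.gz")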
def generate_run_to_failure(raw_data, health_censor_aug=1000,
min_lifetime=10, max_lifetime=300,
seed=123, outfn=None):
run_to_failure = []
error_ids = raw_data.errorID.dropna().sort_values().unique().tolist()
for machine_id, g in tqdm.tqdm(raw_data.groupby('machineID'), desc='run-to-failure'):
g = g.set_index('datetime').sort_index()
start_date = g.index.values[0]
failures = g.loc[~g.failure.isnull()]
for event_time, event in failures.iterrows():
# Extracting a single cycle/process
cycle = g[start_date:event_time].drop('machineID', axis=1)
lifetime = (event_time - start_date).days
if lifetime < 1:
start_date = event_time
continue
numerical_features = cycle.agg(['min', 'max', 'mean']).unstack().reset_index()
numerical_features['feature'] = numerical_features.level_0.str.cat(numerical_features.level_1, sep='_')
numerical_features = numerical_features.pivot_table(columns='feature', values=0)
categorical_features = pd.DataFrame(Counter(cycle.errorID), columns=error_ids, index=[0])
sample = pd.concat([numerical_features, categorical_features], axis=1)
sample[['machine_id', 'lifetime', 'broken']] = machine_id, lifetime, 1
run_to_failure.append(sample)
start_date = event_time
run_to_failure = pd.concat(run_to_failure, axis=0).reset_index(drop=True)
health_censors = censoring_augmentation(raw_data,
n_samples=health_censor_aug,
min_lifetime=min_lifetime,
max_lifetime=max_lifetime,
seed=seed)
run_to_failure = pd.concat([run_to_failure, health_censors])
# Shuffle
run_to_failure = run_to_failure.sample(frac=1, random_state=seed).reset_index(drop=True)
run_to_failure = run_to_failure.fillna(0.)
if outfn is not None:
run_to_failure.to_csv(outfn, index=False)
return run_to_failure
def censoring_augmentation(raw_data, n_samples=10, max_lifetime=150, min_lifetime=2, seed=123):
error_ids = raw_data.errorID.dropna().sort_values().unique().tolist()
np.random.seed(seed)
samples = []
pbar = tqdm.tqdm(total=n_samples, desc='augmentation')
while len(samples) < n_samples:
censor_timing = np.random.randint(min_lifetime, max_lifetime)
machine_id = np.random.randint(100) + 1
tmp = raw_data[raw_data.machineID == machine_id]
tmp = tmp.drop('machineID', axis=1).set_index('datetime').sort_index()
failures = tmp[~tmp.failure.isnull()]
if failures.shape[0] < 2:
continue
failure_id = np.random.randint(failures.shape[0])
failure = failures.iloc[failure_id]
event_time = failure.name
start_date = tmp.index.values[0] if failure_id == 0 else failures.iloc[failure_id - 1].name
# censoring
cycle = tmp[start_date:event_time]
cycle = cycle.iloc[:censor_timing]
if not cycle.shape[0] == censor_timing:
continue
numerical_features = cycle.agg(['min', 'max', 'mean', 'std']).unstack().reset_index()
numerical_features['feature'] = numerical_features.level_0.str.cat(numerical_features.level_1, sep='_')
numerical_features = numerical_features.pivot_table(columns='feature', values=0)
categorical_features = pd.DataFrame(Counter(cycle.errorID), columns=error_ids, index=[0])
sample = pd.concat([numerical_features, categorical_features], axis=1)
sample[['machine_id', 'lifetime', 'broken']] = machine_id, censor_timing, 0
samples.append(sample)
pbar.update(1)
pbar.close()
return pd.concat(samples).reset_index(drop=True).fillna(0)
def generate_validation_sets(method='kfold', n_splits=5, seed=123, outdir=None):
validation_sets = []
if method == 'kfold':
# K-fold cross validation
assert type(n_splits) == int
assert n_splits > 2
raw_data = load_data()
kfold = model_selection.KFold(n_splits=n_splits, shuffle=True, random_state=seed)
for i, (train_index, test_index) in enumerate(kfold.split(np.arange(100))):
print('K-fold {}/{}'.format(i+1, n_splits))
# train/test split by machine ID
train_machines = raw_data[raw_data.machineID.isin(train_index)]
test_machines = raw_data[raw_data.machineID.isin(test_index)]
# print('train:', train_machines.shape)
# print('test:', test_machines.shape)
# convert the two sets into run-to-failure data
train_censored_data = generate_run_to_failure(
train_machines, health_censor_aug=len(train_index)*10, seed=seed)
test_consored_data = generate_run_to_failure(
test_machines, health_censor_aug=len(test_index)*10, seed=seed)
# print('train:', train_censored_data.shape)
# print('test:', test_consored_data.shape)
validation_sets.append((train_censored_data, test_consored_data))
if outdir is not None:
train_censored_data.to_csv(outdir + f'/train_{i}.csv.gz', index=False)
test_consored_data.to_csv(outdir + f'/test_{i}.csv.gz', index=False)
elif method == 'leave-one-out':
raise NotImplementedError
return validation_sets
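# Example (the output directory name is arbitrary): build 5 machine-level folds and
# write train_i/test_i files that load_validation_sets() can read back:
# sets = generate_validation_sets(method='kfold', n_splits=5, seed=123, outdir='folds')
# folds = load_validation_sets('folds', n_splits=5)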
def load_validation_sets(filepath, n_splits=5):
return [(pd.read_csv(filepath + f'/train_{i}.csv.gz'),
pd.read_csv(filepath + f'/test_{i}.csv.gz'))
for i in range(n_splits)]
def plot_sequence_and_events(data, machine_id=1):
data = data[data.machineID == machine_id]
fig, ax = plt.subplots(4 + 2, figsize=(8, 8))
data.plot(y='volt', legend=True, ax=ax[0])
data.plot(y='rotate', legend=True, ax=ax[1])
data.plot(y='pressure', legend=True, ax=ax[2])
data.plot(y='vibration', legend=True, ax=ax[3])
if data.errorID.isnull().sum() < data.errorID.shape[0]:
pd.get_dummies(data.errorID).plot(ax=ax[4])
if data.failure.isnull().sum() < data.failure.shape[0]:
| pd.get_dummies(data.failure) | pandas.get_dummies |
import pandas as pd
from xml.etree import ElementTree as etree
pd.set_option('display.max_columns', 500)
class DataFrame:
def __init__(self, doc, allElements):
'''doc = .eaf file; allElements = line element and its children'''
self.doc = doc
self.allElements = allElements
self.tbl = self.buildTable(doc,self.allElements)
def getTbl(self):
return self.tbl
# def getTimeSlotIDs(self, doc, tbl_elements):
# '''next step asks for row 0 of dataframe (speech), get value of TSRef1 (start time)'''
# startTimeSlotID = tbl_elements.iloc[0, tbl_elements.columns.values.tolist().index('TIME_SLOT_REF1')]
# endTimeSlotID = tbl_elements.iloc[0, tbl_elements.columns.values.tolist().index('TIME_SLOT_REF2')]
# return startTimeSlotID, endTimeSlotID
def getTimeSlotIDs(self, doc, tbl_elements):
'''next step asks for row 0 of dataframe (speech), get value of TSRef1 (start time)'''
if 'TIME_SLOT_REF1' in tbl_elements.columns:
startTimeSlotID = tbl_elements.iloc[0, tbl_elements.columns.values.tolist().index('TIME_SLOT_REF1')]
endTimeSlotID = tbl_elements.iloc[0, tbl_elements.columns.values.tolist().index('TIME_SLOT_REF2')]
else:
startTimeSlotID = False
parentRefID = tbl_elements.iloc[0, tbl_elements.columns.values.tolist().index('ANNOTATION_REF')]
while startTimeSlotID == False:
# print(parentRefID)
parentAnnotation = doc.find('TIER/ANNOTATION/ALIGNABLE_ANNOTATION[@ANNOTATION_ID="%s"]' %parentRefID)
if not parentAnnotation:
parentAnnotation = doc.find('TIER/ANNOTATION/REF_ANNOTATION[@ANNOTATION_ID="%s"]' % parentRefID)
if 'TIME_SLOT_REF1' in parentAnnotation.attrib:
startTimeSlotID = parentAnnotation.attrib['TIME_SLOT_REF1']
endTimeSlotID = parentAnnotation.attrib['TIME_SLOT_REF2']
newTSR1_column = [startTimeSlotID]
newTSR2_column = [endTimeSlotID]
for i in range(1,tbl_elements.shape[0]):
newTSR1_column.append("NaN")
newTSR2_column.append("NaN")
tbl_elements.insert(0,'TIME_SLOT_REF1',newTSR1_column)
tbl_elements.insert(0, 'TIME_SLOT_REF2', newTSR2_column)
# row = tbl_elements.loc[tbl_elements['TIER_ID'] == self.speechTier]
# print("speech tier row is %s" %row)
else:
try:
parentRefID = parentAnnotation.attrib[('ANNOTATION_REF')]
except KeyError:
'''this will happen if the speech tier is not time-aligned or the child
of a time-aligned tier; this will probably crash SLEXIL, but this is an inadmissible
file type anyway, we can figure out how to warn the user later'''
print('bailing')
startTimeSlotID = float('NaN')
endTimeSlotID = float('NaN')
return startTimeSlotID, endTimeSlotID
def buildTable(self, doc, lineElements):
#doc = .eaf file; lineElements = line element and its children
tbl_elements = pd.DataFrame(e.attrib for e in lineElements)
startTimeSlotID, endTimeSlotID = self.getTimeSlotIDs(doc, tbl_elements)
pattern = "TIME_ORDER/TIME_SLOT[@TIME_SLOT_ID='%s']" % startTimeSlotID
startTime = int(doc.find(pattern).attrib["TIME_VALUE"])
startTimes = [startTime]
rowCount = tbl_elements.shape[0]
'''next step fills in NaN for all the children of the time-aligned tier, but since that
messes us up with the getStart/End methods in IjalLine if the *speech tier* isn't aligned,
let's just give every row a copy of the start and end times'''
for i in range(1, rowCount):
# startTimes.append(float('NaN'))
startTimes.append(startTime)
'''repeat previous for end times'''
pattern = "TIME_ORDER/TIME_SLOT[@TIME_SLOT_ID='%s']" % endTimeSlotID
endTime = int(doc.find(pattern).attrib["TIME_VALUE"])
endTimes = [endTime]
for i in range(1, rowCount):
# endTimes.append(float('NaN'))
endTimes.append(endTime)
tbl_times = | pd.DataFrame({"START": startTimes, "END": endTimes}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import random
from human_ISH_config import *
import math
import os
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import RandomForestClassifier
#DATA_DIR: This is defined in human_ISH_config.py. This is the directory you have defined to store all the data.
PATH_TO_SZ_STUDY = os.path.join(DATA_DIR, "schizophrenia")
PATH_TO_SZ_POST_PROCESS = os.path.join(PATH_TO_SZ_STUDY, "post_process_on_sz")
def get_sz_labels_image_and_donor_level(label):
"""
This function is used to select a certain column from the info csv file to be later used as a label in downstream tasks.
The main columns that we were interested are: "description" and "smoker"
"description" indicates whether the donor was case or control, and "smoker" indicates whether they smoked or not.
This information is available from the Allen website.
:param label: string. The column name to be used as label
:return: None
"""
path_to_sz_info = os.path.join(PATH_TO_SZ_STUDY, "human_ISH_info.csv")
sz_info_df = pd.read_csv(path_to_sz_info)
if label == 'description':
new_df = pd.DataFrame(columns=['ID', label])
# --------------- image level ---------------
new_df['ID'] = sz_info_df['image_id']
diagnosis = list(sz_info_df['description'])
image_sz_count = 0
image_no_sz_count = 0
for i in range(len(diagnosis)):
if "schizophrenia" in diagnosis[i]:
diagnosis[i] = True
image_sz_count +=1
elif "control" in diagnosis[i]:
diagnosis[i] = False
image_no_sz_count +=1
else:
diagnosis[i] = None
new_df[label] = diagnosis
file_name = "sz_diagnosis_as_label_image_level.csv"
new_df.to_csv(os.path.join(PATH_TO_SZ_POST_PROCESS, file_name), index=None)
print ("image sz count: ", image_sz_count)
print ("image no sz count: ", image_no_sz_count)
print ("total: ", image_sz_count + image_no_sz_count)
# --------------- donor level ---------------
group_by_donor = sz_info_df.groupby('donor_id')
donor_list=[]
diagnosis_list = []
donor_sz_count = 0
donor_no_sz_count = 0
for key, item in group_by_donor:
donor_list.append(key)
diagnosis = list(item['description'])[0]
if "schizophrenia" in diagnosis:
diagnosis_list.append(True)
donor_sz_count +=1
elif "control" in diagnosis:
diagnosis_list.append(False)
donor_no_sz_count +=1
else:
diagnosis_list.append(None)
new_df = pd.DataFrame(columns=['ID', label])
new_df['ID']= donor_list
new_df[label] = diagnosis_list
file_name = "sz_diagnosis_as_label_donor_level.csv"
new_df.to_csv(os.path.join(PATH_TO_SZ_POST_PROCESS, file_name), index=None)
print ("donor sz count: ", donor_sz_count)
print ("donor no sz count: ", donor_no_sz_count)
print ("total: ", donor_sz_count + donor_no_sz_count)
elif label in ['donor_age', 'donor_sex', 'smoker', 'pmi', 'tissue_ph', 'donor_race']:
new_df = pd.DataFrame(columns=['ID', label])
# --------------- image level ---------------
new_df['ID'] = sz_info_df['image_id']
new_df[label] = list(sz_info_df[label])
file_name = label + "_as_label_image_level.csv"
new_df.to_csv(os.path.join(PATH_TO_SZ_POST_PROCESS, file_name), index=None)
# --------------- donor level ---------------
group_by_donor = sz_info_df.groupby('donor_id')
donor_list = []
label_list = []
for key, item in group_by_donor:
donor_list.append(key)
label_list.append(list(item[label])[0])
new_df = pd.DataFrame(columns=['ID', label])
new_df['ID'] = donor_list
new_df[label] = label_list
file_name = label + "_as_label_donor_level.csv"
new_df.to_csv(os.path.join(PATH_TO_SZ_POST_PROCESS, file_name), index=None)
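# Example usage (labels follow the Allen info csv columns described in the docstring):
# get_sz_labels_image_and_donor_level('description')  # case vs control
# get_sz_labels_image_and_donor_level('smoker')       # smoking status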
def embeddings_per_gene_per_donor(path_to_per_gene_per_donor_level_files, input_type, ts, embeddings_df):
"""
This function gets an image-level embedding file and outputs a donor-level csv file for each gene.
Each gene will have a separate csv file: gene_name.csv
Each row in the csv file will represent a donor.
The number of rows in the csv file is the number of donors on which this specific gene was tested.
We will use image level embeddings, then group them by gene. So each group will be all the images that assay the same gene.
Then, within each group, we will group the images again by donor_id and use the mean() function to take the average of the embeddings.
:param path_to_per_gene_per_donor_level_files: the path in which per gene donor-level files should be saved.
    The directory will be created if it doesn't already exist.
:param input_type: str. Determine the type of input vectors.
Could be: ['embed','demog','demog_and_embed','random','plain_resnet']
:param ts: str. The timestamp that indicates which files to use.
:param embeddings_df: pandas data frame. Image-level embeddings.
:return: a list of genes
"""
# the embeddings are image level
path_to_sz_info = os.path.join(PATH_TO_SZ_STUDY, "human_ISH_info.csv")
sz_info_df = | pd.read_csv(path_to_sz_info) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 21:13:58 2020
@author: sarakohnke
"""
#Set working directory
import os
path="/Users/sarakohnke/Desktop/data_type_you/interim-tocsv"
os.chdir(path)
os.getcwd()
#Import required packages
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
#Import data for files with demographic information
#(This is after converted .xpt to .csv)
#import 7 demo files
demo05=pd.read_csv('DEMO_D_NHANES_Demographics_2005.csv')
demo07=pd.read_csv('DEMO_E_NHANES_Demographics_2007.csv')
demo09=pd.read_csv('DEMO_F_NHANES_Demographics_2009.csv')
demo11=pd.read_csv('DEMO_G_NHANES_Demographics_2011.csv')
demo13=pd.read_csv('DEMO_H_NHANES_Demographics_2013.csv')
demo15=pd.read_csv('DEMO_I_NHANES_Demographics_2015.csv')
demo17=pd.read_csv('DEMO_J_NHANES_Demographics_2017.csv')
#add year as a column
demo05['Year']=2005
demo07['Year']=2007
demo09['Year']=2009
demo11['Year']=2011
demo13['Year']=2013
demo15['Year']=2015
demo17['Year']=2017
#append all dfs together
demographics_allyears=demo05.append(demo07, ignore_index = True)
demographics_allyears=demographics_allyears.append(demo09, ignore_index = True)
demographics_allyears=demographics_allyears.append(demo11, ignore_index = True)
demographics_allyears=demographics_allyears.append(demo13, ignore_index = True)
demographics_allyears=demographics_allyears.append(demo15, ignore_index = True)
demographics_allyears=demographics_allyears.append(demo17, ignore_index = True)
#select only desired cols
demographics_allyears2=demographics_allyears[['SEQN','RIAGENDR','RIDAGEYR']].copy()
#rename cols
demographics_allyears2.rename(columns={'SEQN':'Patient ID',
'RIAGENDR':'Male',
'RIDAGEYR':'Age (years)'}, inplace=True)
#see if there are unknowns (eg 777)
demographics_allyears2['Age (years)'].value_counts().sort_index()
#replace 2 with 0 for female
demographics_allyears2['Male'].replace(2,0,inplace=True)
#drop rows with nas
demographics_allyears3=demographics_allyears2.dropna(axis=0)
#filter for adults
demographics_allyears4=demographics_allyears3[demographics_allyears3['Age (years)']>=18]
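#A minimal helper sketch (the file-name pattern argument is an assumption,
#and the code below does not use it) showing how the repeated
#"read seven survey cycles, tag with year, append" blocks in this script
#could be factored with pd.concat:
def load_nhanes_years(filename_pattern,
                      letters_years=(('D', 2005), ('E', 2007), ('F', 2009),
                                     ('G', 2011), ('H', 2013), ('I', 2015),
                                     ('J', 2017))):
    frames = []
    for letter, year in letters_years:
        df = pd.read_csv(filename_pattern.format(letter=letter, year=year))
        df['Year'] = year
        frames.append(df)
    return pd.concat(frames, ignore_index=True)
#e.g. bp_allyears = load_nhanes_years('BPX_{letter}_NHANES_Blood_Pressure_{year}.csv')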
#Import data for files with blood pressure information
#import 7 bp files
bp05=pd.read_csv('BPX_D_NHANES_Blood_Pressure_2005.csv')
bp07=pd.read_csv('BPX_E_NHANES_Blood_Pressure_2007.csv')
bp09=pd.read_csv('BPX_F_NHANES_Blood_Pressure_2009.csv')
bp11=pd.read_csv('BPX_G_NHANES_Blood_Pressure_2011.csv')
bp13=pd.read_csv('BPX_H_NHANES_Blood_Pressure_2013.csv')
bp15=pd.read_csv('BPX_I_NHANES_Blood_Pressure_2015.csv')
bp17=pd.read_csv('BPX_J_NHANES_Blood_Pressure_2017.csv')
#add year as a column
bp05['Year']=2005
bp07['Year']=2007
bp09['Year']=2009
bp11['Year']=2011
bp13['Year']=2013
bp15['Year']=2015
bp17['Year']=2017
#append all dfs together
bp_allyears=bp05.append(bp07, ignore_index = True)
bp_allyears=bp_allyears.append(bp09, ignore_index = True)
bp_allyears=bp_allyears.append(bp11, ignore_index = True)
bp_allyears=bp_allyears.append(bp13, ignore_index = True)
bp_allyears=bp_allyears.append(bp15, ignore_index = True)
bp_allyears=bp_allyears.append(bp17, ignore_index = True)
#select only desired cols
bp_allyears2=bp_allyears[['SEQN','BPXPLS','BPXSY1','BPXDI1']].copy()
#rename cols
bp_allyears2.rename(columns={'SEQN':'Patient ID',
'BPXPLS':'Pulse (60sec)',
'BPXSY1':'Systolic pressure (mmHg)',
'BPXDI1':'Diastolic pressure (mmHg)'}, inplace=True)
#see if there are unknowns (eg 777)
bp_allyears2['Systolic pressure (mmHg)'].value_counts().sort_index()
#replace values that don't make sense with NaNs
bp_allyears2['Pulse (60sec)'].replace(0,np.nan,inplace=True)
bp_allyears2['Pulse (60sec)'].value_counts().sort_index()
bp_allyears2['Diastolic pressure (mmHg)'].replace([0,2,4,6,8,10,12,14,16,18],np.nan,inplace=True)
bp_allyears2['Diastolic pressure (mmHg)'].value_counts().sort_index()
#drop rows with nas
bp_allyears3=bp_allyears2.dropna(axis=0)
#Import data for files with body measure information
#import 7 body measure files
bm05=pd.read_csv('BMX_D_NHANES_Body_Measures_2005.csv')
bm07=pd.read_csv('BMX_E_NHANES_Body_Measures_2007.csv')
bm09=pd.read_csv('BMX_F_NHANES_Body_Measures_2009.csv')
bm11=pd.read_csv('BMX_G_NHANES_Body_Measures_2011.csv')
bm13=pd.read_csv('BMX_H_NHANES_Body_Measures_2013.csv')
bm15=pd.read_csv('BMX_I_NHANES_Body_Measures_2015.csv')
bm17=pd.read_csv('BMX_J_NHANES_Body_Measures_2017.csv')
#add year as a column
bm05['Year']=2005
bm07['Year']=2007
bm09['Year']=2009
bm11['Year']=2011
bm13['Year']=2013
bm15['Year']=2015
bm17['Year']=2017
#append all dfs together
bm_allyears=bm05.append(bm07, ignore_index = True)
bm_allyears=bm_allyears.append(bm09, ignore_index = True)
bm_allyears=bm_allyears.append(bm11, ignore_index = True)
bm_allyears=bm_allyears.append(bm13, ignore_index = True)
bm_allyears=bm_allyears.append(bm15, ignore_index = True)
bm_allyears=bm_allyears.append(bm17, ignore_index = True)
#select only desired cols
bm_allyears2=bm_allyears[['SEQN','BMXBMI']].copy()
#rename cols
bm_allyears2.rename(columns={'SEQN':'Patient ID',
'BMXBMI':'BMI (kg/m2)'}, inplace=True)
#see if there are unknowns (eg 777)
bm_allyears2['BMI (kg/m2)'].value_counts().sort_index()
#drop rows with nas
bm_allyears3=bm_allyears2.dropna(axis=0)
#Import data for files with total cholesterol information
#import 7 chol files
chol05=pd.read_csv('TCHOL_D_NHANES_Total_Cholesterol_2005.csv')
chol07=pd.read_csv('TCHOL_E_NHANES_Total_Cholesterol_2007.csv')
chol09=pd.read_csv('TCHOL_F_NHANES_Total_Cholesterol_2009.csv')
chol11=pd.read_csv('TCHOL_G_NHANES_Total_Cholesterol_2011.csv')
chol13=pd.read_csv('TCHOL_H_NHANES_Total_Cholesterol_2013.csv')
chol15=pd.read_csv('TCHOL_I_NHANES_Total_Cholesterol_2015.csv')
chol17=pd.read_csv('TCHOL_J_NHANES_Total_Cholesterol_2017.csv')
#add year as a column
chol05['Year']=2005
chol07['Year']=2007
chol09['Year']=2009
chol11['Year']=2011
chol13['Year']=2013
chol15['Year']=2015
chol17['Year']=2017
#append all dfs together
chol_allyears=chol05.append(chol07, ignore_index = True)
chol_allyears=chol_allyears.append(chol09, ignore_index = True)
chol_allyears=chol_allyears.append(chol11, ignore_index = True)
chol_allyears=chol_allyears.append(chol13, ignore_index = True)
chol_allyears=chol_allyears.append(chol15, ignore_index = True)
chol_allyears=chol_allyears.append(chol17, ignore_index = True)
#select only desired cols
chol_allyears2=chol_allyears[['SEQN','LBXTC']].copy()
#rename cols
chol_allyears2.rename(columns={'SEQN':'Patient ID',
'LBXTC':'Total cholesterol (mg/dl)'}, inplace=True)
#see if there are unknowns (eg 777)
chol_allyears2['Total cholesterol (mg/dl)'].value_counts().sort_index()
#drop rows with nas
chol_allyears3=chol_allyears2.dropna(axis=0)
#Import data for files with blood count information
#import 7 blood count files
cbc05=pd.read_csv('CBC_D_NHANES_Complete_Blood_Count_2005.csv')
cbc07=pd.read_csv('CBC_E_NHANES_Complete_Blood_Count_2007.csv')
cbc09=pd.read_csv('CBC_F_NHANES_Complete_Blood_Count_2009.csv')
cbc11=pd.read_csv('CBC_G_NHANES_Complete_Blood_Count_2011.csv')
cbc13=pd.read_csv('CBC_H_NHANES_Complete_Blood_Count_2013.csv')
cbc15=pd.read_csv('CBC_I_NHANES_Complete_Blood_Count_2015.csv')
cbc17=pd.read_csv('CBC_J_NHANES_Complete_Blood_Count_2017.csv')
#add year as a column
cbc05['Year']=2005
cbc07['Year']=2007
cbc09['Year']=2009
cbc11['Year']=2011
cbc13['Year']=2013
cbc15['Year']=2015
cbc17['Year']=2017
#append all dfs together
cbc_allyears=cbc05.append(cbc07, ignore_index = True)
cbc_allyears=cbc_allyears.append(cbc09, ignore_index = True)
cbc_allyears=cbc_allyears.append(cbc11, ignore_index = True)
cbc_allyears=cbc_allyears.append(cbc13, ignore_index = True)
cbc_allyears=cbc_allyears.append(cbc15, ignore_index = True)
cbc_allyears=cbc_allyears.append(cbc17, ignore_index = True)
#select only desired cols
cbc_allyears2=cbc_allyears[['SEQN','LBXMPSI','LBXPLTSI','LBXRBCSI','LBDEONO',
'LBDLYMNO','LBDBANO','LBDMONO']].copy()
#rename cols
cbc_allyears2.rename(columns={'SEQN':'Patient ID',
'LBXMPSI':'Mean platelet volume (fL)',
'LBXPLTSI':'Platelet count (1000 cells/uL)',
'LBXRBCSI':'Red blood cell count (million cells/uL)',
'LBDEONO':'Eosinophils number (1000 cells/uL)',
'LBDLYMNO':'Lymphocyte number (1000 cells/uL)',
'LBDBANO':'Basophils number (1000 cells/uL)',
'LBDMONO':'Monocyte number (1000 cells/uL)'},
inplace=True)
#see if there are unknowns (eg 777)
cbc_allyears2['Monocyte number (1000 cells/uL)'].value_counts().sort_index()
#drop rows with nas
cbc_allyears3=cbc_allyears2.dropna(axis=0)
#Import data for files with A1c/glycohemoglobin information
#import 7 a1c files
a1c05=pd.read_csv('GHB_D_NHANES_A1C_2005.csv')
import functools
import json
import warnings
from abc import ABC, abstractmethod, abstractproperty
from collections.abc import Iterable
from typing import Dict, List, Optional, Tuple, Union
import pandas as pd
import pastas as ps
from numpy import isin
from pastas.io.pas import PastasEncoder
from tqdm import tqdm
from .util import ItemInLibraryException, _custom_warning, validate_names
FrameorSeriesUnion = Union[pd.DataFrame, pd.Series]
warnings.showwarning = _custom_warning
class BaseConnector(ABC):
"""Base Connector class.
Class holds base logic for dealing with timeseries and Pastas
Models. Create your own Connector to a data source by writing a
class that inherits from this BaseConnector. Your class has to
override each abstractmethod and abstractproperty.
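A minimal sketch of a dict-backed subclass (hypothetical, for illustration
only; the names used here are not part of this module) could look like::

    class InMemoryConnector(BaseConnector, ConnectorUtil):
        def __init__(self, name):
            self.name = name
            self.libs = {lib: {} for lib in self._default_library_names}
        def _get_library(self, libname):
            return self.libs[libname]
        def _add_item(self, libname, item, name, metadata=None,
                      overwrite=False):
            self.libs[libname][name] = (item, metadata)
        def _get_item(self, libname, name):
            return self.libs[libname][name][0]
        def _del_item(self, libname, name):
            del self.libs[libname][name]
        def _get_metadata(self, libname, name):
            return self.libs[libname][name][1]
        @property
        def oseries_names(self):
            return list(self.libs["oseries"].keys())
        @property
        def stresses_names(self):
            return list(self.libs["stresses"].keys())
        @property
        def model_names(self):
            return list(self.libs["models"].keys())
        @property
        def oseries_with_models(self):
            return list(self.libs["oseries_models"].keys())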
"""
_default_library_names = ["oseries", "stresses", "models",
"oseries_models"]
# whether to check model timeseries contents against stored copies
check_model_series_values = True
def __repr__(self):
"""Representation string of the object."""
return (f"<{type(self).__name__}> '{self.name}': "
f"{self.n_oseries} oseries, "
f"{self.n_stresses} stresses, "
f"{self.n_models} models")
@abstractmethod
def _get_library(self, libname: str):
"""Get library handle.
Must be overridden by subclass.
Parameters
----------
libname : str
name of the library
Returns
-------
lib : Any
handle to the library
"""
pass
@abstractmethod
def _add_item(self, libname: str,
item: Union[FrameorSeriesUnion, Dict],
name: str,
metadata: Optional[Dict] = None,
overwrite: bool = False) -> None:
"""Internal method to add item for both timeseries and pastas.Models.
Must be overridden by subclass.
Parameters
----------
libname : str
name of library to add item to
item : FrameorSeriesUnion or dict
item to add
name : str
name of the item
metadata : dict, optional
dictionary containing metadata, by default None
"""
pass
@abstractmethod
def _get_item(self, libname: str, name: str) \
-> Union[FrameorSeriesUnion, Dict]:
"""Internal method to get item (series or pastas.Models).
Must be overridden by subclass.
Parameters
----------
libname : str
name of library
name : str
name of item
Returns
-------
item : FrameorSeriesUnion or dict
item (timeseries or pastas.Model)
"""
pass
@abstractmethod
def _del_item(self, libname: str, name: str) -> None:
"""Internal method to delete items (series or models).
Must be overridden by subclass.
Parameters
----------
libname : str
name of library to delete item from
name : str
name of item to delete
"""
pass
@abstractmethod
def _get_metadata(self, libname: str, name: str) -> Dict:
"""Internal method to get metadata.
Must be overriden by subclass.
Parameters
----------
libname : str
name of the library
name : str
name of the item
Returns
-------
metadata : dict
dictionary containing metadata
"""
pass
@abstractproperty
def oseries_names(self):
"""List of oseries names.
Property must be overridden by subclass.
"""
pass
@abstractproperty
def stresses_names(self):
"""List of stresses names.
Property must be overridden by subclass.
"""
pass
@abstractproperty
def model_names(self):
"""List of model names.
Property must be overridden by subclass.
"""
pass
def set_check_model_series_values(self, b: bool):
"""Turn check_model_series_values option on (True) or off (False).
The default option is off. When turned on, the model timeseries
(ml.oseries.series_original, and stressmodel.stress.series_original)
values are checked against the stored copies in the database. If
these do not match, an error is raised, and the model is not added to
the database. This check is somewhat computationally expensive, which
is why it can be turned on or off.
Parameters
----------
b : bool
boolean indicating whether option should be turned on (True) or
off (False). Option is off by default.
"""
self.check_model_series_values = b
print(f"Model timeseries checking set to: {b}.")
def _add_series(self, libname: str,
series: FrameorSeriesUnion,
name: str,
metadata: Optional[dict] = None,
overwrite: bool = False) -> None:
"""Internal method to add series to database.
Parameters
----------
libname : str
name of the library to add the series to
series : pandas.Series or pandas.DataFrame
data to add
name : str
name of the timeseries
metadata : dict, optional
dictionary containing metadata, by default None
overwrite : bool, optional
overwrite existing dataset with the same name,
by default False
Raises
------
ItemInLibraryException
if overwrite is False and name is already in the database
"""
if not isinstance(name, str):
name = str(name)
self._validate_input_series(series)
series = self._set_series_name(series, name)
in_store = getattr(self, f"{libname}_names")
if name not in in_store or overwrite:
self._add_item(libname, series, name, metadata=metadata,
overwrite=overwrite)
self._clear_cache(libname)
else:
raise ItemInLibraryException(f"Item with name '{name}' already"
f" in '{libname}' library!")
def _update_series(self, libname: str,
series: FrameorSeriesUnion,
name: str,
metadata: Optional[dict] = None) -> None:
"""Internal method to update timeseries.
Parameters
----------
libname : str
name of library
series : FrameorSeriesUnion
timeseries containing update values
name : str
name of the timeseries to update
metadata : Optional[dict], optional
optionally provide metadata dictionary which will also update
the current stored metadata dictionary, by default None
"""
if libname not in ["oseries", "stresses"]:
raise ValueError("Library must be 'oseries' or 'stresses'!")
self._validate_input_series(series)
series = self._set_series_name(series, name)
stored = self._get_series(libname, name, progressbar=False)
# get union of index
idx_union = stored.index.union(series.index)
# update series with new values
update = stored.reindex(idx_union)
update.update(series)
# metadata
update_meta = self._get_metadata(libname, name)
if metadata is not None:
update_meta.update(metadata)
self._add_series(libname, update, name, metadata=update_meta,
overwrite=True)
def _upsert_series(self, libname: str,
series: FrameorSeriesUnion,
name: str,
metadata: Optional[dict] = None) -> None:
"""Update or insert series depending on whether it exists in store.
Parameters
----------
libname : str
name of library
series : FrameorSeriesUnion
timeseries to update/insert
name : str
name of the timeseries
metadata : Optional[dict], optional
metadata dictionary, by default None
"""
if libname not in ["oseries", "stresses"]:
raise ValueError("Library must be 'oseries' or 'stresses'!")
if name in getattr(self, f"{libname}_names"):
self._update_series(libname, series, name, metadata=metadata)
else:
self._add_series(libname, series, name, metadata=metadata)
def update_metadata(self, libname: str, name: str, metadata: dict) -> None:
"""Update metadata.
Note: this also retrieves and stores the timeseries, as updating only metadata
is not supported for some Connectors.
Parameters
----------
libname : str
name of library
name : str
name of the item for which to update metadata
metadata : dict
metadata dictionary that will be used to update the stored
metadata
"""
if libname not in ["oseries", "stresses"]:
raise ValueError("Library must be 'oseries' or 'stresses'!")
update_meta = self._get_metadata(libname, name)
update_meta.update(metadata)
# get series, since just updating metadata is not really defined
# in all cases
s = self._get_series(libname, name, progressbar=False)
self._add_series(libname, s, name, metadata=update_meta,
overwrite=True)
def add_oseries(self, series: Union[FrameorSeriesUnion, ps.TimeSeries],
name: str,
metadata: Optional[dict] = None,
overwrite: bool = False) -> None:
"""Add oseries to the database.
Parameters
----------
series : pandas.Series, pandas.DataFrame or pastas.TimeSeries
data to add
name : str
name of the timeseries
metadata : dict, optional
dictionary containing metadata, by default None. If
pastas.TimeSeries is passed, the metadata kwarg is ignored and
metadata is taken from the pastas.TimeSeries object
overwrite : bool, optional
overwrite existing dataset with the same name,
by default False
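Examples
--------
A hypothetical usage sketch, assuming ``store`` is an instance of a
concrete connector subclass::

    series = pd.Series(
        [1.0, 1.2], index=pd.to_datetime(["2020-01-01", "2020-01-02"]))
    store.add_oseries(series, "well_1",
                      metadata={"x": 100.0, "y": 200.0})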
"""
series, metadata = self._parse_series_input(series, metadata)
self._add_series("oseries", series, name=name, metadata=metadata,
overwrite=overwrite)
def add_stress(self, series: Union[FrameorSeriesUnion, ps.TimeSeries],
name: str, kind: str,
metadata: Optional[dict] = None,
overwrite: bool = False) -> None:
"""Add stress to the database.
Parameters
----------
series : pandas.Series, pandas.DataFrame or pastas.TimeSeries
data to add; if pastas.TimeSeries is passed, series_original
and metadata are stored in the database
name : str
name of the timeseries
kind : str
category to identify type of stress, this label is added to the
metadata dictionary.
metadata : dict, optional
dictionary containing metadata, by default None. If
pastas.TimeSeries is passed, the metadata kwarg is ignored and
metadata is taken from the pastas.TimeSeries object
overwrite : bool, optional
overwrite existing dataset with the same name,
by default False
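Examples
--------
A hypothetical usage sketch, assuming ``store`` is an instance of a
concrete connector subclass::

    prec = pd.Series(
        [0.0, 2.5], index=pd.to_datetime(["2020-01-01", "2020-01-02"]))
    store.add_stress(prec, "prec_stn1", kind="prec",
                     metadata={"x": 100.0, "y": 200.0})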
"""
series, metadata = self._parse_series_input(series, metadata)
if metadata is None:
metadata = {}
metadata["kind"] = kind
self._add_series("stresses", series, name=name,
metadata=metadata, overwrite=overwrite)
def add_model(self, ml: Union[ps.Model, dict],
overwrite: bool = False,
validate_metadata: bool = False) -> None:
"""Add model to the database.
Parameters
----------
ml : pastas.Model or dict
pastas Model or dictionary to add to the database
overwrite : bool, optional
if True, overwrite existing model, by default False
validate_metadata : bool, optional
remove unsupported characters from metadata dictionary keys
Raises
------
TypeError
if model is not pastas.Model or dict
ItemInLibraryException
if overwrite is False and model is already in the database
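Examples
--------
A hypothetical usage sketch, assuming ``store`` is a concrete connector
and ``ml`` is a pastas.Model whose oseries and stresses are already
stored under the same names::

    store.add_model(ml, overwrite=True)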
"""
if isinstance(ml, ps.Model):
mldict = ml.to_dict(series=False)
name = ml.name
if validate_metadata:
metadata = validate_names(d=ml.oseries.metadata)
else:
metadata = ml.oseries.metadata
elif isinstance(ml, dict):
mldict = ml
name = ml["name"]
metadata = None
else:
raise TypeError("Expected pastas.Model or dict!")
if not isinstance(name, str):
name = str(name)
if name not in self.model_names or overwrite:
# check if stressmodels supported
self._check_stressmodels_supported(ml)
# check if oseries and stresses exist in store
self._check_model_series_names_for_store(ml)
self._check_oseries_in_store(ml)
self._check_stresses_in_store(ml)
# write model to store
self._add_item("models", mldict, name, metadata=metadata,
overwrite=overwrite)
else:
raise ItemInLibraryException(f"Model with name '{name}' "
"already in 'models' library!")
self._clear_cache("_modelnames_cache")
self._add_oseries_model_links(str(mldict["oseries"]["name"]), name)
@staticmethod
def _parse_series_input(series: Union[FrameorSeriesUnion, ps.TimeSeries],
metadata: Optional[Dict] = None) \
-> Tuple[FrameorSeriesUnion, Optional[Dict]]:
"""Internal method to parse series input.
Parameters
----------
series : Union[FrameorSeriesUnion, ps.TimeSeries],
series object to parse
metadata : dict, optional
metadata dictionary or None, by default None
Returns
-------
series, metadata : FrameorSeriesUnion, Optional[Dict]
timeseries as pandas.Series or DataFrame and optionally
metadata dictionary
"""
if isinstance(series, ps.TimeSeries):
if metadata is not None:
print("Warning! Metadata kwarg ignored. Metadata taken from "
"pastas.TimeSeries object!")
s = series.series_original
m = series.metadata
else:
s = series
m = metadata
return s, m
def update_oseries(self, series: Union[FrameorSeriesUnion, ps.TimeSeries],
name: str, metadata: Optional[dict] = None) -> None:
"""Update oseries values.
Parameters
----------
series : Union[FrameorSeriesUnion, ps.TimeSeries]
timeseries to update stored oseries with
name : str
name of the oseries to update
metadata : Optional[dict], optional
optionally provide metadata, which will update
the stored metadata dictionary, by default None
"""
series, metadata = self._parse_series_input(series, metadata)
self._update_series("oseries", series, name, metadata=metadata)
def upsert_oseries(self, series: Union[FrameorSeriesUnion, ps.TimeSeries],
name: str, metadata: Optional[dict] = None) -> None:
"""Update or insert oseries values depending on whether it exists.
Parameters
----------
series : Union[FrameorSeriesUnion, ps.TimeSeries]
timeseries to update/insert
name : str
name of the oseries
metadata : Optional[dict], optional
optionally provide metadata, which will update
the stored metadata dictionary if it exists, by default None
"""
series, metadata = self._parse_series_input(series, metadata)
self._upsert_series("oseries", series, name, metadata=metadata)
def update_stress(self, series: Union[FrameorSeriesUnion, ps.TimeSeries],
name: str, metadata: Optional[dict] = None) -> None:
"""Update stresses values.
Note: the 'kind' attribute of a stress cannot be updated! To update
the 'kind' delete and add the stress again.
Parameters
----------
series : Union[FrameorSeriesUnion, ps.TimeSeries]
timeseries to update stored stress with
name : str
name of the stress to update
metadata : Optional[dict], optional
optionally provide metadata, which will update
the stored metadata dictionary, by default None
"""
series, metadata = self._parse_series_input(series, metadata)
self._update_series("stresses", series, name, metadata=metadata)
def upsert_stress(self, series: Union[FrameorSeriesUnion, ps.TimeSeries],
name: str, kind: str,
metadata: Optional[dict] = None) -> None:
"""Update or insert stress values depending on whether it exists.
Parameters
----------
series : Union[FrameorSeriesUnion, ps.TimeSeries]
timeseries to update/insert
name : str
name of the stress
metadata : Optional[dict], optional
optionally provide metadata, which will update
the stored metadata dictionary if it exists, by default None
"""
series, metadata = self._parse_series_input(series, metadata)
if metadata is None:
metadata = {}
metadata["kind"] = kind
self._upsert_series("stresses", series, name, metadata=metadata)
def del_models(self, names: Union[list, str]) -> None:
"""Delete model(s) from the database.
Parameters
----------
names : str or list of str
name(s) of the model to delete
"""
for n in self._parse_names(names, libname="models"):
mldict = self.get_models(n, return_dict=True)
oname = mldict["oseries"]["name"]
self._del_item("models", n)
self._del_oseries_model_link(oname, n)
self._clear_cache("_modelnames_cache")
def del_oseries(self, names: Union[list, str]):
"""Delete oseries from the database.
Parameters
----------
names : str or list of str
name(s) of the oseries to delete
"""
for n in self._parse_names(names, libname="oseries"):
self._del_item("oseries", n)
self._clear_cache("oseries")
def del_stress(self, names: Union[list, str]):
"""Delete stress from the database.
Parameters
----------
names : str or list of str
name(s) of the stress to delete
"""
for n in self._parse_names(names, libname="stresses"):
self._del_item("stresses", n)
self._clear_cache("stresses")
def _get_series(self, libname: str, names: Union[list, str],
progressbar: bool = True, squeeze: bool = True) \
-> FrameorSeriesUnion:
"""Internal method to get timeseries.
Parameters
----------
libname : str
name of the library
names : str or list of str
names of the timeseries to load
progressbar : bool, optional
show progressbar, by default True
squeeze : bool, optional
if True return DataFrame or Series instead of dictionary
for single entry
Returns
-------
pandas.DataFrame or dict of pandas.DataFrames
either returns timeseries as pandas.DataFrame or
dictionary containing the timeseries.
"""
ts = {}
names = self._parse_names(names, libname=libname)
desc = f"Get {libname}"
for n in (tqdm(names, desc=desc) if progressbar else names):
ts[n] = self._get_item(libname, n)
# return frame if len == 1
if len(ts) == 1 and squeeze:
return ts[n]
else:
return ts
def get_metadata(self, libname: str, names: Union[list, str],
progressbar: bool = False, as_frame: bool = True,
squeeze: bool = True) -> Union[dict, pd.DataFrame]:
"""Read metadata from database.
Parameters
----------
libname : str
name of the library containing the dataset
names : str or list of str
names of the datasets for which to read the metadata
progressbar : bool, optional
show progressbar, by default False
as_frame : bool, optional
if True return metadata as a DataFrame, by default True
squeeze : bool, optional
if True return dict instead of list of dict
for single entry
Returns
-------
dict or pandas.DataFrame
returns metadata dictionary or DataFrame of metadata
"""
metalist = []
names = self._parse_names(names, libname=libname)
desc = f"Get metadata {libname}"
for n in (tqdm(names, desc=desc) if progressbar else names):
imeta = self._get_metadata(libname, n)
if imeta is None:
imeta = {}
if "name" not in imeta.keys():
imeta["name"] = n
metalist.append(imeta)
if as_frame:
meta = self._meta_list_to_frame(metalist, names=names)
return meta
else:
if len(metalist) == 1 and squeeze:
return metalist[0]
else:
return metalist
def get_oseries(self, names: Union[list, str],
return_metadata: bool = False,
progressbar: bool = False,
squeeze: bool = True) \
-> Union[Union[FrameorSeriesUnion, Dict],
Optional[Union[Dict, List]]]:
"""Get oseries from database.
Parameters
----------
names : str or list of str
names of the oseries to load
return_metadata : bool, optional
return metadata as dictionary or list of dictionaries,
default is False
progressbar : bool, optional
show progressbar, by default False
squeeze : bool, optional
if True return DataFrame or Series instead of dictionary
for single entry
Returns
-------
oseries : pandas.DataFrame or dict of DataFrames
returns timeseries as DataFrame or dictionary of DataFrames if
multiple names were passed
metadata : dict or list of dict
metadata for each oseries, only returned if return_metadata=True
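Examples
--------
A hypothetical usage sketch, assuming ``store`` is a concrete connector
that contains an oseries named "well_1"::

    ts = store.get_oseries("well_1")
    ts, meta = store.get_oseries("well_1", return_metadata=True)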
"""
oseries = self._get_series("oseries", names, progressbar=progressbar,
squeeze=squeeze)
if return_metadata:
metadata = self.get_metadata("oseries",
names,
progressbar=progressbar,
as_frame=False,
squeeze=squeeze)
return oseries, metadata
else:
return oseries
def get_stresses(self, names: Union[list, str],
return_metadata: bool = False,
progressbar: bool = False,
squeeze: bool = True) \
-> Union[Union[FrameorSeriesUnion, Dict],
Optional[Union[Dict, List]]]:
"""Get stresses from database.
Parameters
----------
names : str or list of str
names of the stresses to load
return_metadata : bool, optional
return metadata as dictionary or list of dictionaries,
default is False
progressbar : bool, optional
show progressbar, by default False
squeeze : bool, optional
if True return DataFrame or Series instead of dictionary
for single entry
Returns
-------
stresses : pandas.DataFrame or dict of DataFrames
returns timeseries as DataFrame or dictionary of DataFrames if
multiple names were passed
metadata : dict or list of dict
metadata for each stress, only returned if return_metadata=True
"""
stresses = self._get_series("stresses", names, progressbar=progressbar,
squeeze=squeeze)
if return_metadata:
metadata = self.get_metadata("stresses",
names,
progressbar=progressbar,
as_frame=False,
squeeze=squeeze)
return stresses, metadata
else:
return stresses
def get_models(self, names: Union[list, str], return_dict: bool = False,
progressbar: bool = False, squeeze: bool = True,
update_ts_settings: bool = False) \
-> Union[ps.Model, list]:
"""Load models from database.
Parameters
----------
names : str or list of str
names of the models to load
return_dict : bool, optional
return model dictionary instead of pastas.Model (much
faster for obtaining parameters, for example)
progressbar : bool, optional
show progressbar, by default False
squeeze : bool, optional
if True return Model instead of list of Models
for single entry
update_ts_settings : bool, optional
update timeseries settings based on timeseries in store.
overwrites stored tmin/tmax in model.
Returns
-------
pastas.Model or list of pastas.Model
return pastas model, or list of models if multiple names were
passed
"""
models = []
names = self._parse_names(names, libname="models")
desc = "Get models"
for n in (tqdm(names, desc=desc) if progressbar else names):
data = self._get_item("models", n)
if return_dict:
ml = data
else:
ml = self._parse_model_dict(
data, update_ts_settings=update_ts_settings)
models.append(ml)
if len(models) == 1 and squeeze:
return models[0]
else:
return models
def empty_library(self, libname: str, prompt: bool = True,
progressbar: bool = True):
"""Empty library of all its contents.
Parameters
----------
libname : str
name of the library
prompt : bool, optional
prompt user for input before deleting
contents, by default True. Default answer is
"n", user must enter 'y' to delete contents
progressbar : bool, optional
show progressbar, by default True
"""
if prompt:
ui = input(f"Do you want to empty '{libname}'"
" library of all its contents? [y/N] ")
if ui.lower() != "y":
return
names = self._parse_names(None, libname)
for name in (tqdm(names, desc=f"Deleting items from {libname}")
if progressbar else names):
self._del_item(libname, name)
self._clear_cache(libname)
print(f"Emptied library {libname} in {self.name}: "
f"{self.__class__}")
def _iter_series(self, libname: str, names: Optional[List[str]] = None):
"""Internal method iterate over timeseries in library.
Parameters
----------
libname : str
name of library (e.g. 'oseries' or 'stresses')
names : Optional[List[str]], optional
list of names, by default None, which defaults to
all stored series
Yields
-------
pandas.Series or pandas.DataFrame
timeseries contained in library
"""
names = self._parse_names(names, libname)
for nam in names:
yield self._get_series(libname, nam, progressbar=False)
def iter_oseries(self, names: Optional[List[str]] = None):
"""Iterate over oseries in library.
Parameters
----------
names : Optional[List[str]], optional
list of oseries names, by default None, which defaults to
all stored series
Yields
-------
pandas.Series or pandas.DataFrame
oseries contained in library
"""
yield from self._iter_series("oseries", names=names)
def iter_stresses(self, names: Optional[List[str]] = None):
"""Iterate over stresses in library.
Parameters
----------
names : Optional[List[str]], optional
list of stresses names, by default None, which defaults to
all stored series
Yields
-------
pandas.Series or pandas.DataFrame
stresses contained in library
"""
yield from self._iter_series("stresses", names=names)
def iter_models(self, modelnames: Optional[List[str]] = None,
return_dict: bool = False):
"""Iterate over models in library.
Parameters
----------
modelnames : Optional[List[str]], optional
list of models to iterate over, by default None which uses
all models
return_dict : bool, optional
if True, return model as dictionary, by default False,
which returns a pastas.Model.
Yields
-------
pastas.Model or dict
timeseries model
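Examples
--------
A hypothetical usage sketch, assuming ``store`` is a concrete connector
with stored models::

    for ml in store.iter_models():
        ml.solve(report=False)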
"""
modelnames = self._parse_names(modelnames, "models")
for mlnam in modelnames:
yield self.get_models(mlnam, return_dict=return_dict,
progressbar=False)
def _add_oseries_model_links(self, onam: str,
mlnames: Union[str, List[str]]):
"""Add model name to stored list of models per oseries.
Parameters
----------
onam : str
name of oseries
mlnames : Union[str, List[str]]
model name or list of model names for an oseries with name
onam.
"""
# get stored list of model names
if str(onam) in self.oseries_with_models:
modellist = self._get_item("oseries_models", onam)
else:
# else empty list
modellist = []
# if one model name, make list for loop
if isinstance(mlnames, str):
mlnames = [mlnames]
# loop over model names
for iml in mlnames:
# if not present, add to list
if iml not in modellist:
modellist.append(iml)
self._add_item("oseries_models", modellist, onam, overwrite=True)
self._clear_cache("oseries_models")
def _del_oseries_model_link(self, onam, mlnam):
"""Delete model name from stored list of models per oseries.
Parameters
----------
onam : str
name of oseries
mlnam : str
name of model
"""
modellist = self._get_item("oseries_models", onam)
modellist.remove(mlnam)
if len(modellist) == 0:
self._del_item("oseries_models", onam)
else:
self._add_item("oseries_models", modellist, onam, overwrite=True)
self._clear_cache("oseries_models")
def _update_all_oseries_model_links(self):
"""Add all model names to oseries metadata dictionaries.
Used for old PastaStore versions, where relationship between
oseries and models was not stored. If there are any models in
the database and if the oseries_models library is empty, loops
through all models to determine which oseries each model belongs
to.
"""
# get oseries_models library if there are any contents, if empty
# add all model links.
if self.n_models > 0:
if len(self.oseries_models) == 0:
links = self._get_all_oseries_model_links()
for onam, mllinks in tqdm(links.items(),
desc="Store models per oseries",
total=len(links)):
self._add_oseries_model_links(onam, mllinks)
def _get_all_oseries_model_links(self):
"""Get all model names per oseries in dictionary.
Returns
-------
links : dict
dictionary with oseries names as keys and lists of model names as
values
"""
links = {}
for mldict in tqdm(self.iter_models(return_dict=True),
total=self.n_models,
desc="Get models per oseries"):
onam = mldict["oseries"]["name"]
mlnam = mldict["name"]
if onam in links:
links[onam].append(mlnam)
else:
links[onam] = [mlnam]
return links
@staticmethod
def _clear_cache(libname: str) -> None:
"""Clear cached property."""
if libname == "models":
libname = "_modelnames_cache"
getattr(BaseConnector, libname).fget.cache_clear()
@property # type: ignore
@functools.lru_cache()
def oseries(self):
"""Dataframe with overview of oseries."""
return self.get_metadata("oseries", self.oseries_names)
@property # type: ignore
@functools.lru_cache()
def stresses(self):
"""Dataframe with overview of stresses."""
return self.get_metadata("stresses", self.stresses_names)
@property # type: ignore
@functools.lru_cache()
def _modelnames_cache(self):
"""List of model names."""
return self.model_names
@property
def n_oseries(self):
return len(self.oseries_names)
@property
def n_stresses(self):
return len(self.stresses_names)
@property
def n_models(self):
return len(self.model_names)
@property # type: ignore
@functools.lru_cache()
def oseries_models(self):
"""List of model names per oseries.
Returns
-------
d : dict
dictionary with oseries names as keys and list of model names as
values
"""
d = {}
for onam in self.oseries_with_models:
d[onam] = self._get_item("oseries_models", onam)
return d
class ConnectorUtil:
"""Mix-in class for general Connector helper functions.
Only for internal methods, and not methods that are related to CRUD
operations on database.
"""
def _parse_names(self, names: Optional[Union[list, str]] = None,
libname: Optional[str] = "oseries") -> list:
"""Internal method to parse names kwarg, returns iterable with name(s).
Parameters
----------
names : Union[list, str], optional
str or list of str or None or 'all' (last two options
retrieves all names)
libname : str, optional
name of library, default is 'oseries'
Returns
-------
list
list of names
"""
if not isinstance(names, str) and isinstance(names, Iterable):
return names
elif isinstance(names, str) and names != "all":
return [names]
elif names is None or names == "all":
if libname == "oseries":
return getattr(self, "oseries_names")
elif libname == "stresses":
return getattr(self, "stresses_names")
elif libname == "models":
return getattr(self, "model_names")
elif libname == "oseries_models":
return getattr(self, "oseries_with_models")
else:
raise ValueError(f"No library '{libname}'!")
else:
raise NotImplementedError(f"Cannot parse 'names': {names}")
@staticmethod
def _meta_list_to_frame(metalist: list, names: list):
"""Convert list of metadata dictionaries to DataFrame.
Parameters
----------
metalist : list
list of metadata dictionaries
names : list
list of names corresponding to data in metalist
Returns
-------
pandas.DataFrame
DataFrame containing overview of metadata
"""
# convert to dataframe
if len(metalist) > 1:
meta = pd.DataFrame(metalist)
if len({"x", "y"}.difference(meta.columns)) == 0:
meta["x"] = meta["x"].astype(float)
meta["y"] = meta["y"].astype(float)
elif len(metalist) == 1:
meta = pd.DataFrame(metalist)
elif len(metalist) == 0:
meta = pd.DataFrame()
if "name" in meta.columns:
meta.set_index("name", inplace=True)
else:
meta.index = names
return meta
def _parse_model_dict(self, mdict: dict,
update_ts_settings: bool = False):
"""Internal method to parse dictionary describing pastas models.
Parameters
----------
mdict : dict
dictionary describing pastas.Model
update_ts_settings : bool, optional
update stored tmin and tmax in timeseries settings
based on timeseries loaded from store.
Returns
-------
ml : pastas.Model
timeseries analysis model
"""
# oseries
if 'series' not in mdict['oseries']:
name = str(mdict["oseries"]['name'])
if name not in self.oseries.index:
msg = 'oseries {} not present in project'.format(name)
raise LookupError(msg)
mdict['oseries']['series'] = self.get_oseries(name)
# update tmin/tmax from timeseries
if update_ts_settings:
mdict["oseries"]["settings"]["tmin"] = \
mdict['oseries']['series'].index[0]
mdict["oseries"]["settings"]["tmax"] = \
mdict['oseries']['series'].index[-1]
# StressModel, WellModel
for ts in mdict["stressmodels"].values():
if "stress" in ts.keys():
for stress in ts["stress"]:
if 'series' not in stress:
name = str(stress['name'])
if name in self.stresses.index:
stress['series'] = self.get_stresses(name)
# update tmin/tmax from timeseries
if update_ts_settings:
stress["settings"]["tmin"] = \
stress['series'].index[0]
stress["settings"]["tmax"] = \
stress['series'].index[-1]
# RechargeModel, TarsoModel
if ("prec" in ts.keys()) and ("evap" in ts.keys()):
for stress in [ts["prec"], ts["evap"]]:
if 'series' not in stress:
name = str(stress['name'])
if name in self.stresses.index:
stress['series'] = self.get_stresses(name)
# update tmin/tmax from timeseries
if update_ts_settings:
stress["settings"]["tmin"] = \
stress['series'].index[0]
stress["settings"]["tmax"] = \
stress['series'].index[-1]
else:
msg = "stress '{}' not present in project".format(
name)
raise KeyError(msg)
# hack for pcov w dtype object (when filled with NaNs on store?)
if "fit" in mdict:
if "pcov" in mdict["fit"]:
pcov = mdict["fit"]["pcov"]
if pcov.dtypes.apply(
lambda dtyp: isinstance(dtyp, object)).any():
mdict["fit"]["pcov"] = pcov.astype(float)
try:
# pastas>=0.15.0
ml = ps.io.base._load_model(mdict)
except AttributeError:
# pastas<0.15.0
ml = ps.io.base.load_model(mdict)
return ml
@staticmethod
def _validate_input_series(series):
"""check if series is pandas.DataFrame or pandas.Series.
Parameters
----------
series : object
object to validate
Raises
------
TypeError
if object is not of type pandas.DataFrame or pandas.Series
"""
if not (isinstance(series, pd.DataFrame) or
isinstance(series, pd.Series)):
raise TypeError("Please provide pandas.DataFrame"
" or pandas.Series!")
if isinstance(series, pd.DataFrame):
if series.columns.size > 1:
raise ValueError("Only DataFrames with one "
"column are supported!")
@staticmethod
def _set_series_name(series, name):
"""Set series name to match user defined name in store.
Parameters
----------
series : pandas.Series or pandas.DataFrame
set name for this timeseries
name : str
name of the timeseries (used in the pastastore)
"""
if isinstance(series, pd.Series):
series.name = name
# empty string on index name causes trouble when reading
# data from Arctic VersionStores
if series.index.name == "":
series.index.name = None
if isinstance(series, pd.DataFrame):
series.columns = [name]
return series
@staticmethod
def _check_stressmodels_supported(ml):
supported_stressmodels = [
"StressModel",
"StressModel2",
"RechargeModel",
"WellModel",
"TarsoModel",
"Constant",
"LinearTrend",
"StepModel",
]
if isinstance(ml, ps.Model):
smtyps = [sm._name for sm in ml.stressmodels.values()]
elif isinstance(ml, dict):
smtyps = [sm["stressmodel"] for sm in ml["stressmodels"].values()]
check = isin(smtyps, supported_stressmodels)
if not all(check):
unsupported = set(smtyps) - set(supported_stressmodels)
raise NotImplementedError(
"PastaStore does not support storing models with the "
f"following stressmodels: {unsupported}")
@staticmethod
def _check_model_series_names_for_store(ml):
prec_evap_model = ["RechargeModel", "TarsoModel"]
if isinstance(ml, ps.Model):
# non RechargeModel nor Tarsomodel stressmodels
series_names = [istress.series.name
for sm in ml.stressmodels.values()
if sm._name not in prec_evap_model
for istress in sm.stress]
# RechargeModel, TarsoModel
if isin(prec_evap_model,
[i._name for i in ml.stressmodels.values()]
).any():
series_names += [istress.series.name
for sm in ml.stressmodels.values()
if sm._name in prec_evap_model
for istress in sm.stress]
elif isinstance(ml, dict):
# non RechargeModel nor Tarsomodel stressmodels
series_names = [istress["name"] for sm in
ml["stressmodels"].values()
if sm["stressmodel"] not in prec_evap_model
for istress in sm["stress"]]
# RechargeModel, TarsoModel
if isin(prec_evap_model,
[i["stressmodel"] for i in ml["stressmodels"].values()]
).any():
series_names += [istress["name"] for sm in
ml["stressmodels"].values()
if sm["stressmodel"] in prec_evap_model
for istress in [sm["prec"], sm["evap"]]]
else:
raise TypeError("Expected pastas.Model or dict!")
if len(series_names) - len(set(series_names)) > 0:
msg = ("There are multiple stresses series with the same name! "
"Each series name must be unique for the PastaStore!")
raise ValueError(msg)
def _check_oseries_in_store(self, ml: Union[ps.Model, dict]):
"""Internal method, check if Model oseries are contained in PastaStore.
Parameters
----------
ml : Union[ps.Model, dict]
pastas Model
"""
if isinstance(ml, ps.Model):
name = ml.oseries.name
elif isinstance(ml, dict):
name = str(ml["oseries"]["name"])
else:
raise TypeError("Expected pastas.Model or dict!")
if name not in self.oseries.index:
msg = (f"Cannot add model because oseries '{name}' "
"is not contained in store.")
raise LookupError(msg)
# expensive check
if self.check_model_series_values and isinstance(ml, ps.Model):
s_org = self.get_oseries(name).squeeze().dropna()
if not ml.oseries.series_original.dropna().equals(s_org):
raise ValueError(
f"Cannot add model because model oseries '{name}'"
" is different from stored oseries!")
def _check_stresses_in_store(self, ml: Union[ps.Model, dict]):
"""Internal method, check if stresses timeseries are contained in
PastaStore.
Parameters
----------
ml : Union[ps.Model, dict]
pastas Model
"""
prec_evap_model = ["RechargeModel", "TarsoModel"]
if isinstance(ml, ps.Model):
for sm in ml.stressmodels.values():
if sm._name in prec_evap_model:
stresses = [sm.prec, sm.evap]
else:
stresses = sm.stress
for s in stresses:
if s.name not in self.stresses.index:
msg = (f"Cannot add model because stress '{s.name}' "
"is not contained in store.")
raise LookupError(msg)
if self.check_model_series_values:
s_org = self.get_stresses(s.name).squeeze()
if not s.series_original.equals(s_org):
raise ValueError(
f"Cannot add model because model stress "
f"'{s.name}' is different from stored stress!")
elif isinstance(ml, dict):
for sm in ml["stressmodels"].values():
if sm["stressmodel"] in prec_evap_model:
stresses = [sm["prec"], sm["evap"]]
else:
stresses = sm["stress"]
for s in stresses:
if s["name"] not in self.stresses.index:
msg = (f"Cannot add model because stress '{s['name']}' "
"is not contained in store.")
raise LookupError(msg)
else:
raise TypeError("Expected pastas.Model or dict!")
def _stored_series_to_json(self,
libname: str,
names: Optional[Union[list, str]] = None,
squeeze: bool = True,
progressbar: bool = False):
"""Write stored series to JSON.
Parameters
----------
libname : str
library name
names : Optional[Union[list, str]], optional
names of series, by default None
squeeze : bool, optional
return single entry as json string instead
of list, by default True
progressbar : bool, optional
show progressbar, by default False
Returns
-------
files : list or str
list of series converted to JSON string or single string
if single entry is returned and squeeze is True
"""
names = self._parse_names(names, libname=libname)
files = []
for n in (tqdm(names, desc=libname) if progressbar else names):
s = self._get_series(libname, n, progressbar=False)
if isinstance(s, pd.Series):
s = s.to_frame()
try:
sjson = s.to_json(orient="columns")
except ValueError as e:
msg = (f"DatetimeIndex of '{n}' probably contains NaT "
"or duplicate timestamps!")
raise ValueError(msg) from e
files.append(sjson)
if len(files) == 1 and squeeze:
return files[0]
else:
return files
def _stored_metadata_to_json(self,
libname: str,
names: Optional[Union[list, str]] = None,
squeeze: bool = True,
progressbar: bool = False):
"""Write metadata from stored series to JSON.
Parameters
----------
libname : str
library containing series
names : Optional[Union[list, str]], optional
names to parse, by default None
squeeze : bool, optional
return single entry as json string instead of list, by default True
progressbar : bool, optional
show progressbar, by default False
Returns
-------
files : list or str
list of json string
"""
names = self._parse_names(names, libname=libname)
files = []
for n in (tqdm(names, desc=libname) if progressbar else names):
meta = self.get_metadata(libname, n, as_frame=False)
meta_json = json.dumps(meta, cls=PastasEncoder, indent=4)
files.append(meta_json)
if len(files) == 1 and squeeze:
return files[0]
else:
return files
def _series_to_archive(self, archive, libname: str,
names: Optional[Union[list, str]] = None,
progressbar: bool = True):
"""Internal method for writing DataFrame or Series to zipfile.
Parameters
----------
archive : zipfile.ZipFile
reference to an archive to write data to
libname : str
name of the library to write to zipfile
names : str or list of str, optional
names of the timeseries to write to archive, by default None,
which writes all timeseries to archive
progressbar : bool, optional
show progressbar, by default True
"""
names = self._parse_names(names, libname=libname)
for n in (tqdm(names, desc=libname) if progressbar else names):
sjson = self._stored_series_to_json(
libname, names=n, progressbar=False, squeeze=True)
meta_json = self._stored_metadata_to_json(
libname, names=n, progressbar=False, squeeze=True)
archive.writestr(f"{libname}/{n}.json", sjson)
archive.writestr(f"{libname}/{n}_meta.json", meta_json)
def _models_to_archive(self, archive, names=None, progressbar=True):
"""Internal method for writing pastas.Model to zipfile.
Parameters
----------
archive : zipfile.ZipFile
reference to an archive to write data to
names : str or list of str, optional
names of the models to write to archive, by default None,
which writes all models to archive
progressbar : bool, optional
show progressbar, by default True
"""
names = self._parse_names(names, libname="models")
for n in (tqdm(names, desc="models") if progressbar else names):
m = self.get_models(n, return_dict=True)
jsondict = json.dumps(m, cls=PastasEncoder, indent=4)
archive.writestr(f"models/{n}.pas", jsondict)
@staticmethod
def _series_from_json(fjson: str):
"""Load timeseries from JSON.
Parameters
----------
fjson : str
path to file
Returns
-------
s : pd.DataFrame
DataFrame containing timeseries
"""
s = pd.read_json(fjson, orient="columns")
if not isinstance(s.index, pd.DatetimeIndex):
s.index = pd.to_datetime(s.index, unit='ms')
import pandas as pd
from pandas._testing import assert_frame_equal
import pytest
import numpy as np
from scripts.normalize_data import (
remove_whitespace_from_column_names,
normalize_expedition_section_cols,
remove_bracket_text,
remove_whitespace,
ddm2dec,
remove_empty_unnamed_columns,
normalize_columns
)
class TestRemoveSpacesFromColumns:
def test_replaces_leading_and_trailing_spaces_from_columns(self):
df = pd.DataFrame(columns=[' Aa', 'Bb12 ', ' Cc', 'Dd ', ' Ed Ed ', ' 12 ' ])
res = remove_whitespace_from_column_names(df)
assert res == ['Aa', 'Bb12', 'Cc', 'Dd', 'Ed Ed', '12']
def test_returns_columns_if_no_leading_and_trailing_spaces(self):
df = pd.DataFrame(columns=['Aa', 'Bb', 'Cc', 'Dd', 'Ed Ed'])
res = remove_whitespace_from_column_names(df)
assert res == ['Aa', 'Bb', 'Cc', 'Dd', 'Ed Ed']
class TestNormalizeExpeditionSectionCols:
def test_dataframe_does_not_change_if_expection_section_columns_exist(self):
data = {
"Col": [0, 1],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
# -*- coding: utf-8 -*-
"""
@author: <EMAIL>
@site: e-smartdata.org
"""
import numpy as np
import pandas as pd
import seaborn as sns
sns.set()
dft = pd.DataFrame({'price': np.random.randn(97)},
index=pd.date_range('20190101 09:00:00', periods=97,
freq='5min'))
fake_price = dft.cumsum()
fake_price_mean = fake_price.rolling(10).mean()
# %%
fake_price.plot()
# %%
stats = pd.concat([fake_price, fake_price_mean], axis=1)
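# %%
# A possible continuation (assumed, not part of the original snippet): label
# the two columns and plot the price against its 10-period rolling mean.
stats.columns = ['price', 'rolling_mean_10']
stats.plot()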