Dataset columns:
  prompt      - string, length 19 to 1.03M characters
  completion  - string, length 4 to 2.12k characters
  api         - string, length 8 to 90 characters
#!usr/bin/env python import pandas as pd import argparse import csv parser=argparse.ArgumentParser() #parser.add_argument("-flags", "--flags", dest="flags_input", help="Special alert flags") parser.add_argument("-i", "--isolate", dest="isolate", help="isolate") parser.add_argument("-cc", "--clonalcomplex", dest="cc_input", help="clonal complex file") parser.add_argument("-mec", "--mecfile", dest="mec_input", help="mectype file") parser.add_argument("-spa", "--spatype", dest="spa_input", help="spa type file") parser.add_argument("-mlst", "--mlst", dest="mlst_input", help="MLST file") parser.add_argument("-pfge", "--pfge", dest="pfge_input", help="pfge file, non CC8") parser.add_argument("-o", "--output", dest="output", help="converted tree file, in nexus format") parser.add_argument("-can", "--canSNP", dest="can_input", help="canSNP file") args= parser.parse_args() fields= ["Isolate", "Clonal Complex", "Mec Type","Spa Type","Spa Repeats", "MLST", "PFGE","canSNP Clade", "canSNP USA Type" ] clonalcomplex = open(args.cc_input).read().split() #print("The cctype of this isolate is, "+clonalcomplex) mectype = open(args.mec_input).read().split() #print("The mec type of this isolate is, "+mectype) spaT = pd.read_csv(args.spa_input,sep='\t',engine='python') spaframe =
pd.DataFrame(spaT, index=None, dtype=None)
pandas.DataFrame
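The completion in this row wraps the table returned by pd.read_csv in an explicit pandas.DataFrame call. Below is a minimal runnable sketch of that pattern; the TSV content and column names are hypothetical stand-ins for the script's --spatype input, and the extra DataFrame(...) wrap is kept as-is even though read_csv already returns a DataFrame.

import io
import pandas as pd

# Hypothetical stand-in for the --spatype TSV the script reads.
tsv = io.StringIO("Isolate\tSpaType\nA123\tt008\nB456\tt002\n")
spaT = pd.read_csv(tsv, sep="\t", engine="python")      # already a DataFrame
spaframe = pd.DataFrame(spaT, index=None, dtype=None)   # the labeled call: pandas.DataFrame
print(spaframe)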
import re import numpy as np import pytest from pandas import DataFrame, Series import pandas.util.testing as tm @pytest.mark.parametrize("subset", ["a", ["a"], ["a", "B"]]) def test_duplicated_with_misspelled_column_name(subset): # GH 19730 df =
DataFrame({"A": [0, 0, 1], "B": [0, 0, 1], "C": [0, 0, 1]})
pandas.DataFrame
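This row comes from a pandas test for GH 19730, where DataFrame.duplicated receives a subset containing a misspelled column name. A small self-contained sketch of the behaviour the test exercises, assuming current pandas semantics in which an unknown subset label raises a KeyError:

import pandas as pd

df = pd.DataFrame({"A": [0, 0, 1], "B": [0, 0, 1], "C": [0, 0, 1]})
print(df.duplicated(subset=["A", "B"]))   # valid subset: the second row is flagged as a duplicate

try:
    df.duplicated(subset=["a", "B"])      # "a" is misspelled
except KeyError as err:
    print("KeyError raised for unknown column:", err)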
""" In the memento task, the behavioral responses of participants were written to log files. However, different participants played different versions of the task, and different versions of the task saved a different amount of variables as a Matlab struct into the log file. This file contains information on the variables and their indexes per subject. Indexing is done according to Python, i.e., zero-based. """ import logging from pymento_meg.config import subjectmapping from scipy.io import loadmat from pathlib import Path import pandas as pd logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO) def get_behavioral_data(subject, behav_dir, fieldname, variable=None): """ Read in behavioral data and return the values of one variable. :param subject: :param behav_dir: Path to the directory that contains subject-specific log directories (e.g., data/DMS_MEMENTO/Data_Behav/Data_Behav_Memento) :param fieldname: Fieldname where the variable is in. Can be "probmagrew", "single_onset", "disptimes", or "onset" :param variable: str, variable name that should be retrieved. If None is specified, it will get all variables of this fieldname :return: """ key = f"memento_{subject}" logging.info(f"Reading in experiment log files of {key} for {fieldname}...") # get the information about the subject's behavioral data out of the subject # mapping, but make sure it is actually there first assert key in subjectmapping.keys() subinfo = subjectmapping[key] # based on the information in subinfo, load the file fname = subinfo["logfilename"] path = Path(behav_dir) / fname res = loadmat(path) # get the subject ID out of the behavioral data struct. It is buried quite # deep, and typically doesn't have a leading zero subID = str(res["mementores"]["subID"][0][0][0][0]) assert subID in subject # first, retrieve all possible variables given the fieldname var = subinfo[fieldname] if variable: # make sure the required variable is inside this list assert variable in var # get the index from the required variable. This is necessary to index # the struct in the right place. Only fieldnames seem to be indexable # by name, not their variables idx = var.index(variable) # for all relevant fieldnames, it takes two [0] indices to get to an # unnested matrix of all variables wanted_var = res["mementores"][fieldname][0][0][idx] return wanted_var else: return res["mementores"][fieldname][0][0] def write_to_df(participant, behav_dir, bids_dir, write_out=False): """ Write logfile data to a dataframe to get rid of the convoluted matlab structure. All variables should exist 510 times. :param: str, subject identifier in the form of "001" """ # read the data in as separate dataframes # Onset times are timestamps! View with datetime # first, get matlab data onsets = get_behavioral_data( subject=participant, behav_dir=behav_dir, fieldname="onsets" ) disps = get_behavioral_data( subject=participant, behav_dir=behav_dir, fieldname="disptimes" ) probs = get_behavioral_data( subject=participant, behav_dir=behav_dir, fieldname="probmagrew" ) # we need to transpose the dataframe to get variables as columns and # trials as rows df_onsets = pd.DataFrame(onsets).transpose() df_disps = pd.DataFrame(disps).transpose() df_probs =
pd.DataFrame(probs)
pandas.DataFrame
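The completion turns one field of the Matlab log struct into a DataFrame, which the surrounding code then transposes so that trials become rows and variables become columns. A minimal sketch with a hypothetical array in place of the loadmat output:

import numpy as np
import pandas as pd

# Hypothetical stand-in for one fieldname of the Matlab struct:
# variables along rows, trials along columns (510 trials in the real data).
onsets = np.arange(15, dtype=float).reshape(3, 5)

df_onsets = pd.DataFrame(onsets).transpose()  # trials as rows, variables as columns
print(df_onsets.shape)                        # (5, 3)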
import pytest import numpy as np import pandas as pd from hypothetical.descriptive import covar, pearson, spearman, var, std_dev, variance_condition, \ kurtosis, skewness, mean_absolute_deviation from scipy.stats import spearmanr from numpy.core.multiarray import array class TestCorrelationCovariance(object): d = np.array([[ 1. , 1.11 , 2.569, 3.58 , 0.76 ], [ 1. , 1.19 , 2.928, 3.75 , 0.821], [ 1. , 1.09 , 2.865, 3.93 , 0.928], [ 1. , 1.25 , 3.844, 3.94 , 1.009], [ 1. , 1.11 , 3.027, 3.6 , 0.766], [ 1. , 1.08 , 2.336, 3.51 , 0.726], [ 1. , 1.11 , 3.211, 3.98 , 1.209], [ 1. , 1.16 , 3.037, 3.62 , 0.75 ], [ 2. , 1.05 , 2.074, 4.09 , 1.036], [ 2. , 1.17 , 2.885, 4.06 , 1.094], [ 2. , 1.11 , 3.378, 4.87 , 1.635], [ 2. , 1.25 , 3.906, 4.98 , 1.517], [ 2. , 1.17 , 2.782, 4.38 , 1.197], [ 2. , 1.15 , 3.018, 4.65 , 1.244], [ 2. , 1.17 , 3.383, 4.69 , 1.495], [ 2. , 1.19 , 3.447, 4.4 , 1.026], [ 3. , 1.07 , 2.505, 3.76 , 0.912], [ 3. , 0.99 , 2.315, 4.44 , 1.398], [ 3. , 1.06 , 2.667, 4.38 , 1.197], [ 3. , 1.02 , 2.39 , 4.67 , 1.613], [ 3. , 1.15 , 3.021, 4.48 , 1.476], [ 3. , 1.2 , 3.085, 4.78 , 1.571], [ 3. , 1.2 , 3.308, 4.57 , 1.506], [ 3. , 1.17 , 3.231, 4.56 , 1.458], [ 4. , 1.22 , 2.838, 3.89 , 0.944], [ 4. , 1.03 , 2.351, 4.05 , 1.241], [ 4. , 1.14 , 3.001, 4.05 , 1.023], [ 4. , 1.01 , 2.439, 3.92 , 1.067], [ 4. , 0.99 , 2.199, 3.27 , 0.693], [ 4. , 1.11 , 3.318, 3.95 , 1.085], [ 4. , 1.2 , 3.601, 4.27 , 1.242], [ 4. , 1.08 , 3.291, 3.85 , 1.017], [ 5. , 0.91 , 1.532, 4.04 , 1.084], [ 5. , 1.15 , 2.552, 4.16 , 1.151], [ 5. , 1.14 , 3.083, 4.79 , 1.381], [ 5. , 1.05 , 2.33 , 4.42 , 1.242], [ 5. , 0.99 , 2.079, 3.47 , 0.673], [ 5. , 1.22 , 3.366, 4.41 , 1.137], [ 5. , 1.05 , 2.416, 4.64 , 1.455], [ 5. , 1.13 , 3.1 , 4.57 , 1.325], [ 6. , 1.11 , 2.813, 3.76 , 0.8 ], [ 6. , 0.75 , 0.84 , 3.14 , 0.606], [ 6. , 1.05 , 2.199, 3.75 , 0.79 ], [ 6. , 1.02 , 2.132, 3.99 , 0.853], [ 6. , 1.05 , 1.949, 3.34 , 0.61 ], [ 6. , 1.07 , 2.251, 3.21 , 0.562], [ 6. , 1.13 , 3.064, 3.63 , 0.707], [ 6. 
, 1.11 , 2.469, 3.95 , 0.952]]) def test_naive_covariance(self): np.testing.assert_allclose(covar(self.d[:, 1:], method='naive'), np.cov(self.d[:, 1:], rowvar=False)) np.testing.assert_allclose(covar(self.d[:, 1:3], self.d[:, 3:], 'naive'), np.cov(self.d[:, 1:], rowvar=False)) def test_shifted_covariance(self): np.testing.assert_allclose(covar(self.d[:, 1:], method='shifted covariance'), np.cov(self.d[:, 1:], rowvar=False)) np.testing.assert_allclose(covar(self.d[:, 1:3], self.d[:, 3:], 'shifted covariance'), np.cov(self.d[:, 1:], rowvar=False)) def test_two_pass_covariance(self): np.testing.assert_allclose(covar(self.d[:, 1:], method='two-pass covariance'), np.cov(self.d[:, 1:], rowvar=False)) np.testing.assert_allclose(covar(self.d[:, 1:3], self.d[:, 3:], 'two-pass covariance'), np.cov(self.d[:, 1:], rowvar=False)) def test_covar_no_method(self): with pytest.raises(ValueError): covar(self.d[:, 1:3], self.d[:, 3:], 'NA_METHOD') def test_pearson(self): np.testing.assert_allclose(pearson(self.d[:, 1:]), np.corrcoef(self.d[:, 1:], rowvar=False)) np.testing.assert_allclose(pearson(self.d[:, 1:3], self.d[:, 3:]), np.corrcoef(self.d[:, 1:], rowvar=False)) def test_spearman(self): np.testing.assert_allclose(spearman(self.d[:, 1:]), spearmanr(self.d[:, 1:])[0]) np.testing.assert_allclose(spearman(self.d[:, 1:3], self.d[:, 3:]), spearmanr(self.d[:, 1:])[0]) class TestVariance(object): f = pd.DataFrame({0: [1, -1, 2, 2], 1: [-1, 2, 1, -1], 2: [2, 1, 3, 2], 3: [2, -1, 2, 1]}) h = [[16, 4, 8, 4], [4, 10, 8, 4], [8, 8, 12, 10], [4, 4, 10, 12]] fa = np.array(f) def test_var_corrected_two_pass(self): np.testing.assert_allclose(np.array(var(self.f)).reshape(4,), np.array([2, 2.25, 0.666667, 2]), rtol=1e-02) np.testing.assert_allclose(np.array(var(self.f, 'corrected two pass')).reshape(4,), np.array([2, 2.25, 0.666667, 2]), rtol=1e-02) np.testing.assert_allclose(var(self.h).reshape(4,), np.array([32, 9, 3.666667, 17]), rtol=1e-02) def test_var_textbook_one_pass(self): np.testing.assert_allclose(np.array(var(self.f, 'textbook one pass')).reshape(4,), np.array([2, 2.25, 0.666667, 2]), rtol=1e-02) np.testing.assert_allclose(np.array(var(self.h, 'textbook one pass')).reshape(4,), np.array([32, 9, 3.666667, 17]), rtol=1e-02) np.testing.assert_almost_equal(var(self.fa[:, 2], 'textbook one pass'), 0.66666666666666663) def test_var_standard_two_pass(self): np.testing.assert_allclose(np.array(var(self.f, 'standard two pass')).reshape(4,), np.array([2, 2.25, 0.666667, 2]), rtol=1e-02) np.testing.assert_allclose(np.array(var(self.h, 'standard two pass')).reshape(4,), np.array([32, 9, 3.666667, 17]), rtol=1e-02) np.testing.assert_equal(var(self.fa[:, 1], 'standard two pass'), 2.25) def test_var_youngs_cramer(self): np.testing.assert_allclose(np.array(var(self.f, 'youngs cramer')).reshape(4,), np.array([2, 2.25, 0.666667, 2]), rtol=1e-02) np.testing.assert_allclose(np.array(var(self.h, 'youngs cramer')).reshape(4,), np.array([32, 9, 3.666667, 17]), rtol=1e-02) np.testing.assert_equal(var(self.fa[:, 1], 'youngs cramer'), 2.25) def test_stddev(self): np.testing.assert_equal(std_dev(self.fa[:, 1]), 1.5) np.testing.assert_allclose(std_dev(self.fa), array([ 1.41421356, 1.5 , 0.81649658, 1.41421356])) def test_var_cond(self): np.testing.assert_almost_equal(variance_condition(self.fa[:, 1]), 1.7638342073763937) np.testing.assert_allclose(variance_condition(self.fa), array([2.23606798, 1.76383421, 5.19615242, 2.23606798])) np.testing.assert_allclose(variance_condition(
pd.DataFrame(self.fa)
pandas.DataFrame
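The completion wraps the NumPy array self.fa in a DataFrame so variance_condition can be called on either container. Since variance_condition is imported from the third-party package named hypothetical, the sketch below uses pandas' and NumPy's own column-wise sample variance to show the array-versus-DataFrame equivalence the test relies on:

import numpy as np
import pandas as pd

f = pd.DataFrame({0: [1, -1, 2, 2], 1: [-1, 2, 1, -1],
                  2: [2, 1, 3, 2], 3: [2, -1, 2, 1]})
fa = np.array(f)

# Column-wise sample variance agrees whether the input is the DataFrame or the array.
print(f.var(ddof=1).to_numpy())    # approximately [2, 2.25, 0.667, 2]
print(np.var(fa, axis=0, ddof=1))  # same values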
import pandas as pd import numpy as np np.random.seed(0) def print_unique(df): print(np.unique(df[1], return_counts=True)) file_name = '/Volumes/CT500/Researches/Attention_OOD/data/isic/isic_train_0.txt' df_train = pd.read_csv(file_name, header=None) print_unique(df_train) file_name = '/Volumes/CT500/Researches/Attention_OOD/data/isic/isic_val_0.txt' df_val = pd.read_csv(file_name, header=None) print_unique(df_val) file_name = '/Volumes/CT500/Researches/Attention_OOD/data/isic/isic_unseen_0.txt' df_unseen = pd.read_csv(file_name, header=None) print_unique(df_unseen) df = pd.concat([df_train, df_val, df_unseen]) print_unique(df) file_keep = 1000 val = int(0.1 * file_keep) df_res = None df_dict = {} for i in range(8): temp = df[df[1]==i] if len(temp) > file_keep: ids = np.random.choice(len(temp), file_keep, replace=False) else: ids = list(range(len(temp))) np.random.shuffle(ids) id_v = ids[:val] id_t = ids[val:] df_dict[f'{i}_t'] = temp.iloc[id_t] df_dict[f'{i}_v'] = temp.iloc[id_v] if df_res is None: df_res = temp.iloc[ids] else: df_res = pd.concat([df_res, temp.iloc[ids]]) print_unique(df_res) import os new_dir = '/Volumes/CT500/Researches/Attention_OOD/data/isic_new' if not os.path.exists(new_dir): os.makedirs(new_dir, exist_ok=True) for i in range(8): df_u = pd.concat([df_dict[f'{i}_t'], df_dict[f'{i}_v']]) df_t = None df_v = None for j in range(8): if i == j: continue if df_t is None: df_t = df_dict[f'{j}_t'] df_v = df_dict[f'{j}_v'] else: df_t =
pd.concat([df_t, df_dict[f'{j}_t']])
pandas.concat
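The completion grows df_t by concatenating one per-class frame at a time inside the loop. A minimal sketch of that pattern with hypothetical frames in place of df_dict, followed by the usual cheaper alternative of collecting the parts and concatenating once:

import pandas as pd

# Hypothetical per-class frames standing in for df_dict[f'{j}_t'].
frames = [pd.DataFrame({"path": [f"img_{j}_{k}.jpg" for k in range(3)], "label": j})
          for j in range(4)]

# Pattern used in the row above: one pd.concat per iteration.
df_t = None
for part in frames:
    df_t = part if df_t is None else pd.concat([df_t, part])

# Equivalent result with a single concatenation.
df_t_once = pd.concat(frames, ignore_index=True)
print(len(df_t), len(df_t_once))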
import sys from io import StringIO from PySide6.QtCore import * from PySide6.QtGui import * from PySide6.QtWidgets import * from modules.settings.settings import SettingsManager from modules.pseudo_id.pseudo_id import PseudoIDManager from gms_uploader.modules.models.pandasmodel import PandasModel from gms_uploader.modules.delegates.delegates import ComboBoxDelegate, \ DateAutoCorrectDelegate, AgeDelegate, IconCheckBoxDelegate from gms_uploader.modules.fx.fx_manager import FxManager from gms_uploader.modules.dialogs.dialogs import ValidationDialog, MsgAlert, MsgOKCancel from gms_uploader.modules.models.sortfilterproxymodel import MultiSortFilterProxyModel from gms_uploader.modules.extra.auxiliary_functions import to_list, get_pd_row_index, \ date_validate, age_validate, add_gridlayout_row, update_df from gms_uploader.modules.credentials.credentials import CredManager from gms_uploader.modules.validate.validate import validate from gms_uploader.modules.upload.uploader import Uploader import pandas as pd from datetime import datetime from pathlib import Path import yaml import json import csv from gms_uploader.ui.mw import Ui_MainWindow import qdarktheme import resources __version__ = '0.2.0' __title__ = 'GMS-uploader' class MainWindow(QMainWindow, Ui_MainWindow): def __init__(self): super(MainWindow, self).__init__() self.setup_complete = False self.setupUi(self) self.setAcceptDrops(True) self.clipboard = QGuiApplication.clipboard() self.setWindowIcon(QIcon(':/img/GMS-logo.png')) self.setWindowTitle(__title__ + " " + __version__) self.set_tb_bkg() self.fx_manager = FxManager(Path('fx')) self.fx = None # add icons self.set_icons() default_config_path = Path('config', 'config.yaml') with default_config_path.open(encoding='utf8') as fp: self.conf = yaml.safe_load(fp) self.settm = SettingsManager(self.conf) self.credm = CredManager(self.settm) self.pidm = PseudoIDManager(self.conf['tr']['lab_to_code'], self.settm) self.fx_config = None self.tableView_columns = list(self.conf['model_fields'].keys()) self.df = pd.DataFrame(columns=self.tableView_columns) self.model = PandasModel(self.df, self.conf['model_fields']) self.mfilter_sort_proxy_model = MultiSortFilterProxyModel() self.filter_cols = self.get_filter_cols() # setup settings self.delegates = {} self.delegates['patient'] = {} self.delegates['lab'] = {} self.delegates['organism'] = {} self.set_signals() self.setup_tableviews() # self.set_dataview_setting_widget_values() self.stackedWidget.setCurrentIndex(0) self.tabWidget_metadata.setCurrentIndex(0) self.set_hidden_columns() self.set_col_widths() self.setup_settingview_widgets() self.set_delegates() # Status widgets change status to activated when there is data in the model. Default is disabled. 
self.status_widgets = [ self.action_import_csv, self.action_upload_meta_seqs, self.pushButton_filtermarked, self.pushButton_invert, self.pushButton_drop, self.pushButton_clear, self.pushButton_filldown, self.pushButton_resetfilters, self.action_save_meta, self.action_import_fx, self.action_paste_fx, self.lineEdit_filter ] self.set_datastatus_empty(True) self.ui_init() self.setup_complete = True # setup and init-related functions def ui_init(self): self.tabWidget_metadata.setStyleSheet("QTabWidget::pane { border: 0; }") self.scrollArea.setStyleSheet("QScrollArea { border: 0; }") self.toolBar.setFixedWidth(50) self.toolBar.setMovable(False) self.tabWidget_metadata.setTabText(0, "patient metadata") self.tabWidget_metadata.setTabText(1, "organism metadata") self.tabWidget_metadata.setTabText(2, "lab metadata") self.lineEdit_filter.setPlaceholderText("freetext filter") self.action_import_fx.setDisabled(True) self.action_paste_fx.setDisabled(True) def get_filter_cols(self): cols = list(self.df.columns) used_cols = self.conf['freetext_filter']['model_fields'] return [self.df.columns.get_loc(c) for c in cols if c in used_cols] def set_tb_bkg(self): """ Sets bg image to tableviews. Image shown before metadata is imported. :return: None """ img = ':/img/GMS-logo.png' for tbv in [self.tableView_patient, self.tableView_organism, self.tableView_lab]: tbv.setStyleSheet( """ background-repeat: no-repeat; background-position: center; background-image: url(%s); """ % img ) tbv.horizontalScrollBar().setStyleSheet( """ background: white; """ ) def rem_tb_bkg(self): """ Removes bg image from tableviews. Images removed when metadata is imported, otherwise they are visible through the tables. :return: None """ for tbv in [self.tableView_patient, self.tableView_organism, self.tableView_lab]: tbv.setStyleSheet("background-image: none;") def set_signals(self): """ Setup of signals for static widgets (pushbuttons, actionbuttons, lineedit for filter). 
:return: """ self.action_show_prefs.triggered.connect(lambda: self.stackedWidget.setCurrentIndex(1)) self.action_show_meta.triggered.connect(lambda: self.stackedWidget.setCurrentIndex(0)) self.lineEdit_filter.textChanged.connect(self.set_free_filter) self.pushButton_filtermarked.setCheckable(True) self.pushButton_filtermarked.clicked.connect(self.set_mark_filter) self.pushButton_drop.clicked.connect(self.drop_rows) self.pushButton_clear.clicked.connect(self.clear_table) self.action_select_seq_files.triggered.connect(self.get_seq_files) self.action_upload_meta_seqs.triggered.connect(self.upload) self.action_save_meta.triggered.connect(self.save_metadata_file) self.action_open_meta.triggered.connect(self.open_metadata_file) self.pushButton_invert.clicked.connect(self.invert_marks) self.action_import_csv.triggered.connect(self.get_csv_file_combine) def set_icons(self): self.action_open_meta.setIcon(QIcon(':/icons/AppIcons/folder-open-outline_mdi.svg')) self.action_save_meta.setIcon(QIcon(':/icons/AppIcons/content-save-outline_mdi.svg')) self.action_show_meta.setIcon(QIcon(':/table')) # ':/icons/AppIcons/table_mdi.svg')) self.action_show_prefs.setIcon(QIcon(':/cog')) #:/icons/AppIcons/cog-outline_mdi.svg')) self.action_upload_meta_seqs.setIcon(QIcon(':/icons/AppIcons/tray-arrow-up_mdi.svg')) self.action_select_seq_files.setIcon(QIcon(':/icons/AppIcons/folder-open-outline-dna_mdi.svg')) self.action_import_csv.setIcon(QIcon(':/import-csv')) #':/icons/AppIcons/import-csv_own.svg')) self.action_import_fx.setIcon(QIcon(':/import-fx')) #':/icons/AppIcons/content-import-fx_own.svg')) self.action_paste_fx.setIcon(QIcon(':/paste-fx')) #':/icons/AppIcons/content-paste-fx_own.svg')) self.pushButton_filldown.setIcon(QIcon(':/arrow-down')) #':/icons/AppIcons/arrow-down_mdi.svg')) self.pushButton_drop.setIcon(QIcon(':/close')) #':/icons/AppIcons/close_mdi.svg')) self.pushButton_clear.setIcon(QIcon(':/clear')) # ':/icons/AppIcons/delete-outline_mdi.svg')) self.pushButton_resetfilters.setIcon(QIcon('/filter-remove')) #QIcon(':/icons/AppIcons/filter-remove-outline_mdi.svg')) self.pushButton_filtermarked.setIcon(QIcon(':/filter')) # QIcon(':/icons/AppIcons/filter-outline_mdi.svg')) self.pushButton_invert.setIcon(QIcon(':/invert')) #':/icons/AppIcons/invert_own.svg')) def set_col_widths(self): for i, name in enumerate(self.conf['model_fields']): self.tableView_patient.setColumnWidth(i, self.conf['model_fields'][name]['col_width']) self.tableView_organism.setColumnWidth(i, self.conf['model_fields'][name]['col_width']) self.tableView_lab.setColumnWidth(i, self.conf['model_fields'][name]['col_width']) def set_hidden_columns(self): for i, name in enumerate(self.conf['model_fields']): if 'patient' not in self.conf['model_fields'][name]['view']: self.tableView_patient.setColumnHidden(i, True) if 'organism' not in self.conf['model_fields'][name]['view']: self.tableView_organism.setColumnHidden(i, True) if 'lab' not in self.conf['model_fields'][name]['view']: self.tableView_lab.setColumnHidden(i, True) def set_dataview_setting_widget_values(self): """ Sets values in static lineedits on the dataview pane. 
:return: None """ print("reset dataviews") self.lineEdit_submitter.setText(str(self.settm.get_value("entered_value", "submitter"))) self.lineEdit_lab.setText(str(self.settm.get_value("select_single", "lab"))) self.lineEdit_seq_technology.setText(str(self.settm.get_value("select_single", "seq_technology"))) self.lineEdit_host.setText(str(self.settm.get_value("select_single", "host"))) self.lineEdit_lib_method.setText(str(self.settm.get_value("select_single", "library_method"))) self.lineEdit_import_fx.setText(str(self.settm.get_value("select_single", "fx"))) self.lineEdit_pseudo_id.setText(str(self.pidm.get_first_pid())) self.lineEdit_ul_target_label.setText(str(self.credm.get_current_target_label())) self.lineEdit_ul_protocol.setText(str(self.credm.get_current_protocol())) def setup_settingview_widgets(self): """ Creates and sets up dymamic setting widgets based on the config file :return: None """ for category in self.conf['settings_structure']: if category['target_layout'] == "form": category_name = category['label'] label = QLabel(category_name) label.setProperty("class", "bold") self.verticalLayout_forms.addWidget(label) grid_layout = QGridLayout() grid_layout.setColumnMinimumWidth(0, 150) self.verticalLayout_forms.addLayout(grid_layout) for item in category['items']: for field_type, fields in item.items(): if field_type == "entered_value": for field in fields: func = self.get_button_func(field) if func is not None: button_name = field + "button" button = QPushButton("...", objectName=button_name) button.clicked.connect(func) edit = QLineEdit(objectName=field) edit.textChanged.connect(self.update_setting) edit.setReadOnly(True) hbox = QHBoxLayout() hbox.addWidget(edit) hbox.addWidget(button) label = QLabel(field) label.setProperty("class", "padding-left") label.setMinimumWidth(40) value = self.settm.get_value(field_type, field) edit.setText(str(value)) add_gridlayout_row(grid_layout, label, hbox) else: edit = QLineEdit(objectName=field, editingFinished=self.update_setting) value = self.settm.get_value(field_type, field) edit.setText(value) label = QLabel(field) label.setProperty("class", "padding-left") label.setMinimumWidth(40) add_gridlayout_row(grid_layout, label, edit) elif field_type == "select_single": for field in fields: combo = QComboBox(objectName=field) combo.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) items = [] if field in self.conf['add_empty_selection']: items = ['None'] if field == "fx": for name in self.fx_manager.get_fx_names(): items.append(name) else: if self.conf['settings_values']['select_single'][field] != "None": items.extend(list(self.conf['settings_values']['select_single'][field].keys())) combo.addItems(items) value = self.settm.get_value(field_type, field) combo.setCurrentText(value) label = QLabel(field) label.setProperty("class", "padding-left") label.setMinimumWidth(40) combo.currentTextChanged.connect(self.update_setting) add_gridlayout_row(grid_layout, label, combo) elif field_type == "select_single_fx": for field in fields: combo = QComboBox(objectName=field) combo.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) combo.addItem(str('None')) for name in self.fx_manager.get_fx_names(): combo.addItem(str(name)) label = QLabel(field) label.setProperty("class", "padding-left") label.setMinimumWidth(40) value = self.settm.get_value('select_single', field) if value is not None: if combo.findText(value) >= 0: combo.setCurrentText(value) combo.currentTextChanged.connect(self.update_setting) add_gridlayout_row(grid_layout, label, combo) 
elif category['target_layout'] == "tabs": category_name = category['label'] label = QLabel(category_name) label.setProperty("class", "bold") self.verticalLayout_tabs.addWidget(QLabel()) self.verticalLayout_tabs.addWidget(label) tabwidget_settings = QTabWidget(objectName='tabwidget_settings') tabwidget_settings.setMinimumHeight(420) tabwidget_settings.setStyleSheet("QTabWidget::pane { border: 0; }") tabwidget_settings.setMinimumHeight(550) self.verticalLayout_tabs.addWidget(tabwidget_settings) for item in category['items']: for field_type, fields in item.items(): if field_type == "select_multi": for field in fields: value = self.settm.get_value(field_type, field) store_checked = to_list(value) model = QStandardItemModel() model.setColumnCount(2) tableview = QTableView() model = QStandardItemModel(objectName=field) model.setColumnCount(2) for key, checked in self.conf['settings_values'][field_type][field].items(): item1 = QStandardItem("0") item2 = QStandardItem(key) if key in store_checked: item1.setText("1") model.appendRow([item1, item2]) tableview.setModel(model) tableview.setItemDelegateForColumn(0, IconCheckBoxDelegate(None)) tableview.setColumnWidth(0, 15) hheader = tableview.horizontalHeader() hheader.setStretchLastSection(True) hheader.hide() tableview.verticalHeader().setDefaultSectionSize(20) tableview.verticalHeader().hide() tableview.setShowGrid(False) model.itemChanged.connect(self.update_setting) tabwidget_settings.addTab(tableview, field) self.set_target_label_items() self.set_dataview_setting_widget_values() self.set_fx() def set_fx(self): value = self.settm.get_value('select_single', 'fx') if value is not None and value != 'None': self.fx = self.fx_manager.load_fx(value) self.fx.set_path(self.settm.get_value('entered_value', 'fx_import_path')) if self.fx.from_clipboard: self.action_paste_fx.triggered.connect(self.import_fx_clipboard) if self.fx.from_file: self.action_import_fx.triggered.connect(self.import_fx_file) def update_setting(self, item=None): if self.setup_complete: if isinstance(item, QStandardItem): self.settm.update_setting(item=item) else: obj = self.sender() self.settm.update_setting(obj=obj) self.pidm.init_settings() self.set_dataview_setting_widget_values() self.update_delegates() self.set_fx() def setup_tableviews(self): """ Setup of data tableviews, connects to mfilter_sort_proxy_model, and the pandas model. 
:return: None """ self.mfilter_sort_proxy_model.setSourceModel(self.model) self.tableView_patient.setModel(self.mfilter_sort_proxy_model) self.tableView_patient.setEditTriggers(QAbstractItemView.DoubleClicked | QAbstractItemView.SelectedClicked | QAbstractItemView.EditKeyPressed) self.tableView_patient.horizontalHeader().setStretchLastSection(True) self.tableView_patient.horizontalHeader().setSectionsMovable(True) self.tableView_patient.setSortingEnabled(True) self.tableView_organism.setModel(self.mfilter_sort_proxy_model) self.tableView_organism.setEditTriggers( QAbstractItemView.DoubleClicked | QAbstractItemView.SelectedClicked | QAbstractItemView.EditKeyPressed) self.tableView_organism.horizontalHeader().setStretchLastSection(True) self.tableView_organism.horizontalHeader().setSectionsMovable(True) self.tableView_organism.setSortingEnabled(True) self.tableView_lab.setModel(self.mfilter_sort_proxy_model) self.tableView_lab.setEditTriggers( QAbstractItemView.DoubleClicked | QAbstractItemView.SelectedClicked | QAbstractItemView.EditKeyPressed) self.tableView_lab.horizontalHeader().setStretchLastSection(True) self.tableView_lab.horizontalHeader().setSectionsMovable(True) self.tableView_lab.setSortingEnabled(True) self.pushButton_resetfilters.clicked.connect(self.reset_sort_filter) self.pushButton_filldown.clicked.connect(self.filldown) self.tableView_patient.verticalHeader().hide() self.tableView_lab.verticalHeader().hide() self.tableView_organism.verticalHeader().hide() self.update_model() def setup_credentials(self): self.credm = CredManager(self.settm) # def load_fx_settings(self): # store_key = "/".join(['select_single', 'import_fx']) # fx_name = self.qsettings.value(store_key) # # # default_config_path = Path('config', 'config.yaml') # with default_config_path.open(encoding='utf8') as fp: # self.conf = yaml.safe_load(fp) # model and data-import related functions def update_model(self): self.model = PandasModel(self.df, self.conf['model_fields']) self.mfilter_sort_proxy_model = MultiSortFilterProxyModel() self.mfilter_sort_proxy_model.setSourceModel(self.model) self.tableView_patient.setModel(self.mfilter_sort_proxy_model) self.tableView_lab.setModel(self.mfilter_sort_proxy_model) self.tableView_organism.setModel(self.mfilter_sort_proxy_model) self.set_col_widths() def df_insert(self, df, row): insert_loc = df.index.max() if pd.isna(insert_loc): df.loc[0] = row else: df.loc[insert_loc + 1] = row def verify_files(self, files): """ Ensures that all filespaths in a list exist and have correct suffixes, corresponding to raw sequence data files. Only correct files are returned. If a path is a dir, paths for files in that directory are listed, verified and returned. 
:param files: list of filepaths and/or dirpaths :return: list of verified filepaths """ verified_files = [] for file in files: f = Path(file) if f.is_dir(): for type in self.conf['seq_files']: ext = self.conf['seq_files'][type]['ext'] for fp in f.rglob(ext): if Path(fp).exists(): verified_files.append(fp) else: for type in self.conf['seq_files']: ext = self.conf['seq_files'][type]['ext'] if f.match(ext) and f.exists(): verified_files.append(f) return verified_files def extract_metadata_from_filenames(self, files): """ Extract metadata from sequence data filenames :param files: list of filepaths :return: list of dicts with metadata from filenames """ _data = {} for file in files: seq_path = file.parent filename = file.name filename_obj = Path(filename) sample = filename.split('_')[0] if sample not in _data: _data[sample] = {} _data[sample]['seq_path'] = str(seq_path) if filename_obj.match(self.conf['seq_files']['fastq_gz']['ext']): f = file.stem.split('.')[0] lane = f.split('_')[-1] _data[sample]['lane'] = lane if 'fastq' not in _data[sample]: _data[sample]['fastq'] = [] fastq_list = _data[sample]['fastq'] fastq_list.append(filename) elif filename_obj.match(self.conf['seq_files']['fast5']['ext']): if 'fast5' not in _data[sample]: _data[sample]['fast5'] = [] fast5_list = _data[sample]['fast5'] fast5_list.append(filename) filename_metadata = [] for sample in _data: row = dict() row['mark'] = 0 # add mark column row['internal_lab_id'] = sample for key in _data[sample]: value = _data[sample][key] if isinstance(value, list): sorted_files = sorted(value) row[key] = sorted_files else: row[key] = value filename_metadata.append(row) return filename_metadata def find_duplicates(self, df1, df2): """ Checks if the same internal_lab_id are present in two dataframes :param df1: dataframe1 :param df2: dataframe2 :return: Bool """ df3 = df1.append(df2) return df3['internal_lab_id'].duplicated().any() def add_files_metadata_to_model(self, data): """ Creates new pandas df, from files and metadata, check for duplicates and merge with existing df dataset and create new model. :param data: list of dicts containing metadata and filenames :return: None """ new_df =
pd.DataFrame(data)
pandas.DataFrame
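The completion builds new_df from the list of per-sample dicts produced by extract_metadata_from_filenames. A minimal sketch with hypothetical sample records; keys missing from a record simply become NaN in the resulting frame:

import pandas as pd

# Hypothetical output of extract_metadata_from_filenames(): one dict per sample.
data = [
    {"mark": 0, "internal_lab_id": "S001", "seq_path": "/runs/run1",
     "lane": "L001", "fastq": ["S001_L001_R1.fastq.gz", "S001_L001_R2.fastq.gz"]},
    {"mark": 0, "internal_lab_id": "S002", "seq_path": "/runs/run1",
     "fast5": ["S002.fast5"]},
]

new_df = pd.DataFrame(data)   # one row per dict
print(new_df[["internal_lab_id", "lane"]])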
#必要なライブラリをインポート from bs4 import BeautifulSoup import requests from time import sleep import json import pandas as pd from tqdm import tqdm_notebook as tqdm #スクレイピングに必要なパラメータを入力 start = 1 #初めのページ数 end = 1000 #終わりのページ数(SUUMOのサイトを見て、何ページまでデータがあるかを確認する) place = '相模原' #(辞書urlsに入っている内の)読み込む地域 #後でformatでページ数を代入するので、urlの内「pn=」の部分は「pn={}」としておく urls = { '相模原':"https://suumo.jp/jj/common/ichiran/JJ901FC004/?ar=030&ta=14&sc=14151&sc=14152&sc=14153&kwd=&cb=0.0&ct=9999999&kb=0&kt=9999999&km=1&xb=0&xt=9999999&et=9999999&cn=9999999&newflg=0&pn={}", '横浜':"https://suumo.jp/jj/common/ichiran/JJ901FC004/?initFlg=1&seniFlg=1&pc=30&ar=030&ta=14&sa=01&newflg=0&km=1&bs=040&pn={}", '渋谷':"https://suumo.jp/jj/common/ichiran/JJ901FC004/?ar=030&ta=13&sc=13113&kwd=&cb=0.0&ct=9999999&kb=0&kt=9999999&km=1&xb=0&xt=9999999&et=9999999&cn=9999999&newflg=0&pn={}", '新宿':"https://suumo.jp/jj/common/ichiran/JJ901FC004/?initFlg=1&seniFlg=1&pc=30&ar=030&ta=13&scTmp=13104&kb=0&xb=0&newflg=0&km=1&sc=13104&bs=040&pn={}", '港':"https://suumo.jp/jj/common/ichiran/JJ901FC004/?ar=030&ta=13&sc=13103&kwd=&cb=0.0&ct=9999999&kb=0&kt=9999999&km=1&xb=0&xt=9999999&et=9999999&cn=9999999&newflg=0&pn={}" } #不動産データスクレイピングのメインプログラム #Real_Estate_Data_Scraping def REDS(start,end,place,pre_results=[]): #初期値として、pre_resultsを継承する。 d_list = pre_results url = urls[place] #pre_resultsに値があった場合(再開する場合)、読み込んだページ数をprogressに記録する。 if len(d_list) > 0: progress = list(pd.DataFrame(d_list)['ページ'].unique()) else: progress = [] #tqdmをfor文のrangeに適応することで、スクレイピングの進捗を確認しやすくなる。 for i in tqdm(range(start,end+1)): #progressにあるページ番号(既に読み込んだページ番号)は飛ばす。 if i in progress: continue #途中でエラーが発生してもそれまでの結果が保存できるようにする。 try: #ページを遷移させる。 target_url = url.format(i) #Requestsを用いてtarget_urlにアクセスする。 r = requests.get(target_url) #サーバー負荷軽減の為、ループ毎に1秒間隔を空ける。 sleep(1) #取得したHTMLをBeautifulSoupで解析する。 soup = BeautifulSoup(r.text) #BeautifulSoupで解析したHTMLの内、欲しい不動産情報が乗っている部分を取得 contents = soup.find_all('div',class_='cassettebox js-normalLink js-cassetLink') #1ページ当たり30件の不動産データが表示されていれば、リストcontentsは30個の要素を持つはずなので、それらを一つずつ取り出してデータを取得する。 for content in contents: #再開機能の為、取得元のページ数を保存しておく。 pages = i #上の行のデータを取得 rows = content.find_all('table',class_='listtable') address = rows[0].find_all('div',class_='infodatabox-box-txt')[0].text station = rows[0].find_all('div',class_='infodatabox-box-txt')[1].text access = rows[0].find_all('div',class_='infodatabox-box-txt')[2].text #下の行のデータを取得 r_fees = rows[1].find_all('dd',class_='infodatabox-details-txt')[0].text[:-2] mc_fees = rows[1].find_all('dd',class_='infodatabox-details-txt')[1].text[:-1] k_fees = rows[1].find_all('dd',class_='infodatabox-details-txt')[2].text.split('/')[0] s_fees = rows[1].find_all('dd',class_='infodatabox-details-txt')[2].text.split('/')[1][:-2] area = rows[1].find_all('dd',class_='infodatabox-details-txt')[3].text[:-2] layout = rows[1].find_all('dd',class_='infodatabox-details-txt')[4].text age = rows[1].find_all('div',class_='infodatabox-box-txt')[2].text #取得した各種データを辞書dに格納する。 d = { 'ページ':pages, '住所':address, '路線':station, '交通':access, '賃料':r_fees, '管理共益費':mc_fees, '礼金':k_fees, '敷金':s_fees, '専有面積':area, '間取り':layout, '築年数':age } #辞書dのデータをリストd_listに格納する。 d_list.append(d) #重複したデータの削除は、後でまとめてする。 #d_list = list(map(json.loads,set(map(json.dumps,d_list)))) #進捗を報告させる。 print("d_list's progress:",i,"page  ",len(d_list)) print(target_url) #リストにある存在しないページにアクセスした場合、そのページの読み込みをスキップする。 except IndexError: continue #スクレイピングが中断されても、その時点までに読み込んだデータを出力できるようにする。 except: break #スクレイピングが終わった事を通知 print('Scraping Completed!') return d_list #中断した場合に進捗を保存しておくリスト 
reds_pre_results = [] #中断してもすぐ再開できるように、スクレイピングの結果はreds_testとreds_pre_resultsに共有しておく。 reds_test = REDS(start,end,place,reds_pre_results) reds_pre_results = reds_test reds_pre_results #取得したデータはpickleデータとして保存しておく。 reds_df =
pd.DataFrame(reds_test)
pandas.DataFrame
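The completion converts the scraped listing dicts into a DataFrame before pickling, and the comments in the scraper note that duplicate removal is deferred. A minimal sketch with two hypothetical (and intentionally duplicated) listings, adding the drop_duplicates step the comments postpone:

import pandas as pd

# Hypothetical scraped listings standing in for reds_test; keys mirror the dict built above.
reds_test = [
    {"ページ": 1, "住所": "神奈川県相模原市", "賃料": "6.5", "間取り": "1K"},
    {"ページ": 1, "住所": "神奈川県相模原市", "賃料": "6.5", "間取り": "1K"},  # duplicate
]

reds_df = pd.DataFrame(reds_test)     # the labeled call: pandas.DataFrame
reds_df = reds_df.drop_duplicates()   # the deduplication the comments defer
reds_df.to_pickle("reds.pkl")         # saved as pickle, as the final comment describes
print(len(reds_df))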
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Jan 26 15:39:02 2018 @author: joyce """ import pandas as pd import numpy as np from numpy.matlib import repmat from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\ Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\ Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama class stAlpha(object): def __init__(self,begin,end): self.begin = begin self.end = end self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close') self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open') self.high = get_stockdata_from_sql(1,self.begin,self.end,'High') self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low') self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol') self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount') self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap') self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg') self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH') self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH') # self.mkt = get_fama_from_sql() @timer def alpha1(self): volume = self.volume ln_volume = np.log(volume) ln_volume_delta = Delta(ln_volume,1) close = self.close Open = self.open price_temp = pd.concat([close,Open],axis = 1,join = 'outer') price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open'] del price_temp['Close'],price_temp['Open'] r_ln_volume_delta = Rank(ln_volume_delta) r_ret = Rank(price_temp) rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner') rank.columns = ['r1','r2'] corr = Corr(rank,6) alpha = corr alpha.columns = ['alpha1'] return alpha @timer def alpha2(self): close = self.close low = self.low high = self.high temp = pd.concat([close,low,high],axis = 1,join = 'outer') temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \ / (temp['High'] - temp['Low']) del temp['Close'],temp['Low'],temp['High'] alpha = -1 * Delta(temp,1) alpha.columns = ['alpha2'] return alpha @timer def alpha3(self): close = self.close low = self.low high = self.high temp = pd.concat([close,low,high],axis = 1,join = 'outer') close_delay = Delay(pd.DataFrame(temp['Close']),1) close_delay.columns = ['close_delay'] temp = pd.concat([temp,close_delay],axis = 1,join = 'inner') temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low'])) temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High'])) temp['alpha_temp'] = 0 temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min'] temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max'] alpha = Sum(pd.DataFrame(temp['alpha_temp']),6) alpha.columns = ['alpha3'] return alpha @timer def alpha4(self): close = self.close volume = self.volume close_mean_2 = Mean(close,2) close_mean_8 = Mean(close,8) close_std = STD(close,8) volume_mean_20 = Mean(volume,20) data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner') data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume'] data['alpha'] = -1 data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1 data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1 alpha = pd.DataFrame(data['alpha']) alpha.columns = ['alpha4'] return alpha @timer def alpha5(self): volume = self.volume high = self.high r1 = TsRank(volume,5) r2 = 
TsRank(high,5) rank = pd.concat([r1,r2],axis = 1,join = 'inner') rank.columns = ['r1','r2'] corr = Corr(rank,5) alpha = -1 * TsMax(corr,5) alpha.columns = ['alpha5'] return alpha @timer def alpha6(self): Open = self.open high = self.high df = pd.concat([Open,high],axis = 1,join = 'inner') df['price'] = df['Open'] * 0.85 + df['High'] * 0.15 df_delta = Delta(pd.DataFrame(df['price']),1) alpha = Rank(np.sign(df_delta)) alpha.columns = ['alpha6'] return alpha @timer def alpha7(self): close = self.close vwap = self.vwap volume = self.volume volume_delta = Delta(volume,3) data = pd.concat([close,vwap],axis = 1,join = 'inner') data['diff'] = data['Vwap'] - data['Close'] r1 = Rank(TsMax(pd.DataFrame(data['diff']),3)) r2 = Rank(TsMin(pd.DataFrame(data['diff']),3)) r3 = Rank(volume_delta) rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner') rank.columns = ['r1','r2','r3'] alpha = (rank['r1'] + rank['r2'])* rank['r3'] alpha = pd.DataFrame(alpha) alpha.columns = ['alpha7'] return alpha @timer def alpha8(self): high = self.high low = self.low vwap = self.vwap data = pd.concat([high,low,vwap],axis = 1,join = 'inner') data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.2 data_price_delta = Delta(
pd.DataFrame(data_price)
pandas.DataFrame
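In alpha8 the weighted price is a Series, and the completion promotes it to a one-column DataFrame before passing it to the project's Delta helper. Since Delta comes from the author's internal stats module, the sketch below uses DataFrame.diff as a stand-in for a one-period difference, with hypothetical High/Low/Vwap bars:

import pandas as pd

# Hypothetical bars standing in for the SQL-loaded High/Low/Vwap data.
data = pd.DataFrame({"High": [10.2, 10.6, 10.4],
                     "Low":  [ 9.8, 10.0, 10.1],
                     "Vwap": [10.0, 10.3, 10.2]})

data_price = (data["High"] + data["Low"]) / 2 * 0.2 + data["Vwap"] * 0.2  # a Series
price_df = pd.DataFrame(data_price)   # the labeled call: wrap the Series as a one-column frame
print(price_df.diff(1))               # stand-in for the project's Delta(price_df, 1)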
# Scraping import json import time import pandas as pd import requests import sqlite3 import mpld3 import matplotlib.pyplot as plt import FinanceDataReader as fdr from bs4 import BeautifulSoup from datetime import datetime from datetime import datetime , timedelta from dateutil.relativedelta import relativedelta # Deep Learning # [설치]CUDA 설치 참조 : https://chancoding.tistory.com/89 # [설치]Cuda toolkit 설치 : https://developer.nvidia.com/cuda-toolkit-archive # [설치]cuDNN 설치 참조 : https://webnautes.tistory.com/1423 # [에러]cudart64_110.dll not found 에러 : https://goddessbest-qa.tistory.com/47 # [에러]오른쪽 오류시 아래링크 참조 : _jpype.cp36-win_amd64.pyd already loaded in another classloader # https://byeon-sg.tistory.com/entry/%EC%9E%90%EC%97%B0%EC%96%B4-%EC%B2%98%EB%A6%AC-konlpy-%EC%84%A4%EC%B9%98-%EC%98%A4%EB%A5%98-okt%EC%97%90%EB%9F%AC-already-loaded-in-another-classloader-SystemErro-1 # [에러] _jpype.cp36-win_amd64.pyd already loaded in another classloader 아래 링크 참조 # https://www.lfd.uci.edu/~gohlke/pythonlibs/#jpype import numpy as np import tensorflow as tf import pickle import konlpy # Proccess from django.shortcuts import render from django.http import HttpResponse print('서버시작') now = datetime.now() gap = now - timedelta(days=10) yesterday = now - timedelta(days=1) now = str(now)[0:10] gap = str(gap)[0:10] year = str(now)[0:5] yesterday = str(yesterday)[0:10] df_krx = fdr.StockListing('KRX') df_krx = df_krx.fillna('no') df_krx_list = df_krx['Name'].tolist() company_info = df_krx[['Name','Symbol','Market','Sector','Industry','ListingDate','SettleMonth','Representative','Region','HomePage']] company_list = company_info.values.tolist() ks11 = fdr.DataReader('KS11', year) kq11 = fdr.DataReader('KQ11', year) ks11_date = ks11.index kq11_date = kq11.index ks11_close = ks11['Close'] kq11_close = kq11['Close'] # print('올해 ks11 그래프 생성') ks11_plt = plt.figure() plt.plot(ks11_date, ks11_close) graph_ks11 = mpld3.fig_to_html(ks11_plt , figid='THIS_IS_FIGID') file = open('./pop/templates/graph_ks11.html','w',encoding='UTF-8') file.write(graph_ks11) # # print('올해 kq11 그래프 생성') kq11_plt = plt.figure() plt.plot(kq11_date, kq11_close) graph_kq11 = mpld3.fig_to_html(kq11_plt , figid='THIS_IS_FIGID') file = open('./pop/templates/graph_kq11.html','w',encoding='UTF-8') file.write(graph_kq11) def item_code_by_item_name(item_name): # 종목명을 받아 종목코드를 찾아 반환하는 함수 item_code_list = df_krx.loc[df_krx["Name"] == item_name, "Symbol"].tolist() if len(item_code_list) > 0: item_code = item_code_list[0] return item_code else: return False # 예외처리 def scrap_company_stock(): now = datetime.now() # gap = now - relativedelta(years=2) gap = now - timedelta(days=1) now = str(now)[0:10] gap = str(gap)[0:10] dt_index = pd.date_range(start=gap, end=gap) dt_list = dt_index.strftime("%Y%m%d").tolist() conn = sqlite3.connect('./db.sqlite3') c_select = conn.cursor() c_insert = conn.cursor() yesterday_data = c_select.execute( "SELECT title , date , time , stock , press , posi_nega , link FROM article where date ='" + gap + "' order by date desc , time desc") df = pd.DataFrame(yesterday_data) if len(df) != 0: df.columns = ['title', 'date', 'time', 'stock', 'press', 'posi_nega' , 'link'] df_list = df['title'].values.tolist() else: df_list = [] def search_title(title): okt = konlpy.tag.Okt() new_sentence = okt.morphs(title, stem=True) new_sentence = [tok for tok in new_sentence if tok not in stopwords] vob_size = len(tokenizer.word_index) encoded = tokenizer.texts_to_sequences([new_sentence]) pad_new = tf.keras.preprocessing.sequence.pad_sequences(encoded, 
maxlen=50) score = model.predict(pad_new) acp = np.argmax(score) # if acp == 0: # print('부정') # elif acp == 1: # print('중립') # else: # print('긍정') return acp stopwords = pickle.load(open('./pop/process/stopwords.pkl', 'rb')) tokenizer = pickle.load(open('./pop/process/tokenizer.pkl', 'rb')) model = tf.keras.models.load_model('./pop/process/posnev.h5') for j in dt_list: date_cnt_uri = 'https://finance.naver.com/news/news_list.nhn?mode=LSS3D&section_id=101&section_id2=258&section_id3=402&date=' + j + '&page=100' date_cnt_target = date_cnt_uri date_cnt_req = requests.get(date_cnt_target) date_cnt_soup = BeautifulSoup(date_cnt_req.content, 'html.parser') date_cnt_page = int(date_cnt_soup.select('td.on > a ')[0].get_text()) uri = 'https://finance.naver.com/news/news_list.nhn?mode=LSS3D&section_id=101&section_id2=258&section_id3=402&date=' + j + '&page=' df_krx_list = df_krx['Name'].tolist() for page in range(1, date_cnt_page + 1): target = uri + str(page) req = requests.get(target) soup = BeautifulSoup(req.content, 'html.parser') datas = soup.select('#contentarea_left > ul.realtimeNewsList') for content in datas: titles = content.select(' li > dl > dd.articleSubject') article_date = content.select('li > dl > dd.articleSummary > span.wdate ') article_press = content.select('li > dl > dd.articleSummary > span.press ') article_link = content.select('li > dl > dd.articleSubject > a ') article_sum = list() for i in range(0, len(titles) - 1): article_data = list() data_date = article_date[i].get_text(" ", strip=True)[0:10] data_time = article_date[i].get_text(" ", strip=True)[11:17] data_press = article_press[i].get_text(" ", strip=True) data_title = titles[i].get_text(" ", strip=True) data_link = "https://finance.naver.com" + article_link[i]["href"] data_link = data_link.replace('§','&') data_stock = 'stock' for i in range(0, len(data_title.split())): data_title_splt = ''.join(filter(str.isalnum, data_title.split()[i])) # 특수문자 제거 if data_title_splt in df_krx_list: data_stock = data_title_splt data_posi_nega = str(search_title(data_title)) # print(data_title,data_stock,data_posi_nega) if data_title not in df_list and data_stock != 'stock': c_insert.execute( "INSERT INTO article( date , time , press , title, stock , posi_nega ,link ) VALUES(?,?,?,?,?,?,?)", (data_date, data_time, data_press, data_title, data_stock, data_posi_nega, data_link)) conn.commit() print( data_date, data_time, data_press, data_title, data_stock, data_posi_nega, data_link ) print('DB저장완료') print('스크랩 완료') # 기업 종목 관련 어제 기사 스크랩 print('기업 종목 관련 어제 기사 스크랩') scrap_company_stock() # 장중 기사 스크랩 # print('장중 기사 스크랩') # scrap_market() # 장중 기사 스크랩 # print('장중 기사 스크랩') # scrap_market() def index(request): now = datetime.now() gap = now - timedelta(days=7) yesterday = now - timedelta(days=1) now = str(now)[0:10] gap = str(gap)[0:10] year = str(now)[0:4] yesterday = str(yesterday)[0:10] print('DB로드시작') # 기사 DB conn = sqlite3.connect('./db.sqlite3') c_select = conn.cursor() # 언론사 정보 conn_press = sqlite3.connect('./percentage.sqlite3') c_select_press = conn_press.cursor() datas = pd.read_sql('select press , sum(percentage) / count(percentage) as wei from percentage group by press ', con=conn_press) # print(datas) print('DB로드완료') today_data = c_select.execute( "SELECT title , date , time , stock , press , posi_nega , link FROM article where stock != 'stock' and date > '"+ gap + "' and date < '" + now +"' order by date desc , time desc") df = pd.DataFrame(today_data) df.columns = ['title', 'date', 'time', 'stock' , 'press' , 'posi_nega' ,'link'] 
article_list = [] stock_list = "" stock_sugg_list = [] article_list_dup = [] for i in range(1, len(df.values)): article = {} sugg_list = {} article['title'] = df['title'][i] article['date'] = df['date'][i] article['time'] = df['time'][i] article['stock'] = df['stock'][i].replace("'", "") article['press'] = df['press'][i] article['posi_nega'] = df['posi_nega'][i] article['link'] = df['link'][i].replace("§", "&") article_list.append(article) if i == len(df.values) - 1: stock_list += article['stock'] else: stock_list += article['stock'] + ',' stock_code = item_code_by_item_name(article['stock']) stock_sugg = fdr.DataReader( stock_code , gap, now) stock_sugg = stock_sugg.fillna(0) # print( article['stock'] , gap , now , stock_sugg ) # print( stock_sugg.iloc[-1]['Close'] ) stock_sugg_close = stock_sugg.iloc[-1]['Close'] stock_sugg_change = stock_sugg.iloc[-1]['Change'] * 100 stock_sugg_volume = stock_sugg.iloc[-1]['Volume'] sugg_list['stock'] = article['stock'] sugg_list['close'] = stock_sugg_close sugg_list['change_disp'] = str(round(stock_sugg_change, 3)) + '%' sugg_list['change'] = round(stock_sugg_change, 3) sugg_list['volume'] = stock_sugg_volume sugg_list['link'] = 'https://finance.naver.com/item/main.nhn?code=' + stock_code stock_sugg_list.append(sugg_list) stock_sugg_list_final = [] for stock in stock_sugg_list: if stock not in stock_sugg_list_final: stock_sugg_list_final.append(stock) df_art =
pd.DataFrame(article_list)
pandas.DataFrame
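Earlier in this row a sqlite3 query result is passed straight to pandas.DataFrame and the column names are assigned afterwards, while the completion builds df_art from a list of article dicts much like the previous rows. A minimal sketch of the query-to-DataFrame step, using an in-memory table as a hypothetical stand-in for the project's article table:

import sqlite3
import pandas as pd

# In-memory stand-in for the project's article table.
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE article (title TEXT, stock TEXT, posi_nega TEXT)")
conn.execute("INSERT INTO article VALUES ('Sample headline', 'ACME', '2')")
rows = conn.execute("SELECT title, stock, posi_nega FROM article").fetchall()

df = pd.DataFrame(rows)                       # rows are plain tuples at this point
df.columns = ["title", "stock", "posi_nega"]  # names assigned afterwards, as in the code above
print(df)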
""" Adapted from Tybalt data_models: https://github.com/greenelab/tybalt/blob/master/tybalt/data_models.py """ import os import numpy as np import pandas as pd from scipy.stats.mstats import zscore from sklearn import decomposition from sklearn.preprocessing import StandardScaler, MinMaxScaler import config as cfg class DataModel(): """ Methods for loading and compressing input data Usage: from data_models import DataModel data = DataModel(filename) """ def __init__(self, filename=None, df=False, select_columns=False, gene_modules=None, test_filename=None, test_df=None): """ DataModel can be initialized with either a filename or a pandas dataframe and processes gene modules and sample labels if provided. Arguments: filename - if provided, load gene expression data into object df - dataframe of preloaded gene expression data select_columns - the columns of the dataframe to use gene_modules - a list of gene module assignments for each gene (for use with the simulated data or when ground truth gene modules are known) test_filename - if provided, loads testing dataset into object test_df - dataframe of prelaoded gene expression testing set data """ # Load gene expression data self.filename = filename if filename is None: self.df = df else: self.df = pd.read_table(self.filename, index_col=0) if select_columns: subset_df = self.df.iloc[:, select_columns] other_columns = range(max(select_columns) + 1, self.df.shape[1]) self.other_df = self.df.iloc[:, other_columns] self.df = subset_df if self.test_df is not None: self.test_df = self.test_df.iloc[:, select_columns] if gene_modules is not None: self.gene_modules = pd.DataFrame(gene_modules).T self.gene_modules.index = ['modules'] self.num_samples, self.num_genes = self.df.shape # Load test set gene expression data if applicable self.test_filename = test_filename self.test_df = test_df if test_filename is not None and test_df is None: self.test_df = pd.read_table(self.test_filename, index_col=0) self.num_test_samples, self.num_test_genes = self.test_df.shape assert_ = 'train and test sets must have same number of genes' assert self.num_genes == self.num_test_genes, assert_ def transform(self, how): self.transformation = how if how == 'zscore': self.transform_fit = StandardScaler().fit(self.df) elif how == 'zeroone': self.transform_fit = MinMaxScaler().fit(self.df) else: raise ValueError('how must be either "zscore" or "zeroone".') self.df = pd.DataFrame(self.transform_fit.transform(self.df), index=self.df.index, columns=self.df.columns) if self.test_df is not None: if how == 'zscore': self.transform_test_fit = StandardScaler().fit(self.test_df) elif how == 'zeroone': self.transform_test_fit = MinMaxScaler().fit(self.test_df) test_transform = self.transform_test_fit.transform(self.test_df) self.test_df = pd.DataFrame(test_transform, index=self.test_df.index, columns=self.test_df.columns) @classmethod def list_algorithms(self): return ['pca', 'ica', 'nmf', 'plier'] def pca(self, n_components, transform_df=False, transform_test_df=False): self.pca_fit = decomposition.PCA(n_components=n_components) self.pca_df = self.pca_fit.fit_transform(self.df) colnames = ['pca_{}'.format(x) for x in range(0, n_components)] self.pca_df = pd.DataFrame(self.pca_df, index=self.df.index, columns=colnames) self.pca_weights = pd.DataFrame(self.pca_fit.components_, columns=self.df.columns, index=colnames) if transform_df: out_df = self.pca_fit.transform(self.df) return out_df if transform_test_df: self.pca_test_df = self.pca_fit.transform(self.test_df) def ica(self, 
n_components, transform_df=False, transform_test_df=False, seed=1): self.ica_fit = decomposition.FastICA(n_components=n_components, random_state=seed) self.ica_df = self.ica_fit.fit_transform(self.df) colnames = ['ica_{}'.format(x) for x in range(0, n_components)] self.ica_df = pd.DataFrame(self.ica_df, index=self.df.index, columns=colnames) self.ica_weights = pd.DataFrame(self.ica_fit.components_, columns=self.df.columns, index=colnames) if transform_df: out_df = self.ica_fit.transform(self.df) return out_df if transform_test_df: self.ica_test_df = self.ica_fit.transform(self.test_df) def nmf(self, n_components, transform_df=False, transform_test_df=False, seed=1, init='nndsvdar', tol=5e-3): self.nmf_fit = decomposition.NMF(n_components=n_components, init=init, tol=tol, random_state=seed) self.nmf_df = self.nmf_fit.fit_transform(self.df) colnames = ['nmf_{}'.format(x) for x in range(n_components)] self.nmf_df = pd.DataFrame(self.nmf_df, index=self.df.index, columns=colnames) self.nmf_weights = pd.DataFrame(self.nmf_fit.components_, columns=self.df.columns, index=colnames) if transform_df: out_df = self.nmf_fit.transform(self.df) return out_df if transform_test_df: self.nmf_test_df = self.nmf_fit.transform(self.test_df) def plier(self, n_components, pathways_file, transform_df=False, transform_test_df=False, shuffled=False, seed=1, verbose=False, skip_cache=False): import subprocess import tempfile plier_output_dir = os.path.join(cfg.data_dir, 'plier_output') if not os.path.exists(plier_output_dir): os.makedirs(plier_output_dir) output_prefix = os.path.join(plier_output_dir, 'plier_k{}_s{}'.format( n_components, seed)) if shuffled: output_prefix += '_shuffled' output_data = output_prefix + '_z.tsv' output_weights = output_prefix + '_b.tsv' output_l2 = output_prefix + '_l2.tsv' if skip_cache or (not os.path.exists(output_data) or not os.path.exists(output_weights)): # Warning: # If the temporary file for the expression data is still open for # writing when PLIER is trying to read it, it may cause issues. # # Thus, open the file with delete=False here, then clean up the # temporary file manually after PLIER is finished running. tf = tempfile.NamedTemporaryFile(mode='w', delete=False) expression_filename = tf.name self.df.to_csv(tf, sep='\t') tf.close() args = [ 'Rscript', os.path.join(cfg.scripts_dir, 'run_plier.R'), '--data', expression_filename, '--k', str(n_components), '--seed', str(seed), '--pathways_file', pathways_file, '--output_prefix', output_prefix, ] if verbose: args.append('--verbose') subprocess.check_call(args) os.remove(expression_filename) # The dimensions of matrices here are a bit confusing, since PLIER # does everything backward as compared to sklearn: # # - Input X has shape (n_features, n_samples) # - PLIER Z matrix has shape (n_features, n_components) # - PLIER B matrix has shape (n_components, n_samples) # # So in order to make this match the output of sklearn, set: # # - plier_df = PLIER B.T, has shape (n_samples, n_components) # - plier_weights = PLIER Z.T, has shape (n_components, n_features) self.plier_df = pd.read_csv(output_weights, sep='\t').T self.plier_weights =
pd.read_csv(output_data, sep='\t')
pandas.read_csv
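The completion reads one of the PLIER output matrices back in with pandas.read_csv, and the surrounding comments explain that both matrices are transposed to match sklearn's (samples x components) / (components x features) orientation. A runnable sketch with tiny hypothetical TSV outputs in place of the files written by run_plier.R:

import io
import pandas as pd

# Hypothetical PLIER outputs: Z is (features x components), B is (components x samples).
z_tsv = io.StringIO("\tLV1\tLV2\ngeneA\t0.1\t0.0\ngeneB\t0.3\t0.2\n")
b_tsv = io.StringIO("\tsample1\tsample2\nLV1\t1.2\t0.8\nLV2\t0.4\t0.5\n")

plier_df = pd.read_csv(b_tsv, sep="\t", index_col=0).T       # (samples x components)
plier_weights = pd.read_csv(z_tsv, sep="\t", index_col=0).T  # (components x features)
print(plier_df.shape, plier_weights.shape)                   # (2, 2) (2, 2)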
import json import logging import os import sys from pathlib import Path from typing import Union import fire import pandas as pd from sklearn.model_selection import StratifiedKFold from smart_open import open from tqdm import tqdm from cord19.preprocessing.negative_sampling import get_cocitations from cord19.utils import get_sorted_pair, to_label from cord19.preprocessing.cord19_reader import get_papers_and_citations_from_cord19, merge_cord19_and_s2_papers from cord19.preprocessing.negative_sampling import get_negative_pairs from cord19.utils import normalize_section, resolve_and_sect_titles, get_text_from_doi logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) def save_dataset(input_dir: Union[str, Path], output_dir: Union[str, Path], cv_folds: int = 4): """ Run with: $ python -m cord19.dataset save_dataset <input_dir> <output_dir> input_dir = '/home/mostendorff/datasets/cord-19/' output_dir = '/home/mostendorff/datasets/cord-19/dataset/' cv_folds = 4 input_dir/metadata.csv input_dir/doi2paper.json.gz input_dir/<subsets> = ['biorxiv_medrxiv', 'comm_use_subset', 'custom_license', 'noncomm_use_subset'] output_dir/docs.jsonl output_dir/folds/1/train.csv output_dir/folds/1/test.csv tar -cvzf cord19_docrel.tar.gz docs.jsonl folds/ curl --upload-file cord19_docrel.tar.gz ftp://$FTP_LOGIN:[email protected]/cloud.ostendorff.org/static/ :param input_dir: Path to directory with input files :param output_dir: Output files are written to this dir :param cv_folds: Number of folds in k-fold cross validation """ label_col = 'label' negative_label = 'none' min_text_length = 50 negative_sampling_ratio = 0.5 doc_a_col = 'from_doi' doc_b_col = 'to_doi' labels = [ 'discussion', 'introduction', 'conclusion', 'results', 'methods', 'background', 'materials', 'virus', 'future work' ] # input_dir = os.path.join(env['datasets_dir'], 'cord-19') # Convert dirs to Path if is string if isinstance(output_dir, str): output_dir = Path(output_dir) if isinstance(input_dir, str): input_dir = Path(input_dir) # Read meta data meta_df = pd.read_csv(input_dir / 'metadata.csv', dtype={'doi': str, 'journal': str}) id2meta = {row['sha']: row for idx, row in meta_df.iterrows() if row['sha']} logger.info('Unique DOIs in meta data: %s' % (len(meta_df['doi'].unique()) / len(meta_df))) # Load paper data and citations from CORD-19 id2paper, cits = get_papers_and_citations_from_cord19(input_dir, id2meta) # Load paper data from disk (scraped from S2) if os.path.exists(input_dir / 'doi2s2paper.json.gz'): with open(str(input_dir / 'doi2s2paper.json.gz'), 'r') as f: doi2s2paper = json.load(f) logger.info(f'Loaded {len(doi2s2paper):,} scraped papers from disk') else: logger.error('Cannot load S2 papers from: %s' % (input_dir / 'doi2paper.json.gz')) doi2s2paper = {} # Merge CORD-19 papers and S2 papers doi2paper = merge_cord19_and_s2_papers(id2paper, id2meta, doi2s2paper) logger.info(f'Loaded {len(doi2paper)} from CORD-19') all_dois = list(doi2paper.keys()) # DOIs with text doi2text = {} for doi in all_dois: text = get_text_from_doi(doi, doi2paper, raise_not_found_error=False) if len(text) > min_text_length: doi2text[doi] = text logger.info(f'Total DOIs: {len(all_dois):,}') logger.info(f'With text DOIs: {len(doi2text):,}') # Filter citations with existing DOI cits_with_doi = [c for c in cits if c[0] in doi2paper and c[1] in doi2paper] # CORD-19 only: Citations with DOI: 30655 (0.09342419246206499) # + S2: Citations with DOI: 170454 (0.5194756908148369) logger.info(f'Citations with DOI: {len(cits_with_doi)} 
({len(cits_with_doi) / len(cits)})') missing_papers = [c[0] for c in cits if c[0] not in doi2paper] missing_papers += [c[1] for c in cits if c[1] not in doi2paper] logger.info(f'Missing paper data, but DOI: {len(missing_papers)}') unique_missing_papers = set(missing_papers) logger.info(f'Unique DOIs of missing papers: {len(unique_missing_papers)}') # resolve 'and' titles normalized_cits_with_doi = resolve_and_sect_titles(cits_with_doi) cits_df =
pd.DataFrame(normalized_cits_with_doi, columns=[doc_a_col, doc_b_col, 'citing_section'])
pandas.DataFrame
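The completion above builds the citation table from a list of (from, to, section) tuples; a minimal sketch of that pandas.DataFrame pattern, with made-up DOI pairs:

import pandas as pd

# Each tuple becomes one row; the columns argument names them explicitly.
pairs = [
    ("10.1000/a", "10.1000/b", "introduction"),
    ("10.1000/a", "10.1000/c", "discussion"),
]
cits_df = pd.DataFrame(pairs, columns=["from_doi", "to_doi", "citing_section"])
print(cits_df)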
# -*- coding: utf-8 -*- """ Methods to perform coverage analysis. @author: <NAME> <<EMAIL>> """ import pandas as pd import numpy as np import geopandas as gpd from typing import List, Optional from shapely import geometry as geo from datetime import datetime, timedelta from skyfield.api import load, wgs84, EarthSatellite from ..schemas.point import Point from ..schemas.satellite import Satellite from ..schemas.instrument import Instrument, DutyCycleScheme from ..utils import ( compute_min_altitude, swath_width_to_field_of_regard, compute_max_access_time, compute_orbit_period, ) def collect_observations( point: Point, satellite: Satellite, instrument: Instrument, start: datetime, end: datetime, omit_solar: bool = True, sample_distance: Optional[float] = None, ) -> gpd.GeoDataFrame: """ Collect single satellite observations of a geodetic point of interest. :param point: The ground point of interest :type point: :class:`tatc.schemas.point.Point` :param satellite: The observing satellite :type satellite: :class:`tatc.schemas.satellite.Satellite` :param instrument: The instrument used to make observations :type instrument::`tatc.schemas.instrument.instrument` :param start: The start of the mission window :type start::`datetime.datetime` :param end: The end of the mission window :type end::`datetime.datetime` :param omit_solar: True, if solar angles should be omitted to improve computational efficiency, defaults to True :type omit_solar: bool, optional :param sample_distance: Ground sample distance (m) to override instrument field of regard, defaults to None :type sample_distance: int, optional :return: An instance of :class:`geopandas.GeoDataFrame` containing all recorded reduce_observations :rtype::`geopandas.GeoDataFrame` """ # build a topocentric point at the designated geodetic point topos = wgs84.latlon(point.latitude, point.longitude) # load the timescale and define starting and ending points ts = load.timescale() t0 = ts.from_datetime(start) t1 = ts.from_datetime(end) # load the ephemerides eph = load("de421.bsp") # convert orbit to tle orbit = satellite.orbit.to_tle() # construct a satellite for propagation sat = EarthSatellite(orbit.tle[0], orbit.tle[1], satellite.name) # compute the initial satellite height (altitude) satellite_height = wgs84.subpoint(sat.at(t0)).elevation.m # compute the minimum altitude angle required for observation min_altitude = compute_min_altitude( satellite_height, instrument.field_of_regard if sample_distance is None else swath_width_to_field_of_regard(satellite_height, sample_distance), ) # compute the maximum access time to filter bad data max_access_time = timedelta( seconds=compute_max_access_time(satellite_height, min_altitude) ) # TODO: consider instrument operational intervals ops_intervals = pd.Series( [pd.Interval(pd.Timestamp(start), pd.Timestamp(end), "both")] ) # find the set of observation events t, events = sat.find_events(topos, t0, t1, altitude_degrees=min_altitude) if omit_solar: # basic dataframe without solar angles df = pd.DataFrame( { "point_id": pd.Series([], dtype="int"), "geometry": pd.Series([], dtype="object"), "satellite": pd.Series([], dtype="str"), "instrument": pd.Series([], dtype="str"), "start": pd.Series([], dtype="datetime64[ns, utc]"), "end": pd.Series([], dtype="datetime64[ns, utc]"), "epoch": pd.Series([], dtype="datetime64[ns, utc]"), "sat_alt": pd.Series([], dtype="float64"), "sat_az": pd.Series([], dtype="float64"), } ) else: # extended dataframe including solar angles df = pd.DataFrame( { "point_id":
pd.Series([], dtype="int")
pandas.Series
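The observation-collection code above pre-declares an empty, typed DataFrame column by column; a short sketch of that pandas.Series idiom, with the column list shortened for illustration:

import pandas as pd

# Empty Series with explicit dtypes give an empty DataFrame a fixed schema,
# so later appends or concatenations keep consistent column types.
df = pd.DataFrame(
    {
        "point_id": pd.Series([], dtype="int"),
        "satellite": pd.Series([], dtype="str"),
        "start": pd.Series([], dtype="datetime64[ns, UTC]"),
        "sat_alt": pd.Series([], dtype="float64"),
    }
)
print(df.dtypes)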
''' Reads in literature metallicities and makes new Fe/H basis ''' import pickle import sys import pandas as pd import numpy as np import matplotlib.pyplot as plt from astroquery.simbad import Simbad from . import * class LitFehRaw(): ''' Read in Fe/H values from the literature, before making any transformations ''' def __init__(self): # map the raw data to object # source_dir=config_red["data_dirs"]["DIR_LIT_HIGH_RES_FEH"]): source_dir = "/Users/bandari/Documents/git.repos/rrlfe/src/high_res_feh/" # stand-in that consists of our program star names self.df_our_program_stars = pd.read_csv(source_dir + "our_program_stars_names_only.csv") # Fe/H from Layden+ 1994; this may serve as the common basis for RRabs self.df_layden_feh = pd.read_csv(source_dir + "layden_1994_abundances.dat") # RES: "rather low" # Fe/H Clementini+ 1995 self.df_clementini_feh = pd.read_csv(source_dir + "clementini_1995_abundances.dat") # Fe/H Fernley+ 1996 self.df_fernley96_feh = pd.read_csv(source_dir + "fernley_1996_abundances.dat") # RES: 60,000, FeI & FeII, 5900-8100 A # Fe/H from Fernley+ 1997 self.df_fernley97_feh = pd.read_csv(source_dir + "fernley_1997_abundances.dat") # RES: 60,000, two FeII lines, 5900-8100 A # log(eps) from Lambert+ 1996 self.df_lambert_logeps = pd.read_csv(source_dir + "lambert_1996_abundances.dat") # RES: ~23,000, FeII + photometric models, 3600-9000 A # Fe/H from Wallerstein and Huang 2010, arXiv 1004.2017 self.df_wallerstein_feh = pd.read_csv(source_dir + "wallerstein_huang_2010_abundances.dat") # RES: ~30,000, FeII # Fe/H from Chadid+ 2017 ApJ 835.2:187 (FeI and II lines) self.df_chadid_feh = pd.read_csv(source_dir + "chadid_2017_abundances.dat") # RES: 38000, FeI & FeII, 3400-9900 A # Fe/H from Liu+ 2013 Res Ast Astroph 13:1307 self.df_liu_feh = pd.read_csv(source_dir + "liu_2013_abundances.dat") # RES: ~60,000, FeI (& FeII?), 5100-6400 A # Fe/H from Nemec+ 2013 self.df_nemec_feh = pd.read_csv(source_dir + "nemec_2013_abundances.dat") # RES: ~65,000 or 36,000, FeI & FeII, 5150-5200 A # Fe/H from Solano+ 1997 self.df_solano_feh =
pd.read_csv(source_dir + "solano_1997_abundances.dat")
pandas.read_csv
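Several of the abundance tables above are plain .dat files; a brief sketch of pandas.read_csv on whitespace-delimited text, with invented star names and values:

import io
import pandas as pd

# sep=r"\s+" splits on runs of whitespace instead of commas.
dat_text = "star feh\nRR_Lyr -1.39\nX_Ari -2.43\n"
df = pd.read_csv(io.StringIO(dat_text), sep=r"\s+")
print(df)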
from delta.tables import DeltaTable from notebookutils import mssparkutils from pyspark.sql.types import StructType, StructField, StringType, IntegerType, DoubleType, ArrayType, TimestampType, BooleanType, ShortType, DateType from pyspark.sql import functions as F from pyspark.sql import SparkSession from pyspark.sql.utils import AnalysisException from opencensus.ext.azure.log_exporter import AzureLogHandler, logging import pandas as pd import sys import re import json import datetime import pytz import random import io logger = logging.getLogger('OEA') class OEA: def __init__(self, storage_account='', instrumentation_key=None, salt='', logging_level=logging.DEBUG): if storage_account: self.storage_account = storage_account else: oea_id = mssparkutils.env.getWorkspaceName()[8:] # extracts the OEA id for this OEA instance from the synapse workspace name (based on OEA naming convention) self.storage_account = 'stoea' + oea_id # sets the name of the storage account based on OEA naming convention self.keyvault = 'kv-oea-' + oea_id self.keyvault_linked_service = 'LS_KeyVault_OEA' self.serverless_sql_endpoint = mssparkutils.env.getWorkspaceName() + '-ondemand.sql.azuresynapse.net' self._initialize_logger(instrumentation_key, logging_level) self.salt = salt self.timezone = 'EST' self.stage1np = 'abfss://stage1np@' + self.storage_account + '.dfs.core.windows.net' self.stage2np = 'abfss://stage2np@' + self.storage_account + '.dfs.core.windows.net' self.stage2p = 'abfss://stage2p@' + self.storage_account + '.dfs.core.windows.net' self.stage3np = 'abfss://stage3np@' + self.storage_account + '.dfs.core.windows.net' self.stage3p = 'abfss://stage3p@' + self.storage_account + '.dfs.core.windows.net' self.framework_path = 'abfss://oea-framework@' + self.storage_account + '.dfs.core.windows.net' # Initialize framework db spark.sql(f"CREATE DATABASE IF NOT EXISTS oea") spark.sql(f"CREATE TABLE IF NOT EXISTS oea.env (name string not null, value string not null, description string) USING DELTA LOCATION '{self.framework_path}/db/env'") df = spark.sql("select value from oea.env where name='storage_account'") if df.first(): spark.sql(f"UPDATE oea.env set value='{self.storage_account}' where name='storage_account'") else: spark.sql(f"INSERT INTO oea.env VALUES ('storage_account', '{self.storage_account}', 'The name of the data lake storage account for this OEA instance.')") spark.sql(f"CREATE TABLE IF NOT EXISTS OEA.watermark (source string not null, entity string not null, watermark timestamp not null) USING DELTA LOCATION '{self.framework_path}/db/watermark'") logger.debug("OEA initialized.") def path(self, container_name, directory_path=None): if directory_path: return f'abfss://{container_name}@{self.storage_account}.dfs.core.windows.net/{directory_path}' else: return f'abfss://{container_name}@{self.storage_account}.dfs.core.windows.net' def convert_path(self, path): """ Converts the given path into a valid url. eg, convert_path('stage1np/contoso_sis/student/*') # returns abfss://[email protected]/contoso_sis/student/* """ path_args = path.split('/') stage = path_args.pop(0) return self.path(stage, '/'.join(path_args)) def _initialize_logger(self, instrumentation_key, logging_level): logging.lastResort = None # the logger will print an error like "ValueError: I/O operation on closed file" because we're trying to have log messages also print to stdout # and apparently this causes issues on some of the spark executor nodes. 
The bottom line is that we don't want these logging errors to get printed in the notebook output. logging.raiseExceptions = False logger.setLevel(logging_level) handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging_level) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) if instrumentation_key: # Setup logging to go to app insights (more info here: https://github.com/balakreshnan/Samples2021/blob/main/Synapseworkspace/opencensuslog.md#azure-synapse-spark-logs-runtime-errors-to-application-insights) logger.addHandler(AzureLogHandler(connection_string='InstrumentationKey=' + instrumentation_key)) def get_value_from_db(self, query): df = spark.sql(query) if df.first(): return df.first()[0] else: return None def get_last_watermark(self, source, entity): return self.get_value_from_db(f"select w.watermark from oea.watermark w where w.source='{source}' and w.entity='{entity}' order by w.watermark desc") def insert_watermark(self, source, entity, watermark_datetime): spark.sql(f"insert into oea.watermark values ('{source}', '{entity}', '{watermark_datetime}')") def get_secret(self, secret_name): """ Retrieves the specified secret from the keyvault. This method assumes that the keyvault linked service has been setup and is accessible. """ sc = SparkSession.builder.getOrCreate() token_library = sc._jvm.com.microsoft.azure.synapse.tokenlibrary.TokenLibrary value = token_library.getSecret(self.keyvault, secret_name, self.keyvault_linked_service) return value def delete(self, path): oea.rm_if_exists(self.convert_path(path)) def land(self, data_source, entity, df, partition_label='', format_str='csv', header=True, mode='overwrite'): """ Lands data in stage1np. If partition label is not provided, the current datetime is used with the label of 'batchdate'. eg, land('contoso_isd', 'student', data, 'school_year=2021') """ tz = pytz.timezone(self.timezone) datetime_str = datetime.datetime.now(tz).replace(microsecond=0).isoformat() datetime_str = datetime_str.replace(':', '') # Path names can't have a colon - https://github.com/apache/hadoop/blob/trunk/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md#path-names df.write.format(format_str).save(self.path('stage1np', f'{data_source}/{entity}/{partition_label}/batchdate={datetime_str}'), header=header, mode=mode) def load(self, folder, table, stage=None, data_format='delta'): """ Loads a dataframe based on the path specified in the given args """ if stage is None: stage = self.stage2p path = f"{stage}/{folder}/{table}" try: df = spark.read.load(f"{stage}/{folder}/{table}", format=data_format) return df except AnalysisException as e: raise ValueError("Failed to load. Are you sure you have the right path?\nMore info below:\n" + str(e)) def load_csv(self, path, header=True): """ Loads a dataframe based on the path specified eg, df = load_csv('stage1np/example/student/*') """ url_path = self.convert_path(path) try: df = spark.read.load(url_path, format='csv', header=header) return df except AnalysisException as e: raise ValueError(f"Failed to load from: {url_path}. 
Are you sure you have the right path?\nMore info below:\n" + str(e)) def load_delta(self, path): """ Loads a dataframe based on the path specified eg, df = load_delta('stage2np/example/student/*') """ url_path = self.convert_path(path) try: df = spark.read.load(url_path, format='delta') return df except AnalysisException as e: raise ValueError(f"Failed to load from: {url_path}. Are you sure you have the right path?\nMore info below:\n" + str(e)) def load_from_stage1(self, path_and_filename, data_format='csv', header=True): """ Loads a dataframe with data from stage1, based on the path specified in the given args """ path = f"{self.stage1np}/{path_and_filename}" df = spark.read.load(path, format=data_format, header=header) return df def load_sample_from_csv_file(self, path_and_filename, header=True, stage=None): """ Loads a sample from the specified csv file and returns a pandas dataframe. Ex: print(load_sample_from_csv_file('/student_data/students.csv')) """ if stage is None: stage = self.stage1np csv_str = mssparkutils.fs.head(f"{stage}/{path_and_filename}") # https://docs.microsoft.com/en-us/azure/synapse-analytics/spark/microsoft-spark-utilities?pivots=programming-language-python#preview-file-content complete_lines = re.match(r".*\n", csv_str, re.DOTALL).group(0) if header: header = 0 # for info on why this is needed: https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.read_csv.html else: header = None pdf = pd.read_csv(io.StringIO(complete_lines), sep=',', header=header) return pdf def print_stage(self, path): """ Prints out the highlevel contents of the specified stage.""" msg = path + "\n" folders = self.get_folders(path) for folder_name in folders: entities = self.get_folders(path + '/' + folder_name) msg += f"{folder_name}: {entities}\n" print(msg) def fix_column_names(self, df): """ Fix column names to satisfy the Parquet naming requirements by substituting invalid characters with an underscore. """ df_with_valid_column_names = df.select([F.col(col).alias(re.sub("[ ,;{}()\n\t=]+", "_", col)) for col in df.columns]) return df_with_valid_column_names def to_spark_schema(self, schema):#: list[list[str]]): """ Creates a spark schema from a schema specified in the OEA schema format. 
Example: schemas['Person'] = [['Id','string','hash'], ['CreateDate','timestamp','no-op'], ['LastModifiedDate','timestamp','no-op']] to_spark_schema(schemas['Person']) """ fields = [] for col_name, dtype, op in schema: fields.append(StructField(col_name, globals()[dtype.lower().capitalize() + "Type"](), True)) spark_schema = StructType(fields) return spark_schema def ingest_incremental_data(self, source_system, tablename, schema, partition_by, primary_key='id', data_format='csv', has_header=True): """ Processes incremental batch data from stage1 into stage2 """ source_path = f'{self.stage1np}/{source_system}/{tablename}' p_destination_path = f'{self.stage2p}/{source_system}/{tablename}_pseudo' np_destination_path = f'{self.stage2np}/{source_system}/{tablename}_lookup' logger.info(f'Processing incremental data from: {source_path} and writing out to: {p_destination_path}') if has_header: header_flag = 'true' else: header_flag = 'false' spark_schema = self.to_spark_schema(schema) df = spark.readStream.load(source_path + '/*', format=data_format, header=header_flag, schema=spark_schema) #df = spark.read.load(source_path + '/*', format=data_format, header=header_flag, schema=spark_schema) #display(df) #df = df.withColumn('batchdate', F.to_timestamp(df.batchdate, "yyyy-MM-dd'T'HHmmssZ")) df = df.dropDuplicates([primary_key]) # drop duplicates across batches. More info: https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#streaming-deduplication df_pseudo, df_lookup = self.pseudonymize(df, schema) if len(df_pseudo.columns) == 0: logger.info('No data to be written to stage2p') else: query = df_pseudo.writeStream.format("delta").outputMode("append").trigger(once=True).option("checkpointLocation", source_path + '/_checkpoints/incremental_p').partitionBy(partition_by) query = query.start(p_destination_path) query.awaitTermination() # block until query is terminated, with stop() or with error; A StreamingQueryException will be thrown if an exception occurs. logger.info(query.lastProgress) if len(df_lookup.columns) == 0: logger.info('No data to be written to stage2np') else: query2 = df_lookup.writeStream.format("delta").outputMode("append").trigger(once=True).option("checkpointLocation", source_path + '/_checkpoints/incremental_np').partitionBy(partition_by) query2 = query2.start(np_destination_path) query2.awaitTermination() # block until query is terminated, with stop() or with error; A StreamingQueryException will be thrown if an exception occurs. logger.info(query2.lastProgress) def _merge_into_table(self, df, destination_path, checkpoints_path, condition): """ Merges data from the given dataframe into the delta table at the specified destination_path, based on the given condition. If not delta table exists at the specified destination_path, a new delta table is created and the data from the given dataframe is inserted. 
eg, merge_into_table(df_lookup, np_destination_path, source_path + '/_checkpoints/delta_np', "current.id_pseudonym = updates.id_pseudonym") """ if DeltaTable.isDeltaTable(spark, destination_path): dt = DeltaTable.forPath(spark, destination_path) def upsert(batch_df, batchId): dt.alias("current").merge(batch_df.alias("updates"), condition).whenMatchedUpdateAll().whenNotMatchedInsertAll().execute() query = df.writeStream.format("delta").foreachBatch(upsert).outputMode("update").trigger(once=True).option("checkpointLocation", checkpoints_path) else: logger.info(f'Delta table does not yet exist at {destination_path} - creating one now and inserting initial data.') query = df.writeStream.format("delta").outputMode("append").trigger(once=True).option("checkpointLocation", checkpoints_path) query = query.start(destination_path) query.awaitTermination() # block until query is terminated, with stop() or with error; A StreamingQueryException will be thrown if an exception occurs. logger.info(query.lastProgress) def ingest_delta_data(self, source_system, tablename, schema, partition_by, primary_key='id', data_format='csv', has_header=True): """ Processes delta batch data from stage1 into stage2 """ source_path = f'{self.stage1np}/{source_system}/{tablename}' p_destination_path = f'{self.stage2p}/{source_system}/{tablename}_pseudo' np_destination_path = f'{self.stage2np}/{source_system}/{tablename}_lookup' logger.info(f'Processing delta data from: {source_path} and writing out to: {p_destination_path}') if has_header: header_flag = 'true' else: header_flag = 'false' spark_schema = self.to_spark_schema(schema) df = spark.readStream.load(source_path + '/*', format=data_format, header=header_flag, schema=spark_schema) df_pseudo, df_lookup = self.pseudonymize(df, schema) if len(df_pseudo.columns) == 0: logger.info('No data to be written to stage2p') else: self._merge_into_table(df_pseudo, p_destination_path, source_path + '/_checkpoints/delta_p', "current.id_pseudonym = updates.id_pseudonym") if len(df_lookup.columns) == 0: logger.info('No data to be written to stage2np') else: self._merge_into_table(df_lookup, np_destination_path, source_path + '/_checkpoints/delta_np', "current.id_pseudonym = updates.id_pseudonym") def ingest_snapshot_data(self, source_system, tablename, schema, partition_by, primary_key='id', data_format='csv', has_header=True): """ Processes snapshot batch data from stage1 into stage2 """ source_path = f'{self.stage1np}/{source_system}/{tablename}' latest_batch = self.get_latest_folder(source_path) source_path = source_path + '/' + latest_batch p_destination_path = f'{self.stage2p}/{source_system}/{tablename}_pseudo' np_destination_path = f'{self.stage2np}/{source_system}/{tablename}_lookup' logger.info(f'Processing snapshot data from: {source_path} and writing out to: {p_destination_path}') if has_header: header_flag = 'true' else: header_flag = 'false' spark_schema = self.to_spark_schema(schema) df = spark.read.load(source_path, format=data_format, header=header_flag, schema=spark_schema) df = df.dropDuplicates([primary_key]) # More info: https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#streaming-deduplication df_pseudo, df_lookup = self.pseudonymize(df, schema) if len(df_pseudo.columns) == 0: logger.info('No data to be written to stage2p') else: df_pseudo.write.save(p_destination_path, format='delta', mode='overwrite', partitionBy=partition_by) if len(df_lookup.columns) == 0: logger.info('No data to be written to stage2np') else: 
df_lookup.write.save(np_destination_path, format='delta', mode='overwrite', partitionBy=partition_by) def pseudonymize(self, df, schema): #: list[list[str]]): """ Performs pseudonymization of the given dataframe based on the provided schema. For example, if the given df is for an entity called person, 2 dataframes will be returned, one called person that has hashed ids and masked fields, and one called person_lookup that contains the original person_id, person_id_pseudo, and the non-masked values for columns marked to be masked.""" df_pseudo = df_lookup = df for col_name, dtype, op in schema: if op == "hash-no-lookup" or op == "hnl": # This means that the lookup can be performed against a different table so no lookup is needed. df_pseudo = df_pseudo.withColumn(col_name, F.sha2(F.concat(F.col(col_name), F.lit(self.salt)), 256)).withColumnRenamed(col_name, col_name + "_pseudonym") df_lookup = df_lookup.drop(col_name) elif op == "hash" or op == 'h': df_pseudo = df_pseudo.withColumn(col_name, F.sha2(F.concat(F.col(col_name), F.lit(self.salt)), 256)).withColumnRenamed(col_name, col_name + "_pseudonym") df_lookup = df_lookup.withColumn(col_name + "_pseudonym", F.sha2(F.concat(F.col(col_name), F.lit(self.salt)), 256)) elif op == "mask" or op == 'm': df_pseudo = df_pseudo.withColumn(col_name, F.lit('*')) elif op == "partition-by": pass # make no changes for this column so that it will be in both dataframes and can be used for partitioning elif op == "no-op" or op == 'x': df_lookup = df_lookup.drop(col_name) df_pseudo = self.fix_column_names(df_pseudo) df_lookup = self.fix_column_names(df_lookup) return (df_pseudo, df_lookup) # Returns true if the path exists def path_exists(self, path): tableExists = False try: items = mssparkutils.fs.ls(path) tableExists = True except Exception as e: # This Exception comes as a generic Py4JJavaError that occurs when the path specified is not found. pass return tableExists def ls(self, path): if not path.startswith("abfss:"): path = self.convert_path(path) folders = [] files = [] try: items = mssparkutils.fs.ls(path) for item in items: if item.isFile: files.append(item.name) elif item.isDir: folders.append(item.name) except Exception as e: logger.warning("[OEA] Could not peform ls on specified path: " + path + "\nThis may be because the path does not exist.") return (folders, files) def print_stage(self, path): print(path) folders = self.get_folders(path) for folder_name in folders: entities = self.get_folders(path + '/' + folder_name) print(f"{folder_name}: {entities}") # Return the list of folders found in the given path. def get_folders(self, path): dirs = [] try: items = mssparkutils.fs.ls(path) for item in items: #print(item.name, item.isDir, item.isFile, item.path, item.size) if item.isDir: dirs.append(item.name) except Exception as e: logger.warning("[OEA] Could not get list of folders in specified path: " + path + "\nThis may be because the path does not exist.") return dirs def get_latest_folder(self, path): folders = self.get_folders(path) if len(folders) > 0: return folders[-1] else: return None # Remove a folder if it exists (defaults to use of recursive removal). def rm_if_exists(self, path, recursive_remove=True): try: mssparkutils.fs.rm(path, recursive_remove) except Exception as e: pass def pop_from_path(self, path): """ Pops the last arg in a path and returns the path and the last arg as a tuple. 
pop_from_path('abfss://[email protected]/ms_insights/test.csv') # returns ('abfss://[email protected]/ms_insights', 'test.csv') """ m = re.match(r"(.*)\/([^/]+)", path) return (m.group(1), m.group(2)) def parse_source_path(self, path): """ Parses a path that looks like this: abfss://[email protected]/ms_insights and returns a dictionary like this: {'stage_num': '2', 'ss': 'ms_insights'} Note that it will also return a 'stage_num' of 2 if the path is stage2p or stage2np - this is by design because the spark db with the s2 prefix will be used for data in stage2 and stage2p. """ m = re.match(r".*:\/\/stage(?P<stage_num>\d+)[n]?[p]?@[^/]+\/(?P<ss>[^/]+)", path) return m.groupdict() def create_lake_db(self, stage_num, source_dir, source_format='DELTA'): """ Creates a spark db that points to data in the given stage under the specified source directory (assumes that every folder in the source_dir is a table). Example: create_lake_db(2, 'contoso_sis') Note that a spark db that points to source data in the delta format can't be queried via SQL serverless pool. More info here: https://docs.microsoft.com/en-us/azure/synapse-analytics/sql/resources-self-help-sql-on-demand#delta-lake """ db_name = f's{stage_num}_{source_dir}' spark.sql(f'CREATE DATABASE IF NOT EXISTS {db_name}') self.create_lake_views(db_name, self.path(f'stage{stage_num}p', source_dir), source_format) self.create_lake_views(db_name, self.path(f'stage{stage_num}np', source_dir), source_format) result = "Database created: " + db_name logger.info(result) return result def create_lake_views(self, db_name, source_path, source_format): dirs = self.get_folders(source_path) for table_name in dirs: spark.sql(f"create table if not exists {db_name}.{table_name} using {source_format} location '{source_path}/{table_name}'") def drop_lake_db(self, db_name): spark.sql(f'DROP DATABASE IF EXISTS {db_name} CASCADE') result = "Database dropped: " + db_name logger.info(result) return result def create_sql_db(self, stage_num, source_dir, source_format='DELTA'): """ Prints out the sql script needed for creating a sql serverless db and set of views. """ db_name = f'sqls{stage_num}_{source_dir}' cmd += '-- Create a new sql script then execute the following in it:' cmd += f"IF NOT EXISTS (SELECT * FROM sys.databases WHERE name = '{db_name}')\nBEGIN\n CREATE DATABASE {db_name};\nEND;\nGO\n" cmd += f"USE {db_name};\nGO\n\n" cmd += self.create_sql_views(self.path(f'stage{stage_num}p', source_dir), source_format) cmd += self.create_sql_views(self.path(f'stage{stage_num}np', source_dir), source_format) print(cmd) def create_sql_views(self, source_path, source_format): cmd = '' dirs = self.get_folders(source_path) for table_name in dirs: cmd += f"CREATE OR ALTER VIEW {table_name} AS\n SELECT * FROM OPENROWSET(BULK '{source_path}/{table_name}', FORMAT='{source_format}') AS [r];\nGO\n" return cmd def drop_sql_db(self, db_name): print('Click on the menu next to the SQL db and select "Delete"') # List installed packages def list_packages(self): import pkg_resources for d in pkg_resources.working_set: print(d) def print_schema_starter(self, entity_name, df): """ Prints a starter schema that can be modified as needed when developing the oea schema for a new module. """ st = f"self.schemas['{entity_name}'] = [" for col in df.schema: st += f"['{col.name}', '{str(col.dataType)[:-4].lower()}', 'no-op'],\n\t\t\t\t\t\t\t\t\t" return st[:-11] + ']' def write_rows_as_csv(data, folder, filename, container=None): """ Writes a dictionary as a csv to the specified location. 
This is helpful when creating test data sets and landing them in stage1np. data = [{'id':'1','fname':'John'}, {'id':'1','fname':'Jane'}] """ if container == None: container = self.stage1np pdf = pd.DataFrame(data) mssparkutils.fs.put(f"{container}/{folder}/{filename}", pdf.to_csv(index=False), True) # True indicates overwrite mode def write_rowset_as_csv(data, folder, container=None): """ Writes out as csv rows the passed in data. The inbound data should be in a format like this: data = { 'students':[{'id':'1','fname':'John'}], 'courses':[{'id':'31', 'name':'Math'}] } """ if container == None: container = self.stage1np for entity_name, value in data.items(): pdf =
pd.DataFrame(value)
pandas.DataFrame
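The write_rows_as_csv helper above turns a list of row dictionaries into CSV text; a compact sketch of that pandas.DataFrame round trip, reusing the same example rows:

import pandas as pd

# Each dictionary becomes one row; missing keys would simply yield NaN.
rows = [{"id": "1", "fname": "John"}, {"id": "2", "fname": "Jane"}]
pdf = pd.DataFrame(rows)
csv_text = pdf.to_csv(index=False)  # index=False drops the integer index column
print(csv_text)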
# Copyright 2020 The Q2 Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import pandas as pd import numpy as np import scipy.stats as st import matplotlib.pyplot as plt import seaborn as sns from collections import namedtuple from sklearn import metrics from baselines import add_baselines sns.set_theme(style="darkgrid", font_scale=1.1) PrecisionRecallValues = namedtuple('PrecisionRecallValues', ['precision', 'recall', 'thresholds']) GROUNDED_LABEL = 1 UNGROUNDED_LABEL = 0 DEFAULT_THRESHOLD = 0.5 def compute_precision_recall_single_threshold(scores, labels, threshold): """Returns a dictionary containing the grounded and ungrounded Precision-Recall values, for a given threshold.""" predictions = [] for score in scores: if score <= threshold: predictions.append(UNGROUNDED_LABEL) else: predictions.append(GROUNDED_LABEL) grounded_precision = metrics.precision_score(y_true=labels, y_pred=predictions, pos_label=GROUNDED_LABEL) grounded_recall = metrics.recall_score(y_true=labels, y_pred=predictions, pos_label=GROUNDED_LABEL) ungrounded_precision = metrics.precision_score(y_true=labels, y_pred=predictions, pos_label=UNGROUNDED_LABEL) ungrounded_recall = metrics.recall_score(y_true=labels, y_pred=predictions, pos_label=UNGROUNDED_LABEL) accuracy = metrics.accuracy_score(y_true=labels, y_pred=predictions) result_dict = { 'grounded_precision': grounded_precision, 'grounded_recall': grounded_recall, 'ungrounded_precision': ungrounded_precision, 'ungrounded_recall': ungrounded_recall, 'accuracy': accuracy } return result_dict def compute_precision_recall_various_thresholds(scores, labels, grounded_detection=False): """Computes the Precision-Recall values for different thresholds and returns three arrays: Precision values, Recall values, and the corresponding thresholds.""" if not grounded_detection: # In the ungrounded case, each example will be predicted as ungrounded if # its score is greater than, or equal to, the threshold. We therefore take # 1-score for ungrounded text detection. scores = 1 - scores precision, recall, thresholds = metrics.precision_recall_curve(y_true=labels, probas_pred=scores, pos_label=int(grounded_detection)) return PrecisionRecallValues(precision=precision, recall=recall, thresholds=thresholds) def plot_precision_recall_vs_thresholds(precisions, recalls, thresholds, plot_grounded, fig_name): """Plots the Precision and Recall values for various thresholds.""" if not plot_grounded: # The Precision and Recall were calculated by classifying as ungrounded any # example for which (1-score) is larger than the threshold. We would like # the plot to show the Precision and Recall for classifying as ungrounded any # example for which the score in smaller than the threshold. 
thresholds = 1 - thresholds plt.figure() plt.plot(thresholds, precisions[:-1], 'b--', label='precision') plt.plot(thresholds, recalls[:-1], 'g--', label='recall') plt.xlabel('Threshold') plt.legend() if plot_grounded: plot_type = 'grounded' else: plot_type = 'ungrounded' plot_title = f'Precision and Recall vs. various thresholds, {plot_type} detection' plt.title(plot_title) plt.savefig(fig_name) def create_single_metric_plots(scores, labels, metric_name, fig_name): """Plot the Grounded and Ungrounded Precision and Recall for a given metric, for various thresholds.""" grounded_precision_recall = compute_precision_recall_various_thresholds(scores, labels, grounded_detection=True) plot_precision_recall_vs_thresholds(grounded_precision_recall.precision, grounded_precision_recall.recall, grounded_precision_recall.thresholds, True, f'{fig_name}_{metric_name}_grounded.png') ungrounded_precision_recall = compute_precision_recall_various_thresholds(scores, labels, grounded_detection=False) plot_precision_recall_vs_thresholds(ungrounded_precision_recall.precision, ungrounded_precision_recall.recall, ungrounded_precision_recall.thresholds, False, f'{fig_name}_{metric_name}_ungrounded.png') def create_multiple_metrics_comparison_plots(metrics_scores, fig_name): """ Plot the Precision-Recall curves for several metrics. The plot shows the Precision-Recall trade-off for several input metrics, allowing comparison between the different metrics. """ precision = np.array([]) recall = np.array([]) metric_type = [] for metric, scores in metrics_scores.items(): inconsistent = scores[0] consistent = scores[1] metric_scores = np.append(inconsistent, consistent) gold_score_inconsistent = np.zeros(shape=(len(inconsistent))) gold_score_consistent = np.ones(shape=(len(consistent))) gold_scores = np.append(gold_score_inconsistent, gold_score_consistent) metric_precision, metric_recall, _ = metrics.precision_recall_curve(y_true=gold_scores, probas_pred=metric_scores) precision = np.append(precision, metric_precision) recall = np.append(recall, metric_recall) metric_type.extend([metric] * len(metric_precision)) plt.figure() for_plt = pd.DataFrame({"Recall": recall, "Precision": precision, "Metric": metric_type}) sns_plot = sns.lineplot(x='Recall', y='Precision', hue="Metric", data=for_plt).set_title("Precision vs. 
Recall, consistent and inconsistent scores") sns_plot.figure.savefig(fig_name) def plot_hist(metrics_scores, fig_name): for metric, scores in metrics_scores.items(): inconsistent = scores[0] consistent = scores[1] plt.figure() df_inconsistent = pd.DataFrame({"Score": inconsistent}) sns_plot = sns.histplot(df_inconsistent, x="Score", bins=10).set_title("Histogram of response scores, {0}, " "inconsistent data".format(metric)) sns_plot.figure.savefig("{0}_inconsistent_{1}".format(fig_name, metric)) plt.figure() df_consistent = pd.DataFrame({"Score": consistent}) sns_plot = sns.histplot(df_consistent, x="Score", bins=10).set_title("Histogram of response scores, {0}, " "consistent data".format(metric)) sns_plot.figure.savefig("{0}_consistent_{1}".format(fig_name, metric)) def get_metric_scores(incons_dodeca, cons_dodeca, incons_memnet, cons_memnet, metric_type): incons_scores_dodeca = incons_dodeca[metric_type].to_numpy(dtype=np.float64) cons_scores_dodeca = cons_dodeca[metric_type].to_numpy(dtype=np.float64) incons_scores_memnet = incons_memnet[metric_type].to_numpy(dtype=np.float64) cons_scores_memnet = cons_memnet[metric_type].to_numpy(dtype=np.float64) inconsistent_scores = np.append(incons_scores_dodeca, incons_scores_memnet) consistent_scores = np.append(cons_scores_dodeca, cons_scores_memnet) return inconsistent_scores, consistent_scores def response_level_evaluation(incons_dodeca, cons_dodeca, incons_memnet, cons_memnet, metrics_names, metric_to_threshold): scores_dict = {} for metric_name in metrics_names: inconsistent_metric_scores, consistent_metric_scores = get_metric_scores(incons_dodeca, cons_dodeca, incons_memnet, cons_memnet, metric_name) # Normalize BLEU scores to be in [0,1] if metric_name == 'bleu': inconsistent_metric_scores = inconsistent_metric_scores / 100 consistent_metric_scores = consistent_metric_scores / 100 # Normalize BERTScore to be in [0,1] elif metric_name == 'bertscore': min_bertscore = np.amin(np.append(inconsistent_metric_scores, consistent_metric_scores)) inconsistent_metric_scores = inconsistent_metric_scores - min_bertscore consistent_metric_scores = consistent_metric_scores - min_bertscore max_bertscore = np.amax(np.append(inconsistent_metric_scores, consistent_metric_scores)) inconsistent_metric_scores = inconsistent_metric_scores / max_bertscore consistent_metric_scores = consistent_metric_scores / max_bertscore metric_scores = np.append(inconsistent_metric_scores, consistent_metric_scores) gold_labels_inconsistent = np.zeros(shape=(len(inconsistent_metric_scores))) gold_labels_consistent = np.ones(shape=(len(consistent_metric_scores))) gold_labels = np.append(gold_labels_inconsistent, gold_labels_consistent) create_single_metric_plots(metric_scores, gold_labels, metric_name, 'metric_precision_recall') precision_recall_dict = compute_precision_recall_single_threshold(metric_scores, gold_labels, metric_to_threshold[metric_name]) print(f'For metric {metric_name}:', precision_recall_dict) scores_dict[metric_name] = [inconsistent_metric_scores, consistent_metric_scores] create_multiple_metrics_comparison_plots(scores_dict, 'precision_recall_comparison_2.png') if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--incons_dodeca_f", type=str, required=True) parser.add_argument("--cons_dodeca_f", type=str, required=True) parser.add_argument("--incons_memnet_f", type=str, required=True) parser.add_argument("--cons_memnet_f", type=str, required=True) parser.add_argument('--metrics_names', nargs="+", required=True) 
parser.add_argument('--thresholds', nargs="*", required=False) parser.add_argument("--add_baselines", default=False, action="store_true", help="Whether to include baseline methods in the meta-evaluation.") args = parser.parse_args() inconsistent_dodeca =
pd.read_csv(args.incons_dodeca_f)
pandas.read_csv
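The evaluation script reads its score files from command-line paths and pulls columns out as numpy arrays in get_metric_scores; a small stand-in using an in-memory CSV with invented scores:

import io
import numpy as np
import pandas as pd

# Column of metric scores extracted as float64, then rescaled to [0, 1]
# the same way the script normalises BLEU.
csv_text = "bleu,bertscore\n12.3,0.81\n45.6,0.92\n"
df = pd.read_csv(io.StringIO(csv_text))
bleu = df["bleu"].to_numpy(dtype=np.float64) / 100
print(bleu)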
from .genometric_space import GenometricSpace from .dataset.parser.parser import Parser import pandas as pd import warnings import numpy as np class MultiRefModel: """ GenometricSpace class to represent data that are mapped with multiple references """ def __init__(self): """ Constructor """ self.data_model = [] return def load(self, path, genes_uuid, regs=['chr', 'left', 'right', 'strand'], meta=[], values=[], full_load=False): """ Loads the multi referenced mapped data from the file system :param path: The path to the files :param genes_uuid: The unique identifier metadata column name to separate the data by the number of references :param regs: The region data that are to be analyzed :param meta: The metadata that are to be analyzed :param values: The values to fill the matrix :param full_load: Specifies the method of parsing the data. If False then parser omits the parsing of zero(0) values in order to speed up and save memory. However, while creating the matrix, those zero values are going to be put into the matrix. (unless a row contains "all zero columns". This parsing is strongly recommended for sparse datasets. If the full_load parameter is True then all the zero(0) data are going to be read. """ if not full_load: warnings.warn("\n\n You are using the optimized loading technique. " "All-zero rows are not going to be loaded into memory. " "To load all the data please set the full_load parameter equal to True.") p = Parser(path) all_meta_data = p.parse_meta(meta) all_data = p.parse_data(regs, values, full_load) all_data = pd.pivot_table(all_data, values=values, columns=regs, index=['sample'], fill_value=0) group1 = all_meta_data.groupby([genes_uuid]).count() for g in group1.index.values: series = all_meta_data[genes_uuid] == g m = (all_meta_data[series]) d = (all_data.loc[series]).dropna(axis=1, how='all') # not to show the NaN data self.data_model.append(GenometricSpace.from_memory(d, m)) self.all_meta_data = all_meta_data def merge(self, samples_uuid): """ The method to merge the datamodels belonging to different references :param samples_uuid: The unique identifier metadata column name to identify the identical samples having different references :return: Returns the merged dataframe """ all_meta_data =
pd.DataFrame()
pandas.DataFrame
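The merge() method above seeds an empty pandas.DataFrame and fills it per reference group; a minimal sketch of that accumulator pattern, using pd.concat over the collected pieces (the sample and reference values are placeholders):

import pandas as pd

# Collect the per-group frames in a list and concatenate once at the end;
# this is the cheaper equivalent of growing a frame row by row.
parts = [pd.DataFrame({"sample": [f"s{i}"], "reference": [f"ref{i}"]}) for i in range(3)]
all_meta_data = pd.concat(parts, ignore_index=True)
print(all_meta_data)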
import pandas as pd
import numpy as np
import re
import marcformat


class MarcExtractor(object):
    tag_marc_file = 'MARC_FILE'
    tag_filter_columns = 'FILTER_COLUMNS'
    tag_marc_output_file = 'MARC_OUTPUT_FILE'

    marcFile = ''
    marcOutFile = ''
    filteredColumns = []
    df = pd.DataFrame()
    df1 = pd.DataFrame()
    df2 = pd.DataFrame()
    chunkSize = 1000
    count = 0

    def __init__(self, config_file):
        self.__processConfigFile(config_file)
        pass

    def processDataSet(self):
        header = pd.DataFrame()
        for chunk in
pd.read_csv(self.marcFile, chunksize=self.chunkSize, encoding='latin1')
pandas.read_csv
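processDataSet above streams the MARC file in fixed-size chunks; a self-contained sketch of pandas.read_csv with chunksize, using generated in-memory rows instead of a real file:

import io
import pandas as pd

# chunksize makes read_csv return an iterator of DataFrames instead of
# loading the whole file at once.
csv_text = "id,title\n" + "\n".join(f"{i},record {i}" for i in range(10))
for chunk in pd.read_csv(io.StringIO(csv_text), chunksize=4):
    print(len(chunk), "rows in this chunk")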
import matplotlib.pyplot as plt import numpy as np import itertools as itt import pathlib as pl import src.data.rasters from src.data.load import load from src.metrics.reliability import signal_reliability from src.data.cache import make_cache, get_cache from src.data import LDA as cLDA, dPCA as cdPCA from src.metrics import dprime as cDP from progressbar import ProgressBar from src.utils.tools import shuffle_along_axis as shuffle from scipy.stats import ranksums, wilcoxon import pandas as pd import seaborn as sn import collections as col """ Summary of the d' context discrimination significance, and propulation effect significance across all combinations of sites and probes. The two metrics extracted are the total number of significant time bins and the position of the last time bin. it is highly recomended to add a way of keeping track of the distibution of significant bins over time across each category """ def fourway_analysis(site, probe, meta): recs = load(site) if len(recs) > 2: print(f'\n\n{recs.keys()}\n\n') rec = recs['trip0'] sig = rec['resp'] # calculates response realiability and select only good cells to improve analysis r_vals, goodcells = signal_reliability(sig, r'\ASTIM_*', threshold=meta['reliability']) goodcells = goodcells.tolist() # get the full data raster Context x Probe x Rep x Neuron x Time raster = src.data.rasters.raster_from_sig(sig, probe, channels=goodcells, contexts=meta['transitions'], smooth_window=meta['smoothing_window'], raster_fs=meta['raster_fs'], zscore=meta['zscore']) # trialR shape: Trial x Cell x Context x Probe x Time; R shape: Cell x Context x Probe x Time trialR, _, _ = cdPCA.format_raster(raster) trialR = trialR.squeeze() # squeezes out probe R, C, S, T = trialR.shape # calculates full LDA. i.e. considering all 4 categories LDA_projection, LDA_transformation = cLDA.fit_transform_over_time(trialR, 1) dprime = cDP.pairwise_dprimes(LDA_projection.squeeze()) # calculates floor (ctx shuffle) and ceiling (simulated data) sim_dprime = np.empty([meta['montecarlo']] + list(dprime.shape)) shuf_dprime = np.empty([meta['montecarlo']] + list(dprime.shape)) ctx_shuffle = trialR.copy() pbar = ProgressBar() for rr in pbar(range(meta['montecarlo'])): # ceiling: simulates data, calculates dprimes sim_trial = np.random.normal(np.mean(trialR, axis=0), np.std(trialR, axis=0), size=[R, C, S, T]) sim_projection = cLDA.transform_over_time(cLDA._reorder_dims(sim_trial), LDA_transformation) sim_dprime[rr, ...] = cDP.pairwise_dprimes(cLDA._recover_dims(sim_projection).squeeze()) ctx_shuffle = shuffle(ctx_shuffle, shuffle_axis=2, indie_axis=0) shuf_projection, _ = cLDA.fit_transform_over_time(ctx_shuffle) shuf_dprime[rr, ...] 
= cDP.pairwise_dprimes(shuf_projection.squeeze()) return dprime, shuf_dprime, sim_dprime def dPCA_fourway_analysis(site, probe, meta): recs = load(site) if len(recs) > 2: print(f'\n\n{recs.keys()}\n\n') rec = recs['trip0'] sig = rec['resp'] # calculates response realiability and select only good cells to improve analysis r_vals, goodcells = signal_reliability(sig, r'\ASTIM_*', threshold=meta['reliability']) goodcells = goodcells.tolist() # get the full data raster Context x Probe x Rep x Neuron x Time raster = src.data.rasters.raster_from_sig(sig, probe, channels=goodcells, contexts=meta['transitions'], smooth_window=meta['smoothing_window'], raster_fs=meta['raster_fs'], zscore=meta['zscore']) # trialR shape: Trial x Cell x Context x Probe x Time; R shape: Cell x Context x Probe x Time trialR, R, _ = cdPCA.format_raster(raster) trialR, R = trialR.squeeze(), R.squeeze() # squeezes out probe Re, C, S, T = trialR.shape # calculates full dPCA. i.e. considering all 4 categories def fit_transformt(R, trialR): _, dPCA_projection, _, dpca = cdPCA._cpp_dPCA(R, trialR, significance=False, dPCA_parms={}) dPCA_projection = dPCA_projection['ct'][:, 0, ] dPCA_transformation = np.tile(dpca.D['ct'][:, 0][:, None, None], [1, 1, T]) return dPCA_projection, dPCA_transformation dPCA_projection, dPCA_transformation = fit_transformt(R, trialR) dprime = cDP.pairwise_dprimes(dPCA_projection) # calculates floor (ctx shuffle) and ceiling (simulated data) sim_dprime = np.empty([meta['montecarlo']] + list(dprime.shape)) shuf_dprime = np.empty([meta['montecarlo']] + list(dprime.shape)) ctx_shuffle = trialR.copy() pbar = ProgressBar() for rr in pbar(range(meta['montecarlo'])): # ceiling: simulates data, calculates dprimes sim_trial = np.random.normal(np.mean(trialR, axis=0), np.std(trialR, axis=0), size=[Re, C, S, T]) sim_projection = cLDA.transform_over_time(cLDA._reorder_dims(sim_trial), dPCA_transformation) sim_dprime[rr, ...] = cDP.pairwise_dprimes(cLDA._recover_dims(sim_projection).squeeze()) ctx_shuffle = shuffle(ctx_shuffle, shuffle_axis=2, indie_axis=0) shuf_projection = cLDA.transform_over_time(cLDA._reorder_dims(ctx_shuffle), dPCA_transformation) shuf_dprime[rr, ...] = cDP.pairwise_dprimes(cLDA._recover_dims(shuf_projection).squeeze()) return dprime, shuf_dprime, sim_dprime CB_color_cycle = ['#377eb8', '#ff7f00', '#4daf4a', '#a65628', # blue, orange, green, brow, '#984ea3', '#999999', '#e41a1c', '#dede00'] # purple, gray, scarlet, lime trans_color_map = {'silence': '#377eb8', # blue 'continuous': '#ff7f00', # orange 'similar': '#4daf4a', # green 'sharp': '#a65628'} # brown MC_color = {'shuffled': 'orange', 'simulated': 'purple'} # transferable plotting parameters plt.rcParams['svg.fonttype'] = 'none' sup_title_size = 30 sub_title_size = 20 ax_lab_size = 15 ax_val_size = 11 meta = {'reliability': 0.1, # r value 'smoothing_window': 0, # ms 'raster_fs': 30, 'transitions': ['silence', 'continuous', 'similar', 'sharp'], 'significance': False, 'montecarlo': 1000, 'zscore': False} analysis_name = 'LDA_dprime' analysis_parameters = '_'.join(['{}-{}'.format(key, str(val)) for key, val in meta.items()]) code_to_name = {'t': 'Probe', 'ct': 'Context'} all_probes = [2, 3, 5, 6] # all_probes = [2,3] # all_probes = [5,6] all_sites = ['ley070a', # good site. A1 'ley072b', # Primary looking responses with strong contextual effects 'AMT028b', # good site 'AMT029a', # Strong response, somehow visible contextual effects 'AMT030a', # low responses, Ok but not as good # 'AMT031a', # low response, bad 'AMT032a'] # great site. 
PEG bad_sites = list() df = list() # for site, probe in zip(['AMT029a', 'ley070a'],[5,2]): for site, probe in itt.product(all_sites, all_probes): try: LDA_anal_name = f'191014_{site}_P{probe}_fourway_analysis' LDA_anal = make_cache(function=fourway_analysis, func_args={'site': site, 'probe': probe, 'meta': meta}, classobj_name=LDA_anal_name, cache_folder=f'/home/mateo/mycache/{analysis_name}/{analysis_parameters}') # LDA_real, LDA_shuffled, LDA_simulated = get_cache(LDA_anal) except: bad_sites.append(f"{site}_P{probe}_LDA") continue try: dPCA_anal_name = f'191015_{site}_P{probe}_fourway_analysis' dPCA_anal = make_cache(function=dPCA_fourway_analysis, func_args={'site': site, 'probe': probe, 'meta': meta}, classobj_name=dPCA_anal_name, cache_folder=f'/home/mateo/mycache/{analysis_name}/{analysis_parameters}') # dPCA_real, dPCA_shuffled, dPCA_simulated = get_cache(dPCA_anal) except: bad_sites.append(f"{site}_P{probe}_dPCA") continue for transformation, cache in zip(['LDA', 'dPCA'], [LDA_anal, dPCA_anal]): real, shuffled, simulated = get_cache(cache) for montecarlo, MCarray in zip(['context discrimination', 'population effect'], [shuffled, simulated]): # calculates a signed pvalue, the signs is indicative of the direction, with possitive being higher and # negative being lower than the mean of the Montecarlo distribution. Bi virtue of this distinction # the p value is calculated on a single tail. mont_mean = np.mean(MCarray, axis=0) neg_pval = np.sum((MCarray < real), axis=0) / meta['montecarlo'] pos_pval = np.sum((MCarray > real), axis=0) / meta['montecarlo'] pvalues = np.where(real >= mont_mean, pos_pval, -neg_pval) for pp, trans_pair in enumerate(itt.combinations(meta['transitions'], 2)): for pval_threshold in [0.05, 0.01, 0.001]: signif = np.abs(pvalues[pp, :]) < pval_threshold total_sig = np.sum(signif) * 100 / len(signif) # todo remove the hardcode percentage try: last_sig = np.max(np.argwhere(signif)) except: # if there is not significant last_sig = 0 d = {'site': site, 'probe': probe, 'transformation': transformation, 'montecarlo': montecarlo, 'pair': f'{trans_pair[0]}_{trans_pair[1]}', 'threshold': pval_threshold, 'parameter': 'total_sig', 'value': total_sig} df.append(d) d = {'site': site, 'probe': probe, 'transformation': transformation, 'montecarlo': montecarlo, 'pair': f'{trans_pair[0]}_{trans_pair[1]}', 'threshold': pval_threshold, 'parameter': 'last_sig', 'value': last_sig} df.append(d) # # plots to check proper pval calculation # fig, axes = plt.subplots(2,3, sharex=True, sharey=True) # axes = np.ravel(axes) # for pp in range(real.shape[0]): # ax = axes[pp] # ax.plot(real[pp,:], color='black') # ax.plot(MCarray[:,pp,:].T, color='gray', alpha=0.01) # ax.plot(np.abs(pvalues[pp,:]), color='green') # ax.plot(np.abs(pvalues[pp,:])<0.05, color='orange') # DF =
pd.DataFrame(df)
pandas.DataFrame
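The analysis loop above appends one dict per site, probe, pair, and threshold, and only builds the DataFrame at the end; a small sketch of that list-of-dicts pattern with made-up values:

import pandas as pd

# List of dicts -> tidy DataFrame, ready for grouping or plotting.
records = [
    {"site": "AMT028b", "parameter": "total_sig", "value": 42.0},
    {"site": "AMT028b", "parameter": "last_sig", "value": 17.0},
    {"site": "ley070a", "parameter": "total_sig", "value": 35.0},
]
DF = pd.DataFrame(records)
print(DF.groupby("parameter")["value"].mean())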
# -*- coding: utf-8 -*- import csv import os import platform import codecs import re import sys from datetime import datetime import pytest import numpy as np from pandas._libs.lib import Timestamp import pandas as pd import pandas.util.testing as tm from pandas import DataFrame, Series, Index, MultiIndex from pandas import compat from pandas.compat import (StringIO, BytesIO, PY3, range, lrange, u) from pandas.errors import DtypeWarning, EmptyDataError, ParserError from pandas.io.common import URLError from pandas.io.parsers import TextFileReader, TextParser class ParserTests(object): """ Want to be able to test either C+Cython or Python+Cython parsers """ data1 = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ def test_empty_decimal_marker(self): data = """A|B|C 1|2,334|5 10|13|10. """ # Parsers support only length-1 decimals msg = 'Only length-1 decimal markers supported' with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(data), decimal='') def test_bad_stream_exception(self): # Issue 13652: # This test validates that both python engine # and C engine will raise UnicodeDecodeError instead of # c engine raising ParserError and swallowing exception # that caused read to fail. handle = open(self.csv_shiftjs, "rb") codec = codecs.lookup("utf-8") utf8 = codecs.lookup('utf-8') # stream must be binary UTF8 stream = codecs.StreamRecoder( handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter) if compat.PY3: msg = "'utf-8' codec can't decode byte" else: msg = "'utf8' codec can't decode byte" with tm.assert_raises_regex(UnicodeDecodeError, msg): self.read_csv(stream) stream.close() def test_read_csv(self): if not compat.PY3: if compat.is_platform_windows(): prefix = u("file:///") else: prefix = u("file://") fname = prefix + compat.text_type(self.csv1) self.read_csv(fname, index_col=0, parse_dates=True) def test_1000_sep(self): data = """A|B|C 1|2,334|5 10|13|10. """ expected = DataFrame({ 'A': [1, 10], 'B': [2334, 13], 'C': [5, 10.] 
}) df = self.read_csv(StringIO(data), sep='|', thousands=',') tm.assert_frame_equal(df, expected) df = self.read_table(StringIO(data), sep='|', thousands=',') tm.assert_frame_equal(df, expected) def test_squeeze(self): data = """\ a,1 b,2 c,3 """ idx = Index(['a', 'b', 'c'], name=0) expected = Series([1, 2, 3], name=1, index=idx) result = self.read_table(StringIO(data), sep=',', index_col=0, header=None, squeeze=True) assert isinstance(result, Series) tm.assert_series_equal(result, expected) def test_squeeze_no_view(self): # see gh-8217 # Series should not be a view data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13""" result = self.read_csv(StringIO(data), index_col='time', squeeze=True) assert not result._is_view def test_malformed(self): # see gh-6607 # all data = """ignore A,B,C 1,2,3 # comment 1,2,3,4,5 2,3,4 """ msg = 'Expected 3 fields in line 4, saw 5' with tm.assert_raises_regex(Exception, msg): self.read_table(StringIO(data), sep=',', header=1, comment='#') # first chunk data = """ignore A,B,C skip 1,2,3 3,5,10 # comment 1,2,3,4,5 2,3,4 """ msg = 'Expected 3 fields in line 6, saw 5' with tm.assert_raises_regex(Exception, msg): it = self.read_table(StringIO(data), sep=',', header=1, comment='#', iterator=True, chunksize=1, skiprows=[2]) it.read(5) # middle chunk data = """ignore A,B,C skip 1,2,3 3,5,10 # comment 1,2,3,4,5 2,3,4 """ msg = 'Expected 3 fields in line 6, saw 5' with tm.assert_raises_regex(Exception, msg): it = self.read_table(StringIO(data), sep=',', header=1, comment='#', iterator=True, chunksize=1, skiprows=[2]) it.read(3) # last chunk data = """ignore A,B,C skip 1,2,3 3,5,10 # comment 1,2,3,4,5 2,3,4 """ msg = 'Expected 3 fields in line 6, saw 5' with tm.assert_raises_regex(Exception, msg): it = self.read_table(StringIO(data), sep=',', header=1, comment='#', iterator=True, chunksize=1, skiprows=[2]) it.read() # skipfooter is not supported with the C parser yet if self.engine == 'python': # skipfooter data = """ignore A,B,C 1,2,3 # comment 1,2,3,4,5 2,3,4 footer """ msg = 'Expected 3 fields in line 4, saw 5' with tm.assert_raises_regex(Exception, msg): self.read_table(StringIO(data), sep=',', header=1, comment='#', skipfooter=1) def test_quoting(self): bad_line_small = """printer\tresult\tvariant_name Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten"" Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa pytest.raises(Exception, self.read_table, StringIO(bad_line_small), sep='\t') good_line_small = bad_line_small + '"' df = self.read_table(StringIO(good_line_small), sep='\t') assert len(df) == 3 def test_unnamed_columns(self): data = """A,B,C,, 1,2,3,4,5 6,7,8,9,10 11,12,13,14,15 """ expected = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]], dtype=np.int64) df = self.read_table(StringIO(data), sep=',') tm.assert_almost_equal(df.values, expected) tm.assert_index_equal(df.columns, Index(['A', 'B', 'C', 'Unnamed: 3', 'Unnamed: 4'])) def test_csv_mixed_type(self): data = """A,B,C a,1,2 b,3,4 c,4,5 """ expected = DataFrame({'A': ['a', 'b', 'c'], 'B': [1, 3, 4], 'C': [2, 4, 5]}) out = self.read_csv(StringIO(data)) tm.assert_frame_equal(out, expected) def test_read_csv_dataframe(self): df = self.read_csv(self.csv1, index_col=0, 
parse_dates=True) df2 = self.read_table(self.csv1, sep=',', index_col=0, parse_dates=True) tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D'])) assert df.index.name == 'index' assert isinstance( df.index[0], (datetime, np.datetime64, Timestamp)) assert df.values.dtype == np.float64 tm.assert_frame_equal(df, df2) def test_read_csv_no_index_name(self): df = self.read_csv(self.csv2, index_col=0, parse_dates=True) df2 = self.read_table(self.csv2, sep=',', index_col=0, parse_dates=True) tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D', 'E'])) assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp)) assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64 tm.assert_frame_equal(df, df2) def test_read_table_unicode(self): fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8')) df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None) assert isinstance(df1[0].values[0], compat.text_type) def test_read_table_wrong_num_columns(self): # too few! data = """A,B,C,D,E,F 1,2,3,4,5,6 6,7,8,9,10,11,12 11,12,13,14,15,16 """ pytest.raises(ValueError, self.read_csv, StringIO(data)) def test_read_duplicate_index_explicit(self): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo,12,13,14,15 bar,12,13,14,15 """ result = self.read_csv(
StringIO(data)
pandas.compat.StringIO
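For reference, the completion above drops an in-memory text buffer into read_csv. The following is a minimal, self-contained sketch of that pattern; it is illustrative only and not part of the dataset row, and it uses io.StringIO, which is what pandas.compat.StringIO resolves to on Python 3 in the pandas versions these tests target.

from io import StringIO

import pandas as pd

# Parse a CSV held in memory instead of on disk -- the same role
# StringIO(data) plays in the read_csv/read_table tests above.
data = "index,A,B\nfoo,1,2\nbar,3,4\n"
df = pd.read_csv(StringIO(data), index_col=0)
print(df.loc["foo", "A"])  # 1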
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 4 00:13:06 2020

@author: sahand
"""
from rake_nltk import Rake
import pandas as pd
import re
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
from nltk.corpus import stopwords
nltk.download('stopwords')
st = set(stopwords.words('english'))

path = '/home/sahand/GoogleDrive/Data/'
# data_address = path+"Corpus/AI 4k/copyr_deflem_stopword_removed_thesaurus May 28/1990-2019/1990-2019 abstract_title"
data_address = path+"Corpus/AI 4k/copyr_deflem_stopword_removed_thesaurus May 28/1990-2019/1990-2019 abstract_title"
df1 = pd.read_csv(data_address, names=['abstract'])
labels = pd.read_csv(path+'Corpus/AI 4k/embeddings/clustering/k10/Doc2Vec patent_wos_ai corpus DEC 200,500,10 k10 labels')
df1['label'] = labels['label']

corpus = []
for cluster in df1.groupby('label').groups:
    corpus.append(' '.join(df1[df1['label'] == cluster]['abstract'].values.tolist()))

# =============================================================================
# TFIDF
# =============================================================================
all_keys = []
all_keyscores = []
for cor in corpus:
    text = cor
    text = text.replace('.', ' ')
    text = re.sub(r'\s+', ' ', re.sub(r'[^\w \s]', '', text)).lower()
    corpus_n = re.split('chapter \d+', text)

    vectorizer = TfidfVectorizer()
    vectors = vectorizer.fit_transform(corpus_n)
    names = vectorizer.get_feature_names()
    data = vectors.todense().tolist()

    # Create a dataframe with the results
    df = pd.DataFrame(data, columns=names)
    df = df[filter(lambda x: x not in list(st), df.columns)]

    N = 10
    keys = []
    keyscores = []
    for i in df.iterrows():
        keyscores.append(i[1].sort_values(ascending=False)[:N].values.tolist())
        keys.append(list(i[1].sort_values(ascending=False)[:N].index))
    all_keys.append(keys)
    all_keyscores.append(keyscores)

all_keys_df = pd.DataFrame(np.array(all_keys).squeeze())
all_keys_df.to_csv()

# =============================================================================
# Rake -- won't work with long text
# =============================================================================
r = Rake()
r.extract_keywords_from_text(corpus[0])
r.get_ranked_phrases()

#%%# ==========================================================================
# From taxonomy
#
# The issue is that too many possible words are in the keyword list, like "mind" or "eye".
# These are correct, but out of context. We would have to be too specific if we wanted to
# rely on author keywords, such as the keywords from the 4 AI journals.
# =============================================================================
import numpy as np
import pandas as pd
import re
from tqdm import tqdm
from sciosci.assets import text_assets as kw
from sciosci.assets import keyword_dictionaries as kd
from gensim.parsing.preprocessing import strip_multiple_whitespaces
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from multiprocessing import Pool

range_s, range_e = 400000, 480000
stops = ['a','an','we','result','however','yet','since','previously','although','propose','proposed','this','...']
stop_words = list(set(stopwords.words("english")))+stops
path = '/home/sahand/GoogleDrive/Data/'

keywords = list(set(pd.read_csv(path+'Corpus/Taxonomy/TAI Taxonomy.csv',sep='===',names=['keyword'])['keyword'].values.tolist()))
keywords = keywords+list(set(pd.read_csv(path+'Corpus/Taxonomy/AI ALL Scopus n>2')['keywords'].values.tolist()))
keywords = keywords+list(set(pd.read_csv(path+'Corpus/Taxonomy/CSO.3.3-taxonomy.csv')['keywords'].values.tolist()))

# lemmatize
keywords = [kw.string_pre_processing(x,stemming_method='None',lemmatization='DEF',stop_word_removal=True,stop_words_extra=stops,verbose=False,download_nltk=False) for x in tqdm(keywords)]
keywords = [x for x in tqdm(keywords) if len(x)>2]
keywords = [kw.replace_british_american(strip_multiple_whitespaces(kw.replace_british_american(strip_multiple_whitespaces(keyword),kd.gb2us)),kd.gb2us) for keyword in tqdm(keywords)]
keywords = [k.strip().lower() for k in tqdm(keywords)]
keywords = np.array(keywords)

# pub_idx = pd.read_csv(path+'Corpus/Dimensions AI unlimited citations/clean/publication idx')[:]
abstracts = pd.read_csv(path+'Corpus/Dimensions All/clean/abstract_title method_b')[range_s:range_e]
idx = abstracts.index
pub_idx = abstracts[['id']]
abstracts = abstracts['abstract'].values.tolist()

tmp = []
for ab in tqdm(abstracts):
    try:
        tmp.append(kw.replace_british_american(strip_multiple_whitespaces(kw.replace_british_american(strip_multiple_whitespaces(ab),kd.gb2us)),kd.gb2us))
    except:
        tmp.append('')
abstracts = tmp
# abstracts = [kw.replace_british_american(strip_multiple_whitespaces(kw.replace_british_american(strip_multiple_whitespaces(ab),kd.gb2us)),kd.gb2us) for ab in tqdm(abstracts)]

# pd.DataFrame(keywords).to_csv(path+'Corpus/Taxonomy/AI kw merged US',index=False,header=False)

# =============================================================================
# abstract = word_tokenize(abstract)
# abstract = [word for word in abstract if not word in stop_words]
#
# extraction = [word for word in abstract if word in keywords]
# matches = [keyword in abstract for keyword in keywords]
# selection = keywords[matches]
# =============================================================================
abstracts_s = [strip_multiple_whitespaces(' '+(' '.join(re.split('( |,|\.|\!|\?|\(|\))',abstract)))+' ') for abstract in tqdm(abstracts)]
keywords_s = [' '+keyword+' ' for keyword in tqdm(keywords)]

def extract(abstracts, keywords):
    pubkeywords = []
    errors = []
    for i, abstract in tqdm(enumerate(abstracts), total=len(abstracts)):
        try:
            pubkeywords.append([x for x in keywords if x in abstract])
        except:
            pubkeywords.append([])
            errors.append(i)
    print('errors:'+str(errors))
    return pubkeywords, errors

extracted, errors = extract(abstracts_s, keywords_s)
extracted = list(extracted)

extracted_df = [str(list(row))[1:-1] for row in extracted]
extracted_df = pd.DataFrame(extracted_df)
extracted_df.index = idx
extracted_df['id'] = pub_idx
extracted_df.columns = ['kw', 'id']
extracted_df.to_csv(path+'Corpus/Dimensions All/clean/kw from taxonomy/keyword US p-'+str(int(range_s/80000)),header=True)

#%% ===========================================================================
#
# concat multiple parts
# =============================================================================
import pandas as pd
import numpy as np
from tqdm import tqdm
tqdm.pandas()

path = '/home/sahand/GoogleDrive/Data/'

def retract_check(string):
    try:
        if string.startswith('retracted'):
            return True
    except:
        pass
    return False

abstracts = pd.read_csv(path+'Corpus/Dimensions All/clean/abstract_title method_b')
# abstracts['retracted'] = abstracts['abstract'].progress_apply(lambda x: retract_check(x))
# bad_abstracts = abstracts[abstracts['retracted']==True][['id']]
# bad_abstracts.to_csv(path+'Corpus/Dimensions AI unlimited citations/clean/_bad data',index=False)
idx = abstracts.index
pub_idx = abstracts[['id']]

extracted_df = pd.read_csv(path+'Corpus/Dimensions All/clean/kw from taxonomy/keyword US p-0',index_col=0)
for i in range(1,7):
    extracted_df_b = pd.read_csv(path+'Corpus/Dimensions All/clean/kw from taxonomy/keyword US p-'+str(i),index_col=0)
    extracted_df = extracted_df.append(extracted_df_b)

extracted_df[extracted_df.index.duplicated()]  # check for duplicated index
extracted_df['id-n'] = pub_idx['id']
assert extracted_df['id'].equals(extracted_df['id-n']), "Oh no! id mismatch here... Please fix it!"
extracted_df = extracted_df.drop(['id-n'],axis=1)
# pub_idx.to_csv(path+'Corpus/Dimensions AI unlimited citations/clean/keyword US',index=False)

# clean, and prepare
def clean_and_extract(words, join=None):
    try:
        if join==None:
            return list(set([word.strip() for word in words[2:-2].split("', '")]))
        else:
            return join.join(list(set([word.strip() for word in words[2:-2].split("', '")])))
    except:
        return np.nan

extracted_df['kw'] = extracted_df['kw'].progress_apply(lambda words: clean_and_extract(words,join=';;;'))
extracted_df.to_csv(path+'Corpus/Dimensions All/clean/kw from taxonomy/keyword US - sco-cso-tai',index=False)

#%% ===========================================================================
# Check kw quality
# =============================================================================
import pandas as pd
import numpy as np
from tqdm import tqdm
tqdm.pandas()

path = '/home/sahand/GoogleDrive/Data/'
file = 'keyword US - sco-tai'
file = 'keyword US - scopus-sco-tai'

pub_idx = pd.read_csv(path+'Corpus/Dimensions AI unlimited citations/clean/'+file)
bad_abstracts = pd.read_csv(path+'Corpus/Dimensions AI unlimited citations/clean/_bad data')
pub_idx = pub_idx[~pub_idx['id'].isin(bad_abstracts['id'].values.tolist())]
pub_idx = pub_idx[
pd.notnull(pub_idx['keywords'])
pandas.notnull
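The completion for this row filters a DataFrame through a pd.notnull boolean mask. Below is a minimal sketch of that idiom; it is illustrative only, and the column name and sample values are invented for the example rather than taken from the dataset.

import numpy as np
import pandas as pd

# Keep only the rows whose 'keywords' cell is not missing -- the same
# filter the prompt builds with pub_idx[...] and the completed pd.notnull call.
pub_idx = pd.DataFrame({"id": [1, 2, 3],
                        "keywords": ["ai;;;nlp", np.nan, "robotics"]})
pub_idx = pub_idx[pd.notnull(pub_idx["keywords"])]
print(len(pub_idx))  # 2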
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import decimal from datetime import datetime from distutils.version import LooseVersion import inspect import sys import unittest from io import StringIO from typing import List import numpy as np import pandas as pd from pandas.tseries.offsets import DateOffset from pyspark import StorageLevel from pyspark.ml.linalg import SparseVector from pyspark.sql.types import StructType from pyspark import pandas as ps from pyspark.pandas.config import option_context from pyspark.pandas.exceptions import PandasNotImplementedError from pyspark.pandas.frame import CachedDataFrame from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame from pyspark.pandas.typedef.typehints import ( extension_dtypes, extension_dtypes_available, extension_float_dtypes_available, extension_object_dtypes_available, ) from pyspark.testing.pandasutils import ( have_tabulate, PandasOnSparkTestCase, SPARK_CONF_ARROW_ENABLED, tabulate_requirement_message, ) from pyspark.testing.sqlutils import SQLTestUtils from pyspark.pandas.utils import name_like_string class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils): @property def pdf(self): return pd.DataFrame( {"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]}, index=np.random.rand(9), ) @property def psdf(self): return ps.from_pandas(self.pdf) @property def df_pair(self): pdf = self.pdf psdf = ps.from_pandas(pdf) return pdf, psdf def test_dataframe(self): pdf, psdf = self.df_pair self.assert_eq(psdf["a"] + 1, pdf["a"] + 1) self.assert_eq(psdf.columns, pd.Index(["a", "b"])) self.assert_eq(psdf[psdf["b"] > 2], pdf[pdf["b"] > 2]) self.assert_eq(-psdf[psdf["b"] > 2], -pdf[pdf["b"] > 2]) self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]]) self.assert_eq(psdf.a, pdf.a) self.assert_eq(psdf.b.mean(), pdf.b.mean()) self.assert_eq(psdf.b.var(), pdf.b.var()) self.assert_eq(psdf.b.std(), pdf.b.std()) pdf, psdf = self.df_pair self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]]) self.assertEqual(psdf.a.notnull().rename("x").name, "x") # check ps.DataFrame(ps.Series) pser = pd.Series([1, 2, 3], name="x", index=np.random.rand(3)) psser = ps.from_pandas(pser) self.assert_eq(pd.DataFrame(pser), ps.DataFrame(psser)) # check psdf[pd.Index] pdf, psdf = self.df_pair column_mask = pdf.columns.isin(["a", "b"]) index_cols = pdf.columns[column_mask] self.assert_eq(psdf[index_cols], pdf[index_cols]) def _check_extension(self, psdf, pdf): if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"): self.assert_eq(psdf, pdf, check_exact=False) for dtype in psdf.dtypes: self.assertTrue(isinstance(dtype, extension_dtypes)) else: self.assert_eq(psdf, pdf) @unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available") def test_extension_dtypes(self): pdf = pd.DataFrame( { 
"a": pd.Series([1, 2, None, 4], dtype="Int8"), "b": pd.Series([1, None, None, 4], dtype="Int16"), "c": pd.Series([1, 2, None, None], dtype="Int32"), "d": pd.Series([None, 2, None, 4], dtype="Int64"), } ) psdf = ps.from_pandas(pdf) self._check_extension(psdf, pdf) self._check_extension(psdf + psdf, pdf + pdf) @unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available") def test_astype_extension_dtypes(self): pdf = pd.DataFrame( { "a": [1, 2, None, 4], "b": [1, None, None, 4], "c": [1, 2, None, None], "d": [None, 2, None, 4], } ) psdf = ps.from_pandas(pdf) astype = {"a": "Int8", "b": "Int16", "c": "Int32", "d": "Int64"} self._check_extension(psdf.astype(astype), pdf.astype(astype)) @unittest.skipIf( not extension_object_dtypes_available, "pandas extension object dtypes are not available" ) def test_extension_object_dtypes(self): pdf = pd.DataFrame( { "a": pd.Series(["a", "b", None, "c"], dtype="string"), "b": pd.Series([True, None, False, True], dtype="boolean"), } ) psdf = ps.from_pandas(pdf) self._check_extension(psdf, pdf) @unittest.skipIf( not extension_object_dtypes_available, "pandas extension object dtypes are not available" ) def test_astype_extension_object_dtypes(self): pdf = pd.DataFrame({"a": ["a", "b", None, "c"], "b": [True, None, False, True]}) psdf = ps.from_pandas(pdf) astype = {"a": "string", "b": "boolean"} self._check_extension(psdf.astype(astype), pdf.astype(astype)) @unittest.skipIf( not extension_float_dtypes_available, "pandas extension float dtypes are not available" ) def test_extension_float_dtypes(self): pdf = pd.DataFrame( { "a": pd.Series([1.0, 2.0, None, 4.0], dtype="Float32"), "b": pd.Series([1.0, None, 3.0, 4.0], dtype="Float64"), } ) psdf = ps.from_pandas(pdf) self._check_extension(psdf, pdf) self._check_extension(psdf + 1, pdf + 1) self._check_extension(psdf + psdf, pdf + pdf) @unittest.skipIf( not extension_float_dtypes_available, "pandas extension float dtypes are not available" ) def test_astype_extension_float_dtypes(self): pdf = pd.DataFrame({"a": [1.0, 2.0, None, 4.0], "b": [1.0, None, 3.0, 4.0]}) psdf = ps.from_pandas(pdf) astype = {"a": "Float32", "b": "Float64"} self._check_extension(psdf.astype(astype), pdf.astype(astype)) def test_insert(self): # # Basic DataFrame # pdf = pd.DataFrame([1, 2, 3]) psdf = ps.from_pandas(pdf) psdf.insert(1, "b", 10) pdf.insert(1, "b", 10) self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True) psdf.insert(2, "c", 0.1) pdf.insert(2, "c", 0.1) self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True) psdf.insert(3, "d", psdf.b + 1) pdf.insert(3, "d", pdf.b + 1) self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True) psser = ps.Series([4, 5, 6]) self.assertRaises(ValueError, lambda: psdf.insert(0, "y", psser)) self.assertRaisesRegex( ValueError, "cannot insert b, already exists", lambda: psdf.insert(1, "b", 10) ) self.assertRaisesRegex( TypeError, '"column" should be a scalar value or tuple that contains scalar values', lambda: psdf.insert(0, list("abc"), psser), ) self.assertRaisesRegex( TypeError, "loc must be int", lambda: psdf.insert((1,), "b", 10), ) self.assertRaisesRegex( NotImplementedError, "Assigning column name as tuple is only supported for MultiIndex columns for now.", lambda: psdf.insert(0, ("e",), 10), ) self.assertRaises(ValueError, lambda: psdf.insert(0, "e", [7, 8, 9, 10])) self.assertRaises(ValueError, lambda: psdf.insert(0, "f", ps.Series([7, 8]))) self.assertRaises(AssertionError, lambda: psdf.insert(100, "y", psser)) 
self.assertRaises(AssertionError, lambda: psdf.insert(1, "y", psser, allow_duplicates=True)) # # DataFrame with MultiIndex as columns # pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]}) psdf = ps.from_pandas(pdf) psdf.insert(1, "b", 10) pdf.insert(1, "b", 10) self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True) psdf.insert(2, "c", 0.1) pdf.insert(2, "c", 0.1) self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True) psdf.insert(3, "d", psdf.b + 1) pdf.insert(3, "d", pdf.b + 1) self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True) self.assertRaisesRegex( ValueError, "cannot insert d, already exists", lambda: psdf.insert(4, "d", 11) ) self.assertRaisesRegex( ValueError, r"cannot insert \('x', 'a', 'b'\), already exists", lambda: psdf.insert(4, ("x", "a", "b"), 11), ) self.assertRaisesRegex( ValueError, '"column" must have length equal to number of column levels.', lambda: psdf.insert(4, ("e",), 11), ) def test_inplace(self): pdf, psdf = self.df_pair pser = pdf.a psser = psdf.a pdf["a"] = pdf["a"] + 10 psdf["a"] = psdf["a"] + 10 self.assert_eq(psdf, pdf) self.assert_eq(psser, pser) def test_assign_list(self): pdf, psdf = self.df_pair pser = pdf.a psser = psdf.a pdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90] psdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90] self.assert_eq(psdf.sort_index(), pdf.sort_index()) self.assert_eq(psser, pser) with self.assertRaisesRegex(ValueError, "Length of values does not match length of index"): psdf["z"] = [10, 20, 30, 40, 50, 60, 70, 80] def test_dataframe_multiindex_columns(self): pdf = pd.DataFrame( { ("x", "a", "1"): [1, 2, 3], ("x", "b", "2"): [4, 5, 6], ("y.z", "c.d", "3"): [7, 8, 9], ("x", "b", "4"): [10, 11, 12], }, index=np.random.rand(3), ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf, pdf) self.assert_eq(psdf["x"], pdf["x"]) self.assert_eq(psdf["y.z"], pdf["y.z"]) self.assert_eq(psdf["x"]["b"], pdf["x"]["b"]) self.assert_eq(psdf["x"]["b"]["2"], pdf["x"]["b"]["2"]) self.assert_eq(psdf.x, pdf.x) self.assert_eq(psdf.x.b, pdf.x.b) self.assert_eq(psdf.x.b["2"], pdf.x.b["2"]) self.assertRaises(KeyError, lambda: psdf["z"]) self.assertRaises(AttributeError, lambda: psdf.z) self.assert_eq(psdf[("x",)], pdf[("x",)]) self.assert_eq(psdf[("x", "a")], pdf[("x", "a")]) self.assert_eq(psdf[("x", "a", "1")], pdf[("x", "a", "1")]) def test_dataframe_column_level_name(self): column = pd.Index(["A", "B", "C"], name="X") pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=column, index=np.random.rand(2)) psdf = ps.from_pandas(pdf) self.assert_eq(psdf, pdf) self.assert_eq(psdf.columns.names, pdf.columns.names) self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names) def test_dataframe_multiindex_names_level(self): columns = pd.MultiIndex.from_tuples( [("X", "A", "Z"), ("X", "B", "Z"), ("Y", "C", "Z"), ("Y", "D", "Z")], names=["lvl_1", "lvl_2", "lv_3"], ) pdf = pd.DataFrame( [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]], columns=columns, index=np.random.rand(5), ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.columns.names, pdf.columns.names) self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names) psdf1 = ps.from_pandas(pdf) self.assert_eq(psdf1.columns.names, pdf.columns.names) self.assertRaises( AssertionError, lambda: ps.DataFrame(psdf1._internal.copy(column_label_names=("level",))), ) self.assert_eq(psdf["X"], pdf["X"]) self.assert_eq(psdf["X"].columns.names, pdf["X"].columns.names) self.assert_eq(psdf["X"].to_pandas().columns.names, pdf["X"].columns.names) 
self.assert_eq(psdf["X"]["A"], pdf["X"]["A"]) self.assert_eq(psdf["X"]["A"].columns.names, pdf["X"]["A"].columns.names) self.assert_eq(psdf["X"]["A"].to_pandas().columns.names, pdf["X"]["A"].columns.names) self.assert_eq(psdf[("X", "A")], pdf[("X", "A")]) self.assert_eq(psdf[("X", "A")].columns.names, pdf[("X", "A")].columns.names) self.assert_eq(psdf[("X", "A")].to_pandas().columns.names, pdf[("X", "A")].columns.names) self.assert_eq(psdf[("X", "A", "Z")], pdf[("X", "A", "Z")]) def test_itertuples(self): pdf = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"]) psdf = ps.from_pandas(pdf) for ptuple, ktuple in zip( pdf.itertuples(index=False, name="Animal"), psdf.itertuples(index=False, name="Animal") ): self.assert_eq(ptuple, ktuple) for ptuple, ktuple in zip(pdf.itertuples(name=None), psdf.itertuples(name=None)): self.assert_eq(ptuple, ktuple) pdf.index = pd.MultiIndex.from_arrays( [[1, 2], ["black", "brown"]], names=("count", "color") ) psdf = ps.from_pandas(pdf) for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")): self.assert_eq(ptuple, ktuple) pdf.columns = pd.MultiIndex.from_arrays( [["CA", "WA"], ["age", "children"]], names=("origin", "info") ) psdf = ps.from_pandas(pdf) for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")): self.assert_eq(ptuple, ktuple) pdf = pd.DataFrame([1, 2, 3]) psdf = ps.from_pandas(pdf) for ptuple, ktuple in zip( (pdf + 1).itertuples(name="num"), (psdf + 1).itertuples(name="num") ): self.assert_eq(ptuple, ktuple) # DataFrames with a large number of columns (>254) pdf = pd.DataFrame(np.random.random((1, 255))) psdf = ps.from_pandas(pdf) for ptuple, ktuple in zip(pdf.itertuples(name="num"), psdf.itertuples(name="num")): self.assert_eq(ptuple, ktuple) def test_iterrows(self): pdf = pd.DataFrame( { ("x", "a", "1"): [1, 2, 3], ("x", "b", "2"): [4, 5, 6], ("y.z", "c.d", "3"): [7, 8, 9], ("x", "b", "4"): [10, 11, 12], }, index=np.random.rand(3), ) psdf = ps.from_pandas(pdf) for (pdf_k, pdf_v), (psdf_k, psdf_v) in zip(pdf.iterrows(), psdf.iterrows()): self.assert_eq(pdf_k, psdf_k) self.assert_eq(pdf_v, psdf_v) def test_reset_index(self): pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3)) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.reset_index(), pdf.reset_index()) self.assert_eq(psdf.reset_index().index, pdf.reset_index().index) self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True)) pdf.index.name = "a" psdf.index.name = "a" with self.assertRaisesRegex(ValueError, "cannot insert a, already exists"): psdf.reset_index() self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True)) # inplace pser = pdf.a psser = psdf.a pdf.reset_index(drop=True, inplace=True) psdf.reset_index(drop=True, inplace=True) self.assert_eq(psdf, pdf) self.assert_eq(psser, pser) pdf.columns = ["index", "b"] psdf.columns = ["index", "b"] self.assert_eq(psdf.reset_index(), pdf.reset_index()) def test_reset_index_with_default_index_types(self): pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3)) psdf = ps.from_pandas(pdf) with ps.option_context("compute.default_index_type", "sequence"): self.assert_eq(psdf.reset_index(), pdf.reset_index()) with ps.option_context("compute.default_index_type", "distributed-sequence"): self.assert_eq(psdf.reset_index(), pdf.reset_index()) with ps.option_context("compute.default_index_type", "distributed"): # the index is different. 
self.assert_eq(psdf.reset_index().to_pandas().reset_index(drop=True), pdf.reset_index()) def test_reset_index_with_multiindex_columns(self): index = pd.MultiIndex.from_tuples( [("bird", "falcon"), ("bird", "parrot"), ("mammal", "lion"), ("mammal", "monkey")], names=["class", "name"], ) columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")]) pdf = pd.DataFrame( [(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")], index=index, columns=columns, ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf, pdf) self.assert_eq(psdf.reset_index(), pdf.reset_index()) self.assert_eq(psdf.reset_index(level="class"), pdf.reset_index(level="class")) self.assert_eq( psdf.reset_index(level="class", col_level=1), pdf.reset_index(level="class", col_level=1), ) self.assert_eq( psdf.reset_index(level="class", col_level=1, col_fill="species"), pdf.reset_index(level="class", col_level=1, col_fill="species"), ) self.assert_eq( psdf.reset_index(level="class", col_level=1, col_fill="genus"), pdf.reset_index(level="class", col_level=1, col_fill="genus"), ) with self.assertRaisesRegex(IndexError, "Index has only 2 levels, not 3"): psdf.reset_index(col_level=2) pdf.index.names = [("x", "class"), ("y", "name")] psdf.index.names = [("x", "class"), ("y", "name")] self.assert_eq(psdf.reset_index(), pdf.reset_index()) with self.assertRaisesRegex(ValueError, "Item must have length equal to number of levels."): psdf.reset_index(col_level=1) def test_index_to_frame_reset_index(self): def check(psdf, pdf): self.assert_eq(psdf.reset_index(), pdf.reset_index()) self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True)) pdf.reset_index(drop=True, inplace=True) psdf.reset_index(drop=True, inplace=True) self.assert_eq(psdf, pdf) pdf, psdf = self.df_pair check(psdf.index.to_frame(), pdf.index.to_frame()) check(psdf.index.to_frame(index=False), pdf.index.to_frame(index=False)) check(psdf.index.to_frame(name="a"), pdf.index.to_frame(name="a")) check(psdf.index.to_frame(index=False, name="a"), pdf.index.to_frame(index=False, name="a")) check(psdf.index.to_frame(name=("x", "a")), pdf.index.to_frame(name=("x", "a"))) check( psdf.index.to_frame(index=False, name=("x", "a")), pdf.index.to_frame(index=False, name=("x", "a")), ) def test_multiindex_column_access(self): columns = pd.MultiIndex.from_tuples( [ ("a", "", "", "b"), ("c", "", "d", ""), ("e", "", "f", ""), ("e", "g", "", ""), ("", "", "", "h"), ("i", "", "", ""), ] ) pdf = pd.DataFrame( [ (1, "a", "x", 10, 100, 1000), (2, "b", "y", 20, 200, 2000), (3, "c", "z", 30, 300, 3000), ], columns=columns, index=np.random.rand(3), ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf, pdf) self.assert_eq(psdf["a"], pdf["a"]) self.assert_eq(psdf["a"]["b"], pdf["a"]["b"]) self.assert_eq(psdf["c"], pdf["c"]) self.assert_eq(psdf["c"]["d"], pdf["c"]["d"]) self.assert_eq(psdf["e"], pdf["e"]) self.assert_eq(psdf["e"][""]["f"], pdf["e"][""]["f"]) self.assert_eq(psdf["e"]["g"], pdf["e"]["g"]) self.assert_eq(psdf[""], pdf[""]) self.assert_eq(psdf[""]["h"], pdf[""]["h"]) self.assert_eq(psdf["i"], pdf["i"]) self.assert_eq(psdf[["a", "e"]], pdf[["a", "e"]]) self.assert_eq(psdf[["e", "a"]], pdf[["e", "a"]]) self.assert_eq(psdf[("a",)], pdf[("a",)]) self.assert_eq(psdf[("e", "g")], pdf[("e", "g")]) # self.assert_eq(psdf[("i",)], pdf[("i",)]) self.assert_eq(psdf[("i", "")], pdf[("i", "")]) self.assertRaises(KeyError, lambda: psdf[("a", "b")]) def test_repr_cache_invalidation(self): # If there is any cache, inplace operations should invalidate it. 
df = ps.range(10) df.__repr__() df["a"] = df["id"] self.assertEqual(df.__repr__(), df.to_pandas().__repr__()) def test_repr_html_cache_invalidation(self): # If there is any cache, inplace operations should invalidate it. df = ps.range(10) df._repr_html_() df["a"] = df["id"] self.assertEqual(df._repr_html_(), df.to_pandas()._repr_html_()) def test_empty_dataframe(self): pdf = pd.DataFrame({"a": pd.Series([], dtype="i1"), "b": pd.Series([], dtype="str")}) psdf = ps.from_pandas(pdf) self.assert_eq(psdf, pdf) with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}): psdf = ps.from_pandas(pdf) self.assert_eq(psdf, pdf) def test_all_null_dataframe(self): pdf = pd.DataFrame( { "a": [None, None, None, "a"], "b": [None, None, None, 1], "c": [None, None, None] + list(np.arange(1, 2).astype("i1")), "d": [None, None, None, 1.0], "e": [None, None, None, True], "f": [None, None, None] + list(pd.date_range("20130101", periods=1)), }, ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1]) with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}): self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1]) pdf = pd.DataFrame( { "a": pd.Series([None, None, None], dtype="float64"), "b": pd.Series([None, None, None], dtype="str"), }, ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf, pdf) with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}): psdf = ps.from_pandas(pdf) self.assert_eq(psdf, pdf) def test_nullable_object(self): pdf = pd.DataFrame( { "a": list("abc") + [np.nan, None], "b": list(range(1, 4)) + [np.nan, None], "c": list(np.arange(3, 6).astype("i1")) + [np.nan, None], "d": list(np.arange(4.0, 7.0, dtype="float64")) + [np.nan, None], "e": [True, False, True, np.nan, None], "f": list(pd.date_range("20130101", periods=3)) + [np.nan, None], }, index=np.random.rand(5), ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf, pdf) with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}): psdf = ps.from_pandas(pdf) self.assert_eq(psdf, pdf) def test_assign(self): pdf, psdf = self.df_pair psdf["w"] = 1.0 pdf["w"] = 1.0 self.assert_eq(psdf, pdf) psdf.w = 10.0 pdf.w = 10.0 self.assert_eq(psdf, pdf) psdf[1] = 1.0 pdf[1] = 1.0 self.assert_eq(psdf, pdf) psdf = psdf.assign(a=psdf["a"] * 2) pdf = pdf.assign(a=pdf["a"] * 2) self.assert_eq(psdf, pdf) # multi-index columns columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "w"), ("y", "v")]) pdf.columns = columns psdf.columns = columns psdf[("a", "c")] = "def" pdf[("a", "c")] = "def" self.assert_eq(psdf, pdf) psdf = psdf.assign(Z="ZZ") pdf = pdf.assign(Z="ZZ") self.assert_eq(psdf, pdf) psdf["x"] = "ghi" pdf["x"] = "ghi" self.assert_eq(psdf, pdf) def test_head(self): pdf, psdf = self.df_pair self.assert_eq(psdf.head(2), pdf.head(2)) self.assert_eq(psdf.head(3), pdf.head(3)) self.assert_eq(psdf.head(0), pdf.head(0)) self.assert_eq(psdf.head(-3), pdf.head(-3)) self.assert_eq(psdf.head(-10), pdf.head(-10)) with option_context("compute.ordered_head", True): self.assert_eq(psdf.head(), pdf.head()) def test_attributes(self): psdf = self.psdf self.assertIn("a", dir(psdf)) self.assertNotIn("foo", dir(psdf)) self.assertRaises(AttributeError, lambda: psdf.foo) psdf = ps.DataFrame({"a b c": [1, 2, 3]}) self.assertNotIn("a b c", dir(psdf)) psdf = ps.DataFrame({"a": [1, 2], 5: [1, 2]}) self.assertIn("a", dir(psdf)) self.assertNotIn(5, dir(psdf)) def test_column_names(self): pdf, psdf = self.df_pair self.assert_eq(psdf.columns, pdf.columns) self.assert_eq(psdf[["b", "a"]].columns, pdf[["b", "a"]].columns) self.assert_eq(psdf["a"].name, pdf["a"].name) self.assert_eq((psdf["a"] 
+ 1).name, (pdf["a"] + 1).name) self.assert_eq((psdf.a + psdf.b).name, (pdf.a + pdf.b).name) self.assert_eq((psdf.a + psdf.b.rename("a")).name, (pdf.a + pdf.b.rename("a")).name) self.assert_eq((psdf.a + psdf.b.rename()).name, (pdf.a + pdf.b.rename()).name) self.assert_eq((psdf.a.rename() + psdf.b).name, (pdf.a.rename() + pdf.b).name) self.assert_eq( (psdf.a.rename() + psdf.b.rename()).name, (pdf.a.rename() + pdf.b.rename()).name ) def test_rename_columns(self): pdf = pd.DataFrame( {"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7) ) psdf = ps.from_pandas(pdf) psdf.columns = ["x", "y"] pdf.columns = ["x", "y"] self.assert_eq(psdf.columns, pd.Index(["x", "y"])) self.assert_eq(psdf, pdf) self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"]) self.assert_eq(psdf.to_spark().columns, ["x", "y"]) self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"]) columns = pdf.columns columns.name = "lvl_1" psdf.columns = columns self.assert_eq(psdf.columns.names, ["lvl_1"]) self.assert_eq(psdf, pdf) msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements" with self.assertRaisesRegex(ValueError, msg): psdf.columns = [1, 2, 3, 4] # Multi-index columns pdf = pd.DataFrame( {("A", "0"): [1, 2, 2, 3], ("B", "1"): [1, 2, 3, 4]}, index=np.random.rand(4) ) psdf = ps.from_pandas(pdf) columns = pdf.columns self.assert_eq(psdf.columns, columns) self.assert_eq(psdf, pdf) pdf.columns = ["x", "y"] psdf.columns = ["x", "y"] self.assert_eq(psdf.columns, pd.Index(["x", "y"])) self.assert_eq(psdf, pdf) self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"]) self.assert_eq(psdf.to_spark().columns, ["x", "y"]) self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"]) pdf.columns = columns psdf.columns = columns self.assert_eq(psdf.columns, columns) self.assert_eq(psdf, pdf) self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"]) self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"]) self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"]) columns.names = ["lvl_1", "lvl_2"] psdf.columns = columns self.assert_eq(psdf.columns.names, ["lvl_1", "lvl_2"]) self.assert_eq(psdf, pdf) self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"]) self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"]) self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"]) def test_rename_dataframe(self): pdf1 = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) psdf1 = ps.from_pandas(pdf1) self.assert_eq( psdf1.rename(columns={"A": "a", "B": "b"}), pdf1.rename(columns={"A": "a", "B": "b"}) ) result_psdf = psdf1.rename(index={1: 10, 2: 20}) result_pdf = pdf1.rename(index={1: 10, 2: 20}) self.assert_eq(result_psdf, result_pdf) # inplace pser = result_pdf.A psser = result_psdf.A result_psdf.rename(index={10: 100, 20: 200}, inplace=True) result_pdf.rename(index={10: 100, 20: 200}, inplace=True) self.assert_eq(result_psdf, result_pdf) self.assert_eq(psser, pser) def str_lower(s) -> str: return str.lower(s) self.assert_eq( psdf1.rename(str_lower, axis="columns"), pdf1.rename(str_lower, axis="columns") ) def mul10(x) -> int: return x * 10 self.assert_eq(psdf1.rename(mul10, axis="index"), pdf1.rename(mul10, axis="index")) self.assert_eq( psdf1.rename(columns=str_lower, index={1: 10, 2: 20}), pdf1.rename(columns=str_lower, index={1: 10, 2: 20}), ) idx = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Y", "D")]) pdf2 = 
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx) psdf2 = ps.from_pandas(pdf2) self.assert_eq(psdf2.rename(columns=str_lower), pdf2.rename(columns=str_lower)) self.assert_eq( psdf2.rename(columns=str_lower, level=0), pdf2.rename(columns=str_lower, level=0) ) self.assert_eq( psdf2.rename(columns=str_lower, level=1), pdf2.rename(columns=str_lower, level=1) ) pdf3 = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list("ab")) psdf3 = ps.from_pandas(pdf3) self.assert_eq(psdf3.rename(index=str_lower), pdf3.rename(index=str_lower)) self.assert_eq( psdf3.rename(index=str_lower, level=0), pdf3.rename(index=str_lower, level=0) ) self.assert_eq( psdf3.rename(index=str_lower, level=1), pdf3.rename(index=str_lower, level=1) ) pdf4 = pdf2 + 1 psdf4 = psdf2 + 1 self.assert_eq(psdf4.rename(columns=str_lower), pdf4.rename(columns=str_lower)) pdf5 = pdf3 + 1 psdf5 = psdf3 + 1 self.assert_eq(psdf5.rename(index=str_lower), pdf5.rename(index=str_lower)) msg = "Either `index` or `columns` should be provided." with self.assertRaisesRegex(ValueError, msg): psdf1.rename() msg = "`mapper` or `index` or `columns` should be either dict-like or function type." with self.assertRaisesRegex(ValueError, msg): psdf1.rename(mapper=[str_lower], axis=1) msg = "Mapper dict should have the same value type." with self.assertRaisesRegex(ValueError, msg): psdf1.rename({"A": "a", "B": 2}, axis=1) msg = r"level should be an integer between \[0, column_labels_level\)" with self.assertRaisesRegex(ValueError, msg): psdf2.rename(columns=str_lower, level=2) def test_rename_axis(self): index = pd.Index(["A", "B", "C"], name="index") columns = pd.Index(["numbers", "values"], name="cols") pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns) psdf = ps.from_pandas(pdf) for axis in [0, "index"]: self.assert_eq( pdf.rename_axis("index2", axis=axis).sort_index(), psdf.rename_axis("index2", axis=axis).sort_index(), ) self.assert_eq( pdf.rename_axis(["index2"], axis=axis).sort_index(), psdf.rename_axis(["index2"], axis=axis).sort_index(), ) for axis in [1, "columns"]: self.assert_eq( pdf.rename_axis("cols2", axis=axis).sort_index(), psdf.rename_axis("cols2", axis=axis).sort_index(), ) self.assert_eq( pdf.rename_axis(["cols2"], axis=axis).sort_index(), psdf.rename_axis(["cols2"], axis=axis).sort_index(), ) pdf2 = pdf.copy() psdf2 = psdf.copy() pdf2.rename_axis("index2", axis="index", inplace=True) psdf2.rename_axis("index2", axis="index", inplace=True) self.assert_eq(pdf2.sort_index(), psdf2.sort_index()) self.assertRaises(ValueError, lambda: psdf.rename_axis(["index2", "index3"], axis=0)) self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols2", "cols3"], axis=1)) self.assertRaises(TypeError, lambda: psdf.rename_axis(mapper=["index2"], index=["index3"])) self.assert_eq( pdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(), psdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(), ) self.assert_eq( pdf.rename_axis(index={"missing": "index2"}, columns={"missing": "cols2"}).sort_index(), psdf.rename_axis( index={"missing": "index2"}, columns={"missing": "cols2"} ).sort_index(), ) self.assert_eq( pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(), psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(), ) index = pd.MultiIndex.from_tuples( [("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"] ) columns = pd.MultiIndex.from_tuples( [("numbers", "first"), ("values", "second")], names=["cols1", 
"cols2"] ) pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns) psdf = ps.from_pandas(pdf) for axis in [0, "index"]: self.assert_eq( pdf.rename_axis(["index3", "index4"], axis=axis).sort_index(), psdf.rename_axis(["index3", "index4"], axis=axis).sort_index(), ) for axis in [1, "columns"]: self.assert_eq( pdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(), psdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(), ) self.assertRaises( ValueError, lambda: psdf.rename_axis(["index3", "index4", "index5"], axis=0) ) self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols3", "cols4", "cols5"], axis=1)) self.assert_eq( pdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(), psdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(), ) self.assert_eq( pdf.rename_axis(index={"missing": "index3"}, columns={"missing": "cols3"}).sort_index(), psdf.rename_axis( index={"missing": "index3"}, columns={"missing": "cols3"} ).sort_index(), ) self.assert_eq( pdf.rename_axis( index={"index1": "index3", "index2": "index4"}, columns={"cols1": "cols3", "cols2": "cols4"}, ).sort_index(), psdf.rename_axis( index={"index1": "index3", "index2": "index4"}, columns={"cols1": "cols3", "cols2": "cols4"}, ).sort_index(), ) self.assert_eq( pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(), psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(), ) def test_dot(self): psdf = self.psdf with self.assertRaisesRegex(TypeError, "Unsupported type DataFrame"): psdf.dot(psdf) def test_dot_in_column_name(self): self.assert_eq( ps.DataFrame(ps.range(1)._internal.spark_frame.selectExpr("1L as `a.b`"))["a.b"], ps.Series([1], name="a.b"), ) def test_aggregate(self): pdf = pd.DataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=["A", "B", "C"] ) psdf = ps.from_pandas(pdf) self.assert_eq( psdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(), # TODO?: fix column order pdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(), ) self.assert_eq( psdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(), pdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(), ) self.assertRaises(KeyError, lambda: psdf.agg({"A": ["sum", "min"], "X": ["min", "max"]})) # multi-index columns columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")]) pdf.columns = columns psdf.columns = columns self.assert_eq( psdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(), pdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(), ) self.assert_eq( psdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[ [("X", "A"), ("X", "B")] ].sort_index(), pdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[ [("X", "A"), ("X", "B")] ].sort_index(), ) self.assertRaises(TypeError, lambda: psdf.agg({"X": ["sum", "min"], "Y": ["min", "max"]})) # non-string names pdf = pd.DataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=[10, 20, 30] ) psdf = ps.from_pandas(pdf) self.assert_eq( psdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(), pdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(), ) self.assert_eq( psdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(), pdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(), ) columns = pd.MultiIndex.from_tuples([("X", 10), ("X", 20), ("Y", 30)]) pdf.columns = columns psdf.columns = columns self.assert_eq( 
psdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(), pdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(), ) self.assert_eq( psdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[ [("X", 10), ("X", 20)] ].sort_index(), pdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[ [("X", 10), ("X", 20)] ].sort_index(), ) pdf = pd.DataFrame( [datetime(2019, 2, 2, 0, 0, 0, 0), datetime(2019, 2, 3, 0, 0, 0, 0)], columns=["timestamp"], ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.timestamp.min(), pdf.timestamp.min()) self.assert_eq(psdf.timestamp.max(), pdf.timestamp.max()) self.assertRaises(ValueError, lambda: psdf.agg(("sum", "min"))) def test_droplevel(self): pdf = ( pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) .set_index([0, 1]) .rename_axis(["a", "b"]) ) pdf.columns = pd.MultiIndex.from_tuples( [("c", "e"), ("d", "f")], names=["level_1", "level_2"] ) psdf = ps.from_pandas(pdf) self.assertRaises(ValueError, lambda: psdf.droplevel(["a", "b"])) self.assertRaises(ValueError, lambda: psdf.droplevel([1, 1, 1, 1, 1])) self.assertRaises(IndexError, lambda: psdf.droplevel(2)) self.assertRaises(IndexError, lambda: psdf.droplevel(-3)) self.assertRaises(KeyError, lambda: psdf.droplevel({"a"})) self.assertRaises(KeyError, lambda: psdf.droplevel({"a": 1})) self.assertRaises(ValueError, lambda: psdf.droplevel(["level_1", "level_2"], axis=1)) self.assertRaises(IndexError, lambda: psdf.droplevel(2, axis=1)) self.assertRaises(IndexError, lambda: psdf.droplevel(-3, axis=1)) self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1"}, axis=1)) self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1": 1}, axis=1)) self.assert_eq(pdf.droplevel("a"), psdf.droplevel("a")) self.assert_eq(pdf.droplevel(["a"]), psdf.droplevel(["a"])) self.assert_eq(pdf.droplevel(("a",)), psdf.droplevel(("a",))) self.assert_eq(pdf.droplevel(0), psdf.droplevel(0)) self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1)) self.assert_eq(pdf.droplevel("level_1", axis=1), psdf.droplevel("level_1", axis=1)) self.assert_eq(pdf.droplevel(["level_1"], axis=1), psdf.droplevel(["level_1"], axis=1)) self.assert_eq(pdf.droplevel(("level_1",), axis=1), psdf.droplevel(("level_1",), axis=1)) self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1)) self.assert_eq(pdf.droplevel(-1, axis=1), psdf.droplevel(-1, axis=1)) # Tupled names pdf.columns.names = [("level", 1), ("level", 2)] pdf.index.names = [("a", 10), ("x", 20)] psdf = ps.from_pandas(pdf) self.assertRaises(KeyError, lambda: psdf.droplevel("a")) self.assertRaises(KeyError, lambda: psdf.droplevel(("a", 10))) self.assert_eq(pdf.droplevel([("a", 10)]), psdf.droplevel([("a", 10)])) self.assert_eq( pdf.droplevel([("level", 1)], axis=1), psdf.droplevel([("level", 1)], axis=1) ) # non-string names pdf = ( pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) .set_index([0, 1]) .rename_axis([10.0, 20.0]) ) pdf.columns = pd.MultiIndex.from_tuples([("c", "e"), ("d", "f")], names=[100.0, 200.0]) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.droplevel(10.0), psdf.droplevel(10.0)) self.assert_eq(pdf.droplevel([10.0]), psdf.droplevel([10.0])) self.assert_eq(pdf.droplevel((10.0,)), psdf.droplevel((10.0,))) self.assert_eq(pdf.droplevel(0), psdf.droplevel(0)) self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1)) self.assert_eq(pdf.droplevel(100.0, axis=1), psdf.droplevel(100.0, axis=1)) self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1)) def test_drop(self): pdf = pd.DataFrame({"x": [1, 2], "y": [3, 
4], "z": [5, 6]}, index=np.random.rand(2)) psdf = ps.from_pandas(pdf) # Assert 'labels' or 'columns' parameter is set expected_error_message = "Need to specify at least one of 'labels' or 'columns'" with self.assertRaisesRegex(ValueError, expected_error_message): psdf.drop() # # Drop columns # # Assert using a str for 'labels' works self.assert_eq(psdf.drop("x", axis=1), pdf.drop("x", axis=1)) self.assert_eq((psdf + 1).drop("x", axis=1), (pdf + 1).drop("x", axis=1)) # Assert using a list for 'labels' works self.assert_eq(psdf.drop(["y", "z"], axis=1), pdf.drop(["y", "z"], axis=1)) self.assert_eq(psdf.drop(["x", "y", "z"], axis=1), pdf.drop(["x", "y", "z"], axis=1)) # Assert using 'columns' instead of 'labels' produces the same results self.assert_eq(psdf.drop(columns="x"), pdf.drop(columns="x")) self.assert_eq(psdf.drop(columns=["y", "z"]), pdf.drop(columns=["y", "z"])) self.assert_eq(psdf.drop(columns=["x", "y", "z"]), pdf.drop(columns=["x", "y", "z"])) self.assert_eq(psdf.drop(columns=[]), pdf.drop(columns=[])) columns = pd.MultiIndex.from_tuples([(1, "x"), (1, "y"), (2, "z")]) pdf.columns = columns psdf = ps.from_pandas(pdf) self.assert_eq(psdf.drop(columns=1), pdf.drop(columns=1)) self.assert_eq(psdf.drop(columns=(1, "x")), pdf.drop(columns=(1, "x"))) self.assert_eq(psdf.drop(columns=[(1, "x"), 2]), pdf.drop(columns=[(1, "x"), 2])) self.assert_eq( psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]), pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]), ) self.assertRaises(KeyError, lambda: psdf.drop(columns=3)) self.assertRaises(KeyError, lambda: psdf.drop(columns=(1, "z"))) pdf.index = pd.MultiIndex.from_tuples([("i", 0), ("j", 1)]) psdf = ps.from_pandas(pdf) self.assert_eq( psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]), pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]), ) # non-string names pdf = pd.DataFrame({10: [1, 2], 20: [3, 4], 30: [5, 6]}, index=np.random.rand(2)) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.drop(10, axis=1), pdf.drop(10, axis=1)) self.assert_eq(psdf.drop([20, 30], axis=1), pdf.drop([20, 30], axis=1)) # # Drop rows # pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"]) psdf = ps.from_pandas(pdf) # Given labels (and axis = 0) self.assert_eq(psdf.drop(labels="A", axis=0), pdf.drop(labels="A", axis=0)) self.assert_eq(psdf.drop(labels="A"), pdf.drop(labels="A")) self.assert_eq((psdf + 1).drop(labels="A"), (pdf + 1).drop(labels="A")) self.assert_eq(psdf.drop(labels=["A", "C"], axis=0), pdf.drop(labels=["A", "C"], axis=0)) self.assert_eq( psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0) ) with ps.option_context("compute.isin_limit", 2): self.assert_eq( psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0) ) # Given index self.assert_eq(psdf.drop(index="A"), pdf.drop(index="A")) self.assert_eq(psdf.drop(index=["A", "C"]), pdf.drop(index=["A", "C"])) self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"])) self.assert_eq(psdf.drop(index=[]), pdf.drop(index=[])) with ps.option_context("compute.isin_limit", 2): self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"])) # Non-string names pdf.index = [10, 20, 30] psdf = ps.from_pandas(pdf) self.assert_eq(psdf.drop(labels=10, axis=0), pdf.drop(labels=10, axis=0)) self.assert_eq(psdf.drop(labels=[10, 30], axis=0), pdf.drop(labels=[10, 30], axis=0)) self.assert_eq( psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0) ) with 
ps.option_context("compute.isin_limit", 2): self.assert_eq( psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0) ) # MultiIndex pdf.index = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")]) psdf = ps.from_pandas(pdf) self.assertRaises(NotImplementedError, lambda: psdf.drop(labels=[("a", "x")])) # # Drop rows and columns # pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"]) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.drop(index="A", columns="X"), pdf.drop(index="A", columns="X")) self.assert_eq( psdf.drop(index=["A", "C"], columns=["X", "Z"]), pdf.drop(index=["A", "C"], columns=["X", "Z"]), ) self.assert_eq( psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]), pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]), ) with ps.option_context("compute.isin_limit", 2): self.assert_eq( psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]), pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]), ) self.assert_eq( psdf.drop(index=[], columns=["X", "Z"]), pdf.drop(index=[], columns=["X", "Z"]), ) self.assert_eq( psdf.drop(index=["A", "B", "C"], columns=[]), pdf.drop(index=["A", "B", "C"], columns=[]), ) self.assert_eq( psdf.drop(index=[], columns=[]), pdf.drop(index=[], columns=[]), ) self.assertRaises( ValueError, lambda: psdf.drop(labels="A", axis=0, columns="X"), ) def _test_dropna(self, pdf, axis): psdf = ps.from_pandas(pdf) self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis)) self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all")) self.assert_eq(psdf.dropna(axis=axis, subset=["x"]), pdf.dropna(axis=axis, subset=["x"])) self.assert_eq(psdf.dropna(axis=axis, subset="x"), pdf.dropna(axis=axis, subset=["x"])) self.assert_eq( psdf.dropna(axis=axis, subset=["y", "z"]), pdf.dropna(axis=axis, subset=["y", "z"]) ) self.assert_eq( psdf.dropna(axis=axis, subset=["y", "z"], how="all"), pdf.dropna(axis=axis, subset=["y", "z"], how="all"), ) self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2)) self.assert_eq( psdf.dropna(axis=axis, thresh=1, subset=["y", "z"]), pdf.dropna(axis=axis, thresh=1, subset=["y", "z"]), ) pdf2 = pdf.copy() psdf2 = psdf.copy() pser = pdf2[pdf2.columns[0]] psser = psdf2[psdf2.columns[0]] pdf2.dropna(inplace=True, axis=axis) psdf2.dropna(inplace=True, axis=axis) self.assert_eq(psdf2, pdf2) self.assert_eq(psser, pser) # multi-index columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")]) if axis == 0: pdf.columns = columns else: pdf.index = columns psdf = ps.from_pandas(pdf) self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis)) self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all")) self.assert_eq( psdf.dropna(axis=axis, subset=[("a", "x")]), pdf.dropna(axis=axis, subset=[("a", "x")]) ) self.assert_eq( psdf.dropna(axis=axis, subset=("a", "x")), pdf.dropna(axis=axis, subset=[("a", "x")]) ) self.assert_eq( psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]), pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]), ) self.assert_eq( psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"), pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"), ) self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2)) self.assert_eq( psdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]), pdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]), ) def test_dropna_axis_index(self): pdf = pd.DataFrame( { "x": [np.nan, 2, 3, 4, np.nan, 6], "y": [1, 
2, np.nan, 4, np.nan, np.nan], "z": [1, 2, 3, 4, np.nan, np.nan], }, index=np.random.rand(6), ) psdf = ps.from_pandas(pdf) self._test_dropna(pdf, axis=0) # empty pdf = pd.DataFrame(index=np.random.rand(6)) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.dropna(), pdf.dropna()) self.assert_eq(psdf.dropna(how="all"), pdf.dropna(how="all")) self.assert_eq(psdf.dropna(thresh=0), pdf.dropna(thresh=0)) self.assert_eq(psdf.dropna(thresh=1), pdf.dropna(thresh=1)) with self.assertRaisesRegex(ValueError, "No axis named foo"): psdf.dropna(axis="foo") self.assertRaises(KeyError, lambda: psdf.dropna(subset="1")) with self.assertRaisesRegex(ValueError, "invalid how option: 1"): psdf.dropna(how=1) with self.assertRaisesRegex(TypeError, "must specify how or thresh"): psdf.dropna(how=None) def test_dropna_axis_column(self): pdf = pd.DataFrame( { "x": [np.nan, 2, 3, 4, np.nan, 6], "y": [1, 2, np.nan, 4, np.nan, np.nan], "z": [1, 2, 3, 4, np.nan, np.nan], }, index=[str(r) for r in np.random.rand(6)], ).T self._test_dropna(pdf, axis=1) psdf = ps.from_pandas(pdf) with self.assertRaisesRegex( ValueError, "The length of each subset must be the same as the index size." ): psdf.dropna(subset=(["x", "y"]), axis=1) # empty pdf = pd.DataFrame({"x": [], "y": [], "z": []}) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.dropna(axis=1), pdf.dropna(axis=1)) self.assert_eq(psdf.dropna(axis=1, how="all"), pdf.dropna(axis=1, how="all")) self.assert_eq(psdf.dropna(axis=1, thresh=0), pdf.dropna(axis=1, thresh=0)) self.assert_eq(psdf.dropna(axis=1, thresh=1), pdf.dropna(axis=1, thresh=1)) def test_dtype(self): pdf = pd.DataFrame( { "a": list("abc"), "b": list(range(1, 4)), "c": np.arange(3, 6).astype("i1"), "d": np.arange(4.0, 7.0, dtype="float64"), "e": [True, False, True], "f": pd.date_range("20130101", periods=3), }, index=np.random.rand(3), ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf, pdf) self.assertTrue((psdf.dtypes == pdf.dtypes).all()) # multi-index columns columns = pd.MultiIndex.from_tuples(zip(list("xxxyyz"), list("abcdef"))) pdf.columns = columns psdf.columns = columns self.assertTrue((psdf.dtypes == pdf.dtypes).all()) def test_fillna(self): pdf = pd.DataFrame( { "x": [np.nan, 2, 3, 4, np.nan, 6], "y": [1, 2, np.nan, 4, np.nan, np.nan], "z": [1, 2, 3, 4, np.nan, np.nan], }, index=np.random.rand(6), ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf, pdf) self.assert_eq(psdf.fillna(-1), pdf.fillna(-1)) self.assert_eq( psdf.fillna({"x": -1, "y": -2, "z": -5}), pdf.fillna({"x": -1, "y": -2, "z": -5}) ) self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill")) self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2)) self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill")) self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2)) pdf = pdf.set_index(["x", "y"]) psdf = ps.from_pandas(pdf) # check multi index self.assert_eq(psdf.fillna(-1), pdf.fillna(-1)) self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill")) self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill")) pser = pdf.z psser = psdf.z pdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True) psdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True) self.assert_eq(psdf, pdf) self.assert_eq(psser, pser) s_nan = pd.Series([-1, -2, -5], index=["x", "y", "z"], dtype=int) self.assert_eq(psdf.fillna(s_nan), pdf.fillna(s_nan)) with self.assertRaisesRegex(NotImplementedError, "fillna currently only"): psdf.fillna(-1, axis=1) with 
self.assertRaisesRegex(NotImplementedError, "fillna currently only"): psdf.fillna(-1, axis="columns") with self.assertRaisesRegex(ValueError, "limit parameter for value is not support now"): psdf.fillna(-1, limit=1) with self.assertRaisesRegex(TypeError, "Unsupported.*DataFrame"): psdf.fillna(pd.DataFrame({"x": [-1], "y": [-1], "z": [-1]})) with self.assertRaisesRegex(TypeError, "Unsupported.*int64"): psdf.fillna({"x": np.int64(-6), "y": np.int64(-4), "z": -5}) with self.assertRaisesRegex(ValueError, "Expecting 'pad', 'ffill', 'backfill' or 'bfill'."): psdf.fillna(method="xxx") with self.assertRaisesRegex( ValueError, "Must specify a fillna 'value' or 'method' parameter." ): psdf.fillna() # multi-index columns pdf = pd.DataFrame( { ("x", "a"): [np.nan, 2, 3, 4, np.nan, 6], ("x", "b"): [1, 2, np.nan, 4, np.nan, np.nan], ("y", "c"): [1, 2, 3, 4, np.nan, np.nan], }, index=np.random.rand(6), ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.fillna(-1), pdf.fillna(-1)) self.assert_eq( psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}), pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}), ) self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill")) self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2)) self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill")) self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2)) self.assert_eq(psdf.fillna({"x": -1}), pdf.fillna({"x": -1})) self.assert_eq( psdf.fillna({"x": -1, ("x", "b"): -2}), pdf.fillna({"x": -1, ("x", "b"): -2}) ) self.assert_eq( psdf.fillna({("x", "b"): -2, "x": -1}), pdf.fillna({("x", "b"): -2, "x": -1}) ) # check multi index pdf = pdf.set_index([("x", "a"), ("x", "b")]) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.fillna(-1), pdf.fillna(-1)) self.assert_eq( psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}), pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}), ) def test_isnull(self): pdf = pd.DataFrame( {"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")}, index=np.random.rand(6) ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.notnull(), pdf.notnull()) self.assert_eq(psdf.isnull(), pdf.isnull()) def test_to_datetime(self): pdf = pd.DataFrame( {"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}, index=np.random.rand(2) ) psdf = ps.from_pandas(pdf) self.assert_eq(pd.to_datetime(pdf), ps.to_datetime(psdf)) def test_nunique(self): pdf = pd.DataFrame({"A": [1, 2, 3], "B": [np.nan, 3, np.nan]}, index=np.random.rand(3)) psdf = ps.from_pandas(pdf) # Assert NaNs are dropped by default self.assert_eq(psdf.nunique(), pdf.nunique()) # Assert including NaN values self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False)) # Assert approximate counts self.assert_eq( ps.DataFrame({"A": range(100)}).nunique(approx=True), pd.Series([103], index=["A"]), ) self.assert_eq( ps.DataFrame({"A": range(100)}).nunique(approx=True, rsd=0.01), pd.Series([100], index=["A"]), ) # Assert unsupported axis value yet msg = 'axis should be either 0 or "index" currently.' 
with self.assertRaisesRegex(NotImplementedError, msg): psdf.nunique(axis=1) # multi-index columns columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")], names=["1", "2"]) pdf.columns = columns psdf.columns = columns self.assert_eq(psdf.nunique(), pdf.nunique()) self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False)) def test_sort_values(self): pdf = pd.DataFrame( {"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7) ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b")) for ascending in [True, False]: for na_position in ["first", "last"]: self.assert_eq( psdf.sort_values("a", ascending=ascending, na_position=na_position), pdf.sort_values("a", ascending=ascending, na_position=na_position), ) self.assert_eq(psdf.sort_values(["a", "b"]), pdf.sort_values(["a", "b"])) self.assert_eq( psdf.sort_values(["a", "b"], ascending=[False, True]), pdf.sort_values(["a", "b"], ascending=[False, True]), ) self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], ascending=[False])) self.assert_eq( psdf.sort_values(["a", "b"], na_position="first"), pdf.sort_values(["a", "b"], na_position="first"), ) self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], na_position="invalid")) pserA = pdf.a psserA = psdf.a self.assert_eq(psdf.sort_values("b", inplace=True), pdf.sort_values("b", inplace=True)) self.assert_eq(psdf, pdf) self.assert_eq(psserA, pserA) # multi-index columns pdf = pd.DataFrame( {("X", 10): [1, 2, 3, 4, 5, None, 7], ("X", 20): [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7), ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.sort_values(("X", 20)), pdf.sort_values(("X", 20))) self.assert_eq( psdf.sort_values([("X", 20), ("X", 10)]), pdf.sort_values([("X", 20), ("X", 10)]) ) self.assertRaisesRegex( ValueError, "For a multi-index, the label must be a tuple with elements", lambda: psdf.sort_values(["X"]), ) # non-string names pdf = pd.DataFrame( {10: [1, 2, 3, 4, 5, None, 7], 20: [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7) ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.sort_values(20), pdf.sort_values(20)) self.assert_eq(psdf.sort_values([20, 10]), pdf.sort_values([20, 10])) def test_sort_index(self): pdf = pd.DataFrame( {"A": [2, 1, np.nan], "B": [np.nan, 0, np.nan]}, index=["b", "a", np.nan] ) psdf = ps.from_pandas(pdf) # Assert invalid parameters self.assertRaises(NotImplementedError, lambda: psdf.sort_index(axis=1)) self.assertRaises(NotImplementedError, lambda: psdf.sort_index(kind="mergesort")) self.assertRaises(ValueError, lambda: psdf.sort_index(na_position="invalid")) # Assert default behavior without parameters self.assert_eq(psdf.sort_index(), pdf.sort_index()) # Assert sorting descending self.assert_eq(psdf.sort_index(ascending=False), pdf.sort_index(ascending=False)) # Assert sorting NA indices first self.assert_eq(psdf.sort_index(na_position="first"), pdf.sort_index(na_position="first")) # Assert sorting descending and NA indices first self.assert_eq( psdf.sort_index(ascending=False, na_position="first"), pdf.sort_index(ascending=False, na_position="first"), ) # Assert sorting inplace pserA = pdf.A psserA = psdf.A self.assertEqual(psdf.sort_index(inplace=True), pdf.sort_index(inplace=True)) self.assert_eq(psdf, pdf) self.assert_eq(psserA, pserA) # Assert multi-indices pdf = pd.DataFrame( {"A": range(4), "B": range(4)[::-1]}, index=[["b", "b", "a", "a"], [1, 0, 1, 0]] ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.sort_index(), pdf.sort_index()) 
self.assert_eq(psdf.sort_index(level=[1, 0]), pdf.sort_index(level=[1, 0])) self.assert_eq(psdf.reset_index().sort_index(), pdf.reset_index().sort_index()) # Assert with multi-index columns columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")]) pdf.columns = columns psdf.columns = columns self.assert_eq(psdf.sort_index(), pdf.sort_index()) def test_swaplevel(self): # MultiIndex with two levels arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]] pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color")) pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.swaplevel(), psdf.swaplevel()) self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1)) self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1)) self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color")) # MultiIndex with more than two levels arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"], ["l", "m", "s", "xs"]] pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size")) pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.swaplevel(), psdf.swaplevel()) self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1)) self.assert_eq(pdf.swaplevel(0, 2), psdf.swaplevel(0, 2)) self.assert_eq(pdf.swaplevel(1, 2), psdf.swaplevel(1, 2)) self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1)) self.assert_eq(pdf.swaplevel(-1, -2), psdf.swaplevel(-1, -2)) self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color")) self.assert_eq(pdf.swaplevel("number", "size"), psdf.swaplevel("number", "size")) self.assert_eq(pdf.swaplevel("color", "size"), psdf.swaplevel("color", "size")) self.assert_eq( pdf.swaplevel("color", "size", axis="index"), psdf.swaplevel("color", "size", axis="index"), ) self.assert_eq( pdf.swaplevel("color", "size", axis=0), psdf.swaplevel("color", "size", axis=0) ) pdf = pd.DataFrame( { "x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"], "x3": ["a", "b", "c", "d"], "x4": ["a", "b", "c", "d"], } ) pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size")) pdf.columns = pidx psdf = ps.from_pandas(pdf) self.assert_eq(pdf.swaplevel(axis=1), psdf.swaplevel(axis=1)) self.assert_eq(pdf.swaplevel(0, 1, axis=1), psdf.swaplevel(0, 1, axis=1)) self.assert_eq(pdf.swaplevel(0, 2, axis=1), psdf.swaplevel(0, 2, axis=1)) self.assert_eq(pdf.swaplevel(1, 2, axis=1), psdf.swaplevel(1, 2, axis=1)) self.assert_eq(pdf.swaplevel(1, 1, axis=1), psdf.swaplevel(1, 1, axis=1)) self.assert_eq(pdf.swaplevel(-1, -2, axis=1), psdf.swaplevel(-1, -2, axis=1)) self.assert_eq( pdf.swaplevel("number", "color", axis=1), psdf.swaplevel("number", "color", axis=1) ) self.assert_eq( pdf.swaplevel("number", "size", axis=1), psdf.swaplevel("number", "size", axis=1) ) self.assert_eq( pdf.swaplevel("color", "size", axis=1), psdf.swaplevel("color", "size", axis=1) ) self.assert_eq( pdf.swaplevel("color", "size", axis="columns"), psdf.swaplevel("color", "size", axis="columns"), ) # Error conditions self.assertRaises(AssertionError, lambda: ps.DataFrame([1, 2]).swaplevel()) self.assertRaises(IndexError, lambda: psdf.swaplevel(0, 9, axis=1)) self.assertRaises(KeyError, lambda: psdf.swaplevel("not_number", "color", axis=1)) self.assertRaises(ValueError, lambda: psdf.swaplevel(axis=2)) def test_swapaxes(self): pdf = pd.DataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["x", "y", "z"], columns=["a", "b", "c"] ) 
        psdf = ps.from_pandas(pdf)

        self.assert_eq(psdf.swapaxes(0, 1), pdf.swapaxes(0, 1))
        self.assert_eq(psdf.swapaxes(1, 0), pdf.swapaxes(1, 0))
        self.assert_eq(psdf.swapaxes("index", "columns"), pdf.swapaxes("index", "columns"))
        self.assert_eq(psdf.swapaxes("columns", "index"), pdf.swapaxes("columns", "index"))
        self.assert_eq((psdf + 1).swapaxes(0, 1), (pdf + 1).swapaxes(0, 1))

        self.assertRaises(AssertionError, lambda: psdf.swapaxes(0, 1, copy=False))
        self.assertRaises(ValueError, lambda: psdf.swapaxes(0, -1))

    def test_nlargest(self):
        pdf = pd.DataFrame(
            {"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
        )
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psdf.nlargest(n=5, columns="a"), pdf.nlargest(5, columns="a"))
        self.assert_eq(psdf.nlargest(n=5, columns=["a", "b"]), pdf.nlargest(5, columns=["a", "b"]))

    def test_nsmallest(self):
        pdf = pd.DataFrame(
            {"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
        )
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psdf.nsmallest(n=5, columns="a"), pdf.nsmallest(5, columns="a"))
        self.assert_eq(
            psdf.nsmallest(n=5, columns=["a", "b"]), pdf.nsmallest(5, columns=["a", "b"])
        )

    def test_xs(self):
        d = {
            "num_legs": [4, 4, 2, 2],
            "num_wings": [0, 0, 2, 2],
            "class": ["mammal", "mammal", "mammal", "bird"],
            "animal": ["cat", "dog", "bat", "penguin"],
            "locomotion": ["walks", "walks", "flies", "walks"],
        }
        pdf = pd.DataFrame(data=d)
        pdf = pdf.set_index(["class", "animal", "locomotion"])
        psdf = ps.from_pandas(pdf)

        self.assert_eq(psdf.xs("mammal"), pdf.xs("mammal"))
        self.assert_eq(psdf.xs(("mammal",)), pdf.xs(("mammal",)))
        self.assert_eq(psdf.xs(("mammal", "dog", "walks")), pdf.xs(("mammal", "dog", "walks")))
        self.assert_eq(
            ps.concat([psdf, psdf]).xs(("mammal", "dog", "walks")),
            pd.concat([pdf, pdf]).xs(("mammal", "dog", "walks")),
        )
        self.assert_eq(psdf.xs("cat", level=1), pdf.xs("cat", level=1))
        self.assert_eq(psdf.xs("flies", level=2), pdf.xs("flies", level=2))
        self.assert_eq(psdf.xs("mammal", level=-3), pdf.xs("mammal", level=-3))

        msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg): psdf.xs("num_wings", axis=1) with self.assertRaises(KeyError): psdf.xs(("mammal", "dog", "walk")) msg = r"'Key length \(4\) exceeds index depth \(3\)'" with self.assertRaisesRegex(KeyError, msg): psdf.xs(("mammal", "dog", "walks", "foo")) msg = "'key' should be a scalar value or tuple that contains scalar values" with self.assertRaisesRegex(TypeError, msg): psdf.xs(["mammal", "dog", "walks", "foo"]) self.assertRaises(IndexError, lambda: psdf.xs("foo", level=-4)) self.assertRaises(IndexError, lambda: psdf.xs("foo", level=3)) self.assertRaises(KeyError, lambda: psdf.xs(("dog", "walks"), level=1)) # non-string names pdf = pd.DataFrame(data=d) pdf = pdf.set_index(["class", "animal", "num_legs", "num_wings"]) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.xs(("mammal", "dog", 4)), pdf.xs(("mammal", "dog", 4))) self.assert_eq(psdf.xs(2, level=2), pdf.xs(2, level=2)) self.assert_eq((psdf + "a").xs(("mammal", "dog", 4)), (pdf + "a").xs(("mammal", "dog", 4))) self.assert_eq((psdf + "a").xs(2, level=2), (pdf + "a").xs(2, level=2)) def test_missing(self): psdf = self.psdf missing_functions = inspect.getmembers(_MissingPandasLikeDataFrame, inspect.isfunction) unsupported_functions = [ name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function" ] for name in unsupported_functions: with self.assertRaisesRegex( PandasNotImplementedError, "method.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name), ): getattr(psdf, name)() deprecated_functions = [ name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function" ] for name in deprecated_functions: with self.assertRaisesRegex( PandasNotImplementedError, "method.*DataFrame.*{}.*is deprecated".format(name) ): getattr(psdf, name)() missing_properties = inspect.getmembers( _MissingPandasLikeDataFrame, lambda o: isinstance(o, property) ) unsupported_properties = [ name for (name, type_) in missing_properties if type_.fget.__name__ == "unsupported_property" ] for name in unsupported_properties: with self.assertRaisesRegex( PandasNotImplementedError, "property.*DataFrame.*{}.*not implemented( yet\\.|\\. 
.+)".format(name), ): getattr(psdf, name) deprecated_properties = [ name for (name, type_) in missing_properties if type_.fget.__name__ == "deprecated_property" ] for name in deprecated_properties: with self.assertRaisesRegex( PandasNotImplementedError, "property.*DataFrame.*{}.*is deprecated".format(name) ): getattr(psdf, name) def test_to_numpy(self): pdf = pd.DataFrame( { "a": [4, 2, 3, 4, 8, 6], "b": [1, 2, 9, 4, 2, 4], "c": ["one", "three", "six", "seven", "one", "5"], }, index=np.random.rand(6), ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.to_numpy(), pdf.values) def test_to_pandas(self): pdf, psdf = self.df_pair self.assert_eq(psdf.to_pandas(), pdf) def test_isin(self): pdf = pd.DataFrame( { "a": [4, 2, 3, 4, 8, 6], "b": [1, 2, 9, 4, 2, 4], "c": ["one", "three", "six", "seven", "one", "5"], }, index=np.random.rand(6), ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.isin([4, "six"]), pdf.isin([4, "six"])) # Seems like pandas has a bug when passing `np.array` as parameter self.assert_eq(psdf.isin(np.array([4, "six"])), pdf.isin([4, "six"])) self.assert_eq( psdf.isin({"a": [2, 8], "c": ["three", "one"]}), pdf.isin({"a": [2, 8], "c": ["three", "one"]}), ) self.assert_eq( psdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}), pdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}), ) msg = "'DataFrame' object has no attribute {'e'}" with self.assertRaisesRegex(AttributeError, msg): psdf.isin({"e": [5, 7], "a": [1, 6]}) msg = "DataFrame and Series are not supported" with self.assertRaisesRegex(NotImplementedError, msg): psdf.isin(pdf) msg = "Values should be iterable, Series, DataFrame or dict." with self.assertRaisesRegex(TypeError, msg): psdf.isin(1) pdf = pd.DataFrame( { "a": [4, 2, 3, 4, 8, 6], "b": [1, None, 9, 4, None, 4], "c": [None, 5, None, 3, 2, 1], }, ) psdf = ps.from_pandas(pdf) if LooseVersion(pd.__version__) >= LooseVersion("1.2"): self.assert_eq(psdf.isin([4, 3, 1, 1, None]), pdf.isin([4, 3, 1, 1, None])) else: expected = pd.DataFrame( { "a": [True, False, True, True, False, False], "b": [True, False, False, True, False, True], "c": [False, False, False, True, False, True], } ) self.assert_eq(psdf.isin([4, 3, 1, 1, None]), expected) if LooseVersion(pd.__version__) >= LooseVersion("1.2"): self.assert_eq( psdf.isin({"b": [4, 3, 1, 1, None]}), pdf.isin({"b": [4, 3, 1, 1, None]}) ) else: expected = pd.DataFrame( { "a": [False, False, False, False, False, False], "b": [True, False, False, True, False, True], "c": [False, False, False, False, False, False], } ) self.assert_eq(psdf.isin({"b": [4, 3, 1, 1, None]}), expected) def test_merge(self): left_pdf = pd.DataFrame( { "lkey": ["foo", "bar", "baz", "foo", "bar", "l"], "value": [1, 2, 3, 5, 6, 7], "x": list("abcdef"), }, columns=["lkey", "value", "x"], ) right_pdf = pd.DataFrame( { "rkey": ["baz", "foo", "bar", "baz", "foo", "r"], "value": [4, 5, 6, 7, 8, 9], "y": list("efghij"), }, columns=["rkey", "value", "y"], ) right_ps = pd.Series(list("defghi"), name="x", index=[5, 6, 7, 8, 9, 10]) left_psdf = ps.from_pandas(left_pdf) right_psdf = ps.from_pandas(right_pdf) right_psser = ps.from_pandas(right_ps) def check(op, right_psdf=right_psdf, right_pdf=right_pdf): k_res = op(left_psdf, right_psdf) k_res = k_res.to_pandas() k_res = k_res.sort_values(by=list(k_res.columns)) k_res = k_res.reset_index(drop=True) p_res = op(left_pdf, right_pdf) p_res = p_res.sort_values(by=list(p_res.columns)) p_res = p_res.reset_index(drop=True) self.assert_eq(k_res, p_res) check(lambda left, right: left.merge(right)) check(lambda left, right: 
left.merge(right, on="value")) check(lambda left, right: left.merge(right, on=("value",))) check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey")) check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey"))) check( lambda left, right: left.set_index("lkey").merge( right, left_index=True, right_on="rkey" ) ) check( lambda left, right: left.merge( right.set_index("rkey"), left_on="lkey", right_index=True ) ) check( lambda left, right: left.set_index("lkey").merge( right.set_index("rkey"), left_index=True, right_index=True ) ) # MultiIndex check( lambda left, right: left.merge( right, left_on=["lkey", "value"], right_on=["rkey", "value"] ) ) check( lambda left, right: left.set_index(["lkey", "value"]).merge( right, left_index=True, right_on=["rkey", "value"] ) ) check( lambda left, right: left.merge( right.set_index(["rkey", "value"]), left_on=["lkey", "value"], right_index=True ) ) # TODO: when both left_index=True and right_index=True with multi-index # check(lambda left, right: left.set_index(['lkey', 'value']).merge( # right.set_index(['rkey', 'value']), left_index=True, right_index=True)) # join types for how in ["inner", "left", "right", "outer"]: check(lambda left, right: left.merge(right, on="value", how=how)) check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey", how=how)) # suffix check( lambda left, right: left.merge( right, left_on="lkey", right_on="rkey", suffixes=["_left", "_right"] ) ) # Test Series on the right check(lambda left, right: left.merge(right), right_psser, right_ps) check( lambda left, right: left.merge(right, left_on="x", right_on="x"), right_psser, right_ps ) check( lambda left, right: left.set_index("x").merge(right, left_index=True, right_on="x"), right_psser, right_ps, ) # Test join types with Series for how in ["inner", "left", "right", "outer"]: check(lambda left, right: left.merge(right, how=how), right_psser, right_ps) check( lambda left, right: left.merge(right, left_on="x", right_on="x", how=how), right_psser, right_ps, ) # suffix with Series check( lambda left, right: left.merge( right, suffixes=["_left", "_right"], how="outer", left_index=True, right_index=True, ), right_psser, right_ps, ) # multi-index columns left_columns = pd.MultiIndex.from_tuples([(10, "lkey"), (10, "value"), (20, "x")]) left_pdf.columns = left_columns left_psdf.columns = left_columns right_columns = pd.MultiIndex.from_tuples([(10, "rkey"), (10, "value"), (30, "y")]) right_pdf.columns = right_columns right_psdf.columns = right_columns check(lambda left, right: left.merge(right)) check(lambda left, right: left.merge(right, on=[(10, "value")])) check( lambda left, right: (left.set_index((10, "lkey")).merge(right.set_index((10, "rkey")))) ) check( lambda left, right: ( left.set_index((10, "lkey")).merge( right.set_index((10, "rkey")), left_index=True, right_index=True ) ) ) # TODO: when both left_index=True and right_index=True with multi-index columns # check(lambda left, right: left.merge(right, # left_on=[('a', 'lkey')], right_on=[('a', 'rkey')])) # check(lambda left, right: (left.set_index(('a', 'lkey')) # .merge(right, left_index=True, right_on=[('a', 'rkey')]))) # non-string names left_pdf.columns = [10, 100, 1000] left_psdf.columns = [10, 100, 1000] right_pdf.columns = [20, 100, 2000] right_psdf.columns = [20, 100, 2000] check(lambda left, right: left.merge(right)) check(lambda left, right: left.merge(right, on=[100])) check(lambda left, right: (left.set_index(10).merge(right.set_index(20)))) check( lambda left, 
right: ( left.set_index(10).merge(right.set_index(20), left_index=True, right_index=True) ) ) def test_merge_same_anchor(self): pdf = pd.DataFrame( { "lkey": ["foo", "bar", "baz", "foo", "bar", "l"], "rkey": ["baz", "foo", "bar", "baz", "foo", "r"], "value": [1, 1, 3, 5, 6, 7], "x": list("abcdef"), "y": list("efghij"), }, columns=["lkey", "rkey", "value", "x", "y"], ) psdf = ps.from_pandas(pdf) left_pdf = pdf[["lkey", "value", "x"]] right_pdf = pdf[["rkey", "value", "y"]] left_psdf = psdf[["lkey", "value", "x"]] right_psdf = psdf[["rkey", "value", "y"]] def check(op, right_psdf=right_psdf, right_pdf=right_pdf): k_res = op(left_psdf, right_psdf) k_res = k_res.to_pandas() k_res = k_res.sort_values(by=list(k_res.columns)) k_res = k_res.reset_index(drop=True) p_res = op(left_pdf, right_pdf) p_res = p_res.sort_values(by=list(p_res.columns)) p_res = p_res.reset_index(drop=True) self.assert_eq(k_res, p_res) check(lambda left, right: left.merge(right)) check(lambda left, right: left.merge(right, on="value")) check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey")) check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey"))) check( lambda left, right: left.set_index("lkey").merge( right, left_index=True, right_on="rkey" ) ) check( lambda left, right: left.merge( right.set_index("rkey"), left_on="lkey", right_index=True ) ) check( lambda left, right: left.set_index("lkey").merge( right.set_index("rkey"), left_index=True, right_index=True ) ) def test_merge_retains_indices(self): left_pdf = pd.DataFrame({"A": [0, 1]}) right_pdf = pd.DataFrame({"B": [1, 2]}, index=[1, 2]) left_psdf = ps.from_pandas(left_pdf) right_psdf = ps.from_pandas(right_pdf) self.assert_eq( left_psdf.merge(right_psdf, left_index=True, right_index=True), left_pdf.merge(right_pdf, left_index=True, right_index=True), ) self.assert_eq( left_psdf.merge(right_psdf, left_on="A", right_index=True), left_pdf.merge(right_pdf, left_on="A", right_index=True), ) self.assert_eq( left_psdf.merge(right_psdf, left_index=True, right_on="B"), left_pdf.merge(right_pdf, left_index=True, right_on="B"), ) self.assert_eq( left_psdf.merge(right_psdf, left_on="A", right_on="B"), left_pdf.merge(right_pdf, left_on="A", right_on="B"), ) def test_merge_how_parameter(self): left_pdf = pd.DataFrame({"A": [1, 2]}) right_pdf = pd.DataFrame({"B": ["x", "y"]}, index=[1, 2]) left_psdf = ps.from_pandas(left_pdf) right_psdf = ps.from_pandas(right_pdf) psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True) pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True) self.assert_eq( psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True), pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True), ) psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="left") pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="left") self.assert_eq( psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True), pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True), ) psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="right") pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="right") self.assert_eq( psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True), pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True), ) psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="outer") pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="outer") self.assert_eq( 
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True), pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True), ) def test_merge_raises(self): left = ps.DataFrame( {"value": [1, 2, 3, 5, 6], "x": list("abcde")}, columns=["value", "x"], index=["foo", "bar", "baz", "foo", "bar"], ) right = ps.DataFrame( {"value": [4, 5, 6, 7, 8], "y": list("fghij")}, columns=["value", "y"], index=["baz", "foo", "bar", "baz", "foo"], ) with self.assertRaisesRegex(ValueError, "No common columns to perform merge on"): left[["x"]].merge(right[["y"]]) with self.assertRaisesRegex(ValueError, "not a combination of both"): left.merge(right, on="value", left_on="x") with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"): left.merge(right, left_on="x") with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"): left.merge(right, left_index=True) with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"): left.merge(right, right_on="y") with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"): left.merge(right, right_index=True) with self.assertRaisesRegex( ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)" ): left.merge(right, left_on="value", right_on=["value", "y"]) with self.assertRaisesRegex( ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)" ): left.merge(right, left_on=["value", "x"], right_on="value") with self.assertRaisesRegex(ValueError, "['inner', 'left', 'right', 'full', 'outer']"): left.merge(right, left_index=True, right_index=True, how="foo") with self.assertRaisesRegex(KeyError, "id"): left.merge(right, on="id") def test_append(self): pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")) psdf = ps.from_pandas(pdf) other_pdf = pd.DataFrame([[3, 4], [5, 6]], columns=list("BC"), index=[2, 3]) other_psdf = ps.from_pandas(other_pdf) self.assert_eq(psdf.append(psdf), pdf.append(pdf)) self.assert_eq(psdf.append(psdf, ignore_index=True), pdf.append(pdf, ignore_index=True)) # Assert DataFrames with non-matching columns self.assert_eq(psdf.append(other_psdf), pdf.append(other_pdf)) # Assert appending a Series fails msg = "DataFrames.append() does not support appending Series to DataFrames" with self.assertRaises(TypeError, msg=msg): psdf.append(psdf["A"]) # Assert using the sort parameter raises an exception msg = "The 'sort' parameter is currently not supported" with self.assertRaises(NotImplementedError, msg=msg): psdf.append(psdf, sort=True) # Assert using 'verify_integrity' only raises an exception for overlapping indices self.assert_eq( psdf.append(other_psdf, verify_integrity=True), pdf.append(other_pdf, verify_integrity=True), ) msg = "Indices have overlapping values" with self.assertRaises(ValueError, msg=msg): psdf.append(psdf, verify_integrity=True) # Skip integrity verification when ignore_index=True self.assert_eq( psdf.append(psdf, ignore_index=True, verify_integrity=True), pdf.append(pdf, ignore_index=True, verify_integrity=True), ) # Assert appending multi-index DataFrames multi_index_pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[[2, 3], [4, 5]]) multi_index_psdf = ps.from_pandas(multi_index_pdf) other_multi_index_pdf = pd.DataFrame( [[5, 6], [7, 8]], columns=list("AB"), index=[[2, 3], [6, 7]] ) other_multi_index_psdf = ps.from_pandas(other_multi_index_pdf) self.assert_eq( multi_index_psdf.append(multi_index_psdf), multi_index_pdf.append(multi_index_pdf) ) # Assert DataFrames with non-matching columns self.assert_eq( 
multi_index_psdf.append(other_multi_index_psdf), multi_index_pdf.append(other_multi_index_pdf), ) # Assert using 'verify_integrity' only raises an exception for overlapping indices self.assert_eq( multi_index_psdf.append(other_multi_index_psdf, verify_integrity=True), multi_index_pdf.append(other_multi_index_pdf, verify_integrity=True), ) with self.assertRaises(ValueError, msg=msg): multi_index_psdf.append(multi_index_psdf, verify_integrity=True) # Skip integrity verification when ignore_index=True self.assert_eq( multi_index_psdf.append(multi_index_psdf, ignore_index=True, verify_integrity=True), multi_index_pdf.append(multi_index_pdf, ignore_index=True, verify_integrity=True), ) # Assert trying to append DataFrames with different index levels msg = "Both DataFrames have to have the same number of index levels" with self.assertRaises(ValueError, msg=msg): psdf.append(multi_index_psdf) # Skip index level check when ignore_index=True self.assert_eq( psdf.append(multi_index_psdf, ignore_index=True), pdf.append(multi_index_pdf, ignore_index=True), ) columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y")]) pdf.columns = columns psdf.columns = columns self.assert_eq(psdf.append(psdf), pdf.append(pdf)) def test_clip(self): pdf = pd.DataFrame( {"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3) ) psdf = ps.from_pandas(pdf) # Assert list-like values are not accepted for 'lower' and 'upper' msg = "List-like value are not supported for 'lower' and 'upper' at the moment" with self.assertRaises(TypeError, msg=msg): psdf.clip(lower=[1]) with self.assertRaises(TypeError, msg=msg): psdf.clip(upper=[1]) # Assert no lower or upper self.assert_eq(psdf.clip(), pdf.clip()) # Assert lower only self.assert_eq(psdf.clip(1), pdf.clip(1)) # Assert upper only self.assert_eq(psdf.clip(upper=3), pdf.clip(upper=3)) # Assert lower and upper self.assert_eq(psdf.clip(1, 3), pdf.clip(1, 3)) pdf["clip"] = pdf.A.clip(lower=1, upper=3) psdf["clip"] = psdf.A.clip(lower=1, upper=3) self.assert_eq(psdf, pdf) # Assert behavior on string values str_psdf = ps.DataFrame({"A": ["a", "b", "c"]}, index=np.random.rand(3)) self.assert_eq(str_psdf.clip(1, 3), str_psdf) def test_binary_operators(self): pdf = pd.DataFrame( {"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3) ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf + psdf.copy(), pdf + pdf.copy()) self.assert_eq(psdf + psdf.loc[:, ["A", "B"]], pdf + pdf.loc[:, ["A", "B"]]) self.assert_eq(psdf.loc[:, ["A", "B"]] + psdf, pdf.loc[:, ["A", "B"]] + pdf) self.assertRaisesRegex( ValueError, "it comes from a different dataframe", lambda: ps.range(10).add(ps.range(10)), ) self.assertRaisesRegex( TypeError, "add with a sequence is currently not supported", lambda: ps.range(10).add(ps.range(10).id), ) psdf_other = psdf.copy() psdf_other.columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")]) self.assertRaisesRegex( ValueError, "cannot join with no overlapping index names", lambda: psdf.add(psdf_other), ) def test_binary_operator_add(self): # Positive pdf = pd.DataFrame({"a": ["x"], "b": ["y"], "c": [1], "d": [2]}) psdf = ps.from_pandas(pdf) self.assert_eq(psdf["a"] + psdf["b"], pdf["a"] + pdf["b"]) self.assert_eq(psdf["c"] + psdf["d"], pdf["c"] + pdf["d"]) # Negative ks_err_msg = "Addition can not be applied to given types" self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + psdf["c"]) self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + psdf["a"]) self.assertRaisesRegex(TypeError, ks_err_msg, 
lambda: psdf["c"] + "literal") self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" + psdf["c"]) self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 + psdf["a"]) self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + 1) def test_binary_operator_sub(self): # Positive pdf = pd.DataFrame({"a": [2], "b": [1]}) psdf = ps.from_pandas(pdf) self.assert_eq(psdf["a"] - psdf["b"], pdf["a"] - pdf["b"]) # Negative psdf = ps.DataFrame({"a": ["x"], "b": [1]}) ks_err_msg = "Subtraction can not be applied to given types" self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - psdf["a"]) self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - "literal") self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" - psdf["b"]) ks_err_msg = "Subtraction can not be applied to strings" self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"]) self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 - psdf["a"]) self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - 1) psdf = ps.DataFrame({"a": ["x"], "b": ["y"]}) self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"]) def test_binary_operator_truediv(self): # Positive pdf = pd.DataFrame({"a": [3], "b": [2]}) psdf = ps.from_pandas(pdf) self.assert_eq(psdf["a"] / psdf["b"], pdf["a"] / pdf["b"]) # Negative psdf = ps.DataFrame({"a": ["x"], "b": [1]}) ks_err_msg = "True division can not be applied to given types" self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / psdf["a"]) self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / "literal") self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" / psdf["b"]) ks_err_msg = "True division can not be applied to strings" self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] / psdf["b"]) self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 / psdf["a"]) def test_binary_operator_floordiv(self): psdf = ps.DataFrame({"a": ["x"], "b": [1]}) ks_err_msg = "Floor division can not be applied to strings" self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] // psdf["b"]) self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 // psdf["a"]) ks_err_msg = "Floor division can not be applied to given types" self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // psdf["a"]) self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // "literal") self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" // psdf["b"]) def test_binary_operator_mod(self): # Positive pdf = pd.DataFrame({"a": [3], "b": [2]}) psdf = ps.from_pandas(pdf) self.assert_eq(psdf["a"] % psdf["b"], pdf["a"] % pdf["b"]) # Negative psdf = ps.DataFrame({"a": ["x"], "b": [1]}) ks_err_msg = "Modulo can not be applied to given types" self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % psdf["a"]) self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % "literal") ks_err_msg = "Modulo can not be applied to strings" self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] % psdf["b"]) self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 % psdf["a"]) def test_binary_operator_multiply(self): # Positive pdf = pd.DataFrame({"a": ["x", "y"], "b": [1, 2], "c": [3, 4]}) psdf = ps.from_pandas(pdf) self.assert_eq(psdf["b"] * psdf["c"], pdf["b"] * pdf["c"]) self.assert_eq(psdf["c"] * psdf["b"], pdf["c"] * pdf["b"]) self.assert_eq(psdf["a"] * psdf["b"], pdf["a"] * pdf["b"]) self.assert_eq(psdf["b"] * psdf["a"], pdf["b"] * pdf["a"]) self.assert_eq(psdf["a"] * 2, pdf["a"] * 2) 
self.assert_eq(psdf["b"] * 2, pdf["b"] * 2) self.assert_eq(2 * psdf["a"], 2 * pdf["a"]) self.assert_eq(2 * psdf["b"], 2 * pdf["b"]) # Negative psdf = ps.DataFrame({"a": ["x"], "b": [2]}) ks_err_msg = "Multiplication can not be applied to given types" self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] * "literal") self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["b"]) self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * "literal") self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * psdf["a"]) self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * 0.1) self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 0.1 * psdf["a"]) self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["a"]) def test_sample(self): pdf = pd.DataFrame({"A": [0, 2, 4]}) psdf = ps.from_pandas(pdf) # Make sure the tests run, but we can't check the result because they are non-deterministic. psdf.sample(frac=0.1) psdf.sample(frac=0.2, replace=True) psdf.sample(frac=0.2, random_state=5) psdf["A"].sample(frac=0.2) psdf["A"].sample(frac=0.2, replace=True) psdf["A"].sample(frac=0.2, random_state=5) with self.assertRaises(ValueError): psdf.sample() with self.assertRaises(NotImplementedError): psdf.sample(n=1) def test_add_prefix(self): pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4)) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_")) columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")]) pdf.columns = columns psdf.columns = columns self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_")) def test_add_suffix(self): pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4)) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series")) columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")]) pdf.columns = columns psdf.columns = columns self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series")) def test_join(self): # check basic function pdf1 = pd.DataFrame( {"key": ["K0", "K1", "K2", "K3"], "A": ["A0", "A1", "A2", "A3"]}, columns=["key", "A"] ) pdf2 = pd.DataFrame( {"key": ["K0", "K1", "K2"], "B": ["B0", "B1", "B2"]}, columns=["key", "B"] ) psdf1 = ps.from_pandas(pdf1) psdf2 = ps.from_pandas(pdf2) join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right") join_pdf.sort_values(by=list(join_pdf.columns), inplace=True) join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right") join_psdf.sort_values(by=list(join_psdf.columns), inplace=True) self.assert_eq(join_pdf, join_psdf) # join with duplicated columns in Series with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"): ks1 = ps.Series(["A1", "A5"], index=[1, 2], name="A") psdf1.join(ks1, how="outer") # join with duplicated columns in DataFrame with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"): psdf1.join(psdf2, how="outer") # check `on` parameter join_pdf = pdf1.join(pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right") join_pdf.sort_values(by=list(join_pdf.columns), inplace=True) join_psdf = psdf1.join(psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right") join_psdf.sort_values(by=list(join_psdf.columns), inplace=True) self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True)) join_pdf = pdf1.set_index("key").join( pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right" ) 
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True) join_psdf = psdf1.set_index("key").join( psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right" ) join_psdf.sort_values(by=list(join_psdf.columns), inplace=True) self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True)) # multi-index columns columns1 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "A")]) columns2 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "B")]) pdf1.columns = columns1 pdf2.columns = columns2 psdf1.columns = columns1 psdf2.columns = columns2 join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right") join_pdf.sort_values(by=list(join_pdf.columns), inplace=True) join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right") join_psdf.sort_values(by=list(join_psdf.columns), inplace=True) self.assert_eq(join_pdf, join_psdf) # check `on` parameter join_pdf = pdf1.join( pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right" ) join_pdf.sort_values(by=list(join_pdf.columns), inplace=True) join_psdf = psdf1.join( psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right" ) join_psdf.sort_values(by=list(join_psdf.columns), inplace=True) self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True)) join_pdf = pdf1.set_index(("x", "key")).join( pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right" ) join_pdf.sort_values(by=list(join_pdf.columns), inplace=True) join_psdf = psdf1.set_index(("x", "key")).join( psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right" ) join_psdf.sort_values(by=list(join_psdf.columns), inplace=True) self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True)) # multi-index midx1 = pd.MultiIndex.from_tuples( [("w", "a"), ("x", "b"), ("y", "c"), ("z", "d")], names=["index1", "index2"] ) midx2 = pd.MultiIndex.from_tuples( [("w", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"] ) pdf1.index = midx1 pdf2.index = midx2 psdf1 = ps.from_pandas(pdf1) psdf2 = ps.from_pandas(pdf2) join_pdf = pdf1.join(pdf2, on=["index1", "index2"], rsuffix="_right") join_pdf.sort_values(by=list(join_pdf.columns), inplace=True) join_psdf = psdf1.join(psdf2, on=["index1", "index2"], rsuffix="_right") join_psdf.sort_values(by=list(join_psdf.columns), inplace=True) self.assert_eq(join_pdf, join_psdf) with self.assertRaisesRegex( ValueError, r'len\(left_on\) must equal the number of levels in the index of "right"' ): psdf1.join(psdf2, on=["index1"], rsuffix="_right") def test_replace(self): pdf = pd.DataFrame( { "name": ["Ironman", "Captain America", "Thor", "Hulk"], "weapon": ["Mark-45", "Shield", "Mjolnir", "Smash"], }, index=np.random.rand(4), ) psdf = ps.from_pandas(pdf) with self.assertRaisesRegex( NotImplementedError, "replace currently works only for method='pad" ): psdf.replace(method="bfill") with self.assertRaisesRegex( NotImplementedError, "replace currently works only when limit=None" ): psdf.replace(limit=10) with self.assertRaisesRegex( NotImplementedError, "replace currently doesn't supports regex" ): psdf.replace(regex="") with self.assertRaisesRegex(ValueError, "Length of to_replace and value must be same"): psdf.replace(to_replace=["Ironman"], value=["Spiderman", "Doctor Strange"]) with self.assertRaisesRegex(TypeError, "Unsupported type function"): psdf.replace("Ironman", lambda x: "Spiderman") with self.assertRaisesRegex(TypeError, "Unsupported type function"): psdf.replace(lambda x: "Ironman", 
"Spiderman") self.assert_eq(psdf.replace("Ironman", "Spiderman"), pdf.replace("Ironman", "Spiderman")) self.assert_eq( psdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]), pdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]), ) self.assert_eq( psdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")), pdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")), ) # inplace pser = pdf.name psser = psdf.name pdf.replace("Ironman", "Spiderman", inplace=True) psdf.replace("Ironman", "Spiderman", inplace=True) self.assert_eq(psdf, pdf) self.assert_eq(psser, pser) pdf = pd.DataFrame( {"A": [0, 1, 2, 3, np.nan], "B": [5, 6, 7, 8, np.nan], "C": ["a", "b", "c", "d", None]}, index=np.random.rand(5), ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4)) self.assert_eq( psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]), pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]), ) self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200})) self.assert_eq( psdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100), pdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100), ) self.assert_eq( psdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}), pdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}), ) self.assert_eq( psdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}), pdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}), ) self.assert_eq(psdf.replace({"C": ["a", None]}, "e"), pdf.replace({"C": ["a", None]}, "e")) # multi-index columns columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")]) pdf.columns = columns psdf.columns = columns self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4)) self.assert_eq( psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]), pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]), ) self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200})) self.assert_eq( psdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100), pdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100), ) self.assert_eq( psdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}), pdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}), ) self.assert_eq( psdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}), pdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}), ) self.assert_eq( psdf.replace({("Y", "C"): ["a", None]}, "e"), pdf.replace({("Y", "C"): ["a", None]}, "e"), ) def test_update(self): # check base function def get_data(left_columns=None, right_columns=None): left_pdf = pd.DataFrame( {"A": ["1", "2", "3", "4"], "B": ["100", "200", np.nan, np.nan]}, columns=["A", "B"] ) right_pdf = pd.DataFrame( {"B": ["x", np.nan, "y", np.nan], "C": ["100", "200", "300", "400"]}, columns=["B", "C"], ) left_psdf = ps.DataFrame( {"A": ["1", "2", "3", "4"], "B": ["100", "200", None, None]}, columns=["A", "B"] ) right_psdf = ps.DataFrame( {"B": ["x", None, "y", None], "C": ["100", "200", "300", "400"]}, columns=["B", "C"] ) if left_columns is not None: left_pdf.columns = left_columns left_psdf.columns = left_columns if right_columns is not None: right_pdf.columns = right_columns right_psdf.columns = right_columns return left_psdf, left_pdf, right_psdf, right_pdf left_psdf, left_pdf, right_psdf, right_pdf = get_data() pser = left_pdf.B psser = left_psdf.B left_pdf.update(right_pdf) left_psdf.update(right_psdf) self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"])) 
self.assert_eq(psser.sort_index(), pser.sort_index()) left_psdf, left_pdf, right_psdf, right_pdf = get_data() left_pdf.update(right_pdf, overwrite=False) left_psdf.update(right_psdf, overwrite=False) self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"])) with self.assertRaises(NotImplementedError): left_psdf.update(right_psdf, join="right") # multi-index columns left_columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")]) right_columns = pd.MultiIndex.from_tuples([("X", "B"), ("Y", "C")]) left_psdf, left_pdf, right_psdf, right_pdf = get_data( left_columns=left_columns, right_columns=right_columns ) left_pdf.update(right_pdf) left_psdf.update(right_psdf) self.assert_eq( left_pdf.sort_values(by=[("X", "A"), ("X", "B")]), left_psdf.sort_values(by=[("X", "A"), ("X", "B")]), ) left_psdf, left_pdf, right_psdf, right_pdf = get_data( left_columns=left_columns, right_columns=right_columns ) left_pdf.update(right_pdf, overwrite=False) left_psdf.update(right_psdf, overwrite=False) self.assert_eq( left_pdf.sort_values(by=[("X", "A"), ("X", "B")]), left_psdf.sort_values(by=[("X", "A"), ("X", "B")]), ) right_columns = pd.MultiIndex.from_tuples([("Y", "B"), ("Y", "C")]) left_psdf, left_pdf, right_psdf, right_pdf = get_data( left_columns=left_columns, right_columns=right_columns ) left_pdf.update(right_pdf) left_psdf.update(right_psdf) self.assert_eq( left_pdf.sort_values(by=[("X", "A"), ("X", "B")]), left_psdf.sort_values(by=[("X", "A"), ("X", "B")]), ) def test_pivot_table_dtypes(self): pdf = pd.DataFrame( { "a": [4, 2, 3, 4, 8, 6], "b": [1, 2, 2, 4, 2, 4], "e": [1, 2, 2, 4, 2, 4], "c": [1, 2, 9, 4, 7, 4], }, index=np.random.rand(6), ) psdf = ps.from_pandas(pdf) # Skip columns comparison by reset_index res_df = psdf.pivot_table( index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"} ).dtypes.reset_index(drop=True) exp_df = pdf.pivot_table( index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"} ).dtypes.reset_index(drop=True) self.assert_eq(res_df, exp_df) # Results don't have the same column's name # Todo: self.assert_eq(psdf.pivot_table(columns="a", values="b").dtypes, # pdf.pivot_table(columns="a", values="b").dtypes) # Todo: self.assert_eq(psdf.pivot_table(index=['c'], columns="a", values="b").dtypes, # pdf.pivot_table(index=['c'], columns="a", values="b").dtypes) # Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes, # pdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes) # Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'], # columns="a", values="b", fill_value=999).dtypes, pdf.pivot_table(index=['e', 'c'], # columns="a", values="b", fill_value=999).dtypes) def test_pivot_table(self): pdf = pd.DataFrame( { "a": [4, 2, 3, 4, 8, 6], "b": [1, 2, 2, 4, 2, 4], "e": [10, 20, 20, 40, 20, 40], "c": [1, 2, 9, 4, 7, 4], "d": [-1, -2, -3, -4, -5, -6], }, index=np.random.rand(6), ) psdf = ps.from_pandas(pdf) # Checking if both DataFrames have the same results self.assert_eq( psdf.pivot_table(columns="a", values="b").sort_index(), pdf.pivot_table(columns="a", values="b").sort_index(), almost=True, ) self.assert_eq( psdf.pivot_table(index=["c"], columns="a", values="b").sort_index(), pdf.pivot_table(index=["c"], columns="a", values="b").sort_index(), almost=True, ) self.assert_eq( psdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(), pdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(), almost=True, ) self.assert_eq( psdf.pivot_table(index=["c"], 
columns="a", values=["b"], aggfunc="sum").sort_index(), pdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(), almost=True, ) self.assert_eq( psdf.pivot_table( index=["c"], columns="a", values=["b", "e"], aggfunc="sum" ).sort_index(), pdf.pivot_table( index=["c"], columns="a", values=["b", "e"], aggfunc="sum" ).sort_index(), almost=True, ) self.assert_eq( psdf.pivot_table( index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum" ).sort_index(), pdf.pivot_table( index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum" ).sort_index(), almost=True, ) self.assert_eq( psdf.pivot_table( index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"} ).sort_index(), pdf.pivot_table( index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"} ).sort_index(), almost=True, ) self.assert_eq( psdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(), pdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(), almost=True, ) self.assert_eq( psdf.pivot_table( index=["e", "c"], columns="a", values="b", fill_value=999 ).sort_index(), pdf.pivot_table(index=["e", "c"], columns="a", values="b", fill_value=999).sort_index(), almost=True, ) # multi-index columns columns = pd.MultiIndex.from_tuples( [("x", "a"), ("x", "b"), ("y", "e"), ("z", "c"), ("w", "d")] ) pdf.columns = columns psdf.columns = columns self.assert_eq( psdf.pivot_table(columns=("x", "a"), values=("x", "b")).sort_index(), pdf.pivot_table(columns=[("x", "a")], values=[("x", "b")]).sort_index(), almost=True, ) self.assert_eq( psdf.pivot_table( index=[("z", "c")], columns=("x", "a"), values=[("x", "b")] ).sort_index(), pdf.pivot_table( index=[("z", "c")], columns=[("x", "a")], values=[("x", "b")] ).sort_index(), almost=True, ) self.assert_eq( psdf.pivot_table( index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e")] ).sort_index(), pdf.pivot_table( index=[("z", "c")], columns=[("x", "a")], values=[("x", "b"), ("y", "e")] ).sort_index(), almost=True, ) self.assert_eq( psdf.pivot_table( index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e"), ("w", "d")] ).sort_index(), pdf.pivot_table( index=[("z", "c")], columns=[("x", "a")], values=[("x", "b"), ("y", "e"), ("w", "d")], ).sort_index(), almost=True, ) self.assert_eq( psdf.pivot_table( index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e")], aggfunc={("x", "b"): "mean", ("y", "e"): "sum"}, ).sort_index(), pdf.pivot_table( index=[("z", "c")], columns=[("x", "a")], values=[("x", "b"), ("y", "e")], aggfunc={("x", "b"): "mean", ("y", "e"): "sum"}, ).sort_index(), almost=True, ) def test_pivot_table_and_index(self): # https://github.com/databricks/koalas/issues/805 pdf = pd.DataFrame( { "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"], "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"], "C": [ "small", "large", "large", "small", "small", "large", "small", "small", "large", ], "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], "E": [2, 4, 5, 5, 6, 6, 8, 9, 9], }, columns=["A", "B", "C", "D", "E"], index=np.random.rand(9), ) psdf = ps.from_pandas(pdf) ptable = pdf.pivot_table( values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0 ).sort_index() ktable = psdf.pivot_table( values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0 ).sort_index() self.assert_eq(ktable, ptable) self.assert_eq(ktable.index, ptable.index) self.assert_eq(repr(ktable.index), repr(ptable.index)) def test_stack(self): 
pdf_single_level_cols = pd.DataFrame( [[0, 1], [2, 3]], index=["cat", "dog"], columns=["weight", "height"] ) psdf_single_level_cols = ps.from_pandas(pdf_single_level_cols) self.assert_eq( psdf_single_level_cols.stack().sort_index(), pdf_single_level_cols.stack().sort_index() ) multicol1 = pd.MultiIndex.from_tuples( [("weight", "kg"), ("weight", "pounds")], names=["x", "y"] ) pdf_multi_level_cols1 = pd.DataFrame( [[1, 2], [2, 4]], index=["cat", "dog"], columns=multicol1 ) psdf_multi_level_cols1 = ps.from_pandas(pdf_multi_level_cols1) self.assert_eq( psdf_multi_level_cols1.stack().sort_index(), pdf_multi_level_cols1.stack().sort_index() ) multicol2 = pd.MultiIndex.from_tuples([("weight", "kg"), ("height", "m")]) pdf_multi_level_cols2 = pd.DataFrame( [[1.0, 2.0], [3.0, 4.0]], index=["cat", "dog"], columns=multicol2 ) psdf_multi_level_cols2 = ps.from_pandas(pdf_multi_level_cols2) self.assert_eq( psdf_multi_level_cols2.stack().sort_index(), pdf_multi_level_cols2.stack().sort_index() ) pdf = pd.DataFrame( { ("y", "c"): [True, True], ("x", "b"): [False, False], ("x", "c"): [True, False], ("y", "a"): [False, True], } ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.stack().sort_index(), pdf.stack().sort_index()) self.assert_eq(psdf[[]].stack().sort_index(), pdf[[]].stack().sort_index(), almost=True) def test_unstack(self): pdf = pd.DataFrame( np.random.randn(3, 3), index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]), ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.unstack().sort_index(), pdf.unstack().sort_index(), almost=True) self.assert_eq( psdf.unstack().unstack().sort_index(), pdf.unstack().unstack().sort_index(), almost=True ) def test_pivot_errors(self): psdf = ps.range(10) with self.assertRaisesRegex(ValueError, "columns should be set"): psdf.pivot(index="id") with self.assertRaisesRegex(ValueError, "values should be set"): psdf.pivot(index="id", columns="id") def test_pivot_table_errors(self): pdf = pd.DataFrame( { "a": [4, 2, 3, 4, 8, 6], "b": [1, 2, 2, 4, 2, 4], "e": [1, 2, 2, 4, 2, 4], "c": [1, 2, 9, 4, 7, 4], }, index=np.random.rand(6), ) psdf = ps.from_pandas(pdf) self.assertRaises(KeyError, lambda: psdf.pivot_table(index=["c"], columns="a", values=5)) msg = "index should be a None or a list of columns." with self.assertRaisesRegex(TypeError, msg): psdf.pivot_table(index="c", columns="a", values="b") msg = "pivot_table doesn't support aggfunc as dict and without index." with self.assertRaisesRegex(NotImplementedError, msg): psdf.pivot_table(columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}) msg = "columns should be one column name." with self.assertRaisesRegex(TypeError, msg): psdf.pivot_table(columns=["a"], values=["b"], aggfunc={"b": "mean", "e": "sum"}) msg = "Columns in aggfunc must be the same as values." with self.assertRaisesRegex(ValueError, msg): psdf.pivot_table( index=["e", "c"], columns="a", values="b", aggfunc={"b": "mean", "e": "sum"} ) msg = "values can't be a list without index." with self.assertRaisesRegex(NotImplementedError, msg): psdf.pivot_table(columns="a", values=["b", "e"]) msg = "Wrong columns A." with self.assertRaisesRegex(ValueError, msg): psdf.pivot_table( index=["c"], columns="A", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"} ) msg = "values should be one column or list of columns." 
with self.assertRaisesRegex(TypeError, msg): psdf.pivot_table(columns="a", values=(["b"], ["c"])) msg = "aggfunc must be a dict mapping from column name to aggregate functions" with self.assertRaisesRegex(TypeError, msg): psdf.pivot_table(columns="a", values="b", aggfunc={"a": lambda x: sum(x)}) psdf = ps.DataFrame( { "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"], "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"], "C": [ "small", "large", "large", "small", "small", "large", "small", "small", "large", ], "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], "E": [2, 4, 5, 5, 6, 6, 8, 9, 9], }, columns=["A", "B", "C", "D", "E"], index=np.random.rand(9), ) msg = "values should be a numeric type." with self.assertRaisesRegex(TypeError, msg): psdf.pivot_table( index=["C"], columns="A", values=["B", "E"], aggfunc={"B": "mean", "E": "sum"} ) msg = "values should be a numeric type." with self.assertRaisesRegex(TypeError, msg): psdf.pivot_table(index=["C"], columns="A", values="B", aggfunc={"B": "mean"}) def test_transpose(self): # TODO: what if with random index? pdf1 = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]}, columns=["col1", "col2"]) psdf1 = ps.from_pandas(pdf1) pdf2 = pd.DataFrame( data={"score": [9, 8], "kids": [0, 0], "age": [12, 22]}, columns=["score", "kids", "age"], ) psdf2 = ps.from_pandas(pdf2) self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index()) self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index()) with option_context("compute.max_rows", None): self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index()) self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index()) pdf3 = pd.DataFrame( { ("cg1", "a"): [1, 2, 3], ("cg1", "b"): [4, 5, 6], ("cg2", "c"): [7, 8, 9], ("cg3", "d"): [9, 9, 9], }, index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]), ) psdf3 = ps.from_pandas(pdf3) self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index()) with option_context("compute.max_rows", None): self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index()) def _test_cummin(self, pdf, psdf): self.assert_eq(pdf.cummin(), psdf.cummin()) self.assert_eq(pdf.cummin(skipna=False), psdf.cummin(skipna=False)) self.assert_eq(pdf.cummin().sum(), psdf.cummin().sum()) def test_cummin(self): pdf = pd.DataFrame( [[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]], columns=list("AB"), index=np.random.rand(5), ) psdf = ps.from_pandas(pdf) self._test_cummin(pdf, psdf) def test_cummin_multiindex_columns(self): arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])] pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays) pdf.at["C", ("A", "two")] = None psdf = ps.from_pandas(pdf) self._test_cummin(pdf, psdf) def _test_cummax(self, pdf, psdf): self.assert_eq(pdf.cummax(), psdf.cummax()) self.assert_eq(pdf.cummax(skipna=False), psdf.cummax(skipna=False)) self.assert_eq(pdf.cummax().sum(), psdf.cummax().sum()) def test_cummax(self): pdf = pd.DataFrame( [[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]], columns=list("AB"), index=np.random.rand(5), ) psdf = ps.from_pandas(pdf) self._test_cummax(pdf, psdf) def test_cummax_multiindex_columns(self): arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])] pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays) pdf.at["C", ("A", "two")] = None psdf = ps.from_pandas(pdf) self._test_cummax(pdf, psdf) 
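    # Like the cummin/cummax helpers above, the cumsum/cumprod helpers below compare the
    # default skipna=True path, skipna=False, and a follow-up sum(); the cumprod comparisons
    # pass almost=True to tolerate floating-point drift.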
def _test_cumsum(self, pdf, psdf): self.assert_eq(pdf.cumsum(), psdf.cumsum()) self.assert_eq(pdf.cumsum(skipna=False), psdf.cumsum(skipna=False)) self.assert_eq(pdf.cumsum().sum(), psdf.cumsum().sum()) def test_cumsum(self): pdf = pd.DataFrame( [[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]], columns=list("AB"), index=np.random.rand(5), ) psdf = ps.from_pandas(pdf) self._test_cumsum(pdf, psdf) def test_cumsum_multiindex_columns(self): arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])] pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays) pdf.at["C", ("A", "two")] = None psdf = ps.from_pandas(pdf) self._test_cumsum(pdf, psdf) def _test_cumprod(self, pdf, psdf): self.assert_eq(pdf.cumprod(), psdf.cumprod(), almost=True) self.assert_eq(pdf.cumprod(skipna=False), psdf.cumprod(skipna=False), almost=True) self.assert_eq(pdf.cumprod().sum(), psdf.cumprod().sum(), almost=True) def test_cumprod(self): pdf = pd.DataFrame( [[2.0, 1.0, 1], [5, None, 2], [1.0, -1.0, -3], [2.0, 0, 4], [4.0, 9.0, 5]], columns=list("ABC"), index=np.random.rand(5), ) psdf = ps.from_pandas(pdf) self._test_cumprod(pdf, psdf) def test_cumprod_multiindex_columns(self): arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])] pdf = pd.DataFrame(np.random.rand(3, 4), index=["A", "C", "B"], columns=arrays) pdf.at["C", ("A", "two")] = None psdf = ps.from_pandas(pdf) self._test_cumprod(pdf, psdf) def test_drop_duplicates(self): pdf = pd.DataFrame( {"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5) ) psdf = ps.from_pandas(pdf) # inplace is False for keep in ["first", "last", False]: with self.subTest(keep=keep): self.assert_eq( pdf.drop_duplicates(keep=keep).sort_index(), psdf.drop_duplicates(keep=keep).sort_index(), ) self.assert_eq( pdf.drop_duplicates("a", keep=keep).sort_index(), psdf.drop_duplicates("a", keep=keep).sort_index(), ) self.assert_eq( pdf.drop_duplicates(["a", "b"], keep=keep).sort_index(), psdf.drop_duplicates(["a", "b"], keep=keep).sort_index(), ) self.assert_eq( pdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(), psdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(), ) self.assert_eq( pdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(), psdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(), ) columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")]) pdf.columns = columns psdf.columns = columns # inplace is False for keep in ["first", "last", False]: with self.subTest("multi-index columns", keep=keep): self.assert_eq( pdf.drop_duplicates(keep=keep).sort_index(), psdf.drop_duplicates(keep=keep).sort_index(), ) self.assert_eq( pdf.drop_duplicates(("x", "a"), keep=keep).sort_index(), psdf.drop_duplicates(("x", "a"), keep=keep).sort_index(), ) self.assert_eq( pdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(), psdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(), ) # inplace is True subset_list = [None, "a", ["a", "b"]] for subset in subset_list: pdf = pd.DataFrame( {"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5) ) psdf = ps.from_pandas(pdf) pser = pdf.a psser = psdf.a pdf.drop_duplicates(subset=subset, inplace=True) psdf.drop_duplicates(subset=subset, inplace=True) self.assert_eq(psdf.sort_index(), pdf.sort_index()) self.assert_eq(psser.sort_index(), pser.sort_index()) # multi-index columns, inplace is True subset_list = [None, ("x", 
"a"), [("x", "a"), ("y", "b")]] for subset in subset_list: pdf = pd.DataFrame( {"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5) ) psdf = ps.from_pandas(pdf) columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")]) pdf.columns = columns psdf.columns = columns pser = pdf[("x", "a")] psser = psdf[("x", "a")] pdf.drop_duplicates(subset=subset, inplace=True) psdf.drop_duplicates(subset=subset, inplace=True) self.assert_eq(psdf.sort_index(), pdf.sort_index()) self.assert_eq(psser.sort_index(), pser.sort_index()) # non-string names pdf = pd.DataFrame( {10: [1, 2, 2, 2, 3], 20: ["a", "a", "a", "c", "d"]}, index=np.random.rand(5) ) psdf = ps.from_pandas(pdf) self.assert_eq( pdf.drop_duplicates(10, keep=keep).sort_index(), psdf.drop_duplicates(10, keep=keep).sort_index(), ) self.assert_eq( pdf.drop_duplicates([10, 20], keep=keep).sort_index(), psdf.drop_duplicates([10, 20], keep=keep).sort_index(), ) def test_reindex(self): index = pd.Index(["A", "B", "C", "D", "E"]) columns = pd.Index(["numbers"]) pdf = pd.DataFrame([1.0, 2.0, 3.0, 4.0, None], index=index, columns=columns) psdf = ps.from_pandas(pdf) columns2 = pd.Index(["numbers", "2", "3"], name="cols2") self.assert_eq( pdf.reindex(columns=columns2).sort_index(), psdf.reindex(columns=columns2).sort_index(), ) columns = pd.Index(["numbers"], name="cols") pdf.columns = columns psdf.columns = columns self.assert_eq( pdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(), psdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(), ) self.assert_eq( pdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(), psdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(), ) self.assert_eq( pdf.reindex(index=["A", "B"]).sort_index(), psdf.reindex(index=["A", "B"]).sort_index() ) self.assert_eq( pdf.reindex(index=["A", "B", "2", "3"]).sort_index(), psdf.reindex(index=["A", "B", "2", "3"]).sort_index(), ) self.assert_eq( pdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(), psdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(), ) self.assert_eq( pdf.reindex(columns=["numbers"]).sort_index(), psdf.reindex(columns=["numbers"]).sort_index(), ) self.assert_eq( pdf.reindex(columns=["numbers"], copy=True).sort_index(), psdf.reindex(columns=["numbers"], copy=True).sort_index(), ) # Using float as fill_value to avoid int64/32 clash self.assert_eq( pdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(), psdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(), ) columns2 = pd.Index(["numbers", "2", "3"]) self.assert_eq( pdf.reindex(columns=columns2).sort_index(), psdf.reindex(columns=columns2).sort_index(), ) columns2 = pd.Index(["numbers", "2", "3"], name="cols2") self.assert_eq( pdf.reindex(columns=columns2).sort_index(), psdf.reindex(columns=columns2).sort_index(), ) # Reindexing single Index on single Index pindex2 = pd.Index(["A", "C", "D", "E", "0"], name="index2") kindex2 = ps.from_pandas(pindex2) for fill_value in [None, 0]: self.assert_eq( pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(), psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(), ) pindex2 = pd.DataFrame({"index2": ["A", "C", "D", "E", "0"]}).set_index("index2").index kindex2 = ps.from_pandas(pindex2) for fill_value in [None, 0]: self.assert_eq( pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(), psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(), ) # Reindexing MultiIndex on single Index pindex = 
pd.MultiIndex.from_tuples( [("A", "B"), ("C", "D"), ("F", "G")], names=["name1", "name2"] ) kindex = ps.from_pandas(pindex) self.assert_eq( pdf.reindex(index=pindex, fill_value=0.0).sort_index(), psdf.reindex(index=kindex, fill_value=0.0).sort_index(), ) self.assertRaises(TypeError, lambda: psdf.reindex(columns=["numbers", "2", "3"], axis=1)) self.assertRaises(TypeError, lambda: psdf.reindex(columns=["numbers", "2", "3"], axis=2)) self.assertRaises(TypeError, lambda: psdf.reindex(columns="numbers")) self.assertRaises(TypeError, lambda: psdf.reindex(index=["A", "B", "C"], axis=1)) self.assertRaises(TypeError, lambda: psdf.reindex(index=123)) # Reindexing MultiIndex on MultiIndex pdf = pd.DataFrame({"numbers": [1.0, 2.0, None]}, index=pindex) psdf = ps.from_pandas(pdf) pindex2 = pd.MultiIndex.from_tuples( [("A", "G"), ("C", "D"), ("I", "J")], names=["name1", "name2"] ) kindex2 = ps.from_pandas(pindex2) for fill_value in [None, 0.0]: self.assert_eq( pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(), psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(), ) pindex2 = ( pd.DataFrame({"index_level_1": ["A", "C", "I"], "index_level_2": ["G", "D", "J"]}) .set_index(["index_level_1", "index_level_2"]) .index ) kindex2 = ps.from_pandas(pindex2) for fill_value in [None, 0.0]: self.assert_eq( pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(), psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(), ) columns = pd.MultiIndex.from_tuples([("X", "numbers")], names=["cols1", "cols2"]) pdf.columns = columns psdf.columns = columns # Reindexing MultiIndex index on MultiIndex columns and MultiIndex index for fill_value in [None, 0.0]: self.assert_eq( pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(), psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(), ) index = pd.Index(["A", "B", "C", "D", "E"]) pdf = pd.DataFrame(data=[1.0, 2.0, 3.0, 4.0, None], index=index, columns=columns) psdf = ps.from_pandas(pdf) pindex2 = pd.Index(["A", "C", "D", "E", "0"], name="index2") kindex2 = ps.from_pandas(pindex2) # Reindexing single Index on MultiIndex columns and single Index for fill_value in [None, 0.0]: self.assert_eq( pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(), psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(), ) for fill_value in [None, 0.0]: self.assert_eq( pdf.reindex( columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=fill_value ).sort_index(), psdf.reindex( columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=fill_value ).sort_index(), ) columns2 = pd.MultiIndex.from_tuples( [("X", "numbers"), ("Y", "2"), ("Y", "3")], names=["cols3", "cols4"] ) self.assert_eq( pdf.reindex(columns=columns2).sort_index(), psdf.reindex(columns=columns2).sort_index(), ) self.assertRaises(TypeError, lambda: psdf.reindex(columns=["X"])) self.assertRaises(ValueError, lambda: psdf.reindex(columns=[("X",)])) def test_reindex_like(self): data = [[1.0, 2.0], [3.0, None], [None, 4.0]] index = pd.Index(["A", "B", "C"], name="index") columns = pd.Index(["numbers", "values"], name="cols") pdf = pd.DataFrame(data=data, index=index, columns=columns) psdf = ps.from_pandas(pdf) # Reindexing single Index on single Index data2 = [[5.0, None], [6.0, 7.0], [8.0, None]] index2 = pd.Index(["A", "C", "D"], name="index2") columns2 = pd.Index(["numbers", "F"], name="cols2") pdf2 = pd.DataFrame(data=data2, index=index2, columns=columns2) psdf2 = ps.from_pandas(pdf2) self.assert_eq( pdf.reindex_like(pdf2).sort_index(), 
psdf.reindex_like(psdf2).sort_index(), ) pdf2 = pd.DataFrame({"index_level_1": ["A", "C", "I"]}) psdf2 = ps.from_pandas(pdf2) self.assert_eq( pdf.reindex_like(pdf2.set_index(["index_level_1"])).sort_index(), psdf.reindex_like(psdf2.set_index(["index_level_1"])).sort_index(), ) # Reindexing MultiIndex on single Index index2 = pd.MultiIndex.from_tuples( [("A", "G"), ("C", "D"), ("I", "J")], names=["name3", "name4"] ) pdf2 = pd.DataFrame(data=data2, index=index2) psdf2 = ps.from_pandas(pdf2) self.assert_eq( pdf.reindex_like(pdf2).sort_index(), psdf.reindex_like(psdf2).sort_index(), ) self.assertRaises(TypeError, lambda: psdf.reindex_like(index2)) self.assertRaises(AssertionError, lambda: psdf2.reindex_like(psdf)) # Reindexing MultiIndex on MultiIndex columns2 = pd.MultiIndex.from_tuples( [("numbers", "third"), ("values", "second")], names=["cols3", "cols4"] ) pdf2.columns = columns2 psdf2.columns = columns2 columns = pd.MultiIndex.from_tuples( [("numbers", "first"), ("values", "second")], names=["cols1", "cols2"] ) index = pd.MultiIndex.from_tuples( [("A", "B"), ("C", "D"), ("E", "F")], names=["name1", "name2"] ) pdf = pd.DataFrame(data=data, index=index, columns=columns) psdf = ps.from_pandas(pdf) self.assert_eq( pdf.reindex_like(pdf2).sort_index(), psdf.reindex_like(psdf2).sort_index(), ) def test_melt(self): pdf = pd.DataFrame( {"A": [1, 3, 5], "B": [2, 4, 6], "C": [7, 8, 9]}, index=np.random.rand(3) ) psdf = ps.from_pandas(pdf) self.assert_eq( psdf.melt().sort_values(["variable", "value"]).reset_index(drop=True), pdf.melt().sort_values(["variable", "value"]), ) self.assert_eq( psdf.melt(id_vars="A").sort_values(["variable", "value"]).reset_index(drop=True), pdf.melt(id_vars="A").sort_values(["variable", "value"]), ) self.assert_eq( psdf.melt(id_vars=["A", "B"]).sort_values(["variable", "value"]).reset_index(drop=True), pdf.melt(id_vars=["A", "B"]).sort_values(["variable", "value"]), ) self.assert_eq( psdf.melt(id_vars=("A", "B")).sort_values(["variable", "value"]).reset_index(drop=True), pdf.melt(id_vars=("A", "B")).sort_values(["variable", "value"]), ) self.assert_eq( psdf.melt(id_vars=["A"], value_vars=["C"]) .sort_values(["variable", "value"]) .reset_index(drop=True), pdf.melt(id_vars=["A"], value_vars=["C"]).sort_values(["variable", "value"]), ) self.assert_eq( psdf.melt(id_vars=["A"], value_vars=["B"], var_name="myVarname", value_name="myValname") .sort_values(["myVarname", "myValname"]) .reset_index(drop=True), pdf.melt( id_vars=["A"], value_vars=["B"], var_name="myVarname", value_name="myValname" ).sort_values(["myVarname", "myValname"]), ) self.assert_eq( psdf.melt(value_vars=("A", "B")) .sort_values(["variable", "value"]) .reset_index(drop=True), pdf.melt(value_vars=("A", "B")).sort_values(["variable", "value"]), ) self.assertRaises(KeyError, lambda: psdf.melt(id_vars="Z")) self.assertRaises(KeyError, lambda: psdf.melt(value_vars="Z")) # multi-index columns TEN = 10.0 TWELVE = 20.0 columns = pd.MultiIndex.from_tuples([(TEN, "A"), (TEN, "B"), (TWELVE, "C")]) pdf.columns = columns psdf.columns = columns self.assert_eq( psdf.melt().sort_values(["variable_0", "variable_1", "value"]).reset_index(drop=True), pdf.melt().sort_values(["variable_0", "variable_1", "value"]), ) self.assert_eq( psdf.melt(id_vars=[(TEN, "A")]) .sort_values(["variable_0", "variable_1", "value"]) .reset_index(drop=True), pdf.melt(id_vars=[(TEN, "A")]) .sort_values(["variable_0", "variable_1", "value"]) .rename(columns=name_like_string), ) self.assert_eq( psdf.melt(id_vars=[(TEN, "A")], value_vars=[(TWELVE, 
"C")]) .sort_values(["variable_0", "variable_1", "value"]) .reset_index(drop=True), pdf.melt(id_vars=[(TEN, "A")], value_vars=[(TWELVE, "C")]) .sort_values(["variable_0", "variable_1", "value"]) .rename(columns=name_like_string), ) self.assert_eq( psdf.melt( id_vars=[(TEN, "A")], value_vars=[(TEN, "B")], var_name=["myV1", "myV2"], value_name="myValname", ) .sort_values(["myV1", "myV2", "myValname"]) .reset_index(drop=True), pdf.melt( id_vars=[(TEN, "A")], value_vars=[(TEN, "B")], var_name=["myV1", "myV2"], value_name="myValname", ) .sort_values(["myV1", "myV2", "myValname"]) .rename(columns=name_like_string), ) columns.names = ["v0", "v1"] pdf.columns = columns psdf.columns = columns self.assert_eq( psdf.melt().sort_values(["v0", "v1", "value"]).reset_index(drop=True), pdf.melt().sort_values(["v0", "v1", "value"]), ) self.assertRaises(ValueError, lambda: psdf.melt(id_vars=(TEN, "A"))) self.assertRaises(ValueError, lambda: psdf.melt(value_vars=(TEN, "A"))) self.assertRaises(KeyError, lambda: psdf.melt(id_vars=[TEN])) self.assertRaises(KeyError, lambda: psdf.melt(id_vars=[(TWELVE, "A")])) self.assertRaises(KeyError, lambda: psdf.melt(value_vars=[TWELVE])) self.assertRaises(KeyError, lambda: psdf.melt(value_vars=[(TWELVE, "A")])) # non-string names pdf.columns = [10.0, 20.0, 30.0] psdf.columns = [10.0, 20.0, 30.0] self.assert_eq( psdf.melt().sort_values(["variable", "value"]).reset_index(drop=True), pdf.melt().sort_values(["variable", "value"]), ) self.assert_eq( psdf.melt(id_vars=10.0).sort_values(["variable", "value"]).reset_index(drop=True), pdf.melt(id_vars=10.0).sort_values(["variable", "value"]), ) self.assert_eq( psdf.melt(id_vars=[10.0, 20.0]) .sort_values(["variable", "value"]) .reset_index(drop=True), pdf.melt(id_vars=[10.0, 20.0]).sort_values(["variable", "value"]), ) self.assert_eq( psdf.melt(id_vars=(10.0, 20.0)) .sort_values(["variable", "value"]) .reset_index(drop=True), pdf.melt(id_vars=(10.0, 20.0)).sort_values(["variable", "value"]), ) self.assert_eq( psdf.melt(id_vars=[10.0], value_vars=[30.0]) .sort_values(["variable", "value"]) .reset_index(drop=True), pdf.melt(id_vars=[10.0], value_vars=[30.0]).sort_values(["variable", "value"]), ) self.assert_eq( psdf.melt(value_vars=(10.0, 20.0)) .sort_values(["variable", "value"]) .reset_index(drop=True), pdf.melt(value_vars=(10.0, 20.0)).sort_values(["variable", "value"]), ) def test_all(self): pdf = pd.DataFrame( { "col1": [False, False, False], "col2": [True, False, False], "col3": [0, 0, 1], "col4": [0, 1, 2], "col5": [False, False, None], "col6": [True, False, None], }, index=np.random.rand(3), ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.all(), pdf.all()) columns = pd.MultiIndex.from_tuples( [ ("a", "col1"), ("a", "col2"), ("a", "col3"), ("b", "col4"), ("b", "col5"), ("c", "col6"), ] ) pdf.columns = columns psdf.columns = columns self.assert_eq(psdf.all(), pdf.all()) columns.names = ["X", "Y"] pdf.columns = columns psdf.columns = columns self.assert_eq(psdf.all(), pdf.all()) with self.assertRaisesRegex( NotImplementedError, 'axis should be either 0 or "index" currently.' 
): psdf.all(axis=1) def test_any(self): pdf = pd.DataFrame( { "col1": [False, False, False], "col2": [True, False, False], "col3": [0, 0, 1], "col4": [0, 1, 2], "col5": [False, False, None], "col6": [True, False, None], }, index=np.random.rand(3), ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.any(), pdf.any()) columns = pd.MultiIndex.from_tuples( [ ("a", "col1"), ("a", "col2"), ("a", "col3"), ("b", "col4"), ("b", "col5"), ("c", "col6"), ] ) pdf.columns = columns psdf.columns = columns self.assert_eq(psdf.any(), pdf.any()) columns.names = ["X", "Y"] pdf.columns = columns psdf.columns = columns self.assert_eq(psdf.any(), pdf.any()) with self.assertRaisesRegex( NotImplementedError, 'axis should be either 0 or "index" currently.' ): psdf.any(axis=1) def test_rank(self): pdf = pd.DataFrame( data={"col1": [1, 2, 3, 1], "col2": [3, 4, 3, 1]}, columns=["col1", "col2"], index=np.random.rand(4), ) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.rank().sort_index(), psdf.rank().sort_index()) self.assert_eq(pdf.rank().sum(), psdf.rank().sum()) self.assert_eq( pdf.rank(ascending=False).sort_index(), psdf.rank(ascending=False).sort_index() ) self.assert_eq(pdf.rank(method="min").sort_index(), psdf.rank(method="min").sort_index()) self.assert_eq(pdf.rank(method="max").sort_index(), psdf.rank(method="max").sort_index()) self.assert_eq( pdf.rank(method="first").sort_index(), psdf.rank(method="first").sort_index() ) self.assert_eq( pdf.rank(method="dense").sort_index(), psdf.rank(method="dense").sort_index() ) msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'" with self.assertRaisesRegex(ValueError, msg): psdf.rank(method="nothing") # multi-index columns columns = pd.MultiIndex.from_tuples([("x", "col1"), ("y", "col2")]) pdf.columns = columns psdf.columns = columns self.assert_eq(pdf.rank().sort_index(), psdf.rank().sort_index()) def test_round(self): pdf = pd.DataFrame( { "A": [0.028208, 0.038683, 0.877076], "B": [0.992815, 0.645646, 0.149370], "C": [0.173891, 0.577595, 0.491027], }, columns=["A", "B", "C"], index=np.random.rand(3), ) psdf = ps.from_pandas(pdf) pser = pd.Series([1, 0, 2], index=["A", "B", "C"]) psser = ps.Series([1, 0, 2], index=["A", "B", "C"]) self.assert_eq(pdf.round(2), psdf.round(2)) self.assert_eq(pdf.round({"A": 1, "C": 2}), psdf.round({"A": 1, "C": 2})) self.assert_eq(pdf.round({"A": 1, "D": 2}), psdf.round({"A": 1, "D": 2})) self.assert_eq(pdf.round(pser), psdf.round(psser)) msg = "decimals must be an integer, a dict-like or a Series" with self.assertRaisesRegex(TypeError, msg): psdf.round(1.5) # multi-index columns columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")]) pdf.columns = columns psdf.columns = columns pser = pd.Series([1, 0, 2], index=columns) psser = ps.Series([1, 0, 2], index=columns) self.assert_eq(pdf.round(2), psdf.round(2)) self.assert_eq( pdf.round({("X", "A"): 1, ("Y", "C"): 2}), psdf.round({("X", "A"): 1, ("Y", "C"): 2}) ) self.assert_eq(pdf.round({("X", "A"): 1, "Y": 2}), psdf.round({("X", "A"): 1, "Y": 2})) self.assert_eq(pdf.round(pser), psdf.round(psser)) # non-string names pdf = pd.DataFrame( { 10: [0.028208, 0.038683, 0.877076], 20: [0.992815, 0.645646, 0.149370], 30: [0.173891, 0.577595, 0.491027], }, index=np.random.rand(3), ) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.round({10: 1, 30: 2}), psdf.round({10: 1, 30: 2})) def test_shift(self): pdf = pd.DataFrame( { "Col1": [10, 20, 15, 30, 45], "Col2": [13, 23, 18, 33, 48], "Col3": [17, 27, 22, 37, 52], }, index=np.random.rand(5), ) psdf = ps.from_pandas(pdf) 
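        # shift() should agree with pandas for positive periods, chained shifts,
        # and (below) an explicit fill_value.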
        self.assert_eq(pdf.shift(3), psdf.shift(3))
        self.assert_eq(pdf.shift().shift(-1), psdf.shift().shift(-1))
        self.assert_eq(pdf.shift().sum().astype(int), psdf.shift().sum())

        # Need the expected result since pandas 0.23 does not support `fill_value` argument.
        pdf1 = pd.DataFrame(
            {"Col1": [0, 0, 0, 10, 20], "Col2": [0, 0, 0, 13, 23], "Col3": [0, 0, 0, 17, 27]},
            index=pdf.index,
        )
        self.assert_eq(pdf1, psdf.shift(periods=3, fill_value=0))
        msg = "should be an int"
        with self.assertRaisesRegex(TypeError, msg):
            psdf.shift(1.5)

        # multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "Col1"), ("x", "Col2"), ("y", "Col3")])
        pdf.columns = columns
        psdf.columns = columns
        self.assert_eq(pdf.shift(3), psdf.shift(3))
        self.assert_eq(pdf.shift().shift(-1), psdf.shift().shift(-1))

    def test_diff(self):
        pdf = pd.DataFrame(
            {"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
            index=np.random.rand(6),
        )
        psdf = ps.from_pandas(pdf)

        self.assert_eq(pdf.diff(), psdf.diff())
        self.assert_eq(pdf.diff().diff(-1), psdf.diff().diff(-1))
        self.assert_eq(pdf.diff().sum().astype(int), psdf.diff().sum())

        msg = "should be an int"
        with self.assertRaisesRegex(TypeError, msg):
            psdf.diff(1.5)
        msg = 'axis should be either 0 or "index" currently.'
        with self.assertRaisesRegex(NotImplementedError, msg):
            psdf.diff(axis=1)

        # multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "Col1"), ("x", "Col2"), ("y", "Col3")])
        pdf.columns = columns
        psdf.columns = columns
        self.assert_eq(pdf.diff(), psdf.diff())

    def test_duplicated(self):
        pdf = pd.DataFrame(
            {"a": [1, 1, 2, 3], "b": [1, 1, 1, 4], "c": [1, 1, 1, 5]}, index=np.random.rand(4)
        )
        psdf = ps.from_pandas(pdf)

        self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
        self.assert_eq(
            pdf.duplicated(keep="last").sort_index(),
            psdf.duplicated(keep="last").sort_index(),
        )
        self.assert_eq(
            pdf.duplicated(keep=False).sort_index(),
            psdf.duplicated(keep=False).sort_index(),
        )
        self.assert_eq(
            pdf.duplicated(subset="b").sort_index(),
            psdf.duplicated(subset="b").sort_index(),
        )
        self.assert_eq(
            pdf.duplicated(subset=["b"]).sort_index(),
            psdf.duplicated(subset=["b"]).sort_index(),
        )
        with self.assertRaisesRegex(ValueError, "'keep' only supports 'first', 'last' and False"):
            psdf.duplicated(keep="false")
        with self.assertRaisesRegex(KeyError, "'d'"):
            psdf.duplicated(subset=["d"])

        pdf.index.name = "x"
        psdf.index.name = "x"
        self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())

        # multi-index
        self.assert_eq(
            pdf.set_index("a", append=True).duplicated().sort_index(),
            psdf.set_index("a", append=True).duplicated().sort_index(),
        )
        self.assert_eq(
            pdf.set_index("a", append=True).duplicated(keep=False).sort_index(),
            psdf.set_index("a", append=True).duplicated(keep=False).sort_index(),
        )
        self.assert_eq(
            pdf.set_index("a", append=True).duplicated(subset=["b"]).sort_index(),
            psdf.set_index("a", append=True).duplicated(subset=["b"]).sort_index(),
        )

        # multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
        pdf.columns = columns
        psdf.columns = columns
        self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
        self.assert_eq(
            pdf.duplicated(subset=("x", "b")).sort_index(),
            psdf.duplicated(subset=("x", "b")).sort_index(),
        )
        self.assert_eq(
            pdf.duplicated(subset=[("x", "b")]).sort_index(),
            psdf.duplicated(subset=[("x", "b")]).sort_index(),
        )

        # non-string names
        pdf = pd.DataFrame(
            {10: [1, 1, 2, 3], 20: [1, 1, 1, 4], 30: [1, 1, 1, 5]}, index=np.random.rand(4)
        )
        psdf = ps.from_pandas(pdf)
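        # duplicated() should also accept a non-string column label as `subset`.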
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index()) self.assert_eq( pdf.duplicated(subset=10).sort_index(), psdf.duplicated(subset=10).sort_index(), ) def test_ffill(self): idx = np.random.rand(6) pdf = pd.DataFrame( { "x": [np.nan, 2, 3, 4, np.nan, 6], "y": [1, 2, np.nan, 4, np.nan, np.nan], "z": [1, 2, 3, 4, np.nan, np.nan], }, index=idx, ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.ffill(), pdf.ffill()) self.assert_eq(psdf.ffill(limit=1), pdf.ffill(limit=1)) pser = pdf.y psser = psdf.y psdf.ffill(inplace=True) pdf.ffill(inplace=True) self.assert_eq(psdf, pdf) self.assert_eq(psser, pser) self.assert_eq(psser[idx[2]], pser[idx[2]]) def test_bfill(self): idx = np.random.rand(6) pdf = pd.DataFrame( { "x": [np.nan, 2, 3, 4, np.nan, 6], "y": [1, 2, np.nan, 4, np.nan, np.nan], "z": [1, 2, 3, 4, np.nan, np.nan], }, index=idx, ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.bfill(), pdf.bfill()) self.assert_eq(psdf.bfill(limit=1), pdf.bfill(limit=1)) pser = pdf.x psser = psdf.x psdf.bfill(inplace=True) pdf.bfill(inplace=True) self.assert_eq(psdf, pdf) self.assert_eq(psser, pser) self.assert_eq(psser[idx[0]], pser[idx[0]]) def test_filter(self): pdf = pd.DataFrame( { "aa": ["aa", "bd", "bc", "ab", "ce"], "ba": [1, 2, 3, 4, 5], "cb": [1.0, 2.0, 3.0, 4.0, 5.0], "db": [1.0, np.nan, 3.0, np.nan, 5.0], } ) pdf = pdf.set_index("aa") psdf = ps.from_pandas(pdf) self.assert_eq( psdf.filter(items=["ab", "aa"], axis=0).sort_index(), pdf.filter(items=["ab", "aa"], axis=0).sort_index(), ) with option_context("compute.isin_limit", 0): self.assert_eq( psdf.filter(items=["ab", "aa"], axis=0).sort_index(), pdf.filter(items=["ab", "aa"], axis=0).sort_index(), ) self.assert_eq( psdf.filter(items=["ba", "db"], axis=1).sort_index(), pdf.filter(items=["ba", "db"], axis=1).sort_index(), ) self.assert_eq(psdf.filter(like="b", axis="index"), pdf.filter(like="b", axis="index")) self.assert_eq(psdf.filter(like="c", axis="columns"), pdf.filter(like="c", axis="columns")) self.assert_eq( psdf.filter(regex="b.*", axis="index"), pdf.filter(regex="b.*", axis="index") ) self.assert_eq( psdf.filter(regex="b.*", axis="columns"), pdf.filter(regex="b.*", axis="columns") ) pdf = pdf.set_index("ba", append=True) psdf = ps.from_pandas(pdf) self.assert_eq( psdf.filter(items=[("aa", 1), ("bd", 2)], axis=0).sort_index(), pdf.filter(items=[("aa", 1), ("bd", 2)], axis=0).sort_index(), ) with self.assertRaisesRegex(TypeError, "Unsupported type list"): psdf.filter(items=[["aa", 1], ("bd", 2)], axis=0) with self.assertRaisesRegex(ValueError, "The item should not be empty."): psdf.filter(items=[(), ("bd", 2)], axis=0) self.assert_eq(psdf.filter(like="b", axis=0), pdf.filter(like="b", axis=0)) self.assert_eq(psdf.filter(regex="b.*", axis=0), pdf.filter(regex="b.*", axis=0)) with self.assertRaisesRegex(ValueError, "items should be a list-like object"): psdf.filter(items="b") with self.assertRaisesRegex(ValueError, "No axis named"): psdf.filter(regex="b.*", axis=123) with self.assertRaisesRegex(TypeError, "Must pass either `items`, `like`"): psdf.filter() with self.assertRaisesRegex(TypeError, "mutually exclusive"): psdf.filter(regex="b.*", like="aaa") # multi-index columns pdf = pd.DataFrame( { ("x", "aa"): ["aa", "ab", "bc", "bd", "ce"], ("x", "ba"): [1, 2, 3, 4, 5], ("y", "cb"): [1.0, 2.0, 3.0, 4.0, 5.0], ("z", "db"): [1.0, np.nan, 3.0, np.nan, 5.0], } ) pdf = pdf.set_index(("x", "aa")) psdf = ps.from_pandas(pdf) self.assert_eq( psdf.filter(items=["ab", "aa"], axis=0).sort_index(), pdf.filter(items=["ab", "aa"], 
axis=0).sort_index(), ) self.assert_eq( psdf.filter(items=[("x", "ba"), ("z", "db")], axis=1).sort_index(), pdf.filter(items=[("x", "ba"), ("z", "db")], axis=1).sort_index(), ) self.assert_eq(psdf.filter(like="b", axis="index"), pdf.filter(like="b", axis="index")) self.assert_eq(psdf.filter(like="c", axis="columns"), pdf.filter(like="c", axis="columns")) self.assert_eq( psdf.filter(regex="b.*", axis="index"), pdf.filter(regex="b.*", axis="index") ) self.assert_eq( psdf.filter(regex="b.*", axis="columns"), pdf.filter(regex="b.*", axis="columns") ) def test_pipe(self): psdf = ps.DataFrame( {"category": ["A", "A", "B"], "col1": [1, 2, 3], "col2": [4, 5, 6]}, columns=["category", "col1", "col2"], ) self.assertRaisesRegex( ValueError, "arg is both the pipe target and a keyword argument", lambda: psdf.pipe((lambda x: x, "arg"), arg="1"), ) def test_transform(self): pdf = pd.DataFrame( { "a": [1, 2, 3, 4, 5, 6] * 100, "b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100, "c": [1, 4, 9, 16, 25, 36] * 100, }, columns=["a", "b", "c"], index=np.random.rand(600), ) psdf = ps.DataFrame(pdf) self.assert_eq( psdf.transform(lambda x: x + 1).sort_index(), pdf.transform(lambda x: x + 1).sort_index(), ) self.assert_eq( psdf.transform(lambda x, y: x + y, y=2).sort_index(), pdf.transform(lambda x, y: x + y, y=2).sort_index(), ) with option_context("compute.shortcut_limit", 500): self.assert_eq( psdf.transform(lambda x: x + 1).sort_index(), pdf.transform(lambda x: x + 1).sort_index(), ) self.assert_eq( psdf.transform(lambda x, y: x + y, y=1).sort_index(), pdf.transform(lambda x, y: x + y, y=1).sort_index(), ) with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"): psdf.transform(1) # multi-index columns columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")]) pdf.columns = columns psdf.columns = columns self.assert_eq( psdf.transform(lambda x: x + 1).sort_index(), pdf.transform(lambda x: x + 1).sort_index(), ) with option_context("compute.shortcut_limit", 500): self.assert_eq( psdf.transform(lambda x: x + 1).sort_index(), pdf.transform(lambda x: x + 1).sort_index(), ) def test_apply(self): pdf = pd.DataFrame( { "a": [1, 2, 3, 4, 5, 6] * 100, "b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100, "c": [1, 4, 9, 16, 25, 36] * 100, }, columns=["a", "b", "c"], index=np.random.rand(600), ) psdf = ps.DataFrame(pdf) self.assert_eq( psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index() ) self.assert_eq( psdf.apply(lambda x, b: x + b, args=(1,)).sort_index(), pdf.apply(lambda x, b: x + b, args=(1,)).sort_index(), ) self.assert_eq( psdf.apply(lambda x, b: x + b, b=1).sort_index(), pdf.apply(lambda x, b: x + b, b=1).sort_index(), ) with option_context("compute.shortcut_limit", 500): self.assert_eq( psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index() ) self.assert_eq( psdf.apply(lambda x, b: x + b, args=(1,)).sort_index(), pdf.apply(lambda x, b: x + b, args=(1,)).sort_index(), ) self.assert_eq( psdf.apply(lambda x, b: x + b, b=1).sort_index(), pdf.apply(lambda x, b: x + b, b=1).sort_index(), ) # returning a Series self.assert_eq( psdf.apply(lambda x: len(x), axis=1).sort_index(), pdf.apply(lambda x: len(x), axis=1).sort_index(), ) self.assert_eq( psdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(), pdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(), ) with option_context("compute.shortcut_limit", 500): self.assert_eq( psdf.apply(lambda x: len(x), axis=1).sort_index(), pdf.apply(lambda x: len(x), axis=1).sort_index(), 
) self.assert_eq( psdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(), pdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(), ) with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"): psdf.apply(1) with self.assertRaisesRegex(TypeError, "The given function.*1 or 'column'; however"): def f1(_) -> ps.DataFrame[int]: pass psdf.apply(f1, axis=0) with self.assertRaisesRegex(TypeError, "The given function.*0 or 'index'; however"): def f2(_) -> ps.Series[int]: pass psdf.apply(f2, axis=1) # multi-index columns columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")]) pdf.columns = columns psdf.columns = columns self.assert_eq( psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index() ) with option_context("compute.shortcut_limit", 500): self.assert_eq( psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index() ) # returning a Series self.assert_eq( psdf.apply(lambda x: len(x), axis=1).sort_index(), pdf.apply(lambda x: len(x), axis=1).sort_index(), ) with option_context("compute.shortcut_limit", 500): self.assert_eq( psdf.apply(lambda x: len(x), axis=1).sort_index(), pdf.apply(lambda x: len(x), axis=1).sort_index(), ) def test_apply_with_type(self): pdf = self.pdf psdf = ps.from_pandas(pdf) def identify1(x) -> ps.DataFrame[int, int]: return x # Type hints set the default column names, and we use default index for # pandas API on Spark. Here we ignore both diff. actual = psdf.apply(identify1, axis=1) expected = pdf.apply(identify1, axis=1) self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy())) self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy())) def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405 return x actual = psdf.apply(identify2, axis=1) expected = pdf.apply(identify2, axis=1) self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy())) self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy())) def test_apply_batch(self): pdf = pd.DataFrame( { "a": [1, 2, 3, 4, 5, 6] * 100, "b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100, "c": [1, 4, 9, 16, 25, 36] * 100, }, columns=["a", "b", "c"], index=np.random.rand(600), ) psdf = ps.DataFrame(pdf) self.assert_eq( psdf.pandas_on_spark.apply_batch(lambda pdf, a: pdf + a, args=(1,)).sort_index(), (pdf + 1).sort_index(), ) with option_context("compute.shortcut_limit", 500): self.assert_eq( psdf.pandas_on_spark.apply_batch(lambda pdf: pdf + 1).sort_index(), (pdf + 1).sort_index(), ) self.assert_eq( psdf.pandas_on_spark.apply_batch(lambda pdf, b: pdf + b, b=1).sort_index(), (pdf + 1).sort_index(), ) with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"): psdf.pandas_on_spark.apply_batch(1) with self.assertRaisesRegex(TypeError, "The given function.*frame as its type hints"): def f2(_) -> ps.Series[int]: pass psdf.pandas_on_spark.apply_batch(f2) with self.assertRaisesRegex(ValueError, "The given function should return a frame"): psdf.pandas_on_spark.apply_batch(lambda pdf: 1) # multi-index columns columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")]) pdf.columns = columns psdf.columns = columns self.assert_eq( psdf.pandas_on_spark.apply_batch(lambda x: x + 1).sort_index(), (pdf + 1).sort_index() ) with option_context("compute.shortcut_limit", 500): self.assert_eq( psdf.pandas_on_spark.apply_batch(lambda x: x + 1).sort_index(), (pdf + 1).sort_index(), ) def 
test_apply_batch_with_type(self): pdf = self.pdf psdf = ps.from_pandas(pdf) def identify1(x) -> ps.DataFrame[int, int]: return x # Type hints set the default column names, and we use default index for # pandas API on Spark. Here we ignore both diff. actual = psdf.pandas_on_spark.apply_batch(identify1) expected = pdf self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy())) self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy())) def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405 return x actual = psdf.pandas_on_spark.apply_batch(identify2) expected = pdf self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy())) self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy())) pdf = pd.DataFrame( {"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [[e] for e in [4, 5, 6, 3, 2, 1, 0, 0, 0]]}, index=np.random.rand(9), ) psdf = ps.from_pandas(pdf) def identify3(x) -> ps.DataFrame[float, [int, List[int]]]: return x actual = psdf.pandas_on_spark.apply_batch(identify3) actual.columns = ["a", "b"] self.assert_eq(actual, pdf) # For NumPy typing, NumPy version should be 1.21+ and Python version should be 3.8+ if sys.version_info >= (3, 8) and LooseVersion(np.__version__) >= LooseVersion("1.21"): import numpy.typing as ntp psdf = ps.from_pandas(pdf) def identify4( x, ) -> ps.DataFrame[float, [int, ntp.NDArray[int]]]: # type: ignore[name-defined] return x actual = psdf.pandas_on_spark.apply_batch(identify4) actual.columns = ["a", "b"] self.assert_eq(actual, pdf) arrays = [[1, 2, 3, 4, 5, 6, 7, 8, 9], ["a", "b", "c", "d", "e", "f", "g", "h", "i"]] idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color")) pdf = pd.DataFrame( {"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [[e] for e in [4, 5, 6, 3, 2, 1, 0, 0, 0]]}, index=idx, ) psdf = ps.from_pandas(pdf) def identify4(x) -> ps.DataFrame[[int, str], [int, List[int]]]: return x actual = psdf.pandas_on_spark.apply_batch(identify4) actual.index.names = ["number", "color"] actual.columns = ["a", "b"] self.assert_eq(actual, pdf) def identify5( x, ) -> ps.DataFrame[ [("number", int), ("color", str)], [("a", int), ("b", List[int])] # noqa: F405 ]: return x actual = psdf.pandas_on_spark.apply_batch(identify5) self.assert_eq(actual, pdf) def test_transform_batch(self): pdf = pd.DataFrame( { "a": [1, 2, 3, 4, 5, 6] * 100, "b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100, "c": [1, 4, 9, 16, 25, 36] * 100, }, columns=["a", "b", "c"], index=np.random.rand(600), ) psdf = ps.DataFrame(pdf) self.assert_eq( psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.c + 1).sort_index(), (pdf.c + 1).sort_index(), ) self.assert_eq( psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf + a, 1).sort_index(), (pdf + 1).sort_index(), ) self.assert_eq( psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf.c + a, a=1).sort_index(), (pdf.c + 1).sort_index(), ) with option_context("compute.shortcut_limit", 500): self.assert_eq( psdf.pandas_on_spark.transform_batch(lambda pdf: pdf + 1).sort_index(), (pdf + 1).sort_index(), ) self.assert_eq( psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.b + 1).sort_index(), (pdf.b + 1).sort_index(), ) self.assert_eq( psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf + a, 1).sort_index(), (pdf + 1).sort_index(), ) self.assert_eq( psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf.c + a, a=1).sort_index(), (pdf.c + 1).sort_index(), ) with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"): 
psdf.pandas_on_spark.transform_batch(1) with self.assertRaisesRegex(ValueError, "The given function should return a frame"): psdf.pandas_on_spark.transform_batch(lambda pdf: 1) with self.assertRaisesRegex( ValueError, "transform_batch cannot produce aggregated results" ): psdf.pandas_on_spark.transform_batch(lambda pdf: pd.Series(1)) # multi-index columns columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")]) pdf.columns = columns psdf.columns = columns self.assert_eq( psdf.pandas_on_spark.transform_batch(lambda x: x + 1).sort_index(), (pdf + 1).sort_index(), ) with option_context("compute.shortcut_limit", 500): self.assert_eq( psdf.pandas_on_spark.transform_batch(lambda x: x + 1).sort_index(), (pdf + 1).sort_index(), ) def test_transform_batch_with_type(self): pdf = self.pdf psdf = ps.from_pandas(pdf) def identify1(x) -> ps.DataFrame[int, int]: return x # Type hints set the default column names, and we use default index for # pandas API on Spark. Here we ignore both diff. actual = psdf.pandas_on_spark.transform_batch(identify1) expected = pdf self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy())) self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy())) def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405 return x actual = psdf.pandas_on_spark.transform_batch(identify2) expected = pdf self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy())) self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy())) def test_transform_batch_same_anchor(self): psdf = ps.range(10) psdf["d"] = psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.id + 1) self.assert_eq( psdf, pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]), ) psdf = ps.range(10) def plus_one(pdf) -> ps.Series[np.int64]: return pdf.id + 1 psdf["d"] = psdf.pandas_on_spark.transform_batch(plus_one) self.assert_eq( psdf, pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]), ) psdf = ps.range(10) def plus_one(ser) -> ps.Series[np.int64]: return ser + 1 psdf["d"] = psdf.id.pandas_on_spark.transform_batch(plus_one) self.assert_eq( psdf, pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]), ) def test_empty_timestamp(self): pdf = pd.DataFrame( { "t": [ datetime(2019, 1, 1, 0, 0, 0), datetime(2019, 1, 2, 0, 0, 0), datetime(2019, 1, 3, 0, 0, 0), ] }, index=np.random.rand(3), ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf[psdf["t"] != psdf["t"]], pdf[pdf["t"] != pdf["t"]]) self.assert_eq(psdf[psdf["t"] != psdf["t"]].dtypes, pdf[pdf["t"] != pdf["t"]].dtypes) def test_to_spark(self): psdf = ps.from_pandas(self.pdf) with self.assertRaisesRegex(ValueError, "'index_col' cannot be overlapped"): psdf.to_spark(index_col="a") with self.assertRaisesRegex(ValueError, "length of index columns.*1.*3"): psdf.to_spark(index_col=["x", "y", "z"]) def test_keys(self): pdf = pd.DataFrame( [[1, 2], [4, 5], [7, 8]], index=["cobra", "viper", "sidewinder"], columns=["max_speed", "shield"], ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.keys(), pdf.keys()) def test_quantile(self): pdf, psdf = self.df_pair self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5)) self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75])) self.assert_eq(psdf.loc[[]].quantile(0.5), pdf.loc[[]].quantile(0.5)) self.assert_eq( psdf.loc[[]].quantile([0.25, 0.5, 0.75]), pdf.loc[[]].quantile([0.25, 0.5, 0.75]) ) with self.assertRaisesRegex( 
NotImplementedError, 'axis should be either 0 or "index" currently.' ): psdf.quantile(0.5, axis=1) with self.assertRaisesRegex(TypeError, "accuracy must be an integer; however"): psdf.quantile(accuracy="a") with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"): psdf.quantile(q="a") with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"): psdf.quantile(q=["a"]) with self.assertRaisesRegex( ValueError, r"percentiles should all be in the interval \[0, 1\]" ): psdf.quantile(q=[1.1]) self.assert_eq( psdf.quantile(0.5, numeric_only=False), pdf.quantile(0.5, numeric_only=False) ) self.assert_eq( psdf.quantile([0.25, 0.5, 0.75], numeric_only=False), pdf.quantile([0.25, 0.5, 0.75], numeric_only=False), ) # multi-index column columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")]) pdf.columns = columns psdf.columns = columns self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5)) self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75])) pdf = pd.DataFrame({"x": ["a", "b", "c"]}) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5)) self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75])) with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"): psdf.quantile(0.5, numeric_only=False) with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"): psdf.quantile([0.25, 0.5, 0.75], numeric_only=False) def test_pct_change(self): pdf = pd.DataFrame( {"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0], "c": [300, 200, 400, 200]}, index=np.random.rand(4), ) pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")]) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.pct_change(2), pdf.pct_change(2), check_exact=False) self.assert_eq(psdf.pct_change().sum(), pdf.pct_change().sum(), check_exact=False) def test_where(self): pdf, psdf = self.df_pair # pandas requires `axis` argument when the `other` is Series. # `axis` is not fully supported yet in pandas-on-Spark. 
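        # Only the axis=0 form is exercised here; a non-DataFrame/Series cond must raise.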
self.assert_eq( psdf.where(psdf > 2, psdf.a + 10, axis=0), pdf.where(pdf > 2, pdf.a + 10, axis=0) ) with self.assertRaisesRegex(TypeError, "type of cond must be a DataFrame or Series"): psdf.where(1) def test_mask(self): psdf = ps.from_pandas(self.pdf) with self.assertRaisesRegex(TypeError, "type of cond must be a DataFrame or Series"): psdf.mask(1) def test_query(self): pdf = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2), "C": range(10, 5, -1)}) psdf = ps.from_pandas(pdf) exprs = ("A > B", "A < C", "C == B") for expr in exprs: self.assert_eq(psdf.query(expr), pdf.query(expr)) # test `inplace=True` for expr in exprs: dummy_psdf = psdf.copy() dummy_pdf = pdf.copy() pser = dummy_pdf.A psser = dummy_psdf.A dummy_pdf.query(expr, inplace=True) dummy_psdf.query(expr, inplace=True) self.assert_eq(dummy_psdf, dummy_pdf) self.assert_eq(psser, pser) # invalid values for `expr` invalid_exprs = (1, 1.0, (exprs[0],), [exprs[0]]) for expr in invalid_exprs: with self.assertRaisesRegex( TypeError, "expr must be a string to be evaluated, {} given".format(type(expr).__name__), ): psdf.query(expr) # invalid values for `inplace` invalid_inplaces = (1, 0, "True", "False") for inplace in invalid_inplaces: with self.assertRaisesRegex( TypeError, 'For argument "inplace" expected type bool, received type {}.'.format( type(inplace).__name__ ), ): psdf.query("a < b", inplace=inplace) # doesn't support for MultiIndex columns columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")]) psdf.columns = columns with self.assertRaisesRegex(TypeError, "Doesn't support for MultiIndex columns"): psdf.query("('A', 'Z') > ('B', 'X')") def test_take(self): pdf = pd.DataFrame( {"A": range(0, 50000), "B": range(100000, 0, -2), "C": range(100000, 50000, -1)} ) psdf = ps.from_pandas(pdf) # axis=0 (default) self.assert_eq(psdf.take([1, 2]).sort_index(), pdf.take([1, 2]).sort_index()) self.assert_eq(psdf.take([-1, -2]).sort_index(), pdf.take([-1, -2]).sort_index()) self.assert_eq( psdf.take(range(100, 110)).sort_index(), pdf.take(range(100, 110)).sort_index() ) self.assert_eq( psdf.take(range(-110, -100)).sort_index(), pdf.take(range(-110, -100)).sort_index() ) self.assert_eq( psdf.take([10, 100, 1000, 10000]).sort_index(), pdf.take([10, 100, 1000, 10000]).sort_index(), ) self.assert_eq( psdf.take([-10, -100, -1000, -10000]).sort_index(), pdf.take([-10, -100, -1000, -10000]).sort_index(), ) # axis=1 self.assert_eq( psdf.take([1, 2], axis=1).sort_index(), pdf.take([1, 2], axis=1).sort_index() ) self.assert_eq( psdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index() ) self.assert_eq( psdf.take(range(1, 3), axis=1).sort_index(), pdf.take(range(1, 3), axis=1).sort_index(), ) self.assert_eq( psdf.take(range(-1, -3), axis=1).sort_index(), pdf.take(range(-1, -3), axis=1).sort_index(), ) self.assert_eq( psdf.take([2, 1], axis=1).sort_index(), pdf.take([2, 1], axis=1).sort_index(), ) self.assert_eq( psdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index(), ) # MultiIndex columns columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")]) psdf.columns = columns pdf.columns = columns # MultiIndex columns with axis=0 (default) self.assert_eq(psdf.take([1, 2]).sort_index(), pdf.take([1, 2]).sort_index()) self.assert_eq(psdf.take([-1, -2]).sort_index(), pdf.take([-1, -2]).sort_index()) self.assert_eq( psdf.take(range(100, 110)).sort_index(), pdf.take(range(100, 110)).sort_index() ) self.assert_eq( psdf.take(range(-110, -100)).sort_index(), pdf.take(range(-110, 
-100)).sort_index() ) self.assert_eq( psdf.take([10, 100, 1000, 10000]).sort_index(), pdf.take([10, 100, 1000, 10000]).sort_index(), ) self.assert_eq( psdf.take([-10, -100, -1000, -10000]).sort_index(), pdf.take([-10, -100, -1000, -10000]).sort_index(), ) # axis=1 self.assert_eq( psdf.take([1, 2], axis=1).sort_index(), pdf.take([1, 2], axis=1).sort_index() ) self.assert_eq( psdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index() ) self.assert_eq( psdf.take(range(1, 3), axis=1).sort_index(), pdf.take(range(1, 3), axis=1).sort_index(), ) self.assert_eq( psdf.take(range(-1, -3), axis=1).sort_index(), pdf.take(range(-1, -3), axis=1).sort_index(), ) self.assert_eq( psdf.take([2, 1], axis=1).sort_index(), pdf.take([2, 1], axis=1).sort_index(), ) self.assert_eq( psdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index(), ) # Checking the type of indices. self.assertRaises(TypeError, lambda: psdf.take(1)) self.assertRaises(TypeError, lambda: psdf.take("1")) self.assertRaises(TypeError, lambda: psdf.take({1, 2})) self.assertRaises(TypeError, lambda: psdf.take({1: None, 2: None})) def test_axes(self): pdf = self.pdf psdf = ps.from_pandas(pdf) self.assert_eq(pdf.axes, psdf.axes) # multi-index columns columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")]) pdf.columns = columns psdf.columns = columns self.assert_eq(pdf.axes, psdf.axes) def test_udt(self): sparse_values = {0: 0.1, 1: 1.1} sparse_vector = SparseVector(len(sparse_values), sparse_values) pdf = pd.DataFrame({"a": [sparse_vector], "b": [10]}) psdf = ps.from_pandas(pdf) self.assert_eq(psdf, pdf) def test_eval(self): pdf = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2)}) psdf = ps.from_pandas(pdf) # operation between columns (returns Series) self.assert_eq(pdf.eval("A + B"), psdf.eval("A + B")) self.assert_eq(pdf.eval("A + A"), psdf.eval("A + A")) # assignment (returns DataFrame) self.assert_eq(pdf.eval("C = A + B"), psdf.eval("C = A + B")) self.assert_eq(pdf.eval("A = A + A"), psdf.eval("A = A + A")) # operation between scalars (returns scalar) self.assert_eq(pdf.eval("1 + 1"), psdf.eval("1 + 1")) # complicated operations with assignment self.assert_eq( pdf.eval("B = A + B // (100 + 200) * (500 - B) - 10.5"), psdf.eval("B = A + B // (100 + 200) * (500 - B) - 10.5"), ) # inplace=True (only support for assignment) pdf.eval("C = A + B", inplace=True) psdf.eval("C = A + B", inplace=True) self.assert_eq(pdf, psdf) pser = pdf.A psser = psdf.A pdf.eval("A = B + C", inplace=True) psdf.eval("A = B + C", inplace=True) self.assert_eq(pdf, psdf) self.assert_eq(pser, psser) # doesn't support for multi-index columns columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b"), ("z", "c")]) psdf.columns = columns self.assertRaises(TypeError, lambda: psdf.eval("x.a + y.b")) @unittest.skipIf(not have_tabulate, tabulate_requirement_message) def test_to_markdown(self): pdf = pd.DataFrame(data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.to_markdown(), psdf.to_markdown()) def test_cache(self): pdf = pd.DataFrame( [(0.2, 0.3), (0.0, 0.6), (0.6, 0.0), (0.2, 0.1)], columns=["dogs", "cats"] ) psdf = ps.from_pandas(pdf) with psdf.spark.cache() as cached_df: self.assert_eq(isinstance(cached_df, CachedDataFrame), True) self.assert_eq( repr(cached_df.spark.storage_level), repr(StorageLevel(True, True, False, True)) ) def test_persist(self): pdf = pd.DataFrame( [(0.2, 0.3), (0.0, 0.6), (0.6, 0.0), (0.2, 0.1)], columns=["dogs", "cats"] ) psdf = 
ps.from_pandas(pdf) storage_levels = [ StorageLevel.DISK_ONLY, StorageLevel.MEMORY_AND_DISK, StorageLevel.MEMORY_ONLY, StorageLevel.OFF_HEAP, ] for storage_level in storage_levels: with psdf.spark.persist(storage_level) as cached_df: self.assert_eq(isinstance(cached_df, CachedDataFrame), True) self.assert_eq(repr(cached_df.spark.storage_level), repr(storage_level)) self.assertRaises(TypeError, lambda: psdf.spark.persist("DISK_ONLY")) def test_squeeze(self): axises = [None, 0, 1, "rows", "index", "columns"] # Multiple columns pdf = pd.DataFrame([[1, 2], [3, 4]], columns=["a", "b"], index=["x", "y"]) psdf = ps.from_pandas(pdf) for axis in axises: self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis)) # Multiple columns with MultiIndex columns columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X")]) pdf.columns = columns psdf.columns = columns for axis in axises: self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis)) # Single column with single value pdf = pd.DataFrame([[1]], columns=["a"], index=["x"]) psdf = ps.from_pandas(pdf) for axis in axises: self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis)) # Single column with single value with MultiIndex column columns = pd.MultiIndex.from_tuples([("A", "Z")]) pdf.columns = columns psdf.columns = columns for axis in axises: self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis)) # Single column with multiple values pdf = pd.DataFrame([1, 2, 3, 4], columns=["a"]) psdf = ps.from_pandas(pdf) for axis in axises: self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis)) # Single column with multiple values with MultiIndex column pdf.columns = columns psdf.columns = columns for axis in axises: self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis)) def test_rfloordiv(self): pdf = pd.DataFrame( {"angles": [0, 3, 4], "degrees": [360, 180, 360]}, index=["circle", "triangle", "rectangle"], columns=["angles", "degrees"], ) psdf = ps.from_pandas(pdf) expected_result = pdf.rfloordiv(10) self.assert_eq(psdf.rfloordiv(10), expected_result) def test_truncate(self): pdf1 = pd.DataFrame( { "A": ["a", "b", "c", "d", "e", "f", "g"], "B": ["h", "i", "j", "k", "l", "m", "n"], "C": ["o", "p", "q", "r", "s", "t", "u"], }, index=[-500, -20, -1, 0, 400, 550, 1000], ) psdf1 = ps.from_pandas(pdf1) pdf2 = pd.DataFrame( { "A": ["a", "b", "c", "d", "e", "f", "g"], "B": ["h", "i", "j", "k", "l", "m", "n"], "C": ["o", "p", "q", "r", "s", "t", "u"], }, index=[1000, 550, 400, 0, -1, -20, -500], ) psdf2 = ps.from_pandas(pdf2) self.assert_eq(psdf1.truncate(), pdf1.truncate()) self.assert_eq(psdf1.truncate(before=-20), pdf1.truncate(before=-20)) self.assert_eq(psdf1.truncate(after=400), pdf1.truncate(after=400)) self.assert_eq(psdf1.truncate(copy=False), pdf1.truncate(copy=False)) self.assert_eq(psdf1.truncate(-20, 400, copy=False), pdf1.truncate(-20, 400, copy=False)) # The bug for these tests has been fixed in pandas 1.1.0. 
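        # On older pandas the expected frame is built by hand below instead of being
        # derived from pdf2.truncate() directly.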
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"): self.assert_eq(psdf2.truncate(0, 550), pdf2.truncate(0, 550)) self.assert_eq(psdf2.truncate(0, 550, copy=False), pdf2.truncate(0, 550, copy=False)) else: expected_psdf = ps.DataFrame( {"A": ["b", "c", "d"], "B": ["i", "j", "k"], "C": ["p", "q", "r"]}, index=[550, 400, 0], ) self.assert_eq(psdf2.truncate(0, 550), expected_psdf) self.assert_eq(psdf2.truncate(0, 550, copy=False), expected_psdf) # axis = 1 self.assert_eq(psdf1.truncate(axis=1), pdf1.truncate(axis=1)) self.assert_eq(psdf1.truncate(before="B", axis=1), pdf1.truncate(before="B", axis=1)) self.assert_eq(psdf1.truncate(after="A", axis=1), pdf1.truncate(after="A", axis=1)) self.assert_eq(psdf1.truncate(copy=False, axis=1), pdf1.truncate(copy=False, axis=1)) self.assert_eq(psdf2.truncate("B", "C", axis=1), pdf2.truncate("B", "C", axis=1)) self.assert_eq( psdf1.truncate("B", "C", copy=False, axis=1), pdf1.truncate("B", "C", copy=False, axis=1), ) # MultiIndex columns columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "Z")]) pdf1.columns = columns psdf1.columns = columns pdf2.columns = columns psdf2.columns = columns self.assert_eq(psdf1.truncate(), pdf1.truncate()) self.assert_eq(psdf1.truncate(before=-20), pdf1.truncate(before=-20)) self.assert_eq(psdf1.truncate(after=400), pdf1.truncate(after=400)) self.assert_eq(psdf1.truncate(copy=False), pdf1.truncate(copy=False)) self.assert_eq(psdf1.truncate(-20, 400, copy=False), pdf1.truncate(-20, 400, copy=False)) # The bug for these tests has been fixed in pandas 1.1.0. if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"): self.assert_eq(psdf2.truncate(0, 550), pdf2.truncate(0, 550)) self.assert_eq(psdf2.truncate(0, 550, copy=False), pdf2.truncate(0, 550, copy=False)) else: expected_psdf.columns = columns self.assert_eq(psdf2.truncate(0, 550), expected_psdf) self.assert_eq(psdf2.truncate(0, 550, copy=False), expected_psdf) # axis = 1 self.assert_eq(psdf1.truncate(axis=1), pdf1.truncate(axis=1)) self.assert_eq(psdf1.truncate(before="B", axis=1), pdf1.truncate(before="B", axis=1)) self.assert_eq(psdf1.truncate(after="A", axis=1), pdf1.truncate(after="A", axis=1)) self.assert_eq(psdf1.truncate(copy=False, axis=1), pdf1.truncate(copy=False, axis=1)) self.assert_eq(psdf2.truncate("B", "C", axis=1), pdf2.truncate("B", "C", axis=1)) self.assert_eq( psdf1.truncate("B", "C", copy=False, axis=1), pdf1.truncate("B", "C", copy=False, axis=1), ) # Exceptions psdf = ps.DataFrame( { "A": ["a", "b", "c", "d", "e", "f", "g"], "B": ["h", "i", "j", "k", "l", "m", "n"], "C": ["o", "p", "q", "r", "s", "t", "u"], }, index=[-500, 100, 400, 0, -1, 550, -20], ) msg = "truncate requires a sorted index" with self.assertRaisesRegex(ValueError, msg): psdf.truncate() psdf = ps.DataFrame( { "A": ["a", "b", "c", "d", "e", "f", "g"], "B": ["h", "i", "j", "k", "l", "m", "n"], "C": ["o", "p", "q", "r", "s", "t", "u"], }, index=[-500, -20, -1, 0, 400, 550, 1000], ) msg = "Truncate: -20 must be after 400" with self.assertRaisesRegex(ValueError, msg): psdf.truncate(400, -20) msg = "Truncate: B must be after C" with self.assertRaisesRegex(ValueError, msg): psdf.truncate("C", "B", axis=1) def test_explode(self): pdf = pd.DataFrame({"A": [[-1.0, np.nan], [0.0, np.inf], [1.0, -np.inf]], "B": 1}) pdf.index.name = "index" pdf.columns.name = "columns" psdf = ps.from_pandas(pdf) expected_result1 = pdf.explode("A") expected_result2 = pdf.explode("B") self.assert_eq(psdf.explode("A"), expected_result1, almost=True) self.assert_eq(psdf.explode("B"), 
expected_result2) self.assert_eq(psdf.explode("A").index.name, expected_result1.index.name) self.assert_eq(psdf.explode("A").columns.name, expected_result1.columns.name) self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"])) # MultiIndex midx = pd.MultiIndex.from_tuples( [("x", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"] ) pdf.index = midx psdf = ps.from_pandas(pdf) expected_result1 = pdf.explode("A") expected_result2 = pdf.explode("B") self.assert_eq(psdf.explode("A"), expected_result1, almost=True) self.assert_eq(psdf.explode("B"), expected_result2) self.assert_eq(psdf.explode("A").index.names, expected_result1.index.names) self.assert_eq(psdf.explode("A").columns.name, expected_result1.columns.name) self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"])) # MultiIndex columns columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X")], names=["column1", "column2"]) pdf.columns = columns psdf.columns = columns expected_result1 = pdf.explode(("A", "Z")) expected_result2 = pdf.explode(("B", "X")) expected_result3 = pdf.A.explode("Z") self.assert_eq(psdf.explode(("A", "Z")), expected_result1, almost=True) self.assert_eq(psdf.explode(("B", "X")), expected_result2) self.assert_eq(psdf.explode(("A", "Z")).index.names, expected_result1.index.names) self.assert_eq(psdf.explode(("A", "Z")).columns.names, expected_result1.columns.names) self.assert_eq(psdf.A.explode("Z"), expected_result3, almost=True) self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"])) self.assertRaises(ValueError, lambda: psdf.explode("A")) def test_spark_schema(self): psdf = ps.DataFrame( { "a": list("abc"), "b": list(range(1, 4)), "c": np.arange(3, 6).astype("i1"), "d": np.arange(4.0, 7.0, dtype="float64"), "e": [True, False, True], "f": pd.date_range("20130101", periods=3), }, columns=["a", "b", "c", "d", "e", "f"], ) actual = psdf.spark.schema() expected = ( StructType() .add("a", "string", False) .add("b", "long", False) .add("c", "byte", False) .add("d", "double", False) .add("e", "boolean", False) .add("f", "timestamp", False) ) self.assertEqual(actual, expected) actual = psdf.spark.schema("index") expected = ( StructType() .add("index", "long", False) .add("a", "string", False) .add("b", "long", False) .add("c", "byte", False) .add("d", "double", False) .add("e", "boolean", False) .add("f", "timestamp", False) ) self.assertEqual(actual, expected) def test_print_schema(self): psdf = ps.DataFrame( {"a": list("abc"), "b": list(range(1, 4)), "c": np.arange(3, 6).astype("i1")}, columns=["a", "b", "c"], ) prev = sys.stdout try: out = StringIO() sys.stdout = out psdf.spark.print_schema() actual = out.getvalue().strip() self.assertTrue("a: string" in actual, actual) self.assertTrue("b: long" in actual, actual) self.assertTrue("c: byte" in actual, actual) out = StringIO() sys.stdout = out psdf.spark.print_schema(index_col="index") actual = out.getvalue().strip() self.assertTrue("index: long" in actual, actual) self.assertTrue("a: string" in actual, actual) self.assertTrue("b: long" in actual, actual) self.assertTrue("c: byte" in actual, actual) finally: sys.stdout = prev def test_explain_hint(self): psdf1 = ps.DataFrame( {"lkey": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 5]}, columns=["lkey", "value"], ) psdf2 = ps.DataFrame( {"rkey": ["foo", "bar", "baz", "foo"], "value": [5, 6, 7, 8]}, columns=["rkey", "value"], ) merged = psdf1.merge(psdf2.spark.hint("broadcast"), left_on="lkey", right_on="rkey") prev = sys.stdout try: out = StringIO() sys.stdout = out merged.spark.explain() 
actual = out.getvalue().strip() self.assertTrue("Broadcast" in actual, actual) finally: sys.stdout = prev def test_mad(self): pdf = pd.DataFrame( { "A": [1, 2, None, 4, np.nan], "B": [-0.1, 0.2, -0.3, np.nan, 0.5], "C": ["a", "b", "c", "d", "e"], } ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.mad(), pdf.mad()) self.assert_eq(psdf.mad(axis=1), pdf.mad(axis=1)) with self.assertRaises(ValueError): psdf.mad(axis=2) # MultiIndex columns columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("A", "Z")]) pdf.columns = columns psdf.columns = columns self.assert_eq(psdf.mad(), pdf.mad()) self.assert_eq(psdf.mad(axis=1), pdf.mad(axis=1)) pdf = pd.DataFrame({"A": [True, True, False, False], "B": [True, False, False, True]}) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.mad(), pdf.mad()) self.assert_eq(psdf.mad(axis=1), pdf.mad(axis=1)) def test_abs(self): pdf = pd.DataFrame({"a": [-2, -1, 0, 1]}) psdf = ps.from_pandas(pdf) self.assert_eq(abs(psdf), abs(pdf)) self.assert_eq(np.abs(psdf), np.abs(pdf)) def test_iteritems(self): pdf = pd.DataFrame( {"species": ["bear", "bear", "marsupial"], "population": [1864, 22000, 80000]}, index=["panda", "polar", "koala"], columns=["species", "population"], ) psdf = ps.from_pandas(pdf) for (p_name, p_items), (k_name, k_items) in zip(pdf.iteritems(), psdf.iteritems()): self.assert_eq(p_name, k_name) self.assert_eq(p_items, k_items) def test_tail(self): pdf = pd.DataFrame({"x": range(1000)}) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.tail(), psdf.tail()) self.assert_eq(pdf.tail(10), psdf.tail(10)) self.assert_eq(pdf.tail(-990), psdf.tail(-990)) self.assert_eq(pdf.tail(0), psdf.tail(0)) self.assert_eq(pdf.tail(-1001), psdf.tail(-1001)) self.assert_eq(pdf.tail(1001), psdf.tail(1001)) self.assert_eq((pdf + 1).tail(), (psdf + 1).tail()) self.assert_eq((pdf + 1).tail(10), (psdf + 1).tail(10)) self.assert_eq((pdf + 1).tail(-990), (psdf + 1).tail(-990)) self.assert_eq((pdf + 1).tail(0), (psdf + 1).tail(0)) self.assert_eq((pdf + 1).tail(-1001), (psdf + 1).tail(-1001)) self.assert_eq((pdf + 1).tail(1001), (psdf + 1).tail(1001)) with self.assertRaisesRegex(TypeError, "bad operand type for unary -: 'str'"): psdf.tail("10") def test_last_valid_index(self): pdf = pd.DataFrame( {"a": [1, 2, 3, None], "b": [1.0, 2.0, 3.0, None], "c": [100, 200, 400, None]}, index=["Q", "W", "E", "R"], ) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.last_valid_index(), psdf.last_valid_index()) self.assert_eq(pdf[[]].last_valid_index(), psdf[[]].last_valid_index()) # MultiIndex columns pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")]) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.last_valid_index(), psdf.last_valid_index()) # Empty DataFrame pdf = pd.Series([]).to_frame() psdf = ps.Series([]).to_frame() self.assert_eq(pdf.last_valid_index(), psdf.last_valid_index()) def test_last(self): index = pd.date_range("2018-04-09", periods=4, freq="2D") pdf = pd.DataFrame([1, 2, 3, 4], index=index) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.last("1D"), psdf.last("1D")) self.assert_eq(pdf.last(DateOffset(days=1)), psdf.last(DateOffset(days=1))) with self.assertRaisesRegex(TypeError, "'last' only supports a DatetimeIndex"): ps.DataFrame([1, 2, 3, 4]).last("1D") def test_first(self): index = pd.date_range("2018-04-09", periods=4, freq="2D") pdf = pd.DataFrame([1, 2, 3, 4], index=index) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.first("1D"), psdf.first("1D")) self.assert_eq(pdf.first(DateOffset(days=1)), psdf.first(DateOffset(days=1))) with 
self.assertRaisesRegex(TypeError, "'first' only supports a DatetimeIndex"): ps.DataFrame([1, 2, 3, 4]).first("1D") def test_first_valid_index(self): pdf = pd.DataFrame( {"a": [None, 2, 3, 2], "b": [None, 2.0, 3.0, 1.0], "c": [None, 200, 400, 200]}, index=["Q", "W", "E", "R"], ) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index()) self.assert_eq(pdf[[]].first_valid_index(), psdf[[]].first_valid_index()) # MultiIndex columns pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")]) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index()) # Empty DataFrame pdf = pd.Series([]).to_frame() psdf = ps.Series([]).to_frame() self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index()) pdf = pd.DataFrame( {"a": [None, 2, 3, 2], "b": [None, 2.0, 3.0, 1.0], "c": [None, 200, 400, 200]}, index=[ datetime(2021, 1, 1), datetime(2021, 2, 1), datetime(2021, 3, 1), datetime(2021, 4, 1), ], ) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index()) def test_product(self): pdf = pd.DataFrame( {"A": [1, 2, 3, 4, 5], "B": [10, 20, 30, 40, 50], "C": ["a", "b", "c", "d", "e"]} ) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.prod(), psdf.prod().sort_index()) # Named columns pdf.columns.name = "Koalas" psdf = ps.from_pandas(pdf) self.assert_eq(pdf.prod(), psdf.prod().sort_index()) # MultiIndex columns pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")]) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.prod(), psdf.prod().sort_index()) # Named MultiIndex columns pdf.columns.names = ["Hello", "Koalas"] psdf = ps.from_pandas(pdf) self.assert_eq(pdf.prod(), psdf.prod().sort_index()) # No numeric columns pdf = pd.DataFrame({"key": ["a", "b", "c"], "val": ["x", "y", "z"]}) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.prod(), psdf.prod().sort_index()) # No numeric named columns pdf.columns.name = "Koalas" psdf = ps.from_pandas(pdf) self.assert_eq(pdf.prod(), psdf.prod().sort_index(), almost=True) # No numeric MultiIndex columns pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y")]) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.prod(), psdf.prod().sort_index(), almost=True) # No numeric named MultiIndex columns pdf.columns.names = ["Hello", "Koalas"] psdf = ps.from_pandas(pdf) self.assert_eq(pdf.prod(), psdf.prod().sort_index(), almost=True) # All NaN columns pdf = pd.DataFrame( { "A": [np.nan, np.nan, np.nan, np.nan, np.nan], "B": [10, 20, 30, 40, 50], "C": ["a", "b", "c", "d", "e"], } ) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False) # All NaN named columns pdf.columns.name = "Koalas" psdf = ps.from_pandas(pdf) self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False) # All NaN MultiIndex columns pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")]) psdf = ps.from_pandas(pdf) self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False) # All NaN named MultiIndex columns pdf.columns.names = ["Hello", "Koalas"] psdf = ps.from_pandas(pdf) self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False) def test_from_dict(self): data = {"row_1": [3, 2, 1, 0], "row_2": [10, 20, 30, 40]} pdf = pd.DataFrame.from_dict(data) psdf = ps.DataFrame.from_dict(data) self.assert_eq(pdf, psdf) pdf = pd.DataFrame.from_dict(data, dtype="int8") psdf = ps.DataFrame.from_dict(data, dtype="int8") self.assert_eq(pdf, psdf) pdf = pd.DataFrame.from_dict(data, orient="index", 
columns=["A", "B", "C", "D"]) psdf = ps.DataFrame.from_dict(data, orient="index", columns=["A", "B", "C", "D"]) self.assert_eq(pdf, psdf) def test_pad(self): pdf = pd.DataFrame( { "A": [None, 3, None, None], "B": [2, 4, None, 3], "C": [None, None, None, 1], "D": [0, 1, 5, 4], }, columns=["A", "B", "C", "D"], ) psdf = ps.from_pandas(pdf) if LooseVersion(pd.__version__) >= LooseVersion("1.1"): self.assert_eq(pdf.pad(), psdf.pad()) # Test `inplace=True` pdf.pad(inplace=True) psdf.pad(inplace=True) self.assert_eq(pdf, psdf) else: expected = ps.DataFrame( { "A": [None, 3, 3, 3], "B": [2.0, 4.0, 4.0, 3.0], "C": [None, None, None, 1], "D": [0, 1, 5, 4], }, columns=["A", "B", "C", "D"], ) self.assert_eq(expected, psdf.pad()) # Test `inplace=True` psdf.pad(inplace=True) self.assert_eq(expected, psdf) def test_backfill(self): pdf = pd.DataFrame( { "A": [None, 3, None, None], "B": [2, 4, None, 3], "C": [None, None, None, 1], "D": [0, 1, 5, 4], }, columns=["A", "B", "C", "D"], ) psdf = ps.from_pandas(pdf) if LooseVersion(pd.__version__) >= LooseVersion("1.1"): self.assert_eq(pdf.backfill(), psdf.backfill()) # Test `inplace=True` pdf.backfill(inplace=True) psdf.backfill(inplace=True) self.assert_eq(pdf, psdf) else: expected = ps.DataFrame( { "A": [3.0, 3.0, None, None], "B": [2.0, 4.0, 3.0, 3.0], "C": [1.0, 1.0, 1.0, 1.0], "D": [0, 1, 5, 4], }, columns=["A", "B", "C", "D"], ) self.assert_eq(expected, psdf.backfill()) # Test `inplace=True` psdf.backfill(inplace=True) self.assert_eq(expected, psdf) def test_align(self): pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30]) psdf1 = ps.from_pandas(pdf1) for join in ["outer", "inner", "left", "right"]: for axis in [None, 0, 1]: psdf_l, psdf_r = psdf1.align(psdf1[["b"]], join=join, axis=axis) pdf_l, pdf_r = pdf1.align(pdf1[["b"]], join=join, axis=axis) self.assert_eq(psdf_l, pdf_l) self.assert_eq(psdf_r, pdf_r) psdf_l, psdf_r = psdf1[["a"]].align(psdf1[["b", "a"]], join=join, axis=axis) pdf_l, pdf_r = pdf1[["a"]].align(pdf1[["b", "a"]], join=join, axis=axis) self.assert_eq(psdf_l, pdf_l) self.assert_eq(psdf_r, pdf_r) psdf_l, psdf_r = psdf1[["b", "a"]].align(psdf1[["a"]], join=join, axis=axis) pdf_l, pdf_r = pdf1[["b", "a"]].align(pdf1[["a"]], join=join, axis=axis) self.assert_eq(psdf_l, pdf_l) self.assert_eq(psdf_r, pdf_r) psdf_l, psdf_r = psdf1.align(psdf1["b"], axis=0) pdf_l, pdf_r = pdf1.align(pdf1["b"], axis=0) self.assert_eq(psdf_l, pdf_l) self.assert_eq(psdf_r, pdf_r) psdf_l, psser_b = psdf1[["a"]].align(psdf1["b"], axis=0) pdf_l, pser_b = pdf1[["a"]].align(pdf1["b"], axis=0) self.assert_eq(psdf_l, pdf_l) self.assert_eq(psser_b, pser_b) self.assertRaises(ValueError, lambda: psdf1.align(psdf1, join="unknown")) self.assertRaises(ValueError, lambda: psdf1.align(psdf1["b"])) self.assertRaises(TypeError, lambda: psdf1.align(["b"])) self.assertRaises(NotImplementedError, lambda: psdf1.align(psdf1["b"], axis=1)) pdf2 = pd.DataFrame({"a": [4, 5, 6], "d": ["d", "e", "f"]}, index=[10, 11, 12]) psdf2 = ps.from_pandas(pdf2) for join in ["outer", "inner", "left", "right"]: psdf_l, psdf_r = psdf1.align(psdf2, join=join, axis=1) pdf_l, pdf_r = pdf1.align(pdf2, join=join, axis=1) self.assert_eq(psdf_l.sort_index(), pdf_l.sort_index()) self.assert_eq(psdf_r.sort_index(), pdf_r.sort_index()) def test_between_time(self): idx = pd.date_range("2018-04-09", periods=4, freq="1D20min") pdf = pd.DataFrame({"A": [1, 2, 3, 4]}, index=idx) psdf = ps.from_pandas(pdf) self.assert_eq( pdf.between_time("0:15", "0:45").sort_index(), psdf.between_time("0:15", 
"0:45").sort_index(), ) pdf.index.name = "ts" psdf = ps.from_pandas(pdf) self.assert_eq( pdf.between_time("0:15", "0:45").sort_index(), psdf.between_time("0:15", "0:45").sort_index(), ) # Column label is 'index' pdf.columns = pd.Index(["index"]) psdf = ps.from_pandas(pdf) self.assert_eq( pdf.between_time("0:15", "0:45").sort_index(), psdf.between_time("0:15", "0:45").sort_index(), ) # Both index name and column label are 'index' pdf.index.name = "index" psdf = ps.from_pandas(pdf) self.assert_eq( pdf.between_time("0:15", "0:45").sort_index(), psdf.between_time("0:15", "0:45").sort_index(), ) # Index name is 'index', column label is ('X', 'A') pdf.columns = pd.MultiIndex.from_arrays([["X"], ["A"]]) psdf = ps.from_pandas(pdf) self.assert_eq( pdf.between_time("0:15", "0:45").sort_index(), psdf.between_time("0:15", "0:45").sort_index(), ) with self.assertRaisesRegex( NotImplementedError, "between_time currently only works for axis=0" ): psdf.between_time("0:15", "0:45", axis=1) psdf = ps.DataFrame({"A": [1, 2, 3, 4]}) with self.assertRaisesRegex(TypeError, "Index must be DatetimeIndex"): psdf.between_time("0:15", "0:45") def test_at_time(self): idx = pd.date_range("2018-04-09", periods=4, freq="1D20min") pdf = pd.DataFrame({"A": [1, 2, 3, 4]}, index=idx) psdf = ps.from_pandas(pdf) psdf.at_time("0:20") self.assert_eq( pdf.at_time("0:20").sort_index(), psdf.at_time("0:20").sort_index(), ) # Index name is 'ts' pdf.index.name = "ts" psdf = ps.from_pandas(pdf) self.assert_eq( pdf.at_time("0:20").sort_index(), psdf.at_time("0:20").sort_index(), ) # Index name is 'ts', column label is 'index' pdf.columns = pd.Index(["index"]) psdf = ps.from_pandas(pdf) self.assert_eq( pdf.at_time("0:40").sort_index(), psdf.at_time("0:40").sort_index(), ) # Both index name and column label are 'index' pdf.index.name = "index" psdf = ps.from_pandas(pdf) self.assert_eq( pdf.at_time("0:40").sort_index(), psdf.at_time("0:40").sort_index(), ) # Index name is 'index', column label is ('X', 'A') pdf.columns = pd.MultiIndex.from_arrays([["X"], ["A"]]) psdf = ps.from_pandas(pdf) self.assert_eq( pdf.at_time("0:40").sort_index(), psdf.at_time("0:40").sort_index(), ) with self.assertRaisesRegex(NotImplementedError, "'asof' argument is not supported"): psdf.at_time("0:15", asof=True) with self.assertRaisesRegex(NotImplementedError, "at_time currently only works for axis=0"): psdf.at_time("0:15", axis=1) psdf = ps.DataFrame({"A": [1, 2, 3, 4]}) with self.assertRaisesRegex(TypeError, "Index must be DatetimeIndex"): psdf.at_time("0:15") def test_astype(self): psdf = self.psdf msg = "Only a column name can be used for the key in a dtype mappings argument." 
with self.assertRaisesRegex(KeyError, msg): psdf.astype({"c": float}) def test_describe(self): pdf, psdf = self.df_pair # numeric columns self.assert_eq(psdf.describe(), pdf.describe()) psdf.a += psdf.a pdf.a += pdf.a self.assert_eq(psdf.describe(), pdf.describe()) # string columns psdf = ps.DataFrame({"A": ["a", "b", "b", "c"], "B": ["d", "e", "f", "f"]}) pdf = psdf.to_pandas() self.assert_eq(psdf.describe(), pdf.describe().astype(str)) psdf.A += psdf.A pdf.A += pdf.A self.assert_eq(psdf.describe(), pdf.describe().astype(str)) # timestamp columns psdf = ps.DataFrame( { "A": [ pd.Timestamp("2020-10-20"), pd.Timestamp("2021-06-02"), pd.Timestamp("2021-06-02"), pd.Timestamp("2022-07-11"), ], "B": [ pd.Timestamp("2021-11-20"), pd.Timestamp("2023-06-02"), pd.Timestamp("2026-07-11"), pd.Timestamp("2026-07-11"), ], } ) pdf = psdf.to_pandas() # NOTE: Set `datetime_is_numeric=True` for pandas: # FutureWarning: Treating datetime data as categorical rather than numeric in `.describe` is deprecated # and will be removed in a future version of pandas. Specify `datetime_is_numeric=True` to silence this # warning and adopt the future behavior now. # NOTE: Compare the result except percentiles, since we use approximate percentile # so the result is different from pandas. if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"): self.assert_eq( psdf.describe().loc[["count", "mean", "min", "max"]], pdf.describe(datetime_is_numeric=True) .astype(str) .loc[["count", "mean", "min", "max"]], ) else: self.assert_eq( psdf.describe(), ps.DataFrame( { "A": [ "4", "2021-07-16 18:00:00", "2020-10-20 00:00:00", "2020-10-20 00:00:00", "2021-06-02 00:00:00", "2021-06-02 00:00:00", "2022-07-11 00:00:00", ], "B": [ "4", "2024-08-02 18:00:00", "2021-11-20 00:00:00", "2021-11-20 00:00:00", "2023-06-02 00:00:00", "2026-07-11 00:00:00", "2026-07-11 00:00:00", ], }, index=["count", "mean", "min", "25%", "50%", "75%", "max"], ), ) # String & timestamp columns psdf = ps.DataFrame( { "A": ["a", "b", "b", "c"], "B": [ pd.Timestamp("2021-11-20"), pd.Timestamp("2023-06-02"), pd.Timestamp("2026-07-11"), pd.Timestamp("2026-07-11"), ], } ) pdf = psdf.to_pandas() if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"): self.assert_eq( psdf.describe().loc[["count", "mean", "min", "max"]], pdf.describe(datetime_is_numeric=True) .astype(str) .loc[["count", "mean", "min", "max"]], ) psdf.A += psdf.A pdf.A += pdf.A self.assert_eq( psdf.describe().loc[["count", "mean", "min", "max"]], pdf.describe(datetime_is_numeric=True) .astype(str) .loc[["count", "mean", "min", "max"]], ) else: expected_result = ps.DataFrame( { "B": [ "4", "2024-08-02 18:00:00", "2021-11-20 00:00:00", "2021-11-20 00:00:00", "2023-06-02 00:00:00", "2026-07-11 00:00:00", "2026-07-11 00:00:00", ] }, index=["count", "mean", "min", "25%", "50%", "75%", "max"], ) self.assert_eq( psdf.describe(), expected_result, ) psdf.A += psdf.A self.assert_eq( psdf.describe(), expected_result, ) # Numeric & timestamp columns psdf = ps.DataFrame( { "A": [1, 2, 2, 3], "B": [ pd.Timestamp("2021-11-20"), pd.Timestamp("2023-06-02"), pd.Timestamp("2026-07-11"), pd.Timestamp("2026-07-11"), ], } ) pdf = psdf.to_pandas() if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"): pandas_result = pdf.describe(datetime_is_numeric=True) pandas_result.B = pandas_result.B.astype(str) self.assert_eq( psdf.describe().loc[["count", "mean", "min", "max"]], pandas_result.loc[["count", "mean", "min", "max"]], ) psdf.A += psdf.A pdf.A += pdf.A pandas_result = pdf.describe(datetime_is_numeric=True) 
pandas_result.B = pandas_result.B.astype(str) self.assert_eq( psdf.describe().loc[["count", "mean", "min", "max"]], pandas_result.loc[["count", "mean", "min", "max"]], ) else: self.assert_eq( psdf.describe(), ps.DataFrame( { "A": [4, 2, 1, 1, 2, 2, 3, 0.816497], "B": [ "4", "2024-08-02 18:00:00", "2021-11-20 00:00:00", "2021-11-20 00:00:00", "2023-06-02 00:00:00", "2026-07-11 00:00:00", "2026-07-11 00:00:00", "None", ], }, index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"], ), ) psdf.A += psdf.A self.assert_eq( psdf.describe(), ps.DataFrame( { "A": [4, 4, 2, 2, 4, 4, 6, 1.632993], "B": [ "4", "2024-08-02 18:00:00", "2021-11-20 00:00:00", "2021-11-20 00:00:00", "2023-06-02 00:00:00", "2026-07-11 00:00:00", "2026-07-11 00:00:00", "None", ], }, index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"], ), ) # Include None column psdf = ps.DataFrame( { "a": [1, 2, 3], "b": [pd.Timestamp(1),
pd.Timestamp(1)
pandas.Timestamp
import os import pandas as pd from .utils.dataConverter import dataToList class cleanerToCSV: """Accepts the path to the directory containing scripts and converts the text after cleaning to CSV file in a given directory """ def __init__(self, directoryPath, savePath, nConversation=1): """Initates the process and saves a CSV file with rows of conversation Arguments: directoryPath {str} -- Path to transcript folder savePath {str} -- Path to save CSV file in Keyword Arguments: nConversation {int} -- [description] (default: {1}) """ self.directoryPath = directoryPath self.contents = os.listdir(directoryPath) self.strings = [] self.stickTogeatherIndex = nConversation tempStrings = dataToList.getStrings(self.contents, self.directoryPath) self.totalLines = len(tempStrings) if self.stickTogeatherIndex == 1: self.strings = tempStrings else: self.makeConversations(tempStrings) df =
pd.DataFrame(data={"Text": self.strings})
pandas.DataFrame
""" count step """ import os import sys import random from collections import defaultdict from itertools import groupby import subprocess import numpy as np import pandas as pd from scipy.io import mmwrite from scipy.sparse import coo_matrix import pysam import celescope.tools.utils as utils from celescope.tools.cellranger3.cell_calling_3 import cell_calling_3 from celescope.tools.__init__ import MATRIX_FILE_NAME, FEATURE_FILE_NAME, BARCODE_FILE_NAME from celescope.tools.cellranger3 import get_plot_elements from celescope.tools.step import Step, s_common TOOLS_DIR = os.path.dirname(__file__) random.seed(0) np.random.seed(0) class Count(Step): def __init__(self, args, step): Step.__init__(self, args, step) self.force_cell_num = args.force_cell_num self.cell_calling_method = args.cell_calling_method self.expected_cell_num = int(args.expected_cell_num) self.bam = args.bam if args.genomeDir and args.genomeDir != "None": _refFlat, self.gtf_file, _ = utils.glob_genomeDir(args.genomeDir) else: self.gtf_file = args.gtf self.id_name = utils.get_id_name_dict(self.gtf_file) # output files self.count_detail_file = f'{self.outdir}/{self.sample}_count_detail.txt' self.marked_count_file = f'{self.outdir}/{self.sample}_counts.txt' self.raw_matrix_10X_dir = f'{self.outdir}/{self.sample}_all_matrix' self.cell_matrix_10X_dir = f'{self.outdir}/{self.sample}_matrix_10X' self.downsample_file = f'{self.outdir}/{self.sample}_downsample.txt' def run(self): self.bam2table() df = pd.read_table(self.count_detail_file, header=0) # df_sum df_sum = Count.get_df_sum(df) # export all matrix self.write_matrix_10X(df, self.raw_matrix_10X_dir) # call cells cell_bc, _threshold = self.cell_calling(df_sum) # get cell stats CB_describe = self.get_cell_stats(df_sum, cell_bc) # export cell matrix df_cell = df.loc[df['Barcode'].isin(cell_bc), :] self.write_matrix_10X(df_cell, self.cell_matrix_10X_dir) (CB_total_Genes, CB_reads_count, reads_mapped_to_transcriptome) = self.cell_summary( df, cell_bc) # downsampling cell_bc = set(cell_bc) saturation, res_dict = self.downsample(df_cell) # summary self.get_summary(saturation, CB_describe, CB_total_Genes, CB_reads_count, reads_mapped_to_transcriptome) self.report_prepare() self.add_content_item('metric', downsample_summary=res_dict) self.clean_up() def report_prepare(self): df0 = pd.read_table(self.downsample_file, header=0) self.add_data_item(percentile=df0['percent'].tolist()) self.add_data_item(MedianGeneNum=df0['median_geneNum'].tolist()) self.add_data_item(Saturation=df0['saturation'].tolist()) self.add_data_item(chart=get_plot_elements.plot_barcode_rank(self.marked_count_file)) self.add_data_item(umi_summary=True) @staticmethod def correct_umi(umi_dict, percent=0.1): """ Correct umi_dict in place. Args: umi_dict: {umi_seq: umi_count} percent: if hamming_distance(low_seq, high_seq) == 1 and low_count / high_count < percent, merge low to high. 
Returns: n_corrected_umi: int n_corrected_read: int """ n_corrected_umi = 0 n_corrected_read = 0 # sort by value(UMI count) first, then key(UMI sequence) umi_arr = sorted( umi_dict.items(), key=lambda kv: (kv[1], kv[0]), reverse=True) while True: # break when only highest in umi_arr if len(umi_arr) == 1: break umi_low = umi_arr.pop() low_seq = umi_low[0] low_count = umi_low[1] for umi_kv in umi_arr: high_seq = umi_kv[0] high_count = umi_kv[1] if float(low_count / high_count) > percent: break if utils.hamming_distance(low_seq, high_seq) == 1: n_low = umi_dict[low_seq] n_corrected_umi += 1 n_corrected_read += n_low # merge umi_dict[high_seq] += n_low del (umi_dict[low_seq]) break return n_corrected_umi, n_corrected_read @utils.add_log def bam2table(self): """ bam to detail table must be used on name_sorted bam """ samfile = pysam.AlignmentFile(self.bam, "rb") with open(self.count_detail_file, 'wt') as fh1: fh1.write('\t'.join(['Barcode', 'geneID', 'UMI', 'count']) + '\n') def keyfunc(x): return x.query_name.split('_', 1)[0] for _, g in groupby(samfile, keyfunc): gene_umi_dict = defaultdict(lambda: defaultdict(int)) for seg in g: (barcode, umi) = seg.query_name.split('_')[:2] if not seg.has_tag('XT'): continue gene_id = seg.get_tag('XT') gene_umi_dict[gene_id][umi] += 1 for gene_id in gene_umi_dict: Count.correct_umi(gene_umi_dict[gene_id]) # output for gene_id in gene_umi_dict: for umi in gene_umi_dict[gene_id]: fh1.write('%s\t%s\t%s\t%s\n' % (barcode, gene_id, umi, gene_umi_dict[gene_id][umi])) samfile.close() @utils.add_log def cell_calling(self, df_sum): cell_calling_method = self.cell_calling_method if (self.force_cell_num is not None) and (self.force_cell_num != 'None'): cell_bc, UMI_threshold = self.force_cell(df_sum) elif cell_calling_method == 'auto': cell_bc, UMI_threshold = self.auto_cell(df_sum) elif cell_calling_method == 'cellranger3': cell_bc, UMI_threshold = self.cellranger3_cell(df_sum) elif cell_calling_method == 'inflection': _cell_bc, UMI_threshold = self.auto_cell(df_sum) cell_bc, UMI_threshold = self.inflection_cell(df_sum, UMI_threshold) return cell_bc, UMI_threshold @utils.add_log def force_cell(self, df_sum): force_cell_num = int(self.force_cell_num) cell_range = int(force_cell_num * 0.1) cell_low = force_cell_num - cell_range cell_high = force_cell_num + cell_range df_barcode_count = df_sum.groupby( ['UMI']).size().reset_index( name='barcode_counts') sorted_df = df_barcode_count.sort_values("UMI", ascending=False) sorted_df["barcode_cumsum"] = sorted_df["barcode_counts"].cumsum() for i in range(sorted_df.shape[0]): if sorted_df.iloc[i, :]["barcode_cumsum"] >= cell_low: index_low = i - 1 break for i in range(sorted_df.shape[0]): if sorted_df.iloc[i, :]["barcode_cumsum"] >= cell_high: index_high = i break df_sub = sorted_df.iloc[index_low:index_high + 1, :] threshold = df_sub.iloc[np.argmax( np.diff(df_sub["barcode_cumsum"])), :]["UMI"] cell_bc = Count.get_cell_bc(df_sum, threshold, col='UMI') return cell_bc, threshold @staticmethod def find_threshold(df_sum, idx): return int(df_sum.iloc[idx - 1, df_sum.columns == 'UMI']) @staticmethod def get_cell_bc(df_sum, threshold, col='UMI'): return list(df_sum[df_sum[col] >= threshold].index) @utils.add_log def auto_cell(self, df_sum): idx = int(self.expected_cell_num * 0.01) barcode_number = df_sum.shape[0] idx = int(min(barcode_number, idx)) if idx == 0: sys.exit("cell number equals zero!") # calculate read counts threshold threshold = int(Count.find_threshold(df_sum, idx) * 0.1) threshold = max(1, threshold) cell_bc = 
Count.get_cell_bc(df_sum, threshold) return cell_bc, threshold @utils.add_log def cellranger3_cell(self, df_sum): cell_bc, initial_cell_num = cell_calling_3(self.raw_matrix_10X_dir, self.expected_cell_num) threshold = Count.find_threshold(df_sum, initial_cell_num) return cell_bc, threshold @utils.add_log def inflection_cell(self, df_sum, threshold): app = f'{TOOLS_DIR}/rescue.R' cmd = ( f'Rscript {app} ' f'--matrix_dir {self.raw_matrix_10X_dir} ' f'--outdir {self.outdir} ' f'--sample {self.sample} ' f'--threshold {threshold}' ) Count.inflection_cell.logger.info(cmd) subprocess.check_call(cmd, shell=True) out_file = f'{self.outdir}/{self.sample}_rescue.tsv' df = pd.read_csv(out_file, sep='\t') inflection = int(df.loc[:, 'inflection']) threshold = inflection cell_bc = Count.get_cell_bc(df_sum, threshold) return cell_bc, threshold @staticmethod def get_df_sum(df, col='UMI'): def num_gt2(x): return
pd.Series.sum(x[x > 1])
pandas.Series.sum
import json import dml import prov.model import datetime import pandas as pd import uuid class masterList(dml.Algorithm): contributor = 'ashwini_gdukuray_justini_utdesai' reads = ['ashwini_gdukuray_justini_utdesai.massHousing', 'ashwini_gdukuray_justini_utdesai.secretary', 'ashwini_gdukuray_justini_utdesai.validZipCodes'] writes = ['ashwini_gdukuray_justini_utdesai.masterList'] @staticmethod def execute(trial=False): '''Retrieve some data sets (not using the API here for the sake of simplicity).''' startTime = datetime.datetime.now() # Set up the database connection. client = dml.pymongo.MongoClient() repo = client.repo repo.authenticate('ashwini_gdukuray_justini_utdesai', 'ashwini_gdukuray_justini_utdesai') # Need to standardize the columns and field structure of massHousing and secretary and union the two # in order to create a master MBE list, and then store it in the DB massHousing = repo['ashwini_gdukuray_justini_utdesai.massHousing'] secretary = repo['ashwini_gdukuray_justini_utdesai.secretary'] validZips = repo['ashwini_gdukuray_justini_utdesai.validZipCodes'] if (trial): massHousingDF = pd.DataFrame(list(massHousing.find()))[:100] secretaryDF = pd.DataFrame(list(secretary.find()))[:100] validZipsDF = pd.DataFrame(list(validZips.find()))[:100] else: massHousingDF = pd.DataFrame(list(massHousing.find())) secretaryDF = pd.DataFrame(list(secretary.find())) validZipsDF = pd.DataFrame(list(validZips.find())) # clean up secretary dataset # convert zip codes to strings and 5 digits long secretaryDF['Zip'] = secretaryDF['Zip'].astype('str') secretaryDF['Zip'] = secretaryDF['Zip'].apply(lambda zipCode: ((5 - len(zipCode))*'0' + zipCode \ if len(zipCode) < 5 else zipCode)[:5]) secretaryDF = secretaryDF.loc[secretaryDF['MBE - Y/N'] == 'Y'] secretaryDF = secretaryDF[['Business Name', 'Address', 'City', 'Zip', 'State', 'Description of Services']] secretaryDF = secretaryDF.rename(index=str, columns={'Description of Services': 'Industry'}) # create a more uniform ID businessIDs = [] for index, row in secretaryDF.iterrows(): busName = row['Business Name'] cleanedText = busName.upper().strip().replace(' ','').replace('.','').replace(',','').replace('-','') businessIDs.append(cleanedText) secretaryDF['B_ID'] = pd.Series(businessIDs, index=secretaryDF.index) # clean up massHousing dataset massHousingDF['Zip'] = massHousingDF['Zip'].apply(lambda zipCode: zipCode[:5]) massHousingDF = massHousingDF.loc[massHousingDF['Ownership and Certification Information'].str.contains('Ownership: Minority')] massHousingDF = massHousingDF[['Business Name', 'Address', 'City', 'Zip', 'State', 'Primary Trade', 'Primary Other/Consulting Description']] businessIDs = [] for index, row in massHousingDF.iterrows(): busName = row['Business Name'] cleanedText = busName.upper().strip().replace(' ','').replace('.','').replace(',','').replace('-','') businessIDs.append(cleanedText) if (row['Primary Trade'] == 'Other: Specify' or row['Primary Trade'] == 'Consultant: Specify'): row['Primary Trade'] = row['Primary Other/Consulting Description'] massHousingDF['B_ID'] = pd.Series(businessIDs, index=massHousingDF.index) massHousingDF = massHousingDF.rename(index=str, columns={'Primary Trade': 'Industry'}) massHousingDF = massHousingDF.drop(columns=['Primary Other/Consulting Description']) # merge and create masterList preMasterList = pd.merge(massHousingDF, secretaryDF, how='outer', on=['B_ID', 'City', 'Zip']) preDict = {'B_ID': [], 'Business Name': [], 'Address': [], 'City': [], 'Zip': [], 'State': [], 'Industry': []} for index, row in 
preMasterList.iterrows(): desc = row['Industry_x'] preDict['B_ID'].append(row['B_ID']) preDict['City'].append(row['City']) preDict['Zip'].append(row['Zip']) if pd.isnull(desc): preDict['Business Name'].append(row['Business Name_y']) preDict['State'].append(row['State_y']) preDict['Address'].append(row['Address_y']) preDict['Industry'].append(row['Industry_y']) else: preDict['Business Name'].append(row['Business Name_x']) preDict['State'].append(row['State_x']) preDict['Address'].append(row['Address_x']) preDict['Industry'].append(row['Industry_x']) masterList =
pd.DataFrame(preDict)
pandas.DataFrame
""" Tools for reading/writing BIDS data files. """ from os.path import join import warnings import json import numpy as np import pandas as pd from bids.utils import listify from .entities import NodeIndex from .variables import SparseRunVariable, DenseRunVariable, SimpleVariable BASE_ENTITIES = ['subject', 'session', 'task', 'run'] ALL_ENTITIES = BASE_ENTITIES + ['datatype', 'suffix', 'acquisition'] def load_variables(layout, types=None, levels=None, skip_empty=True, dataset=None, scope='all', **kwargs): """A convenience wrapper for one or more load_*_variables() calls. Parameters ---------- layout : :obj:`bids.layout.BIDSLayout` BIDSLayout containing variable files. types : str or list Types of variables to retrieve. All valid values reflect the filename stipulated in the BIDS spec for each kind of variable. Valid values include: 'events', 'physio', 'stim', 'scans', 'participants', 'sessions', and 'regressors'. levels : str or list Optional level(s) of variables to load. Valid values are 'run', 'session', 'subject', or 'dataset'. This is simply a shorthand way to specify types--e.g., 'run' will be converted to types=['events', 'physio', 'stim', 'regressors']. skip_empty : bool Whether or not to skip empty Variables (i.e., where there are no rows/records in a file after applying any filtering operations like dropping NaNs). dataset : NodeIndex An existing NodeIndex container to store the loaded data in. Can be used to iteratively construct a dataset that contains otherwise heterogeneous sets of variables. If None, a new NodeIndex is used. scope : str or list The scope of the space to search for variables. See docstring for BIDSLayout for details and valid predefined values. kwargs : dict Optional keyword arguments to pass onto the individual load_*_variables() calls. Returns ------- A NodeIndex instance. Examples -------- >>> load_variables(layout, ['events', 'physio'], subject='01') # returns all variables stored in _events.tsv and _physio.tsv.gz files # for runs that belong to subject with id '01'. """ TYPES = ['events', 'physio', 'stim', 'scans', 'participants', 'sessions', 'regressors'] types = listify(types) if types is None: if levels is not None: types = [] lev_map = { 'run': ['events', 'physio', 'stim', 'regressors'], 'session': ['scans'], 'subject': ['sessions'], 'dataset': ['participants'] } [types.extend(lev_map[l.lower()]) for l in listify(levels)] else: types = TYPES bad_types = set(types) - set(TYPES) if bad_types: raise ValueError("Invalid variable types: %s" % bad_types) dataset = dataset or NodeIndex() run_types = list({'events', 'physio', 'stim', 'regressors'} - set(types)) type_flags = {t: False for t in run_types} if len(type_flags) < 4: _kwargs = kwargs.copy() _kwargs.update(type_flags) dataset = _load_time_variables(layout, dataset, scope=scope, **_kwargs) for t in ({'scans', 'sessions', 'participants'} & set(types)): kwargs.pop('suffix', None) # suffix is always one of values aboves dataset = _load_tsv_variables(layout, t, dataset, scope=scope, **kwargs) return dataset def _load_time_variables(layout, dataset=None, columns=None, scan_length=None, drop_na=True, events=True, physio=True, stim=True, regressors=True, skip_empty=True, scope='all', **selectors): """Loads all variables found in *_events.tsv files and returns them as a BIDSVariableCollection. Parameters ---------- layout : :obj:`bids.layout.BIDSLayout` A BIDSLayout to scan. dataset : NodeIndex A BIDS NodeIndex container. If None, a new one is initialized. 
columns : list Optional list of names specifying which columns in the event files to read. By default, reads all columns found. scan_length : float Optional duration of runs (in seconds). By default, this will be extracted from the BOLD image. However, in cases where the user doesn't have access to the images (e.g., because only file handles are locally available), a fixed duration can be manually specified as a fallback. drop_na : bool If True, removes all events where amplitude is n/a. If False, leaves n/a values intact. Note that in the latter case, transformations that requires numeric values may fail. events : bool If True, extracts variables from events.tsv files. physio : bool If True, extracts variables from _physio files. stim : bool If True, extracts variables from _stim files. skip_empty : bool Whether or not to skip empty Variables (i.e., where there are no rows/records in a file, or all onsets, durations, and amplitudes are 0). scope : str or list The scope of the space to search for variables. See docstring for BIDSLayout for details and valid predefined values. selectors : dict Optional keyword arguments passed on to the BIDSLayout instance's get() method; can be used to constrain which data are loaded. Returns ------- A NodeIndex instance. """ # Extract any non-keyword arguments selectors = selectors.copy() if dataset is None: dataset = NodeIndex() selectors['datatype'] = 'func' selectors['suffix'] = 'bold' images = layout.get(return_type='object', extension='nii.gz', scope=scope, **selectors) if not images: raise ValueError("No functional images that match criteria found.") # Main loop over images for img_obj in images: entities = img_obj.entities img_f = img_obj.path # Run is not mandatory, but we need a default for proper indexing if 'run' in entities: entities['run'] = int(entities['run']) tr = layout.get_metadata(img_f, scope=scope)['RepetitionTime'] # Get duration of run: first try to get it directly from the image # header; if that fails, try to get NumberOfVolumes from the # run metadata; if that fails, look for a scan_length argument. try: import nibabel as nb img = nb.load(img_f) duration = img.shape[3] * tr except Exception as e: if scan_length is not None: duration = scan_length else: msg = ("Unable to extract scan duration from one or more " "BOLD runs, and no scan_length argument was provided " "as a fallback. Please check that the image files are " "available, or manually specify the scan duration.") raise ValueError(msg) # We don't want to pass all the image file's entities onto get_node(), # as there can be unhashable nested slice timing values, and this also # slows down querying unnecessarily. Instead, pick out files only based # on the core BIDS entities and any entities explicitly passed as # selectors. # TODO: one downside of this approach is the stripped entities also # won't be returned in the resulting node due to the way things are # implemented. Consider adding a flag to control this. select_on = {k: v for (k, v) in entities.items() if k in BASE_ENTITIES or k in selectors} # If a matching node already exists, return it result = dataset.get_nodes('run', select_on) if result: if len(result) > 1: raise ValueError("More than one existing Node matches the " "specified entities! You may need to pass " "additional selectors to narrow the search.") return result[0] # Otherwise create a new node and use that. # We first convert any entity values that are currently collections to # JSON strings to prevent nasty hashing problems downstream. 
Note that # isinstance() isn't as foolproof as actually trying to hash the # value, but the latter is likely to be slower, and since values are # coming from JSON or filenames, there's no real chance of encountering # anything but a list or dict. entities = { k: (json.dumps(v) if isinstance(v, (list, dict)) else v) for (k, v) in entities.items() } run = dataset.create_node('run', entities, image_file=img_f, duration=duration, repetition_time=tr) run_info = run.get_info() # Process event files if events: dfs = layout.get_nearest( img_f, extension='tsv', suffix='events', all_=True, full_search=True, ignore_strict_entities=['suffix', 'extension']) for _data in dfs: _data = pd.read_csv(_data, sep='\t') if 'amplitude' in _data.columns: if (_data['amplitude'].astype(int) == 1).all() and \ 'trial_type' in _data.columns: msg = ("Column 'amplitude' with constant value 1 " "is unnecessary in event files; ignoring it.") _data = _data.drop('amplitude', axis=1) else: msg = ("Column name 'amplitude' is reserved; " "renaming it to 'amplitude_'.") _data = _data.rename( columns={'amplitude': 'amplitude_'}) warnings.warn(msg) _data = _data.replace('n/a', np.nan) # Replace BIDS' n/a _data = _data.apply(pd.to_numeric, errors='ignore') _cols = columns or list(set(_data.columns.tolist()) - {'onset', 'duration'}) # Construct a DataFrame for each extra column for col in _cols: df = _data[['onset', 'duration']].copy() df['amplitude'] = _data[col].values # Add in all of the run's entities as new columns for # index for entity, value in entities.items(): if entity in ALL_ENTITIES: df[entity] = value if drop_na: df = df.dropna(subset=['amplitude']) if df.empty: continue var = SparseRunVariable( name=col, data=df, run_info=run_info, source='events') run.add_variable(var) # Process confound files if regressors: sub_ents = {k: v for k, v in entities.items() if k in BASE_ENTITIES} confound_files = layout.get(suffix='regressors', scope=scope, **sub_ents) for cf in confound_files: _data = pd.read_csv(cf.path, sep='\t', na_values='n/a') if columns is not None: conf_cols = list(set(_data.columns) & set(columns)) _data = _data.loc[:, conf_cols] for col in _data.columns: sr = 1. / run.repetition_time var = DenseRunVariable(name=col, values=_data[[col]], run_info=run_info, source='regressors', sampling_rate=sr) run.add_variable(var) # Process recordinging files rec_types = [] if physio: rec_types.append('physio') if stim: rec_types.append('stim') if rec_types: rec_files = layout.get_nearest( img_f, extension='tsv.gz', all_=True, suffix=rec_types, ignore_strict_entities=['suffix', 'extension'], full_search=True) for rf in rec_files: metadata = layout.get_metadata(rf) if not metadata: raise ValueError("No .json sidecar found for '%s'." % rf) data =
pd.read_csv(rf, sep='\t')
pandas.read_csv
#!/usr/bin/env python # coding: utf-8 # ### Bits and pieces for Shop Env Monitor # # Source: thingspeak # In[183]: import json import thingspeak as thingspeak import pandas as pd import numpy as np import datetime import urllib.request today = datetime.datetime.utcnow().strftime('%Y-%m-%dT00:00:00Z') yesterday = (datetime.datetime.utcnow()-datetime.timedelta(days=1)).strftime('%Y-%m-%dT00:00:00Z') sbell = thingspeak.TSAccount('https://api.thingspeak.com/','869L0PHK8GKAIIYQ') jsonout = json.loads("{}") jsonout.update({'datetime':datetime.datetime.utcnow().timestamp()}) jsonout.update({ "messages": "Time is when last downloaded, not time of last measurement."}) jsonout.update({ "days": datetime.datetime.utcnow().day}) ### cellar sbellc = thingspeak.TSChannel(acc_host_addr='https://api.thingspeak.com/',api_key='<KEY>' ,ch_id=1037066) dt11 = sbellc.get_a_channel_field_feed(['field1','field2'],parameters={'minutes':2}) dt11_df = pd.DataFrame(dt11['feeds']) dt11_df = dt11_df.set_index(pd.DatetimeIndex(dt11_df['created_at'])) dt11_df['field1'] = dt11_df['field1'].astype('float64') dt11_df['field2'] = dt11_df['field2'].astype('float64') dt11_df.rename(columns = {'field1':'temperature','field2':'humidity'},inplace = True) if dt11_df['temperature'].mean() <= 10: jsonout.update({"Temp_Cellar_Alert": "alert alert-info"}) elif (dt11_df['temperature'].mean() > 10) and (dt11_df['temperature'].mean() < 20): jsonout.update({"Temp_Cellar_Alert": "alert alert-warning"}) elif (dt11_df['temperature'].mean() >= 20): jsonout.update({"Temp_Cellar_Alert": "alert alert-danger"}) else: jsonout.update({"Temp_Cellar_Alert": ""}) jsonout.update({"Temp_Cellar": dt11_df['temperature'].mean()}) ### ### shop sbellc = thingspeak.TSChannel(acc_host_addr='https://api.thingspeak.com/',api_key='<KEY>' ,ch_id=843357) bmp = sbellc.get_a_channel_field_feed(['field3','field4'],parameters={'minutes':15}) bmp_df =
pd.DataFrame(bmp['feeds'])
pandas.DataFrame
#!/usr/bin/env python # -*- coding:utf-8 -*- """ Date: 2021/9/23 15:38 Desc: Drewry集装箱指数 https://www.drewry.co.uk/supply-chain-advisors/supply-chain-expertise/world-container-index-assessed-by-drewry https://infogram.com/world-container-index-1h17493095xl4zj """ import pandas as pd import requests from bs4 import BeautifulSoup from akshare.utils import demjson def drewry_wci_index(symbol: str = "composite") -> pd.DataFrame: """ Drewry 集装箱指数 https://infogram.com/world-container-index-1h17493095xl4zj :return: choice of {"composite", "shanghai-rotterdam", "rotterdam-shanghai", "shanghai-los angeles", "los angeles-shanghai", "shanghai-genoa", "new york-rotterdam", "rotterdam-new york"} :type: str :return: Drewry 集装箱指数 :rtype: pandas.DataFrame """ symbol_map = { "composite": 0, "shanghai-rotterdam": 1, "rotterdam-shanghai": 2, "shanghai-los angeles": 3, "los angeles-shanghai": 4, "shanghai-genoa": 5, "new york-rotterdam": 6, "rotterdam-new york": 7, } url = "https://infogram.com/world-container-index-1h17493095xl4zj" r = requests.get(url) soup = BeautifulSoup(r.text, "lxml") data_text = soup.find_all("script")[-5].string.strip("window.infographicData=")[:-1] data_json = demjson.decode(data_text) temp_df = pd.DataFrame(data_json["elements"][2]["data"][symbol_map[symbol]]) temp_df = temp_df.iloc[1:, :] temp_df.columns = ["date", "wci"] day = temp_df["date"].str.split("-", expand=True).iloc[:, 0].str.strip() month = temp_df["date"].str.split("-", expand=True).iloc[:, 1].str.strip() year = temp_df["date"].str.split("-", expand=True).iloc[:, 2].str.strip() temp_df["date"] = day + "-" + month + "-" + year temp_df["date"] =
pd.to_datetime(temp_df["date"])
pandas.to_datetime
# -*- coding: utf-8 -*- import pandas as pd import numpy as np from src import ( FEATURES_PRICE_MODEL_Q1, FEATURES_REVENUE_MODEL_Q1, PATH_DAILY_REVENUE, PATH_LISTINGS, REFERENCE_DATE, ) from src.features.build_features import ( build_daily_features, build_date_features, build_listings_features, ) def load_data(): """Loads the datasets to be used on analysis. Returns ------- tuple Returns respectively the listings and the daily revenue datasets. """ # Importing Datasets df_listings = pd.read_csv(PATH_LISTINGS) df_daily_revenue = pd.read_csv(PATH_DAILY_REVENUE) # Data Cleaning df_listings = clean_listings_dataset(df_listings) df_daily_revenue = clean_daily_revenue_dataset(df_daily_revenue) # Building Features df_listings = build_listings_features(df_listings) df_daily_revenue = build_daily_features(df_daily_revenue) return df_listings, df_daily_revenue def clean_listings_dataset(df_listings: pd.DataFrame): """Data cleaning and casting process for listings dataset. Parameters ---------- df_listings : pd.DataFrame Pandas dataframe with information about listings. Returns ------- pd.DataFrame Returns the listing dataframe with the casting and missing treatment made. """ df_listings["Comissão"] = ( df_listings["Comissão"].str.replace(",", ".").astype(float) ) df_listings["Cama Casal"] = ( df_listings["Cama Casal"] .replace("Quantidade de Camas Casal", np.nan) .str.replace(",", ".") .astype(float) .clip(-128, 127) .astype("Int8") ) df_listings["Cama Solteiro"] = ( df_listings["Cama Solteiro"] .replace("Quantidade de Camas Solteiro", np.nan) .str.replace(",", ".") .astype(float) .clip(-128, 127) .astype("Int8") ) df_listings["Cama Queen"] = ( df_listings["Cama Queen"] .replace("Quantidade de Camas Queen", np.nan) .str.replace(",", ".") .astype(float) .clip(-128, 127) .astype("Int8") ) df_listings["Cama King"] = ( df_listings["Cama King"] .replace("Quantidade de Camas King", np.nan) .str.replace(",", ".") .astype(float) .clip(-128, 127) .astype("Int8") ) df_listings["Sofá Cama Solteiro"] = ( df_listings["Sofá Cama Solteiro"] .replace("Quantidade de Sofás Cama Solteiro", np.nan) .str.replace(",", ".") .astype(float) .clip(-128, 127) .astype("Int8") ) df_listings["Travesseiros"] = ( df_listings["Travesseiros"] .str.replace(",", ".") .astype(float) .clip(-128, 127) .astype("Int8") ) df_listings["Banheiros"] = ( df_listings["Banheiros"] .replace("Banheiros", np.nan) .str.replace(",", ".") .astype(float) .round(0) .clip(-128, 127) .astype("Int8") ) df_listings["Taxa de Limpeza"] = ( df_listings["Taxa de Limpeza"].str.replace(",", ".").astype(float) ) df_listings["Capacidade"] = ( df_listings["Capacidade"] .replace("Capacidade", np.nan) .str.replace(",", ".") .astype(float) .clip(-128, 127) .astype("Int8") ) df_listings["Data Inicial do contrato"] = pd.to_datetime( df_listings["Data Inicial do contrato"], dayfirst=True ) return df_listings def clean_daily_revenue_dataset(df_daily_revenue: pd.DataFrame): """Data cleaning and casting process for daily revenue dataset. Parameters ---------- df_daily_revenue : pd.DataFrame Pandas dataframe with information about daily revenue. Returns ------- pd.DataFrame Returns the daily revenue dataframe with the casting and missing treatment made. 
""" df_daily_revenue["date"] = pd.to_datetime(df_daily_revenue["date"]) df_daily_revenue["occupancy"] = ( df_daily_revenue["occupancy"].clip(0, 1).astype("Int8") ) df_daily_revenue["blocked"] = df_daily_revenue["blocked"].clip(0, 1).astype("Int8") df_daily_revenue["creation_date"] = pd.to_datetime( df_daily_revenue["creation_date"] ) df_daily_revenue = df_daily_revenue.loc[ df_daily_revenue["date"] <= pd.to_datetime(REFERENCE_DATE) ] return df_daily_revenue def make_predict_dataset_price_q1(): """Creates the dataset to apply the price model to answer question 1. Returns ------- pd.DataFrame A pandas series with the dataframe ready to be inputed on the preprocessing pipeline and model predict method. """ data_pred = pd.DataFrame({}) data_pred["date"] = ( pd.date_range( start=pd.to_datetime("2020-03-01"), end=pd.to_datetime("2020-03-31") ).to_list() + pd.date_range( start=
pd.to_datetime("2021-03-01")
pandas.to_datetime
# -*- python -*- # -*- coding utf-8 -*- # # This file is part of GDSCTools software # # Copyright (c) 2015 - Wellcome Trust Sanger Institute # All rights reserved # Copyright (c) 2016 - Institut Pasteur # All rights reserved # # File author(s): <NAME> <<EMAIL>> # File author(s): <NAME> <<EMAIL>> # # Distributed under the BSD 3-Clause License. # See accompanying file LICENSE.txt distributed with this software # # website: http://github.com/CancerRxGene/gdsctools # ############################################################################## """Look for IC50 vs and genomic features associations using Regression methods""" import itertools import warnings import pandas as pd import pylab import numpy as np from easydev import Progress from gdsctools.models import BaseModels from gdsctools.boxswarm import BoxSwarm from sklearn.linear_model import enet_path from sklearn import preprocessing from sklearn import model_selection from sklearn import linear_model # must use the module rather than classes to __all__ = ["Regression", 'GDSCRidge', "GDSCLasso", "GDSCElasticNet", "RegressionCVResults"] """book keeping from statsmodels.formula.api import OLS if self.settings.regression_method == 'ElasticNet': self.data_lm = OLS(odof.Y, df.values).fit_regularized( alpha=self.settings.regression_alpha, L1_wt=self.settings.regression_L1_wt) elif self.settings.regression_method == 'OLS': self.data_lm = OLS(odof.Y, df.values).fit() elif self.settings.regression_method == 'Ridge': self.data_lm = OLS(odof.Y, df.values).fit_regularized( alpha=self.settings.regression_alpha, L1_wt=0) elif self.settings.regression_method == 'Lasso': self.data_lm = OLS(odof.Y, df.values).fit_regularized( alpha=self.settings.regression_alpha, L1_wt=1) """ class RegressionCVResults(object): """Simple data structure to hold some results of the regression analysis - :attr:`model` - :attr:`kfold`: number of folds used - :attr:`Rp` - :attr:`alpha`: best alpha parameter - :attr:`ln_alpha` best alpha parameter (log scale) """ def __init__(self, model, Rp, kfold=None): self.model = model self.Rp = Rp self.kfold = kfold def _get_alpha(self): return self.model.alpha_ alpha = property(_get_alpha) def _get_ln_alpha(self): return pylab.log(self.alpha) ln_alpha = property(_get_ln_alpha) def _get_coefficients(self): return self.model.coef_ coefficients = property(_get_coefficients) def __str__(self): txt = "Best alpha on %s folds: %s (%.2f in log scale); Rp=%s" %\ (self.kfold, self.alpha, self.ln_alpha, self.Rp) return txt class Regression(BaseModels): """Base class for all Regression analysis In the :class:`gdsctools.anova.ANOVA` case, the regression is based on the OLS method and is computed for a given drug and a given feature (:term:`ODOF`). Then, the analysis is repeated across all features for a given drug (:term:`ODAF`) and finally extended to all drugs (:term:`ADAF`). So, there is one test for each combination of drug and feature. Here, all features for a given drug are taken together to perform a Regression analysis (:term:`ODAF`). The regression algorithm implemented so far are: - Ridge - Lasso - ElasticNet - LassoLars Based on tools from the scikit-learn library. """ def __init__(self, ic50, genomic_features=None, verbose=False): """.. rubric:: Constructor :param ic50: an IC50 file :param genomic_features: a genomic feature file see :ref:`data` for help on the input data formats. 
""" super(Regression, self).__init__(ic50, genomic_features, verbose=verbose, set_media_factor=False) self.scale = False def _get_one_drug_data(self, name, randomize_Y=False): """Returns X and Y for a given drug, dropping NAs :param name: drug name :param randomize_Y: randomize Y - drops NA - drops TISSUE_FACTOR - drops MSI factor """ Y = self.ic50.df[name] Y.dropna(inplace=True) X = self.features.df.loc[Y.index].copy() try:X = X.drop('TISSUE_FACTOR', axis=1) except:pass try: X = X.drop('MSI_FACTOR', axis=1) except:pass if self.scale is True: columns = X.columns # cast is essential here otherwise ValueError is raised X = preprocessing.scale(X.astype(float)) X = pd.DataFrame(X, columns=columns) if randomize_Y: Y = Y.copy() pylab.shuffle(Y.values) return X, Y def _fit_model(self, drug_name, model): """call fit method of a model given a drug name Save the current X, Y, model fitter in _X, _Y and _model attributes """ X, Y = self._get_one_drug_data(drug_name) model.fit(X, Y) return model def plot_importance(self, drug_name, model=None, fontsize=11, max_label_length=35, orientation="vertical"): """Plot the absolute weights found by a fittd model. :param str drug_name: :param model: a model :param int fontsize: (defaults to 11) :param max_label_length: 35 by default :param orientation: orientation of the plot (vertical or horizontal) :return: the dataframe with the weights (may be empty) .. note:: if no weights are different from zeros, no plots are created. """ X, Y = self._get_one_drug_data(drug_name) if model is None: model = self.get_best_model(drug_name) model.fit(X, Y) df =
pd.DataFrame({'name': X.columns, 'weight': model.coef_})
pandas.DataFrame
import dash from dash import dcc, html, dash_table, callback from dash.dependencies import Input, Output import dash_bootstrap_components as dbc import plotly.graph_objects as go import plotly.graph_objects as go import pandas as pd df = pd.read_csv("Amazon.csv") external_stylesheets = [dbc.themes.LUX] dash_app = dash.Dash(__name__, external_stylesheets=external_stylesheets) app = dash_app.server # create datatable function from dataframe def create_table(dataframe, max_rows=10): # round the values to 2 decimal places dataframe = dataframe.round(2) # Convert the High, Low, Open, Close, Adj Close columns to dollar values dataframe["High"] = dataframe["High"].map("${:,.2f}".format) dataframe["Low"] = dataframe["Low"].map("${:,.2f}".format) dataframe["Open"] = dataframe["Open"].map("${:,.2f}".format) dataframe["Close"] = dataframe["Close"].map("${:,.2f}".format) dataframe["Adj Close"] = dataframe["Adj Close"].map("${:,.2f}".format) # Sort dataframe by newest date first: dataframe = dataframe.sort_values(by="Date", ascending=False) table = dash_table.DataTable( data=dataframe.to_dict("records"), columns=[{"name": i, "id": i} for i in dataframe.columns], style_table={"overflowX": "scroll"}, # style_cell={"textAlign": "center"}, style_header={"backgroundColor": "white", "fontWeight": "bold"}, style_data_conditional=[ {"if": {"row_index": "odd"}, "backgroundColor": "rgb(248, 248, 248)"} ], style_as_list_view=True, style_cell={ "height": "auto", "minWidth": "0px", "maxWidth": "180px", "width": "180px", "whiteSpace": "normal", }, fixed_rows={"headers": True, "data": 0}, filter_action="native", sort_action="native", sort_mode="multi", ) return table def create_candlestick(df): fig = go.Figure() fig.add_trace( go.Candlestick( x=df["Date"], open=df["Open"], high=df["High"], low=df["Low"], close=df["Close"], ) ) return fig dash_app.layout = html.Div( [ dcc.Store(id="memory", data=df.to_dict("records")), dbc.Container( [ dbc.Row( [ dbc.Col( html.H1("Amazon Dashboard", className="display-3"), className="mb-2", ) ] ), dbc.Row( [ dbc.Col( html.H6(children="Visualising the Amazon Stock Price"), className="mb-4", ) ] ), dbc.Row( [ dbc.Col( dbc.Card( html.H3( children="Latest Update", className="text-center text-light bg-dark", ), body=True, color="dark", ), className="mb-4", ) ] ), dcc.RadioItems( id="table_type", options=[], value="Condensed table", labelStyle={"display": "inline-block"}, ), html.Div(id="table-output"), dbc.Row( [ dbc.Col( dbc.Card( html.H3( children="Candlestick Chart", className="text-center text-light bg-dark", ), body=True, color="dark", ), className="mt-4 mb-5", ) ] ), html.Div(id="candlestick-output"), ] ), ] ) @callback( Output("table-output", "children"), Output("candlestick-output", "children"), Input("memory", "data"), ) def update_page(data): if data is None: return html.Div(), html.Div() else: dataframe =
pd.DataFrame.from_dict(data)
pandas.DataFrame.from_dict
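For reference, a minimal sketch of pandas.DataFrame.from_dict as completed above; the records list stands in for what dcc.Store would hold (illustrative ticker rows, not real data).

import pandas as pd

records = [{"Date": "2024-01-02", "Close": 151.90},
           {"Date": "2024-01-03", "Close": 149.50}]

# With the default orient, a list of record dicts (the shape that
# df.to_dict("records") produces) is accepted and becomes one row per dict.
frame = pd.DataFrame.from_dict(records)
print(frame)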
''' MIT License Copyright (c) 2020 Minciencia Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' import requests import utils import pandas as pd import datetime as dt import numpy as np from itertools import groupby import time class vacunacion: def __init__(self,output,indicador): self.output = output self.indicador = indicador self.my_files = { 'vacunacion_fabricante': 'https://raw.githubusercontent.com/IgnacioAcunaF/covid19-vaccination/master/output/chile-vaccination-type.csv', 'vacunacion_region': 'https://raw.githubusercontent.com/IgnacioAcunaF/covid19-vaccination/master/output/chile-vaccination.csv', 'vacunacion_edad': 'https://github.com/IgnacioAcunaF/covid19-vaccination/raw/master/output/chile-vaccination-ages.csv', 'vacunacion_grupo': 'https://github.com/IgnacioAcunaF/covid19-vaccination/raw/master/output/chile-vaccination-groups.csv', } self.path = '../input/Vacunacion' def get_last(self): ## baja el archivo que corresponde if self.indicador == 'fabricante': print('Retrieving files') print('vacunacion_fabricante') r = requests.get(self.my_files['vacunacion_fabricante']) content = r.content csv_file = open(self.path + '/' + 'vacunacion_fabricante' + '.csv', 'wb') csv_file.write(content) csv_file.close() elif self.indicador == 'campana': print('Retrieving files') print('vacunacion_region') r = requests.get(self.my_files['vacunacion_region']) content = r.content csv_file = open(self.path + '/' + 'vacunacion_region' + '.csv', 'wb') csv_file.write(content) csv_file.close() elif self.indicador == 'edad': print('Retrieving files') print('vacunacion_edad') r = requests.get(self.my_files['vacunacion_edad']) content = r.content csv_file = open(self.path + '/' + 'vacunacion_edad' + '.csv', 'wb') csv_file.write(content) csv_file.close() elif self.indicador == 'caracteristicas_del_vacunado': print('Retrieving files') print('vacunacion_grupo') r = requests.get(self.my_files['vacunacion_grupo']) content = r.content csv_file = open(self.path + '/' + 'vacunacion_grupo' + '.csv', 'wb') csv_file.write(content) csv_file.close() ## selecciona el archivo que corresponde if self.indicador == 'fabricante': print('reading files') print('vacunacion_fabricante') self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_fabricante.csv') elif self.indicador == 'campana': print('reading files') print('vacunacion_region') self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_region.csv') elif self.indicador == 'edad': print('reading files') print('vacunacion_edad') self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_edad.csv') elif 
self.indicador == 'caracteristicas_del_vacunado': print('reading files') print('vacunacion_grupo') self.last_added = pd.read_csv('../input/Vacunacion/vacunacion_grupo.csv') elif self.indicador == 'vacunas_region': print('reading files') print('vacunacion por region por dia') aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1.csv', sep=';', encoding='ISO-8859-1') aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1_2.csv', sep=';', encoding='ISO-8859-1') self.last_added = pd.concat([aux, aux_2], ignore_index=True) elif self.indicador == 'vacunas_comuna': print('reading files') print('vacunacion por comuna por dia') aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1.csv', sep=';', encoding='ISO-8859-1') aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_1_2.csv', sep=';', encoding='ISO-8859-1') self.last_added = pd.concat([aux, aux_2], ignore_index=True) elif self.indicador == 'vacunas_edad_region': print('reading files') print('vacunacion por region por edad') aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2.csv', sep=';', encoding='ISO-8859-1') aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2_2.csv', sep=';', encoding='ISO-8859-1') self.last_added = pd.concat([aux, aux_2], ignore_index=True) elif self.indicador == 'vacunas_edad_sexo': print('reading files') print('vacunacion por sexo por edad') aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_3.csv', sep=';', encoding='ISO-8859-1') aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_3_2.csv', sep=';', encoding='ISO-8859-1') self.last_added = pd.concat([aux, aux_2], ignore_index=True) print('vacunacion por sexo por edad y FECHA') aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_6.csv', sep=';', encoding='ISO-8859-1') aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_6_2.csv', sep=';', encoding='ISO-8859-1') self.last_edad_fecha = pd.concat([aux, aux_2], ignore_index=True) elif self.indicador == 'vacunas_prioridad': print('reading files') print('vacunacion por grupos prioritarios') self.last_added = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_8.csv', sep=';', encoding='ISO-8859-1') # aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_8_2.csv', sep=';', encoding='ISO-8859-1') # self.last_added = pd.concat([aux, aux_2], ignore_index=True) elif self.indicador == 'vacunas_comuna_edad': print('reading files') print('vacunacion por comuna por edad') aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2.csv', sep=';', encoding='ISO-8859-1') aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_2_2.csv', sep=';', encoding='ISO-8859-1') self.last_added = pd.concat([aux, aux_2], ignore_index=True) elif self.indicador == 'vacunas_establecimiento': print('reading files') print('vacunacion por establecimiento') aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7.csv', sep=';', encoding='ISO-8859-1') aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7_2.csv', sep=';', encoding='ISO-8859-1') self.last_added = pd.concat([aux, aux_2], ignore_index=True) elif self.indicador == 'vacunas_fabricante': print('reading files') print('vacunacion por fabricante y fecha') aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7.csv', sep=';', encoding='ISO-8859-1') aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_7_2.csv', sep=';', encoding='ISO-8859-1') self.last_added = pd.concat([aux, aux_2], ignore_index=True) elif self.indicador == 'vacunas_fabricante_edad': print('reading files') print('vacunacion por fabricante y edad') aux = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_9.csv', sep=';', encoding='ISO-8859-1') 
aux_2 = pd.read_csv('../input/Vacunacion/WORK_ARCHIVO_9_2.csv', sep=';', encoding='ISO-8859-1') self.last_added = pd.concat([aux, aux_2], ignore_index=True) def last_to_csv(self): if self.indicador == 'fabricante': ## campana por fabricante self.last_added.rename(columns={'Dose': 'Dosis'}, inplace=True) self.last_added.rename(columns={'Type': 'Fabricante'}, inplace=True) self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera", "Second": "Segunda", "Third": "Tercera", "Fourth": "Cuarta", "Unique": "Unica" }) identifiers = ['Fabricante', 'Dosis'] variables = [x for x in self.last_added.columns if x not in identifiers] self.last_added = self.last_added[identifiers + variables] self.last_added.to_csv(self.output + '.csv', index=False) df_t = self.last_added.T df_t.to_csv(self.output + '_t.csv', header=False) df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'], value_name='Cantidad') df_std.to_csv(self.output + '_std.csv', index=False) elif self.indicador == 'campana': ## campana por region self.last_added.rename(columns={'Dose': 'Dosis'}, inplace=True) utils.regionName(self.last_added) self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera", "Second": "Segunda", "Third": "Tercera", "Fourth": "Cuarta", "Unique": "Unica" }) identifiers = ['Region', 'Dosis'] variables = [x for x in self.last_added.columns if x not in identifiers] self.last_added = self.last_added[identifiers + variables] self.last_added.to_csv(self.output + '.csv', index=False) df_t = self.last_added.T df_t.to_csv(self.output + '_t.csv', header=False) df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'], value_name='Cantidad') df_std.to_csv(self.output + '_std.csv', index=False) elif self.indicador == 'edad': ## campana por edad self.last_added.rename(columns={'Dose': 'Dosis', 'Age':'Rango_etario'}, inplace=True) self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera", "Second": "Segunda" }) identifiers = ['Rango_etario', 'Dosis'] variables = [x for x in self.last_added.columns if x not in identifiers] self.last_added = self.last_added[identifiers + variables] self.last_added.to_csv(self.output + '.csv', index=False) df_t = self.last_added.T df_t.to_csv(self.output + '_t.csv', header=False) df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'], value_name='Cantidad') df_std.to_csv(self.output + '_std.csv', index=False) elif self.indicador == 'caracteristicas_del_vacunado': ## campana por caracter del vacunado self.last_added.rename(columns={'Dose': 'Dosis', 'Group':'Grupo'}, inplace=True) self.last_added["Dosis"] = self.last_added["Dosis"].replace({"First": "Primera", "Second": "Segunda" }) identifiers = ['Grupo', 'Dosis'] variables = [x for x in self.last_added.columns if x not in identifiers] self.last_added = self.last_added[identifiers + variables] self.last_added.to_csv(self.output + '.csv', index=False) df_t = self.last_added.T df_t.to_csv(self.output + '_t.csv', header=False) df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'], value_name='Cantidad') df_std.to_csv(self.output + '_std.csv', index=False) elif self.indicador == 'vacunas_region': self.last_added.rename(columns={'REGION_CORTO': 'Region', 'COD_COMUNA_FINAL': 'Comuna', 'FECHA_INMUNIZACION': 'Fecha', 'SUM_of_SUM_of_2aDOSIS': 'Segunda_comuna', 'SUM_of_SUM_of_1aDOSIS': 'Primera_comuna', 
'SUM_of_SUM_of_ÚnicaDOSIS':'Unica_comuna', 'SUM_of_4_Dosis':'Cuarta_comuna', 'SUM_of_Refuerzo_DOSIS':'Refuerzo_comuna'}, inplace=True) self.last_added = self.last_added.dropna(subset=['Fecha']) self.last_added['Fecha'] = pd.to_datetime(self.last_added['Fecha'],format='%d/%m/%Y').dt.strftime("%Y-%m-%d") self.last_added.sort_values(by=['Region','Fecha'], inplace=True) utils.regionName(self.last_added) regiones = pd.DataFrame(self.last_added['Region'].unique()) #transformar ## agrupar por comuna self.last_added['Primera'] = self.last_added.groupby(['Region','Fecha'])['Primera_comuna'].transform('sum') self.last_added['Segunda'] = self.last_added.groupby(['Region','Fecha'])['Segunda_comuna'].transform('sum') self.last_added['Unica'] = self.last_added.groupby(['Region', 'Fecha'])['Unica_comuna'].transform('sum') self.last_added['Refuerzo'] = self.last_added.groupby(['Region', 'Fecha'])['Refuerzo_comuna'].transform('sum') self.last_added['Cuarta'] = self.last_added.groupby(['Region', 'Fecha'])['Cuarta_comuna'].transform( 'sum') self.last_added = self.last_added[['Region','Fecha','Primera','Segunda','Unica','Refuerzo','Cuarta']] self.last_added.drop_duplicates(inplace=True) ##llenar fechas para cada region y crear total idx = pd.date_range(self.last_added['Fecha'].min(), self.last_added['Fecha'].max()) df = pd.DataFrame() total = pd.DataFrame(columns=['Region','Fecha','Primera','Segunda','Unica','Refuerzo','Cuarta']) total = utils.fill_in_missing_dates(total, 'Fecha', 0, idx) total["Region"] = total["Region"].replace({0: 'Total'}) for region in regiones[0]: df_region = self.last_added.loc[self.last_added['Region'] == region] df_region = utils.fill_in_missing_dates(df_region,'Fecha',0,idx) df_region["Region"] = df_region["Region"].replace({0:region}) total['Primera'] = df_region['Primera'] + total['Primera'] total['Segunda'] = df_region['Segunda'] + total['Segunda'] total['Unica'] = df_region['Unica'] + total['Unica'] total['Refuerzo'] = df_region['Refuerzo'] + total ['Refuerzo'] total['Cuarta'] = df_region['Cuarta'] + total['Cuarta'] df = df.append(df_region, ignore_index=True) total = total.append(df,ignore_index=True) total['Fecha'] = total['Fecha'].dt.strftime("%Y-%m-%d") self.last_added = total ##sumar totales self.last_added['Primera'] = pd.to_numeric(self.last_added['Primera']) self.last_added['Segunda'] = pd.to_numeric(self.last_added['Segunda']) self.last_added['Unica'] = pd.to_numeric(self.last_added['Unica']) self.last_added['Refuerzo'] = pd.to_numeric(self.last_added['Refuerzo']) self.last_added['Cuarta'] = pd.to_numeric(self.last_added['Cuarta']) self.last_added['Primera'] = self.last_added.groupby(['Region'])['Primera'].transform('cumsum') self.last_added['Segunda'] = self.last_added.groupby(['Region'])['Segunda'].transform('cumsum') self.last_added['Unica'] = self.last_added.groupby(['Region'])['Unica'].transform('cumsum') self.last_added['Refuerzo'] = self.last_added.groupby(['Region'])['Refuerzo'].transform('cumsum') self.last_added['Cuarta'] = self.last_added.groupby(['Region'])['Cuarta'].transform('cumsum') self.last_added['Total'] = self.last_added.sum(numeric_only=True, axis=1) ##transformar en input df = pd.DataFrame() regiones = pd.DataFrame(self.last_added['Region'].unique()) for region in regiones[0]: df_region = self.last_added.loc[self.last_added['Region'] == region] df_region.set_index('Fecha',inplace=True) df_region = df_region[['Primera','Segunda','Unica','Refuerzo','Cuarta']].T df_region.reset_index(drop=True, inplace=True) df = df.append(df_region, 
ignore_index=True) new_col = ['Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta','Primera', 'Segunda','Unica','Refuerzo','Cuarta', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta'] df.insert(0, column='Dosis', value=new_col) new_col = pd.DataFrame() for region in regiones[0]: col = [region,region,region,region,region] new_col = new_col.append(col, ignore_index=True) df.insert(0, column='Region', value=new_col) self.last_added = df identifiers = ['Region', 'Dosis'] variables = [x for x in self.last_added.columns if x not in identifiers] self.last_added = self.last_added[identifiers + variables] self.last_added.to_csv(self.output + '.csv', index=False) df_t = self.last_added.T df_t.to_csv(self.output + '_t.csv', header=False) df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Fecha'], value_name='Cantidad') df_std.to_csv(self.output + '_std.csv', index=False) df_std.to_json(self.output + '.json',orient='values',force_ascii=False) elif self.indicador == 'vacunas_edad_region': self.last_added.rename(columns={'NOMBRE_REGION': 'Region', 'COD_COMUNA': 'Comuna', 'EDAD_ANOS': 'Edad', 'POBLACION':'Poblacion', '2aDOSIS_RES': 'Segunda_comuna', '1aDOSIS_RES': 'Primera_comuna', '4aDOSIS':'Cuarta_comuna', 'Refuerzo_DOSIS':'Refuerzo_comuna', 'ÚnicaDOSIS':'Unica_comuna'}, inplace=True) self.last_added.sort_values(by=['Region', 'Edad'], inplace=True) utils.regionName(self.last_added) regiones = pd.DataFrame(self.last_added['Region'].unique()) # transformar ## agrupar por comuna self.last_added['Primera'] = self.last_added.groupby(['Region', 'Edad'])['Primera_comuna'].transform('sum') self.last_added['Segunda'] = self.last_added.groupby(['Region', 'Edad'])['Segunda_comuna'].transform('sum') self.last_added['Unica'] = self.last_added.groupby(['Region', 'Edad'])['Unica_comuna'].transform('sum') self.last_added['Refuerzo'] = self.last_added.groupby(['Region', 'Edad'])['Refuerzo_comuna'].transform('sum') self.last_added['Cuarta'] = self.last_added.groupby(['Region', 'Edad'])['Cuarta_comuna'].transform('sum') self.last_added['Poblacion'] = self.last_added.groupby(['Region','Edad'])['Poblacion'].transform('sum') self.last_added = self.last_added[['Region', 'Edad', 'Poblacion','Primera', 'Segunda','Unica','Refuerzo','Cuarta']] self.last_added.drop_duplicates(inplace=True) ##crear total df = pd.DataFrame() total = pd.DataFrame(columns=['Region', 'Edad','Poblacion','Primera', 'Segunda','Unica','Refuerzo','Cuarta']) total['Edad'] = list(range(15, 81)) total["Region"] = total["Region"].fillna('Total') for region in regiones[0]: df_region = self.last_added.loc[self.last_added['Region'] == region] df_region.reset_index(drop=True, inplace=True) total['Primera'] = total.Primera.fillna(0) + df_region.Primera.fillna(0) total['Segunda'] = total.Segunda.fillna(0) + df_region.Segunda.fillna(0) total['Unica'] = 
total.Unica.fillna(0) + df_region.Unica.fillna(0) total['Refuerzo'] = total.Refuerzo.fillna(0) + df_region.Refuerzo.fillna(0) total['Cuarta'] = total.Cuarta.fillna(0) + df_region.Cuarta.fillna(0) total['Poblacion'] = total.Poblacion.fillna(0) + df_region.Poblacion.fillna(0) df = df.append(df_region, ignore_index=True) total = total.append(df, ignore_index=True) self.last_added = total ##transformar en input df = pd.DataFrame() regiones = pd.DataFrame(self.last_added['Region'].unique()) for region in regiones[0]: df_region = self.last_added.loc[self.last_added['Region'] == region] df_region.set_index('Edad', inplace=True) df_region = df_region[['Primera', 'Segunda','Unica','Refuerzo','Cuarta']].T df_region.reset_index(drop=True, inplace=True) df = df.append(df_region, ignore_index=True) new_col = ['Primera', 'Segunda', 'Unica','Refuerzo','Cuarta','Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta', 'Primera', 'Segunda', 'Unica','Refuerzo','Cuarta'] df.insert(0, column='Dosis', value=new_col) new_col = pd.DataFrame() for region in regiones[0]: col = [region, region,region] new_col = new_col.append(col, ignore_index=True) df.insert(0, column='Region', value=new_col) self.last_added = df identifiers = ['Region','Dosis'] variables = [x for x in self.last_added.columns if x not in identifiers] self.last_added = self.last_added[identifiers + variables] self.last_added.to_csv(self.output + '.csv', index=False) df_t = self.last_added.T df_t.to_csv(self.output + '_t.csv', header=False) df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Edad'], value_name='Cantidad') df_std.to_csv(self.output + '_std.csv', index=False) df_std.to_json(self.output + '.json',orient='values',force_ascii=False) elif self.indicador == 'vacunas_edad_sexo': #Por región, totales self.last_added.rename(columns={'NOMBRE_REGION': 'Region', 'SEXO1': 'Sexo', 'EDAD_ANOS': 'Edad', 'POBLACION':'Poblacion', 'SUM_of_1aDOSIS': 'Primera', 'SUM_of_2aDOSIS': 'Segunda', 'SUM_of_ÚnicaDOSIS':'Unica', 'SUM_of_Refuerzo_DOSIS':'Refuerzo', 'SUM_of_4_Dosis':'Cuarta'}, inplace=True) self.last_added.sort_values(by=['Sexo','Edad'], inplace=True) self.last_added = self.last_added[['Sexo','Edad','Primera','Segunda','Unica','Refuerzo','Cuarta']] sexo = pd.DataFrame(self.last_added['Sexo'].unique()) ##crear total df = pd.DataFrame() for sex in sexo[0]: total = pd.DataFrame(columns=['Sexo', 'Edad', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta']) total['Edad'] = list(range(self.last_added.Edad.min(), self.last_added.Edad.max() + 1)) df_sex = self.last_added.loc[self.last_added['Sexo'] == sex] df_sex.reset_index(drop=True, inplace=True) df_sex.index = df_sex['Edad'] total.index = total['Edad'] total['Sexo'] = total.Sexo.fillna(sex) total['Primera'] = 
total.Primera.fillna(0) + df_sex.Primera.fillna(0) total['Segunda'] = total.Segunda.fillna(0) + df_sex.Segunda.fillna(0) total['Unica'] = total.Unica.fillna(0) + df_sex.Unica.fillna(0) total['Refuerzo'] = total.Refuerzo.fillna(0) + df_sex.Refuerzo.fillna(0) total['Cuarta'] = total.Cuarta.fillna(0) + df_sex.Cuarta.fillna(0) df = df.append(total, ignore_index=True) self.last_added = df ##transformar en input df = pd.DataFrame() sexo = pd.DataFrame(self.last_added['Sexo'].unique()) for sex in sexo[0]: df_sex = self.last_added.loc[self.last_added['Sexo'] == sex] df_sex.set_index('Edad', inplace=True) df_sex = df_sex[['Primera', 'Segunda','Unica','Refuerzo','Cuarta']].T df_sex.reset_index(drop=True, inplace=True) df = df.append(df_sex, ignore_index=True) new_col = ['Primera', 'Segunda','Unica','Refuerzo','Cuarta', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta', 'Primera', 'Segunda','Unica','Refuerzo','Cuarta'] df.insert(0, column='Dosis', value=new_col) new_col = pd.DataFrame() for sex in sexo[0]: col = [sex, sex,sex] new_col = new_col.append(col, ignore_index=True) df.insert(0, column='Sexo', value=new_col) self.last_added = df identifiers = ['Sexo','Dosis'] variables = [x for x in self.last_added.columns if x not in identifiers] self.last_added = self.last_added[identifiers + variables] self.last_added.to_csv(self.output + '.csv', index=False) df_t = self.last_added.T df_t.to_csv(self.output + '_t.csv', header=False) df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Edad'], value_name='Cantidad') df_std.to_csv(self.output + '_std.csv', index=False) df_std.to_json(self.output + '.json', orient='values', force_ascii=False) # Por fecha, totales self.last_edad_fecha.rename(columns={'FECHA_INMUNIZACION': 'Fecha', 'EDAD_ANOS': 'Edad', 'SUM_of_1aDOSIS': 'Primera', 'SUM_of_2aDOSIS': 'Segunda', 'SUM_of_SUM_of_ÚnicaDOSIS': 'Unica', 'SUM_of_Refuerzo_DOSIS':'Refuerzo', 'SUM_of_4aDOSIS':'Cuarta'}, inplace=True) self.last_edad_fecha['Fecha'] = pd.to_datetime(self.last_edad_fecha['Fecha'], format='%d/%m/%Y').dt.strftime("%Y-%m-%d") self.last_edad_fecha.sort_values(by=['Fecha', 'Edad'], inplace=True) self.last_edad_fecha.reset_index(drop=True,inplace=True) self.last_edad_fecha.dropna(subset=['Fecha'],inplace=True) columns_name = self.last_edad_fecha.columns.values maxSE = self.last_edad_fecha[columns_name[0]].max() minSE = self.last_edad_fecha[columns_name[0]].min() #print(minSE, maxSE) lenSE = (pd.to_datetime(maxSE) - pd.to_datetime(minSE)).days + 1 startdate = pd.to_datetime(minSE) date_list = pd.date_range(startdate, periods=lenSE).tolist() date_list = [dt.datetime.strftime(x, "%Y-%m-%d") for x in date_list] #print(date_list) self.last_edad_fecha['Total'] = self.last_edad_fecha['Primera'].fillna(0) + self.last_edad_fecha['Segunda'].fillna(0) + self.last_edad_fecha['Unica'].fillna(0) + self.last_edad_fecha['Refuerzo'].fillna(0) + self.last_edad_fecha['Cuarta'].fillna(0) for k in [2, 3, 4,5,6,7]: edades = self.last_edad_fecha[columns_name[1]].unique() edades = edades[~np.isnan(edades)] edades = np.sort(edades) df = pd.DataFrame(np.zeros((len(edades), lenSE))) df.insert(0, 'Edad', edades) df.set_index('Edad',inplace=True) dicts = {} keys = range(lenSE) for i in keys: dicts[i] = date_list[i] df.rename(columns=dicts, inplace=True) for index, row in self.last_edad_fecha.iterrows(): df[row['Fecha']][row['Edad']] = row[k] df.reset_index(inplace=True) if k == 2: name = '../output/producto78/vacunados_edad_fecha' + 
'_1eraDosis.csv' df.to_csv(name, index=False) dft = df.T dft.to_csv(name.replace('.csv', '_T.csv'), header=False) identifiers = ['Edad'] variables = [x for x in df.columns if x not in identifiers] outputDF2_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='Fecha', value_name='Primera Dosis') outputDF2_std.to_csv(name.replace('.csv', '_std.csv'), index=False) if k == 3: name = '../output/producto78/vacunados_edad_fecha' + '_2daDosis.csv' df.to_csv(name, index=False) dft = df.T dft.to_csv(name.replace('.csv', '_T.csv'), header=False) identifiers = ['Edad'] variables = [x for x in df.columns if x not in identifiers] outputDF2_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='Fecha', value_name='Segunda Dosis') outputDF2_std.to_csv(name.replace('.csv', '_std.csv'), index=False) if k == 4: name = '../output/producto78/vacunados_edad_fecha' + '_UnicaDosis.csv' df.to_csv(name, index=False) dft = df.T dft.to_csv(name.replace('.csv', '_T.csv'), header=False) identifiers = ['Edad'] variables = [x for x in df.columns if x not in identifiers] outputDF2_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='Fecha', value_name='Unica Dosis') outputDF2_std.to_csv(name.replace('.csv', '_std.csv'), index=False) if k == 5: name = '../output/producto78/vacunados_edad_fecha' + '_Refuerzo.csv' df.to_csv(name, index=False) dft = df.T dft.to_csv(name.replace('.csv', '_T.csv'), header=False) identifiers = ['Edad'] variables = [x for x in df.columns if x not in identifiers] outputDF2_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='Fecha', value_name='Dosis Refuerzo') outputDF2_std.to_csv(name.replace('.csv', '_std.csv'), index=False) if k == 6: name = '../output/producto78/vacunados_edad_fecha' + '_Cuarta.csv' df.to_csv(name, index=False) dft = df.T dft.to_csv(name.replace('.csv', '_T.csv'), header=False) identifiers = ['Edad'] variables = [x for x in df.columns if x not in identifiers] outputDF2_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='Fecha', value_name='Cuarta Dosis') outputDF2_std.to_csv(name.replace('.csv', '_std.csv'), index=False) if k == 7: name = '../output/producto78/vacunados_edad_fecha' + '_total.csv' df.to_csv(name, index=False) dft = df.T dft.to_csv(name.replace('.csv', '_T.csv'), header=False) identifiers = ['Edad'] variables = [x for x in df.columns if x not in identifiers] outputDF2_std = pd.melt(df, id_vars=identifiers, value_vars=variables, var_name='Fecha', value_name='Total vacunados') outputDF2_std.to_csv(name.replace('.csv', '_std.csv'), index=False) elif self.indicador == 'vacunas_prioridad': self.last_added.rename(columns={'CRITERIO': 'Grupo', 'SUB_CRITERIO': 'Subgrupo', '1aDOSIS1': 'Primera', '2aDOSIS1': 'Segunda'}, inplace=True) self.last_added.sort_values(by=['Grupo', 'Subgrupo'], inplace=True) self.last_added = self.last_added[['Grupo', 'Subgrupo', 'Primera', 'Segunda']] self.last_added['Primera'] = self.last_added.groupby(['Grupo', 'Subgrupo'])['Primera'].transform('sum') self.last_added['Segunda'] = self.last_added.groupby(['Grupo', 'Subgrupo'])['Segunda'].transform('sum') self.last_added = self.last_added[['Grupo', 'Subgrupo', 'Primera', 'Segunda']] self.last_added.drop_duplicates(inplace=True) ##transformar en input df = pd.DataFrame() grupos = pd.DataFrame(self.last_added['Grupo'].unique()) for grupo in grupos[0]: df_grupo = self.last_added.loc[self.last_added['Grupo'] == grupo] df_grupo.set_index('Subgrupo', inplace=True) df_grupo = df_grupo[['Primera', 'Segunda']].T 
df_grupo.reset_index(drop=True, inplace=True) df = df.append(df_grupo, ignore_index=True) new_col = ['Primera', 'Segunda', 'Primera', 'Segunda', 'Primera', 'Segunda', 'Primera', 'Segunda', 'Primera', 'Segunda', 'Primera', 'Segunda'] df.insert(0, column='Dosis', value=new_col) new_col = pd.DataFrame() for grupo in grupos[0]: col = [grupo, grupo] new_col = new_col.append(col, ignore_index=True) df.insert(0, column='Grupo', value=new_col) self.last_added = df identifiers = ['Grupo', 'Dosis'] variables = [x for x in self.last_added.columns if x not in identifiers] self.last_added = self.last_added[identifiers + variables] self.last_added.to_csv(self.output + '.csv', index=False) df_t = self.last_added.T df_t.to_csv(self.output + '_t.csv', header=False) df_std = pd.melt(self.last_added, id_vars=identifiers, value_vars=variables, var_name=['Subgrupo'], value_name='Cantidad') df_std.to_csv(self.output + '_std.csv', index=False) df_std.to_json(self.output + '.json',orient='values',force_ascii=False) elif self.indicador == 'vacunas_comuna': ##template por comuna df_base = pd.read_csv('../input/DistribucionDEIS/baseFiles/DEIS_template.csv') df_base['Codigo region'] = df_base['Codigo region'].fillna(0) df_base['Codigo comuna'] = df_base['Codigo comuna'].fillna(0) df_base['Comuna'] = df_base['Comuna'].fillna(0) todrop = df_base.loc[df_base['Comuna'] == 0] df_base.drop(todrop.index, inplace=True) df_base['Codigo region'] = df_base['Codigo region'].astype(int) df_base['Codigo comuna'] = df_base['Codigo comuna'].astype(int) desconocido = df_base['Codigo comuna'] != 0 df_base['Codigo comuna'].where(desconocido, '', inplace=True) Comp = df_base.loc[df_base['Comuna'] != 'Total'] Comp.reset_index(inplace=True) utils.desconocidoName(Comp) # for k in range(len(Comp)): # if Comp.loc[k, 'Codigo region'] < 10: # Comp.loc[k, 'Codigo region'] = '0' + str(Comp.loc[k, 'Codigo region']) # else: # Comp.loc[k, 'Codigo region'] = str(Comp.loc[k, 'Codigo region']) # # if Comp.loc[k, 'Codigo comuna'] != '': # if Comp.loc[k, 'Codigo comuna'] < 10000: # Comp.loc[k, 'Codigo comuna'] = '0' + str(Comp.loc[k, 'Codigo comuna']) # else: # Comp.loc[k, 'Codigo comuna'] = str(Comp.loc[k, 'Codigo comuna']) comuna = Comp['Comuna'] self.last_added.rename(columns={'REGION_CORTO': 'region_residencia', 'COD_COMUNA_FINAL': 'Codigo comuna', 'FECHA_INMUNIZACION': 'Fecha', 'SUM_of_SUM_of_2aDOSIS': 'Segunda_comuna', 'SUM_of_SUM_of_1aDOSIS': 'Primera_comuna', 'SUM_of_SUM_of_ÚnicaDOSIS':'Unica_comuna', 'SUM_of_Refuerzo_DOSIS':'Refuerzo_comuna', 'SUM_of_4_Dosis':'Cuarta_comuna'}, inplace=True) self.last_added = self.last_added.dropna(subset=['Fecha']) self.last_added['Fecha'] = pd.to_datetime(self.last_added['Fecha'],format='%d/%m/%Y').dt.strftime("%Y-%m-%d") self.last_added.sort_values(by=['region_residencia','Fecha'], inplace=True) self.last_added.reset_index(drop=True, inplace=True) utils.regionDEISName(self.last_added) # for k in self.last_added.loc[self.last_added['Codigo comuna'] < 10000].index: # self.last_added.loc[k, 'Codigo comuna'] = '0' + str(self.last_added.loc[k, 'Codigo comuna']) df_sup = Comp[['Codigo comuna', 'Comuna']] df_sup['Codigo comuna'] = df_sup['Codigo comuna'].replace('', 0) self.last_added = self.last_added.merge(df_sup, on="Codigo comuna", how="left") self.last_added.set_index('Comuna', inplace=True) columns_name = self.last_added.columns.values maxSE = self.last_added[columns_name[2]].max() minSE = self.last_added[columns_name[2]].min() #print(minSE, maxSE) lenSE = (pd.to_datetime(maxSE) - pd.to_datetime(minSE)).days + 
1 startdate = pd.to_datetime(minSE) date_list = pd.date_range(startdate, periods=lenSE).tolist() date_list = [dt.datetime.strftime(x, "%Y-%m-%d") for x in date_list] #print(date_list) SE_comuna = self.last_added[columns_name[2]] def edad2rango(df, comuna): cols = df.columns.tolist() df2 = pd.DataFrame(columns=cols) p = 0 for row in comuna: aux = df.loc[df.index == row] aux2 = aux.groupby(['Fecha']).sum() aux2['Comuna'] = row aux2.set_index(['Comuna'], inplace=True) identifiers = ['region_residencia', 'Codigo comuna', 'Fecha'] temp = aux[identifiers].copy() temp.drop_duplicates(keep='first', inplace=True) temp2 = pd.concat([temp, aux2], axis=1) if p == 0: df2 = temp2 p += 1 else: df2 = pd.concat([df2, temp2], axis=0) return df2 dfv = edad2rango(self.last_added, comuna) for k in [3,4,5,6,7]: df = pd.DataFrame(np.zeros((len(comuna), lenSE))) dicts = {} keys = range(lenSE) # values = [i for i in range(lenSE)] for i in keys: dicts[i] = date_list[i] df.rename(columns=dicts, inplace=True) value_comuna = dfv[columns_name[k]] value_comuna.fillna(0,inplace=True) SE_comuna = dfv['Fecha'].copy() i=0 for row in dfv.index: idx = comuna.loc[comuna == row].index.values if idx.size > 0: col = SE_comuna[i] df[col][idx] = value_comuna[i].astype(int) i += 1 df_output = pd.concat([Comp, df], axis=1) df_output.drop(columns=['index'], axis=1, inplace=True) nComunas = [len(list(group)) for key, group in groupby(df_output['Codigo region'])] identifiers = ['Region', 'Codigo region', 'Comuna', 'Codigo comuna'] variables = [x for x in df_output.columns if x not in identifiers] begRow = 0 for i in range(len(nComunas)): endRow = begRow + nComunas[i] firstList = df_output[identifiers].iloc[endRow - 1].values.tolist() firstList[2] = 'Total' firstList[3] = '' valuesTotal = df_output[variables][begRow:endRow].sum(axis=0).tolist() regionTotal = pd.DataFrame((firstList + valuesTotal), index=df_output.columns.values).transpose() if i < len(nComunas) - 1: blank_line = pd.Series(np.empty((len(regionTotal), 0)).tolist()) regionTotal = pd.concat([regionTotal, blank_line], axis=0) regionTotal.drop(columns=0, axis=1, inplace=True) temp =
pd.concat([df_output.iloc[begRow:endRow], regionTotal], axis=0)
pandas.concat
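A small, self-contained sketch of the pandas.concat pattern the script above relies on, stacking two partial tables row-wise; the region names and counts are invented.

import pandas as pd

part_1 = pd.DataFrame({'Region': ['Total', 'Arica'], 'Primera': [10, 4]})
part_2 = pd.DataFrame({'Region': ['Tarapaca'], 'Primera': [6]})

# axis=0 stacks rows; ignore_index=True renumbers the combined index.
combined = pd.concat([part_1, part_2], axis=0, ignore_index=True)
print(combined)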
import numpy as np
import pandas as pd
import pdb
import os
import math
import argparse

'''
how to use
ex:
python3 /users/primasan/projects/muat/preprocessing/notebook/tcga/tcga_create_simplified_data.py --muat-dir '/users/primasan/projects/muat/' --tcga-dir '/scratch/project_2001668/data/tcga/alltcga/' --simplified-dir '/scratch/project_2001668/data/tcga/simplified/'
'''

def get_args():
    parser = argparse.ArgumentParser(description='preprocessing args')

    parser.add_argument('--tcga-dir', type=str, help='tcga directory: all .csv per samples per class')
    parser.add_argument('--muat-dir', type=str, help='muat project directory')
    parser.add_argument('--simplified-dir', type=str, help='output directory for simplification of mutation (3 bp)')

    args = parser.parse_args()
    return args

if __name__ == '__main__':

    args = get_args()

    #muat_dir = '/users/primasan/projects/muat/'
    muat_dir = args.muat_dir

    metadata = pd.read_csv(muat_dir + 'extfile/metadata_icgc_pcawg.tsv', sep='\t', index_col=0)
    dictMutation = pd.read_csv(muat_dir + 'extfile/dictMutation.csv', index_col=0)
    dictChpos = pd.read_csv(muat_dir + 'extfile/dictChpos.csv', index_col=0)
    dictGES = pd.read_csv(muat_dir + 'extfile/dictGES.csv', index_col=0)

    #pcawg dir : directory of all .csv
    pcawg_dir = args.tcga_dir

    #export directory (the files here will be combined with epigenetics data)
    simplified_data = args.simplified_dir

    all_class = os.listdir(pcawg_dir)

    for i in all_class:
        pcawg_histology = i
        os.makedirs(simplified_data + pcawg_histology, exist_ok=True)
        allsamples = os.listdir(pcawg_dir + pcawg_histology)

        for j in allsamples:
            onesamples = j
            read_sample =
pd.read_csv(pcawg_dir + pcawg_histology + '/' + onesamples)
pandas.read_csv
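A self-contained sketch of the pandas.read_csv call completed above; because no real TCGA file is available here, the CSV text is read from an in-memory buffer instead of a path.

import io
import pandas as pd

csv_text = "chrom,pos,ref,alt\nchr1,12345,A,T\nchr2,67890,C,G\n"
sample = pd.read_csv(io.StringIO(csv_text))
print(sample.head())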
import datetime
import os

today = datetime.date.today()

net_records = []
error_list = []

try:
    from NOKIA1 import *
    net_records = net_records + records
except:
    error_list.append("NOKIA")

try:
    from lgmain import *
    net_records = net_records + records
except:
    error_list.append("LG")

try:
    from asus_json import *
    net_records = net_records + records
except:
    error_list.append("ASUS")

try:
    from archos import *
    net_records = net_records + records
except:
    error_list.append("ARCHOS")

try:
    from doogee import *
    net_records = net_records + records
except:
    error_list.append("DOOGEE")

import pandas as pd

path = 'C:\\LavaWebScraper\\countrywise\\'

df =
pd.DataFrame(net_records, columns = ['COUNTRY', 'COMPANY', 'MODEL', 'USP', 'DISPLAY', 'CAMERA', 'MEMORY', 'BATTERY', 'THICKNESS', 'PROCESSOR', 'EXTRAS/ LINKS'])
pandas.DataFrame
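A compact sketch of the pd.DataFrame(rows, columns=[...]) form used in the completion above, with a shortened, made-up column list and records.

import pandas as pd

rows = [['INDIA', 'NOKIA', 'G42', '5G'],
        ['INDIA', 'LG', 'W41', 'Quad camera']]
catalogue = pd.DataFrame(rows, columns=['COUNTRY', 'COMPANY', 'MODEL', 'USP'])
print(catalogue)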
""" date: January 2021 author: <NAME> contact: le<EMAIL> """ import os import pandas as pd import glob import regex as re def clean_vars(path, text): file_id_clean = re.findall(r'\w{2}\d{4}', path) clean_text = re.sub(r'\n|\t', ' ', text) clean_text = re.sub(r'\s{2,}', ' ', clean_text) if clean_text[0] == ' ': clean_text = clean_text[1:] return file_id_clean[0], clean_text if __name__ == '__main__': os.chdir('/home/leops95/GitHub/sots/') metadata =
pd.read_csv('metadata.csv')
pandas.read_csv
### preprocessing
"""
code is taken from tunguz - Surprise Me 2!
https://www.kaggle.com/tunguz/surprise-me-2/code
"""
import glob, re
import numpy as np
import pandas as pd
from sklearn import *
from datetime import datetime
import matplotlib.pyplot as plt

data = {
    'tra': pd.read_csv('../input/air_visit_data.csv'),
    'as': pd.read_csv('../input/air_store_info.csv'),
    'hs': pd.read_csv('../input/hpg_store_info.csv'),
    'ar': pd.read_csv('../input/air_reserve.csv'),
    'hr': pd.read_csv('../input/hpg_reserve.csv'),
    'id': pd.read_csv('../input/store_id_relation.csv'),
    'tes': pd.read_csv('../input/sample_submission.csv'),
    'hol': pd.read_csv('../input/date_info.csv').rename(columns={'calendar_date': 'visit_date'})
}

data['hr'] = pd.merge(data['hr'], data['id'], how='inner', on=['hpg_store_id'])

for df in ['ar', 'hr']:
    data[df]['visit_datetime'] = pd.to_datetime(data[df]['visit_datetime'])
    data[df]['visit_dow'] = data[df]['visit_datetime'].dt.dayofweek
    data[df]['visit_datetime'] = data[df]['visit_datetime'].dt.date
    data[df]['reserve_datetime'] =
pd.to_datetime(data[df]['reserve_datetime'])
pandas.to_datetime
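A short sketch of the pandas.to_datetime conversion completed above: once the column is datetime64, the .dt accessor can derive day-of-week and calendar date, as the loop does for the visit times. The reservation rows here are fabricated.

import pandas as pd

reserve = pd.DataFrame({'reserve_datetime': ['2017-01-01 19:00:00',
                                             '2017-01-02 12:30:00']})
reserve['reserve_datetime'] = pd.to_datetime(reserve['reserve_datetime'])
reserve['reserve_dow'] = reserve['reserve_datetime'].dt.dayofweek
reserve['reserve_date'] = reserve['reserve_datetime'].dt.date
print(reserve)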
"""Models. Changes affecting results or their presentation should also update constants.py `change_date`, """ from __future__ import annotations from datetime import date, datetime, timedelta from logging import INFO, basicConfig, getLogger from sys import stdout from typing import Dict, Generator, Tuple, Sequence, Optional import numpy as np import pandas as pd from .constants import EPSILON, CHANGE_DATE from .parameters import Parameters basicConfig( level=INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", stream=stdout, ) logger = getLogger(__name__) class SimSirModel: def __init__(self, p: Parameters): self.rates = { key: d.rate for key, d in p.dispositions.items() } self.days = { key: d.days for key, d in p.dispositions.items() } self.keys = ("susceptible", "infected", "recovered") # An estimate of the number of infected people on the day that # the first hospitalized case is seen # # Note: this should not be an integer. infected = ( 1.0 / p.market_share / p.non_icu.rate ) susceptible = p.population - infected gamma = 1.0 / p.infectious_days self.gamma = gamma self.susceptible = susceptible self.infected = infected self.recovered = p.recovered if p.doubling_time is not None: # Back-projecting to when the first hospitalized case would have been admitted logger.info('Using doubling_time: %s', p.doubling_time) intrinsic_growth_rate = get_growth_rate(p.doubling_time) self.beta = get_beta(intrinsic_growth_rate, gamma, self.susceptible, 0.0) self.beta_t = get_beta(intrinsic_growth_rate, self.gamma, self.susceptible, p.relative_contact_rate) if p.mitigation_date is None: self.i_day = 0 # seed to the full length temp_n_days = p.n_days p.n_days = 1000 raw = self.run_projection(p, [(self.beta, p.n_days)]) self.i_day = i_day = int(get_argmin_ds(raw["census_non_icu"], p.covid_census_value)) p.n_days = temp_n_days self.raw = self.run_projection(p, self.gen_policy(p)) logger.info('Set i_day = %s', i_day) else: projections = {} best_i_day = -1 best_i_day_loss = float('inf') temp_n_days = p.n_days p.n_days = 1000 for i_day in range(90): self.i_day = i_day raw = self.run_projection(p, self.gen_policy(p)) # Don't fit against results that put the peak before the present day if raw["census_non_icu"].argmax() < i_day: continue loss = get_loss(raw["census_non_icu"][i_day], p.covid_census_value) if loss < best_i_day_loss: best_i_day_loss = loss best_i_day = i_day p.n_days = temp_n_days self.i_day = best_i_day raw = self.run_projection(p, self.gen_policy(p)) self.raw = raw logger.info( 'Estimated date_first_hospitalized: %s; current_date: %s; i_day: %s', p.covid_census_date - timedelta(days=self.i_day), p.covid_census_date, self.i_day) elif p.date_first_hospitalized is not None: # Fitting spread parameter to observed hospital census (dates of 1 patient and today) self.i_day = (p.covid_census_date - p.date_first_hospitalized).days self.covid_census_value = p.covid_census_value logger.info( 'Using date_first_hospitalized: %s; current_date: %s; i_day: %s, current_hospitalized: %s', p.date_first_hospitalized, p.covid_census_date, self.i_day, p.covid_census_value, ) # Make an initial coarse estimate dts = np.linspace(1, 15, 15) min_loss = self.get_argmin_doubling_time(p, dts) # Refine the coarse estimate for iteration in range(4): dts = np.linspace(dts[min_loss-1], dts[min_loss+1], 15) min_loss = self.get_argmin_doubling_time(p, dts) p.doubling_time = dts[min_loss] logger.info('Estimated doubling_time: %s', p.doubling_time) intrinsic_growth_rate = get_growth_rate(p.doubling_time) self.beta = 
get_beta(intrinsic_growth_rate, self.gamma, self.susceptible, 0.0) self.beta_t = get_beta(intrinsic_growth_rate, self.gamma, self.susceptible, p.relative_contact_rate) self.raw = self.run_projection(p, self.gen_policy(p)) self.population = p.population else: logger.info( 'doubling_time: %s; date_first_hospitalized: %s', p.doubling_time, p.date_first_hospitalized, ) raise AssertionError('doubling_time or date_first_hospitalized must be provided.') self.raw["date"] = self.raw["day"].astype("timedelta64[D]") + np.datetime64(p.covid_census_date) self.raw_df = pd.DataFrame(data=self.raw) self.dispositions_df = pd.DataFrame(data={ 'day': self.raw['day'], 'date': self.raw['date'], 'ever_non_icu': self.raw['ever_non_icu'], 'ever_icu': self.raw['ever_icu'], 'ever_ventilators': self.raw['ever_ventilators'], }) self.admits_df = pd.DataFrame(data={ 'day': self.raw['day'], 'date': self.raw['date'], 'non_icu': self.raw['admits_non_icu'], 'icu': self.raw['admits_icu'], 'ventilators': self.raw['admits_ventilators'], 'total': self.raw['admits_total'] }) self.census_df = pd.DataFrame(data={ 'day': self.raw['day'], 'date': self.raw['date'], 'non_icu': self.raw['census_non_icu'], 'icu': self.raw['census_icu'], 'ventilators': self.raw['census_ventilators'], 'total': self.raw['census_total'], }) self.beds_df = build_beds_df(self.census_df, p) self.ppe_df = build_ppe_df(self.census_df, p) self.staffing_df = build_staffing_df(self.census_df, p) logger.info('len(np.arange(-i_day, n_days+1)): %s', len(np.arange(-self.i_day, p.n_days+1))) logger.info('len(raw_df): %s', len(self.raw_df)) self.infected = self.raw_df['infected'].values[self.i_day] self.susceptible = self.raw_df['susceptible'].values[self.i_day] self.recovered = self.raw_df['recovered'].values[self.i_day] self.intrinsic_growth_rate = intrinsic_growth_rate # r_t is r_0 after distancing self.r_t = self.beta_t / gamma * susceptible self.r_naught = self.beta / gamma * susceptible doubling_time_t = 1.0 / np.log2( self.beta_t * susceptible - gamma + 1) self.doubling_time_t = doubling_time_t self.sim_sir_w_date_df = build_sim_sir_w_date_df(self.raw_df, p.covid_census_date, self.keys) self.sim_sir_w_date_floor_df = build_floor_df(self.sim_sir_w_date_df, self.keys) self.admits_floor_df = build_floor_df(self.admits_df, p.dispositions.keys()) self.census_floor_df = build_floor_df(self.census_df, p.dispositions.keys()) self.beds_floor_df = build_floor_df(self.beds_df, p.dispositions.keys()) self.ppe_floor_df = build_floor_df(self.ppe_df, self.ppe_df.columns[2:]) self.staffing_floor_df = build_floor_df(self.staffing_df, self.staffing_df.columns[2:]) self.daily_growth_rate = get_growth_rate(p.doubling_time) self.daily_growth_rate_t = get_growth_rate(self.doubling_time_t) def get_argmin_doubling_time(self, p: Parameters, dts): losses = np.full(dts.shape[0], np.inf) for i, i_dt in enumerate(dts): intrinsic_growth_rate = get_growth_rate(i_dt) self.beta = get_beta(intrinsic_growth_rate, self.gamma, self.susceptible, 0.0) self.beta_t = get_beta(intrinsic_growth_rate, self.gamma, self.susceptible, p.relative_contact_rate) raw = self.run_projection(p, self.gen_policy(p)) # Skip values the would put the fit past peak peak_admits_day = raw["admits_non_icu"].argmax() if peak_admits_day < 0: continue predicted = raw["census_non_icu"][self.i_day] loss = get_loss(self.covid_census_value, predicted) losses[i] = loss min_loss = pd.Series(losses).argmin() return min_loss def gen_policy(self, p: Parameters) -> Sequence[Tuple[float, int]]: if p.mitigation_date is not None: 
mitigation_day = -(p.covid_census_date - p.mitigation_date).days else: mitigation_day = 0 total_days = self.i_day + p.n_days if mitigation_day < -self.i_day: mitigation_day = -self.i_day pre_mitigation_days = self.i_day + mitigation_day post_mitigation_days = total_days - pre_mitigation_days return [ (self.beta, pre_mitigation_days), (self.beta_t, post_mitigation_days), ] def run_projection(self, p: Parameters, policy: Sequence[Tuple[float, int]]): raw = sim_sir( self.susceptible, self.infected, p.recovered, self.gamma, -self.i_day, policy ) calculate_dispositions(raw, self.rates, p.market_share) calculate_admits(raw, self.rates) calculate_census(raw, self.days) return raw def get_loss(current_hospitalized, predicted) -> float: """Squared error: predicted vs. actual current hospitalized.""" return (current_hospitalized - predicted) ** 2.0 def get_argmin_ds(census, current_hospitalized: float) -> float: # By design, this forbids choosing a day after the peak # If that's a problem, see #381 peak_day = census.argmax() losses = (census[:peak_day] - current_hospitalized) ** 2.0 return losses.argmin() def get_beta( intrinsic_growth_rate: float, gamma: float, susceptible: float, relative_contact_rate: float ) -> float: return ( (intrinsic_growth_rate + gamma) / susceptible * (1.0 - relative_contact_rate) ) def get_growth_rate(doubling_time: Optional[float]) -> float: """Calculates average daily growth rate from doubling time.""" if doubling_time is None or doubling_time == 0.0: return 0.0 return (2.0 ** (1.0 / doubling_time) - 1.0) def sir( s: float, i: float, r: float, beta: float, gamma: float, n: float ) -> Tuple[float, float, float]: """The SIR model, one time step.""" s_n = (-beta * s * i) + s i_n = (beta * s * i - gamma * i) + i r_n = gamma * i + r scale = n / (s_n + i_n + r_n) return s_n * scale, i_n * scale, r_n * scale def sim_sir( s: float, i: float, r: float, gamma: float, i_day: int, policies: Sequence[Tuple[float, int]] ): """Simulate SIR model forward in time, returning a dictionary of daily arrays Parameter order has changed to allow multiple (beta, n_days) to reflect multiple changing social distancing policies. 
""" s, i, r = (float(v) for v in (s, i, r)) n = s + i + r d = i_day total_days = 1 for beta, days in policies: total_days += days d_a = np.empty(total_days, "int") s_a = np.empty(total_days, "float") i_a = np.empty(total_days, "float") r_a = np.empty(total_days, "float") index = 0 for beta, n_days in policies: for _ in range(n_days): d_a[index] = d s_a[index] = s i_a[index] = i r_a[index] = r index += 1 s, i, r = sir(s, i, r, beta, gamma, n) d += 1 d_a[index] = d s_a[index] = s i_a[index] = i r_a[index] = r return { "day": d_a, "susceptible": s_a, "infected": i_a, "recovered": r_a, "ever_infected": i_a + r_a } def build_sim_sir_w_date_df( raw_df: pd.DataFrame, current_date: datetime, keys: Sequence[str], ) -> pd.DataFrame: day = raw_df.day return pd.DataFrame({ "day": day, "date": day.astype('timedelta64[D]') + np.datetime64(current_date), **{ key: raw_df[key] for key in keys } }) def build_floor_df(df, keys): """Build floor sim sir w date.""" return pd.DataFrame({ "day": df.day, "date": df.date, **{ key: np.floor(df[key]) for key in keys } }) def calculate_dispositions( raw: Dict, rates: Dict[str, float], market_share: float, ): """Build dispositions dataframe of patients adjusted by rate and market_share.""" for key, rate in rates.items(): raw["ever_" + key] = raw["ever_infected"] * rate * market_share raw[key] = raw["ever_infected"] * rate * market_share def calculate_admits(raw: Dict, rates): """Build admits dataframe from dispositions.""" for key in rates.keys(): ever = raw["ever_" + key] admit = np.empty_like(ever) admit[0] = np.nan admit[1:] = ever[1:] - ever[:-1] raw["admits_"+key] = admit raw[key] = admit raw['admits_total'] = np.floor(raw['admits_non_icu']) + np.floor(raw['admits_icu']) def calculate_census( raw: Dict, lengths_of_stay: Dict[str, int], ): """Average Length of Stay for each disposition of COVID-19 case (total guesses)""" n_days = raw["day"].shape[0] for key, los in lengths_of_stay.items(): cumsum = np.empty(n_days + los) cumsum[:los+1] = 0.0 cumsum[los+1:] = raw["admits_" + key][1:].cumsum() census = cumsum[los:] - cumsum[:-los] raw["census_" + key] = census raw['census_total'] = np.floor(raw['census_non_icu']) + np.floor(raw['census_icu']) def build_beds_df( census_df: pd.DataFrames, p, ) -> pd.DataFrame: """ALOS for each category of COVID-19 case (total guesses)""" beds_df = pd.DataFrame() beds_df["day"] = census_df["day"] beds_df["date"] = census_df["date"] # If hospitalized < 0 and there's space in icu, start borrowing if possible # If ICU < 0, raise alarms. No changes. beds_df["non_icu"] = p.total_covid_beds - p.icu_covid_beds - census_df["non_icu"] beds_df["icu"] = p.icu_covid_beds - census_df["icu"] beds_df["ventilators"] = p.covid_ventilators - census_df["ventilators"] beds_df["total"] = p.total_covid_beds - census_df["non_icu"] - census_df["icu"] # beds_df = beds_df.head(n_days) # Shift people to ICU if main hospital is full and ICU is not. 
# And vice versa if p.beds_borrow: new_hosp = [] new_icu = [] for row in beds_df.itertuples(): if row.non_icu < 0 and row.icu > 0: # ICU to Non-ICU needed = min(abs(row.non_icu), row.icu) new_hosp.append(row.non_icu + needed) new_icu.append(row.icu - needed) elif row.non_icu > 0 and row.icu < 0: # Non-ICU to ICU needed = min(abs(row.icu), row.non_icu) new_hosp.append(row.non_icu - needed) new_icu.append(row.icu + needed) else: new_hosp.append(row.non_icu) new_icu.append(row.icu) beds_df["non_icu"] = new_hosp beds_df["icu"] = new_icu return beds_df def build_ppe_df( census_df: pd.DataFrames, p, ) -> pd.DataFrame: """ALOS for each category of COVID-19 case (total guesses)""" ppe_df =
pd.DataFrame()
pandas.DataFrame
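A minimal sketch of the pattern completed above — an empty pd.DataFrame() that is then filled column by column, as build_beds_df and build_ppe_df do. The census numbers and the per-patient PPE rate are placeholders, not values from the model.

import pandas as pd

census = pd.DataFrame({'day': [0, 1, 2], 'non_icu': [5.0, 9.0, 14.0]})

ppe = pd.DataFrame()                           # start empty, then assign columns
ppe['day'] = census['day']
ppe['masks_per_day'] = census['non_icu'] * 3   # hypothetical usage rate
print(ppe)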
from itertools import product import numpy as np import pandas as pd import pytest from cudf.core.dataframe import DataFrame, Series from cudf.tests.utils import INTEGER_TYPES, NUMERIC_TYPES, assert_eq, gen_rand params_sizes = [0, 1, 2, 5] def _gen_params(): for t, n in product(NUMERIC_TYPES, params_sizes): if (t == np.int8 or t == np.int16) and n > 20: # to keep data in range continue yield t, n @pytest.mark.parametrize("dtype,nelem", list(_gen_params())) def test_cumsum(dtype, nelem): if dtype == np.int8: # to keep data in range data = gen_rand(dtype, nelem, low=-2, high=2) else: data = gen_rand(dtype, nelem) decimal = 4 if dtype == np.float32 else 6 # series gs = Series(data) ps = pd.Series(data) np.testing.assert_array_almost_equal( gs.cumsum().to_array(), ps.cumsum(), decimal=decimal ) # dataframe series (named series) gdf = DataFrame() gdf["a"] = Series(data) pdf = pd.DataFrame() pdf["a"] = pd.Series(data) np.testing.assert_array_almost_equal( gdf.a.cumsum().to_array(), pdf.a.cumsum(), decimal=decimal ) def test_cumsum_masked(): data = [1, 2, None, 4, 5] float_types = ["float32", "float64"] for type_ in float_types: gs = Series(data).astype(type_) ps = pd.Series(data).astype(type_) assert_eq(gs.cumsum(), ps.cumsum()) for type_ in INTEGER_TYPES: gs = Series(data).astype(type_) got = gs.cumsum() expected = pd.Series([1, 3, np.nan, 7, 12], dtype="float64") assert_eq(got, expected) @pytest.mark.parametrize("dtype,nelem", list(_gen_params())) def test_cummin(dtype, nelem): if dtype == np.int8: # to keep data in range data = gen_rand(dtype, nelem, low=-2, high=2) else: data = gen_rand(dtype, nelem) decimal = 4 if dtype == np.float32 else 6 # series gs = Series(data) ps = pd.Series(data) np.testing.assert_array_almost_equal( gs.cummin().to_array(), ps.cummin(), decimal=decimal ) # dataframe series (named series) gdf = DataFrame() gdf["a"] = Series(data) pdf = pd.DataFrame() pdf["a"] = pd.Series(data) np.testing.assert_array_almost_equal( gdf.a.cummin().to_array(), pdf.a.cummin(), decimal=decimal ) def test_cummin_masked(): data = [1, 2, None, 4, 5] float_types = ["float32", "float64"] for type_ in float_types: gs = Series(data).astype(type_) ps = pd.Series(data).astype(type_) assert_eq(gs.cummin(), ps.cummin()) for type_ in INTEGER_TYPES: gs = Series(data).astype(type_) expected = pd.Series([1, 1, np.nan, 1, 1]).astype("float64") assert_eq(gs.cummin(), expected) @pytest.mark.parametrize("dtype,nelem", list(_gen_params())) def test_cummax(dtype, nelem): if dtype == np.int8: # to keep data in range data = gen_rand(dtype, nelem, low=-2, high=2) else: data = gen_rand(dtype, nelem) decimal = 4 if dtype == np.float32 else 6 # series gs = Series(data) ps = pd.Series(data) np.testing.assert_array_almost_equal( gs.cummax().to_array(), ps.cummax(), decimal=decimal ) # dataframe series (named series) gdf = DataFrame() gdf["a"] = Series(data) pdf = pd.DataFrame() pdf["a"] = pd.Series(data) np.testing.assert_array_almost_equal( gdf.a.cummax().to_array(), pdf.a.cummax(), decimal=decimal ) def test_cummax_masked(): data = [1, 2, None, 4, 5] float_types = ["float32", "float64"] for type_ in float_types: gs = Series(data).astype(type_) ps = pd.Series(data).astype(type_) assert_eq(gs.cummax(), ps.cummax()) for type_ in INTEGER_TYPES: gs = Series(data).astype(type_) expected =
pd.Series([1, 2, np.nan, 4, 5])
pandas.Series
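A brief sketch of the pandas.Series behaviour the test above asserts: building a Series from a list that contains a missing value stores it as NaN, promotes the dtype to float64, and cumsum leaves that slot as NaN while later values keep accumulating.

import pandas as pd

expected = pd.Series([1, 2, None, 4, 5])
print(expected.dtype)      # float64 -- the None is stored as NaN
print(expected.cumsum())   # 1, 3, NaN, 7, 12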
# standard libraries
import os

# third-party libraries
import pandas as pd

# local imports
from .. import count_data

THIS_DIR = os.path.dirname(os.path.abspath(__file__))


class TestCsvToDf:
    """
    Tests converting a csv with various headers into a processible DataFrame
    """

    def test_timestamp(self):
        """
        Check if a csv w/ a timestamp is properly converted to the desired DataFrame
        """
        data = os.path.join(THIS_DIR, 'test_timestamp.csv')
        element_id = 'tagID'
        timestamp = 'timestamp'
        lat = 'lat'
        lon = 'lon'

        test_df = count_data.csv_to_df(data, element_id=element_id, timestamp=timestamp, lat=lat, lon=lon)

        assert pd.util.hash_pandas_object(test_df).sum() == -6761865716520410554

    def test_timestamp_ba(self):
        """
        Check if a csv w/ a timestamp and grouped counts is properly converted to the desired DataFrame
        """
        data = os.path.join(THIS_DIR, 'test_timestamp_ba.csv')
        element_id = 'tagID'
        timestamp = 'timestamp'
        boardings = 'boardings'
        alightings = 'alightings'
        lat = 'lat'
        lon = 'lon'

        test_df = count_data.csv_to_df(data, element_id=element_id, timestamp=timestamp,
                                       boardings=boardings, alightings=alightings, lat=lat, lon=lon)

        assert pd.util.hash_pandas_object(test_df).sum() == 7008548250528393651

    def test_session(self):
        """
        Check if a csv w/ session times is properly converted to the desired DataFrame
        """
        data = os.path.join(THIS_DIR, 'test_session.csv')
        element_id = 'MacPIN'
        session_start = 'SessionStart_Epoch'
        session_end = 'SessionEnd_Epoch'
        lat = 'GPS_LAT'
        lon = 'GPS_LONG'

        test_df = count_data.csv_to_df(data, element_id=element_id, session_start=session_start,
                                       session_end=session_end, lat=lat, lon=lon)

        assert pd.util.hash_pandas_object(test_df).sum() == 7098407329788286247

    def test_session_ba(self):
        """
        Check if a csv w/ session times and grouped counts is properly converted to the desired DataFrame
        """
        data = os.path.join(THIS_DIR, 'test_session_ba.csv')
        element_id = 'MacPIN'
        session_start = 'SessionStart_Epoch'
        session_end = 'SessionEnd_Epoch'
        boardings = 'boardings'
        alightings = 'alightings'
        lat = 'GPS_LAT'
        lon = 'GPS_LONG'

        test_df = count_data.csv_to_df(data, element_id=element_id, session_start=session_start,
                                       session_end=session_end, boardings=boardings,
                                       alightings=alightings, lat=lat, lon=lon)

        assert pd.util.hash_pandas_object(test_df).sum() == 2589903708124850504


class TestStandardizeDatetime:
    """
    Tests ensuring all times are datetime format
    """

    def test_no_change_needed(self):
        """
        Tests if all timestamps are already datetime and no change is needed
        """
        test_times = ['2018-02-22 20:08:00', '2018-02-09 18:05:00', '2018-02-09 18:26:00']
        test_df = pd.DataFrame(test_times, columns=['timestamp'])
        test_df['timestamp'] = pd.to_datetime(test_df['timestamp'])

        processed_df = count_data.standardize_datetime(test_df)

        assert processed_df['timestamp'].dtype == 'datetime64[ns]'

    def test_timestamp_epoch(self):
        """
        Tests if timestamp is an epoch time
        """
        test_times = ['1519330080', '1518199500', '1518200760']
        test_df = pd.DataFrame(test_times, columns=['timestamp'])

        processed_df = count_data.standardize_datetime(test_df)

        assert processed_df['timestamp'].dtype == 'datetime64[ns]'

    def test_session_epoch(self):
        """
        Tests if session times are epoch times
        """
        test_times = [['1519330080', '1518199500'], ['1518200760', '1519330080'], ['1518199500', '1518200760']]
        test_df = pd.DataFrame(test_times, columns=['session_start', 'session_end'])

        processed_df = count_data.standardize_datetime(test_df)

        assert processed_df['session_start'].dtype == 'datetime64[ns]'
        assert processed_df['session_end'].dtype == 'datetime64[ns]'


class TestStandardizeEpoch:
    """
    Tests ensuring all times are unix epoch
    """

    def test_no_change_needed(self):
        """
        Tests if all timestamps are already epochs and no change is needed
        """
        test_times = [1519330080, 1518199500, 1518200760]
        test_df = pd.DataFrame(test_times, columns=['timestamp'])

        processed_df = count_data.standardize_epoch(test_df)

        assert processed_df['timestamp'].dtype == 'int64'

    def test_timestamp_datetime(self):
        """
        Tests if timestamp is a datetime
        """
        test_times = ['2018-02-22 20:08:00', '2018-02-09 18:05:00', '2018-02-09 18:26:00']
        test_df = pd.DataFrame(test_times, columns=['timestamp'])
        test_df['timestamp'] = pd.to_datetime(test_df['timestamp'])

        processed_df = count_data.standardize_epoch(test_df)

        assert processed_df['timestamp'].dtype == 'int64'

    def test_session_datetime(self):
        """
        Tests if session times are datetimes
        """
        test_times = [['2018-02-22 20:08:00', '2018-02-09 18:05:00'],
                      ['2018-02-09 18:26:00', '2018-02-22 20:08:00'],
                      ['2018-02-09 18:05:00', '2018-02-09 18:26:00']]
        test_df = pd.DataFrame(test_times, columns=['session_start', 'session_end'])
        test_df['session_start'] = pd.to_datetime(test_df['session_start'])
        test_df['session_end'] = pd.to_datetime(test_df['session_end'])

        processed_df = count_data.standardize_epoch(test_df)

        assert processed_df['session_start'].dtype == 'int64'
        assert processed_df['session_end'].dtype == 'int64'


class TestSessionLengthFilter:
    """
    Tests limiting the length of sessions to be included in candidate sessions
    """

    def test_filter_sessions(self):
        """
        Tests if dataframes with sessions are correctly filtered
        """
        session_max = 100
        test_sessions = [[1519330080, 1519330090], [151899500, 1518209500], [1518200760, 1518200770]]
        filtered_sessions = [[1519330080, 1519330090], [1518200760, 1518200770]]
        test_df = pd.DataFrame(test_sessions, columns=['session_start', 'session_end'])
        filtered_df = pd.DataFrame(filtered_sessions, columns=['session_start', 'session_end'])

        filtered_test_df = count_data.session_length_filter(test_df, session_max)

        assert filtered_test_df.equals(filtered_df)

    def test_no_sessions(self):
        """
        Tests if dataframes with single timestamps are correctly not changed
        """
        session_max = 100
        test_timestamps = [1519330080, 1518199500, 1518200760]
        test_df = pd.DataFrame(test_timestamps, columns=['timestamp'])

        filtered_test_df = count_data.session_length_filter(test_df, session_max)

        assert filtered_test_df.equals(test_df)


class TestTimeRangeJoinNp:
    """
    Tests range joining two dataframes based on time
    """

    def test_d1timestamp_d2session_np(self):
        """
        Tests with data1 having a timestamp and data2 having session times
        """
        time_range = 100
        data1_list = [[1519330080, 'bob1'], [1519330030, 'bob1'], [1518200760, 'sue1']]
        data2_list = [[1519330050, 1519330150, 'bob2'], [1518200780, 1518200980, 'sue2'],
                      [1529200760, 1529200790, 'earl2']]
        target_list = [[1519330080, 'bob1', 1519330050, 1519330150, 'bob2'],
                       [1519330030, 'bob1', 1519330050, 1519330150, 'bob2'],
                       [1518200760, 'sue1', 1518200780, 1518200980, 'sue2']]
        data1 = pd.DataFrame(data1_list, columns=['timestamp1', 'name1'])
        data2 = pd.DataFrame(data2_list, columns=['session_start2', 'session_end2', 'name2'])
        target = pd.DataFrame(target_list, columns=['timestamp1', 'name1', 'session_start2', 'session_end2', 'name2'])

        df_range_join = count_data.time_range_join_np(data1, data2, time_range)

        assert df_range_join.equals(target)

    def test_d1session_d2timestamp_np(self):
        """
        Tests with data1 having session times and data2 having a timestamp
        """
        time_range = 100
        data1_list = [[1519330050, 1519330150, 'bob1'], [1518200780, 1518200980, 'sue1'],
                      [1529200760, 1529200790, 'earl1']]
        data2_list = [[1519330080, 'bob2'], [1519330030, 'bob2'], [1518200760, 'sue2']]
        target_list = [[1519330050, 1519330150, 'bob1', 1519330080, 'bob2'],
                       [1519330050, 1519330150, 'bob1', 1519330030, 'bob2'],
                       [1518200780, 1518200980, 'sue1', 1518200760, 'sue2']]
        data1 = pd.DataFrame(data1_list, columns=['session_start1', 'session_end1', 'name1'])
        data2 = pd.DataFrame(data2_list, columns=['timestamp2', 'name2'])
        target = pd.DataFrame(target_list, columns=['session_start1', 'session_end1', 'name1', 'timestamp2', 'name2'])
pandas.DataFrame
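# --- Hedged illustration (not part of the original test module) ---
# The tests above only pin down the observable behaviour of count_data.standardize_datetime
# and count_data.standardize_epoch; the real implementations are not shown here. The sketch
# below is one minimal way those helpers could behave, assuming the column names 'timestamp',
# 'session_start' and 'session_end' used in the tests and assuming epoch values are seconds.
import pandas as pd

TIME_COLUMNS = ('timestamp', 'session_start', 'session_end')


def standardize_datetime_sketch(df: pd.DataFrame) -> pd.DataFrame:
    # Convert any recognised time column to datetime64[ns]; epoch strings/ints are treated as seconds.
    out = df.copy()
    for col in TIME_COLUMNS:
        if col in out.columns and out[col].dtype != 'datetime64[ns]':
            out[col] = pd.to_datetime(pd.to_numeric(out[col]), unit='s')
    return out


def standardize_epoch_sketch(df: pd.DataFrame) -> pd.DataFrame:
    # Convert any recognised datetime column back to integer unix seconds.
    out = df.copy()
    for col in TIME_COLUMNS:
        if col in out.columns and out[col].dtype == 'datetime64[ns]':
            out[col] = out[col].astype('int64') // 10**9
    return out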
import pandas as pd from .datastore import merge_postcodes from .types import ErrorDefinition from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use! def validate_165(): error = ErrorDefinition( code = '165', description = 'Data entry for mother status is invalid.', affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM'] ) def _validate(dfs): if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs: return {} else: header = dfs['Header'] episodes = dfs['Episodes'] oc3 = dfs['OC3'] collection_start = dfs['metadata']['collection_start'] collection_end = dfs['metadata']['collection_end'] valid_values = ['0','1'] # prepare to merge oc3.reset_index(inplace=True) header.reset_index(inplace=True) episodes.reset_index(inplace=True) collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce') episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end) episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum') merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left') # Raise error if provided <MOTHER> is not a valid value. value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values)) # If not provided female = (merged['SEX']=='1') eps_in_year = (merged['EPS_COUNT']>0) none_provided = (merged['ACTIV'].isna()& merged['ACCOM'].isna()& merged['IN_TOUCH'].isna()) # If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided))) # That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error. 
error_locs_eps = merged.loc[mask, 'index_eps'] error_locs_header = merged.loc[mask, 'index_er'] error_locs_oc3 = merged.loc[mask, 'index'] return {'Header':error_locs_header.dropna().unique().tolist(), 'OC3':error_locs_oc3.dropna().unique().tolist()} return error, _validate def validate_1014(): error = ErrorDefinition( code='1014', description='UASC information is not required for care leavers', affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM'] ) def _validate(dfs): if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs: return {} else: uasc = dfs['UASC'] episodes = dfs['Episodes'] oc3 = dfs['OC3'] collection_start = dfs['metadata']['collection_start'] collection_end = dfs['metadata']['collection_end'] # prepare to merge oc3.reset_index(inplace=True) uasc.reset_index(inplace=True) episodes.reset_index(inplace=True) collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce') episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce') date_check = ( ((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end)) | ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end)) | ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna()) ) episodes['EPS'] = date_check episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum') # inner merge to take only episodes of children which are also found on the uasc table merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD', how='left') # adding suffixes with the secondary merge here does not go so well yet. some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna()) mask = (merged['EPS_COUNT'] == 0) & some_provided error_locs_uasc = merged.loc[mask, 'index_sc'] error_locs_oc3 = merged.loc[mask, 'index'] return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()} return error, _validate # !# not sure what this rule is actually supposed to be getting at - description is confusing def validate_197B(): error = ErrorDefinition( code='197B', description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.", affected_fields=['SDQ_REASON', 'DOB'], ) def _validate(dfs): if 'OC2' not in dfs or 'Episodes' not in dfs: return {} oc2 = add_CLA_column(dfs, 'OC2') start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce') endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce') oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce') ERRRR = ( ( (oc2['DOB'] + pd.DateOffset(years=4) == start) # ??? 
| (oc2['DOB'] + pd.DateOffset(years=17) == start) ) & oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2['SDQ_SCORE'].isna() & oc2['SDQ_REASON'].isna() ) return {'OC2': oc2[ERRRR].index.to_list()} return error, _validate def validate_157(): error = ErrorDefinition( code='157', description="Child is aged 4 years or over at the beginning of the year or 16 years or under at the end of the " "year and Strengths and Difficulties Questionnaire (SDQ) 1 has been recorded as the reason for no " "Strengths and Difficulties Questionnaire (SDQ) score.", affected_fields=['SDQ_REASON', 'DOB'], ) def _validate(dfs): if 'OC2' not in dfs or 'Episodes' not in dfs: return {} oc2 = add_CLA_column(dfs, 'OC2') start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce') endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce') oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce') ERRRR = ( oc2['CONTINUOUSLY_LOOKED_AFTER'] & (oc2['DOB'] + pd.DateOffset(years=4) <= start) & (oc2['DOB'] + pd.DateOffset(years=16) >= endo) & oc2['SDQ_SCORE'].isna() & (oc2['SDQ_REASON'] == 'SDQ1') ) return {'OC2': oc2[ERRRR].index.to_list()} return error, _validate def validate_357(): error = ErrorDefinition( code='357', description='If this is the first episode ever for this child, reason for new episode must be S. ' 'Check whether there is an episode immediately preceding this one, which has been left out. ' 'If not the reason for new episode code must be amended to S.', affected_fields=['RNE'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} eps = dfs['Episodes'] eps['DECOM'] = pd.to_datetime(eps['DECOM'], format='%d/%m/%Y', errors='coerce') eps = eps.loc[eps['DECOM'].notnull()] first_eps = eps.loc[eps.groupby('CHILD')['DECOM'].idxmin()] errs = first_eps[first_eps['RNE'] != 'S'].index.to_list() return {'Episodes': errs} return error, _validate def validate_117(): error = ErrorDefinition( code='117', description='Date of decision that a child should/should no longer be placed for adoption is beyond the current collection year or after the child ceased to be looked after.', affected_fields=['DATE_PLACED_CEASED', 'DATE_PLACED', 'DEC', 'REC', 'DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs: return {} else: episodes = dfs['Episodes'] placed_adoption = dfs['PlacedAdoption'] collection_end = dfs['metadata']['collection_end'] # datetime placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'], format='%d/%m/%Y', errors='coerce') placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce') episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce') episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') # Drop nans and continuing episodes episodes = episodes.dropna(subset=['DECOM']) episodes = episodes[episodes['REC'] != 'X1'] episodes = episodes.loc[episodes.groupby('CHILD')['DECOM'].idxmax()] # prepare to merge placed_adoption.reset_index(inplace=True) episodes.reset_index(inplace=True) p4a_cols = ['DATE_PLACED', 'DATE_PLACED_CEASED'] # latest episodes merged = episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa']) mask = ( (merged['DATE_PLACED'] > collection_end) | (merged['DATE_PLACED'] > merged['DEC']) | (merged['DATE_PLACED_CEASED'] > 
collection_end) | (merged['DATE_PLACED_CEASED'] > merged['DEC']) ) # If provided <DATE_PLACED> and/or <DATE_PLACED_CEASED> must not be > <COLLECTION_END_DATE> or <DEC> of latest episode where <REC> not = 'X1' pa_error_locs = merged.loc[mask, 'index_pa'] eps_error_locs = merged.loc[mask, 'index_eps'] return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()} return error, _validate def validate_118(): error = ErrorDefinition( code='118', description='Date of decision that a child should no longer be placed for adoption is before the current collection year or before the date the child started to be looked after.', affected_fields=['DECOM', 'DECOM', 'LS'] ) def _validate(dfs): if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs): return {} else: placed_adoption = dfs['PlacedAdoption'] episodes = dfs['Episodes'] collection_start = dfs['metadata']['collection_start'] code_list = ['V3', 'V4'] # datetime episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'], format='%d/%m/%Y', errors='coerce') collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce') # <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4' filter_by_ls = episodes[~(episodes['LS'].isin(code_list))] earliest_episode_idxs = filter_by_ls.groupby('CHILD')['DECOM'].idxmin() earliest_episodes = episodes[episodes.index.isin(earliest_episode_idxs)] # prepare to merge placed_adoption.reset_index(inplace=True) earliest_episodes.reset_index(inplace=True) # merge merged = earliest_episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa']) # drop rows where DATE_PLACED_CEASED is not provided merged = merged.dropna(subset=['DATE_PLACED_CEASED']) # If provided <DATE_PLACED_CEASED> must not be prior to <COLLECTION_START_DATE> or <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4' mask = (merged['DATE_PLACED_CEASED'] < merged['DECOM']) | (merged['DATE_PLACED_CEASED'] < collection_start) # error locations pa_error_locs = merged.loc[mask, 'index_pa'] eps_error_locs = merged.loc[mask, 'index_eps'] return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()} return error, _validate def validate_352(): error = ErrorDefinition( code='352', description='Child who started to be looked after was aged 18 or over.', affected_fields=['DECOM', 'RNE'], ) def _validate(dfs): if 'Header' not in dfs: return {} if 'Episodes' not in dfs: return {} else: header = dfs['Header'] episodes = dfs['Episodes'] header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce') episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') header['DOB18'] = header['DOB'] + pd.DateOffset(years=18) episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'), indicator=True).set_index('index') care_start = episodes_merged['RNE'].str.upper().astype(str).isin(['S']) started_over_18 = episodes_merged['DOB18'] <= episodes_merged['DECOM'] error_mask = care_start & started_over_18 error_locations = episodes.index[error_mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_209(): error = ErrorDefinition( code='209', description='Child looked after is of school age and should not have an unknown Unique Pupil Number (UPN) code of UN1.', affected_fields=['UPN', 'DOB'] ) def _validate(dfs): 
if 'Header' not in dfs: return {} else: header = dfs['Header'] collection_start = dfs['metadata']['collection_start'] # convert to datetime header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce') collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce') yr = collection_start.year - 1 reference_date = pd.to_datetime('31/08/' + str(yr), format='%d/%m/%Y', errors='coerce') # If <DOB> >= 4 years prior to 31/08/YYYY then <UPN> should not be 'UN1' Note: YYYY in this instance refers to the year prior to the collection start (for collection year 2019-2020, it would be looking at the 31/08/2018). mask = (reference_date >= (header['DOB'] + pd.offsets.DateOffset(years=4))) & (header['UPN'] == 'UN1') # error locations error_locs_header = header.index[mask] return {'Header': error_locs_header.tolist()} return error, _validate def validate_198(): error = ErrorDefinition( code='198', description="Child has not been looked after continuously for at least 12 months at 31 March but a reason " "for no Strengths and Difficulties (SDQ) score has been completed. ", affected_fields=['SDQ_REASON'], ) def _validate(dfs): if 'Episodes' not in dfs or 'OC2' not in dfs: return {} oc2 = add_CLA_column(dfs, 'OC2') error_mask = oc2['SDQ_REASON'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER'] error_locs = oc2.index[error_mask].to_list() return {'OC2': error_locs} return error, _validate def validate_185(): error = ErrorDefinition( code='185', description="Child has not been looked after continuously for at least 12 months at " + "31 March but a Strengths and Difficulties (SDQ) score has been completed.", affected_fields=['SDQ_SCORE'], ) def _validate(dfs): if 'Episodes' not in dfs or 'OC2' not in dfs: return {} oc2 = add_CLA_column(dfs, 'OC2') error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER'] error_locs = oc2.index[error_mask].to_list() return {'OC2': error_locs} return error, _validate def validate_186(): error = ErrorDefinition( code='186', description="Children aged 4 or over at the start of the year and children aged under 17 at the " + "end of the year and who have been looked after for at least 12 months continuously " + "should have a Strengths and Difficulties (SDQ) score completed.", affected_fields=['SDQ_SCORE'], ) def _validate(dfs): if 'Episodes' not in dfs or 'OC2' not in dfs: return {} oc2 = dfs['OC2'] collection_start_str = dfs['metadata']['collection_start'] collection_end_str = dfs['metadata']['collection_end'] collection_start = pd.to_datetime(collection_start_str, format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce') oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce') oc2 = add_CLA_column(dfs, 'OC2') oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4) oc2['17th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=17) error_mask = ( (oc2['4th_bday'] <= collection_start) & (oc2['17th_bday'] > collection_end) & oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2['SDQ_SCORE'].isna() ) oc2_errors = oc2.loc[error_mask].index.to_list() return {'OC2': oc2_errors} return error, _validate def validate_187(): error = ErrorDefinition( code='187', description="Child cannot be looked after continuously for 12 months at " + "31 March (OC2) and have any of adoption or care leavers returns completed.", affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR', # OC3 'IN_TOUCH', 'ACTIV', 'ACCOM'], # AD1 ) def _validate(dfs): if 
( 'OC3' not in dfs or 'AD1' not in dfs or 'Episodes' not in dfs ): return {} # add 'CONTINUOUSLY_LOOKED_AFTER' column ad1, oc3 = add_CLA_column(dfs, ['AD1', 'OC3']) # OC3 should_be_blank = ['IN_TOUCH', 'ACTIV', 'ACCOM'] oc3_mask = oc3['CONTINUOUSLY_LOOKED_AFTER'] & oc3[should_be_blank].notna().any(axis=1) oc3_error_locs = oc3[oc3_mask].index.to_list() # AD1 should_be_blank = ['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR'] ad1_mask = ad1['CONTINUOUSLY_LOOKED_AFTER'] & ad1[should_be_blank].notna().any(axis=1) ad1_error_locs = ad1[ad1_mask].index.to_list() return {'AD1': ad1_error_locs, 'OC3': oc3_error_locs} return error, _validate def validate_188(): error = ErrorDefinition( code='188', description="Child is aged under 4 years at the end of the year, " "but a Strengths and Difficulties (SDQ) score or a reason " "for no SDQ score has been completed. ", affected_fields=['SDQ_SCORE', 'SDQ_REASON'], ) def _validate(dfs): if 'OC2' not in dfs: return {} oc2 = dfs['OC2'] collection_end_str = dfs['metadata']['collection_end'] collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce') oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce') oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4) error_mask = ( (oc2['4th_bday'] > collection_end) & oc2[['SDQ_SCORE', 'SDQ_REASON']].notna().any(axis=1) ) oc2_errors = oc2.loc[error_mask].index.to_list() return {'OC2': oc2_errors} return error, _validate def validate_190(): error = ErrorDefinition( code='190', description="Child has not been looked after continuously for at least 12 months at 31 March but one or more " "data items relating to children looked after for 12 months have been completed.", affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'] , # AD1 ) def _validate(dfs): if ( 'OC2' not in dfs or 'Episodes' not in dfs ): return {} # add 'CONTINUOUSLY_LOOKED_AFTER' column oc2 = add_CLA_column(dfs, 'OC2') should_be_blank = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'] mask = ~oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_blank].notna().any(axis=1) error_locs = oc2[mask].index.to_list() return {'OC2': error_locs} return error, _validate def validate_191(): error = ErrorDefinition( code='191', description="Child has been looked after continuously for at least 12 months at 31 March but one or more " "data items relating to children looked after for 12 months have been left blank.", affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'], # OC2 ) def _validate(dfs): if ( 'OC2' not in dfs or 'Episodes' not in dfs ): return {} # add 'CONTINUOUSLY_LOOKED_AFTER' column oc2 = add_CLA_column(dfs, 'OC2') should_be_present = ['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'] mask = oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_present].isna().any(axis=1) error_locs = oc2[mask].index.to_list() return {'OC2': error_locs} return error, _validate def validate_607(): error = ErrorDefinition( code='607', description='Child ceased to be looked after in the year, but mother field has not been completed.', affected_fields=['DEC', 'REC', 'MOTHER', 'LS', 'SEX'] ) def _validate(dfs): if 'Header' not in dfs or 'Episodes' not in dfs: return {} else: header = dfs['Header'] episodes = dfs['Episodes'] 
collection_start = dfs['metadata']['collection_start'] collection_end = dfs['metadata']['collection_end'] code_list = ['V3', 'V4'] # convert to datetiime format episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce') collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce') # prepare to merge episodes.reset_index(inplace=True) header.reset_index(inplace=True) merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']) # CEASED_TO_BE_LOOKED_AFTER = DEC is not null and REC is filled but not equal to X1 CEASED_TO_BE_LOOKED_AFTER = merged['DEC'].notna() & ((merged['REC'] != 'X1') & merged['REC'].notna()) # and <LS> not = ‘V3’ or ‘V4’ check_LS = ~(merged['LS'].isin(code_list)) # and <DEC> is in <CURRENT_COLLECTION_YEAR check_DEC = (collection_start <= merged['DEC']) & (merged['DEC'] <= collection_end) # Where <CEASED_TO_BE_LOOKED_AFTER> = ‘Y’, and <LS> not = ‘V3’ or ‘V4’ and <DEC> is in <CURRENT_COLLECTION_YEAR> and <SEX> = ‘2’ then <MOTHER> should be provided. mask = CEASED_TO_BE_LOOKED_AFTER & check_LS & check_DEC & (merged['SEX'] == '2') & (merged['MOTHER'].isna()) header_error_locs = merged.loc[mask, 'index_er'] eps_error_locs = merged.loc[mask, 'index_eps'] return {'Episodes': eps_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()} return error, _validate def validate_210(): error = ErrorDefinition( code='210', description='Children looked after for more than a week at 31 March should not have an unknown Unique Pupil Number (UPN) code of UN4.', affected_fields=['UPN', 'DECOM'] ) def _validate(dfs): if 'Header' not in dfs or 'Episodes' not in dfs: return {} else: header = dfs['Header'] episodes = dfs['Episodes'] collection_end = dfs['metadata']['collection_end'] # convert to datetime episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce') yr = collection_end.year reference_date = ref_date = pd.to_datetime('24/03/' + str(yr), format='%d/%m/%Y', errors='coerce') # prepare to merge episodes.reset_index(inplace=True) header.reset_index(inplace=True) # the logical way is to merge left on UPN but that will be a one to many merge and may not go as well as a many to one merge that we've been doing. merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']) # If <UPN> = 'UN4' then no episode <DECOM> must be >` = 24/03/YYYY Note: YYYY refers to the current collection year. 
mask = (merged['UPN'] == 'UN4') & (merged['DECOM'] >= reference_date) # error locations error_locs_header = merged.loc[mask, 'index_er'] error_locs_eps = merged.loc[mask, 'index_eps'] return {'Episodes': error_locs_eps.tolist(), 'Header': error_locs_header.unique().tolist()} return error, _validate def validate_1010(): error = ErrorDefinition( code='1010', description='This child has no episodes loaded for current year even though there was an open episode of ' + 'care at the end of the previous year, and care leaver data has been entered.', affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'], ) def _validate(dfs): if 'Episodes' not in dfs or 'Episodes_last' not in dfs or 'OC3' not in dfs: return {} else: episodes = dfs['Episodes'] episodes_last = dfs['Episodes_last'] oc3 = dfs['OC3'] # convert DECOM to datetime, drop missing/invalid sort by CHILD then DECOM, episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce') episodes_last = episodes_last.dropna(subset=['DECOM']).sort_values(['CHILD', 'DECOM'], ascending=True) # Keep only the final episode for each child (ie where the following row has a different CHILD value) episodes_last = episodes_last[ episodes_last['CHILD'].shift(-1) != episodes_last['CHILD'] ] # Keep only the final episodes that were still open episodes_last = episodes_last[episodes_last['DEC'].isna()] # The remaining children ought to have episode data in the current year if they are in OC3 has_current_episodes = oc3['CHILD'].isin(episodes['CHILD']) has_open_episode_last = oc3['CHILD'].isin(episodes_last['CHILD']) error_mask = ~has_current_episodes & has_open_episode_last validation_error_locations = oc3.index[error_mask] return {'OC3': validation_error_locations.tolist()} return error, _validate def validate_525(): error = ErrorDefinition( code='525', description='A child for whom the decision to be placed for adoption has been reversed cannot be adopted during the year.', affected_fields=['DATE_PLACED_CEASED', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR'] ) def _validate(dfs): if 'PlacedAdoption' not in dfs or 'AD1' not in dfs: return {} else: placed_adoption = dfs['PlacedAdoption'] ad1 = dfs['AD1'] # prepare to merge placed_adoption.reset_index(inplace=True) ad1.reset_index(inplace=True) merged = placed_adoption.merge(ad1, on='CHILD', how='left', suffixes=['_placed', '_ad1']) # If <DATE_PLACED_CEASED> not Null, then <DATE_INT>; <DATE_MATCH>; <FOSTER_CARE>; <NB_ADOPTR>; <SEX_ADOPTR>; and <LS_ADOPTR> should not be provided mask = merged['DATE_PLACED_CEASED'].notna() & ( merged['DATE_INT'].notna() | merged['DATE_MATCH'].notna() | merged['FOSTER_CARE'].notna() | merged['NB_ADOPTR'].notna() | merged['SEX_ADOPTR'].notna() | merged['LS_ADOPTR'].notna()) # error locations pa_error_locs = merged.loc[mask, 'index_placed'] ad_error_locs = merged.loc[mask, 'index_ad1'] # return result return {'PlacedAdoption': pa_error_locs.tolist(), 'AD1': ad_error_locs.tolist()} return error, _validate def validate_335(): error = ErrorDefinition( code='335', description='The current foster value (0) suggests that child is not adopted by current foster carer, but last placement is A2, A3, or A5. 
Or the current foster value (1) suggests that child is adopted by current foster carer, but last placement is A1, A4 or A6.', affected_fields=['PLACE', 'FOSTER_CARE'] ) def _validate(dfs): if 'Episodes' not in dfs or 'AD1' not in dfs: return {} else: episodes = dfs['Episodes'] ad1 = dfs['AD1'] # prepare to merge episodes.reset_index(inplace=True) ad1.reset_index(inplace=True) merged = episodes.merge(ad1, on='CHILD', how='left', suffixes=['_eps', '_ad1']) # Where <PL> = 'A2', 'A3' or 'A5' and <DEC> = 'E1', 'E11', 'E12' <FOSTER_CARE> should not be '0'; Where <PL> = ‘A1’, ‘A4’ or ‘A6’ and <REC> = ‘E1’, ‘E11’, ‘E12’ <FOSTER_CARE> should not be ‘1’. mask = ( merged['REC'].isin(['E1', 'E11', 'E12']) & ( (merged['PLACE'].isin(['A2', 'A3', 'A5']) & (merged['FOSTER_CARE'].astype(str) == '0')) | (merged['PLACE'].isin(['A1', 'A4', 'A6']) & (merged['FOSTER_CARE'].astype(str) == '1')) ) ) eps_error_locs = merged.loc[mask, 'index_eps'] ad1_error_locs = merged.loc[mask, 'index_ad1'] # use .unique since join is many to one return {'Episodes': eps_error_locs.tolist(), 'AD1': ad1_error_locs.unique().tolist()} return error, _validate def validate_215(): error = ErrorDefinition( code='215', description='Child has care leaver information but one or more data items relating to children looked after for 12 months have been completed.', affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'] ) def _validate(dfs): if 'OC3' not in dfs or 'OC2' not in dfs: return {} else: oc3 = dfs['OC3'] oc2 = dfs['OC2'] # prepare to merge oc3.reset_index(inplace=True) oc2.reset_index(inplace=True) merged = oc3.merge(oc2, on='CHILD', how='left', suffixes=['_3', '_2']) # If any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided then <CONVICTED>; <HEALTH_CHECK>; <IMMUNISATIONS>; <TEETH_CHECK>; <HEALTH_ASSESSMENT>; <SUBSTANCE MISUSE>; <INTERVENTION_RECEIVED>; <INTERVENTION_OFFERED>; should not be provided mask = (merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna()) & ( merged['CONVICTED'].notna() | merged['HEALTH_CHECK'].notna() | merged['IMMUNISATIONS'].notna() | merged['TEETH_CHECK'].notna() | merged['HEALTH_ASSESSMENT'].notna() | merged[ 'SUBSTANCE_MISUSE'].notna() | merged['INTERVENTION_RECEIVED'].notna() | merged[ 'INTERVENTION_OFFERED'].notna()) # error locations oc3_error_locs = merged.loc[mask, 'index_3'] oc2_error_locs = merged.loc[mask, 'index_2'] return {'OC3': oc3_error_locs.tolist(), 'OC2': oc2_error_locs.tolist()} return error, _validate def validate_399(): error = ErrorDefinition( code='399', description='Mother field, review field or participation field are completed but ' + 'child is looked after under legal status V3 or V4.', affected_fields=['MOTHER', 'LS', 'REVIEW', 'REVIEW_CODE'] ) def _validate(dfs): if 'Episodes' not in dfs or 'Header' not in dfs or 'Reviews' not in dfs: return {} else: episodes = dfs['Episodes'] header = dfs['Header'] reviews = dfs['Reviews'] code_list = ['V3', 'V4'] # prepare to merge episodes['index_eps'] = episodes.index header['index_hdr'] = header.index reviews['index_revs'] = reviews.index # merge merged = (episodes.merge(header, on='CHILD', how='left') .merge(reviews, on='CHILD', how='left')) # If <LS> = 'V3' or 'V4' then <MOTHER>, <REVIEW> and <REVIEW_CODE> should not be provided mask = merged['LS'].isin(code_list) & ( merged['MOTHER'].notna() | merged['REVIEW'].notna() | merged['REVIEW_CODE'].notna()) # Error 
locations eps_errors = merged.loc[mask, 'index_eps'] header_errors = merged.loc[mask, 'index_hdr'].unique() revs_errors = merged.loc[mask, 'index_revs'].unique() return {'Episodes': eps_errors.tolist(), 'Header': header_errors.tolist(), 'Reviews': revs_errors.tolist()} return error, _validate def validate_189(): error = ErrorDefinition( code='189', description='Child is aged 17 years or over at the beginning of the year, but an Strengths and Difficulties ' + '(SDQ) score or a reason for no Strengths and Difficulties (SDQ) score has been completed.', affected_fields=['DOB', 'SDQ_SCORE', 'SDQ_REASON'] ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] collection_start = dfs['metadata']['collection_start'] # datetime format allows appropriate comparison between dates oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce') collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce') # If <DOB> >17 years prior to <COLLECTION_START_DATE> then <SDQ_SCORE> and <SDQ_REASON> should not be provided mask = ((oc2['DOB'] + pd.offsets.DateOffset(years=17)) <= collection_start) & ( oc2['SDQ_REASON'].notna() | oc2['SDQ_SCORE'].notna()) # That is, raise error if collection_start > DOB + 17years oc_error_locs = oc2.index[mask] return {'OC2': oc_error_locs.tolist()} return error, _validate def validate_226(): error = ErrorDefinition( code='226', description='Reason for placement change is not required.', affected_fields=['REASON_PLACE_CHANGE', 'PLACE'] ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] code_list = ['T0', 'T1', 'T2', 'T3', 'T4'] episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') # create column to see previous REASON_PLACE_CHANGE episodes = episodes.sort_values(['CHILD', 'DECOM']) episodes['PREVIOUS_REASON'] = episodes.groupby('CHILD')['REASON_PLACE_CHANGE'].shift(1) # If <PL> = 'T0'; 'T1'; 'T2'; 'T3' or 'T4' then <REASON_PLACE_CHANGE> should be null in current episode and current episode - 1 mask = episodes['PLACE'].isin(code_list) & ( episodes['REASON_PLACE_CHANGE'].notna() | episodes['PREVIOUS_REASON'].notna()) # error locations error_locs = episodes.index[mask] return {'Episodes': error_locs.tolist()} return error, _validate def validate_358(): error = ErrorDefinition( code='358', description='Child with this legal status should not be under 10.', affected_fields=['DECOM', 'DOB', 'LS'] ) def _validate(dfs): if 'Episodes' not in dfs or 'Header' not in dfs: return {} else: episodes = dfs['Episodes'] header = dfs['Header'] code_list = ['J1', 'J2', 'J3'] # convert dates to datetime format episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce') # prepare to merge episodes.reset_index(inplace=True) header.reset_index(inplace=True) merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']) # Where <LS> = ‘J1’ or ‘J2’ or ‘J3’ then <DOB> should <= to 10 years prior to <DECOM> mask = merged['LS'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=10) < merged['DECOM']) # That is, raise error if DECOM > DOB + 10years # error locations header_error_locs = merged.loc[mask, 'index_er'] episode_error_locs = merged.loc[mask, 'index_eps'] # one to many join implies use .unique on the 'one' return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()} return error, _validate 
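# --- Hedged usage illustration (not part of the original rule set) ---
# Every validate_* factory above follows the same shape: it returns an ErrorDefinition plus a
# _validate callable that takes a dict of DataFrames keyed by table name ('Episodes', 'Header',
# 'OC2', ... and, where dates are compared, a 'metadata' dict with 'collection_start' /
# 'collection_end') and returns {table_name: [row indices flagged]}. The sketch below shows
# that calling convention against validate_358; the CHILD/DOB/DECOM/LS values are invented
# purely for illustration and make no claim about which rows the rule flags.
import pandas as pd


def run_rule_sketch(rule_factory, dfs):
    # Build the rule and run its check; returns the dict of flagged row indices per table.
    error_definition, _validate = rule_factory()
    return _validate(dfs)


def _example_dfs_for_358():
    return {
        'Episodes': pd.DataFrame({'CHILD': ['101', '102'],
                                  'DECOM': ['01/06/2020', '01/06/2020'],
                                  'LS': ['J2', 'C2']}),
        'Header': pd.DataFrame({'CHILD': ['101', '102'],
                                'DOB': ['01/01/2015', '01/01/2005']}),
    }

# e.g. flagged = run_rule_sketch(validate_358, _example_dfs_for_358())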
def validate_407(): error = ErrorDefinition( code='407', description='Reason episode ceased is Special Guardianship Order, but child has reached age 18.', affected_fields=['DEC', 'DOB', 'REC'] ) def _validate(dfs): if 'Episodes' not in dfs or 'Header' not in dfs: return {} else: episodes = dfs['Episodes'] header = dfs['Header'] code_list = ['E45', 'E46', 'E47', 'E48'] # convert dates to datetime format episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce') header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce') # prepare to merge episodes.reset_index(inplace=True) header.reset_index(inplace=True) merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']) # If <REC> = ‘E45’ or ‘E46’ or ‘E47’ or ‘E48’ then <DOB> must be < 18 years prior to <DEC> mask = merged['REC'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=18) < merged['DEC']) # That is, raise error if DEC > DOB + 10years # error locations header_error_locs = merged.loc[mask, 'index_er'] episode_error_locs = merged.loc[mask, 'index_eps'] # one to many join implies use .unique on the 'one' return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()} return error, _validate def validate_1007(): error = ErrorDefinition( code='1007', description='Care leaver information is not required for 17- or 18-year olds who are still looked after.', affected_fields=['DEC', 'REC', 'DOB', 'IN_TOUCH', 'ACTIV', 'ACCOM'] ) def _validate(dfs): if 'Episodes' not in dfs or 'OC3' not in dfs: return {} else: episodes = dfs['Episodes'] oc3 = dfs['OC3'] collection_end = dfs['metadata']['collection_end'] # convert dates to datetime format oc3['DOB'] = pd.to_datetime(oc3['DOB'], format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce') # prepare to merge episodes.reset_index(inplace=True) oc3.reset_index(inplace=True) merged = episodes.merge(oc3, on='CHILD', how='left', suffixes=['_eps', '_oc3']) # If <DOB> < 19 and >= to 17 years prior to <COLLECTION_END_DATE> and current episode <DEC> and or <REC> not provided then <IN_TOUCH>, <ACTIV> and <ACCOM> should not be provided check_age = (merged['DOB'] + pd.offsets.DateOffset(years=17) <= collection_end) & ( merged['DOB'] + pd.offsets.DateOffset(years=19) > collection_end) # That is, check that 17<=age<19 check_dec_rec = merged['REC'].isna() | merged['DEC'].isna() # if either DEC or REC are absent mask = check_age & check_dec_rec & ( merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna()) # Then raise an error if either IN_TOUCH, ACTIV, or ACCOM have been provided too # error locations oc3_error_locs = merged.loc[mask, 'index_oc3'] episode_error_locs = merged.loc[mask, 'index_eps'] # one to many join implies use .unique on the 'one' return {'Episodes': episode_error_locs.tolist(), 'OC3': oc3_error_locs.unique().tolist()} return error, _validate def validate_442(): error = ErrorDefinition( code='442', description='Unique Pupil Number (UPN) field is not completed.', affected_fields=['UPN', 'LS'] ) def _validate(dfs): if ('Episodes' not in dfs) or ('Header' not in dfs): return {} else: episodes = dfs['Episodes'] header = dfs['Header'] episodes.reset_index(inplace=True) header.reset_index(inplace=True) code_list = ['V3', 'V4'] # merge left on episodes to get all children for which episodes have been recorded even if they do not exist on the header. 
merged = episodes.merge(header, on=['CHILD'], how='left', suffixes=['_eps', '_er']) # Where any episode present, with an <LS> not = 'V3' or 'V4' then <UPN> must be provided mask = (~merged['LS'].isin(code_list)) & merged['UPN'].isna() episode_error_locs = merged.loc[mask, 'index_eps'] header_error_locs = merged.loc[mask, 'index_er'] return {'Episodes': episode_error_locs.tolist(), # Select unique values since many episodes are joined to one header # and multiple errors will be raised for the same index. 'Header': header_error_locs.dropna().unique().tolist()} return error, _validate def validate_344(): error = ErrorDefinition( code='344', description='The record shows the young person has died or returned home to live with parent(s) or someone with parental responsibility for a continuous period of 6 months or more, but activity and/or accommodation on leaving care have been completed.', affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'] ) def _validate(dfs): if 'OC3' not in dfs: return {} else: oc3 = dfs['OC3'] # If <IN_TOUCH> = 'DIED' or 'RHOM' then <ACTIV> and <ACCOM> should not be provided mask = ((oc3['IN_TOUCH'] == 'DIED') | (oc3['IN_TOUCH'] == 'RHOM')) & ( oc3['ACTIV'].notna() | oc3['ACCOM'].notna()) error_locations = oc3.index[mask] return {'OC3': error_locations.to_list()} return error, _validate def validate_345(): error = ErrorDefinition( code='345', description='The data collection record shows the local authority is in touch with this young person, but activity and/or accommodation data items are zero.', affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'] ) def _validate(dfs): if 'OC3' not in dfs: return {} else: oc3 = dfs['OC3'] # If <IN_TOUCH> = 'Yes' then <ACTIV> and <ACCOM> must be provided mask = (oc3['IN_TOUCH'] == 'YES') & (oc3['ACTIV'].isna() | oc3['ACCOM'].isna()) error_locations = oc3.index[mask] return {'OC3': error_locations.to_list()} return error, _validate def validate_384(): error = ErrorDefinition( code='384', description='A child receiving respite care cannot be in a long-term foster placement ', affected_fields=['PLACE', 'LS'] ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] # Where <LS> = 'V3' or 'V4' then <PL> must not be 'U1' or 'U4' mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & ( (episodes['PLACE'] == 'U1') | (episodes['PLACE'] == 'U4')) error_locations = episodes.index[mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_390(): error = ErrorDefinition( code='390', description='Reason episode ceased is adopted but child has not been previously placed for adoption.', affected_fields=['PLACE', 'REC'] ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] # If <REC> = 'E11' or 'E12' then <PL> must be one of 'A3', 'A4', 'A5' or 'A6' mask = ((episodes['REC'] == 'E11') | (episodes['REC'] == 'E12')) & ~( (episodes['PLACE'] == 'A3') | (episodes['PLACE'] == 'A4') | (episodes['PLACE'] == 'A5') | ( episodes['PLACE'] == 'A6')) error_locations = episodes.index[mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_378(): error = ErrorDefinition( code='378', description='A child who is placed with parent(s) cannot be looked after under a single period of accommodation under Section 20 of the Children Act 1989.', affected_fields=['PLACE', 'LS'] ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] # the & sign supercedes the ==, so brackets are necessary here mask = 
(episodes['PLACE'] == 'P1') & (episodes['LS'] == 'V2') error_locations = episodes.index[mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_398(): error = ErrorDefinition( code='398', description='Distance field completed but child looked after under legal status V3 or V4.', affected_fields=['LS', 'HOME_POST', 'PL_POST'] ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & ( episodes['HOME_POST'].notna() | episodes['PL_POST'].notna()) error_locations = episodes.index[mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_451(): error = ErrorDefinition( code='451', description='Child is still freed for adoption, but freeing orders could not be applied for since 30 December 2005.', affected_fields=['DEC', 'REC', 'LS'] ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] mask = episodes['DEC'].isna() & episodes['REC'].isna() & (episodes['LS'] == 'D1') error_locations = episodes.index[mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_519(): error = ErrorDefinition( code='519', description='Data entered on the legal status of adopters shows civil partnership couple, but data entered on genders of adopters does not show it as a couple.', affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'] ) def _validate(dfs): if 'AD1' not in dfs: return {} else: ad1 = dfs['AD1'] mask = (ad1['LS_ADOPTR'] == 'L2') & ( (ad1['SEX_ADOPTR'] != 'MM') & (ad1['SEX_ADOPTR'] != 'FF') & (ad1['SEX_ADOPTR'] != 'MF')) error_locations = ad1.index[mask] return {'AD1': error_locations.to_list()} return error, _validate def validate_520(): error = ErrorDefinition( code='520', description='Data entry on the legal status of adopters shows different gender married couple but data entry on genders of adopters shows it as a same gender couple.', affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'] ) def _validate(dfs): if 'AD1' not in dfs: return {} else: ad1 = dfs['AD1'] # check condition mask = (ad1['LS_ADOPTR'] == 'L11') & (ad1['SEX_ADOPTR'] != 'MF') error_locations = ad1.index[mask] return {'AD1': error_locations.to_list()} return error, _validate def validate_522(): error = ErrorDefinition( code='522', description='Date of decision that the child should be placed for adoption must be on or before the date that a child should no longer be placed for adoption.', affected_fields=['DATE_PLACED', 'DATE_PLACED_CEASED'] ) def _validate(dfs): if 'PlacedAdoption' not in dfs: return {} else: placed_adoption = dfs['PlacedAdoption'] # Convert to datetimes placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'], format='%d/%m/%Y', errors='coerce') placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y', errors='coerce') # Boolean mask mask = placed_adoption['DATE_PLACED_CEASED'] > placed_adoption['DATE_PLACED'] error_locations = placed_adoption.index[mask] return {'PlacedAdoption': error_locations.to_list()} return error, _validate def validate_563(): error = ErrorDefinition( code='563', description='The child should no longer be placed for adoption but the date of the decision that the child should be placed for adoption is blank', affected_fields=['DATE_PLACED', 'REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'], ) def _validate(dfs): if 'PlacedAdoption' not in dfs: return {} else: placed_adoption = dfs['PlacedAdoption'] mask = 
placed_adoption['REASON_PLACED_CEASED'].notna() & placed_adoption['DATE_PLACED_CEASED'].notna() & \ placed_adoption['DATE_PLACED'].isna() error_locations = placed_adoption.index[mask] return {'PlacedAdoption': error_locations.to_list()} return error, _validate def validate_544(): error = ErrorDefinition( code='544', description="Any child who has conviction information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.", affected_fields=['CONVICTED', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'], ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] convict = oc2['CONVICTED'].astype(str) == '1' immunisations = oc2['IMMUNISATIONS'].isna() teeth_ck = oc2['TEETH_CHECK'].isna() health_ass = oc2['HEALTH_ASSESSMENT'].isna() sub_misuse = oc2['SUBSTANCE_MISUSE'].isna() error_mask = convict & (immunisations | teeth_ck | health_ass | sub_misuse) validation_error_locations = oc2.index[error_mask] return {'OC2': validation_error_locations.to_list()} return error, _validate def validate_634(): error = ErrorDefinition( code='634', description='There are entries for previous permanence options, but child has not started to be looked after from 1 April 2016 onwards.', affected_fields=['LA_PERM', 'PREV_PERM', 'DATE_PERM', 'DECOM'] ) def _validate(dfs): if 'Episodes' not in dfs or 'PrevPerm' not in dfs: return {} else: episodes = dfs['Episodes'] prevperm = dfs['PrevPerm'] collection_start = dfs['metadata']['collection_start'] # convert date field to appropriate format episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce') # the maximum date has the highest possibility of satisfying the condition episodes['LAST_DECOM'] = episodes.groupby('CHILD')['DECOM'].transform('max') # prepare to merge episodes.reset_index(inplace=True) prevperm.reset_index(inplace=True) merged = prevperm.merge(episodes, on='CHILD', how='left', suffixes=['_prev', '_eps']) # If <PREV_PERM> or <LA_PERM> or <DATE_PERM> provided, then at least 1 episode must have a <DECOM> later than 01/04/2016 mask = (merged['PREV_PERM'].notna() | merged['DATE_PERM'].notna() | merged['LA_PERM'].notna()) & ( merged['LAST_DECOM'] < collection_start) eps_error_locs = merged.loc[mask, 'index_eps'] prevperm_error_locs = merged.loc[mask, 'index_prev'] # return {'PrevPerm':prevperm_error_locs} return {'Episodes': eps_error_locs.unique().tolist(), 'PrevPerm': prevperm_error_locs.unique().tolist()} return error, _validate def validate_158(): error = ErrorDefinition( code='158', description='If a child has been recorded as receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be left blank.', affected_fields=['INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'], ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] error_mask = oc2['INTERVENTION_RECEIVED'].astype(str).eq('1') & oc2['INTERVENTION_OFFERED'].notna() error_locations = oc2.index[error_mask] return {'OC2': error_locations.tolist()} return error, _validate def validate_133(): error = ErrorDefinition( code='133', description='Data entry for accommodation after leaving care is invalid. 
If reporting on a childs accommodation after leaving care the data entry must be valid', affected_fields=['ACCOM'], ) def _validate(dfs): if 'OC3' not in dfs: return {} else: oc3 = dfs['OC3'] valid_codes = ['B1', 'B2', 'C1', 'C2', 'D1', 'D2', 'E1', 'E2', 'G1', 'G2', 'H1', 'H2', 'K1', 'K2', 'R1', 'R2', 'S2', 'T1', 'T2', 'U1', 'U2', 'V1', 'V2', 'W1', 'W2', 'X2', 'Y1', 'Y2', 'Z1', 'Z2', '0'] error_mask = ~oc3['ACCOM'].isna() & ~oc3['ACCOM'].isin(valid_codes) error_locations = oc3.index[error_mask] return {'OC3': error_locations.tolist()} return error, _validate def validate_565(): error = ErrorDefinition( code='565', description='The date that the child started to be missing or away from placement without authorisation has been completed but whether the child was missing or away from placement without authorisation has not been completed.', affected_fields=['MISSING', 'MIS_START'] ) def _validate(dfs): if 'Missing' not in dfs: return {} else: missing = dfs['Missing'] mask = missing['MIS_START'].notna() & missing['MISSING'].isna() error_locations = missing.index[mask] return {'Missing': error_locations.to_list()} return error, _validate def validate_433(): error = ErrorDefinition( code='433', description='The reason for new episode suggests that this is a continuation episode, but the episode does not start on the same day as the last episode finished.', affected_fields=['RNE', 'DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] episodes['original_index'] = episodes.index episodes.sort_values(['CHILD', 'DECOM', 'DEC'], inplace=True) episodes[['PREVIOUS_DEC', 'PREVIOUS_CHILD']] = episodes[['DEC', 'CHILD']].shift(1) rne_is_ongoing = episodes['RNE'].str.upper().astype(str).isin(['P', 'L', 'T', 'U', 'B']) date_mismatch = episodes['PREVIOUS_DEC'] != episodes['DECOM'] missing_date = episodes['PREVIOUS_DEC'].isna() | episodes['DECOM'].isna() same_child = episodes['PREVIOUS_CHILD'] == episodes['CHILD'] error_mask = rne_is_ongoing & (date_mismatch | missing_date) & same_child error_locations = episodes['original_index'].loc[error_mask].sort_values() return {'Episodes': error_locations.to_list()} return error, _validate def validate_437(): error = ErrorDefinition( code='437', description='Reason episode ceased is child has died or is aged 18 or over but there are further episodes.', affected_fields=['REC'], ) # !# potential false negatives, as this only operates on the current year's data def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') episodes.sort_values(['CHILD', 'DECOM'], inplace=True) episodes[['NEXT_DECOM', 'NEXT_CHILD']] = episodes[['DECOM', 'CHILD']].shift(-1) # drop rows with missing DECOM as invalid/missing values can lead to errors episodes = episodes.dropna(subset=['DECOM']) ceased_e2_e15 = episodes['REC'].str.upper().astype(str).isin(['E2', 'E15']) has_later_episode = episodes['CHILD'] == episodes['NEXT_CHILD'] error_mask = ceased_e2_e15 & has_later_episode error_locations = episodes.index[error_mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_547(): error = ErrorDefinition( code='547', description="Any child who has health promotion information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.", affected_fields=['HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 
'SUBSTANCE_MISUSE'], ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] healthck = oc2['HEALTH_CHECK'].astype(str) == '1' immunisations = oc2['IMMUNISATIONS'].isna() teeth_ck = oc2['TEETH_CHECK'].isna() health_ass = oc2['HEALTH_ASSESSMENT'].isna() sub_misuse = oc2['SUBSTANCE_MISUSE'].isna() error_mask = healthck & (immunisations | teeth_ck | health_ass | sub_misuse) validation_error_locations = oc2.index[error_mask] return {'OC2': validation_error_locations.to_list()} return error, _validate def validate_635(): error = ErrorDefinition( code='635', description='There are entries for date of order and local authority code where previous permanence option was arranged but previous permanence code is Z1', affected_fields=['LA_PERM', 'DATE_PERM', 'PREV_PERM'] ) def _validate(dfs): if 'PrevPerm' not in dfs: return {} else: prev_perm = dfs['PrevPerm'] # raise and error if either LA_PERM or DATE_PERM are present, yet PREV_PERM is absent. mask = ((prev_perm['LA_PERM'].notna() | prev_perm['DATE_PERM'].notna()) & prev_perm['PREV_PERM'].isna()) error_locations = prev_perm.index[mask] return {'PrevPerm': error_locations.to_list()} return error, _validate def validate_550(): error = ErrorDefinition( code='550', description='A placement provider code of PR0 can only be associated with placement P1.', affected_fields=['PLACE', 'PLACE_PROVIDER'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] mask = (episodes['PLACE'] != 'P1') & episodes['PLACE_PROVIDER'].eq('PR0') validation_error_locations = episodes.index[mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_217(): error = ErrorDefinition( code='217', description='Children who are placed for adoption with current foster carers (placement types A3 or A5) must have a reason for new episode of S, T or U.', affected_fields=['PLACE', 'DECOM', 'RNE'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') max_decom_allowed = pd.to_datetime('01/04/2015', format='%d/%m/%Y', errors='coerce') reason_new_ep = ['S', 'T', 'U'] place_codes = ['A3', 'A5'] mask = (episodes['PLACE'].isin(place_codes) & (episodes['DECOM'] >= max_decom_allowed)) & ~episodes[ 'RNE'].isin(reason_new_ep) validation_error_mask = mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_518(): error = ErrorDefinition( code='518', description='If reporting legal status of adopters is L4 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females.', affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'], ) def _validate(dfs): if 'AD1' not in dfs: return {} else: AD1 = dfs['AD1'] error_mask = AD1['LS_ADOPTR'].eq('L4') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF']) error_locations = AD1.index[error_mask] return {'AD1': error_locations.tolist()} return error, _validate def validate_517(): error = ErrorDefinition( code='517', description='If reporting legal status of adopters is L3 then the genders of adopters should be coded as MF. 
MF = the adopting couple are male and female.', affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'], ) def _validate(dfs): if 'AD1' not in dfs: return {} else: AD1 = dfs['AD1'] error_mask = AD1['LS_ADOPTR'].eq('L3') & ~AD1['SEX_ADOPTR'].isin(['MF']) error_locations = AD1.index[error_mask] return {'AD1': error_locations.tolist()} return error, _validate def validate_558(): error = ErrorDefinition( code='558', description='If a child has been adopted, then the decision to place them for adoption has not been disrupted and the date of the decision that a child should no longer be placed for adoption should be left blank. if the REC code is either E11 or E12 then the DATE PLACED CEASED date should not be provided', affected_fields=['DATE_PLACED_CEASED', 'REC'], ) def _validate(dfs): if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs: return {} else: episodes = dfs['Episodes'] placedAdoptions = dfs['PlacedAdoption'] episodes = episodes.reset_index() rec_codes = ['E11', 'E12'] placeEpisodes = episodes[episodes['REC'].isin(rec_codes)] merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index') episodes_with_errors = merged[merged['DATE_PLACED_CEASED'].notna()] error_mask = episodes.index.isin(episodes_with_errors.index) error_locations = episodes.index[error_mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_453(): error = ErrorDefinition( code='453', description='Contradiction between placement distance in the last episode of the previous year and in the first episode of the current year.', affected_fields=['PL_DISTANCE'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} if 'Episodes_last' not in dfs: return {} else: episodes = dfs['Episodes'] episodes_last = dfs['Episodes_last'] episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce') episodes['PL_DISTANCE'] = pd.to_numeric(episodes['PL_DISTANCE'], errors='coerce') episodes_last['PL_DISTANCE'] = pd.to_numeric(episodes_last['PL_DISTANCE'], errors='coerce') # drop rows with missing DECOM before finding idxmin/max, as invalid/missing values can lead to errors episodes = episodes.dropna(subset=['DECOM']) episodes_last = episodes_last.dropna(subset=['DECOM']) episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin() episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax() episodes = episodes[episodes.index.isin(episodes_min)] episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)] episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'], suffixes=('', '_last'), indicator=True).set_index('index') in_both_years = episodes_merged['_merge'] == 'both' same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last'] last_year_open = episodes_merged['DEC_last'].isna() different_pl_dist = abs(episodes_merged['PL_DISTANCE'] - episodes_merged['PL_DISTANCE_last']) >= 0.2 error_mask = in_both_years & same_rne & last_year_open & different_pl_dist validation_error_locations = episodes.index[error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_516(): error = ErrorDefinition( code='516', description='The episode data submitted for this child does not show that he/she was with their former foster carer(s) during the year.If the code in the reason episode ceased is E45 or E46 the child must have a placement code of U1 to U6.', affected_fields=['REC', 
'PLACE'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] place_codes = ['U1', 'U2', 'U3', 'U4', 'U5', 'U6'] rec_codes = ['E45', 'E46'] error_mask = episodes['REC'].isin(rec_codes) & ~episodes['PLACE'].isin(place_codes) validation_error_locations = episodes.index[error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_511(): error = ErrorDefinition( code='511', description='If reporting that the number of person(s) adopting the looked after child is two adopters then the code should only be MM, FF or MF. MM = the adopting couple are both males; FF = the adopting couple are both females; MF = The adopting couple are male and female.', affected_fields=['NB_ADOPTR', 'SEX_ADOPTR'], ) def _validate(dfs): if 'AD1' not in dfs: return {} else: AD1 = dfs['AD1'] mask = AD1['NB_ADOPTR'].astype(str).eq('2') & AD1['SEX_ADOPTR'].isin(['M1', 'F1']) validation_error_mask = mask validation_error_locations = AD1.index[validation_error_mask] return {'AD1': validation_error_locations.tolist()} return error, _validate def validate_524(): error = ErrorDefinition( code='524', description='If reporting legal status of adopters is L12 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females', affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'], ) def _validate(dfs): if 'AD1' not in dfs: return {} else: AD1 = dfs['AD1'] error_mask = AD1['LS_ADOPTR'].eq('L12') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF']) error_locations = AD1.index[error_mask] return {'AD1': error_locations.tolist()} return error, _validate def validate_441(): error = ErrorDefinition( code='441', description='Participation method indicates child was 4 years old or over at the time of the review, but the date of birth and review date indicates the child was under 4 years old.', affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'], ) def _validate(dfs): if 'Reviews' not in dfs: return {} else: reviews = dfs['Reviews'] reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce') reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce') reviews = reviews.dropna(subset=['REVIEW', 'DOB']) mask = reviews['REVIEW_CODE'].isin(['PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7']) & ( reviews['REVIEW'] < reviews['DOB'] + pd.offsets.DateOffset(years=4)) validation_error_mask = mask validation_error_locations = reviews.index[validation_error_mask] return {'Reviews': validation_error_locations.tolist()} return error, _validate def validate_184(): error = ErrorDefinition( code='184', description='Date of decision that a child should be placed for adoption is before the child was born.', affected_fields=['DATE_PLACED', # PlacedAdoptino 'DOB'], # Header ) def _validate(dfs): if 'Header' not in dfs or 'PlacedAdoption' not in dfs: return {} else: child_record = dfs['Header'] placed_for_adoption = dfs['PlacedAdoption'] all_data = (placed_for_adoption .reset_index() .merge(child_record, how='left', on='CHILD', suffixes=[None, '_P4A'])) all_data['DATE_PLACED'] = pd.to_datetime(all_data['DATE_PLACED'], format='%d/%m/%Y', errors='coerce') all_data['DOB'] = pd.to_datetime(all_data['DOB'], format='%d/%m/%Y', errors='coerce') mask = (all_data['DATE_PLACED'] >= all_data['DOB']) | all_data['DATE_PLACED'].isna() validation_error = ~mask validation_error_locations = all_data[validation_error]['index'].unique() return {'PlacedAdoption': validation_error_locations.tolist()} 
return error, _validate def validate_612(): error = ErrorDefinition( code='612', description="Date of birth field has been completed but mother field indicates child is not a mother.", affected_fields=['SEX', 'MOTHER', 'MC_DOB'], ) def _validate(dfs): if 'Header' not in dfs: return {} else: header = dfs['Header'] error_mask = ( ((header['MOTHER'].astype(str) == '0') | header['MOTHER'].isna()) & (header['SEX'].astype(str) == '2') & header['MC_DOB'].notna() ) validation_error_locations = header.index[error_mask] return {'Header': validation_error_locations.tolist()} return error, _validate def validate_552(): """ This error checks that the first adoption episode is after the last decision ! If there are multiple of either there may be unexpected results ! """ error = ErrorDefinition( code="552", description="Date of Decision to place a child for adoption should be on or prior to the date that the child was placed for adoption.", # Field that defines date of decision to place a child for adoption is DATE_PLACED and the start of adoption is defined by DECOM with 'A' placement types. affected_fields=['DATE_PLACED', 'DECOM'], ) def _validate(dfs): if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs): return {} else: # get the required datasets placed_adoption = dfs['PlacedAdoption'] episodes = dfs['Episodes'] # keep index values so that they stay the same when needed later on for error locations placed_adoption.reset_index(inplace=True) episodes.reset_index(inplace=True) adoption_eps = episodes[episodes['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])].copy() # find most recent adoption decision placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y', errors='coerce') # remove rows where either of the required values have not been filled. placed_adoption = placed_adoption[placed_adoption['DATE_PLACED'].notna()] placed_adoption_inds = placed_adoption.groupby('CHILD')['DATE_PLACED'].idxmax(skipna=True) last_decision = placed_adoption.loc[placed_adoption_inds] # first time child started adoption adoption_eps["DECOM"] = pd.to_datetime(adoption_eps['DECOM'], format='%d/%m/%Y', errors='coerce') adoption_eps = adoption_eps[adoption_eps['DECOM'].notna()] adoption_eps_inds = adoption_eps.groupby('CHILD')['DECOM'].idxmin(skipna=True) # full information of first adoption first_adoption = adoption_eps.loc[adoption_eps_inds] # date of decision and date of start of adoption (DECOM) have to be put in one table merged = first_adoption.merge(last_decision, on=['CHILD'], how='left', suffixes=['_EP', '_PA']) # check to see if date of decision to place is less than or equal to date placed. decided_after_placed = merged["DECOM"] < merged["DATE_PLACED"] # find the corresponding location of error values per file. 
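            # (illustrative note; the dates below are made up, not from any return) 'index_EP' and 'index_PA' hold the
            # original row positions preserved by reset_index() above, so a child whose latest DATE_PLACED is 01/06/2020
            # but whose first adoption episode DECOM is 01/05/2020 is reported against the correct rows in both files.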
episode_error_locs = merged.loc[decided_after_placed, 'index_EP'] placedadoption_error_locs = merged.loc[decided_after_placed, 'index_PA'] return {"PlacedAdoption": placedadoption_error_locs.to_list(), "Episodes": episode_error_locs.to_list()} return error, _validate def validate_551(): error = ErrorDefinition( code='551', description='Child has been placed for adoption but there is no date of the decision that the child should be placed for adoption.', affected_fields=['DATE_PLACED', 'PLACE'], ) def _validate(dfs): if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs: return {} else: episodes = dfs['Episodes'] placedAdoptions = dfs['PlacedAdoption'] episodes = episodes.reset_index() place_codes = ['A3', 'A4', 'A5', 'A6'] placeEpisodes = episodes[episodes['PLACE'].isin(place_codes)] merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index') episodes_with_errors = merged[merged['DATE_PLACED'].isna()] error_mask = episodes.index.isin(episodes_with_errors.index) error_locations = episodes.index[error_mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_557(): error = ErrorDefinition( code='557', description="Child for whom the decision was made that they should be placed for adoption has left care " + "but was not adopted and information on the decision that they should no longer be placed for " + "adoption items has not been completed.", affected_fields=['DATE_PLACED_CEASED', 'REASON_PLACED_CEASED', # PlacedAdoption 'PLACE', 'LS', 'REC'], # Episodes ) def _validate(dfs): if 'Episodes' not in dfs: return {} if 'PlacedAdoption' not in dfs: return {} else: eps = dfs['Episodes'] placed = dfs['PlacedAdoption'] eps = eps.reset_index() placed = placed.reset_index() child_placed = eps['PLACE'].isin(['A3', 'A4', 'A5', 'A6']) order_granted = eps['LS'].isin(['D1', 'E1']) not_adopted = ~eps['REC'].isin(['E11', 'E12']) & eps['REC'].notna() placed['ceased_incomplete'] = ( placed['DATE_PLACED_CEASED'].isna() | placed['REASON_PLACED_CEASED'].isna() ) eps = eps[(child_placed | order_granted) & not_adopted] eps = eps.merge(placed, on='CHILD', how='left', suffixes=['_EP', '_PA'], indicator=True) eps = eps[(eps['_merge'] == 'left_only') | eps['ceased_incomplete']] EP_errors = eps['index_EP'] PA_errors = eps['index_PA'].dropna() return { 'Episodes': EP_errors.to_list(), 'PlacedAdoption': PA_errors.to_list(), } return error, _validate def validate_207(): error = ErrorDefinition( code='207', description='Mother status for the current year disagrees with the mother status already recorded for this child.', affected_fields=['MOTHER'], ) def _validate(dfs): if 'Header' not in dfs or 'Header_last' not in dfs: return {} else: header = dfs['Header'] header_last = dfs['Header_last'] header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'), indicator=True).set_index('index') in_both_years = header_merged['_merge'] == 'both' mother_is_different = header_merged['MOTHER'].astype(str) != header_merged['MOTHER_last'].astype(str) mother_was_true = header_merged['MOTHER_last'].astype(str) == '1' error_mask = in_both_years & mother_is_different & mother_was_true error_locations = header.index[error_mask] return {'Header': error_locations.to_list()} return error, _validate def validate_523(): error = ErrorDefinition( code='523', description="Date of decision that the child should be placed for adoption should be the same date as the decision that adoption is in the best interest (date should be placed).", 
affected_fields=['DATE_PLACED', 'DATE_INT'], ) def _validate(dfs): if ("AD1" not in dfs) or ("PlacedAdoption" not in dfs): return {} else: placed_adoption = dfs["PlacedAdoption"] ad1 = dfs["AD1"] # keep initial index values to be reused for locating errors later on. placed_adoption.reset_index(inplace=True) ad1.reset_index(inplace=True) # convert to datetime to enable comparison placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format="%d/%m/%Y", errors='coerce') ad1["DATE_INT"] = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce') # drop rows where either of the required values have not been filled. placed_adoption = placed_adoption[placed_adoption["DATE_PLACED"].notna()] ad1 = ad1[ad1["DATE_INT"].notna()] # bring corresponding values together from both dataframes merged_df = placed_adoption.merge(ad1, on=['CHILD'], how='inner', suffixes=["_AD", "_PA"]) # find error values different_dates = merged_df['DATE_INT'] != merged_df['DATE_PLACED'] # map error locations to corresponding indices pa_error_locations = merged_df.loc[different_dates, 'index_PA'] ad1_error_locations = merged_df.loc[different_dates, 'index_AD'] return {"PlacedAdoption": pa_error_locations.to_list(), "AD1": ad1_error_locations.to_list()} return error, _validate def validate_3001(): error = ErrorDefinition( code='3001', description='Where care leavers information is being returned for a young person around their 17th birthday, the accommodation cannot be with their former foster carer(s).', affected_fields=['REC'], ) def _validate(dfs): if 'Header' not in dfs: return {} if 'OC3' not in dfs: return {} else: header = dfs['Header'] oc3 = dfs['OC3'] collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce') header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce') header['DOB17'] = header['DOB'] + pd.DateOffset(years=17) oc3_merged = oc3.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'), indicator=True).set_index('index') accom_foster = oc3_merged['ACCOM'].str.upper().astype(str).isin(['Z1', 'Z2']) age_17_in_year = (oc3_merged['DOB17'] <= collection_end) & (oc3_merged['DOB17'] >= collection_start) error_mask = accom_foster & age_17_in_year error_locations = oc3.index[error_mask] return {'OC3': error_locations.to_list()} return error, _validate def validate_389(): error = ErrorDefinition( code='389', description='Reason episode ceased is that child transferred to care of adult social care services, but child is aged under 16.', affected_fields=['REC'], ) def _validate(dfs): if 'Header' not in dfs: return {} if 'Episodes' not in dfs: return {} else: header = dfs['Header'] episodes = dfs['Episodes'] header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce') episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce') header['DOB16'] = header['DOB'] + pd.DateOffset(years=16) episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'), indicator=True).set_index('index') ceased_asc = episodes_merged['REC'].str.upper().astype(str).isin(['E7']) ceased_over_16 = episodes_merged['DOB16'] <= episodes_merged['DEC'] error_mask = ceased_asc & ~ceased_over_16 error_locations = episodes.index[error_mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_387(): error = 
ErrorDefinition( code='387', description='Reason episode ceased is child moved into independent living arrangement, but the child is aged under 14.', affected_fields=['REC'], ) def _validate(dfs): if 'Header' not in dfs: return {} if 'Episodes' not in dfs: return {} else: header = dfs['Header'] episodes = dfs['Episodes'] header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce') episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce') header['DOB14'] = header['DOB'] + pd.DateOffset(years=14) episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'), indicator=True).set_index('index') ceased_indep = episodes_merged['REC'].str.upper().astype(str).isin(['E5', 'E6']) ceased_over_14 = episodes_merged['DOB14'] <= episodes_merged['DEC'] dec_present = episodes_merged['DEC'].notna() error_mask = ceased_indep & ~ceased_over_14 & dec_present error_locations = episodes.index[error_mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_452(): error = ErrorDefinition( code='452', description='Contradiction between local authority of placement code in the last episode of the previous year and in the first episode of the current year.', affected_fields=['PL_LA'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} if 'Episodes_last' not in dfs: return {} else: episodes = dfs['Episodes'] episodes_last = dfs['Episodes_last'] episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce') episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin() episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax() episodes = episodes[episodes.index.isin(episodes_min)] episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)] episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'], suffixes=('', '_last'), indicator=True).set_index('index') in_both_years = episodes_merged['_merge'] == 'both' same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last'] last_year_open = episodes_merged['DEC_last'].isna() different_pl_la = episodes_merged['PL_LA'].astype(str) != episodes_merged['PL_LA_last'].astype(str) error_mask = in_both_years & same_rne & last_year_open & different_pl_la validation_error_locations = episodes.index[error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_386(): error = ErrorDefinition( code='386', description='Reason episode ceased is adopted but child has reached age 18.', affected_fields=['REC'], ) def _validate(dfs): if 'Header' not in dfs: return {} if 'Episodes' not in dfs: return {} else: header = dfs['Header'] episodes = dfs['Episodes'] header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce') episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce') header['DOB18'] = header['DOB'] + pd.DateOffset(years=18) episodes_merged = ( episodes .reset_index() .merge(header, how='left', on=['CHILD'], suffixes=('', '_header'), indicator=True) .set_index('index') .dropna(subset=['DOB18', 'DEC']) ) ceased_adopted = episodes_merged['REC'].str.upper().astype(str).isin(['E11', 'E12']) ceased_under_18 = episodes_merged['DOB18'] > episodes_merged['DEC'] error_mask = ceased_adopted & ~ceased_under_18 error_locations = episodes_merged.index[error_mask] return {'Episodes': 
error_locations.to_list()} return error, _validate def validate_363(): error = ErrorDefinition( code='363', description='Child assessment order (CAO) lasted longer than 7 days allowed in the Children Act 1989.', affected_fields=['LS', 'DECOM', 'DEC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] collection_end_str = dfs['metadata']['collection_end'] L2_eps = episodes[episodes['LS'] == 'L3'].copy() L2_eps['original_index'] = L2_eps.index L2_eps = L2_eps[L2_eps['DECOM'].notna()] L2_eps.loc[L2_eps['DEC'].isna(), 'DEC'] = collection_end_str L2_eps['DECOM'] = pd.to_datetime(L2_eps['DECOM'], format='%d/%m/%Y', errors='coerce') L2_eps = L2_eps.dropna(subset=['DECOM']) L2_eps['DEC'] = pd.to_datetime(L2_eps['DEC'], format='%d/%m/%Y', errors='coerce') L2_eps = L2_eps.sort_values(['CHILD', 'DECOM']) L2_eps['index'] = pd.RangeIndex(0, len(L2_eps)) L2_eps['index+1'] = L2_eps['index'] + 1 L2_eps = L2_eps.merge(L2_eps, left_on='index', right_on='index+1', how='left', suffixes=[None, '_prev']) L2_eps = L2_eps[['original_index', 'DECOM', 'DEC', 'DEC_prev', 'CHILD', 'CHILD_prev', 'LS']] L2_eps['new_period'] = ( (L2_eps['DECOM'] > L2_eps['DEC_prev']) | (L2_eps['CHILD'] != L2_eps['CHILD_prev']) ) L2_eps['duration'] = (L2_eps['DEC'] - L2_eps['DECOM']).dt.days L2_eps['period_id'] = L2_eps['new_period'].astype(int).cumsum() L2_eps['period_duration'] = L2_eps.groupby('period_id')['duration'].transform(sum) error_mask = L2_eps['period_duration'] > 7 return {'Episodes': L2_eps.loc[error_mask, 'original_index'].to_list()} return error, _validate def validate_364(): error = ErrorDefinition( code='364', description='Sections 41-46 of Police and Criminal Evidence (PACE; 1984) severely limits ' + 'the time a child can be detained in custody in Local Authority (LA) accommodation.', affected_fields=['LS', 'DECOM', 'DEC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] collection_end_str = dfs['metadata']['collection_end'] J2_eps = episodes[episodes['LS'] == 'J2'].copy() J2_eps['original_index'] = J2_eps.index J2_eps['DECOM'] = pd.to_datetime(J2_eps['DECOM'], format='%d/%m/%Y', errors='coerce') J2_eps = J2_eps[J2_eps['DECOM'].notna()] J2_eps.loc[J2_eps['DEC'].isna(), 'DEC'] = collection_end_str J2_eps['DEC'] = pd.to_datetime(J2_eps['DEC'], format='%d/%m/%Y', errors='coerce') J2_eps = J2_eps.sort_values(['CHILD', 'DECOM']) J2_eps['index'] = pd.RangeIndex(0, len(J2_eps)) J2_eps['index_prev'] = J2_eps['index'] + 1 J2_eps = J2_eps.merge(J2_eps, left_on='index', right_on='index_prev', how='left', suffixes=[None, '_prev']) J2_eps = J2_eps[['original_index', 'DECOM', 'DEC', 'DEC_prev', 'CHILD', 'CHILD_prev', 'LS']] J2_eps['new_period'] = ( (J2_eps['DECOM'] > J2_eps['DEC_prev']) | (J2_eps['CHILD'] != J2_eps['CHILD_prev']) ) J2_eps['duration'] = (J2_eps['DEC'] - J2_eps['DECOM']).dt.days J2_eps['period_id'] = J2_eps['new_period'].astype(int).cumsum() J2_eps['period_duration'] = J2_eps.groupby('period_id')['duration'].transform(sum) error_mask = J2_eps['period_duration'] > 21 return {'Episodes': J2_eps.loc[error_mask, 'original_index'].to_list()} return error, _validate def validate_365(): error = ErrorDefinition( code='365', description='Any individual short-term respite placement must not exceed 17 days.', affected_fields=['LS', 'DECOM', 'DEC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] collection_end_str = dfs['metadata']['collection_end'] episodes.loc[episodes['DEC'].isna(), 'DEC'] = collection_end_str episodes['DECOM'] =
pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce') over_17_days = episodes['DEC'] > episodes['DECOM'] + pd.DateOffset(days=17) error_mask = (episodes['LS'] == 'V3') & over_17_days return {'Episodes': episodes.index[error_mask].to_list()} return error, _validate def validate_367(): error = ErrorDefinition( code='367', description='The maximum amount of respite care allowable is 75 days in any 12-month period.', affected_fields=['LS', 'DECOM', 'DEC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] V3_eps = episodes[episodes['LS'] == 'V3'] V3_eps = V3_eps.dropna(subset=['DECOM']) # missing DECOM should get fixed before looking for this error collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce') V3_eps['DECOM_dt'] = pd.to_datetime(V3_eps['DECOM'], format='%d/%m/%Y', errors='coerce') V3_eps['DEC_dt'] = pd.to_datetime(V3_eps['DEC'], format='%d/%m/%Y', errors='coerce') # truncate episode start/end dates to collection start/end respectively V3_eps.loc[V3_eps['DEC'].isna() | (V3_eps['DEC_dt'] > collection_end), 'DEC_dt'] = collection_end V3_eps.loc[V3_eps['DECOM_dt'] < collection_start, 'DECOM_dt'] = collection_start V3_eps['duration'] = (V3_eps['DEC_dt'] - V3_eps['DECOM_dt']).dt.days V3_eps = V3_eps[V3_eps['duration'] > 0] V3_eps['year_total_duration'] = V3_eps.groupby('CHILD')['duration'].transform(sum) error_mask = V3_eps['year_total_duration'] > 75 return {'Episodes': V3_eps.index[error_mask].to_list()} return error, _validate def validate_440(): error = ErrorDefinition( code='440', description='Participation method indicates child was under 4 years old at the time of the review, but date of birth and review date indicates the child was 4 years old or over.', affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'], ) def _validate(dfs): if 'Reviews' not in dfs: return {} else: reviews = dfs['Reviews'] reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce') reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce') mask = reviews['REVIEW_CODE'].eq('PN0') & ( reviews['REVIEW'] > reviews['DOB'] + pd.offsets.DateOffset(years=4)) validation_error_mask = mask validation_error_locations = reviews.index[validation_error_mask] return {'Reviews': validation_error_locations.tolist()} return error, _validate def validate_445(): error = ErrorDefinition( code='445', description='D1 is not a valid code for episodes starting after December 2005.', affected_fields=['LS', 'DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') max_decom_allowed = pd.to_datetime('31/12/2005', format='%d/%m/%Y', errors='coerce') mask = episodes['LS'].eq('D1') & (episodes['DECOM'] > max_decom_allowed) validation_error_mask = mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_446(): error = ErrorDefinition( code='446', description='E1 is not a valid code for episodes starting before December 2005.', affected_fields=['LS', 'DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] episodes['DECOM'] = 
pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') min_decom_allowed = pd.to_datetime('01/12/2005', format='%d/%m/%Y', errors='coerce') mask = episodes['LS'].eq('E1') & (episodes['DECOM'] < min_decom_allowed) validation_error_mask = mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_208(): error = ErrorDefinition( code='208', description='Unique Pupil Number (UPN) for the current year disagrees with the Unique Pupil Number (UPN) already recorded for this child.', affected_fields=['UPN'], ) def _validate(dfs): if 'Header' not in dfs or 'Header_last' not in dfs: return {} else: header = dfs['Header'] header_last = dfs['Header_last'] header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'), indicator=True).set_index('index') in_both_years = header_merged['_merge'] == 'both' upn_is_different = header_merged['UPN'].str.upper().astype(str) != header_merged[ 'UPN_last'].str.upper().astype(str) upn_not_recorded = header_merged['UPN'].str.upper().astype(str).isin(['UN2', 'UN3', 'UN4', 'UN5', 'UN6']) & \ header_merged['UPN_last'].str.upper().astype(str).isin(['UN1']) error_mask = in_both_years & upn_is_different & ~upn_not_recorded error_locations = header.index[error_mask] return {'Header': error_locations.to_list()} return error, _validate def validate_204(): error = ErrorDefinition( code='204', description='Ethnic origin code disagrees with the ethnic origin already recorded for this child.', affected_fields=['ETHNIC'], ) def _validate(dfs): if 'Header' not in dfs or 'Header_last' not in dfs: return {} else: header = dfs['Header'] header_last = dfs['Header_last'] header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'), indicator=True).set_index('index') in_both_years = header_merged['_merge'] == 'both' ethnic_is_different = header_merged['ETHNIC'].astype(str).str.upper() != header_merged[ 'ETHNIC_last'].astype(str).str.upper() error_mask = in_both_years & ethnic_is_different error_locations = header.index[error_mask] return {'Header': error_locations.to_list()} return error, _validate def validate_203(): error = ErrorDefinition( code='203', description='Date of birth disagrees with the date of birth already recorded for this child.', affected_fields=['DOB'], ) def _validate(dfs): if 'Header' not in dfs or 'Header_last' not in dfs: return {} else: header = dfs['Header'] header_last = dfs['Header_last'] header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce') header_last['DOB'] = pd.to_datetime(header_last['DOB'], format='%d/%m/%Y', errors='coerce') header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'), indicator=True).set_index('index') in_both_years = header_merged['_merge'] == 'both' dob_is_different = header_merged['DOB'].astype(str) != header_merged['DOB_last'].astype(str) error_mask = in_both_years & dob_is_different error_locations = header.index[error_mask] return {'Header': error_locations.to_list()} return error, _validate def validate_530(): error = ErrorDefinition( code='530', description="A placement provider code of PR4 cannot be associated with placement P1.", affected_fields=['PLACE', 'PLACE_PROVIDER'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] mask = episodes['PLACE'].eq('P1') & episodes['PLACE_PROVIDER'].eq('PR4') 
validation_error_mask = mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_571(): error = ErrorDefinition( code='571', description='The date that the child ceased to be missing or away from placement without authorisation is before the start or after the end of the collection year.', affected_fields=['MIS_END'], ) def _validate(dfs): if 'Missing' not in dfs: return {} else: missing = dfs['Missing'] collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce') missing['fMIS_END'] = pd.to_datetime(missing['MIS_END'], format='%d/%m/%Y', errors='coerce') end_date_before_year = missing['fMIS_END'] < collection_start end_date_after_year = missing['fMIS_END'] > collection_end error_mask = end_date_before_year | end_date_after_year error_locations = missing.index[error_mask] return {'Missing': error_locations.to_list()} return error, _validate def validate_1005(): error = ErrorDefinition( code='1005', description='The end date of the missing episode or episode that the child was away from placement without authorisation is not a valid date.', affected_fields=['MIS_END'], ) def _validate(dfs): if 'Missing' not in dfs: return {} else: missing = dfs['Missing'] missing['fMIS_END'] = pd.to_datetime(missing['MIS_END'], format='%d/%m/%Y', errors='coerce') missing_end_date = missing['MIS_END'].isna() invalid_end_date = missing['fMIS_END'].isna() error_mask = ~missing_end_date & invalid_end_date error_locations = missing.index[error_mask] return {'Missing': error_locations.to_list()} return error, _validate def validate_1004(): error = ErrorDefinition( code='1004', description='The start date of the missing episode or episode that the child was away from placement without authorisation is not a valid date.', affected_fields=['MIS_START'], ) def _validate(dfs): if 'Missing' not in dfs: return {} else: missing = dfs['Missing'] missing['fMIS_START'] = pd.to_datetime(missing['MIS_START'], format='%d/%m/%Y', errors='coerce') missing_start_date = missing['MIS_START'].isna() invalid_start_date = missing['fMIS_START'].isna() error_mask = missing_start_date | invalid_start_date error_locations = missing.index[error_mask] return {'Missing': error_locations.to_list()} return error, _validate def validate_202(): error = ErrorDefinition( code='202', description='The gender code conflicts with the gender already recorded for this child.', affected_fields=['SEX'], ) def _validate(dfs): if 'Header' not in dfs or 'Header_last' not in dfs: return {} else: header = dfs['Header'] header_last = dfs['Header_last'] header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'), indicator=True).set_index('index') in_both_years = header_merged['_merge'] == 'both' sex_is_different = header_merged['SEX'].astype(str) != header_merged['SEX_last'].astype(str) error_mask = in_both_years & sex_is_different error_locations = header.index[error_mask] return {'Header': error_locations.to_list()} return error, _validate def validate_621(): error = ErrorDefinition( code='621', description="Mother’s field has been completed but date of birth shows that the mother is younger than her child.", affected_fields=['DOB', 'MC_DOB'], ) def _validate(dfs): if 'Header' not in dfs: return {} else: header = dfs['Header'] header['MC_DOB'] = 
pd.to_datetime(header['MC_DOB'], format='%d/%m/%Y', errors='coerce') header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce') mask = (header['MC_DOB'] > header['DOB']) | header['MC_DOB'].isna() validation_error_mask = ~mask validation_error_locations = header.index[validation_error_mask] return {'Header': validation_error_locations.tolist()} return error, _validate def validate_556(): error = ErrorDefinition( code='556', description='Date of decision that the child should be placed for adoption should be on or prior to the date that the freeing order was granted.', affected_fields=['DATE_PLACED', 'DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs: return {} else: episodes = dfs['Episodes'] placedAdoptions = dfs['PlacedAdoption'] episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') placedAdoptions['DATE_PLACED'] = pd.to_datetime(placedAdoptions['DATE_PLACED'], format='%d/%m/%Y', errors='coerce') episodes = episodes.reset_index() D1Episodes = episodes[episodes['LS'] == 'D1'] merged = D1Episodes.reset_index().merge(placedAdoptions, how='left', on='CHILD', ).set_index('index') episodes_with_errors = merged[merged['DATE_PLACED'] > merged['DECOM']] error_mask = episodes.index.isin(episodes_with_errors.index) error_locations = episodes.index[error_mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_393(): error = ErrorDefinition( code='393', description='Child is looked after but mother field is not completed.', affected_fields=['MOTHER'], ) def _validate(dfs): if 'Header' not in dfs or 'Episodes' not in dfs: return {} else: header = dfs['Header'] episodes = dfs['Episodes'] header_female = header[header['SEX'].astype(str) == '2'] applicable_episodes = episodes[~episodes['LS'].str.upper().isin(['V3', 'V4'])] error_mask = header_female['CHILD'].isin(applicable_episodes['CHILD']) & header_female['MOTHER'].isna() error_locations = header_female.index[error_mask] return {'Header': error_locations.to_list()} return error, _validate def validate_NoE(): error = ErrorDefinition( code='NoE', description='This child has no episodes loaded for previous year even though child started to be looked after before this current year.', affected_fields=['DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs or 'Episodes_last' not in dfs: return {} else: episodes = dfs['Episodes'] episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') episodes_last = dfs['Episodes_last'] episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce') collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce') episodes_before_year = episodes[episodes['DECOM'] < collection_start] episodes_merged = episodes_before_year.reset_index().merge(episodes_last, how='left', on=['CHILD'], indicator=True).set_index('index') episodes_not_matched = episodes_merged[episodes_merged['_merge'] == 'left_only'] error_mask = episodes.index.isin(episodes_not_matched.index) error_locations = episodes.index[error_mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_356(): error = ErrorDefinition( code='356', description='The date the episode ceased is before the date the same episode started.', affected_fields=['DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], 
format='%d/%m/%Y', errors='coerce') episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce') error_mask = episodes['DEC'].notna() & (episodes['DEC'] < episodes['DECOM']) return {'Episodes': episodes.index[error_mask].to_list()} return error, _validate def validate_611(): error = ErrorDefinition( code='611', description="Date of birth field is blank, but child is a mother.", affected_fields=['MOTHER', 'MC_DOB'], ) def _validate(dfs): if 'Header' not in dfs: return {} else: header = dfs['Header'] validation_error_mask = header['MOTHER'].astype(str).isin(['1']) & header['MC_DOB'].isna() validation_error_locations = header.index[validation_error_mask] return {'Header': validation_error_locations.tolist()} return error, _validate def validate_1009(): error = ErrorDefinition( code='1009', description='Reason for placement change is not a valid code.', affected_fields=['REASON_PLACE_CHANGE'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] code_list = [ 'CARPL', 'CLOSE', 'ALLEG', 'STAND', 'APPRR', 'CREQB', 'CREQO', 'CHILD', 'LAREQ', 'PLACE', 'CUSTOD', 'OTHER' ] mask = episodes['REASON_PLACE_CHANGE'].isin(code_list) | episodes['REASON_PLACE_CHANGE'].isna() validation_error_mask = ~mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_1006(): error = ErrorDefinition( code='1006', description='Missing type invalid.', affected_fields=['MISSING'], ) def _validate(dfs): if 'Missing' not in dfs: return {} missing_from_care = dfs['Missing'] code_list = ['M', 'A'] mask = missing_from_care['MISSING'].isin(code_list) | missing_from_care['MISSING'].isna() validation_error_mask = ~mask validation_error_locations = missing_from_care.index[validation_error_mask] return {'Missing': validation_error_locations.tolist()} return error, _validate def validate_631(): error = ErrorDefinition( code='631', description='Previous permanence option not a valid value.', affected_fields=['PREV_PERM'], ) def _validate(dfs): if 'PrevPerm' not in dfs: return {} previous_permanence = dfs['PrevPerm'] code_list = ['P1', 'P2', 'P3', 'P4', 'Z1'] mask = previous_permanence['PREV_PERM'].isin(code_list) | previous_permanence['PREV_PERM'].isna() validation_error_mask = ~mask validation_error_locations = previous_permanence.index[validation_error_mask] return {'PrevPerm': validation_error_locations.tolist()} return error, _validate def validate_196(): error = ErrorDefinition( code='196', description='Strengths and Difficulties (SDQ) reason is not a valid code.', affected_fields=['SDQ_REASON'], ) def _validate(dfs): if 'OC2' not in dfs: return {} oc2 = dfs['OC2'] code_list = ['SDQ1', 'SDQ2', 'SDQ3', 'SDQ4', 'SDQ5'] mask = oc2['SDQ_REASON'].isin(code_list) | oc2['SDQ_REASON'].isna() validation_error_mask = ~mask validation_error_locations = oc2.index[validation_error_mask] return {'OC2': validation_error_locations.tolist()} return error, _validate def validate_177(): error = ErrorDefinition( code='177', description='The legal status of adopter(s) code is not a valid code.', affected_fields=['LS_ADOPTR'], ) def _validate(dfs): if 'AD1' not in dfs: return {} adoptions = dfs['AD1'] code_list = ['L0', 'L11', 'L12', 'L2', 'L3', 'L4'] mask = adoptions['LS_ADOPTR'].isin(code_list) | adoptions['LS_ADOPTR'].isna() validation_error_mask = ~mask validation_error_locations = adoptions.index[validation_error_mask] return {'AD1': validation_error_locations.tolist()} return error, 
_validate def validate_176(): error = ErrorDefinition( code='176', description='The gender of adopter(s) at the date of adoption code is not a valid code.', affected_fields=['SEX_ADOPTR'], ) def _validate(dfs): if 'AD1' not in dfs: return {} adoptions = dfs['AD1'] code_list = ['M1', 'F1', 'MM', 'FF', 'MF'] mask = adoptions['SEX_ADOPTR'].isin(code_list) | adoptions['SEX_ADOPTR'].isna() validation_error_mask = ~mask validation_error_locations = adoptions.index[validation_error_mask] return {'AD1': validation_error_locations.tolist()} return error, _validate def validate_175(): error = ErrorDefinition( code='175', description='The number of adopter(s) code is not a valid code.', affected_fields=['NB_ADOPTR'], ) def _validate(dfs): if 'AD1' not in dfs: return {} adoptions = dfs['AD1'] code_list = ['1', '2'] mask = adoptions['NB_ADOPTR'].astype(str).isin(code_list) | adoptions['NB_ADOPTR'].isna() validation_error_mask = ~mask validation_error_locations = adoptions.index[validation_error_mask] return {'AD1': validation_error_locations.tolist()} return error, _validate def validate_132(): error = ErrorDefinition( code='132', description='Data entry for activity after leaving care is invalid.', affected_fields=['ACTIV'], ) def _validate(dfs): if 'OC3' not in dfs: return {} care_leavers = dfs['OC3'] code_list = [ 'F1', 'P1', 'F2', 'P2', 'F4', 'P4', 'F5', 'P5', 'G4', 'G5', 'G6', '0' ] mask = care_leavers['ACTIV'].astype(str).isin(code_list) | care_leavers['ACTIV'].isna() validation_error_mask = ~mask validation_error_locations = care_leavers.index[validation_error_mask] return {'OC3': validation_error_locations.tolist()} return error, _validate def validate_131(): error = ErrorDefinition( code='131', description='Data entry for being in touch after leaving care is invalid.', affected_fields=['IN_TOUCH'], ) def _validate(dfs): if 'OC3' not in dfs: return {} care_leavers = dfs['OC3'] code_list = [ 'YES', 'NO', 'DIED', 'REFU', 'NREQ', 'RHOM' ] mask = care_leavers['IN_TOUCH'].isin(code_list) | care_leavers['IN_TOUCH'].isna() validation_error_mask = ~mask validation_error_locations = care_leavers.index[validation_error_mask] return {'OC3': validation_error_locations.tolist()} return error, _validate def validate_120(): error = ErrorDefinition( code='120', description='The reason for the reversal of the decision that the child should be placed for adoption code is not valid.', affected_fields=['REASON_PLACED_CEASED'], ) def _validate(dfs): if 'PlacedAdoption' not in dfs: return {} placed_adoptions = dfs['PlacedAdoption'] code_list = ['RD1', 'RD2', 'RD3', 'RD4'] mask = placed_adoptions['REASON_PLACED_CEASED'].isin(code_list) | placed_adoptions[ 'REASON_PLACED_CEASED'].isna() validation_error_mask = ~mask validation_error_locations = placed_adoptions.index[validation_error_mask] return {'PlacedAdoption': validation_error_locations.tolist()} return error, _validate def validate_114(): error = ErrorDefinition( code='114', description='Data entry to record the status of former carer(s) of an adopted child is invalid.', affected_fields=['FOSTER_CARE'], ) def _validate(dfs): if 'AD1' not in dfs: return {} adoptions = dfs['AD1'] code_list = ['0', '1'] mask = adoptions['FOSTER_CARE'].astype(str).isin(code_list) | adoptions['FOSTER_CARE'].isna() validation_error_mask = ~mask validation_error_locations = adoptions.index[validation_error_mask] return {'AD1': validation_error_locations.tolist()} return error, _validate def validate_178(): error = ErrorDefinition( code='178', description='Placement provider code is not 
a valid code.', affected_fields=['PLACE_PROVIDER'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] code_list_placement_provider = ['PR0', 'PR1', 'PR2', 'PR3', 'PR4', 'PR5'] code_list_placement_with_no_provider = ['T0', 'T1', 'T2', 'T3', 'T4', 'Z1'] place_provider_needed_and_correct = episodes['PLACE_PROVIDER'].isin(code_list_placement_provider) & ~episodes[ 'PLACE'].isin(code_list_placement_with_no_provider) place_provider_not_provided = episodes['PLACE_PROVIDER'].isna() place_provider_not_needed = episodes['PLACE_PROVIDER'].isna() & episodes['PLACE'].isin( code_list_placement_with_no_provider) mask = place_provider_needed_and_correct | place_provider_not_provided | place_provider_not_needed validation_error_mask = ~mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_103(): error = ErrorDefinition( code='103', description='The ethnicity code is either not valid or has not been entered.', affected_fields=['ETHNIC'], ) def _validate(dfs): if 'Header' not in dfs: return {} header = dfs['Header'] code_list = [ 'WBRI', 'WIRI', 'WOTH', 'WIRT', 'WROM', 'MWBC', 'MWBA', 'MWAS', 'MOTH', 'AIND', 'APKN', 'ABAN', 'AOTH', 'BCRB', 'BAFR', 'BOTH', 'CHNE', 'OOTH', 'REFU', 'NOBT' ] mask = header['ETHNIC'].isin(code_list) validation_error_mask = ~mask validation_error_locations = header.index[validation_error_mask] return {'Header': validation_error_locations.tolist()} return error, _validate def validate_143(): error = ErrorDefinition( code='143', description='The reason for new episode code is not a valid code.', affected_fields=['RNE'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] code_list = ['S', 'P', 'L', 'T', 'U', 'B'] mask = episodes['RNE'].isin(code_list) | episodes['RNE'].isna() validation_error_mask = ~mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_144(): error = ErrorDefinition( code='144', description='The legal status code is not a valid code.', affected_fields=['LS'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] code_list = [ 'C1', 'C2', 'D1', 'E1', 'V2', 'V3', 'V4', 'J1', 'J2', 'J3', 'L1', 'L2', 'L3' ] mask = episodes['LS'].isin(code_list) | episodes['LS'].isna() validation_error_mask = ~mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_145(): error = ErrorDefinition( code='145', description='Category of need code is not a valid code.', affected_fields=['CIN'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] code_list = [ 'N1', 'N2', 'N3', 'N4', 'N5', 'N6', 'N7', 'N8', ] mask = episodes['CIN'].isin(code_list) | episodes['CIN'].isna() validation_error_mask = ~mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_146(): error = ErrorDefinition( code='146', description='Placement type code is not a valid code.', affected_fields=['PLACE'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] code_list = [ 'A3', 'A4', 'A5', 'A6', 'H5', 'K1', 'K2', 'P1', 'P2', 'P3', 'R1', 'R2', 'R3', 'R5', 'S1', 'T0', 'T1', 'T2', 'T3', 'T4', 'U1', 'U2', 'U3', 'U4', 'U5', 'U6', 
'Z1' ] mask = episodes['PLACE'].isin(code_list) | episodes['PLACE'].isna() validation_error_mask = ~mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_149(): error = ErrorDefinition( code='149', description='Reason episode ceased code is not valid. ', affected_fields=['REC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] code_list = [ 'E11', 'E12', 'E2', 'E3', 'E4A', 'E4B', 'E13', 'E41', 'E45', 'E46', 'E47', 'E48', 'E5', 'E6', 'E7', 'E8', 'E9', 'E14', 'E15', 'E16', 'E17', 'X1' ] mask = episodes['REC'].isin(code_list) | episodes['REC'].isna() validation_error_mask = ~mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_167(): error = ErrorDefinition( code='167', description='Data entry for participation is invalid or blank.', affected_fields=['REVIEW_CODE'], ) def _validate(dfs): if 'Reviews' not in dfs: return {} review = dfs['Reviews'] code_list = ['PN0', 'PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7'] mask = review['REVIEW'].notna() & review['REVIEW_CODE'].isin(code_list) | review['REVIEW'].isna() & review[ 'REVIEW_CODE'].isna() validation_error_mask = ~mask validation_error_locations = review.index[validation_error_mask] return {'Reviews': validation_error_locations.tolist()} return error, _validate def validate_101(): error = ErrorDefinition( code='101', description='Gender code is not valid.', affected_fields=['SEX'], ) def _validate(dfs): if 'Header' not in dfs: return {} header = dfs['Header'] code_list = ['1', '2'] mask = header['SEX'].astype(str).isin(code_list) validation_error_mask = ~mask validation_error_locations = header.index[validation_error_mask] return {'Header': validation_error_locations.tolist()} return error, _validate def validate_141(): error = ErrorDefinition( code='141', description='Date episode began is not a valid date.', affected_fields=['DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] mask = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce').notna() na_location = episodes['DECOM'].isna() validation_error_mask = ~mask & ~na_location validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_147(): error = ErrorDefinition( code='147', description='Date episode ceased is not a valid date.', affected_fields=['DEC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] mask = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce').notna() na_location = episodes['DEC'].isna() validation_error_mask = ~mask & ~na_location validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_171(): error = ErrorDefinition( code='171', description="Date of birth of mother's child is not a valid date.", affected_fields=['MC_DOB'], ) def _validate(dfs): if 'Header' not in dfs: return {} else: header = dfs['Header'] mask = pd.to_datetime(header['MC_DOB'], format='%d/%m/%Y', errors='coerce').notna() na_location = header['MC_DOB'].isna() validation_error_mask = ~mask & ~na_location validation_error_locations = header.index[validation_error_mask] return {'Header': validation_error_locations.tolist()} 
return error, _validate def validate_102(): error = ErrorDefinition( code='102', description='Date of birth is not a valid date.', affected_fields=['DOB'], ) def _validate(dfs): if 'Header' not in dfs: return {} else: header = dfs['Header'] mask = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce').notna() validation_error_mask = ~mask validation_error_locations = header.index[validation_error_mask] return {'Header': validation_error_locations.tolist()} return error, _validate def validate_112(): error = ErrorDefinition( code='112', description='Date should be placed for adoption is not a valid date.', affected_fields=['DATE_INT'], ) def _validate(dfs): if 'AD1' not in dfs: return {} else: ad1 = dfs['AD1'] mask = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce').notna() na_location = ad1['DATE_INT'].isna() validation_error_mask = ~mask & ~na_location validation_error_locations = ad1.index[validation_error_mask] return {'AD1': validation_error_locations.tolist()} return error, _validate def validate_115(): error = ErrorDefinition( code='115', description="Date of Local Authority's (LA) decision that a child should be placed for adoption is not a valid date.", affected_fields=['DATE_PLACED'], ) def _validate(dfs): if 'PlacedAdoption' not in dfs: return {} else: adopt = dfs['PlacedAdoption'] mask = pd.to_datetime(adopt['DATE_PLACED'], format='%d/%m/%Y', errors='coerce').notna() na_location = adopt['DATE_PLACED'].isna() validation_error_mask = ~mask & ~na_location validation_error_locations = adopt.index[validation_error_mask] return {'PlacedAdoption': validation_error_locations.tolist()} return error, _validate def validate_116(): error = ErrorDefinition( code='116', description="Date of Local Authority's (LA) decision that a child should no longer be placed for adoption is not a valid date.", affected_fields=['DATE_PLACED_CEASED'], ) def _validate(dfs): if 'PlacedAdoption' not in dfs: return {} else: adopt = dfs['PlacedAdoption'] mask = pd.to_datetime(adopt['DATE_PLACED_CEASED'], format='%d/%m/%Y', errors='coerce').notna() na_location = adopt['DATE_PLACED_CEASED'].isna() validation_error_mask = ~mask & ~na_location validation_error_locations = adopt.index[validation_error_mask] return {'PlacedAdoption': validation_error_locations.tolist()} return error, _validate def validate_392c(): error = ErrorDefinition( code='392c', description='Postcode(s) provided are invalid.', affected_fields=['HOME_POST', 'PL_POST'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] home_provided = episodes['HOME_POST'].notna() home_details = merge_postcodes(episodes, "HOME_POST") home_valid = home_details['pcd'].notna() pl_provided = episodes['PL_POST'].notna() pl_details = merge_postcodes(episodes, "PL_POST") pl_valid = pl_details['pcd'].notna() error_mask = (home_provided & ~home_valid) | (pl_provided & ~pl_valid) return {'Episodes': episodes.index[error_mask].tolist()} return error, _validate def validate_213(): error = ErrorDefinition( code='213', description='Placement provider information not required.', affected_fields=['PLACE_PROVIDER'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] mask = df['PLACE'].isin(['T0', 'T1', 'T2', 'T3', 'T4', 'Z1']) & df['PLACE_PROVIDER'].notna() return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_168(): error = ErrorDefinition( code='168', description='Unique Pupil Number (UPN) is not valid. 
If unknown, default codes should be UN1, UN2, UN3, UN4 or UN5.', affected_fields=['UPN'], ) def _validate(dfs): if 'Header' not in dfs: return {} else: df = dfs['Header'] mask = df['UPN'].str.match(r'(^((?![IOS])[A-Z]){1}(\d{12}|\d{11}[A-Z]{1})$)|^(UN[1-5])$', na=False) mask = ~mask return {'Header': df.index[mask].tolist()} return error, _validate def validate_388(): error = ErrorDefinition( code='388', description='Reason episode ceased is coded new episode begins, but there is no continuation episode.', affected_fields=['REC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] df['DECOM'] = pd.to_datetime(df['DECOM'], format='%d/%m/%Y', errors='coerce') df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce') df['DECOM'] = df['DECOM'].fillna('01/01/1901') # Watch for potential future issues df = df.sort_values(['CHILD', 'DECOM']) df['DECOM_NEXT_EPISODE'] = df.groupby(['CHILD'])['DECOM'].shift(-1) # The max DECOM for each child is also the one with no next episode # And we also add the skipna option # grouped_decom_by_child = df.groupby(['CHILD'])['DECOM'].idxmax(skipna=True) no_next = df.DECOM_NEXT_EPISODE.isna() & df.CHILD.notna() # Dataframe with the maximum DECOM removed max_decom_removed = df[~no_next] # Dataframe with the maximum DECOM only max_decom_only = df[no_next] # Case 1: If reason episode ceased is coded X1 there must be a subsequent episode # starting on the same day. case1 = max_decom_removed[(max_decom_removed['REC'] == 'X1') & (max_decom_removed['DEC'].notna()) & (max_decom_removed['DECOM_NEXT_EPISODE'].notna()) & (max_decom_removed['DEC'] != max_decom_removed['DECOM_NEXT_EPISODE'])] # Case 2: If an episode ends but the child continues to be looked after, a new # episode should start on the same day.The reason episode ceased code of # the episode which ends must be X1. case2 = max_decom_removed[(max_decom_removed['REC'] != 'X1') & (max_decom_removed['REC'].notna()) & (max_decom_removed['DEC'].notna()) & (max_decom_removed['DECOM_NEXT_EPISODE'].notna()) & (max_decom_removed['DEC'] == max_decom_removed['DECOM_NEXT_EPISODE'])] # Case 3: If a child ceases to be looked after reason episode ceased code X1 must # not be used. 
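            # (clarifying note) i.e. each child's final episode of the year: if it has ended (DEC is present),
            # its REC must not be X1, because no continuation episode follows it.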
case3 = max_decom_only[(max_decom_only['DEC'].notna()) & (max_decom_only['REC'] == 'X1')] mask_case1 = case1.index.tolist() mask_case2 = case2.index.tolist() mask_case3 = case3.index.tolist() mask = mask_case1 + mask_case2 + mask_case3 mask.sort() return {'Episodes': mask} return error, _validate def validate_113(): error = ErrorDefinition( code='113', description='Date matching child and adopter(s) is not a valid date.', affected_fields=['DATE_MATCH'], ) def _validate(dfs): if 'AD1' not in dfs: return {} else: ad1 = dfs['AD1'] mask = pd.to_datetime(ad1['DATE_MATCH'], format='%d/%m/%Y', errors='coerce').notna() na_location = ad1['DATE_MATCH'].isna() validation_error_mask = ~mask & ~na_location validation_error_locations = ad1.index[validation_error_mask] return {'AD1': validation_error_locations.tolist()} return error, _validate def validate_134(): error = ErrorDefinition( code='134', description='Data on adoption should not be entered for the OC3 cohort.', affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR'], ) def _validate(dfs): if 'OC3' not in dfs or 'AD1' not in dfs: return {} else: oc3 = dfs['OC3'] ad1 = dfs['AD1'] ad1['ad1_index'] = ad1.index all_data = ad1.merge(oc3, how='left', on='CHILD') na_oc3_data = ( all_data['IN_TOUCH'].isna() & all_data['ACTIV'].isna() & all_data['ACCOM'].isna() ) na_ad1_data = ( all_data['DATE_INT'].isna() & all_data['DATE_MATCH'].isna() & all_data['FOSTER_CARE'].isna() & all_data['NB_ADOPTR'].isna() & all_data['SEX_ADOPTR'].isna() & all_data['LS_ADOPTR'].isna() ) validation_error = ~na_oc3_data & ~na_ad1_data validation_error_locations = all_data.loc[validation_error, 'ad1_index'].unique() return {'AD1': validation_error_locations.tolist()} return error, _validate def validate_119(): error = ErrorDefinition( code='119', description='If the decision is made that a child should no longer be placed for adoption, then the date of this decision and the reason why this decision was made must be completed.', affected_fields=['REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'], ) def _validate(dfs): if 'PlacedAdoption' not in dfs: return {} else: adopt = dfs['PlacedAdoption'] na_placed_ceased = adopt['DATE_PLACED_CEASED'].isna() na_reason_ceased = adopt['REASON_PLACED_CEASED'].isna() validation_error = (na_placed_ceased & ~na_reason_ceased) | (~na_placed_ceased & na_reason_ceased) validation_error_locations = adopt.index[validation_error] return {'PlacedAdoption': validation_error_locations.tolist()} return error, _validate def validate_159(): error = ErrorDefinition( code='159', description='If a child has been recorded as not receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be completed as well.', affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'], ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] mask1 = oc2['SUBSTANCE_MISUSE'].astype(str) == '1' mask2 = oc2['INTERVENTION_RECEIVED'].astype(str) == '0' mask3 = oc2['INTERVENTION_OFFERED'].isna() validation_error = mask1 & mask2 & mask3 validation_error_locations = oc2.index[validation_error] return {'OC2': validation_error_locations.tolist()} return error, _validate def validate_142(): error = ErrorDefinition( code='142', description='A new episode has started, but the previous episode has not ended.', affected_fields=['DEC', 'REC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = 
dfs['Episodes'] df['DECOM'] = pd.to_datetime(df['DECOM'], format='%d/%m/%Y', errors='coerce') df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce') df['DECOM'] = df['DECOM'].fillna('01/01/1901') # Watch for potential future issues df['DECOM'] = df['DECOM'].replace('01/01/1901', pd.NA) last_episodes = df.sort_values('DECOM').reset_index().groupby(['CHILD'])['index'].last() ended_episodes_df = df.loc[~df.index.isin(last_episodes)] ended_episodes_df = ended_episodes_df[(ended_episodes_df['DEC'].isna() | ended_episodes_df['REC'].isna()) & ended_episodes_df['CHILD'].notna() & ended_episodes_df[ 'DECOM'].notna()] mask = ended_episodes_df.index.tolist() return {'Episodes': mask} return error, _validate def validate_148(): error = ErrorDefinition( code='148', description='Date episode ceased and reason episode ceased must both be coded, or both left blank.', affected_fields=['DEC', 'REC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce') mask = ((df['DEC'].isna()) & (df['REC'].notna())) | ((df['DEC'].notna()) & (df['REC'].isna())) return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_151(): error = ErrorDefinition( code='151', description='All data items relating to a childs adoption must be coded or left blank.', affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTER', 'SEX_ADOPTR', 'LS_ADOPTR'], ) def _validate(dfs): if 'AD1' not in dfs: return {} else: ad1 = dfs['AD1'] na_date_int = ad1['DATE_INT'].isna() na_date_match = ad1['DATE_MATCH'].isna() na_foster_care = ad1['FOSTER_CARE'].isna() na_nb_adoptr = ad1['NB_ADOPTR'].isna() na_sex_adoptr = ad1['SEX_ADOPTR'].isna() na_lsadoptr = ad1['LS_ADOPTR'].isna() ad1_not_null = ( ~na_date_int & ~na_date_match & ~na_foster_care & ~na_nb_adoptr & ~na_sex_adoptr & ~na_lsadoptr) validation_error = ( ~na_date_int | ~na_date_match | ~na_foster_care | ~na_nb_adoptr | ~na_sex_adoptr | ~na_lsadoptr) & ~ad1_not_null validation_error_locations = ad1.index[validation_error] return {'AD1': validation_error_locations.tolist()} return error, _validate def validate_182(): error = ErrorDefinition( code='182', description='Data entries on immunisations, teeth checks, health assessments and substance misuse problem identified should be completed or all OC2 fields should be left blank.', affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'CONVICTED', 'HEALTH_CHECK', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'], ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] mask1 = ( oc2['IMMUNISATIONS'].isna() | oc2['TEETH_CHECK'].isna() | oc2['HEALTH_ASSESSMENT'].isna() | oc2['SUBSTANCE_MISUSE'].isna() ) mask2 = ( oc2['CONVICTED'].isna() & oc2['HEALTH_CHECK'].isna() & oc2['INTERVENTION_RECEIVED'].isna() & oc2['INTERVENTION_OFFERED'].isna() ) validation_error = mask1 & ~mask2 validation_error_locations = oc2.index[validation_error] return {'OC2': validation_error_locations.tolist()} return error, _validate def validate_214(): error = ErrorDefinition( code='214', description='Placement location information not required.', affected_fields=['PL_POST', 'URN'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] mask = df['LS'].isin(['V3', 'V4']) & ((df['PL_POST'].notna()) | (df['URN'].notna())) return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_222(): error = ErrorDefinition( code='222', 
description='Ofsted Unique reference number (URN) should not be recorded for this placement type.', affected_fields=['URN'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] place_code_list = ['H5', 'P1', 'P2', 'P3', 'R1', 'R2', 'R5', 'T0', 'T1', 'T2', 'T3', 'T4', 'Z1'] mask = (df['PLACE'].isin(place_code_list)) & (df['URN'].notna()) & (df['URN'] != 'XXXXXX') return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_366(): error = ErrorDefinition( code='366', description='A child cannot change placement during the course of an individual short-term respite break.', affected_fields=['RNE'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] mask = (df['LS'] == 'V3') & (df['RNE'] != 'S') return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_628(): error = ErrorDefinition( code='628', description='Motherhood details are not required for care leavers who have not been looked after during the year.', affected_fields=['MOTHER'], ) def _validate(dfs): if 'Episodes' not in dfs or 'Header' not in dfs or 'OC3' not in dfs: return {} else: hea = dfs['Header'] epi = dfs['Episodes'] oc3 = dfs['OC3'] hea = hea.reset_index() oc3_no_nulls = oc3[oc3[['IN_TOUCH', 'ACTIV', 'ACCOM']].notna().any(axis=1)] hea_merge_epi = hea.merge(epi, how='left', on='CHILD', indicator=True) hea_not_in_epi = hea_merge_epi[hea_merge_epi['_merge'] == 'left_only'] cohort_to_check = hea_not_in_epi.merge(oc3_no_nulls, how='inner', on='CHILD') error_cohort = cohort_to_check[cohort_to_check['MOTHER'].notna()] error_list = list(set(error_cohort['index'].to_list())) error_list.sort() return {'Header': error_list} return error, _validate def validate_164(): error = ErrorDefinition( code='164', description='Distance is not valid. Please check a valid postcode has been entered.', affected_fields=['PL_DISTANCE'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] is_short_term = df['LS'].isin(['V3', 'V4']) distance = pd.to_numeric(df['PL_DISTANCE'], errors='coerce') # Use a bit of tolerance in these bounds distance_valid = distance.gt(-0.2) & distance.lt(1001.0) mask = ~is_short_term & ~distance_valid return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_169(): error = ErrorDefinition( code='169', description='Local Authority (LA) of placement is not valid or is missing. 
Please check a valid postcode has been entered.', affected_fields=['PL_LA'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] is_short_term = df['LS'].isin(['V3', 'V4']) # Because PL_LA is derived, it will always be valid if present mask = ~is_short_term & df['PL_LA'].isna() return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_179(): error = ErrorDefinition( code='179', description='Placement location code is not a valid code.', affected_fields=['PL_LOCATION'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] is_short_term = df['LS'].isin(['V3', 'V4']) # Because PL_LOCATION is derived, it will always be valid if present mask = ~is_short_term & df['PL_LOCATION'].isna() return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_1015(): error = ErrorDefinition( code='1015', description='Placement provider is own provision but child not placed in own LA.', affected_fields=['PL_LA'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] local_authority = dfs['metadata']['localAuthority'] placement_fostering_or_adoption = df['PLACE'].isin([ 'A3', 'A4', 'A5', 'A6', 'U1', 'U2', 'U3', 'U4', 'U5', 'U6', ]) own_provision = df['PLACE_PROVIDER'].eq('PR1') is_short_term = df['LS'].isin(['V3', 'V4']) is_pl_la = df['PL_LA'].eq(local_authority) checked_episodes = ~placement_fostering_or_adoption & ~is_short_term & own_provision checked_episodes = checked_episodes & df['LS'].notna() & df['PLACE'].notna() mask = checked_episodes & ~is_pl_la return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_411(): error = ErrorDefinition( code='411', description='Placement location code disagrees with LA of placement.', affected_fields=['PL_LOCATION'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] local_authority = dfs['metadata']['localAuthority'] mask = df['PL_LOCATION'].eq('IN') & df['PL_LA'].ne(local_authority) return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_420(): error = ErrorDefinition( code='420', description='LA of placement completed but child is looked after under legal status V3 or V4.', affected_fields=['PL_LA'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] is_short_term = df['LS'].isin(['V3', 'V4']) mask = is_short_term & df['PL_LA'].notna() return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_355(): error = ErrorDefinition( code='355', description='Episode appears to have lasted for less than 24 hours', affected_fields=['DECOM', 'DEC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] mask = df['DECOM'].astype(str) == df['DEC'].astype(str) return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_586(): error = ErrorDefinition( code='586', description='Dates of missing periods are before child’s date of birth.', affected_fields=['MIS_START'], ) def _validate(dfs): if 'Missing' not in dfs: return {} else: df = dfs['Missing'] df['DOB'] = pd.to_datetime(df['DOB'], format='%d/%m/%Y', errors='coerce') df['MIS_START'] = pd.to_datetime(df['MIS_START'], format='%d/%m/%Y', errors='coerce') error_mask = df['MIS_START'].notna() & (df['MIS_START'] <= df['DOB']) return {'Missing': df.index[error_mask].to_list()} return error, _validate def validate_630(): error = ErrorDefinition( code='630', description='Information on previous permanence 
option should be returned.', affected_fields=['RNE'], ) def _validate(dfs): if 'PrevPerm' not in dfs or 'Episodes' not in dfs: return {} else: epi = dfs['Episodes'] pre = dfs['PrevPerm'] epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce') collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce') epi = epi.reset_index() # Form the episode dataframe which has an 'RNE' of 'S' in this financial year epi_has_rne_of_S_in_year = epi[(epi['RNE'] == 'S') & (epi['DECOM'] >= collection_start)] # Merge to see # 1) which CHILD ids are missing from the PrevPerm file # 2) which CHILD are in the prevPerm file, but don't have the LA_PERM/DATE_PERM field completed where they should be # 3) which CHILD are in the PrevPerm file, but don't have the PREV_PERM field completed. merged_epi_preperm = epi_has_rne_of_S_in_year.merge(pre, on='CHILD', how='left', indicator=True) error_not_in_preperm = merged_epi_preperm['_merge'] == 'left_only' error_wrong_values_in_preperm = (merged_epi_preperm['PREV_PERM'] != 'Z1') & ( merged_epi_preperm[['LA_PERM', 'DATE_PERM']].isna().any(axis=1)) error_null_prev_perm = (merged_epi_preperm['_merge'] == 'both') & (merged_epi_preperm['PREV_PERM'].isna()) error_mask = error_not_in_preperm | error_wrong_values_in_preperm | error_null_prev_perm error_list = merged_epi_preperm[error_mask]['index'].to_list() error_list = list(set(error_list)) error_list.sort() return {'Episodes': error_list} return error, _validate def validate_501(): error = ErrorDefinition( code='501', description='A new episode has started before the end date of the previous episode.', affected_fields=['DECOM', 'DEC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: epi = dfs['Episodes'] epi = epi.reset_index() epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce') epi['DEC'] = pd.to_datetime(epi['DEC'], format='%d/%m/%Y', errors='coerce') epi = epi.sort_values(['CHILD', 'DECOM']) epi_lead = epi.shift(1) epi_lead = epi_lead.reset_index() m_epi = epi.merge(epi_lead, left_on='index', right_on='level_0', suffixes=('', '_prev')) error_cohort = m_epi[(m_epi['CHILD'] == m_epi['CHILD_prev']) & (m_epi['DECOM'] < m_epi['DEC_prev'])] error_list = error_cohort['index'].to_list() error_list.sort() return {'Episodes': error_list} return error, _validate def validate_502(): error = ErrorDefinition( code='502', description='Last year’s record ended with an open episode. 
The date on which that episode started does not match the start date of the first episode on this year’s record.', affected_fields=['DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs or 'Episodes_last' not in dfs: return {} else: epi = dfs['Episodes'] epi_last = dfs['Episodes_last'] epi = epi.reset_index() epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce') epi_last['DECOM'] = pd.to_datetime(epi_last['DECOM'], format='%d/%m/%Y', errors='coerce') epi_last_no_dec = epi_last[epi_last['DEC'].isna()] epi_min_decoms_index = epi[['CHILD', 'DECOM']].groupby(['CHILD'])['DECOM'].idxmin() epi_min_decom_df = epi.loc[epi_min_decoms_index, :] merged_episodes = epi_min_decom_df.merge(epi_last_no_dec, on='CHILD', how='inner') error_cohort = merged_episodes[merged_episodes['DECOM_x'] != merged_episodes['DECOM_y']] error_list = error_cohort['index'].to_list() error_list = list(set(error_list)) error_list.sort() return {'Episodes': error_list} return error, _validate def validate_153(): error = ErrorDefinition( code='153', description="All data items relating to a child's activity or accommodation after leaving care must be coded or left blank.", affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'], ) def _validate(dfs): if 'OC3' not in dfs: return {} oc3 = dfs['OC3'] oc3_not_na = ( oc3['IN_TOUCH'].notna() & oc3['ACTIV'].notna() & oc3['ACCOM'].notna() ) oc3_all_na = ( oc3['IN_TOUCH'].isna() & oc3['ACTIV'].isna() & oc3['ACCOM'].isna() ) validation_error = ~oc3_not_na & ~oc3_all_na validation_error_locations = oc3.index[validation_error] return {'OC3': validation_error_locations.to_list()} return error, _validate def validate_166(): error = ErrorDefinition( code='166', description="Date of review is invalid or blank.", affected_fields=['REVIEW'], ) def _validate(dfs): if 'Reviews' not in dfs: return {} else: review = dfs['Reviews'] error_mask = pd.to_datetime(review['REVIEW'], format='%d/%m/%Y', errors='coerce').isna() validation_error_locations = review.index[error_mask] return {'Reviews': validation_error_locations.to_list()} return error, _validate def validate_174(): error = ErrorDefinition( code='174', description="Mother's child date of birth is recorded but gender shows that the child is a male.", affected_fields=['SEX', 'MC_DOB'], ) def _validate(dfs): if 'Header' not in dfs: return {} else: header = dfs['Header'] child_is_male = header['SEX'].astype(str) == '1' mc_dob_recorded = header['MC_DOB'].notna() error_mask = child_is_male & mc_dob_recorded validation_error_locations = header.index[error_mask] return {'Header': validation_error_locations.to_list()} return error, _validate def validate_180(): error = ErrorDefinition( code='180', description="Data entry for the strengths and difficulties questionnaire (SDQ) score is invalid.", affected_fields=['SDQ_SCORE'], ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] oc2['SDQ_SCORE'] = pd.to_numeric(oc2['SDQ_SCORE'], errors='coerce') error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['SDQ_SCORE'].isin(range(41)) validation_error_locations = oc2.index[error_mask] return {'OC2': validation_error_locations.to_list()} return error, _validate def validate_181(): error = ErrorDefinition( code='181', description="Data items relating to children looked after continuously for 12 months should be completed with a 0 or 1.", affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'], ) def _validate(dfs): if 'OC2' 
not in dfs: return {} else: oc2 = dfs['OC2'] code_list = ['0', '1'] fields_of_interest = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'] error_mask = ( oc2[fields_of_interest].notna() & ~oc2[fields_of_interest].astype(str).isin(['0', '1']) ).any(axis=1) validation_error_locations = oc2.index[error_mask] return {'OC2': validation_error_locations.tolist()} return error, _validate def validate_192(): error = ErrorDefinition( code='192', description="Child has been identified as having a substance misuse problem but the additional item on whether an intervention was received has been left blank.", affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED'], ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] misuse = oc2['SUBSTANCE_MISUSE'].astype(str) == '1' intervention_blank = oc2['INTERVENTION_RECEIVED'].isna() error_mask = misuse & intervention_blank validation_error_locations = oc2.index[error_mask] return {'OC2': validation_error_locations.to_list()} return error, _validate def validate_193(): error = ErrorDefinition( code='193', description="Child not identified as having a substance misuse problem but at least one of the two additional items on whether an intervention were offered and received have been completed.", affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'], ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] no_substance_misuse = oc2['SUBSTANCE_MISUSE'].isna() | (oc2['SUBSTANCE_MISUSE'].astype(str) == '0') intervention_not_blank = oc2['INTERVENTION_RECEIVED'].notna() | oc2['INTERVENTION_OFFERED'].notna() error_mask = no_substance_misuse & intervention_not_blank validation_error_locations = oc2.index[error_mask] return {'OC2': validation_error_locations.tolist()} return error, _validate def validate_197a(): error = ErrorDefinition( code='197a', description="Reason for no Strengths and Difficulties (SDQ) score is not required if Strengths and Difficulties Questionnaire score is filled in.", affected_fields=['SDQ_SCORE', 'SDQ_REASON'], ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] sdq_filled_in = oc2['SDQ_SCORE'].notna() reason_filled_in = oc2['SDQ_REASON'].notna() error_mask = sdq_filled_in & reason_filled_in validation_error_locations = oc2.index[error_mask] return {'OC2': validation_error_locations.tolist()} return error, _validate def validate_567(): error = ErrorDefinition( code='567', description='The date that the missing episode or episode that the child was away from placement without authorisation ended is before the date that it started.', affected_fields=['MIS_START', 'MIS_END'], ) def _validate(dfs): if 'Missing' not in dfs: return {} else: mis = dfs['Missing'] mis['MIS_START'] = pd.to_datetime(mis['MIS_START'], format='%d/%m/%Y', errors='coerce') mis['MIS_END'] = pd.to_datetime(mis['MIS_END'], format='%d/%m/%Y', errors='coerce') mis_error = mis[mis['MIS_START'] > mis['MIS_END']] return {'Missing': mis_error.index.to_list()} return error, _validate def validate_304(): error = ErrorDefinition( code='304', description='Date unaccompanied asylum-seeking child (UASC) status ceased must be on or before the 18th birthday of a child.', affected_fields=['DUC'], ) def _validate(dfs): if 'UASC' not in dfs: return {} else: uasc = dfs['UASC'] uasc['DOB'] = pd.to_datetime(uasc['DOB'], format='%d/%m/%Y', errors='coerce') uasc['DUC'] = pd.to_datetime(uasc['DUC'], 
format='%d/%m/%Y', errors='coerce') mask = uasc['DUC'].notna() & (uasc['DUC'] > uasc['DOB'] + pd.offsets.DateOffset(years=18)) return {'UASC': uasc.index[mask].to_list()} return error, _validate def validate_333(): error = ErrorDefinition( code='333', description='Date should be placed for adoption must be on or prior to the date of matching child with adopter(s).', affected_fields=['DATE_INT'], ) def _validate(dfs): if 'AD1' not in dfs: return {} else: adt = dfs['AD1'] adt['DATE_MATCH'] = pd.to_datetime(adt['DATE_MATCH'], format='%d/%m/%Y', errors='coerce') adt['DATE_INT'] = pd.to_datetime(adt['DATE_INT'], format='%d/%m/%Y', errors='coerce') # If <DATE_MATCH> provided, then <DATE_INT> must also be provided and be <= <DATE_MATCH> mask1 = adt['DATE_MATCH'].notna() & adt['DATE_INT'].isna() mask2 = adt['DATE_MATCH'].notna() & adt['DATE_INT'].notna() & (adt['DATE_INT'] > adt['DATE_MATCH']) mask = mask1 | mask2 return {'AD1': adt.index[mask].to_list()} return error, _validate def validate_1011(): error = ErrorDefinition( code='1011', description='This child is recorded as having his/her care transferred to another local authority for the final episode and therefore should not have the care leaver information completed.', affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'], ) def _validate(dfs): if 'OC3' not in dfs or 'Episodes' not in dfs: return {} else: epi = dfs['Episodes'] oc3 = dfs['OC3'] epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce') # If final <REC> = 'E3' then <IN_TOUCH>; <ACTIV> and <ACCOM> should not be provided epi.sort_values(['CHILD', 'DECOM'], inplace=True) grouped_decom_by_child = epi.groupby(['CHILD'])['DECOM'].idxmax(skipna=True) max_decom_only = epi.loc[epi.index.isin(grouped_decom_by_child), :] E3_is_last = max_decom_only[max_decom_only['REC'] == 'E3'] oc3.reset_index(inplace=True) cohort_to_check = oc3.merge(E3_is_last, on='CHILD', how='inner') error_mask = cohort_to_check[['IN_TOUCH', 'ACTIV', 'ACCOM']].notna().any(axis=1) error_list = cohort_to_check['index'][error_mask].to_list() error_list = list(set(error_list)) error_list.sort() return {'OC3': error_list} return error, _validate def validate_574(): error = ErrorDefinition( code='574', description='A new missing/away from placement without authorisation period cannot start when the previous missing/away from placement without authorisation period is still open. Missing/away from placement without authorisation periods should also not overlap.', affected_fields=['MIS_START', 'MIS_END'], ) def _validate(dfs): if 'Missing' not in dfs: return {} else: mis = dfs['Missing'] mis['MIS_START'] =
pd.to_datetime(mis['MIS_START'], format='%d/%m/%Y', errors='coerce')
pandas.to_datetime
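A minimal, self-contained sketch of the `pd.to_datetime(..., errors='coerce')` pattern exercised by the completion above; the series name and date strings below are invented for illustration and are not part of the dataset row:

import pandas as pd

dates = pd.Series(['01/04/2020', '31/02/2020', 'not a date'])
# Non-matching or impossible dates become NaT instead of raising,
# so isna()/notna() masks can flag the invalid rows.
parsed = pd.to_datetime(dates, format='%d/%m/%Y', errors='coerce')
invalid_rows = dates.index[parsed.isna()].tolist()  # [1, 2]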
import json import pandas as pd from quantamatics.core.APIClient import Session from quantamatics.core.utils import QException, QLog, Singleton from quantamatics.core.settings import MethodTypes, ParamsTypes class APIGatewayClient(metaclass=Singleton): def __init__(self): self.session = Session() self.logger = QLog() self.apiDirectory = None return def getAPIList(self): _, response_text = self.session.handleRequest( api_relative_path='/api/function/getAll' ) apiDirectory = json.loads(response_text) self.apiDirectory = apiDirectory apiList = pd.DataFrame(columns = ['Name', 'Provider', 'Description']) for apiMetaData in self.apiDirectory: apiList = pd.concat( [ apiList, pd.DataFrame( [ [ apiMetaData['name'], apiMetaData['assetName'], apiMetaData['description'] ] ] ) ] ) return apiList def getAPIMetaData(self, gatewayAPIName: str) -> dict: if self.apiDirectory is None: self.getAPIList() apiMetaData=list(filter(lambda x:x["name"]==gatewayAPIName,self.apiDirectory)) #TODO: remove id,type,functionId & assetId return apiMetaData def restartAPIGatewayEnv(self): try: self.session.handleRequest( api_relative_path='/api/function/restartEnvironment', method_type=MethodTypes.POST ) return True except QException: return False def executeAPICall(self, gatewayAPIName: str,params: dict = {}) -> pd.DataFrame: requestParams = {'FunctionName':gatewayAPIName, 'BatchId':None, 'Args':params } _, response_text = self.session.handleRequest( api_relative_path='/api/function/runFunction', params=requestParams, params_type=ParamsTypes.JSON, method_type=MethodTypes.POST ) responseData = json.loads(response_text) if responseData['error'] is not None: self.logger.logDebug(responseData['error']) raise QException('Execution Error: %s' % responseData['error']) try: gatewayResultsDF =
pd.read_json(responseData['body'], orient='rows')
pandas.read_json
import sys
import logging
import argparse

import pandas as pd


def compute_score(predictions, actual):
    """Look at 5% of most highly predicted movies for each user.
    Return the average actual rating of those movies.
    """
    df =
pd.merge(predictions, actual, on=['user','movie'])
pandas.merge
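A small illustrative sketch of `pd.merge` on two key columns, mirroring the `on=['user','movie']` join in the completion above; the frames and values are invented:

import pandas as pd

predictions = pd.DataFrame({'user': [1, 1, 2], 'movie': [10, 11, 10], 'pred': [4.5, 3.0, 2.5]})
actual = pd.DataFrame({'user': [1, 2], 'movie': [10, 10], 'rating': [5, 2]})
# Inner join by default: only (user, movie) pairs present in both frames remain.
merged = pd.merge(predictions, actual, on=['user', 'movie'])  # 2 rows, pred and rating side by side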
import os, math import numpy as np import pandas as pd import matplotlib.pyplot as plt #from matplotlib.collections import PatchCollection from sklearn import linear_model from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() from importlib import reload # Constants #files = ['time_series_19-covid-Confirmed', 'time_series_19-covid-Deaths', 'time_series_19-covid-Recovered'] #labels = ['Confirmed', 'Deaths', 'Recovered']# until 23 March 2020 # Since 24 March 2020 #files = ['time_series_covid19_confirmed_global', 'time_series_covid19_deaths_global'] #labels = ['confirmed', 'deaths'] # Since 28 March 2020 files = ['time_series_covid19_confirmed_global', 'time_series_covid19_deaths_global', 'time_series_covid19_recovered_global'] labels = ['confirmed', 'deaths', 'recovered'] def open_csvs(): ''' Finding and opening your most recent data download if timestamp == None. Alternatively, specify a substring of requested timestamp to select which files to open. ''' timestamp = None #timestamp = '20200330_15-26' df=dict() lists = list([list(), list(), list()]) with os.scandir() as it: for entry in it: for i in range(3): if (timestamp==None or timestamp in entry.name) and files[i] in entry.name\ and entry.is_file(): lists[i].append(entry.name) for i in range(3): lists[i].sort() df[labels[i]] = pd.read_csv(lists[i][-1]) return df def data_preparation(df, country, output): ''' This is used for the JHU CSSE dataset. output can be 'confirmed', 'deaths', 'recovered', 'active' or 'all' 'active' returns dft['confirmed']-dft['deaths']-dft['recovered'] 'all' returns all three as columns in a DataFrame as used in death_over_cases.py ''' sets = dict({'EU': ['Austria', 'Belgium', 'Bulgaria', 'Croatia', 'Cyprus', 'Czechia', 'Denmark', 'Estonia', 'Finland', 'France', 'Germany', 'Greece', 'Hungary', 'Ireland', 'Italy', 'Latvia', 'Lithuania', 'Luxembourg', 'Malta', 'Netherlands', 'Poland', 'Portugal', 'Romania', 'Slovakia', 'Slovenia', 'Spain', 'Sweden']})#, #'China': [['Anhui', 'China'], ['Beijing', 'China'], ['Chongqing', 'China'], ['Fujian', 'China'], ['Gansu', 'China'], ['Guangdong', 'China'], ['Guangxi', 'China'], ['Guizhou', 'China'], ['Hainan', 'China'], ['Hebei', 'China'], ['Heilongjiang', 'China'], ['Henan', 'China'], ['Hong Kong', 'China'], ['Hubei', 'China'], ['Hunan', 'China'], ['Inner Mongolia', 'China'], ['Jiangsu', 'China'], ['Jiangxi', 'China'], ['Jilin', 'China'], ['Liaoning', 'China'], ['Macau', 'China'], ['Ningxia', 'China'], ['Qinghai', 'China'], ['Shaanxi', 'China'], ['Shandong', 'China'], ['Shanghai', 'China'], ['Shanxi', 'China'], ['Sichuan', 'China'], ['Tianjin', 'China'], ['Tibet', 'China'], ['Xinjiang', 'China'], ['Yunnan', 'China'], ['Zhejiang', 'China']]}) #sets = dict({'EU': ['Croatia', 'Hungary']}) # test only l = list() if country == 'EU' or country == 'China' or country == 'Australia': ''' First, recursive implementation l_members = list() for member in sets[country]: l_members.append(data_preparation(df, member, only_cases)) dft_members = pd.concat(l_members, axis=1) return dft_members.sum(axis=1) ''' M = dict() # these matrices are the booleans of selections for each Province/State, we take their multiple for i in range(3): k = labels[i] M[k] = list() if country == 'China' or country == 'Australia': M[k].append((df[k]['Province/State'].notna()) & (df[k]['Country/Region']==country)) l.append(df[k][M[k][0]].iloc[:,4:].sum(axis=0)) else: # country == 'EU' for member in sets[country]: #print(member) if isinstance(member, str): 
M[k].append((df[k]['Province/State'].isna()) & (df[k]['Country/Region']==member)) elif len(member)==2: # if it's a pair of [Province/State, Country/Region] M[k].append((df[k]['Province/State']==member[0]) & (df[k]['Country/Region']==member[1])) l.append(df[k][np.sum(np.array(M[k]), axis=0)>=1].iloc[:,4:].sum(axis=0)) dft = pd.concat(l, ignore_index=True, axis=1) #dft.rename(columns={i: labels[i] for i in range(3)}, inplace=True) else: for i in range(3): k = labels[i] if isinstance(country, str): l.append(df[k][np.logical_and(df[k]['Province/State'].isna(), df[k]['Country/Region']==country)].iloc[:,4:]) elif len(country)==2: # if it's a pair of [Province/State, Country/Region] l.append(df[k][np.logical_and(df[k]['Province/State']==country[0], df[k]['Country/Region']==country[1])].iloc[:,4:]) dft = pd.concat(l, ignore_index=True, axis=0).transpose() #print(dft) dft.rename(columns={i: labels[i] for i in range(3)}, inplace=True) #print(dft) if output=='all': df_ts = dft elif output=='active': print('Number of recovered in the past eight days:') print(dft['recovered'][-8:]) df_ts = dft['confirmed']-dft['deaths']-dft['recovered'] # On 24 March 2020, recovered is not available; on 28 March 2020 it is there again. else: df_ts = dft[output] #print(df_ts) #df_ts.rename(index={df_ts.index[i]: pd.to_datetime(df_ts.index)[i] for i in range(len(df_ts.index))}, inplace=True) df_ts.rename(index=
pd.Series(df_ts.index, index=df_ts.index)
pandas.Series
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from numpy.polynomial.polynomial import polyfit
from scipy.stats import shapiro
from scipy.stats import ttest_ind as tt
from scipy.stats import spearmanr as corrp
import numpy as np
from statsmodels.graphics.gofplots import qqplot

font = {'family' : 'sans-serif', 'weight' : 'light', 'size' : 16}
matplotlib.rc('font', **font)

bad_indices=[]
sr_data=pd.read_csv('self_report_study2.csv') #load self-report data
mb_agnostic=pd.read_csv('mb_scores_rares_empirical_best.csv')
mb_scores=mb_agnostic['MB_behav']
state_t1=pd.read_csv('Gillan_TL_full_lrT.csv',header=None) #load state transition lrs
state_t=pd.read_csv('Gillan_Or_full_lrT_decay.csv',header=None) #load state transition lrs
print(len(state_t1))
r,p=corrp(state_t1[0],state_t[0])
print('CORREL ST TL both models : {}, p {}'.format(r,p))
it_mb=pd.read_csv('Gillan_Or_full_MB_decay.csv',header=None) #load MB beta
# it_mb=np.log(it_mb)
mf1=pd.read_csv('Gillan_Or_full_MF1_decay.csv',header=None)
mf2=
pd.read_csv('Gillan_Or_full_mf2_decay.csv',header=None)
pandas.read_csv
import traceback import argparse import re # regular expressions import gzip import pandas as pd ''' Load RNA sequence into memory. Reads a FASTA.gz file from GeneCode. Parses the transcript id (TID) from the FASTA defline. Returns a Pandas dataframe with columnts tid, class, sequence, seqlen. Typical input files from (https://www.gencodegenes.org/) - gencode.v38.lncRNA_transcripts.fa.gz - gencode.v38.pc_transcripts.fa.gz ''' class GenCodeLoader(): def __init__(self): self.pattern5=re.compile('.*UTR5:') self.pattern3=re.compile('.*UTR3:') self.check_list = None self.check_utr = False self.min_size = None self.max_size = None def set_label(self,label): ''' Set one label used for subsequent sequences. The value gets stored in the 'class' field. Usually use 1 for protein-coding and 0 for non-coding. ''' self.label=label def set_check_list(self,check_list): ''' Optionally provide a TID include list. Others are excluded. The parameter, type list, is used with pythin 'in' operator. ''' self.check_list=check_list def set_check_utr(self,check_utr): ''' Optionally require UTR. Equivalent to requiring an ORF. Include only deflines that specify 5'UTR and 3'UTR positions. (GenCode does have mRNA transcripts that lack an ORF!) Set this to false when loading non-coding RNA. The parameter is type boolean. ''' self.check_utr=check_utr def set_check_size(self,min,max): self.min_size = min self.max_size = max def __save_previous(self,one_def,one_seq): ''' For internal use only. FASTA sequence records are multi-line starting with a defline. This is called just before parsing a new defline to optionally save the previously parsed sequence record. ''' if one_def is None: return if self.check_utr: if self.pattern5.match(one_def) is None: return if self.pattern3.match(one_def) is None: return seq_len = len(one_seq) if self.min_size is not None and seq_len < self.min_size: return if self.max_size is not None and seq_len > self.max_size: return VERSION = '.' one_id = one_def[1:].split(VERSION)[0] if self.check_list is not None: if one_id not in self.check_list: return self.labels.append(self.label) self.seqs.append(one_seq) self.lens.append(len(one_seq)) self.ids.append(one_id) def load_file(self,filename): ''' Parse the given file and return a data structure. Given file assumed GenCode FASTA file. Returns a Pandas dataframe with four fields. ''' self.labels=[] # usually 1 for protein-coding or 0 for non-coding self.seqs=[] # usually strings of ACGT self.lens=[] # sequence length self.ids=[] # GenCode transcript ID, always starts ENST, excludes version DEFLINE='>' # start of line with ids in a FASTA FILE EMPTY='' one_def = None one_seq = '' with gzip.open (filename,'rt') as infile: for line in infile: if line[0]==DEFLINE: self.__save_previous(one_def,one_seq) one_def=line one_seq = EMPTY else: # Continue loading sequence lines till next defline. additional = line.rstrip() one_seq = one_seq + additional # Don't forget to save the last sequence after end-of-file. self.__save_previous(one_def,one_seq) df1=pd.DataFrame(self.ids,columns=['tid']) df2=pd.DataFrame(self.labels,columns=['class']) df3=pd.DataFrame(self.seqs,columns=['sequence']) df4=
pd.DataFrame(self.lens,columns=['seqlen'])
pandas.DataFrame
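A brief sketch of building single-column DataFrames from plain lists, as the loader above does for its return value; the IDs and lengths below are invented, and the final `pd.concat` step is an assumption about how such columns are typically combined:

import pandas as pd

ids = ['ENST00000000001', 'ENST00000000002']   # invented transcript IDs
lens = [1200, 350]                             # invented sequence lengths
df_ids = pd.DataFrame(ids, columns=['tid'])    # one-column frame from a list
df_lens = pd.DataFrame(lens, columns=['seqlen'])
combined = pd.concat([df_ids, df_lens], axis=1)  # columns: tid, seqlen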
from __future__ import division import os import itertools import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt import skbio from scipy.stats import kruskal from skbio.stats.power import _check_strs from statsmodels.sandbox.stats.multicomp import multipletests __author__ = "<NAME>" __copyright__ = "Copyright 2015, The American Gut Project" __credits__ = ["<NAME>"] __license__ = "BSD" __version__ = "unversioned" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" def check_dir(dir_): """Creates the specified directory if it does not exist Parameters ---------- dir : strf the directory to be checked """ if not os.path.exists(dir_): os.mkdir(dir_) def pad_index(df, index_col='#SampleID', nzeros=9): """Adds zeros to the sample ID strings Parameters ---------- df : dataframe the data frame without an index column index_col : {#SampleID, str} the name of the column containing the index data n_zeros : {9, int} the number of zeros to add before the string Returns ------- df : dataframe the dataframe with an appropriate index column """ # Gets the sample IDs samples = df[index_col].values new_samples = [] # Pads the zeros on the id for samp in samples: if not isinstance(samp, str): samp = str(samp) splits = samp.split('.') first_clean = [splits[0].zfill(nzeros)] first_clean.extend(splits[1:]) new_samples.append('.'.join(first_clean)) # Sets the column as the index df.index = new_samples del df[index_col] # Returns the dataframe return df def boxplot(vecs, ax=None, notch=True, interval=0.5, show_counts=True, **kwargs): """Makes a more attractive boxplot Parameters ---------- vecs : list The list of arrays to plot as boxplots. The list format allows the arrays to be of uneven length. ax : matplotlib axis, optional The axis where data should be plotted. If none, a new axis instance will be created. interval : float, optional The spacing between the boxplot instances on the axes notch : bool, optional Displays the parametric 95% confidence interval around the mean. show_counts : bool, optional Shows the size of the groups below each plot on the x-axis p_value : float, optional Default is None. When supplied, the significance value will be displayed on the plot in the upper right hand corner by default. show_xgrid: bool, optional Default is False. Adds vertical lines at each major x-tick. show_ygrid: bool, optional Default is True. Adds horizonal lines at each major y-tick. title: str, optional The title to be placed on the graph. ylims : list The limits for the y-axis. ylabel : str The label text for the y-axis. Returns ------- ax : axes A matplotlib axes containing the plotted data Other Parameters ---------------- hide_x_ticks : bool, optional Display x-tick symbols on the plot hide_y_ticks : bool, optional Display y-tick symbols on the plot p_x : float The x position of the critical value text p_y : float The y position of the critical value text p_size : int The font size for the critical value text title_size: int The font size for the title xticklabels : list The strings to label each point on the x-axis. xfont_angle : float The angle in degrees for the x tick label text. xfont_align : {'left', 'right', 'center'} The horizonal alignment of the x tick label text. For rotated text, an alignment or 'right' is recommended. xlabel_size : int The font size of the x-axis label. xtick_size : int The font size for the xtick labels yticks : array_like The positions where ticks should appear on the y-axis. yticklabels : list The text to be displayed at each y tick. 
ylabel_size : int The font size of the y-axis label. ytick_size : int The font size for the ytick labels """ # Sets up an axes instance if necessary if ax is None: ax = plt.axes() # Determines the plotting locations num_cats = len(vecs) xlim = [-interval/2, interval*(num_cats-1)+interval/2] # Sets up the plotting constants ticks = np.arange(0, interval*num_cats, interval) counts = [] # Loops through the data for tick, vec in zip(ticks, vecs): # Gets vector characteristics counts.append(len(vec)) # Plots the data ax.boxplot(vec, positions=[tick], notch=notch) # Sets up axis formatting kwargs['counts'] = kwargs.get('counts', counts) kwargs['xlim'] = kwargs.get('xlim', xlim) kwargs['xticks'] = kwargs.get('xticks', ticks) _format_axis(ax, **kwargs) return ax def pretty_pandas_boxplot(meta, group, cat, order=None, ax=None, **kwargs): """Creates a more attractive boxplot than pandas Parameters ---------- meta : pandas dataframe The metadata for the variable containing a column with a continous varaible, designated in `cat`, and a categorical variable, `group` with categories given by `order`. group : str The name of a column in meta which is a categorical predictor variable cat : str A column in meta which contains a continous response variable order : list, optional The order of categories in `group`. This can be used to limit the categories plotted. For instance, if there are three categories in `group`: A, B and C, and you only wish to compare A and C, you can list order as ['A', 'C'] to limit the categories. interval : float, optional The spacing between the boxplot instances on the axes notch : bool, optional Displays the parametric 95% confidence interval around the mean. show_n : bool, optional Shows the size of the groups below each plot on the x-axis p_value : float, optional Default is None. When supplied, the significance value will be displayed on the plot in the upper right hand corner by default. show_xgrid: bool, optional Default is False. Adds vertical lines at each major x-tick. show_ygrid: bool, optional Default is True. Adds horizonal lines at each major y-tick. title: str, optional The title to be placed on the graph. ylims : list The limits for the y-axis. ylabel : str The label text for the y-axis. Returns ------- ax : axes A matplotlib axes containing the plotted data """ grouped = meta.groupby(group) # Sets up the plotting order if order is None: order = grouped.groups.keys() # Gets the data vectors vecs = [grouped.get_group(g)[cat].values for g in order] # Formats the axis, if not already done kwargs['xticklabels'] = kwargs.get('xticklabels', [g.split('(')[0] for g in order]) kwargs['show_xticks'] = kwargs.get('show_xticks', False) kwargs['show_ygrid'] = kwargs.get('show_ygrid', True) # Calculates the p value h, p = kruskal(*vecs) # Sets the boxplot properties ax = boxplot(vecs=vecs, ax=ax, p_value=p, **kwargs) return ax def post_hoc_pandas(meta, group, cat, order=None, correct=None, show_stats=True): """Preforms an post-hoc comparison between two groups Parameters ---------- meta : pandas DataFrame the metadata object for the data group : str the metadata category being interrogated cat : str the name of the column with the result order : None, list, optional Default is None. The order of groups in the category. correct : None, str, optional Method for multiple hypothesis correction using `statsmodels.sandbox.stats.multicomp.multipletests`. Methods you're likely to use are `bonferroni` and `fdr_bh`. 
show_stats : bool, optional When `show_stats` is True, a summary of each group will be displayed along with the p values. Returns ------- post_hoc : dataframe `post_hoc` summarizes the results of the post-hoc test. It includes statitics about each distribution, as well as the comparison matrix of p-values. """ # Groups the data grouped = meta.groupby(group) # Gets the order if order is None: order = grouped.groups.keys() # Sets up an output dataframe if show_stats: stats = pd.DataFrame({'Counts': grouped[cat].count(), 'Mean': grouped[cat].mean(), 'Stdv': grouped[cat].std(), 'Median': grouped[cat].median()}) # Preforms ad-hoc comparisons comparison = {} for pos, g1_name in enumerate(order[:-1]): g1_data = grouped.get_group(g1_name)[cat] compare = [] for id2, g2_name in enumerate(order): if id2 <= pos: compare.append(np.nan) else: g2_data = grouped.get_group(g2_name)[cat] compare.append(kruskal(g1_data, g2_data)[1]) add_series =
pd.Series(compare, index=order)
pandas.Series
from matplotlib.path import Path import numpy as np import pandas as pd import warnings def parse_polygon_gate(events, channel_labels, gate): """ Extract events in given Polygon gate :param events: NumPy array of events on which to apply the gate :param channel_labels: dictionary of channel labels (keys are channel #'s) :param gate: dictionary for a 'Polygon' gate :return: """ # First, get the column indices for the x and y parameters x_label = gate['x_axis'] y_label = gate['y_axis'] x_index = None y_index = None for chan_number, labels in channel_labels.items(): if labels['PnN'] in x_label: x_index = int(chan_number) - 1 elif labels['PnN'] in y_label: y_index = int(chan_number) - 1 if x_index is None or y_index is None: raise ValueError("Channel labels not found in data for polygon gate") xy_events = events[:, [x_index, y_index]] xy_vertices = [] for vertex in gate['vertices']: xy_vertices.append( [ float(vertex['x']), float(vertex['y']) ] ) path = Path(xy_vertices) is_in_gate = path.contains_points(xy_events, radius=0.0) gated_events = events[is_in_gate] return { 'gated_events': gated_events, 'ungated_count': events.shape[0] } def parse_boolean_gates(events, gating_dict, boolean_gate_list): for gate in boolean_gate_list: specs = [g.strip() for g in gate['specification'].split('&')] group_inclusion = [] for g in specs: if g[0] == '!': group_inclusion.append(False) else: group_inclusion.append(True) groups = [] for g in gate['groups']: g_split = g.split('/') if len(g_split) > 2: warnings.warn("Nested boolean gates are not supported (%s)" % g) groups.append(None) continue groups.append(g_split[1]) # start with including all events, ignore inspection for using 'True' # noinspection PyTypeChecker all_include_events = np.broadcast_to(True, events[:, 0].shape) for i, g in enumerate(groups): if group_inclusion[i] and g is not None: invert = False elif g is not None: invert = True # invert because it's an exclusion gate else: continue for group_gate in gating_dict[g]['gates']: # if len(group_gate['result']['gated_events']) > 0: # get the event indices for this gate gate_events = group_gate['result']['gated_events'][:, 0] # find boolean array of these indices from all parent events gate_include_events = np.in1d( events[:, 0], list(gate_events), invert=invert ) all_include_events = np.logical_and( all_include_events, gate_include_events ) gated_events = events[all_include_events] gate['result'] = { 'gated_events': gated_events, 'ungated_count': events.shape[0] } def apply_gating_hierarchy(events, channel_labels, gating_dict): """ Extract events from root gates and recurse on any children :param events: NumPy array of events on which to apply the gate :param channel_labels: dictionary of channel labels (keys are channel #'s) :param gating_dict: dictionary of gating hierarchy where current_gate is a key at the root level. Gating dict is modified to add the results to each gate. :return: None """ # We ignore boolean gates here since they reference other gates that need to be # calculated first. The boolean gates will be handled by another function that # should be run after this one. Don't really like this as it means at least 2 # passes through the gating_dict, but it will work for now. 
boolean_gates = [] for gate_label, gate_objects in gating_dict.items(): # Iterate through gates, I suppose it is possible to have multiple sub-regions # drawn for a single "gate" so we'll iterate and check in case there's > 1 gate gated_events = np.ndarray(shape=(0, events.shape[1])) for gate in gate_objects['gates']: gate_type = gate['type'] if gate_type == 'Polygon': gate['result'] = parse_polygon_gate(events, channel_labels, gate) gated_events = np.vstack((gated_events, gate['result']['gated_events'])) elif gate_type == 'Boolean': # Need to save these as the referenced gates may not have been # calculated yet, we'll parse them after all the other ones boolean_gates.append(gate) else: raise ValueError("Unsupported gate type: %s" % gate_type) apply_gating_hierarchy(gated_events, channel_labels, gate_objects['children']) if len(boolean_gates) > 0: parse_boolean_gates(events, gating_dict, boolean_gates) def parse_results_dict(population_root, parent_label): parsed_results = [] for label, pop in population_root.items(): if len(pop['gates']) > 1: print('multi-region gate') parsed_results.append({ 'parent_path': parent_label, 'label': label, 'type': pop['gates'][0]['type'], 'count': pop['gates'][0]['result']['gated_events'].shape[0], 'parent_count': pop['gates'][0]['result']['ungated_count'] }) child_results = parse_results_dict( pop['children'], '/'.join([parent_label, label]) ) parsed_results.extend(child_results) return parsed_results def results_to_dataframe(results): sg_results = parse_results_dict(results['populations'], 'root') df =
pd.DataFrame(sg_results)
pandas.DataFrame
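A short sketch of `pd.DataFrame` constructed from a list of dicts, the pattern used for `sg_results` above; the record contents are invented:

import pandas as pd

records = [
    {'label': 'gateA', 'count': 120, 'parent_count': 500},
    {'label': 'gateB', 'count': 30, 'parent_count': 120},
]
df = pd.DataFrame(records)  # dict keys become the columns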
import ast import re from datetime import datetime from pathlib import Path import matplotlib.pyplot as plt import numpy as np import pandas as pd import wandb color_list = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf'] def retrieve_values_from_name(fname): return re.findall(r"[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?", fname) def download_from_wandb(resdir): project = 'SYK4Model' # target_cfgs = { # 'config.lr': 0.05, # 'config.scheduler_name': 'constant', # 'config.seed_SYK': 1 # } print(f'Downloading experiment results from {project}') print(f'| Results directory : {resdir}') # print(f'| Target constraints: {target_cfgs}') api = wandb.Api() # runs = api.runs(project, filters=target_cfgs) run_ids = TARGET_RUN_IDS.split('\n') records = [] visited = set() for run_id in run_ids: if run_id in visited: raise ValueError(f'There is a duplicated run id {run_id}.') run = api.run(f'vqc-quantum/{project}/{run_id.strip()}') visited.add(run_id) if run.state == 'finished': print(run.name) if 'eigenvalues' not in run.config: print(f'| Skip this run because eigenvalues info is not in config.') continue history = run.history() eigvals_str = run.config['eigenvalues'].replace('\n', '') eigvals_str = re.sub(' +', ',', eigvals_str) try: ground_state_energy = ast.literal_eval(eigvals_str)[0] except ValueError as e: print(str(e)) print(f'Parsing Error: eigvals_str: {eigvals_str}') print(f'Retry to parse the first element') # Some runs logs eigenvalues in the following format. # [-5.69803132e-02+0.00000000e+00j ... 1.10259914e-16-4.19720017e-16j] # Due to dots let us parse the first element and then get its real part. v_str = eigvals_str.split(',')[0].strip('[') print(f' - Retried string: {v_str}') ground_state_energy = ast.literal_eval(v_str).real best_step = history.loss.argmin() min_energy_gap = np.abs(history.loss[best_step] - ground_state_energy) # |E(\theta) - E0| fidelity = history['fidelity/ground'][best_step] if run.config["n_qubits"] % 4 == 2: # SYK4 is degenerated. 
fidelity += history['fidelity/next_to_ground'][best_step] loss_threshold = 1e-4 hitting_time = float('inf') for i, row in history.iterrows(): if np.abs(row['loss'] - ground_state_energy) < loss_threshold: hitting_time = i break records.append( dict( n_qubits=run.config['n_qubits'], n_layers=run.config['n_layers'], min_energy_gap=min_energy_gap, fidelity=fidelity, hitting_time=hitting_time ) ) print(records[-1]) df = pd.DataFrame.from_records(records) if not resdir.exists(): resdir.mkdir(exist_ok=True, parents=True) df.to_pickle(resdir / f'minloss.pkl') print('Done') return df def retrieve_min_and_max(res, column): x = res.n_layers.unique() x.sort() y_mean = [] y_min = [] y_max = [] for l in x: r = res[res.n_layers == l] y_mean.append(r[column].mean()) y_min.append(r[column].min()) y_max.append(r[column].max()) y_mean = np.array(y_mean) y_min = np.array(y_min) y_max = np.array(y_max) return x, y_mean, y_min, y_max def draw_optimization_energy_gap(df, linestyles): n_qubits_list = df.n_qubits.unique() n_qubits_list.sort() for i, n_qubits in enumerate(n_qubits_list): label = f'{n_qubits} Qubits' res = df[df.n_qubits == n_qubits] x, y_mean, y_min, y_max = retrieve_min_and_max(res, 'min_energy_gap') plt.plot(x, y_mean, linestyles[i], linewidth=1.2, alpha=1., markersize=5, label=label) plt.fill_between(x, y_min, y_max, alpha=0.35) # plt.xscale('log') plt.yscale('log') # plt.xlim(0, 155) plt.xlabel(r'$L$', fontsize=13) plt.ylabel(r'$E(\mathbf{\theta}^*) - E_0$', fontsize=13) plt.grid(True, c='0.5', ls=':', lw=0.5) plt.legend(loc='upper right') axes = plt.gca() axes.spines['right'].set_visible(False) axes.spines['top'].set_visible(False) plt.tight_layout() plt.savefig('fig/syk_opt_energy_gap.pdf') plt.show() def draw_fidelity(df, linestyles): n_qubits_list = df.n_qubits.unique() n_qubits_list.sort() for i, n_qubits in enumerate(n_qubits_list): label = f'{n_qubits} Qubits' res = df[df.n_qubits == n_qubits] x, y_mean, y_min, y_max = retrieve_min_and_max(res, 'fidelity') plt.plot(x, y_mean, linestyles[i], linewidth=1.2, alpha=1., markersize=5, label=label) plt.fill_between(x, y_min, y_max, alpha=0.35) # plt.xscale('log') # plt.yscale('log') # plt.xlim(0, 155) plt.xlabel(r'$L$', fontsize=13) plt.ylabel(r'$|\,\langle \psi(\mathbf{\theta^*})\, |\, \phi \rangle\, |^2$', fontsize=13) plt.grid(True, c='0.5', ls=':', lw=0.5) plt.legend(loc='lower right') axes = plt.gca() axes.spines['right'].set_visible(False) axes.spines['top'].set_visible(False) plt.tight_layout() plt.savefig('fig/syk_opt_fidelity.pdf') plt.show() def draw_convergence_speed(df, linestyles): n_qubits_list = df.n_qubits.unique() n_qubits_list.sort() for i, n_qubits in enumerate(n_qubits_list): label = f'{n_qubits} Qubits' res = df[df.n_qubits == n_qubits] x, y_mean, y_min, y_max = retrieve_min_and_max(res, 'hitting_time') plt.plot(x, y_mean, linestyles[i], linewidth=1.2, alpha=1., markersize=5, label=label) plt.fill_between(x, y_min, y_max, alpha=0.35) # plt.yscale('log') # plt.xlim(0, 155) plt.xlabel(r'$L$', fontsize=13) plt.ylabel(r'$t^*$', fontsize=13) plt.grid(True, c='0.5', ls=':', lw=0.5) plt.legend(loc='upper right') axes = plt.gca() axes.spines['right'].set_visible(False) axes.spines['top'].set_visible(False) plt.tight_layout() plt.savefig('fig/syk_opt_convergence.pdf') plt.show() def main(): # Draw L vs. min_loss resdir = Path(f'results_syk_vqe/{datetime.now().strftime("%Y%m%d")}') # datapath = resdir / 'minloss.pkl' datapath = None if datapath: df =
pd.read_pickle(datapath)
pandas.read_pickle
from __future__ import division #brings in Python 3.0 mixed type calculations import numpy as np import os import pandas as pd import sys #find parent directory and import model parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)) sys.path.append(parentddir) from base.uber_model import UberModel, ModelSharedInputs class BeerexInputs(ModelSharedInputs): """ Input class for Beerex """ def __init__(self): """Class representing the inputs for Beerex""" super(BeerexInputs, self).__init__() #self.incorporation_depth = pd.Series([], dtype="float") self.application_rate = pd.Series([], dtype="float") self.application_method = pd.Series([], dtype="object") self.crop_type = pd.Series([], dtype="object") # self.application_units = pd.Series([], dtype="object") self.empirical_residue = pd.Series([], dtype="object") self.empirical_pollen = pd.Series([], dtype="float") self.empirical_nectar = pd.Series([], dtype="float") self.empirical_jelly = pd.Series([], dtype="float") self.adult_contact_ld50 = pd.Series([], dtype="float") self.adult_oral_ld50 = pd.Series([], dtype="float") self.adult_oral_noael = pd.Series([], dtype="float") self.larval_ld50 = pd.Series([], dtype="float") self.larval_noael = pd.Series([], dtype="float") self.log_kow = pd.Series([], dtype="float") self.koc = pd.Series([], dtype="float") self.mass_tree_vegetation = pd.Series([], dtype="float") self.lw1_jelly = pd.Series([], dtype="float") self.lw2_jelly = pd.Series([], dtype="float") self.lw3_jelly = pd.Series([], dtype="float") self.lw4_nectar = pd.Series([], dtype="float") self.lw4_pollen = pd.Series([], dtype="float") self.lw5_nectar = pd.Series([], dtype="float") self.lw5_pollen = pd.Series([], dtype="float") self.ld6_nectar = pd.Series([], dtype="float") self.ld6_pollen = pd.Series([], dtype="float") self.lq1_jelly = pd.Series([], dtype="float") self.lq2_jelly = pd.Series([], dtype="float") self.lq3_jelly = pd.Series([], dtype="float") self.lq4_jelly = pd.Series([], dtype="float") self.aw_cell_nectar = pd.Series([], dtype="float") self.aw_cell_pollen = pd.Series([], dtype="float") self.aw_brood_nectar = pd.Series([], dtype="float") self.aw_brood_pollen = pd.Series([], dtype="float") self.aw_comb_nectar = pd.Series([], dtype="float") self.aw_comb_pollen = pd.Series([], dtype="float") self.aw_fpollen_nectar = pd.Series([], dtype="float") self.aw_fpollen_pollen = pd.Series([], dtype="float") self.aw_fnectar_nectar = pd.Series([], dtype="float") self.aw_fnectar_pollen = pd.Series([], dtype="float") self.aw_winter_nectar = pd.Series([], dtype="float") self.aw_winter_pollen = pd.Series([], dtype="float") self.ad_nectar = pd.Series([], dtype="float") self.ad_pollen = pd.Series([], dtype="float") self.aq_jelly = pd.Series([], dtype="float") class BeerexOutputs(object): """ Output class for Beerex """ def __init__(self): """Class representing the outputs for Beerex""" super(BeerexOutputs, self).__init__() self.out_eec_spray = pd.Series(name="out_eec_spray", dtype="float") self.out_eec_soil = pd.Series(name="out_eec_soil", dtype="float") self.out_eec_seed = pd.Series(name="out_eec_seed", dtype="float") self.out_eec_tree = pd.Series(name="out_eec_tree", dtype="float") self.out_eec = pd.Series(name="out_eec", dtype="float") self.out_lw1_total_dose = pd.Series(name="out_lw1_total_dose", dtype="float") self.out_lw2_total_dose = pd.Series(name="out_lw2_total_dose", dtype="float") self.out_lw3_total_dose = pd.Series(name="out_lw3_total_dose", dtype="float") self.out_lw4_total_dose = 
pd.Series(name="out_lw4_total_dose", dtype="float") self.out_lw5_total_dose = pd.Series(name="out_lw5_total_dose", dtype="float") self.out_ld6_total_dose = pd.Series(name="out_ld6_total_dose", dtype="float") self.out_lq1_total_dose =
pd.Series(name="out_lq1_total_dose", dtype="float")
pandas.Series
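A short sketch of the empty, named, typed pandas.Series pattern used for the Beerex output holders above; the column name comes from the completion, everything else is illustrative.

import pandas as pd

# Empty float Series that will be filled by the model run later.
out_lq1_total_dose = pd.Series([], name="out_lq1_total_dose", dtype="float")
print(out_lq1_total_dose.name, out_lq1_total_dose.dtype)  # out_lq1_total_dose float64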
from __future__ import print_function from authlib.client import OAuth2Session import google.oauth2.credentials import googleapiclient.discovery import google_auth import google_drive from google_auth import build_credentials, get_user_info # /index.py import flask from flask import Flask, request, jsonify, render_template, url_for import os import json import pickle import os.path from flask import Flask, jsonify, render_template, request from io import StringIO import pandas as pd import requests import sheetsFunctions app = flask.Blueprint('graphScrape', __name__) def convertFormat(dfs): payload = [] for df in dfs: each_df_payload = [] columns = df.columns for i in range(len(df)): dictVal = {} for j in range(len(columns)): try: dictVal[str(columns[j])] = int(df[columns[j]][i]) except: dictVal[str(columns[j])] = str(df[columns[j]][i]) each_df_payload.append(dictVal) payload.append({"columns" : list(columns), "data": each_df_payload}) return payload def scraper(link): print("scraping...", link) try: source = requests.get(link).text TESTDATA = StringIO(source) df =
pd.read_csv(TESTDATA, sep=",")
pandas.read_csv
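A minimal sketch of reading CSV text from an in-memory buffer with pandas.read_csv, as the scraper above does with the fetched page source; the CSV content is made up.

import pandas as pd
from io import StringIO

# Parse CSV held in a string buffer instead of a file on disk.
buffer = StringIO("col_a,col_b\n1,2\n3,4\n")
df = pd.read_csv(buffer, sep=",")
print(df.shape)  # (2, 2)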
#!/usr/bin/env python """ Calculating mean sentiment scores over a set time periods. Parameters: infile: str <path-to-images> batch_size: int <batch-size-doc> Usage: sentiment.py --batch_size <batch-size-doc> Example: $ python sentiment.py --batch_size 300 """ # load dependencies from pathlib import Path import pandas as pd import numpy as np import os import spacy from spacytextblob.spacytextblob import SpacyTextBlob import matplotlib.pyplot as plt import argparse # create plot function def plot_func(sentiment_df, window_size): # make rolling mean smooth_sent = sentiment_df.rolling(window_size).mean() # get the dates for the x-xis x = sentiment_df["date"] # create figure plt.figure() # plot the data plt.plot(x, smooth_sent, label="sentiment scores") # title of plot plt.title(f"Sentiment scores: {window_size} days rolling average") # labelling x-axis plt.xlabel("Date") # labelling y-axis plt.ylabel("Sentiment") # rotate x-axis labels plt.xticks(rotation=40) # add legend plt.legend() # save figure plt.savefig(os.path.join("..", "output", f"{window_size}days_sentiment.png"), bbox_inches='tight') # define main function def main(): # initialise argumentparser ap = argparse.ArgumentParser() # define arguments ap.add_argument("-i", "--infile", type = str, required=False, help="Input filename", default="abcnews-date-text.csv") ap.add_argument("-b", "--batch_size", type = int, required=False, help="Batch size for loading data into spacy docs.", default = 500) # parse arguments to args args = vars(ap.parse_args()) # load data data = pd.read_csv(os.path.join("..", "data", args["infile"])) # make publish date date format data["publish_date"] =
pd.to_datetime(data["publish_date"], format="%Y%m%d")
pandas.to_datetime
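A small sketch of pandas.to_datetime with an explicit format string, matching the YYYYMMDD publish dates parsed above; the sample values are invented.

import pandas as pd

# Convert YYYYMMDD strings into proper Timestamps.
publish_date = pd.Series(["20210101", "20210315"])
parsed = pd.to_datetime(publish_date, format="%Y%m%d")
print(parsed.dt.month.tolist())  # [1, 3]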
#!/usr/bin/env python # coding: utf-8 # ### Importing modules # In[1]: import pandas as pd import numpy as np import spacy from textblob import TextBlob from statistics import mean, stdev from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, roc_auc_score, roc_curve, precision_recall_curve from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer from sklearn.naive_bayes import MultinomialNB from sklearn.pipeline import Pipeline import matplotlib.pyplot as plt from sklearn.preprocessing import binarize # ### Loading Data # In[2]: data = pd.read_csv("project_18_dataset_combined.csv") data = data[['label', 'text']] #set display option pd.set_option('display.max_colwidth', None) #make target labels boolean data['label']=data['label'].apply(lambda x: 1 if x == 14 else 0) # In[3]: print(data.shape) print(data.label.value_counts()) data.head() # ### Lemmatizing text # In[4]: nlp = spacy.load('en_core_web_md') data['text_lemmatized'] = data['text'].apply(lambda x: " ".join([words.lemma_ for words in nlp(x)])) # In[5]: data.head() # #### Q: WHAT SIZE EN_CORE_WEB TO USE??? # ### Evaluating effect of lemmatization (ceteris paribus) # In[7]: #train/test split on original and preprocessed data X_train_old, X_test_old, y_train_old, y_test_old = train_test_split(data.text, data.label, test_size=0.2, random_state=88, stratify=data.label) X_train, X_test, y_train, y_test = train_test_split(data.text_lemmatized, data.label, test_size=0.2, random_state=88, stratify=data.label) # In[8]: print(y_train_old.value_counts()) print(y_train.value_counts()) print(y_test_old.value_counts()) print(y_test.value_counts()) # In[9]: #vectorizing train data and transforming test data vectorizer = CountVectorizer() X_train_old_dtm = vectorizer.fit_transform(X_train_old) X_test_old_dtm = vectorizer.transform(X_test_old) X_train_dtm = vectorizer.fit_transform(X_train) X_test_dtm = vectorizer.transform(X_test) # In[10]: #train model on X_train_dtm mnb_old = MultinomialNB() mnb = MultinomialNB() mnb_old.fit(X_train_old_dtm, y_train_old) mnb.fit(X_train_dtm, y_train) # In[11]: #make class prediction for X_test_dtm y_pred_class_old = mnb_old.predict(X_test_old_dtm) y_pred_class = mnb.predict(X_test_dtm) # In[12]: from collections import Counter print(Counter(y_pred_class_old)) print(Counter(y_pred_class)) # In[13]: #comparing confusing matrices print('old model cm:') print(confusion_matrix(y_test_old, y_pred_class_old)) print('new model cm:') print(confusion_matrix(y_test, y_pred_class)) # In[14]: #calculating accuracy, precison print('accuracy score old model:', accuracy_score(y_test_old, y_pred_class_old)) print('accuracy score new model:', accuracy_score(y_test, y_pred_class)) print('-----') print('precision score old model:', precision_score(y_test_old, y_pred_class_old)) print('precision score new model:', precision_score(y_test, y_pred_class)) print('-----') print('recall score old model:', recall_score(y_test_old, y_pred_class_old)) print('recall score new model:', recall_score(y_test, y_pred_class)) # ### Spelling correction # In[15]: #def spelling_corrector(txt): # blob = TextBlob(txt) # return str(blob.correct()) #data['text_spelling_corrected'] = data['text_lemmatized'].apply(lambda x : [spelling_corrector(x)]) #data.head() # In[16]: #data['text_spelling_corrected2'] = data['text_lemmatized'].apply(lambda x: ' '.join(TextBlob(x).correct())) #data.head # ### Evaluating effect of spell corrector # 
In[ ]: # ### Effect of random states (ceteris paribus) # In[17]: #make model with range of random states in train/test split random_state_range = range(0, 1000) rs_scores = [] for rs in random_state_range: vectorizer = CountVectorizer() mnb = MultinomialNB() X_train, X_test, y_train, y_test = train_test_split(data.text_lemmatized, data.label, random_state=rs, test_size=0.2, stratify=data.label) X_train_dtm = vectorizer.fit_transform(X_train) X_test_dtm = vectorizer.transform(X_test) mnb.fit(X_train_dtm, y_train) y_pred_class = mnb.predict(X_test_dtm) rs_scores.append(precision_score(y_test, y_pred_class)) # In[18]: #calculating mean precision and standard deviation print('mean precision:', mean(rs_scores)) print('st.dev. of mean prec:', stdev(rs_scores)) #make plot plt.plot(random_state_range, rs_scores) plt.xlabel('Random state value') plt.ylabel('Testing precision') plt.grid(True) # ### Effect of test size (ceteris paribus) # In[19]: #make model with varying test sizes in train/test split test_size_range = np.linspace(0.05,0.5,91) test_size_scores = [] for ts in test_size_range: vectorizer = CountVectorizer() mnb = MultinomialNB() X_train, X_test, y_train, y_test = train_test_split(data.text_lemmatized, data.label, test_size=ts, random_state=88, stratify=data.label) X_train_dtm = vectorizer.fit_transform(X_train) X_test_dtm = vectorizer.transform(X_test) mnb.fit(X_train_dtm, y_train) y_pred_class = mnb.predict(X_test_dtm) test_size_scores.append(precision_score(y_test, y_pred_class)) # In[20]: #calculating mean precision and standard deviation print('mean precision:', mean(test_size_scores)) print('st.dev. of mean prec:', stdev(test_size_scores)) #make plot plt.plot(test_size_range, test_size_scores) plt.xlabel('Test size value') plt.ylabel('Testing precision') plt.grid(True) # ### Hyperparameter tuning # In[21]: #train/test splitting X_train, X_test, y_train, y_test = train_test_split(data.text_lemmatized, data.label, test_size=0.2, random_state=88, stratify=data.label) # In[22]: #making pipeline pipeline = Pipeline([ ('vectorizer', CountVectorizer()), ('classifier', MultinomialNB()) ]) # ##### Q: include ('tfidf', TfidfTransformer()) in pipeline??? 
# In[23]: #grid = { #'vectorizer__strip_accents': [None, 'ascii', 'unicode'], #works #'vectorizer__lowercase': [True, False], #works #'vectorizer__ngram_range': [(1, 1), (1, 2), (1, 3)], #works #'vectorizer__stop_words': [None, 'english'], #works #'vectorizer__max_df': [1.0, 0.9, 0.8, 0.7, 0.6, 0.5], #works #'vectorizer__min_df': [1, 0.001, 0.002, 0.005, 0.01, 0.02], #works #'vectorizer__max_features': [None, 1, 10, 100, 1000, 10000], #works #'classifier__alpha': [0.1, 0.5, 1.0, 2.0], #works #'classifier__fit_prior': [True, False], #works #'classifier__class_prior': [[0.1, 0.9], [0.1, 0.8], [0.2, 0.9], [0.2, 0.8]], #works #'tfidf__norm': ['l1', 'l2'], #works #'tfidf__use_idf': [True, False], #works #'tfidf__smooth_idf': [True, False], #works #'tfidf__sublinear_tf': [True, False], #works #} #grid_search = GridSearchCV(pipeline, param_grid=grid, scoring='precision', cv=10) #grid_search.fit(X, y) #print("-----------") #print(grid_search.best_score_) #print(grid_search.best_params_) # In[24]: grid = {} grid_search = GridSearchCV(pipeline, param_grid=grid, scoring='precision', cv=10) grid_search.fit(data.text_lemmatized, data.label) print(grid_search.best_score_) print(grid_search.best_params_) results = pd.DataFrame(grid_search.cv_results_) results[['params', 'mean_test_score','std_test_score']] # ### Evaluation between models with default and tuned parameters # In[25]: #calculating accuracy, precison and roc_auc between models with default and tuned parameters #set best parameters in pipeline for comparison pipeline_old = Pipeline([ ('vectorizer', CountVectorizer()), ('classifier', MultinomialNB()) ]) pipeline = Pipeline([ ('vectorizer', CountVectorizer()), ('classifier', MultinomialNB()) ]) model_old = pipeline_old.fit(X_train, y_train) model = pipeline.fit(X_train, y_train) y_pred_class_old = model_old.predict(X_test) y_pred_class = model.predict(X_test) print('accuracy score before tuning model:', accuracy_score(y_test, y_pred_class_old)) print('accuracy score after tuning:', accuracy_score(y_test, y_pred_class)) print('-----') print('precision score before tuning:', precision_score(y_test, y_pred_class_old)) print('precision score after tuning:', precision_score(y_test, y_pred_class)) print('-----') print('recall score before tuning:', recall_score(y_test, y_pred_class_old)) print('recall score after tuning:', recall_score(y_test, y_pred_class)) # In[26]: print(Counter(y_test)) print(Counter(y_pred_class_old)) print(Counter(y_pred_class)) # In[27]: #comparing confusing matrices print('old model cm:') print(confusion_matrix(y_test, y_pred_class_old)) print('new model cm:') print(confusion_matrix(y_test, y_pred_class)) # #### Q: Crossvalidation on final model??? # ### Examining results # In[28]: #see false positive comments false_positives =
pd.DataFrame({'false_positives': X_test[(y_pred_class==1) & (y_test==0)]})
pandas.DataFrame
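A compact sketch of the boolean-mask-into-DataFrame pattern completed above for collecting false positives; the toy labels and texts are assumptions, not project data.

import pandas as pd

# Keep only items predicted positive but actually negative.
X_test = pd.Series(["spam?", "hello", "win $$$", "meeting"])
y_test = pd.Series([0, 0, 1, 0])
y_pred_class = pd.Series([1, 0, 1, 1])
false_positives = pd.DataFrame({"false_positives": X_test[(y_pred_class == 1) & (y_test == 0)]})
print(false_positives)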
import numpy as np from scipy import stats import pandas as pd __all__ = ["n_way_anova"] def n_way_anova(df_f, groups_column, score_column): factors = np.unique(df_f[groups_column]) print(factors) results =
pd.DataFrame(columns=factors, index=factors)
pandas.DataFrame
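A minimal sketch of building the square, label-indexed results table that the n_way_anova stub above starts with; the factor labels are illustrative.

import numpy as np
import pandas as pd

# Empty (NaN-filled) square table keyed by group labels on both axes.
factors = np.unique(["A", "B", "A", "C"])
results = pd.DataFrame(columns=factors, index=factors)
print(results.shape)  # (3, 3)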
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import abc import sys import copy import time import datetime import importlib from abc import ABC from pathlib import Path from typing import Iterable, Type from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor import fire import requests import numpy as np import pandas as pd from tqdm import tqdm from loguru import logger from yahooquery import Ticker from dateutil.tz import tzlocal from qlib.utils import code_to_fname, fname_to_code CUR_DIR = Path(__file__).resolve().parent sys.path.append(str(CUR_DIR.parent.parent)) from data_collector.utils import get_calendar_list, get_hs_stock_symbols, get_us_stock_symbols INDEX_BENCH_URL = "http://push2his.eastmoney.com/api/qt/stock/kline/get?secid=1.{index_code}&fields1=f1%2Cf2%2Cf3%2Cf4%2Cf5&fields2=f51%2Cf52%2Cf53%2Cf54%2Cf55%2Cf56%2Cf57%2Cf58&klt=101&fqt=0&beg={begin}&end={end}" REGION_CN = "CN" REGION_US = "US" class YahooData: START_DATETIME = pd.Timestamp("2000-01-01") HIGH_FREQ_START_DATETIME = pd.Timestamp(datetime.datetime.now() - pd.Timedelta(days=5 * 6)) END_DATETIME = pd.Timestamp(datetime.datetime.now() + pd.Timedelta(days=1)) INTERVAL_1min = "1min" INTERVAL_1d = "1d" def __init__( self, timezone: str = None, start=None, end=None, interval="1d", delay=0, show_1min_logging: bool = False, ): """ Parameters ---------- timezone: str The timezone where the data is located delay: float time.sleep(delay), default 0 interval: str freq, value from [1min, 1d], default 1min start: str start datetime, default None end: str end datetime, default None show_1min_logging: bool show 1min logging, by default False; if True, there may be many warning logs """ self._timezone = tzlocal() if timezone is None else timezone self._delay = delay self._interval = interval self._show_1min_logging = show_1min_logging self.start_datetime = pd.Timestamp(str(start)) if start else self.START_DATETIME self.end_datetime = min(pd.Timestamp(str(end)) if end else self.END_DATETIME, self.END_DATETIME) if self._interval == self.INTERVAL_1min: self.start_datetime = max(self.start_datetime, self.HIGH_FREQ_START_DATETIME) elif self._interval == self.INTERVAL_1d: pass else: raise ValueError(f"interval error: {self._interval}") # using for 1min self._next_datetime = self.convert_datetime(self.start_datetime.date() +
pd.Timedelta(days=1)
pandas.Timedelta
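A short sketch of date arithmetic with pandas.Timedelta, as used above to step the 1min-data window forward by one day; the start date is arbitrary.

import pandas as pd

# Move a timestamp forward by exactly one day.
start = pd.Timestamp("2024-01-01")
next_day = start + pd.Timedelta(days=1)
print(next_day)  # 2024-01-02 00:00:00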
# # created by <NAME> (IBSM, Freiburg) # # import cupy as cp import cupyx as cpx import cudf import cugraph import anndata import numpy as np import pandas as pd import scipy import math from scipy import sparse from typing import Any, Union, Optional import warnings from scipy.sparse import issparse from cuml.linear_model import LinearRegression from cuml.preprocessing import StandardScaler class cunnData: """ The cunnData objects can be used as an AnnData replacement for the inital preprocessing of single cell Datasets. It replaces some of the most common preprocessing steps within scanpy for annData objects. It can be initalized with a preexisting annData object or with a countmatrix and seperate Dataframes for var and obs. Index of var will be used as gene_names. Initalization with an AnnData object is advised. """ shape = tuple nnz = int genes = cudf.Series uns = {} def __init__( self, X: Optional[Union[np.ndarray,sparse.spmatrix, cp.array, cp.sparse.csr_matrix]] = None, obs: Optional[pd.DataFrame] = None, var: Optional[pd.DataFrame] = None, adata: Optional[anndata.AnnData] = None): if adata: if not issparse(adata.X): inter = scipy.sparse.csr_matrix(adata.X) self.X = cp.sparse.csr_matrix(inter, dtype=cp.float32) del inter else: self.X = cp.sparse.csr_matrix(adata.X, dtype=cp.float32) self.shape = self.X.shape self.nnz = self.X.nnz self.obs = adata.obs.copy() self.var = adata.var.copy() self.uns = adata.uns.copy() else: if not issparse(X): inter = scipy.sparse.csr_matrix(X) self.X = cp.sparse.csr_matrix(inter, dtype=cp.float32) del inter else: self.X = cp.sparse.csr_matrix(X, dtype=cp.float32) self.shape = self.X.shape self.nnz = self.X.nnz self.obs = obs self.var = var def to_AnnData(self): """ Takes the cunnData object and creates an AnnData object Returns ------- annData object """ adata = anndata.AnnData(self.X.get()) adata.obs = self.obs.copy() adata.var = self.var.copy() adata.uns = self.uns.copy() return adata def calc_gene_qc(self, batchsize = None): """ Filters out genes that expressed in less than a specified number of cells Parameters ---------- batchsize: int (default: None) Number of rows to be processed together This can be adjusted for performance to trade-off memory use. Returns ------- updated `.var` with `n_cells` and `n_counts` filtered cunndata object inplace for genes less than the threshhold """ if batchsize: pass n_batches = math.ceil(self.X.shape[0] / batchsize) n_counts = cp.zeros(shape=(n_batches,self.X.shape[1])) n_cells= cp.zeros(shape=(n_batches,self.X.shape[1])) for batch in range(n_batches): batch_size = batchsize start_idx = batch * batch_size stop_idx = min(batch * batch_size + batch_size, self.X.shape[0]) arr_batch = self.X[start_idx:stop_idx] arr_batch = arr_batch.tocsc() n_cells_batch = cp.diff(arr_batch.indptr).ravel() n_cells[batch,:]=n_cells_batch n_counts_batch = arr_batch.sum(axis = 0).ravel() n_counts[batch,:]=n_counts_batch self.var["n_cells"] = cp.asnumpy(n_cells.sum(axis= 0).ravel()) self.var["n_counts"] = cp.asnumpy(n_counts.sum(axis= 0).ravel()) else: self.X = self.X.tocsc() n_cells = cp.diff(self.X.indptr).ravel() n_counts = self.X.sum(axis = 0).ravel() self.X = self.X.tocsr() self.var["n_cells"] = cp.asnumpy(n_cells) self.var["n_counts"] = cp.asnumpy(n_counts) def filter_genes(self, qc_var = "n_cells", min_count = None, max_count = None, batchsize = None, verbose =True): """ Filter genes that have greater than a max number of genes or less than a minimum number of a feature in a given `.var` columns. 
Can so far only be used for numerical columns. You can run this function on 'n_cells' or 'n_counts' with a previous columns in `.var`. Parameters ---------- qc_var: str (default: n_cells) column in `.var` with numerical entries to filter against min_count : float Lower bound on number of a given feature to keep gene max_count : float Upper bound on number of a given feature to keep gene batchsize: int (default: None) only needed if you run `filter_genes` before `calculate_qc` or `calc_gene_qc` on 'n_genes' or 'n_counts'. Number of rows to be processed together. This can be adjusted for performance to trade-off memory use. verbose: bool (default: True) Print number of discarded genes Returns ------- a filtered cunnData object inplace """ if qc_var in self.var.keys(): if min_count is not None and max_count is not None: thr=np.where((self.var[qc_var] <= max_count) & (min_count <= self.var[qc_var]))[0] elif min_count is not None: thr=np.where(self.var[qc_var] >= min_count)[0] elif max_count is not None: thr=np.where(self.var[qc_var] <= max_count)[0] if verbose: print(f"filtered out {self.var.shape[0]-thr.shape[0]} genes based on {qc_var}") self.X = self.X.tocsr() self.X = self.X[:, thr] self.shape = self.X.shape self.nnz = self.X.nnz self.X = self.X.tocsr() self.var = self.var.iloc[cp.asnumpy(thr)] elif qc_var in ["n_cells","n_counts"]: self.calc_gene_qc(batch_size = batch_size) if min_count is not None and max_count is not None: thr=np.where((self.var[qc_var] <= max_count) & (min_count <= self.var[qc_var]))[0] elif min_count is not None: thr=np.where(self.var[qc_var] >= min_count)[0] elif max_count is not None: thr=np.where(self.var[qc_var] <= max_count)[0] if verbose: print(f"filtered out {self.var.shape[0]-thr.shape[0]} genes based on {qc_var}") self.X = self.X.tocsr() self.X = self.X[:, thr] self.shape = self.X.shape self.nnz = self.X.nnz self.X = self.X.tocsr() self.var = self.var.iloc[cp.asnumpy(thr)] else: print(f"please check qc_var") def caluclate_qc(self, qc_vars = None, batchsize = None): """ Calculates basic qc Parameters. Calculates number of genes per cell (n_genes) and number of counts per cell (n_counts). Loosly based on calculate_qc_metrics from scanpy [Wolf et al. 2018]. Updates .obs with columns with qc data. Parameters ---------- qc_vars: str, list (default: None) Keys for boolean columns of .var which identify variables you could want to control for (e.g. Mito). Run flag_gene_family first batchsize: int (default: None) Number of rows to be processed together. This can be adjusted for performance to trade-off memory use. 
Returns ------- adds the following columns in .obs n_counts number of counts per cell n_genes number of genes per cell for qc_var in qc_vars total_qc_var number of counts per qc_var (e.g total counts mitochondrial genes) percent_qc_vars Proportion of counts of qc_var (percent of counts mitochondrial genes) """ if batchsize: n_batches = math.ceil(self.X.shape[0] / batchsize) n_genes = [] n_counts = [] if "n_cells" not in self.var.keys() or "n_counts" not in self.var.keys(): self.calc_gene_qc(batchsize = batchsize) if qc_vars: if type(qc_vars) is str: qc_var_total = [] elif type(qc_vars) is list: qc_var_total = [] for i in range(len(qc_vars)): my_list = [] qc_var_total.append(my_list) for batch in range(n_batches): batch_size = batchsize start_idx = batch * batch_size stop_idx = min(batch * batch_size + batch_size, self.X.shape[0]) arr_batch = self.X[start_idx:stop_idx] n_genes.append(cp.diff(arr_batch.indptr).ravel().get()) n_counts.append(arr_batch.sum(axis=1).ravel().get()) if qc_vars: if type(qc_vars) is str: qc_var_total.append(arr_batch[:,self.var[qc_vars]].sum(axis=1).ravel().get()) elif type(qc_vars) is list: for i in range(len(qc_vars)): qc_var_total[i].append(arr_batch[:,self.var[qc_vars[i]]].sum(axis=1).ravel().get()) self.obs["n_genes"] = np.concatenate(n_genes) self.obs["n_counts"] = np.concatenate(n_counts) if qc_vars: if type(qc_vars) is str: self.obs["total_"+qc_vars] = np.concatenate(qc_var_total) self.obs["percent_"+qc_vars] =self.obs["total_"+qc_vars]/self.obs["n_counts"]*100 elif type(qc_vars) is list: for i in range(len(qc_vars)): self.obs["total_"+qc_vars[i]] = np.concatenate(qc_var_total[i]) self.obs["percent_"+qc_vars[i]] =self.obs["total_"+qc_vars[i]]/self.obs["n_counts"]*100 else: self.obs["n_genes"] = cp.asnumpy(cp.diff(self.X.indptr)).ravel() self.obs["n_counts"] = cp.asnumpy(self.X.sum(axis=1)).ravel() if "n_cells" not in self.var.keys() or "n_counts" not in self.var.keys(): self.calc_gene_qc(batchsize = None) if qc_vars: if type(qc_vars) is str: self.obs["total_"+qc_vars]=cp.asnumpy(self.X[:,self.var[qc_vars]].sum(axis=1)) self.obs["percent_"+qc_vars]=self.obs["total_"+qc_vars]/self.obs["n_counts"]*100 elif type(qc_vars) is list: for qc_var in qc_vars: self.obs["total_"+qc_var]=cp.asnumpy(self.X[:,self.var[qc_var]].sum(axis=1)) self.obs["percent_"+qc_var]=self.obs["total_"+qc_var]/self.obs["n_counts"]*100 def flag_gene_family(self, gene_family_name = str, gene_family_prefix = None, gene_list= None): """ Flags a gene or gene_familiy in .var with boolean. (e.g all mitochondrial genes). Please only choose gene_family prefix or gene_list Parameters ---------- gene_family_name: str name of colums in .var where you want to store informationa as a boolean gene_family_prefix: str prefix of the gene familiy (eg. mt- for all mitochondrial genes in mice) gene_list: list list of genes to flag in .var Returns ------- adds the boolean column in .var """ if gene_family_prefix: self.var[gene_family_name] = cp.asnumpy(self.var.index.str.startswith(gene_family_prefix)).ravel() if gene_list: self.var[gene_family_name] = cp.asnumpy(self.var.index.isin(gene_list)).ravel() def filter_cells(self, qc_var, min_count=None, max_count=None, batchsize = None,verbose=True): """ Filter cells that have greater than a max number of genes or less than a minimum number of a feature in a given .obs columns. Can so far only be used for numerical columns. It is recommended to run `calculated_qc` before using this function. 
You can run this function on n_genes or n_counts before running `calculated_qc`. Parameters ---------- qc_var: str column in .obs with numerical entries to filter against min_count : float Lower bound on number of a given feature to keep cell max_count : float Upper bound on number of a given feature to keep cell batchsize: int (default: None) only needed if you run `filter_cells` before `calculate_qc` on 'n_genes' or 'n_counts'. Number of rows to be processed together. This can be adjusted for performance to trade-off memory use. verbose: bool (default: True) Print number of discarded cells Returns ------- a filtered cunnData object inplace """ if qc_var in self.obs.keys(): inter = np.array if min_count is not None and max_count is not None: inter=np.where((self.obs[qc_var] < max_count) & (min_count< self.obs[qc_var]))[0] elif min_count is not None: inter=np.where(self.obs[qc_var] > min_count)[0] elif max_count is not None: inter=np.where(self.obs[qc_var] < max_count)[0] else: print(f"Please specify a cutoff to filter against") if verbose: print(f"filtered out {self.obs.shape[0]-inter.shape[0]} cells") self.X = self.X[inter,:] self.shape = self.X.shape self.nnz = self.X.nnz self.obs = self.obs.iloc[inter] elif qc_var in ['n_genes','n_counts']: print(f"Running calculate_qc for 'n_genes' or 'n_counts'") self.caluclate_qc(batchsize=batchsize) inter = np.array if min_count is not None and max_count is not None: inter=np.where((self.obs[qc_var] < max_count) & (min_count< self.obs[qc_var]))[0] elif min_count is not None: inter=np.where(self.obs[qc_var] > min_count)[0] elif max_count is not None: inter=np.where(self.obs[qc_var] < max_count)[0] else: print(f"Please specify a cutoff to filter against") if verbose: print(f"filtered out {self.obs.shape[0]-inter.shape[0]} cells") self.X = self.X[inter,:] self.shape = self.X.shape self.nnz = self.X.nnz self.obs = self.obs.iloc[inter] else: print(f"Please check qc_var.") def normalize_total(self, target_sum): """ Normalizes rows in matrix so they sum to `target_sum` Parameters ---------- target_sum : int Each row will be normalized to sum to this value Returns ------- a normalized sparse Matrix to a specified target sum """ csr_arr = self.X mul_kernel = cp.RawKernel(r''' extern "C" __global__ void mul_kernel(const int *indptr, float *data, int nrows, int tsum) { int row = blockDim.x * blockIdx.x + threadIdx.x; if(row >= nrows) return; float scale = 0.0; int start_idx = indptr[row]; int stop_idx = indptr[row+1]; for(int i = start_idx; i < stop_idx; i++) scale += data[i]; if(scale > 0.0) { scale = tsum / scale; for(int i = start_idx; i < stop_idx; i++) data[i] *= scale; } } ''', 'mul_kernel') mul_kernel((math.ceil(csr_arr.shape[0] / 32.0),), (32,), (csr_arr.indptr, csr_arr.data, csr_arr.shape[0], int(target_sum))) self.X = csr_arr def log1p(self): """ Calculated the natural logarithm of one plus the sparse marttix, element-wise inlpace in cunnData object. """ self.X = self.X.log1p() self.uns["log1p"] = {"base": None} def highly_varible_genes(self,min_mean = 0.0125,max_mean =3,min_disp= 0.5,max_disp =np.inf, n_top_genes = None, flavor = 'seurat', n_bins = 20, batch_key = None): """ Annotate highly variable genes. Expects logarithmized data. Reimplentation of scanpy's function. Depending on flavor, this reproduces the R-implementations of Seurat, Cell Ranger. 
For these dispersion-based methods, the normalized dispersion is obtained by scaling with the mean and standard deviation of the dispersions for genes falling into a given bin for mean expression of genes. This means that for each bin of mean expression, highly variable genes are selected. Parameters ---------- min_mean: float (default: 0.0125) If n_top_genes unequals None, this and all other cutoffs for the means and the normalized dispersions are ignored. max_mean: float (default: 3) If n_top_genes unequals None, this and all other cutoffs for the means and the normalized dispersions are ignored. min_disp: float (default: 0.5) If n_top_genes unequals None, this and all other cutoffs for the means and the normalized dispersions are ignored. max_disp: float (default: inf) If n_top_genes unequals None, this and all other cutoffs for the means and the normalized dispersions are ignored. n_top_genes: int (defualt: None) Number of highly-variable genes to keep. n_bins : int (default: 20) Number of bins for binning the mean gene expression. Normalization is done with respect to each bin. If just a single gene falls into a bin, the normalized dispersion is artificially set to 1. flavor : {‘seurat’, ‘cell_ranger’} (default: 'seurat') Choose the flavor for identifying highly variable genes. For the dispersion based methods in their default workflows, Seurat passes the cutoffs whereas Cell Ranger passes n_top_genes. batch_key: If specified, highly-variable genes are selected within each batch separately and merged. Returns ------- upates .var with the following fields highly_variablebool boolean indicator of highly-variable genes means means per gene dispersions dispersions per gene dispersions_norm normalized dispersions per gene """ if batch_key is None: df = _highly_variable_genes_single_batch( self.X.tocsc(), min_disp=min_disp, max_disp=max_disp, min_mean=min_mean, max_mean=max_mean, n_top_genes=n_top_genes, n_bins=n_bins, flavor=flavor) else: self.obs[batch_key].astype("category") batches = self.obs[batch_key].cat.categories df = [] genes = self.var.index.to_numpy() for batch in batches: inter_matrix = self.X[np.where(self.obs[batch_key]==batch)[0],].tocsc() thr_org = cp.diff(inter_matrix.indptr).ravel() thr = cp.where(thr_org >= 1)[0] thr_2 = cp.where(thr_org < 1)[0] inter_matrix = inter_matrix[:, thr] thr = thr.get() thr_2 = thr_2.get() inter_genes = genes[thr] other_gens_inter = genes[thr_2] hvg_inter = _highly_variable_genes_single_batch(inter_matrix, min_disp=min_disp, max_disp=max_disp, min_mean=min_mean, max_mean=max_mean, n_top_genes=n_top_genes, n_bins=n_bins, flavor=flavor) hvg_inter["gene"] = inter_genes missing_hvg = pd.DataFrame( np.zeros((len(other_gens_inter), len(hvg_inter.columns))), columns=hvg_inter.columns, ) missing_hvg['highly_variable'] = missing_hvg['highly_variable'].astype(bool) missing_hvg['gene'] = other_gens_inter hvg = hvg_inter.append(missing_hvg, ignore_index=True) idxs = np.concatenate((thr, thr_2)) hvg = hvg.loc[np.argsort(idxs)] df.append(hvg) df = pd.concat(df, axis=0) df['highly_variable'] = df['highly_variable'].astype(int) df = df.groupby('gene').agg( dict( means=np.nanmean, dispersions=np.nanmean, dispersions_norm=np.nanmean, highly_variable=np.nansum, ) ) df.rename( columns=dict(highly_variable='highly_variable_nbatches'), inplace=True ) df['highly_variable_intersection'] = df['highly_variable_nbatches'] == len( batches ) if n_top_genes is not None: # sort genes by how often they selected as hvg within each batch and # break ties with normalized 
dispersion across batches df.sort_values( ['highly_variable_nbatches', 'dispersions_norm'], ascending=False, na_position='last', inplace=True, ) df['highly_variable'] = False df.highly_variable.iloc[:n_top_genes] = True df = df.loc[genes] else: df = df.loc[genes] dispersion_norm = df.dispersions_norm.values dispersion_norm[np.isnan(dispersion_norm)] = 0 # similar to Seurat gene_subset = np.logical_and.reduce( ( df.means > min_mean, df.means < max_mean, df.dispersions_norm > min_disp, df.dispersions_norm < max_disp, ) ) df['highly_variable'] = gene_subset self.var["highly_variable"] =df['highly_variable'].values self.var["means"] = df['means'].values self.var["dispersions"]=df['dispersions'].values self.var["dispersions_norm"]=df['dispersions_norm'].values self.uns['hvg'] = {'flavor': flavor} if batch_key is not None: self.var['highly_variable_nbatches'] = df[ 'highly_variable_nbatches' ].values self.var['highly_variable_intersection'] = df[ 'highly_variable_intersection' ].values def filter_highly_variable(self): """ Filters the cunndata object for highly_variable genes. Run highly_varible_genes first. Returns ------- updates cunndata object to only contain highly variable genes. """ if "highly_variable" in self.var.keys(): thr = np.where(self.var["highly_variable"] == True)[0] self.X =self.X.tocsc() self.X = self.X[:, thr] self.shape = self.X.shape self.nnz = self.X.nnz self.var = self.var.iloc[cp.asnumpy(thr)] else: print(f"Please calculate highly variable genes first") def regress_out(self, keys, verbose=False): """ Use linear regression to adjust for the effects of unwanted noise and variation. Parameters ---------- adata The annotated data matrix. keys Keys for numerical observation annotation on which to regress on. verbose : bool Print debugging information Returns ------- updates cunndata object with the corrected data matrix """ if type(self.X) is not cpx.scipy.sparse.csc.csc_matrix: self.X = self.X.tocsc() dim_regressor= 2 if type(keys)is list: dim_regressor = len(keys)+1 regressors = cp.ones((self.X.shape[0]*dim_regressor)).reshape((self.X.shape[0], dim_regressor), order="F") if dim_regressor==2: regressors[:, 1] = cp.array(self.obs[keys]).ravel() else: for i in range(dim_regressor-1): regressors[:, i+1] = cp.array(self.obs[keys[i]]).ravel() outputs = cp.empty(self.X.shape, dtype=self.X.dtype, order="F") if self.X.shape[0] < 100000 and cpx.scipy.sparse.issparse(self.X): self.X = self.X.todense() for i in range(self.X.shape[1]): if verbose and i % 500 == 0: print("Regressed %s out of %s" %(i, self.X.shape[1])) X = regressors y = self.X[:,i] outputs[:, i] = _regress_out_chunk(X, y) self.X = outputs def scale(self, max_value=10): """ Scales matrix to unit variance and clips values Parameters ---------- normalized : cupy.ndarray or numpy.ndarray of shape (n_cells, n_genes) Matrix to scale max_value : int After scaling matrix to unit variance, values will be clipped to this number of std deviations. Return ------ updates cunndata object with a scaled cunndata.X """ if type(self.X) is not cp._core.core.ndarray: print("densifying _.X") self.X = self.X.toarray() X = StandardScaler().fit_transform(self.X) self.X = cp.clip(X,a_max=max_value) def scale_2(self, max_value=10): """ Scales matrix to unit variance and clips values Parameters ---------- max_value : int After scaling matrix to unit variance, values will be clipped to this number of std deviations. 
Return ------ updates cunndata object with a scaled cunndata.X """ if type(self.X) is not cp._core.core.ndarray: print("densifying _.X") X = self.X.toarray() else: X =self.X mean = X.mean(axis=0) X -= mean del mean stddev = cp.sqrt(X.var(axis=0)) X /= stddev del stddev self.X = cp.clip(X,a_max=max_value) def _regress_out_chunk(X, y): """ Performs a data_cunk.shape[1] number of local linear regressions, replacing the data in the original chunk w/ the regressed result. Parameters ---------- X : cupy.ndarray of shape (n_cells, 3) Matrix of regressors y : cupy.sparse.spmatrix of shape (n_cells,) Sparse matrix containing a single column of the cellxgene matrix Returns ------- dense_mat : cupy.ndarray of shape (n_cells,) Adjusted column """ if cp.sparse.issparse(y): y = y.todense() lr = LinearRegression(fit_intercept=False, output_type="cupy") lr.fit(X, y, convert_dtype=True) return y.reshape(y.shape[0],) - lr.predict(X).reshape(y.shape[0]) def _highly_variable_genes_single_batch(my_mat,min_mean = 0.0125,max_mean =3,min_disp= 0.5,max_disp =np.inf, n_top_genes = None, flavor = 'seurat', n_bins = 20): """\ See `highly_variable_genes`. Returns ------- A DataFrame that contains the columns `highly_variable`, `means`, `dispersions`, and `dispersions_norm`. """ if flavor == 'seurat': my_mat = my_mat.expm1() mean = (my_mat.sum(axis =0)/my_mat.shape[0]).ravel() mean[mean == 0] = 1e-12 my_mat.data **= 2 inter = (my_mat.sum(axis =0)/my_mat.shape[0]).ravel() var = inter - mean ** 2 disp = var/mean if flavor == 'seurat': # logarithmized mean as in Seurat disp[disp == 0] = np.nan disp = np.log(disp) mean = np.log1p(mean) df = pd.DataFrame() mean = mean.get() disp = disp.get() df['means'] = mean df['dispersions'] = disp if flavor == 'seurat': df['mean_bin'] =
pd.cut(df['means'], bins=n_bins)
pandas.cut
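A minimal sketch of the pandas.cut binning used above to group genes by mean expression before normalising dispersions; the random data stands in for the real matrix.

import numpy as np
import pandas as pd

# Assign each mean to one of n_bins equal-width intervals.
df = pd.DataFrame({"means": np.random.default_rng(0).random(100)})
n_bins = 20
df["mean_bin"] = pd.cut(df["means"], bins=n_bins)
print(df["mean_bin"].value_counts().head())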
""" ABSOLUTELY NOT TESTED """ import time import os import datetime from collections import namedtuple import numpy as np import pandas as pd import sklearn.preprocessing import torch import torch.nn as nn import torch.optim as optim from dateutil.relativedelta import relativedelta from simple_ts_forecast.models import Model SavedFit = namedtuple('SavedFit', 'filename date_test_start datetime_fit mape') def r2_score(y_test, y_pred, torch_order=False): if torch_order: y_test, y_pred = y_pred, y_test if isinstance(y_test, np.ndarray) and isinstance(y_pred, np.ndarray): return 1 - np.mean((y_test - y_pred) ** 2) / np.mean((y_test - np.mean(y_test)) ** 2) elif isinstance(y_test, torch.Tensor) and isinstance(y_pred, torch.Tensor): return 1 - torch.mean((y_test - y_pred) ** 2).item() / torch.mean((y_test - torch.mean(y_test)) ** 2).item() else: raise TypeError(f"input_ array must be np.ndarray or torch.Tensor, got {type(y_test)}, {type(y_pred)}") def mean_absolute_percent_error(y_test, y_pred, torch_order=False): if torch_order: y_test, y_pred = y_pred, y_test if isinstance(y_test, np.ndarray) and isinstance(y_pred, np.ndarray): return np.mean(np.abs((y_test - y_pred) / y_test)) * 100 elif isinstance(y_test, torch.Tensor) and isinstance(y_pred, torch.Tensor): return torch.mean(torch.abs((y_test - y_pred) / y_test)) * 100 else: raise TypeError(f"input_ array must be np.ndarray or torch.Tensor, got {type(y_test)}, {type(y_pred)}") class LSTM(Model): """Use this class as another classic simple_ts_forecast""" class _Model(nn.Module): """PyTorch RNN model""" def __init__(self, input_size, hidden_size, output_size, device): super().__init__() self.device = device self.input_size = input_size self.hidden_size = hidden_size self.output_size = output_size self.lstm_1 = nn.LSTMCell(self.input_size, self.hidden_size) self.lstm_2 = nn.LSTMCell(self.hidden_size, self.hidden_size) self.dropout_1 = nn.Dropout(p=0.5) self.dropout_2 = nn.Dropout(p=0.1) self.linear = nn.Linear(self.hidden_size, self.input_size) self.out_linear = nn.Linear(self.input_size, self.output_size) def forward(self, x, future=1): x = x.to(self.device) outputs = [] # reset the state of LSTM # the state is kept till the end of the sequence h_t1, c_t1 = self.init_hidden(x.size(0)) h_t2, c_t2 = self.init_hidden(x.size(0)) for input_t in x.split(1, dim=1): h_t1, c_t1 = self.lstm_1(input_t.squeeze(1), (h_t1, c_t1)) h_t1 = self.dropout_1(h_t1) h_t2, c_t2 = self.lstm_2(h_t1, (h_t2, c_t2)) output = self.linear(self.dropout_2(h_t2)) outputs += [self.out_linear(output)] for i in range(future - 1): h_t1, c_t1 = self.lstm_1(output, (h_t1, c_t1)) h_t1 = self.dropout_1(h_t1) h_t2, c_t2 = self.lstm_2(h_t1, (h_t2, c_t2)) output = self.linear(self.dropout_2(h_t2)) outputs += [self.out_linear(output)] outputs = torch.stack(outputs, 1).squeeze(2) return outputs def init_hidden(self, batch_size): h_t = torch.zeros(batch_size, self.hidden_size, dtype=torch.float32).to(self.device) c_t = torch.zeros(batch_size, self.hidden_size, dtype=torch.float32).to(self.device) return h_t, c_t def __init__(self, n=14, window=35, lr=0.005, sched_step_size=10, sched_gamma=0.5, model_params=None, model_input_size=1, model_hidden_size=300, model_output_size=1, scaler=None, device=None, gpu_num=0, train_set_prop=0.9, batch_size=175, n_epochs=30, models_dir='lstm_saves/ts_mnpz/', days_between_fits=31, n_fits=3, search_window=14, post_process_coef=0.75): """Init model Args: n (int, optional): future days num to predict. Defaults to 14. 
window (int, optional): window of past data from predict. Defaults to 35. lr (float, optional): learning rate of optimizer. Defaults to 0.005. sched_step_size (int, optional): lr_scheduler.StepLR step size. Defaults to 10. sched_gamma (float, optional): lr_scheduler.StepLR gamma. Defaults to 0.5. model_params (dict, optional): dict of params = args to model init. Defaults to dict of 3 params below. model_input_size (int, optional): param of Model, num input_ features. Defaults to 1. model_hidden_size (int, optional): param of Model, size of hidden layers. Defaults to 300. model_output_size (int, optional): param of Model, size of output. Defaults to 1. scaler (sklearn.preprocessing.*Scaler, optional): class Scaler for features. Defaults to sklearn.preprocessing.StandardScaler. device (torch.device, optional): device train on. Defaults to gpu, if available. gpu_num (int, optional): gpu num in sys. Defaults to 0. train_set_prop (float, optional): if not providing sate_test_start uses these coef to slicing train data. Defaults to 0.9. batch_size (int, optional): batch size for train. Defaults to 175. n_epochs (int, optional): number epochs for train. Defaults to 30. models_dir (str, optional): path to saves of simple_ts_forecast. Defaults to 'lstm_saves/ts_mnpz/'. days_between_fits (int, optional): days between fits for predict for report. Defaults to 31. n_fits (int, optional): number of fits for one test data. Defaults to 3. search_window (int, optional): search saved fit up to search_window days back. Defaults to 14. post_process_coef (float, optional): in [0, 1]. Defaults to 0.75. """ super().__init__() self.model_params = model_params or dict(input_size=model_input_size, hidden_size=model_hidden_size, output_size=model_output_size) self.device = device or torch.device(f'cuda:{gpu_num}' if torch.cuda.is_available() else 'cpu') self.cpu_device = torch.device('cpu') self.model = self._Model(**self.model_params, device=self.cpu_device) self.loss_fn = nn.MSELoss() self.lr = lr self.sched_step_size = sched_step_size self.sched_gamma = sched_gamma self.Scaler = scaler or sklearn.preprocessing.StandardScaler self.scalers = [] self.n_in = window self.n_out = n self.n_epochs = n_epochs self.batch_size = batch_size self.seeds = [0, 42, 1, 123, 1337, 2000, -1000, 300] self.models_dir = models_dir os.makedirs(self.models_dir, exist_ok=True) self.days_between_fits = days_between_fits self._filename_pattern = 'model_{date_test_start}_{datetime_fit}_{mape:.2f}_.pt' self.train_set_prop = train_set_prop self.n_fits = n_fits self.search_window = search_window self.post_process_coef = post_process_coef def fit(self, X, verbose=False, date_test_start=None, force_fit=False, load_from_filename=None, saving=True): """fit or load LSTM model Args: X ([pd.DataFrame]): all series to train (and testing model) without Nan verbose (bool, optional): if True prints verbose information. Defaults to False. date_test_start (str or datetime): Date for first n_out prediction. Defaults to end of 90% of df. force_fit (bool, optional): Fit even if exist saved. Defaults to False. load_from_filename (str, optional): Filename load from (without dirname). Defaults to None. 
""" ind = pd.to_datetime(X.index) X = X.values n_features = X.shape[1] if date_test_start is None: test_start = int(len(X) * self.train_set_prop) date_test_start = pd.to_datetime(ind[test_start]) else: test_start = ind.get_loc(date_test_start) + 1 - self.n_in - self.n_out self._test_start = test_start self.date_test_start = pd.to_datetime(date_test_start) train = X[:test_start].reshape(-1, n_features) test = X[test_start:].reshape(-1, n_features) trains = [] tests = [] for i in range(n_features): scaler = self.Scaler() series = train[:, i].reshape(-1, 1) scaler = scaler.fit(series) trains.append(scaler.fit_transform(series)) tests.append(scaler.transform(test[:, i].reshape(-1, 1))) self.scalers.append(scaler) shift_size = self.n_in train_arr = np.concatenate(trains, 1) test_arr = np.concatenate(tests, 1) x_train, y_train = self.series_to_supervised(train_arr, self.n_in, self.n_out, shift_size, for_new_arch=True) self._x_train = x_train self._y_train = y_train x_test, y_test = self.series_to_supervised(test_arr, self.n_in, self.n_out, shift_size, for_new_arch=True) self._x_test = x_test self._y_test = y_test if load_from_filename and not force_fit: self.load_model(self.models_dir + load_from_filename) elif force_fit: self._n_fits(self.n_fits, verbose, saving) else: filename = self.find_nearest_save(self.date_test_start) if filename: self.load_model(self.models_dir + filename) else: self._n_fits(self.n_fits, verbose, saving) def _n_fits(self, n_fits=3, verbose=False, saving=True): info = [] min_mape = float('inf') min_mape_i = 0 for i in range(n_fits): if i < len(self.seeds): torch.manual_seed(self.seeds[i]) else: torch.seed() self.model = self._Model(**self.model_params, device=self.device) self.model.to(self.device) self.loss_fn = nn.MSELoss() self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr) self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=self.sched_step_size, gamma=self.sched_gamma) if verbose: print(f'START fit {i}') train_loss, val_loss, tttime, mape = self.train(self._x_train, self._y_train, self._x_test, self._y_test, verbose=verbose) if verbose: print(f'MAPE on {i} fit = {mape:.4f}, last best = {min_mape:.4f}, elapsed {tttime / 60:.2f}min.\n') if min_mape > mape: min_mape = mape min_mape_i = i info.append((self.model, self.loss_fn, self.optimizer, self.scheduler)) self.model.to(self.cpu_device) self.model.device = self.cpu_device if verbose: print(f'\nTHE BEST Model is {min_mape_i} with MAPE = {min_mape:.4f}\n') self.model, self.loss_fn, self.optimizer, self.scheduler = info[min_mape_i] self.mape_on_val = min_mape if saving: self.save_fit() if torch.cuda.is_available(): torch.cuda.empty_cache() def predict(self, X, dates_from_predict=None, post_process=True): """ :param X: all series, same as in fit(), but with additional data at the end :type X: pd.DataFrame or np.ndarray :param dates_from_predict: indexes of days in df to predict if None predicts for last date in df :return: np.array if predictions for each day in dates_to_predict """ n_features = X.shape[1] trains = [] for i in range(n_features): scaler = self.scalers[i] series = X.iloc[:, i:i + 1].values trains.append(scaler.transform(series)) X = pd.DataFrame(np.concatenate(trains, 1), index=X.index) ind = X.index if dates_from_predict is None: dates_from_predict = [ind[-1]] to_predict = [] for date in dates_from_predict: end_ind = ind.get_loc(date) x = X.iloc[end_ind - self.n_in:end_ind, :].values to_predict.append(x) to_predict = np.array(to_predict) x = torch.from_numpy(to_predict).float() 
with torch.no_grad(): self.model.eval() y_pred = self.model(x, future=self.n_out).cpu() y_pred = y_pred[:, -self.n_out:].numpy() predicted_scaled = self._scale_all_predictions(y_pred) predicted_scaled = np.array(predicted_scaled).reshape(len(dates_from_predict), self.n_out) columns = [f'n{i + 1}' for i in range(self.n_out)] pred = pd.DataFrame(predicted_scaled, index=dates_from_predict, columns=columns) if post_process: ma = X.loc[pred.index].values[:, :1] ppc = self.post_process_coef pred = pred - predicted_scaled[:, :1] + (ma * ppc + predicted_scaled[:, :1] * (1 - ppc)) return pred def predict_for_report(self, X, date_start, date_end, current_fit=False, force_fits=False, verbose=False, saving=True, post_process=True): date_start = pd.to_datetime(date_start) date_end = pd.to_datetime(date_end) columns = [f'n{i + 1}' for i in range(self.n_out)] if current_fit: predicted = self._evaluate_all(self._x_test, self._y_test) start = date_start - relativedelta(days=self.n_out) ind = pd.date_range(start, periods=len(predicted)) return pd.DataFrame(predicted, index=ind, columns=columns) flag = False preds = [] l_range = (date_end - date_start).days for i in range(0, l_range, self.days_between_fits): if l_range - (i + self.days_between_fits) < self.n_out: flag = True new_date_start = date_start + relativedelta(days=i) new_end = new_date_start + relativedelta(days=self.days_between_fits - 1) if flag: new_end = date_end if force_fits: self.fit(X.loc[:new_end], date_test_start=new_date_start, force_fit=True, verbose=verbose, saving=saving) else: saved_fit_fn = self.find_nearest_save(new_date_start) if saved_fit_fn: self.fit(X.loc[:new_end], date_test_start=new_date_start, load_from_filename=saved_fit_fn, verbose=verbose, saving=saving) else: self.fit(X.loc[:new_end], date_test_start=new_date_start, force_fit=True, verbose=verbose, saving=saving) predicted = self._evaluate_all(self._x_test, self._y_test) start = new_date_start - relativedelta(days=self.n_out) ind = pd.date_range(start, periods=len(predicted)) preds.append(pd.DataFrame(predicted, index=ind, columns=columns)) if flag: break pred = pd.concat(preds) if post_process: predicted_scaled = pred.values ma = X.loc[pred.index].values[:, :1] ppc = self.post_process_coef pred = pred - predicted_scaled[:, :1] + (ma * ppc + predicted_scaled[:, :1] * (1 - ppc)) return pred def save_fit(self): checkpoint = { 'model': self._Model(**self.model_params, device=self.cpu_device), 'date_test_start': self.date_test_start, 'state_dict': self.model.state_dict(), 'mape_on_val': self.mape_on_val } torch.save(checkpoint, self.models_dir + self._filename_pattern.format(date_test_start=self.date_test_start.date(), datetime_fit=datetime.datetime.now().strftime( "%Y-%m-%d %H%M%S"), mape=self.mape_on_val)) def load_model(self, filepath): checkpoint = torch.load(filepath, map_location=self.cpu_device) self.model = checkpoint['model'] self.model.load_state_dict(checkpoint['state_dict']) self.model.eval() self.model.to(self.cpu_device) self.mape_on_val = checkpoint['mape_on_val'] self.date_test_start = checkpoint['date_test_start'] def list_saved_fits(self): filenames = [fn for fn in os.listdir(self.models_dir) if fn.endswith('.pt')] list_of_fits = [] for fn in filenames: _, date_test_start, datetime_fit, mape, _ = fn.split('_') date_test_start = pd.to_datetime(date_test_start) datetime_fit = pd.to_datetime(datetime_fit, format="%Y-%m-%d %H%M%S") mape = float(mape) list_of_fits.append(SavedFit(fn, date_test_start, datetime_fit, mape)) return list_of_fits def 
find_nearest_save(self, date): date =
pd.to_datetime(date)
pandas.to_datetime
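A tiny sketch of the pandas.to_datetime normalisation completed above in find_nearest_save, which accepts either a string or a datetime; the input value is made up.

import pandas as pd

# Coerce a user-supplied date into a Timestamp for comparisons.
date = pd.to_datetime("2021-06-01")
print(date, type(date).__name__)  # 2021-06-01 00:00:00 Timestamp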
from pandas.testing import assert_frame_equal import pandas as pd from sparkmagic.utils.utils import coerce_pandas_df_to_numeric_datetime def test_no_coercing(): records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': u'12'}, {u'buildingID': 1, u'date': u'random', u'temp_diff': u'0adsf'}] desired_df = pd.DataFrame(records) df = pd.DataFrame(records) coerce_pandas_df_to_numeric_datetime(df) assert_frame_equal(desired_df, df) def test_date_coercing(): records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': u'12'}, {u'buildingID': 1, u'date': u'6/1/13', u'temp_diff': u'0adsf'}] desired_df = pd.DataFrame(records) desired_df["date"] = pd.to_datetime(desired_df["date"]) df = pd.DataFrame(records) coerce_pandas_df_to_numeric_datetime(df) assert_frame_equal(desired_df, df) def test_date_coercing_none_values(): records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': u'12'}, {u'buildingID': 1, u'date': None, u'temp_diff': u'0adsf'}] desired_df = pd.DataFrame(records) desired_df["date"] = pd.to_datetime(desired_df["date"]) df = pd.DataFrame(records) coerce_pandas_df_to_numeric_datetime(df) assert_frame_equal(desired_df, df) def test_date_none_values_and_no_coercing(): records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': u'12'}, {u'buildingID': 1, u'date': None, u'temp_diff': u'0adsf'}, {u'buildingID': 1, u'date': u'adsf', u'temp_diff': u'0adsf'}] desired_df = pd.DataFrame(records) df = pd.DataFrame(records) coerce_pandas_df_to_numeric_datetime(df) assert_frame_equal(desired_df, df) def test_numeric_coercing(): records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': u'12'}, {u'buildingID': 1, u'date': u'adsf', u'temp_diff': u'0'}] desired_df = pd.DataFrame(records) desired_df["temp_diff"] = pd.to_numeric(desired_df["temp_diff"]) df = pd.DataFrame(records) coerce_pandas_df_to_numeric_datetime(df) assert_frame_equal(desired_df, df) def test_numeric_coercing_none_values(): records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': u'12'}, {u'buildingID': 1, u'date': u'asdf', u'temp_diff': None}] desired_df = pd.DataFrame(records) desired_df["temp_diff"] = pd.to_numeric(desired_df["temp_diff"]) df = pd.DataFrame(records) coerce_pandas_df_to_numeric_datetime(df) assert_frame_equal(desired_df, df) def test_numeric_none_values_and_no_coercing(): records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': u'12'}, {u'buildingID': 1, u'date': u'asdf', u'temp_diff': None}, {u'buildingID': 1, u'date': u'adsf', u'temp_diff': u'0asdf'}] desired_df = pd.DataFrame(records) df = pd.DataFrame(records) coerce_pandas_df_to_numeric_datetime(df) assert_frame_equal(desired_df, df) def test_df_dict_does_not_throw(): json_str = """ [{ "id": 580320, "name": "<NAME>", "results": "Fail", "violations": "37. TOILET area.", "words": ["37.", "toilet", "area."], "features": { "type": 0, "size": 262144, "indices": [0, 45, 97], "values": [7.0, 5.0, 1.0] }, "rawPrediction": { "type": 1, "values": [3.640841752791392, -3.640841752791392] }, "probability": { "type": 1, "values": [0.974440185187647, 0.025559814812352966] }, "prediction": 0.0 }] """ df = pd.read_json(json_str) coerce_pandas_df_to_numeric_datetime(df) def test_overflow_coercing(): records = [{'_c0':'12345678901'}] desired_df =
pd.DataFrame(records)
pandas.DataFrame
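A small sketch of building a DataFrame from a list of record dicts, the pattern used throughout the coercion tests above; the record values are illustrative.

import pandas as pd

# One dict per row; keys become columns.
records = [{"_c0": "12345678901"}, {"_c0": "42"}]
df = pd.DataFrame(records)
print(df.dtypes)  # _c0 stays object until explicitly coerced to numeric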
import matplotlib.pyplot as plt
from pathlib import Path
import pandas as pd
import os
import numpy as np


def get_file_paths(file_directory):
    file_paths = os.listdir(file_directory)
    file_paths = list(filter(lambda f_path: os.path.isdir(file_directory / f_path), file_paths))
    return file_paths


def plot_day(plot_directory, df_phases_day, sdp_name, start_time, df_comparison_values, plot_method, comparison_label):
    sdp_directory = plot_directory / sdp_name
    if not os.path.exists(sdp_directory):
        os.makedirs(sdp_directory)

    plt.figure(1)
    plt.ylabel('Phases')
    p_counter = 1
    relevant_plot = False
    transgressions_sum = 0
    for df_p_day in df_phases_day:
        if not df_p_day.empty:
            transgressions = plot_method(df_p_day, p_counter)
            transgressions_sum += transgressions
            relevant_plot = relevant_plot or transgressions > 0
        p_counter = p_counter + 1

    if relevant_plot and not df_comparison_values.empty:
        df_comparison_values.plot(figsize=(24, 6), linewidth=0.5, color='grey', label=comparison_label)

    if relevant_plot:
        legend = plt.legend(fontsize='x-large', loc='lower left')
        for line in legend.get_lines():
            line.set_linewidth(4.0)

    plot_path = plot_directory / sdp_name / start_time
    if relevant_plot:
        plt.savefig(plot_path)
    plt.close(1)

    if transgressions_sum > 0:
        print(start_time)
        print(transgressions_sum)
    return transgressions_sum


def plot_pickle_daywise(pickle_directory, plot_directory, plot_method, comparison_series_func):
    transgression_sum = 0
    nmbr_elements_sum = 0
    file_paths = get_file_paths(pickle_directory)
    print(file_paths)
    for path in file_paths:
        print(path)
        comparison_label, df_comparison_values = comparison_series_func(path)
        # df_mean_values = pd.read_pickle(pickle_directory/(path+'season_aggregation')).sort_index()
        path = pickle_directory / Path(path)
        df_phases = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
        nmbr_elements_sum += sum(map(lambda df: df.shape[0], df_phases))
        day = pd.Timedelta('1d')
        min_date = min(list(map(lambda df: df.index.min(), df_phases))).date()
        max_date = max(list(map(lambda df: df.index.max(), df_phases))).date()
        print(min_date)
        print(max_date)
        for start_time in pd.date_range(min_date, max_date, freq='d'):
            end_time = start_time + day
            # df_day = df.loc[df.index>start_time and df.index<end_time, :]
            df_phases_day = list(map(lambda df: df.loc[start_time:end_time], df_phases))
            df_comparison_values_day = df_comparison_values.loc[start_time:end_time]
            # print(start_time.date())
            transgression_sum += plot_day(plot_directory, df_phases_day, path.name, str(start_time.date()),
                                          df_comparison_values_day, plot_method, comparison_label)
    return transgression_sum, nmbr_elements_sum


def plot_station_dif_anomalies(pickle_directory, base_plot_directory, anomaly_threshold):
    plot_directory = base_plot_directory / ("StationDif_" + str(anomaly_threshold).replace(".", "_"))

    def plot_station_dif_v2(df_p_day, p_counter):
        transgressions = list(np.where(abs(df_p_day.StationDif) > anomaly_threshold)[0])
        df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
                            markerfacecolor='black', label="phase" + str(p_counter))
        return len(transgressions)

    def comparison_series_func(station_name):
        return "meanStationAverage", pd.read_pickle(pickle_directory / 'meanStationValues')

    transgression_sum, nmbr_elements_sum = plot_pickle_daywise(pickle_directory, plot_directory,
                                                               plot_station_dif_v2, comparison_series_func)
    print(transgression_sum)
    print(nmbr_elements_sum)
    ratio = transgression_sum / nmbr_elements_sum
    print(ratio)
    f = open(plot_directory / str(ratio), "w+")
    f.close()


def plot_phase_dif_anomalies(pickle_directory, base_plot_directory, anomaly_threshold):
    plot_directory = base_plot_directory / ("PhaseDif_" + str(anomaly_threshold).replace(".", "_"))

    def plot_station_dif_v2(df_p_day, p_counter):
        transgressions = list(np.where(abs(df_p_day.phase_dif) > anomaly_threshold)[0])
        df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
                            markerfacecolor='black', label="phase" + str(p_counter))
        return len(transgressions)

    def comparison_series_func(station_name):
        return "",
pd.DataFrame()
pandas.DataFrame
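For context, a minimal sketch of how the anomaly-plotting entry point defined above might be invoked; the directory layout and the 0.1 threshold are assumptions for illustration, not values taken from the original script.

from pathlib import Path

pickle_directory = Path("data/pickles")        # assumed: one subfolder per station holding h_phase1/2/3 pickles
base_plot_directory = Path("plots/anomalies")  # assumed output location

# Mark every sample whose StationDif exceeds the (assumed) threshold of 0.1.
plot_station_dif_anomalies(pickle_directory, base_plot_directory, anomaly_threshold=0.1)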
#!/usr/bin/env python # Native library import sys import pickle import argparse import multiprocessing from random import shuffle import math import tempfile from operator import itemgetter from io import StringIO # Other import numpy as np import pandas as pd # Features import ms2pipfeatures_pyx_HCD import ms2pipfeatures_pyx_HCDch2 import ms2pipfeatures_pyx_CID # import ms2pipfeatures_pyx_HCDiTRAQ4phospho # import ms2pipfeatures_pyx_HCDiTRAQ4 import ms2pipfeatures_pyx_ETD # From other Python files from write_msp import write_msp def process_peptides(worker_num, data, a_map, afile, modfile, modfile2, PTMmap, fragmethod): """ Function for each worker to process a list of peptides. The models are chosen based on fragmethod, PTMmap, Ntermmap and Ctermmap determine the modifications applied to each peptide sequence. Returns the predicted spectra for all the peptides. """ # Rename ms2pipfeatures_pyx # This needs to be done inside process_peptides and inside process_spectra, as ms2pipfeatures_pyx # cannot be passed as an argument through multiprocessing. Also, in order to be compatible with # MS2PIP Server (which calls the function Run), this can not be done globally. if fragmethod == "CID": ms2pipfeatures_pyx = ms2pipfeatures_pyx_CID elif fragmethod == "HCD": ms2pipfeatures_pyx = ms2pipfeatures_pyx_HCD elif fragmethod == "HCDiTRAQ4phospho": ms2pipfeatures_pyx = ms2pipfeatures_pyx_HCDiTRAQ4phospho elif fragmethod == "HCDiTRAQ4": ms2pipfeatures_pyx = ms2pipfeatures_pyx_HCDiTRAQ4 elif fragmethod == "HCDch2": ms2pipfeatures_pyx = ms2pipfeatures_pyx_HCDch2 elif fragmethod == "ETD": ms2pipfeatures_pyx = ms2pipfeatures_pyx_ETD ms2pipfeatures_pyx.ms2pip_init(bytearray(afile.encode()), bytearray(modfile.encode()), bytearray(modfile2.encode())) # transform pandas dataframe into dictionary for easy access specdict = data[["spec_id", "peptide", "modifications", "charge"]].set_index("spec_id").to_dict() peptides = specdict["peptide"] modifications = specdict["modifications"] charges = specdict["charge"] final_result = pd.DataFrame(columns=["spec_id", "peplen", "charge", "ion", "ionnumber", "mz", "prediction"]) pcount = 0 for pepid in peptides: peptide = peptides[pepid] peptide = peptide.replace("L", "I") mods = modifications[pepid] # convert peptide string to integer list to speed up C code peptide = np.array([0] + [a_map[x] for x in peptide] + [0], dtype=np.uint16) modpeptide = apply_mods(peptide, mods, PTMmap) if type(modpeptide) == str: if modpeptide == "Unknown modification": continue ch = charges[pepid] # get ion mzs mzs = ms2pipfeatures_pyx.get_mzs(modpeptide) # get ion intensities predictions = ms2pipfeatures_pyx.get_predictions(peptide, modpeptide, ch) # return predictions as a DataFrame tmp = pd.DataFrame(columns=['spec_id', 'peplen', 'charge', 'ion', 'ionnumber', 'mz', 'prediction']) num_ions = len(predictions[0]) if fragmethod == 'ETD': tmp["ion"] = ['b'] * num_ions + ['y'] * num_ions + ['c'] * num_ions + ['z'] * num_ions tmp["ionnumber"] = list(range(1, num_ions + 1)) * 4 tmp["mz"] = mzs[0] + mzs[1] + mzs[2] + mzs[3] tmp["prediction"] = predictions[0] + predictions[1] + predictions[2] + predictions[3] elif fragmethod == 'HCDch2': tmp["ion"] = ['b'] * num_ions + ['y'] * num_ions + ['b2'] * num_ions + ['y2'] * num_ions tmp["ionnumber"] = list(range(1, num_ions + 1)) * 4 tmp["mz"] = mzs[0] + mzs[1] + mzs[2] + mzs[3] tmp["prediction"] = predictions[0] + predictions[1] + predictions[2] + predictions[3] else: tmp["ion"] = ['b'] * num_ions + ['y'] * num_ions tmp["mz"] = mzs[0] + mzs[1] tmp["ionnumber"] 
= list(range(1, num_ions + 1)) * 2 tmp["prediction"] = predictions[0] + predictions[1] tmp["peplen"] = len(peptide) - 2 tmp["charge"] = ch tmp["spec_id"] = pepid final_result = final_result.append(tmp) pcount += 1 if (pcount % 500) == 0: print("w{}({})".format(worker_num, pcount), end=', ') return final_result def process_spectra(worker_num, spec_file, vector_file, data, a_map, afile, modfile, modfile2, PTMmap, fragmethod, fragerror): """ Function for each worker to process a list of spectra. Each peptide's sequence is extracted from the mgf file. Then models are chosen based on fragmethod, PTMmap, Ntermmap and Ctermmap determine the modifications applied to each peptide sequence and the spectrum is predicted. Then either the feature vectors are returned, or a DataFrame with the predicted and empirical intensities. """ # Rename ms2pipfeatures_pyx # This needs to be done inside process_peptides and inside process_spectra, as ms2pipfeatures_pyx # cannot be passed as an argument through multiprocessing. Also, in order to be compatible with # MS2PIP Server (which calls the function Run), this can not be done globally. if fragmethod == "CID": ms2pipfeatures_pyx = ms2pipfeatures_pyx_CID elif fragmethod == "HCD": ms2pipfeatures_pyx = ms2pipfeatures_pyx_HCD elif fragmethod == "HCDiTRAQ4phospho": ms2pipfeatures_pyx = ms2pipfeatures_pyx_HCDiTRAQ4phospho elif fragmethod == "HCDiTRAQ4": ms2pipfeatures_pyx = ms2pipfeatures_pyx_HCDiTRAQ4 elif fragmethod == "HCDch2": ms2pipfeatures_pyx = ms2pipfeatures_pyx_HCDch2 elif fragmethod == "ETD": ms2pipfeatures_pyx = ms2pipfeatures_pyx_ETD ms2pipfeatures_pyx.ms2pip_init(bytearray(afile.encode()), bytearray(modfile.encode()), bytearray(modfile2.encode())) # transform pandas datastructure into dictionary for easy access specdict = data[["spec_id", "peptide", "modifications"]].set_index("spec_id").to_dict() peptides = specdict["peptide"] modifications = specdict["modifications"] # cols contains the names of the computed features cols_n = get_feature_names() dataresult =
pd.DataFrame(columns=["spec_id", "peplen", "charge", "ion", "ionnumber", "mz", "target", "prediction"])
pandas.DataFrame
import sys import os import math import copy import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from scipy.stats import rankdata import multiprocessing as mp import logging import scanpy as sc import anndata as ad from scipy.io import mmread,mmwrite from scipy.sparse import csr_matrix,issparse import matplotlib as mpl from functools import reduce from sklearn.decomposition import PCA import umap from sctriangulate.colors import * # for publication ready figure mpl.rcParams['pdf.fonttype'] = 42 mpl.rcParams['ps.fonttype'] = 42 mpl.rcParams['font.family'] = 'Arial' def sctriangulate_preprocessing_setting(backend='Agg',png=False): # change the backend mpl.use(backend) if png: # for publication and super large dataset mpl.rcParams['savefig.dpi'] = 600 mpl.rcParams['figure.dpi'] = 600 def small_txt_to_adata(int_file,gene_is_index=True): ''' given a small dense expression (<2GB) txt file, load them into memory as adata, and also make sure the X is sparse matrix. :param int_file: string, path to the input txt file, delimited by tab :param gene_is_index: boolean, whether the gene/features are the index. :return: AnnData Exmaples:: from sctriangulate.preprocessing import small_txt_to_adata adata= = small_txt_to_adata('./input.txt',gene_is_index=True) ''' df = pd.read_csv(int_file,sep='\t',index_col=0) if gene_is_index: adata = ad.AnnData(X=csr_matrix(df.values.T),var=pd.DataFrame(index=df.index.values),obs=pd.DataFrame(index=df.columns.values)) else: adata = ad.AnnData(X=csr_matrix(df.values),var=pd.DataFrame(index=df.columns.values),obs=pd.DataFrame(index=df.index.values)) adata.var_names_make_unique() adata.X = csr_matrix(adata.X) return adata def large_txt_to_mtx(int_file,out_folder,gene_is_index=True,type_convert_to='int16'): # whether the txt if gene * cell ''' Given a large txt dense expression file, convert them to mtx file on cluster to facilitate future I/O :param int_file: string, path to the intput txt file, delimited by tab :param out_folder: string, path to the output folder where the mtx file will be stored :param gene_is_index: boolean, whether the gene/features is the index in the int_file. :param type_convert_to: string, since it is a large dataframe, need to read in chunk, to accelarate it and reduce the memory footprint, we convert it to either 'int16' if original data is count, or 'float32' if original data is normalized data. 
Examples:: from sctriangulate.preprocessing import large_txt_to_mtx large_txt_to_mtx(int_file='input.txt',out_folder='./data',gene_is_index=False,type_convert_to='float32') ''' reader = pd.read_csv(int_file,sep='\t',index_col=0,chunksize=1000) store = [] for chunk in reader: tmp = chunk.astype(type_convert_to) store.append(tmp) data = pd.concat(store) print(data.shape) '''save as mtx, now!!!''' if not os.path.exists(out_folder): os.mkdir(out_folder) if gene_is_index: data.index.to_series().to_csv(os.path.join(out_folder,'genes.tsv'),sep='\t',header=None,index=None) data.columns.to_series().to_csv(os.path.join(out_folder,'barcodes.tsv'),sep='\t',header=None,index=None) mmwrite(os.path.join(out_folder,'matrix.mtx'),csr_matrix(data.values)) else: data.columns.to_series().to_csv(os.path.join(out_folder,'genes.tsv'),sep='\t',header=None,index=None) data.index.to_series().to_csv(os.path.join(out_folder,'barcodes.tsv'),sep='\t',header=None,index=None) mmwrite(os.path.join(out_folder,'matrix.mtx'),csr_matrix(data.values.T)) def mtx_to_adata(int_folder,gene_is_index=True,feature='genes',feature_col='index',barcode_col='index'): # whether the mtx file is gene * cell ''' convert mtx file to adata in RAM, make sure the X is sparse. :param int_folder: string, folder where the mtx files are stored. :param gene_is_index: boolean, whether the gene is index. :param feature: string, the name of the feature tsv file, if rna, it will be genes.tsv. :param feature_col: 'index' as index, or a int (which column, python is zero based) to use in your feature.tsv as feature :param barcode_col: 'index' as index, or a int (which column, python is zero based) to use in your barcodes.tsv as barcode :return: AnnData Examples:: from sctriangulate.preprocessing import mtx_to_adata mtx_to_adata(int_folder='./data',gene_is_index=False,feature='genes') ''' if feature_col == 'index': gene = pd.read_csv(os.path.join(int_folder,'{}.tsv'.format(feature)),sep='\t',index_col=0,header=None).index else: gene = pd.read_csv(os.path.join(int_folder,'{}.tsv'.format(feature)),sep='\t',index_col=0,header=None)[feature_col] if barcode_col == 'index': cell = pd.read_csv(os.path.join(int_folder,'barcodes.tsv'),sep='\t',index_col=0,header=None).index else: cell = pd.read_csv(os.path.join(int_folder,'barcodes.tsv'),sep='\t',index_col=0,header=None)[barcode_col] value = csr_matrix(mmread(os.path.join(int_folder,'matrix.mtx'))) if gene_is_index: value = value.T adata = ad.AnnData(X=value,obs=pd.DataFrame(index=cell),var=pd.DataFrame(index=gene)) else: adata = ad.AnnData(X=value,obs=pd.DataFrame(index=cell),var=pd.DataFrame(index=gene)) adata.var.index.name = None adata.var_names_make_unique() return adata def mtx_to_large_txt(int_folder,out_file,gene_is_index=False): ''' convert mtx back to large dense txt expression dataframe. :param int_folder: string, path to the input mtx folder. :param out_file: string, path to the output txt file. :param gene_is_index: boolean, whether the gene is the index. 
Examples:: from sctriangulate.preprocessing import mtx_to_large_txt mtx_to_large_txt(int_folder='./data',out_file='input.txt',gene_is_index=False) ''' gene = pd.read_csv(os.path.join(int_folder,'genes.tsv'),sep='\t',index_col=0,header=None).index cell = pd.read_csv(os.path.join(int_folder,'barcodes.tsv'),sep='\t',index_col=0,header=None).index value = mmread(os.path.join(int_folder,'matrix.mtx')).toarray() if gene_is_index: data = pd.DataFrame(data=value,index=gene,columns=cell) else: data = pd.DataFrame(data=value.T,index=cell,columns=gene) data.to_csv(out_file,sep='\t',chunksize=1000) def adata_to_mtx(adata,gene_is_index=True,var_column=None,obs_column=None,outdir='data'): # create folder if not exist if not os.path.exists(outdir): os.mkdir(outdir) # write genes.tsv if var_column is None: var = adata.var_names.to_series() else: var = adata.var[var_column] var.to_csv(os.path.join(outdir,'genes.tsv'),sep='\t',header=None,index=None) # write barcodes.tsv if obs_column is None: obs = adata.obs_names.to_series() else: obs = adata.obs[obs_column] obs.to_csv(os.path.join(outdir,'barcodes.tsv'),sep='\t',header=None,index=None) # write matrix.mtx if not gene_is_index: mmwrite(os.path.join(outdir,'matrix.mtx'),make_sure_mat_sparse(adata.X)) else: mmwrite(os.path.join(outdir,'matrix.mtx'),make_sure_mat_sparse(adata.X).transpose()) def add_azimuth(adata,result,name='predicted.celltype.l2'): ''' a convenient function if you have azimuth predicted labels in hand, and want to add the label to the adata. :param adata: AnnData :param result: string, the path to the 'azimuth_predict.tsv' file :param name: string, the column name where the user want to transfer to the adata. Examples:: from sctriangulate.preprocessing import add_azimuth add_azimuth(adata,result='./azimuth_predict.tsv',name='predicted.celltype.l2') ''' azimuth = pd.read_csv(result,sep='\t',index_col=0) azimuth_map = azimuth[name].to_dict() azimuth_prediction = azimuth['{}.score'.format(name)].to_dict() azimuth_mapping = azimuth['mapping.score'].to_dict() adata.obs['azimuth'] = adata.obs_names.map(azimuth_map).values adata.obs['prediction_score'] = adata.obs_names.map(azimuth_prediction).values adata.obs['mapping_score'] = adata.obs_names.map(azimuth_mapping).values def add_annotations(adata,inputs,cols_input,index_col=0,cols_output=None,kind='disk'): ''' Adding annotations from external sources to the adata :param adata: Anndata :param inputs: string, path to the txt file where the barcode to cluster label information is stored. :param cols_input: list, what columns the users want to transfer to the adata. 
:param index_col: int, for the input, which column will serve as the index column :param cols_output: list, corresponding to the cols_input, how these columns will be named in the adata.obs columns :param kind: a string, either 'disk', or 'memory', disk means the input is the path to the text file, 'memory' means the input is the variable name in the RAM that represents the dataframe Examples:: from sctriangulate.preprocessing import add_annotations add_annotations(adata,inputs='./annotation.txt',cols_input=['col1','col2'],index_col=0,cols_output=['annotation1','annontation2'],kind='disk') add_annotations(adata,inputs=df,cols_input=['col1','col2'],index_col=0,cols_output=['annotation1','annontation2'],kind='memory') ''' # means a single file such that one column is barcodes, annotations are within other columns if kind == 'disk': annotations = pd.read_csv(inputs,sep='\t',index_col=index_col).loc[:,cols_input] elif kind == 'memory': # index_col will be ignored annotations = inputs.loc[:,cols_input] mappings = [] for col in cols_input: mapping = annotations[col].to_dict() mappings.append(mapping) if cols_output is None: for i,col in enumerate(cols_input): adata.obs[col] = adata.obs_names.map(mappings[i]).fillna('Unknown').values adata.obs[col] = adata.obs[col].astype('str').astype('category') else: for i in range(len(cols_input)): adata.obs[cols_output[i]] = adata.obs_names.map(mappings[i]).fillna('Unknown').values adata.obs[cols_output[i]] = adata.obs[cols_output[i]].astype('str').astype('category') def add_umap(adata,inputs,mode,cols=None,index_col=0): ''' if umap embedding is pre-computed, add it back to adata object. :param adata: Anndata :param inputs: string, path to the the txt file where the umap embedding was stored. :param mode: string, valid value 'pandas_disk', 'pandas_memory', 'numpy' * **pandas_disk**: the `inputs` argument should be the path to the txt file * **pandas_memory**: the `inputs` argument should be the name of the pandas dataframe in the program, inputs=df * **numpy**, the `inputs` argument should be a 2D ndarray contains pre-sorted (same order as barcodes in adata) umap coordinates :param cols: list, what columns contain umap embeddings :param index_col: int, which column will serve as the index column. 
Examples:: from sctriangulate.preprocessing import add_umap add_umap(adata,inputs='umap.txt',mode='pandas_disk',cols=['umap1','umap2'],index_col=0) ''' # make sure cols are [umap_x, umap_y] if mode == 'pandas_disk': df = pd.read_csv(inputs,sep='\t',index_col=index_col) umap_x = df[cols[0]].to_dict() umap_y = df[cols[1]].to_dict() adata.obs['umap_x'] = adata.obs_names.map(umap_x).values adata.obs['umap_y'] = adata.obs_names.map(umap_y).values adata.obsm['X_umap'] = adata.obs.loc[:,['umap_x','umap_y']].values adata.obs.drop(columns=['umap_x','umap_y'],inplace=True) elif mode == 'pandas_memory': df = inputs umap_x = df[cols[0]].to_dict() umap_y = df[cols[1]].to_dict() adata.obs['umap_x'] = adata.obs_names.map(umap_x).values adata.obs['umap_y'] = adata.obs_names.map(umap_y).values adata.obsm['X_umap'] = adata.obs.loc[:,['umap_x','umap_y']].values adata.obs.drop(columns=['umap_x','umap_y'],inplace=True) elif mode == 'numpy': # assume the order is correct adata.obsm['X_umap'] = inputs def doublet_predict(adata): # gave RNA count or log matrix ''' wrapper function for running srublet, a new column named 'doublet_scores' will be added to the adata :param adata: Anndata :return: dict Examples:: from sctriangulate.preprocessing import doublet_predict mapping = doublet_predict(old_adata) ''' from scipy.sparse import issparse import scrublet as scr if issparse(adata.X): adata.X = adata.X.toarray() counts_matrix = adata.X scrub = scr.Scrublet(counts_matrix) doublet_scores, predicted_doublets = scrub.scrub_doublets(min_counts=1, min_cells=1) adata.obs['doublet_scores'] = doublet_scores return adata.obs['doublet_scores'].to_dict() def make_sure_adata_writable(adata,delete=False): ''' maks sure the adata is able to write to disk, since h5 file is stricted typed, so no mixed dtype is allowd. this function basically is to detect the column of obs/var that are of mixed types, and delete them. :param adata: Anndata :param delete: boolean, False will just print out what columns are mixed type, True will automatically delete those columns :return: Anndata Examples:: from sctriangulate.preprocessing import make_sure_adata_writable make_sure_adata_writable(adata,delete=True) ''' # check index, can not have name var_names = adata.var_names obs_names = adata.obs_names var_names.name = None obs_names.name = None adata.var_names = var_names adata.obs_names = obs_names # make sure column is pure type, basically, if mixed tyep, delete the column, and print out the delete columns # go to: https://github.com/theislab/scanpy/issues/1866 var = adata.var obs = adata.obs for col in var.columns: if var[col].dtypes == 'O': all_type = np.array([type(item) for item in var[col]]) first = all_type[0] if (first==all_type).all() and first == str: # object, but every item is str continue else: # mixed type print('column {} in var will be deleted, because mixed types'.format(col)) if delete: adata.var.drop(columns=[col],inplace=True) for col in obs.columns: if obs[col].dtypes == 'O': all_type = np.array([type(item) for item in obs[col]]) first = all_type[0] if (first==all_type).all() and first == str: # object, but every item is str continue else: # mixed type print('column {} in obs will be deleted, because mixed types'.format(col)) if delete: adata.obs.drop(columns=[col],inplace=True) return adata def scanpy_recipe(adata,species='human',is_log=False,resolutions=[1,2,3],modality='rna',umap=True,save=True,pca_n_comps=None,n_top_genes=3000): ''' Main preprocessing function. 
Run Scanpy normal pipeline to achieve Leiden clustering with various resolutions across multiple modalities. :param adata: Anndata :param species: string, 'human' or 'mouse' :param is_log: boolean, whether the adata.X is count or normalized data. :param resolutions: list, what leiden resolutions the users want to obtain. :param modality: string, valid values: 'rna','adt','atac', 'binary'[mutation data, TCR data, etc] :param umap: boolean, whether to compute umap embedding. :param save: boolean, whether to save the obtained adata object with cluster label information in it. :param pca_n_comps: int, how many PCs to keep when running PCA. Suggestion: RNA (30-50), ADT (15), ATAC (100) :param n_top_genes: int, how many features to keep when selecting highly_variable_genes. Suggestion: RNA (3000), ADT (ignored), ATAC (50000-100000) :return: Anndata Examples:: from sctriangulate.preprocessing import scanpy_recipe # rna adata = scanpy_recipe(adata,is_log=False,resolutions=[1,2,3],modality='rna',pca_n_comps=50,n_top_genes=3000) # adt adata = scanpy_recipe(adata,is_log=False,resolutions=[1,2,3],modality='adt',pca_n_comps=15) # atac adata = scanpy_recipe(adata,is_log=False,resolutions=[1,2,3],modality='atac',pca_n_comps=100,n_top_genes=100000) # binary adata = scanpy_recipe(adata,resolutions=[1,2,3],modality='binary') ''' adata.var_names_make_unique() # normal analysis if modality == 'rna': if not is_log: # count data if species == 'human': adata.var['mt'] = adata.var_names.str.startswith('MT-') elif species == 'mouse': adata.var['mt'] = adata.var_names.str.startswith('mt-') sc.pp.calculate_qc_metrics(adata,qc_vars=['mt'],percent_top=None,inplace=True,log1p=False) sc.pp.normalize_total(adata,target_sum=1e4) sc.pp.log1p(adata) sc.pp.highly_variable_genes(adata,flavor='seurat',n_top_genes=n_top_genes) adata.raw = adata adata = adata[:,adata.var['highly_variable']] sc.pp.regress_out(adata,['total_counts','pct_counts_mt']) sc.pp.scale(adata,max_value=10) sc.tl.pca(adata,n_comps=pca_n_comps) sc.pp.neighbors(adata) for resolution in resolutions: sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution)) if umap: sc.tl.umap(adata) # put raw back to X, and make sure it is sparse matrix adata = adata.raw.to_adata() if not issparse(adata.X): adata.X = csr_matrix(adata.X) if save: resolutions = '_'.join([str(item) for item in resolutions]) adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap)) else: # log(1+x) and depth normalized data if species == 'human': adata.var['mt'] = adata.var_names.str.startswith('MT-') elif species == 'mouse': adata.var['mt'] = adata.var_names.str.startswith('mt-') sc.pp.calculate_qc_metrics(adata,qc_vars=['mt'],percent_top=None,inplace=True,log1p=False) sc.pp.highly_variable_genes(adata,flavor='seurat',n_top_genes=n_top_genes) adata.raw = adata adata = adata[:,adata.var['highly_variable']] sc.pp.regress_out(adata,['total_counts','pct_counts_mt']) sc.pp.scale(adata,max_value=10) sc.tl.pca(adata,n_comps=pca_n_comps) sc.pp.neighbors(adata) for resolution in resolutions: sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution)) if umap: sc.tl.umap(adata) # put raw back to X, and make sure it is sparse matrix adata = adata.raw.to_adata() if not issparse(adata.X): adata.X = csr_matrix(adata.X) if save: resolutions = '_'.join([str(item) for item in resolutions]) adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap)) elif modality 
== 'atac': if not is_log: sc.pp.calculate_qc_metrics(adata,percent_top=None,inplace=True,log1p=False) sc.pp.normalize_total(adata,target_sum=1e4) sc.pp.log1p(adata) sc.pp.highly_variable_genes(adata,flavor='seurat',n_top_genes=n_top_genes) adata.raw = adata adata = adata[:,adata.var['highly_variable']] #sc.pp.scale(adata,max_value=10) # because in episcanpy toturial, it seems to be ignored sc.tl.pca(adata,n_comps=pca_n_comps) sc.pp.neighbors(adata) for resolution in resolutions: sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution)) if umap: sc.tl.umap(adata) adata = adata.raw.to_adata() if not issparse(adata.X): adata.X = csr_matrix(adata.X) if save: resolutions = '_'.join([str(item) for item in resolutions]) adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap)) else: sc.pp.calculate_qc_metrics(adata,percent_top=None,inplace=True,log1p=False) sc.pp.highly_variable_genes(adata,flavor='seurat',n_top_genes=n_top_genes) adata.raw = adata adata = adata[:,adata.var['highly_variable']] #sc.pp.scale(adata,max_value=10) sc.tl.pca(adata,n_comps=pca_n_comps) sc.pp.neighbors(adata) for resolution in resolutions: sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution)) if umap: sc.tl.umap(adata) adata = adata.raw.to_adata() if not issparse(adata.X): adata.X = csr_matrix(adata.X) if save: resolutions = '_'.join([str(item) for item in resolutions]) adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap)) elif modality == 'adt': if not is_log: sc.pp.calculate_qc_metrics(adata,percent_top=None,inplace=True,log1p=False) adata.X = make_sure_mat_sparse(Normalization.CLR_normalization(make_sure_mat_dense(adata.X))) sc.tl.pca(adata,n_comps=pca_n_comps) sc.pp.neighbors(adata) for resolution in resolutions: sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution)) if umap: sc.tl.umap(adata) if not issparse(adata.X): adata.X = csr_matrix(adata.X) if save: resolutions = '_'.join([str(item) for item in resolutions]) adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap)) else: sc.tl.pca(adata,n_comps=pca_n_comps) sc.pp.neighbors(adata) for resolution in resolutions: sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution)) if umap: sc.tl.umap(adata) if not issparse(adata.X): adata.X = csr_matrix(adata.X) if save: resolutions = '_'.join([str(item) for item in resolutions]) adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap)) elif modality == 'binary': # mutation #sc.tl.pca(adata,n_comps=pca_n_comps) sc.pp.neighbors(adata,use_rep='X',metric='jaccard') for resolution in resolutions: sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution)) if umap: sc.tl.umap(adata) if not issparse(adata.X): adata.X = csr_matrix(adata.X) if save: resolutions = '_'.join([str(item) for item in resolutions]) adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap)) elif modality == 'spatial': sc.pp.scale(adata) sc.pp.neighbors(adata) for resolution in resolutions: sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution)) if save: resolutions = '_'.join([str(item) for item in resolutions]) adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,False)) return 
adata def concat_rna_and_other(adata_rna,adata_other,umap,name,prefix): ''' concatenate rna adata and another modality's adata object :param adata_rna: AnnData :param adata_other: Anndata :param umap: string, whose umap to use, either 'rna' or 'other' :param name: string, the name of other modality, for example, 'adt' or 'atac' :param prefix: string, the prefix added in front of features from other modality, by scTriangulate convertion, adt will be 'AB_', atac will be ''. :return adata_combine: Anndata Examples:: from sctriangulate.preprocessing import concat_rna_and_other concat_rna_and_other(adata_rna,adata_adt,umap='rna',name='adt',prefix='AB_') ''' adata_rna = adata_rna.copy() adata_other = adata_other.copy() # remove layers, [!obsm], varm, obsp, varp, raw for adata in [adata_rna,adata_other]: del adata.layers del adata.varm del adata.obsp del adata.varp del adata.raw adata_other = adata_other[adata_rna.obs_names,:] # make sure the obs order is the same adata_other.var_names = [prefix + item for item in adata_other.var_names] adata_combine = ad.concat([adata_rna,adata_other],axis=1,join='outer',merge='first',label='modality',keys=['rna','{}'.format(name)]) if umap == 'rna': adata_combine.obsm['X_umap'] = adata_rna.obsm['X_umap'] elif umap == 'other': adata_combine.obsm['X_umap'] = adata_other.obsm['X_umap'] if not issparse(adata_combine.X): adata_combine.X = csr_matrix(adata_combine.X) return adata_combine def nca_embedding(adata,nca_n_components,label,method,max_iter=50,plot=True,save=True,format='pdf',legend_loc='on data',n_top_genes=None,hv_features=None,add_features=None): ''' Doing Neighborhood component ananlysis (NCA), so it is a supervised PCA that takes the label from the annotation, and try to generate a UMAP embedding that perfectly separate the labelled clusters. 
:param adata: the Anndata :param nca_n_components: recommend to be 10 based on `Ref <https://www.nature.com/articles/s41586-021-03969-3>`_ :param label: string, the column name which contains the label information :param method: either 'umap' or 'tsne' :param max_iter: for the NCA, default is 50, it is generally good enough :param plot: whether to plot the umap/tsne or not :param save: whether to save the plot or not :param format: the saved format, default is 'pdf' :param legend_loc: 'on data' or 'right margin' :param n_top_genes: how many hypervariable genes to choose for NCA, recommended 3000 or 5000, default is None, means there will be other features to add, multimodal setting :param hv_features: a list contains the user-supplied hypervariable genes/features, in multimodal setting, this can be [rna genes] + [ADT protein] :param add_features: this should be another adata contains features from other modalities, or None means just for RNA Example:: from sctriangulate.preprocessing import nca_embedding # only RNA nca_embedding(adata,nca_n_components=10,label='annotation1',method='umap',n_top_genes=3000) # RNA + ADT # list1 contains [gene features that are variable] and [ADT features that are variable] nca_embedding(adata_rna,nca_n_components=10,label='annotation1',method='umap',n_top_genes=3000,hv_features=list1, add_features=adata_adt) ''' from sklearn.neighbors import NeighborhoodComponentsAnalysis adata = adata if n_top_genes is not None: sc.pp.highly_variable_genes(adata,flavor='seurat',n_top_genes=n_top_genes) else: if add_features is not None: # first add the features, input should be anndata adata = concat_rna_and_other(adata,add_features,umap=None,name='add_features',prefix='add_features_') if hv_features is not None: # custom hv tmp = pd.Series(index=adata.var_names,data=np.full(len(adata.var_names),fill_value=False)) tmp.loc[hv_features] = True adata.var['highly_variable'] = tmp.values adata.raw = adata adata = adata[:,adata.var['highly_variable']] X = make_sure_mat_dense(adata.X) y = adata.obs[label].values nca = NeighborhoodComponentsAnalysis(n_components=nca_n_components,max_iter=max_iter) embed = nca.fit_transform(X,y) # (n_cells,n_components) adata.obsm['X_nca'] = embed adata = adata.raw.to_adata() if method == 'umap': sc.pp.neighbors(adata,use_rep='X_nca') sc.tl.umap(adata) sc.pl.umap(adata,color=label,frameon=False,legend_loc=legend_loc) if save: plt.savefig(os.path.join('.','nca_embedding_{}_{}.{}'.format(label,method,format)),bbox_inches='tight') plt.close() elif method == 'tsne': sc.tl.tsne(adata,use_rep='X_nca') sc.pl.tsne(adata,color=label,frameon=False,legend_loc=legend_loc) if save: plt.savefig(os.path.join('.','nca_embedding_{}_{}.{}'.format(label,method,format)),bbox_inches='tight') plt.close() adata.X = make_sure_mat_sparse(adata.X) return adata def umap_dual_view_save(adata,cols): ''' generate a pdf file with two umap up and down, one is with legend on side, another is with legend on data. More importantly, this allows you to generate multiple columns iteratively. :param adata: Anndata :param cols: list, all columns from which we want to draw umap. 
Examples:: from sctriangulate.preprocessing import umap_dual_view_save umap_dual_view_save(adata,cols=['annotation1','annotation2','total_counts']) ''' for col in cols: fig,ax = plt.subplots(nrows=2,ncols=1,figsize=(8,20),gridspec_kw={'hspace':0.3}) # for final_annotation sc.pl.umap(adata,color=col,frameon=False,ax=ax[0]) sc.pl.umap(adata,color=col,frameon=False,legend_loc='on data',legend_fontsize=5,ax=ax[1]) plt.savefig('./umap_dual_view_{}.pdf'.format(col),bbox_inches='tight') plt.close() def just_log_norm(adata): sc.pp.normalize_total(adata,target_sum=1e4) sc.pp.log1p(adata) return adata def format_find_concat(adata,canonical_chr_only=True,gtf_file='gencode.v38.annotation.gtf',key_added='gene_annotation',**kwargs): ''' this is a wrapper function to add nearest genes to your ATAC peaks or bins. For instance, if the peak is chr1:55555-55566, it will be annotated as chr1:55555-55566_gene1;gene2 :param adata: The anndata, the var_names is the peak/bin, please make sure the format is like chr1:55555-55566 :param canonical_chr_only: boolean, default to True, means only contain features on canonical chromosomes. for human, it is chr1-22 and X,Y :param gtf_file: the path to the gtf files, we provide the hg38 on this `google drive link <https://drive.google.com/file/d/11gbJl2-wZr3LbpWaU9RiUAGPebqWYi1z/view?usp=sharing>`_ to download :param key_added: string, the column name where the gene annotation will be inserted to adata.var, default is 'gene_annotation' :return adata: Anndata, the gene annotation will be added to var, and the var_name will be suffixed with gene annotation, if canonical_chr_only is True, then only features on canonical chromsome will be retained. Example:: adata = format_find_concat(adata) ''' adata= reformat_peak(adata,canonical_chr_only=canonical_chr_only) find_genes(adata,gtf_file=gtf_file,key_added=key_added,**kwargs) adata.var_names = [name + '_' + gene for name,gene in zip(adata.var_names,adata.var[key_added])] return adata class GeneConvert(object): ''' A collection of gene symbol conversion functions. Now support: 1. ensemblgene id to gene symbol. 
''' @staticmethod def ensemblgene_to_symbol(query,species): ''' Examples:: from sctriangulate.preprocessing import GeneConvert converted_list = GeneConvert.ensemblgene_to_symbol(['ENSG00000010404','ENSG00000010505'],species='human') ''' # assume query is a list, will also return a list import mygene mg = mygene.MyGeneInfo() out = mg.querymany(query,scopes='ensemblgene',fileds='symbol',species=species,returnall=True,as_dataframe=True,df_index=True) result = out['out']['symbol'].fillna('unknown_gene').tolist() try: assert len(query) == len(result) except AssertionError: # have duplicate results df = out['out'] df_unique = df.loc[~df.index.duplicated(),:] result = df_unique['symbol'].fillna('unknown_gene').tolist() return result def dual_gene_plot(adata,gene1,gene2,s=8,save=True,format='pdf',dir='.',umap_lim=None): from scipy.sparse import issparse if issparse(adata.X): adata.X = adata.X.toarray() index1 = np.where(adata.var_names == gene1)[0][0] index2 = np.where(adata.var_names == gene2)[0][0] exp1 = adata.X[:,index1] exp2 = adata.X[:,index2] color = [] for i in range(len(exp1)): if exp1[i] > 0 and exp2[i] > 0: color.append('#F2DE77') elif exp1[i] > 0 and exp2[i] == 0: color.append('#5ABF9A') elif exp1[i] == 0 and exp2[i] > 0: color.append('#F25C69') else: color.append('lightgrey') fig, ax = plt.subplots() if umap_lim is not None: ax.set_xlim(umap_lim[0]) ax.set_ylim(umap_lim[1]) ax.scatter(x=adata.obsm['X_umap'][:,0],y=adata.obsm['X_umap'][:,1],s=s,c=color) import matplotlib.lines as mlines ax.legend(handles=[mlines.Line2D([],[],marker='o',color=i,linestyle='') for i in ['#F2DE77','#5ABF9A','#F25C69','lightgrey']], labels=['Both','{}'.format(gene1),'{}'.format(gene2),'None'],frameon=False,loc='upper left',bbox_to_anchor=[1,1]) if save: plt.savefig(os.path.join(dir,'sctri_dual_gene_plot_{}_{}.{}'.format(gene1,gene2,format)),bbox_inches='tight') plt.close() return ax def multi_gene_plot(adata,genes,s=8,save=True,format='pdf',dir='.',umap_lim=None): from scipy.sparse import issparse if issparse(adata.X): adata.X = adata.X.toarray() exp_list = [] for gene in genes: index_gene = np.where(adata.var_names == gene)[0][0] exp_gene = adata.X[:,index_gene] exp_list.append(exp_gene) color = [] for i in range(len(exp_list[0])): if len(genes) == 3: c = ['#04BFBF','#83A603','#F7766D'] elif len(genes) == 4: c = ['#04BFBF','#83A603','#F7766D','#E36DF2'] elif len(genes) == 5: c = ['#04BFBF','#83A603','#F7766D','#E36DF2','#A69B03'] b = '#BABABA' l_exp = np.array([exp[i] for exp in exp_list]) n_exp = np.count_nonzero(l_exp > 0) if n_exp > 1: color.append(c[np.where(l_exp==l_exp.max())[0][0]]) elif n_exp == 1: color.append(c[np.where(l_exp>0)[0][0]]) elif n_exp == 0: color.append(b) fig, ax = plt.subplots() if umap_lim is not None: ax.set_xlim(umap_lim[0]) ax.set_ylim(umap_lim[1]) ax.scatter(x=adata.obsm['X_umap'][:,0],y=adata.obsm['X_umap'][:,1],s=s,c=color) import matplotlib.lines as mlines ax.legend(handles=[mlines.Line2D([],[],marker='o',color=i,linestyle='') for i in c+[b]], labels=genes + ['None'],frameon=False, loc='upper left',bbox_to_anchor=[1,1]) if save: output = '_'.join(genes) plt.savefig(os.path.join(dir,'sctri_multi_gene_plot_{}.{}'.format(output,format)),bbox_inches='tight') plt.close() return ax def make_sure_mat_dense(mat): ''' make sure a matrix is dense :param mat: ndarary :return mat: ndarray (dense) Examples:: mat = make_sure_mat_dense(mat) ''' if not issparse(mat): pass else: mat = mat.toarray() return mat def make_sure_mat_sparse(mat): # will be csr if the input mat is a dense array ''' 
make sure a matrix is sparse :param mat: ndarary :return mat: ndarray (sparse) Examples:: mat = make_sure_mat_dense(mat) ''' if not issparse(mat): mat = csr_matrix(mat) else: pass return mat class Normalization(object): ''' a series of Normalization functions Now support: 1. CLR normalization 2. total count normalization (CPTT, CPM) 3. GMM normalization ''' # matrix should be cell x feature, expecting a ndarray @staticmethod def CLR_normalization(mat): ''' Examples:: from sctriangulate.preprocessing import Normalization post_mat = Normalization.CLR_normalization(pre_mat) ''' from scipy.stats import gmean gmeans = gmean(mat+1,axis=1).reshape(-1,1) post = np.log(mat/gmeans + 1) return post @staticmethod def total_normalization(mat,target=1e4): ''' Examples:: from sctriangulate.preprocessing import Normalization post_mat = Normalization.total_normalization(pre_mat) ''' total = np.sum(mat,axis=1).reshape(-1,1) sf = total/target post = np.log(mat/sf + 1) return post @staticmethod def GMM_normalization(mat): ''' Examples:: from sctriangulate.preprocessing import Normalization post_mat = Normalization.GMM_normalization(pre_mat) ''' mat = Normalization.total_normalization(mat) from sklearn.mixture import GaussianMixture model = GaussianMixture(n_components=2,random_state=0) model.fit(mat) means = model.means_ # (n_components,n_features) bg_index = np.argmin(means.mean(axis=1)) bg_mean = means[bg_index,:].reshape(1,-1) post = mat - bg_mean return post def gene_activity_count_matrix_new_10x(fall_in_promoter,fall_in_gene,valid=None): ''' Full explanation please refer to ``gene_activity_count_matrix_old_10x`` Examples:: from sctriangulate.preprocessing import gene_activity_count_matrix_new_10x gene_activity_count_matrix_new_10x(fall_in_promoter,fall_in_gene,valid=None) ''' gene_promoter = pd.read_csv(fall_in_promoter,sep='\t',header=None) gene_body =
pd.read_csv(fall_in_gene,sep='\t',header=None)
pandas.read_csv
""" Clean a DataFrame column containing text data. """ import re import string from functools import partial, update_wrapper from typing import Any, Callable, Dict, List, Optional, Set, Union from unicodedata import normalize import dask.dataframe as dd import numpy as np import pandas as pd from ..assets.english_stopwords import english_stopwords from .utils import NULL_VALUES, to_dask REGEX_BRACKETS = { "angle": re.compile(r"(\<)[^<>]*(\>)"), "curly": re.compile(r"(\{)[^{}]*(\})"), "round": re.compile(r"(\()[^()]*(\))"), "square": re.compile(r"(\[)[^\[\]]*(\])"), } REGEX_DIGITS = re.compile(r"\d+") REGEX_DIGITS_BLOCK = re.compile(r"\b\d+\b") REGEX_HTML = re.compile(r"<[A-Za-z/][^>]*>|&(?:[a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});") REGEX_PUNCTUATION = re.compile(rf"[{re.escape(string.punctuation)}]") REGEX_URL = re.compile(r"(?:https?://|www\.)\S+") REGEX_WHITESPACE = re.compile(r"[\n\t]|[ ]{2,}") def clean_text( df: Union[pd.DataFrame, dd.DataFrame], column: str, pipeline: Optional[List[Dict[str, Any]]] = None, stopwords: Optional[Set[str]] = None, ) -> pd.DataFrame: """ Clean text data in a DataFrame column. Read more in the :ref:`User Guide <clean_text_user_guide>`. Parameters ---------- df A pandas or Dask DataFrame containing the data to be cleaned. column The name of the column containing text data. pipeline A list of cleaning functions to be applied to the column. If None, use the default pipeline. See the :ref:`User Guide <clean_text_custom_pipeline>` for more information on customizing the pipeline. (default: None) stopwords A set of words to be removed from the column. If None, use NLTK's stopwords. (default: None) Examples -------- Clean a column of text data using the default pipeline. >>> df = pd.DataFrame({"text": ["This show was an amazing, fresh & innovative idea in the \ 70's when it first aired."]}) >>> clean_text(df, 'text') text 0 show amazing fresh innovative idea first aired """ df = to_dask(df) pipe = _get_default_pipeline(stopwords) if not pipeline else _get_custom_pipeline(pipeline) for func in pipe: df[column] = df[column].apply(func, meta=object) df = df.compute() return df def default_text_pipeline() -> List[Dict[str, Any]]: """ Return a list of dictionaries representing the functions in the default pipeline. Use as a template for creating a custom pipeline. Read more in the :ref:`User Guide <clean_text_user_guide>`. Examples -------- >>> default_text_pipeline() [{'operator': 'fillna'}, {'operator': 'lowercase'}, {'operator': 'remove_digits'}, {'operator': 'remove_html'}, {'operator': 'remove_urls'}, {'operator': 'remove_punctuation'}, {'operator': 'remove_accents'}, {'operator': 'remove_stopwords', 'parameters': {'stopwords': None}}, {'operator': 'remove_whitespace'}] """ return [ {"operator": "fillna"}, {"operator": "lowercase"}, {"operator": "remove_digits"}, {"operator": "remove_html"}, {"operator": "remove_urls"}, {"operator": "remove_punctuation"}, {"operator": "remove_accents"}, {"operator": "remove_stopwords", "parameters": {"stopwords": None}}, {"operator": "remove_whitespace"}, ] def _get_default_pipeline( stopwords: Optional[Set[str]] = None, ) -> List[Callable[..., Any]]: """ Return a list of functions defining the default pipeline. 
""" return [ _fillna, _lowercase, _remove_digits, _remove_html, _remove_urls, _remove_punctuation, _remove_accents, lambda x: _remove_stopwords(x, stopwords), _remove_whitespace, ] def _get_custom_pipeline(pipeline: List[Dict[str, Any]]) -> List[Callable[..., Any]]: """ Return a list of functions defining a custom pipeline. """ func_dict = _get_func_dict() custom_pipeline: List[Callable[..., Any]] = [] for component in pipeline: # Check whether function is built in or user defined operator = ( func_dict[component["operator"]] if isinstance(component["operator"], str) else component["operator"] ) # Append the function to the pipeline # If parameters are specified, create a partial function to lock in # the values and prevent them from being overwritten in subsequent loops if "parameters" in component: custom_pipeline.append(_wrapped_partial(operator, component["parameters"])) else: custom_pipeline.append(operator) return custom_pipeline def _get_func_dict() -> Dict[str, Callable[..., Any]]: """ Return a mapping of strings to function names. """ return { "fillna": _fillna, "lowercase": _lowercase, "sentence_case": _sentence_case, "title_case": _title_case, "uppercase": _uppercase, "remove_accents": _remove_accents, "remove_bracketed": _remove_bracketed, "remove_digits": _remove_digits, "remove_html": _remove_html, "remove_prefixed": _remove_prefixed, "remove_punctuation": _remove_punctuation, "remove_stopwords": _remove_stopwords, "remove_urls": _remove_urls, "remove_whitespace": _remove_whitespace, "replace_bracketed": _replace_bracketed, "replace_digits": _replace_digits, "replace_prefixed": _replace_prefixed, "replace_punctuation": _replace_punctuation, "replace_stopwords": _replace_stopwords, "replace_text": _replace_text, "replace_urls": _replace_urls, } def _fillna(text: Any, value: Any = np.nan) -> Any: """ Replace all null values with NaN (default) or the supplied value. """ return value if text in NULL_VALUES else str(text) def _lowercase(text: Any) -> Any: """ Convert all characters to lowercase. """ return str(text).lower() if pd.notna(text) else text def _sentence_case(text: Any) -> Any: """ Convert first character to uppercase and remaining to lowercase. """ return str(text).capitalize() if pd.notna(text) else text def _title_case(text: Any) -> Any: """ Convert first character of each word to uppercase and remaining to lowercase. """ return str(text).title() if pd.notna(text) else text def _uppercase(text: Any) -> Any: """ Convert all characters to uppercase. """ return str(text).upper() if pd.notna(text) else text def _remove_accents(text: Any) -> Any: """ Remove accents (diacritic marks). """ return ( normalize("NFD", str(text)).encode("ascii", "ignore").decode("ascii") if pd.notna(text) else text ) def _remove_bracketed(text: Any, brackets: Union[str, Set[str]], inclusive: bool = True) -> Any: """ Remove text between brackets. Parameters ---------- brackets The bracket style. - "angle": <> - "curly": {} - "round": () - "square": [] inclusive If True (default), remove the brackets along with the text in between. Otherwise, keep the brackets. """ if pd.isna(text): return text text = str(text) value = "" if inclusive else r"\g<1>\g<2>" if isinstance(brackets, set): for bracket in brackets: text = re.sub(REGEX_BRACKETS[bracket], value, text) else: text = re.sub(REGEX_BRACKETS[brackets], value, text) return text def _remove_digits(text: Any) -> Any: """ Remove all digits. 
""" return re.sub(REGEX_DIGITS, "", str(text)) if pd.notna(text) else text def _remove_html(text: Any) -> Any: """ Remove HTML tags. """ return re.sub(REGEX_HTML, "", str(text)) if pd.notna(text) else text def _remove_prefixed(text: Any, prefix: Union[str, Set[str]]) -> Any: """ Remove substrings that start with the prefix(es). """ if pd.isna(text): return text text = str(text) if isinstance(prefix, set): for pre in prefix: text = re.sub(rf"{pre}\S+", "", text) else: text = re.sub(rf"{prefix}\S+", "", text) return text def _remove_punctuation(text: Any) -> Any: """ Remove punctuation marks. """ return re.sub(REGEX_PUNCTUATION, " ", str(text)) if pd.notna(text) else text def _remove_stopwords(text: Any, stopwords: Optional[Set[str]] = None) -> Any: """ Remove a set of words from the text. If `stopwords` is None (default), use NLTK's stopwords. """ if pd.isna(text): return text stopwords = english_stopwords if not stopwords else stopwords return " ".join(word for word in str(text).split() if word.lower() not in stopwords) def _remove_urls(text: Any) -> Any: """ Remove URLS. """ return re.sub(REGEX_URL, "", str(text)) if
pd.notna(text)
pandas.notna
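As a usage illustration of the custom-pipeline route described in the docstrings above: each entry names a built-in operator from _get_func_dict, and any "parameters" are bound before the pipeline runs. The DataFrame, column name, and stopword set below are made-up examples.

custom_pipeline = [
    {"operator": "fillna", "parameters": {"value": ""}},
    {"operator": "lowercase"},
    {"operator": "remove_punctuation"},
    {"operator": "remove_stopwords", "parameters": {"stopwords": {"the", "a", "an"}}},
    {"operator": "remove_whitespace"},
]
# "review_text" is a hypothetical column name.
df_clean = clean_text(df, "review_text", pipeline=custom_pipeline)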
""" This script reads the docking_benchmark_dataset.csv file generated by 01_generate_benchmark_dataset.py and calculates a similarity matrix over all structures using the RMSD of the KLIFS binding pocket. """ from pathlib import Path from typing import Iterable from openeye import oechem import pandas as pd CACHE_DIR = Path("../data/.cache") klifs_residues_dict = {} structure_dict = {} def get_klifs_residues(structure, structure_klifs_id, klifs_residues_dict=klifs_residues_dict): """Get a list of all KLIFS residues in the format 'ALA123'.""" from opencadd.databases.klifs import setup_remote from kinoml.modeling.OEModeling import residue_ids_to_residue_names, remove_non_protein if structure_klifs_id in klifs_residues_dict.keys(): return klifs_residues_dict[structure_klifs_id] remote = setup_remote() protein = remove_non_protein(structure, remove_water=True) klifs_residue_ids = [ residue_id for residue_id in remote.pockets.by_structure_klifs_id(structure_klifs_id)["residue.id"] if residue_id != "_" ] klifs_residue_names = residue_ids_to_residue_names(protein, klifs_residue_ids) klifs_residues = [ residue_name + residue_id for residue_name, residue_id in zip(klifs_residue_names, klifs_residue_ids) ] klifs_residues_dict[structure_klifs_id] = klifs_residues return klifs_residues def load_pdb_entry(pdb_id, chain_id, alternate_location, directory=CACHE_DIR): """Load a PDB entry as OpenEye molecule.""" from kinoml.databases.pdb import download_pdb_structure from kinoml.modeling.OEModeling import read_molecules, select_chain, select_altloc structure_path = download_pdb_structure(pdb_id, directory) structure = read_molecules(structure_path)[0] structure = select_chain(structure, chain_id) if alternate_location != "-": try: structure = select_altloc(structure, alternate_location) except ValueError: # KLIFS contains erroneously annotated altloc information print( f"PDB entry {pdb_id} does not contain " f"alternate location {alternate_location}. " "Continuing without selecting alternate location." ) return structure def superpose_proteins( reference_protein: oechem.OEMolBase, fit_protein: oechem.OEMolBase, residues: Iterable = tuple(), chain_id: str = " ", insertion_code: str = " " ) -> oechem.OEMolBase: """ Superpose a protein structure onto a reference protein. The superposition can be customized to consider only the specified residues. Parameters ---------- reference_protein: oechem.OEMolBase An OpenEye molecule holding a protein structure which will be used as reference during superposition. fit_protein: oechem.OEMolBase An OpenEye molecule holding a protein structure which will be superposed onto the reference protein. residues: Iterable of str Residues that should be used during superposition in format "GLY123". chain_id: str Chain identifier for residues that should be used during superposition. insertion_code: str Insertion code for residues that should be used during superposition. Returns ------- superposed_protein: oechem.OEMolBase An OpenEye molecule holding the superposed protein structure. 
""" from openeye import oespruce # do not modify input superposed_protein = fit_protein.CreateCopy() # set superposition method options = oespruce.OESuperpositionOptions() if len(residues) == 0: options.SetSuperpositionType(oespruce.OESuperpositionType_Global) else: options.SetSuperpositionType(oespruce.OESuperpositionType_Site) for residue in residues: options.AddSiteResidue(f"{residue[:3]}:{residue[3:]}:{insertion_code}:{chain_id}") # perform superposition superposition = oespruce.OEStructuralSuperposition( reference_protein, superposed_protein, options ) superposition.Transform(superposed_protein) rmsd = superposition.GetRMSD() return rmsd, superposed_protein docking_benchmark_dataset =
pd.read_csv("../data/docking_benchmark_dataset.csv", index_col=0)
pandas.read_csv
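For orientation, a hedged sketch of how the helpers above might be combined to superpose one structure onto another over its KLIFS pocket; the PDB codes, chain, and structure KLIFS ID are placeholders, not rows from docking_benchmark_dataset.csv.

# Placeholder identifiers, for illustration only.
reference = load_pdb_entry("1ABC", chain_id="A", alternate_location="-")
fit = load_pdb_entry("2XYZ", chain_id="A", alternate_location="-")

# Superpose on the reference's KLIFS binding-pocket residues and read back the pocket RMSD.
pocket_residues = get_klifs_residues(reference, structure_klifs_id=1234)
rmsd, fit_superposed = superpose_proteins(reference, fit, residues=pocket_residues, chain_id="A")
print(f"KLIFS pocket RMSD: {rmsd:.2f}")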
#!/usr/bin/env python
# coding: utf-8

# In[ ]:

import os
import sys
import pandas as pd
import numpy as np

# In[ ]:

import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import seaborn as sns
#%matplotlib inline

# In[ ]:

# load the input file
filepath = sys.argv[1]
filename = sys.argv[2]
#filepath = "C:/Users/JIHYEON_KIM/Documents/workspace/rda/files/"
#filename = "input3.csv"

data =
pd.read_csv(filepath + "/" + filename, encoding='UTF-8')
pandas.read_csv
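Since the file path and name come from sys.argv above, the script is presumably run from the command line; a hypothetical invocation (the script name is an assumption) and its direct equivalent:

# python rda_plot.py /path/to/files input3.csv    (hypothetical script name)
# which is equivalent to loading the table directly:
data = pd.read_csv("/path/to/files" + "/" + "input3.csv", encoding='UTF-8')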
# -*- coding: utf-8 -*- import re import warnings from datetime import timedelta from itertools import product import pytest import numpy as np import pandas as pd from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex, compat, date_range, period_range) from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY from pandas.errors import PerformanceWarning, UnsortedIndexError from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.indexes.base import InvalidIndexError from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike from pandas._libs.tslib import Timestamp import pandas.util.testing as tm from pandas.util.testing import assert_almost_equal, assert_copy from .common import Base class TestMultiIndex(Base): _holder = MultiIndex _compat_props = ['shape', 'ndim', 'size'] def setup_method(self, method): major_axis = Index(['foo', 'bar', 'baz', 'qux']) minor_axis = Index(['one', 'two']) major_labels = np.array([0, 0, 1, 2, 3, 3]) minor_labels = np.array([0, 1, 0, 1, 0, 1]) self.index_names = ['first', 'second'] self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis], labels=[major_labels, minor_labels ], names=self.index_names, verify_integrity=False)) self.setup_indices() def create_index(self): return self.index def test_can_hold_identifiers(self): idx = self.create_index() key = idx[0] assert idx._can_hold_identifiers_and_holds_name(key) is True def test_boolean_context_compat2(self): # boolean context compat # GH7897 i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)]) i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)]) common = i1.intersection(i2) def f(): if common: pass tm.assert_raises_regex(ValueError, 'The truth value of a', f) def test_labels_dtypes(self): # GH 8456 i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) assert i.labels[0].dtype == 'int8' assert i.labels[1].dtype == 'int8' i = MultiIndex.from_product([['a'], range(40)]) assert i.labels[1].dtype == 'int8' i = MultiIndex.from_product([['a'], range(400)]) assert i.labels[1].dtype == 'int16' i = MultiIndex.from_product([['a'], range(40000)]) assert i.labels[1].dtype == 'int32' i = pd.MultiIndex.from_product([['a'], range(1000)]) assert (i.labels[0] >= 0).all() assert (i.labels[1] >= 0).all() def test_where(self): i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) def f(): i.where(True) pytest.raises(NotImplementedError, f) def test_where_array_like(self): i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) klasses = [list, tuple, np.array, pd.Series] cond = [False, True] for klass in klasses: def f(): return i.where(klass(cond)) pytest.raises(NotImplementedError, f) def test_repeat(self): reps = 2 numbers = [1, 2, 3] names = np.array(['foo', 'bar']) m = MultiIndex.from_product([ numbers, names], names=names) expected = MultiIndex.from_product([ numbers, names.repeat(reps)], names=names) tm.assert_index_equal(m.repeat(reps), expected) with tm.assert_produces_warning(FutureWarning): result = m.repeat(n=reps) tm.assert_index_equal(result, expected) def test_numpy_repeat(self): reps = 2 numbers = [1, 2, 3] names = np.array(['foo', 'bar']) m = MultiIndex.from_product([ numbers, names], names=names) expected = MultiIndex.from_product([ numbers, names.repeat(reps)], names=names) tm.assert_index_equal(np.repeat(m, reps), expected) msg = "the 'axis' parameter is not supported" tm.assert_raises_regex( ValueError, msg, np.repeat, m, reps, axis=1) def test_set_name_methods(self): # so long as these are synonyms, we don't need to test set_names assert self.index.rename == 
        self.index.set_names
        new_names = [name + "SUFFIX" for name in self.index_names]
        ind = self.index.set_names(new_names)
        assert self.index.names == self.index_names
        assert ind.names == new_names
        with tm.assert_raises_regex(ValueError, "^Length"):
            ind.set_names(new_names + new_names)
        new_names2 = [name + "SUFFIX2" for name in new_names]
        res = ind.set_names(new_names2, inplace=True)
        assert res is None
        assert ind.names == new_names2

        # set names for specific level (# GH7792)
        ind = self.index.set_names(new_names[0], level=0)
        assert self.index.names == self.index_names
        assert ind.names == [new_names[0], self.index_names[1]]

        res = ind.set_names(new_names2[0], level=0, inplace=True)
        assert res is None
        assert ind.names == [new_names2[0], self.index_names[1]]

        # set names for multiple levels
        ind = self.index.set_names(new_names, level=[0, 1])
        assert self.index.names == self.index_names
        assert ind.names == new_names

        res = ind.set_names(new_names2, level=[0, 1], inplace=True)
        assert res is None
        assert ind.names == new_names2

    @pytest.mark.parametrize('inplace', [True, False])
    def test_set_names_with_nlevel_1(self, inplace):
        # GH 21149
        # Ensure that .set_names for MultiIndex with
        # nlevels == 1 does not raise any errors
        expected = pd.MultiIndex(levels=[[0, 1]],
                                 labels=[[0, 1]],
                                 names=['first'])
        m = pd.MultiIndex.from_product([[0, 1]])
        result = m.set_names('first', level=0, inplace=inplace)

        if inplace:
            result = m

        tm.assert_index_equal(result, expected)

    def test_set_levels_labels_directly(self):
        # setting levels/labels directly raises AttributeError
        levels = self.index.levels
        new_levels = [[lev + 'a' for lev in level] for level in levels]

        labels = self.index.labels
        major_labels, minor_labels = labels
        major_labels = [(x + 1) % 3 for x in major_labels]
        minor_labels = [(x + 1) % 1 for x in minor_labels]
        new_labels = [major_labels, minor_labels]

        with pytest.raises(AttributeError):
            self.index.levels = new_levels

        with pytest.raises(AttributeError):
            self.index.labels = new_labels

    def test_set_levels(self):
        # side note - you probably wouldn't want to use levels and labels
        # directly like this - but it is possible.
        levels = self.index.levels
        new_levels = [[lev + 'a' for lev in level] for level in levels]

        def assert_matching(actual, expected, check_dtype=False):
            # avoid specifying internal representation
            # as much as possible
            assert len(actual) == len(expected)
            for act, exp in zip(actual, expected):
                act = np.asarray(act)
                exp = np.asarray(exp)
                tm.assert_numpy_array_equal(act, exp,
                                            check_dtype=check_dtype)

        # level changing [w/o mutation]
        ind2 = self.index.set_levels(new_levels)
        assert_matching(ind2.levels, new_levels)
        assert_matching(self.index.levels, levels)

        # level changing [w/ mutation]
        ind2 = self.index.copy()
        inplace_return = ind2.set_levels(new_levels, inplace=True)
        assert inplace_return is None
        assert_matching(ind2.levels, new_levels)

        # level changing specific level [w/o mutation]
        ind2 = self.index.set_levels(new_levels[0], level=0)
        assert_matching(ind2.levels, [new_levels[0], levels[1]])
        assert_matching(self.index.levels, levels)

        ind2 = self.index.set_levels(new_levels[1], level=1)
        assert_matching(ind2.levels, [levels[0], new_levels[1]])
        assert_matching(self.index.levels, levels)

        # level changing multiple levels [w/o mutation]
        ind2 = self.index.set_levels(new_levels, level=[0, 1])
        assert_matching(ind2.levels, new_levels)
        assert_matching(self.index.levels, levels)

        # level changing specific level [w/ mutation]
        ind2 = self.index.copy()
        inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
        assert inplace_return is None
        assert_matching(ind2.levels, [new_levels[0], levels[1]])
        assert_matching(self.index.levels, levels)

        ind2 = self.index.copy()
        inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
        assert inplace_return is None
        assert_matching(ind2.levels, [levels[0], new_levels[1]])
        assert_matching(self.index.levels, levels)

        # level changing multiple levels [w/ mutation]
        ind2 = self.index.copy()
        inplace_return = ind2.set_levels(new_levels, level=[0, 1],
                                         inplace=True)
        assert inplace_return is None
        assert_matching(ind2.levels, new_levels)
        assert_matching(self.index.levels, levels)

        # illegal level changing should not change levels
        # GH 13754
        original_index = self.index.copy()
        for inplace in [True, False]:
            with tm.assert_raises_regex(ValueError, "^On"):
                self.index.set_levels(['c'], level=0, inplace=inplace)
            assert_matching(self.index.levels, original_index.levels,
                            check_dtype=True)

            with tm.assert_raises_regex(ValueError, "^On"):
                self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
                                      inplace=inplace)
            assert_matching(self.index.labels, original_index.labels,
                            check_dtype=True)

            with tm.assert_raises_regex(TypeError, "^Levels"):
                self.index.set_levels('c', level=0, inplace=inplace)
            assert_matching(self.index.levels, original_index.levels,
                            check_dtype=True)

            with tm.assert_raises_regex(TypeError, "^Labels"):
                self.index.set_labels(1, level=0, inplace=inplace)
            assert_matching(self.index.labels, original_index.labels,
                            check_dtype=True)

    def test_set_labels(self):
        # side note - you probably wouldn't want to use levels and labels
        # directly like this - but it is possible.
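        # The checks below mirror test_set_levels: build shifted-but-still-valid
        # label codes, then verify that set_labels returns a modified copy by
        # default and only mutates the index when inplace=True.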
labels = self.index.labels major_labels, minor_labels = labels major_labels = [(x + 1) % 3 for x in major_labels] minor_labels = [(x + 1) % 1 for x in minor_labels] new_labels = [major_labels, minor_labels] def assert_matching(actual, expected): # avoid specifying internal representation # as much as possible assert len(actual) == len(expected) for act, exp in zip(actual, expected): act = np.asarray(act) exp = np.asarray(exp, dtype=np.int8) tm.assert_numpy_array_equal(act, exp) # label changing [w/o mutation] ind2 = self.index.set_labels(new_labels) assert_matching(ind2.labels, new_labels) assert_matching(self.index.labels, labels) # label changing [w/ mutation] ind2 = self.index.copy() inplace_return = ind2.set_labels(new_labels, inplace=True) assert inplace_return is None assert_matching(ind2.labels, new_labels) # label changing specific level [w/o mutation] ind2 = self.index.set_labels(new_labels[0], level=0) assert_matching(ind2.labels, [new_labels[0], labels[1]]) assert_matching(self.index.labels, labels) ind2 = self.index.set_labels(new_labels[1], level=1) assert_matching(ind2.labels, [labels[0], new_labels[1]]) assert_matching(self.index.labels, labels) # label changing multiple levels [w/o mutation] ind2 = self.index.set_labels(new_labels, level=[0, 1]) assert_matching(ind2.labels, new_labels) assert_matching(self.index.labels, labels) # label changing specific level [w/ mutation] ind2 = self.index.copy() inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True) assert inplace_return is None assert_matching(ind2.labels, [new_labels[0], labels[1]]) assert_matching(self.index.labels, labels) ind2 = self.index.copy() inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True) assert inplace_return is None assert_matching(ind2.labels, [labels[0], new_labels[1]]) assert_matching(self.index.labels, labels) # label changing multiple levels [w/ mutation] ind2 = self.index.copy() inplace_return = ind2.set_labels(new_labels, level=[0, 1], inplace=True) assert inplace_return is None assert_matching(ind2.labels, new_labels) assert_matching(self.index.labels, labels) # label changing for levels of different magnitude of categories ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)]) new_labels = range(129, -1, -1) expected = pd.MultiIndex.from_tuples( [(0, i) for i in new_labels]) # [w/o mutation] result = ind.set_labels(labels=new_labels, level=1) assert result.equals(expected) # [w/ mutation] result = ind.copy() result.set_labels(labels=new_labels, level=1, inplace=True) assert result.equals(expected) def test_set_levels_labels_names_bad_input(self): levels, labels = self.index.levels, self.index.labels names = self.index.names with tm.assert_raises_regex(ValueError, 'Length of levels'): self.index.set_levels([levels[0]]) with tm.assert_raises_regex(ValueError, 'Length of labels'): self.index.set_labels([labels[0]]) with tm.assert_raises_regex(ValueError, 'Length of names'): self.index.set_names([names[0]]) # shouldn't scalar data error, instead should demand list-like with tm.assert_raises_regex(TypeError, 'list of lists-like'): self.index.set_levels(levels[0]) # shouldn't scalar data error, instead should demand list-like with tm.assert_raises_regex(TypeError, 'list of lists-like'): self.index.set_labels(labels[0]) # shouldn't scalar data error, instead should demand list-like with tm.assert_raises_regex(TypeError, 'list-like'): self.index.set_names(names[0]) # should have equal lengths with tm.assert_raises_regex(TypeError, 'list of lists-like'): 
self.index.set_levels(levels[0], level=[0, 1]) with tm.assert_raises_regex(TypeError, 'list-like'): self.index.set_levels(levels, level=0) # should have equal lengths with tm.assert_raises_regex(TypeError, 'list of lists-like'): self.index.set_labels(labels[0], level=[0, 1]) with tm.assert_raises_regex(TypeError, 'list-like'): self.index.set_labels(labels, level=0) # should have equal lengths with tm.assert_raises_regex(ValueError, 'Length of names'): self.index.set_names(names[0], level=[0, 1]) with tm.assert_raises_regex(TypeError, 'string'): self.index.set_names(names, level=0) def test_set_levels_categorical(self): # GH13854 index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]]) for ordered in [False, True]: cidx = CategoricalIndex(list("bac"), ordered=ordered) result = index.set_levels(cidx, 0) expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]], labels=index.labels) tm.assert_index_equal(result, expected) result_lvl = result.get_level_values(0) expected_lvl = CategoricalIndex(list("bacb"), categories=cidx.categories, ordered=cidx.ordered) tm.assert_index_equal(result_lvl, expected_lvl) def test_metadata_immutable(self): levels, labels = self.index.levels, self.index.labels # shouldn't be able to set at either the top level or base level mutable_regex = re.compile('does not support mutable operations') with tm.assert_raises_regex(TypeError, mutable_regex): levels[0] = levels[0] with tm.assert_raises_regex(TypeError, mutable_regex): levels[0][0] = levels[0][0] # ditto for labels with tm.assert_raises_regex(TypeError, mutable_regex): labels[0] = labels[0] with tm.assert_raises_regex(TypeError, mutable_regex): labels[0][0] = labels[0][0] # and for names names = self.index.names with tm.assert_raises_regex(TypeError, mutable_regex): names[0] = names[0] def test_inplace_mutation_resets_values(self): levels = [['a', 'b', 'c'], [4]] levels2 = [[1, 2, 3], ['a']] labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]] mi1 = MultiIndex(levels=levels, labels=labels) mi2 = MultiIndex(levels=levels2, labels=labels) vals = mi1.values.copy() vals2 = mi2.values.copy() assert mi1._tuples is not None # Make sure level setting works new_vals = mi1.set_levels(levels2).values tm.assert_almost_equal(vals2, new_vals) # Non-inplace doesn't kill _tuples [implementation detail] tm.assert_almost_equal(mi1._tuples, vals) # ...and values is still same too tm.assert_almost_equal(mi1.values, vals) # Inplace should kill _tuples mi1.set_levels(levels2, inplace=True) tm.assert_almost_equal(mi1.values, vals2) # Make sure label setting works too labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]] exp_values = np.empty((6,), dtype=object) exp_values[:] = [(long(1), 'a')] * 6 # Must be 1d array of tuples assert exp_values.shape == (6,) new_values = mi2.set_labels(labels2).values # Not inplace shouldn't change tm.assert_almost_equal(mi2._tuples, vals2) # Should have correct values tm.assert_almost_equal(exp_values, new_values) # ...and again setting inplace should kill _tuples, etc mi2.set_labels(labels2, inplace=True) tm.assert_almost_equal(mi2.values, new_values) def test_copy_in_constructor(self): levels = np.array(["a", "b", "c"]) labels = np.array([1, 1, 2, 0, 0, 1, 1]) val = labels[0] mi = MultiIndex(levels=[levels, levels], labels=[labels, labels], copy=True) assert mi.labels[0][0] == val labels[0] = 15 assert mi.labels[0][0] == val val = levels[0] levels[0] = "PANDA" assert mi.levels[0][0] == val def test_set_value_keeps_names(self): # motivating example from #3742 lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 
'grethe'] lev2 = ['1', '2', '3'] * 2 idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number']) df = pd.DataFrame( np.random.randn(6, 4), columns=['one', 'two', 'three', 'four'], index=idx) df = df.sort_index() assert df._is_copy is None assert df.index.names == ('Name', 'Number') df.at[('grethe', '4'), 'one'] = 99.34 assert df._is_copy is None assert df.index.names == ('Name', 'Number') def test_copy_names(self): # Check that adding a "names" parameter to the copy is honored # GH14302 multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2']) multi_idx1 = multi_idx.copy() assert multi_idx.equals(multi_idx1) assert multi_idx.names == ['MyName1', 'MyName2'] assert multi_idx1.names == ['MyName1', 'MyName2'] multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2']) assert multi_idx.equals(multi_idx2) assert multi_idx.names == ['MyName1', 'MyName2'] assert multi_idx2.names == ['NewName1', 'NewName2'] multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2']) assert multi_idx.equals(multi_idx3) assert multi_idx.names == ['MyName1', 'MyName2'] assert multi_idx3.names == ['NewName1', 'NewName2'] def test_names(self): # names are assigned in setup names = self.index_names level_names = [level.name for level in self.index.levels] assert names == level_names # setting bad names on existing index = self.index tm.assert_raises_regex(ValueError, "^Length of names", setattr, index, "names", list(index.names) + ["third"]) tm.assert_raises_regex(ValueError, "^Length of names", setattr, index, "names", []) # initializing with bad names (should always be equivalent) major_axis, minor_axis = self.index.levels major_labels, minor_labels = self.index.labels tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex, levels=[major_axis, minor_axis], labels=[major_labels, minor_labels], names=['first']) tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex, levels=[major_axis, minor_axis], labels=[major_labels, minor_labels], names=['first', 'second', 'third']) # names are assigned index.names = ["a", "b"] ind_names = list(index.names) level_names = [level.name for level in index.levels] assert ind_names == level_names def test_astype(self): expected = self.index.copy() actual = self.index.astype('O') assert_copy(actual.levels, expected.levels) assert_copy(actual.labels, expected.labels) self.check_level_names(actual, expected.names) with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"): self.index.astype(np.dtype(int)) @pytest.mark.parametrize('ordered', [True, False]) def test_astype_category(self, ordered): # GH 18630 msg = '> 1 ndim Categorical are not supported at this time' with tm.assert_raises_regex(NotImplementedError, msg): self.index.astype(CategoricalDtype(ordered=ordered)) if ordered is False: # dtype='category' defaults to ordered=False, so only test once with tm.assert_raises_regex(NotImplementedError, msg): self.index.astype('category') def test_constructor_single_level(self): result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']], labels=[[0, 1, 2, 3]], names=['first']) assert isinstance(result, MultiIndex) expected = Index(['foo', 'bar', 'baz', 'qux'], name='first') tm.assert_index_equal(result.levels[0], expected) assert result.names == ['first'] def test_constructor_no_levels(self): tm.assert_raises_regex(ValueError, "non-zero number " "of levels/labels", MultiIndex, levels=[], labels=[]) both_re = re.compile('Must pass both levels and labels') with tm.assert_raises_regex(TypeError, both_re): MultiIndex(levels=[]) with 
tm.assert_raises_regex(TypeError, both_re): MultiIndex(labels=[]) def test_constructor_mismatched_label_levels(self): labels = [np.array([1]), np.array([2]), np.array([3])] levels = ["a"] tm.assert_raises_regex(ValueError, "Length of levels and labels " "must be the same", MultiIndex, levels=levels, labels=labels) length_error = re.compile('>= length of level') label_error = re.compile(r'Unequal label lengths: \[4, 2\]') # important to check that it's looking at the right thing. with tm.assert_raises_regex(ValueError, length_error): MultiIndex(levels=[['a'], ['b']], labels=[[0, 1, 2, 3], [0, 3, 4, 1]]) with tm.assert_raises_regex(ValueError, label_error): MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]]) # external API with tm.assert_raises_regex(ValueError, length_error): self.index.copy().set_levels([['a'], ['b']]) with tm.assert_raises_regex(ValueError, label_error): self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]]) def test_constructor_nonhashable_names(self): # GH 20527 levels = [[1, 2], [u'one', u'two']] labels = [[0, 0, 1, 1], [0, 1, 0, 1]] names = ((['foo'], ['bar'])) message = "MultiIndex.name must be a hashable type" tm.assert_raises_regex(TypeError, message, MultiIndex, levels=levels, labels=labels, names=names) # With .rename() mi = MultiIndex(levels=[[1, 2], [u'one', u'two']], labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=('foo', 'bar')) renamed = [['foor'], ['barr']] tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed) # With .set_names() tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed) @pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'], ['1', 'a', '1']]) def test_duplicate_level_names(self, names): # GH18872 pytest.raises(ValueError, pd.MultiIndex.from_product, [[0, 1]] * 3, names=names) # With .rename() mi = pd.MultiIndex.from_product([[0, 1]] * 3) tm.assert_raises_regex(ValueError, "Duplicated level name:", mi.rename, names) # With .rename(., level=) mi.rename(names[0], level=1, inplace=True) tm.assert_raises_regex(ValueError, "Duplicated level name:", mi.rename, names[:2], level=[0, 2]) def assert_multiindex_copied(self, copy, original): # Levels should be (at least, shallow copied) tm.assert_copy(copy.levels, original.levels) tm.assert_almost_equal(copy.labels, original.labels) # Labels doesn't matter which way copied tm.assert_almost_equal(copy.labels, original.labels) assert copy.labels is not original.labels # Names doesn't matter which way copied assert copy.names == original.names assert copy.names is not original.names # Sort order should be copied assert copy.sortorder == original.sortorder def test_copy(self): i_copy = self.index.copy() self.assert_multiindex_copied(i_copy, self.index) def test_shallow_copy(self): i_copy = self.index._shallow_copy() self.assert_multiindex_copied(i_copy, self.index) def test_view(self): i_view = self.index.view() self.assert_multiindex_copied(i_view, self.index) def check_level_names(self, index, names): assert [level.name for level in index.levels] == list(names) def test_changing_names(self): # names should be applied to levels level_names = [level.name for level in self.index.levels] self.check_level_names(self.index, self.index.names) view = self.index.view() copy = self.index.copy() shallow_copy = self.index._shallow_copy() # changing names should change level names on object new_names = [name + "a" for name in self.index.names] self.index.names = new_names self.check_level_names(self.index, new_names) # but not on copies self.check_level_names(view, 
                               level_names)
        self.check_level_names(copy, level_names)
        self.check_level_names(shallow_copy, level_names)

        # and copies shouldn't change original
        shallow_copy.names = [name + "c" for name in shallow_copy.names]
        self.check_level_names(self.index, new_names)

    def test_get_level_number_integer(self):
        self.index.names = [1, 0]
        assert self.index._get_level_number(1) == 0
        assert self.index._get_level_number(0) == 1
        pytest.raises(IndexError, self.index._get_level_number, 2)
        tm.assert_raises_regex(KeyError, 'Level fourth not found',
                               self.index._get_level_number, 'fourth')

    def test_from_arrays(self):
        arrays = []
        for lev, lab in zip(self.index.levels, self.index.labels):
            arrays.append(np.asarray(lev).take(lab))

        # list of arrays as input
        result = MultiIndex.from_arrays(arrays, names=self.index.names)
        tm.assert_index_equal(result, self.index)

        # infer correctly
        result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
                                         ['a', 'b']])
        assert result.levels[0].equals(Index([Timestamp('20130101')]))
        assert result.levels[1].equals(Index(['a', 'b']))

    def test_from_arrays_iterator(self):
        # GH 18434
        arrays = []
        for lev, lab in zip(self.index.levels, self.index.labels):
            arrays.append(np.asarray(lev).take(lab))

        # iterator as input
        result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
        tm.assert_index_equal(result, self.index)

        # invalid iterator input
        with tm.assert_raises_regex(
                TypeError, "Input must be a list / sequence of array-likes."):
            MultiIndex.from_arrays(0)

    def test_from_arrays_index_series_datetimetz(self):
        idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
                             tz='US/Eastern')
        idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
                             tz='Asia/Tokyo')
        result = pd.MultiIndex.from_arrays([idx1, idx2])
        tm.assert_index_equal(result.get_level_values(0), idx1)
        tm.assert_index_equal(result.get_level_values(1), idx2)

        result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
                                             pd.Series(idx2)])
        tm.assert_index_equal(result2.get_level_values(0), idx1)
        tm.assert_index_equal(result2.get_level_values(1), idx2)

        tm.assert_index_equal(result, result2)

    def test_from_arrays_index_series_timedelta(self):
        idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
        idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
        result = pd.MultiIndex.from_arrays([idx1, idx2])
        tm.assert_index_equal(result.get_level_values(0), idx1)
        tm.assert_index_equal(result.get_level_values(1), idx2)

        result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
                                             pd.Series(idx2)])
        tm.assert_index_equal(result2.get_level_values(0), idx1)
        tm.assert_index_equal(result2.get_level_values(1), idx2)

        tm.assert_index_equal(result, result2)

    def test_from_arrays_index_series_period(self):
        idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
        idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
        result = pd.MultiIndex.from_arrays([idx1, idx2])
        tm.assert_index_equal(result.get_level_values(0), idx1)
        tm.assert_index_equal(result.get_level_values(1), idx2)

        result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
                                             pd.Series(idx2)])
        tm.assert_index_equal(result2.get_level_values(0), idx1)
        tm.assert_index_equal(result2.get_level_values(1), idx2)

        tm.assert_index_equal(result, result2)

    def test_from_arrays_index_datetimelike_mixed(self):
        idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
                             tz='US/Eastern')
        idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
        idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
        idx4 = pd.period_range('2011-01-01', freq='D', periods=3)

        result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
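        # Each datetime-like input (tz-aware datetimes, naive datetimes,
        # timedeltas and periods) should round-trip through the MultiIndex
        # levels with its dtype preserved, which the assertions below verify.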
tm.assert_index_equal(result.get_level_values(0), idx1) tm.assert_index_equal(result.get_level_values(1), idx2) tm.assert_index_equal(result.get_level_values(2), idx3) tm.assert_index_equal(result.get_level_values(3), idx4) result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2), pd.Series(idx3), pd.Series(idx4)]) tm.assert_index_equal(result2.get_level_values(0), idx1) tm.assert_index_equal(result2.get_level_values(1), idx2) tm.assert_index_equal(result2.get_level_values(2), idx3) tm.assert_index_equal(result2.get_level_values(3), idx4) tm.assert_index_equal(result, result2) def test_from_arrays_index_series_categorical(self): # GH13743 idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=False) idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=True) result = pd.MultiIndex.from_arrays([idx1, idx2]) tm.assert_index_equal(result.get_level_values(0), idx1) tm.assert_index_equal(result.get_level_values(1), idx2) result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)]) tm.assert_index_equal(result2.get_level_values(0), idx1) tm.assert_index_equal(result2.get_level_values(1), idx2) result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values]) tm.assert_index_equal(result3.get_level_values(0), idx1) tm.assert_index_equal(result3.get_level_values(1), idx2) def test_from_arrays_empty(self): # 0 levels with tm.assert_raises_regex( ValueError, "Must pass non-zero number of levels/labels"): MultiIndex.from_arrays(arrays=[]) # 1 level result = MultiIndex.from_arrays(arrays=[[]], names=['A']) assert isinstance(result, MultiIndex) expected = Index([], name='A') tm.assert_index_equal(result.levels[0], expected) # N levels for N in [2, 3]: arrays = [[]] * N names = list('ABC')[:N] result = MultiIndex.from_arrays(arrays=arrays, names=names) expected = MultiIndex(levels=[[]] * N, labels=[[]] * N, names=names) tm.assert_index_equal(result, expected) def test_from_arrays_invalid_input(self): invalid_inputs = [1, [1], [1, 2], [[1], 2], 'a', ['a'], ['a', 'b'], [['a'], 'b']] for i in invalid_inputs: pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i) def test_from_arrays_different_lengths(self): # see gh-13599 idx1 = [1, 2, 3] idx2 = ['a', 'b'] tm.assert_raises_regex(ValueError, '^all arrays must ' 'be same length$', MultiIndex.from_arrays, [idx1, idx2]) idx1 = [] idx2 = ['a', 'b'] tm.assert_raises_regex(ValueError, '^all arrays must ' 'be same length$', MultiIndex.from_arrays, [idx1, idx2]) idx1 = [1, 2, 3] idx2 = [] tm.assert_raises_regex(ValueError, '^all arrays must ' 'be same length$', MultiIndex.from_arrays, [idx1, idx2]) def test_from_product(self): first = ['foo', 'bar', 'buz'] second = ['a', 'b', 'c'] names = ['first', 'second'] result = MultiIndex.from_product([first, second], names=names) tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'), ('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'), ('buz', 'c')] expected = MultiIndex.from_tuples(tuples, names=names) tm.assert_index_equal(result, expected) def test_from_product_iterator(self): # GH 18434 first = ['foo', 'bar', 'buz'] second = ['a', 'b', 'c'] names = ['first', 'second'] tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'), ('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'), ('buz', 'c')] expected = MultiIndex.from_tuples(tuples, names=names) # iterator as input result = MultiIndex.from_product(iter([first, second]), names=names) tm.assert_index_equal(result, expected) # Invalid non-iterable input with tm.assert_raises_regex( 
TypeError, "Input must be a list / sequence of iterables."): MultiIndex.from_product(0) def test_from_product_empty(self): # 0 levels with tm.assert_raises_regex( ValueError, "Must pass non-zero number of levels/labels"): MultiIndex.from_product([]) # 1 level result = MultiIndex.from_product([[]], names=['A']) expected = pd.Index([], name='A') tm.assert_index_equal(result.levels[0], expected) # 2 levels l1 = [[], ['foo', 'bar', 'baz'], []] l2 = [[], [], ['a', 'b', 'c']] names = ['A', 'B'] for first, second in zip(l1, l2): result = MultiIndex.from_product([first, second], names=names) expected = MultiIndex(levels=[first, second], labels=[[], []], names=names) tm.assert_index_equal(result, expected) # GH12258 names = ['A', 'B', 'C'] for N in range(4): lvl2 = lrange(N) result = MultiIndex.from_product([[], lvl2, []], names=names) expected = MultiIndex(levels=[[], lvl2, []], labels=[[], [], []], names=names) tm.assert_index_equal(result, expected) def test_from_product_invalid_input(self): invalid_inputs = [1, [1], [1, 2], [[1], 2], 'a', ['a'], ['a', 'b'], [['a'], 'b']] for i in invalid_inputs: pytest.raises(TypeError, MultiIndex.from_product, iterables=i) def test_from_product_datetimeindex(self): dt_index = date_range('2000-01-01', periods=2) mi = pd.MultiIndex.from_product([[1, 2], dt_index]) etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp( '2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp( '2000-01-01')), (2, pd.Timestamp('2000-01-02'))]) tm.assert_numpy_array_equal(mi.values, etalon) def test_from_product_index_series_categorical(self): # GH13743 first = ['foo', 'bar'] for ordered in [False, True]: idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=ordered) expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"), categories=list("bac"), ordered=ordered) for arr in [idx, pd.Series(idx), idx.values]: result = pd.MultiIndex.from_product([first, arr]) tm.assert_index_equal(result.get_level_values(1), expected) def test_values_boxed(self): tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT), (3, pd.Timestamp('2000-01-03')), (1, pd.Timestamp('2000-01-04')), (2, pd.Timestamp('2000-01-02')), (3, pd.Timestamp('2000-01-03'))] result = pd.MultiIndex.from_tuples(tuples) expected = construct_1d_object_array_from_listlike(tuples) tm.assert_numpy_array_equal(result.values, expected) # Check that code branches for boxed values produce identical results tm.assert_numpy_array_equal(result.values[:4], result[:4].values) def test_values_multiindex_datetimeindex(self): # Test to ensure we hit the boxing / nobox part of MI.values ints = np.arange(10 ** 18, 10 ** 18 + 5) naive = pd.DatetimeIndex(ints) aware = pd.DatetimeIndex(ints, tz='US/Central') idx = pd.MultiIndex.from_arrays([naive, aware]) result = idx.values outer = pd.DatetimeIndex([x[0] for x in result]) tm.assert_index_equal(outer, naive) inner = pd.DatetimeIndex([x[1] for x in result]) tm.assert_index_equal(inner, aware) # n_lev > n_lab result = idx[:2].values outer = pd.DatetimeIndex([x[0] for x in result]) tm.assert_index_equal(outer, naive[:2]) inner = pd.DatetimeIndex([x[1] for x in result]) tm.assert_index_equal(inner, aware[:2]) def test_values_multiindex_periodindex(self): # Test to ensure we hit the boxing / nobox part of MI.values ints = np.arange(2007, 2012) pidx = pd.PeriodIndex(ints, freq='D') idx = pd.MultiIndex.from_arrays([ints, pidx]) result = idx.values outer = pd.Int64Index([x[0] for x in result]) tm.assert_index_equal(outer, pd.Int64Index(ints)) inner = 
pd.PeriodIndex([x[1] for x in result]) tm.assert_index_equal(inner, pidx) # n_lev > n_lab result = idx[:2].values outer = pd.Int64Index([x[0] for x in result]) tm.assert_index_equal(outer, pd.Int64Index(ints[:2])) inner = pd.PeriodIndex([x[1] for x in result]) tm.assert_index_equal(inner, pidx[:2]) def test_append(self): result = self.index[:3].append(self.index[3:]) assert result.equals(self.index) foos = [self.index[:1], self.index[1:3], self.index[3:]] result = foos[0].append(foos[1:]) assert result.equals(self.index) # empty result = self.index.append([]) assert result.equals(self.index) def test_append_mixed_dtypes(self): # GH 13660 dti = date_range('2011-01-01', freq='M', periods=3, ) dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern') pi = period_range('2011-01', freq='M', periods=3) mi = MultiIndex.from_arrays([[1, 2, 3], [1.1, np.nan, 3.3], ['a', 'b', 'c'], dti, dti_tz, pi]) assert mi.nlevels == 6 res = mi.append(mi) exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3], [1.1, np.nan, 3.3, 1.1, np.nan, 3.3], ['a', 'b', 'c', 'a', 'b', 'c'], dti.append(dti), dti_tz.append(dti_tz), pi.append(pi)]) tm.assert_index_equal(res, exp) other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'], ['x', 'y', 'z'], ['x', 'y', 'z'], ['x', 'y', 'z'], ['x', 'y', 'z']]) res = mi.append(other) exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'], [1.1, np.nan, 3.3, 'x', 'y', 'z'], ['a', 'b', 'c', 'x', 'y', 'z'], dti.append(pd.Index(['x', 'y', 'z'])), dti_tz.append(pd.Index(['x', 'y', 'z'])), pi.append(pd.Index(['x', 'y', 'z']))]) tm.assert_index_equal(res, exp) def test_get_level_values(self): result = self.index.get_level_values(0) expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'], name='first') tm.assert_index_equal(result, expected) assert result.name == 'first' result = self.index.get_level_values('first') expected = self.index.get_level_values(0) tm.assert_index_equal(result, expected) # GH 10460 index = MultiIndex( levels=[CategoricalIndex(['A', 'B']), CategoricalIndex([1, 2, 3])], labels=[np.array([0, 0, 0, 1, 1, 1]), np.array([0, 1, 2, 0, 1, 2])]) exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B']) tm.assert_index_equal(index.get_level_values(0), exp) exp = CategoricalIndex([1, 2, 3, 1, 2, 3]) tm.assert_index_equal(index.get_level_values(1), exp) def test_get_level_values_int_with_na(self): # GH 17924 arrays = [['a', 'b', 'b'], [1, np.nan, 2]] index = pd.MultiIndex.from_arrays(arrays) result = index.get_level_values(1) expected = Index([1, np.nan, 2]) tm.assert_index_equal(result, expected) arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]] index = pd.MultiIndex.from_arrays(arrays) result = index.get_level_values(1) expected = Index([np.nan, np.nan, 2]) tm.assert_index_equal(result, expected) def test_get_level_values_na(self): arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]] index = pd.MultiIndex.from_arrays(arrays) result = index.get_level_values(0) expected = pd.Index([np.nan, np.nan, np.nan]) tm.assert_index_equal(result, expected) result = index.get_level_values(1) expected = pd.Index(['a', np.nan, 1]) tm.assert_index_equal(result, expected) arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])] index = pd.MultiIndex.from_arrays(arrays) result = index.get_level_values(1) expected = pd.DatetimeIndex([0, 1, pd.NaT]) tm.assert_index_equal(result, expected) arrays = [[], []] index = pd.MultiIndex.from_arrays(arrays) result = index.get_level_values(0) expected = pd.Index([], dtype=object) tm.assert_index_equal(result, expected) def 
test_get_level_values_all_na(self): # GH 17924 when level entirely consists of nan arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]] index = pd.MultiIndex.from_arrays(arrays) result = index.get_level_values(0) expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64) tm.assert_index_equal(result, expected) result = index.get_level_values(1) expected = pd.Index(['a', np.nan, 1], dtype=object) tm.assert_index_equal(result, expected) def test_reorder_levels(self): # this blows up tm.assert_raises_regex(IndexError, '^Too many levels', self.index.reorder_levels, [2, 1, 0]) def test_nlevels(self): assert self.index.nlevels == 2 def test_iter(self): result = list(self.index) expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'), ('baz', 'two'), ('qux', 'one'), ('qux', 'two')] assert result == expected def test_legacy_pickle(self): if PY3: pytest.skip("testing for legacy pickles not " "support on py3") path = tm.get_data_path('multiindex_v1.pickle') obj = pd.read_pickle(path) obj2 = MultiIndex.from_tuples(obj.values) assert obj.equals(obj2) res = obj.get_indexer(obj) exp = np.arange(len(obj), dtype=np.intp) assert_almost_equal(res, exp) res = obj.get_indexer(obj2[::-1]) exp = obj.get_indexer(obj[::-1]) exp2 = obj2.get_indexer(obj2[::-1]) assert_almost_equal(res, exp) assert_almost_equal(exp, exp2) def test_legacy_v2_unpickle(self): # 0.7.3 -> 0.8.0 format manage path = tm.get_data_path('mindex_073.pickle') obj = pd.read_pickle(path) obj2 = MultiIndex.from_tuples(obj.values) assert obj.equals(obj2) res = obj.get_indexer(obj) exp = np.arange(len(obj), dtype=np.intp) assert_almost_equal(res, exp) res = obj.get_indexer(obj2[::-1]) exp = obj.get_indexer(obj[::-1]) exp2 = obj2.get_indexer(obj2[::-1]) assert_almost_equal(res, exp) assert_almost_equal(exp, exp2) def test_roundtrip_pickle_with_tz(self): # GH 8367 # round-trip of timezone index = MultiIndex.from_product( [[1, 2], ['a', 'b'], date_range('20130101', periods=3, tz='US/Eastern') ], names=['one', 'two', 'three']) unpickled = tm.round_trip_pickle(index) assert index.equal_levels(unpickled) def test_from_tuples_index_values(self): result = MultiIndex.from_tuples(self.index) assert (result.values == self.index.values).all() def test_contains(self): assert ('foo', 'two') in self.index assert ('bar', 'two') not in self.index assert None not in self.index def test_contains_top_level(self): midx = MultiIndex.from_product([['A', 'B'], [1, 2]]) assert 'A' in midx assert 'A' not in midx._engine def test_contains_with_nat(self): # MI with a NaT mi = MultiIndex(levels=[['C'], pd.date_range('2012-01-01', periods=5)], labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]], names=[None, 'B']) assert ('C', pd.Timestamp('2012-01-01')) in mi for val in mi.values: assert val in mi def test_is_all_dates(self): assert not self.index.is_all_dates def test_is_numeric(self): # MultiIndex is never numeric assert not self.index.is_numeric() def test_getitem(self): # scalar assert self.index[2] == ('bar', 'one') # slice result = self.index[2:5] expected = self.index[[2, 3, 4]] assert result.equals(expected) # boolean result = self.index[[True, False, True, False, True, True]] result2 = self.index[np.array([True, False, True, False, True, True])] expected = self.index[[0, 2, 4, 5]] assert result.equals(expected) assert result2.equals(expected) def test_getitem_group_select(self): sorted_idx, _ = self.index.sortlevel(0) assert sorted_idx.get_loc('baz') == slice(3, 4) assert sorted_idx.get_loc('foo') == slice(0, 2) def test_get_loc(self): assert 
self.index.get_loc(('foo', 'two')) == 1 assert self.index.get_loc(('baz', 'two')) == 3 pytest.raises(KeyError, self.index.get_loc, ('bar', 'two')) pytest.raises(KeyError, self.index.get_loc, 'quux') pytest.raises(NotImplementedError, self.index.get_loc, 'foo', method='nearest') # 3 levels index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index( lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])]) pytest.raises(KeyError, index.get_loc, (1, 1)) assert index.get_loc((2, 0)) == slice(3, 5) def test_get_loc_duplicates(self): index = Index([2, 2, 2, 2]) result = index.get_loc(2) expected = slice(0, 4) assert result == expected # pytest.raises(Exception, index.get_loc, 2) index = Index(['c', 'a', 'a', 'b', 'b']) rs = index.get_loc('c') xp = 0 assert rs == xp def test_get_value_duplicates(self): index = MultiIndex(levels=[['D', 'B', 'C'], [0, 26, 27, 37, 57, 67, 75, 82]], labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2], [1, 3, 4, 6, 0, 2, 2, 3, 5, 7]], names=['tag', 'day']) assert index.get_loc('D') == slice(0, 3) with pytest.raises(KeyError): index._engine.get_value(np.array([]), 'D') def test_get_loc_level(self): index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index( lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])]) loc, new_index = index.get_loc_level((0, 1)) expected = slice(1, 2) exp_index = index[expected].droplevel(0).droplevel(0) assert loc == expected assert new_index.equals(exp_index) loc, new_index = index.get_loc_level((0, 1, 0)) expected = 1 assert loc == expected assert new_index is None pytest.raises(KeyError, index.get_loc_level, (2, 2)) index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array( [0, 0, 0, 0]), np.array([0, 1, 2, 3])]) result, new_index = index.get_loc_level((2000, slice(None, None))) expected = slice(None, None) assert result == expected assert new_index.equals(index.droplevel(0)) @pytest.mark.parametrize('level', [0, 1]) @pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None]) def test_get_loc_nan(self, level, null_val): # GH 18485 : NaN in MultiIndex levels = [['a', 'b'], ['c', 'd']] key = ['b', 'd'] levels[level] = np.array([0, null_val], dtype=type(null_val)) key[level] = null_val idx = MultiIndex.from_product(levels) assert idx.get_loc(tuple(key)) == 3 def test_get_loc_missing_nan(self): # GH 8569 idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]]) assert isinstance(idx.get_loc(1), slice) pytest.raises(KeyError, idx.get_loc, 3) pytest.raises(KeyError, idx.get_loc, np.nan) pytest.raises(KeyError, idx.get_loc, [np.nan]) @pytest.mark.parametrize('dtype1', [int, float, bool, str]) @pytest.mark.parametrize('dtype2', [int, float, bool, str]) def test_get_loc_multiple_dtypes(self, dtype1, dtype2): # GH 18520 levels = [np.array([0, 1]).astype(dtype1), np.array([0, 1]).astype(dtype2)] idx = pd.MultiIndex.from_product(levels) assert idx.get_loc(idx[2]) == 2 @pytest.mark.parametrize('level', [0, 1]) @pytest.mark.parametrize('dtypes', [[int, float], [float, int]]) def test_get_loc_implicit_cast(self, level, dtypes): # GH 18818, GH 15994 : as flat index, cast int to float and vice-versa levels = [['a', 'b'], ['c', 'd']] key = ['b', 'd'] lev_dtype, key_dtype = dtypes levels[level] = np.array([0, 1], dtype=lev_dtype) key[level] = key_dtype(1) idx = MultiIndex.from_product(levels) assert idx.get_loc(tuple(key)) == 3 def test_get_loc_cast_bool(self): # GH 19086 : int is casted to bool, but 
not vice-versa levels = [[False, True], np.arange(2, dtype='int64')] idx = MultiIndex.from_product(levels) assert idx.get_loc((0, 1)) == 1 assert idx.get_loc((1, 0)) == 2 pytest.raises(KeyError, idx.get_loc, (False, True)) pytest.raises(KeyError, idx.get_loc, (True, False)) def test_slice_locs(self): df = tm.makeTimeDataFrame() stacked = df.stack() idx = stacked.index slob = slice(*idx.slice_locs(df.index[5], df.index[15])) sliced = stacked[slob] expected = df[5:16].stack() tm.assert_almost_equal(sliced.values, expected.values) slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30), df.index[15] - timedelta(seconds=30))) sliced = stacked[slob] expected = df[6:15].stack() tm.assert_almost_equal(sliced.values, expected.values) def test_slice_locs_with_type_mismatch(self): df = tm.makeTimeDataFrame() stacked = df.stack() idx = stacked.index tm.assert_raises_regex(TypeError, '^Level type mismatch', idx.slice_locs, (1, 3)) tm.assert_raises_regex(TypeError, '^Level type mismatch', idx.slice_locs, df.index[5] + timedelta( seconds=30), (5, 2)) df = tm.makeCustomDataframe(5, 5) stacked = df.stack() idx = stacked.index with tm.assert_raises_regex(TypeError, '^Level type mismatch'): idx.slice_locs(timedelta(seconds=30)) # TODO: Try creating a UnicodeDecodeError in exception message with tm.assert_raises_regex(TypeError, '^Level type mismatch'): idx.slice_locs(df.index[1], (16, "a")) def test_slice_locs_not_sorted(self): index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index( lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])]) tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than " "MultiIndex lexsort depth", index.slice_locs, (1, 0, 1), (2, 1, 0)) # works sorted_index, _ = index.sortlevel(0) # should there be a test case here??? 
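        # After index.sortlevel(0) (sort_remaining defaults to True) the index
        # is fully lexsorted, so the same slice_locs key that raised above
        # should now resolve without error.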
sorted_index.slice_locs((1, 0, 1), (2, 1, 0)) def test_slice_locs_partial(self): sorted_idx, _ = self.index.sortlevel(0) result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one')) assert result == (1, 5) result = sorted_idx.slice_locs(None, ('qux', 'one')) assert result == (0, 5) result = sorted_idx.slice_locs(('foo', 'two'), None) assert result == (1, len(sorted_idx)) result = sorted_idx.slice_locs('bar', 'baz') assert result == (2, 4) def test_slice_locs_not_contained(self): # some searchsorted action index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]], labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3], [0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0) result = index.slice_locs((1, 0), (5, 2)) assert result == (3, 6) result = index.slice_locs(1, 5) assert result == (3, 6) result = index.slice_locs((2, 2), (5, 2)) assert result == (3, 6) result = index.slice_locs(2, 5) assert result == (3, 6) result = index.slice_locs((1, 0), (6, 3)) assert result == (3, 8) result = index.slice_locs(-1, 10) assert result == (0, len(index)) def test_consistency(self): # need to construct an overflow major_axis = lrange(70000) minor_axis = lrange(10) major_labels = np.arange(70000) minor_labels = np.repeat(lrange(10), 7000) # the fact that is works means it's consistent index = MultiIndex(levels=[major_axis, minor_axis], labels=[major_labels, minor_labels]) # inconsistent major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3]) minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1]) index = MultiIndex(levels=[major_axis, minor_axis], labels=[major_labels, minor_labels]) assert not index.is_unique def test_truncate(self): major_axis = Index(lrange(4)) minor_axis = Index(lrange(2)) major_labels = np.array([0, 0, 1, 2, 3, 3]) minor_labels = np.array([0, 1, 0, 1, 0, 1]) index = MultiIndex(levels=[major_axis, minor_axis], labels=[major_labels, minor_labels]) result = index.truncate(before=1) assert 'foo' not in result.levels[0] assert 1 in result.levels[0] result = index.truncate(after=1) assert 2 not in result.levels[0] assert 1 in result.levels[0] result = index.truncate(before=1, after=2) assert len(result.levels[0]) == 2 # after < before pytest.raises(ValueError, index.truncate, 3, 1) def test_get_indexer(self): major_axis = Index(lrange(4)) minor_axis = Index(lrange(2)) major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp) minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp) index = MultiIndex(levels=[major_axis, minor_axis], labels=[major_labels, minor_labels]) idx1 = index[:5] idx2 = index[[1, 3, 5]] r1 = idx1.get_indexer(idx2) assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp)) r1 = idx2.get_indexer(idx1, method='pad') e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp) assert_almost_equal(r1, e1) r2 = idx2.get_indexer(idx1[::-1], method='pad') assert_almost_equal(r2, e1[::-1]) rffill1 = idx2.get_indexer(idx1, method='ffill') assert_almost_equal(r1, rffill1) r1 = idx2.get_indexer(idx1, method='backfill') e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp) assert_almost_equal(r1, e1) r2 = idx2.get_indexer(idx1[::-1], method='backfill') assert_almost_equal(r2, e1[::-1]) rbfill1 = idx2.get_indexer(idx1, method='bfill') assert_almost_equal(r1, rbfill1) # pass non-MultiIndex r1 = idx1.get_indexer(idx2.values) rexp1 = idx1.get_indexer(idx2) assert_almost_equal(r1, rexp1) r1 = idx1.get_indexer([1, 2, 3]) assert (r1 == [-1, -1, -1]).all() # create index with duplicates idx1 = Index(lrange(10) + lrange(10)) idx2 = Index(lrange(20)) msg = "Reindexing only valid with uniquely valued Index objects" with 
tm.assert_raises_regex(InvalidIndexError, msg): idx1.get_indexer(idx2) def test_get_indexer_nearest(self): midx = MultiIndex.from_tuples([('a', 1), ('b', 2)]) with pytest.raises(NotImplementedError): midx.get_indexer(['a'], method='nearest') with pytest.raises(NotImplementedError): midx.get_indexer(['a'], method='pad', tolerance=2) def test_hash_collisions(self): # non-smoke test that we don't get hash collisions index = MultiIndex.from_product([np.arange(1000), np.arange(1000)], names=['one', 'two']) result = index.get_indexer(index.values) tm.assert_numpy_array_equal(result, np.arange( len(index), dtype='intp')) for i in [0, 1, len(index) - 2, len(index) - 1]: result = index.get_loc(index[i]) assert result == i def test_format(self): self.index.format() self.index[:0].format() def test_format_integer_names(self): index = MultiIndex(levels=[[0, 1], [0, 1]], labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1]) index.format(names=True) def test_format_sparse_display(self): index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]], labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]]) result = index.format() assert result[3] == '1 0 0 0' def test_format_sparse_config(self): warn_filters = warnings.filters warnings.filterwarnings('ignore', category=FutureWarning, module=".*format") # GH1538 pd.set_option('display.multi_sparse', False) result = self.index.format() assert result[1] == 'foo two' tm.reset_display_options() warnings.filters = warn_filters def test_to_frame(self): tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')] index = MultiIndex.from_tuples(tuples) result = index.to_frame(index=False) expected = DataFrame(tuples) tm.assert_frame_equal(result, expected) result = index.to_frame() expected.index = index tm.assert_frame_equal(result, expected) tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')] index = MultiIndex.from_tuples(tuples, names=['first', 'second']) result = index.to_frame(index=False) expected = DataFrame(tuples) expected.columns = ['first', 'second'] tm.assert_frame_equal(result, expected) result = index.to_frame() expected.index = index tm.assert_frame_equal(result, expected) index = MultiIndex.from_product([range(5), pd.date_range('20130101', periods=3)]) result = index.to_frame(index=False) expected = DataFrame( {0: np.repeat(np.arange(5, dtype='int64'), 3), 1: np.tile(pd.date_range('20130101', periods=3), 5)}) tm.assert_frame_equal(result, expected) index = MultiIndex.from_product([range(5), pd.date_range('20130101', periods=3)]) result = index.to_frame() expected.index = index tm.assert_frame_equal(result, expected) def test_to_hierarchical(self): index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), ( 2, 'two')]) result = index.to_hierarchical(3) expected = MultiIndex(levels=[[1, 2], ['one', 'two']], labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]]) tm.assert_index_equal(result, expected) assert result.names == index.names # K > 1 result = index.to_hierarchical(3, 2) expected = MultiIndex(levels=[[1, 2], ['one', 'two']], labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]]) tm.assert_index_equal(result, expected) assert result.names == index.names # non-sorted index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'), (2, 'a'), (2, 'b')], names=['N1', 'N2']) result = index.to_hierarchical(2) expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'), (1, 'b'), (2, 'a'), (2, 'a'), (2, 'b'), (2, 'b')], names=['N1', 'N2']) 
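        # to_hierarchical(2) repeats each tuple twice in place, preserving the
        # original (unsorted) tuple order and the level names.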
tm.assert_index_equal(result, expected) assert result.names == index.names def test_bounds(self): self.index._bounds def test_equals_multi(self): assert self.index.equals(self.index) assert not self.index.equals(self.index.values) assert self.index.equals(Index(self.index.values)) assert self.index.equal_levels(self.index) assert not self.index.equals(self.index[:-1]) assert not self.index.equals(self.index[-1]) # different number of levels index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index( lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])]) index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1]) assert not index.equals(index2) assert not index.equal_levels(index2) # levels are different major_axis = Index(lrange(4)) minor_axis = Index(lrange(2)) major_labels = np.array([0, 0, 1, 2, 2, 3]) minor_labels = np.array([0, 1, 0, 0, 1, 0]) index = MultiIndex(levels=[major_axis, minor_axis], labels=[major_labels, minor_labels]) assert not self.index.equals(index) assert not self.index.equal_levels(index) # some of the labels are different major_axis = Index(['foo', 'bar', 'baz', 'qux']) minor_axis = Index(['one', 'two']) major_labels = np.array([0, 0, 2, 2, 3, 3]) minor_labels = np.array([0, 1, 0, 1, 0, 1]) index = MultiIndex(levels=[major_axis, minor_axis], labels=[major_labels, minor_labels]) assert not self.index.equals(index) def test_equals_missing_values(self): # make sure take is not using -1 i = pd.MultiIndex.from_tuples([(0, pd.NaT), (0, pd.Timestamp('20130101'))]) result = i[0:1].equals(i[0]) assert not result result = i[1:2].equals(i[1]) assert not result def test_identical(self): mi = self.index.copy() mi2 = self.index.copy() assert mi.identical(mi2) mi = mi.set_names(['new1', 'new2']) assert mi.equals(mi2) assert not mi.identical(mi2) mi2 = mi2.set_names(['new1', 'new2']) assert mi.identical(mi2) mi3 = Index(mi.tolist(), names=mi.names) mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False) assert mi.identical(mi3) assert not mi.identical(mi4) assert mi.equals(mi4) def test_is_(self): mi = MultiIndex.from_tuples(lzip(range(10), range(10))) assert mi.is_(mi) assert mi.is_(mi.view()) assert mi.is_(mi.view().view().view().view()) mi2 = mi.view() # names are metadata, they don't change id mi2.names = ["A", "B"] assert mi2.is_(mi) assert mi.is_(mi2) assert mi.is_(mi.set_names(["C", "D"])) mi2 = mi.view() mi2.set_names(["E", "F"], inplace=True) assert mi.is_(mi2) # levels are inherent properties, they change identity mi3 = mi2.set_levels([lrange(10), lrange(10)]) assert not mi3.is_(mi2) # shouldn't change assert mi2.is_(mi) mi4 = mi3.view() # GH 17464 - Remove duplicate MultiIndex levels mi4.set_levels([lrange(10), lrange(10)], inplace=True) assert not mi4.is_(mi3) mi5 = mi.view() mi5.set_levels(mi5.levels, inplace=True) assert not mi5.is_(mi) def test_union(self): piece1 = self.index[:5][::-1] piece2 = self.index[3:] the_union = piece1 | piece2 tups = sorted(self.index.values) expected = MultiIndex.from_tuples(tups) assert the_union.equals(expected) # corner case, pass self or empty thing: the_union = self.index.union(self.index) assert the_union is self.index the_union = self.index.union(self.index[:0]) assert the_union is self.index # won't work in python 3 # tuples = self.index.values # result = self.index[:4] | tuples[4:] # assert result.equals(tuples) # not valid for python 3 # def test_union_with_regular_index(self): # other = Index(['A', 'B', 'C']) # result = 
other.union(self.index) # assert ('foo', 'one') in result # assert 'B' in result # result2 = self.index.union(other) # assert result.equals(result2) def test_intersection(self): piece1 = self.index[:5][::-1] piece2 = self.index[3:] the_int = piece1 & piece2 tups = sorted(self.index[3:5].values) expected = MultiIndex.from_tuples(tups) assert the_int.equals(expected) # corner case, pass self the_int = self.index.intersection(self.index) assert the_int is self.index # empty intersection: disjoint empty = self.index[:2] & self.index[2:] expected = self.index[:0] assert empty.equals(expected) # can't do in python 3 # tuples = self.index.values # result = self.index & tuples # assert result.equals(tuples) def test_sub(self): first = self.index # - now raises (previously was set op difference) with pytest.raises(TypeError): first - self.index[-3:] with pytest.raises(TypeError): self.index[-3:] - first with pytest.raises(TypeError): self.index[-3:] - first.tolist() with pytest.raises(TypeError): first.tolist() - self.index[-3:] def test_difference(self): first = self.index result = first.difference(self.index[-3:]) expected = MultiIndex.from_tuples(sorted(self.index[:-3].values), sortorder=0, names=self.index.names) assert isinstance(result, MultiIndex) assert result.equals(expected) assert result.names == self.index.names # empty difference: reflexive result = self.index.difference(self.index) expected = self.index[:0] assert result.equals(expected) assert result.names == self.index.names # empty difference: superset result = self.index[-3:].difference(self.index) expected = self.index[:0] assert result.equals(expected) assert result.names == self.index.names # empty difference: degenerate result = self.index[:0].difference(self.index) expected = self.index[:0] assert result.equals(expected) assert result.names == self.index.names # names not the same chunklet = self.index[-3:] chunklet.names = ['foo', 'baz'] result = first.difference(chunklet) assert result.names == (None, None) # empty, but non-equal result = self.index.difference(self.index.sortlevel(1)[0]) assert len(result) == 0 # raise Exception called with non-MultiIndex result = first.difference(first.values) assert result.equals(first[:0]) # name from empty array result = first.difference([]) assert first.equals(result) assert first.names == result.names # name from non-empty array result = first.difference([('foo', 'one')]) expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), ( 'foo', 'two'), ('qux', 'one'), ('qux', 'two')]) expected.names = first.names assert first.names == result.names tm.assert_raises_regex(TypeError, "other must be a MultiIndex " "or a list of tuples", first.difference, [1, 2, 3, 4, 5]) def test_from_tuples(self): tm.assert_raises_regex(TypeError, 'Cannot infer number of levels ' 'from empty list', MultiIndex.from_tuples, []) expected = MultiIndex(levels=[[1, 3], [2, 4]], labels=[[0, 1], [0, 1]], names=['a', 'b']) # input tuples result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b']) tm.assert_index_equal(result, expected) def test_from_tuples_iterator(self): # GH 18434 # input iterator for tuples expected = MultiIndex(levels=[[1, 3], [2, 4]], labels=[[0, 1], [0, 1]], names=['a', 'b']) result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b']) tm.assert_index_equal(result, expected) # input non-iterables with tm.assert_raises_regex( TypeError, 'Input must be a list / sequence of tuple-likes.'): MultiIndex.from_tuples(0) def test_from_tuples_empty(self): # GH 16777 result = 
MultiIndex.from_tuples([], names=['a', 'b']) expected = MultiIndex.from_arrays(arrays=[[], []], names=['a', 'b']) tm.assert_index_equal(result, expected) def test_argsort(self): result = self.index.argsort() expected = self.index.values.argsort() tm.assert_numpy_array_equal(result, expected) def test_sortlevel(self): import random tuples = list(self.index) random.shuffle(tuples) index = MultiIndex.from_tuples(tuples) sorted_idx, _ = index.sortlevel(0) expected = MultiIndex.from_tuples(sorted(tuples)) assert sorted_idx.equals(expected) sorted_idx, _ = index.sortlevel(0, ascending=False) assert sorted_idx.equals(expected[::-1]) sorted_idx, _ = index.sortlevel(1) by1 = sorted(tuples, key=lambda x: (x[1], x[0])) expected = MultiIndex.from_tuples(by1) assert sorted_idx.equals(expected) sorted_idx, _ = index.sortlevel(1, ascending=False) assert sorted_idx.equals(expected[::-1]) def test_sortlevel_not_sort_remaining(self): mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC')) sorted_idx, _ = mi.sortlevel('A', sort_remaining=False) assert sorted_idx.equals(mi) def test_sortlevel_deterministic(self): tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'), ('foo', 'one'), ('baz', 'two'), ('qux', 'one')] index = MultiIndex.from_tuples(tuples) sorted_idx, _ = index.sortlevel(0) expected = MultiIndex.from_tuples(sorted(tuples)) assert sorted_idx.equals(expected) sorted_idx, _ = index.sortlevel(0, ascending=False) assert sorted_idx.equals(expected[::-1]) sorted_idx, _ = index.sortlevel(1) by1 = sorted(tuples, key=lambda x: (x[1], x[0])) expected = MultiIndex.from_tuples(by1) assert sorted_idx.equals(expected) sorted_idx, _ = index.sortlevel(1, ascending=False) assert sorted_idx.equals(expected[::-1]) def test_dims(self): pass def test_drop(self): dropped = self.index.drop([('foo', 'two'), ('qux', 'one')]) index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')]) dropped2 = self.index.drop(index) expected = self.index[[0, 2, 3, 5]] tm.assert_index_equal(dropped, expected) tm.assert_index_equal(dropped2, expected) dropped = self.index.drop(['bar']) expected = self.index[[0, 1, 3, 4, 5]] tm.assert_index_equal(dropped, expected) dropped = self.index.drop('foo') expected = self.index[[2, 3, 4, 5]] tm.assert_index_equal(dropped, expected) index = MultiIndex.from_tuples([('bar', 'two')]) pytest.raises(KeyError, self.index.drop, [('bar', 'two')]) pytest.raises(KeyError, self.index.drop, index) pytest.raises(KeyError, self.index.drop, ['foo', 'two']) # partially correct argument mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')]) pytest.raises(KeyError, self.index.drop, mixed_index) # error='ignore' dropped = self.index.drop(index, errors='ignore') expected = self.index[[0, 1, 2, 3, 4, 5]] tm.assert_index_equal(dropped, expected) dropped = self.index.drop(mixed_index, errors='ignore') expected = self.index[[0, 1, 2, 3, 5]] tm.assert_index_equal(dropped, expected) dropped = self.index.drop(['foo', 'two'], errors='ignore') expected = self.index[[2, 3, 4, 5]] tm.assert_index_equal(dropped, expected) # mixed partial / full drop dropped = self.index.drop(['foo', ('qux', 'one')]) expected = self.index[[2, 3, 5]] tm.assert_index_equal(dropped, expected) # mixed partial / full drop / error='ignore' mixed_index = ['foo', ('qux', 'one'), 'two'] pytest.raises(KeyError, self.index.drop, mixed_index) dropped = self.index.drop(mixed_index, errors='ignore') expected = self.index[[2, 3, 5]] tm.assert_index_equal(dropped, expected) def test_droplevel_with_names(self): index = 
self.index[self.index.get_loc('foo')] dropped = index.droplevel(0) assert dropped.name == 'second' index = MultiIndex( levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])], names=['one', 'two', 'three']) dropped = index.droplevel(0) assert dropped.names == ('two', 'three') dropped = index.droplevel('two') expected = index.droplevel(1) assert dropped.equals(expected) def test_droplevel_list(self): index = MultiIndex( levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])], names=['one', 'two', 'three']) dropped = index[:2].droplevel(['three', 'one']) expected = index[:2].droplevel(2).droplevel(0) assert dropped.equals(expected) dropped = index[:2].droplevel([]) expected = index[:2] assert dropped.equals(expected) with pytest.raises(ValueError): index[:2].droplevel(['one', 'two', 'three']) with pytest.raises(KeyError): index[:2].droplevel(['one', 'four']) def test_drop_not_lexsorted(self): # GH 12078 # define the lexsorted version of the multi-index tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')] lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c']) assert lexsorted_mi.is_lexsorted() # and the not-lexsorted version df = pd.DataFrame(columns=['a', 'b', 'c', 'd'], data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]]) df = df.pivot_table(index='a', columns=['b', 'c'], values='d') df = df.reset_index() not_lexsorted_mi = df.columns assert not not_lexsorted_mi.is_lexsorted() # compare the results tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi) with tm.assert_produces_warning(PerformanceWarning): tm.assert_index_equal(lexsorted_mi.drop('a'), not_lexsorted_mi.drop('a')) def test_insert(self): # key contained in all levels new_index = self.index.insert(0, ('bar', 'two')) assert new_index.equal_levels(self.index) assert new_index[0] == ('bar', 'two') # key not contained in all levels new_index = self.index.insert(0, ('abc', 'three')) exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first') tm.assert_index_equal(new_index.levels[0], exp0) exp1 = Index(list(self.index.levels[1]) + ['three'], name='second') tm.assert_index_equal(new_index.levels[1], exp1) assert new_index[0] == ('abc', 'three') # key wrong length msg = "Item must have length equal to number of levels" with tm.assert_raises_regex(ValueError, msg): self.index.insert(0, ('foo2',)) left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]], columns=['1st', '2nd', '3rd']) left.set_index(['1st', '2nd'], inplace=True) ts = left['3rd'].copy(deep=True) left.loc[('b', 'x'), '3rd'] = 2 left.loc[('b', 'a'), '3rd'] = -1 left.loc[('b', 'b'), '3rd'] = 3 left.loc[('a', 'x'), '3rd'] = 4 left.loc[('a', 'w'), '3rd'] = 5 left.loc[('a', 'a'), '3rd'] = 6 ts.loc[('b', 'x')] = 2 ts.loc['b', 'a'] = -1 ts.loc[('b', 'b')] = 3 ts.loc['a', 'x'] = 4 ts.loc[('a', 'w')] = 5 ts.loc['a', 'a'] = 6 right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2], ['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4], ['a', 'w', 5], ['a', 'a', 6]], columns=['1st', '2nd', '3rd']) right.set_index(['1st', '2nd'], inplace=True) # FIXME data types changes to float because # of intermediate nan insertion; tm.assert_frame_equal(left, right, check_dtype=False) tm.assert_series_equal(ts, right['3rd']) # GH9250 idx = [('test1', i) for i in range(5)] + \ [('test2', i) for i in range(6)] + \ [('test', 17), ('test', 18)] left = 
pd.Series(np.linspace(0, 10, 11), pd.MultiIndex.from_tuples(idx[:-2])) left.loc[('test', 17)] = 11 left.loc[('test', 18)] = 12 right = pd.Series(np.linspace(0, 12, 13), pd.MultiIndex.from_tuples(idx)) tm.assert_series_equal(left, right) def test_take_preserve_name(self): taken = self.index.take([3, 0, 1]) assert taken.names == self.index.names def test_take_fill_value(self): # GH 12631 vals = [['A', 'B'], [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]] idx = pd.MultiIndex.from_product(vals, names=['str', 'dt']) result = idx.take(np.array([1, 0, -1])) exp_vals = [('A', pd.Timestamp('2011-01-02')), ('A', pd.Timestamp('2011-01-01')), ('B', pd.Timestamp('2011-01-02'))] expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) tm.assert_index_equal(result, expected) # fill_value result = idx.take(np.array([1, 0, -1]), fill_value=True) exp_vals = [('A', pd.Timestamp('2011-01-02')), ('A', pd.Timestamp('2011-01-01')), (np.nan, pd.NaT)] expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) tm.assert_index_equal(result, expected) # allow_fill=False result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) exp_vals = [('A', pd.Timestamp('2011-01-02')), ('A', pd.Timestamp('2011-01-01')), ('B', pd.Timestamp('2011-01-02'))] expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) tm.assert_index_equal(result, expected) msg = ('When allow_fill=True and fill_value is not None, ' 'all indices must be >= -1') with tm.assert_raises_regex(ValueError, msg): idx.take(np.array([1, 0, -2]), fill_value=True) with tm.assert_raises_regex(ValueError, msg): idx.take(np.array([1, 0, -5]), fill_value=True) with pytest.raises(IndexError): idx.take(np.array([1, -5])) def take_invalid_kwargs(self): vals = [['A', 'B'], [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]] idx = pd.MultiIndex.from_product(vals, names=['str', 'dt']) indices = [1, 2] msg = r"take\(\) got an unexpected keyword argument 'foo'" tm.assert_raises_regex(TypeError, msg, idx.take, indices, foo=2) msg = "the 'out' parameter is not supported" tm.assert_raises_regex(ValueError, msg, idx.take, indices, out=indices) msg = "the 'mode' parameter is not supported" tm.assert_raises_regex(ValueError, msg, idx.take, indices, mode='clip') @pytest.mark.parametrize('other', [Index(['three', 'one', 'two']), Index(['one']), Index(['one', 'three'])]) def test_join_level(self, other, join_type): join_index, lidx, ridx = other.join(self.index, how=join_type, level='second', return_indexers=True) exp_level = other.join(self.index.levels[1], how=join_type) assert join_index.levels[0].equals(self.index.levels[0]) assert join_index.levels[1].equals(exp_level) # pare down levels mask = np.array( [x[1] in exp_level for x in self.index], dtype=bool) exp_values = self.index.values[mask] tm.assert_numpy_array_equal(join_index.values, exp_values) if join_type in ('outer', 'inner'): join_index2, ridx2, lidx2 = \ self.index.join(other, how=join_type, level='second', return_indexers=True) assert join_index.equals(join_index2) tm.assert_numpy_array_equal(lidx, lidx2) tm.assert_numpy_array_equal(ridx, ridx2) tm.assert_numpy_array_equal(join_index2.values, exp_values) def test_join_level_corner_case(self): # some corner cases idx = Index(['three', 'one', 'two']) result = idx.join(self.index, level='second') assert isinstance(result, MultiIndex) tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous", self.index.join, self.index, level=1) def test_join_self(self, join_type): res = self.index joined = res.join(res, 
how=join_type) assert res is joined def test_join_multi(self): # GH 10665 midx = pd.MultiIndex.from_product( [np.arange(4), np.arange(4)], names=['a', 'b']) idx = pd.Index([1, 2, 5], name='b') # inner jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True) exp_idx = pd.MultiIndex.from_product( [np.arange(4), [1, 2]], names=['a', 'b']) exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp) exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp) tm.assert_index_equal(jidx, exp_idx) tm.assert_numpy_array_equal(lidx, exp_lidx) tm.assert_numpy_array_equal(ridx, exp_ridx) # flip jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True) tm.assert_index_equal(jidx, exp_idx) tm.assert_numpy_array_equal(lidx, exp_lidx) tm.assert_numpy_array_equal(ridx, exp_ridx) # keep MultiIndex jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True) exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1], dtype=np.intp) tm.assert_index_equal(jidx, midx) assert lidx is None tm.assert_numpy_array_equal(ridx, exp_ridx) # flip jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True) tm.assert_index_equal(jidx, midx) assert lidx is None tm.assert_numpy_array_equal(ridx, exp_ridx) def test_reindex(self): result, indexer = self.index.reindex(list(self.index[:4])) assert isinstance(result, MultiIndex) self.check_level_names(result, self.index[:4].names) result, indexer = self.index.reindex(list(self.index)) assert isinstance(result, MultiIndex) assert indexer is None self.check_level_names(result, self.index.names) def test_reindex_level(self): idx = Index(['one']) target, indexer = self.index.reindex(idx, level='second') target2, indexer2 = idx.reindex(self.index, level='second') exp_index = self.index.join(idx, level='second', how='right') exp_index2 = self.index.join(idx, level='second', how='left') assert target.equals(exp_index) exp_indexer = np.array([0, 2, 4]) tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False) assert target2.equals(exp_index2) exp_indexer2 = np.array([0, -1, 0, -1, 0, -1]) tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False) tm.assert_raises_regex(TypeError, "Fill method not supported", self.index.reindex, self.index, method='pad', level='second') tm.assert_raises_regex(TypeError, "Fill method not supported", idx.reindex, idx, method='bfill', level='first') def test_duplicates(self): assert not self.index.has_duplicates assert self.index.append(self.index).has_duplicates index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[ [0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]]) assert index.has_duplicates # GH 9075 t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169), (u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119), (u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135), (u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145), (u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158), (u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122), (u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160), (u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180), (u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143), (u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128), (u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129), (u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111), (u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114), (u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121), (u('x'), u('out'), 
u('z'), 31, u('y'), u('in'), u('z'), 126), (u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155), (u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123), (u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)] index = pd.MultiIndex.from_tuples(t) assert not index.has_duplicates # handle int64 overflow if possible def check(nlevels, with_nulls): labels = np.tile(np.arange(500), 2) level = np.arange(500) if with_nulls: # inject some null values labels[500] = -1 # common nan value labels = [labels.copy() for i in range(nlevels)] for i in range(nlevels): labels[i][500 + i - nlevels // 2] = -1 labels += [np.array([-1, 1]).repeat(500)] else: labels = [labels] * nlevels + [np.arange(2).repeat(500)] levels = [level] * nlevels + [[0, 1]] # no dups index = MultiIndex(levels=levels, labels=labels) assert not index.has_duplicates # with a dup if with_nulls: def f(a): return np.insert(a, 1000, a[0]) labels = list(map(f, labels)) index = MultiIndex(levels=levels, labels=labels) else: values = index.values.tolist() index = MultiIndex.from_tuples(values + [values[0]]) assert index.has_duplicates # no overflow check(4, False) check(4, True) # overflow possible check(8, False) check(8, True) # GH 9125 n, k = 200, 5000 levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)] labels = [np.random.choice(n, k * n) for lev in levels] mi = MultiIndex(levels=levels, labels=labels) for keep in ['first', 'last', False]: left = mi.duplicated(keep=keep) right = pd._libs.hashtable.duplicated_object(mi.values, keep=keep) tm.assert_numpy_array_equal(left, right) # GH5873 for a in [101, 102]: mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]]) assert not mi.has_duplicates with warnings.catch_warnings(record=True): # Deprecated - see GH20239 assert mi.get_duplicates().equals(MultiIndex.from_arrays( [[], []])) tm.assert_numpy_array_equal(mi.duplicated(), np.zeros( 2, dtype='bool')) for n in range(1, 6): # 1st level shape for m in range(1, 5): # 2nd level shape # all possible unique combinations, including nan lab = product(range(-1, n), range(-1, m)) mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]], labels=np.random.permutation(list(lab)).T) assert len(mi) == (n + 1) * (m + 1) assert not mi.has_duplicates with warnings.catch_warnings(record=True): # Deprecated - see GH20239 assert mi.get_duplicates().equals(MultiIndex.from_arrays( [[], []])) tm.assert_numpy_array_equal(mi.duplicated(), np.zeros( len(mi), dtype='bool')) def test_duplicate_meta_data(self): # GH 10115 index = MultiIndex( levels=[[0, 1], [0, 1, 2]], labels=[[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]]) for idx in [index, index.set_names([None, None]), index.set_names([None, 'Num']), index.set_names(['Upper', 'Num']), ]: assert idx.has_duplicates assert idx.drop_duplicates().names == idx.names def test_get_unique_index(self): idx = self.index[[0, 1, 0, 1, 1, 0, 0]] expected = self.index._shallow_copy(idx[[0, 1]]) for dropna in [False, True]: result = idx._get_unique_index(dropna=dropna) assert result.unique tm.assert_index_equal(result, expected) @pytest.mark.parametrize('names', [None, ['first', 'second']]) def test_unique(self, names): mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]], names=names) res = mi.unique() exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names) tm.assert_index_equal(res, exp) mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')], names=names) res = mi.unique() exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')], names=mi.names) 
tm.assert_index_equal(res, exp) mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')], names=names) res = mi.unique() exp = pd.MultiIndex.from_arrays([['a'], ['a']], names=mi.names) tm.assert_index_equal(res, exp) # GH #20568 - empty MI mi = pd.MultiIndex.from_arrays([[], []], names=names) res = mi.unique() tm.assert_index_equal(mi, res) @pytest.mark.parametrize('level', [0, 'first', 1, 'second']) def test_unique_level(self, level): # GH #17896 - with level= argument result = self.index.unique(level=level) expected = self.index.get_level_values(level).unique() tm.assert_index_equal(result, expected) # With already unique level mi = pd.MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]], names=['first', 'second']) result = mi.unique(level=level) expected = mi.get_level_values(level) tm.assert_index_equal(result, expected) # With empty MI mi = pd.MultiIndex.from_arrays([[], []], names=['first', 'second']) result = mi.unique(level=level) expected = mi.get_level_values(level) def test_unique_datetimelike(self): idx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01', '2015-01-01', 'NaT', 'NaT']) idx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02', '2015-01-02', 'NaT', '2015-01-01'], tz='Asia/Tokyo') result = pd.MultiIndex.from_arrays([idx1, idx2]).unique() eidx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT']) eidx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-02', 'NaT', '2015-01-01'], tz='Asia/Tokyo') exp = pd.MultiIndex.from_arrays([eidx1, eidx2]) tm.assert_index_equal(result, exp) def test_tolist(self): result = self.index.tolist() exp = list(self.index.values) assert result == exp def test_repr_with_unicode_data(self): with pd.core.config.option_context("display.encoding", 'UTF-8'): d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} index = pd.DataFrame(d).set_index(["a", "b"]).index assert "\\u" not in repr(index) # we don't want unicode-escaped def test_repr_roundtrip(self): mi = MultiIndex.from_product([list('ab'), range(3)], names=['first', 'second']) str(mi) if PY3: tm.assert_index_equal(eval(repr(mi)), mi, exact=True) else: result = eval(repr(mi)) # string coerces to unicode tm.assert_index_equal(result, mi, exact=False) assert mi.get_level_values('first').inferred_type == 'string' assert result.get_level_values('first').inferred_type == 'unicode' mi_u = MultiIndex.from_product( [list(u'ab'), range(3)], names=['first', 'second']) result = eval(repr(mi_u)) tm.assert_index_equal(result, mi_u, exact=True) # formatting if PY3: str(mi) else: compat.text_type(mi) # long format mi = MultiIndex.from_product([list('abcdefg'), range(10)], names=['first', 'second']) if PY3: tm.assert_index_equal(eval(repr(mi)), mi, exact=True) else: result = eval(repr(mi)) # string coerces to unicode tm.assert_index_equal(result, mi, exact=False) assert mi.get_level_values('first').inferred_type == 'string' assert result.get_level_values('first').inferred_type == 'unicode' result = eval(repr(mi_u)) tm.assert_index_equal(result, mi_u, exact=True) def test_str(self): # tested elsewhere pass def test_unicode_string_with_unicode(self): d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} idx = pd.DataFrame(d).set_index(["a", "b"]).index if PY3: str(idx) else: compat.text_type(idx) def test_bytestring_with_unicode(self): d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} idx = pd.DataFrame(d).set_index(["a", "b"]).index if PY3: bytes(idx) else: str(idx) def test_slice_keep_name(self): x = MultiIndex.from_tuples([('a', 'b'), (1, 2), ('c', 
'd')], names=['x', 'y']) assert x[1:].names == x.names def test_isna_behavior(self): # should not segfault GH5123 # NOTE: if MI representation changes, may make sense to allow # isna(MI) with pytest.raises(NotImplementedError): pd.isna(self.index) def test_level_setting_resets_attributes(self): ind = pd.MultiIndex.from_arrays([ ['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3] ]) assert ind.is_monotonic ind.set_levels([['A', 'B'], [1, 3, 2]], inplace=True) # if this fails, probably didn't reset the cache correctly. assert not ind.is_monotonic def test_is_monotonic_increasing(self): i = MultiIndex.from_product([np.arange(10), np.arange(10)], names=['one', 'two']) assert i.is_monotonic assert i._is_strictly_monotonic_increasing assert Index(i.values).is_monotonic assert i._is_strictly_monotonic_increasing i = MultiIndex.from_product([np.arange(10, 0, -1), np.arange(10)], names=['one', 'two']) assert not i.is_monotonic assert not i._is_strictly_monotonic_increasing assert not Index(i.values).is_monotonic assert not Index(i.values)._is_strictly_monotonic_increasing i = MultiIndex.from_product([np.arange(10), np.arange(10, 0, -1)], names=['one', 'two']) assert not i.is_monotonic assert not i._is_strictly_monotonic_increasing assert not Index(i.values).is_monotonic assert not Index(i.values)._is_strictly_monotonic_increasing i = MultiIndex.from_product([[1.0, np.nan, 2.0], ['a', 'b', 'c']]) assert not i.is_monotonic assert not i._is_strictly_monotonic_increasing assert not Index(i.values).is_monotonic assert not Index(i.values)._is_strictly_monotonic_increasing # string ordering i = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two', 'three']], labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=['first', 'second']) assert not i.is_monotonic assert not Index(i.values).is_monotonic assert not i._is_strictly_monotonic_increasing assert not Index(i.values)._is_strictly_monotonic_increasing i = MultiIndex(levels=[['bar', 'baz', 'foo', 'qux'], ['mom', 'next', 'zenith']], labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=['first', 'second']) assert i.is_monotonic assert Index(i.values).is_monotonic assert i._is_strictly_monotonic_increasing assert Index(i.values)._is_strictly_monotonic_increasing # mixed levels, hits the TypeError i = MultiIndex( levels=[[1, 2, 3, 4], ['gb00b03mlx29', 'lu0197800237', 'nl0000289783', 'nl0000289965', 'nl0000301109']], labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]], names=['household_id', 'asset_id']) assert not i.is_monotonic assert not i._is_strictly_monotonic_increasing # empty i = MultiIndex.from_arrays([[], []]) assert i.is_monotonic assert Index(i.values).is_monotonic assert i._is_strictly_monotonic_increasing assert Index(i.values)._is_strictly_monotonic_increasing def test_is_monotonic_decreasing(self): i = MultiIndex.from_product([np.arange(9, -1, -1), np.arange(9, -1, -1)], names=['one', 'two']) assert i.is_monotonic_decreasing assert i._is_strictly_monotonic_decreasing assert Index(i.values).is_monotonic_decreasing assert i._is_strictly_monotonic_decreasing i = MultiIndex.from_product([np.arange(10), np.arange(10, 0, -1)], names=['one', 'two']) assert not i.is_monotonic_decreasing assert not i._is_strictly_monotonic_decreasing assert not Index(i.values).is_monotonic_decreasing assert not Index(i.values)._is_strictly_monotonic_decreasing i = MultiIndex.from_product([np.arange(10, 0, -1), np.arange(10)], names=['one', 'two']) assert not i.is_monotonic_decreasing assert not 
i._is_strictly_monotonic_decreasing assert not Index(i.values).is_monotonic_decreasing assert not Index(i.values)._is_strictly_monotonic_decreasing i = MultiIndex.from_product([[2.0, np.nan, 1.0], ['c', 'b', 'a']]) assert not i.is_monotonic_decreasing assert not i._is_strictly_monotonic_decreasing assert not Index(i.values).is_monotonic_decreasing assert not Index(i.values)._is_strictly_monotonic_decreasing # string ordering i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'], ['three', 'two', 'one']], labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=['first', 'second']) assert not i.is_monotonic_decreasing assert not Index(i.values).is_monotonic_decreasing assert not i._is_strictly_monotonic_decreasing assert not Index(i.values)._is_strictly_monotonic_decreasing i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'], ['zenith', 'next', 'mom']], labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=['first', 'second']) assert i.is_monotonic_decreasing assert Index(i.values).is_monotonic_decreasing assert i._is_strictly_monotonic_decreasing assert Index(i.values)._is_strictly_monotonic_decreasing # mixed levels, hits the TypeError i = MultiIndex( levels=[[4, 3, 2, 1], ['nl0000301109', 'nl0000289965', 'nl0000289783', 'lu0197800237', 'gb00b03mlx29']], labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]], names=['household_id', 'asset_id']) assert not i.is_monotonic_decreasing assert not i._is_strictly_monotonic_decreasing # empty i = MultiIndex.from_arrays([[], []]) assert i.is_monotonic_decreasing assert Index(i.values).is_monotonic_decreasing assert i._is_strictly_monotonic_decreasing assert Index(i.values)._is_strictly_monotonic_decreasing def test_is_strictly_monotonic_increasing(self): idx = pd.MultiIndex(levels=[['bar', 'baz'], ['mom', 'next']], labels=[[0, 0, 1, 1], [0, 0, 0, 1]]) assert idx.is_monotonic_increasing assert not idx._is_strictly_monotonic_increasing def test_is_strictly_monotonic_decreasing(self): idx = pd.MultiIndex(levels=[['baz', 'bar'], ['next', 'mom']], labels=[[0, 0, 1, 1], [0, 0, 0, 1]]) assert idx.is_monotonic_decreasing assert not idx._is_strictly_monotonic_decreasing def test_reconstruct_sort(self): # starts off lexsorted & monotonic mi = MultiIndex.from_arrays([ ['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3] ]) assert mi.is_lexsorted() assert mi.is_monotonic recons = mi._sort_levels_monotonic() assert recons.is_lexsorted() assert recons.is_monotonic assert mi is recons assert mi.equals(recons) assert
Index(mi.values)
pandas.Index
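The masked call above resolves to pandas.Index over the MultiIndex values. As a minimal, self-contained illustration of the behaviour those tests exercise (not taken from the pandas test suite, and using the current property name is_monotonic_increasing), the sketch below flattens a small MultiIndex the same way:

import pandas as pd

# Build a lexsorted MultiIndex and wrap its tuple values in a plain pandas.Index.
mi = pd.MultiIndex.from_arrays([['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]])
flat = pd.Index(mi.values)           # same pattern as the masked completion above

assert mi.is_monotonic_increasing    # lexsorted input -> monotonic MultiIndex
assert flat.is_monotonic_increasing  # the flattened tuple index agrees
assert flat[0] == ('A', 1)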
""" @file @brief Command line about validation of prediction runtime. """ import os from io import StringIO from collections import OrderedDict import json import numpy from onnx import TensorProto from pandas import DataFrame from cpyquickhelper.numbers import measure_time from onnxruntime import InferenceSession, SessionOptions from ..onnxrt import OnnxInference from ..onnxrt.ops_whole.session import OnnxWholeSession def _random_input(typ, shape, batch): if typ in ('tensor(double)', TensorProto.DOUBLE): # pylint: disable=E1101 dtype = numpy.float64 elif typ in ('tensor(float)', TensorProto.FLOAT): # pylint: disable=E1101 dtype = numpy.float32 else: raise NotImplementedError( "Unable to guess dtype from %r." % typ) if len(shape) <= 1: new_shape = shape elif shape[0] in (None, 0): new_shape = tuple([batch] + list(shape[1:])) else: new_shape = shape return numpy.random.randn(*new_shape).astype(dtype) def random_feed(inputs, batch=10): """ Creates a dictionary of random inputs. :param batch: dimension to use as batch dimension if unknown :return: dictionary """ res = OrderedDict() for inp in inputs: name = inp.name if hasattr(inp.type, 'tensor_type'): typ = inp.type.tensor_type.elem_type shape = tuple(getattr(d, 'dim_value', 0) for d in inp.type.tensor_type.shape.dim) else: typ = inp.type shape = inp.shape res[name] = _random_input(typ, shape, batch) return res def latency(model, law='normal', size=1, number=10, repeat=10, max_time=0, runtime="onnxruntime", device='cpu', fmt=None, profiling=None, profile_output='profiling.csv'): """ Measures the latency of a model (python API). :param model: ONNX graph :param law: random law used to generate fake inputs :param size: batch size, it replaces the first dimension of every input if it is left unknown :param number: number of calls to measure :param repeat: number of times to repeat the experiment :param max_time: if it is > 0, it runs as many time during that period of time :param runtime: available runtime :param device: device, `cpu`, `cuda:0` :param fmt: None or `csv`, it then returns a string formatted like a csv file :param profiling: if True, profile the execution of every node, if can be by name or type. :param profile_output: output name for the profiling if profiling is specified .. cmdref:: :title: Measures model latency :cmd: -m mlprodict latency --help :lid: l-cmd-latency The command generates random inputs and call many times the model on these inputs. It returns the processing time for one iteration. Example:: python -m mlprodict latency --model "model.onnx" """ if not os.path.exists(model): raise FileNotFoundError( # pragma: no cover "Unable to find model %r." % model) if profiling not in (None, '', 'name', 'type'): raise ValueError( "Unexpected value for profiling: %r." % profiling) size = int(size) number = int(number) repeat = int(repeat) if max_time in (None, 0, ""): max_time = None else: max_time = float(max_time) if max_time <= 0: max_time = None if law != "normal": raise ValueError( "Only law='normal' is supported, not %r." % law) if device != 'cpu': raise NotImplementedError( # pragma no cover "Only support cpu for now not %r." % device) if profiling in ('name', 'type') and profile_output in (None, ''): raise ValueError( # pragma: no cover 'profiling is enabled but profile_output is wrong (%r).' 
'' % profile_output) if runtime == "onnxruntime": if profiling in ('name', 'type'): so = SessionOptions() so.enable_profiling = True sess = InferenceSession(model, sess_options=so) else: sess = InferenceSession(model) fct = lambda feeds: sess.run(None, feeds) inputs = sess.get_inputs() else: if profiling in ('name', 'type'): runtime_options = {"enable_profiling": True} if runtime != 'onnxruntime1': raise NotImplementedError( # pragma: no cover "Profiling is not implemented for runtime=%r." % runtime) else: runtime_options = None oinf = OnnxInference(model, runtime=runtime, runtime_options=runtime_options) fct = lambda feeds: oinf.run(feeds) inputs = oinf.obj.graph.input feeds = random_feed(inputs, size) res = measure_time(lambda: fct(feeds), number=number, repeat=repeat, context={}, max_time=max_time, div_by_number=True) if profiling in ('name', 'type'): if runtime == 'onnxruntime': profile_name = sess.end_profiling() with open(profile_name, 'r', encoding='utf-8') as f: js = json.load(f) js = OnnxWholeSession.process_profiling(js) df =
DataFrame(js)
pandas.DataFrame
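The completion above builds a pandas.DataFrame from the post-processed profiling records. A hedged sketch of that final step, using invented record fields (name, op_type, dur) rather than the exact structure OnnxWholeSession.process_profiling returns:

import pandas as pd

# Hypothetical per-node profiling records; the real profiler output may differ.
records = [
    {"name": "MatMul_0", "op_type": "MatMul", "dur": 120},
    {"name": "Add_1", "op_type": "Add", "dur": 15},
    {"name": "MatMul_2", "op_type": "MatMul", "dur": 130},
]
df = pd.DataFrame(records)
# Aggregate duration per operator type, mirroring the 'type' profiling option.
agg = df.groupby("op_type")["dur"].sum().sort_values(ascending=False)
print(agg)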
import xml.etree.ElementTree as ET  # to parse XML
import numpy as np  # To convert list to numpy array. Used for creating
                    # pandas dataframe column
import pandas as pd  # used to create csv of parsed data

print("Started reading xml file from xmlparse library...")
tree = ET.parse("./learners_cleaned.xml")
print("Done reading xml files")
root = tree.getroot()

learnerid = []
nationality = []
grade = []
level = []
topic = []
text = []

print("Starting parsing ...")
# iterate over writing items
for writings in root.findall('./writings'):
    for writing in writings:
        level.append(writing.attrib['level'])
        for item in writing:
            if item.tag == "learner":
                learnerid.append(item.attrib['id'])
                nationality.append(item.attrib['nationality'])
            if item.tag == "topic":
                topic.append(item.text)
            if item.tag == "grade":
                grade.append(item.text)
            if item.tag == "text":
                text.append(item.text)

print("Parsing completed \n")
df =
pd.DataFrame(columns = ["learnerId","nationality","grade","level","topic","text"])
pandas.DataFrame
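The script above collects the parsed fields into lists and then creates an empty pandas.DataFrame with named columns. A minimal alternative sketch, with placeholder list contents and a hypothetical output file name, builds the frame directly from a dict of those lists and writes the CSV in one step:

import pandas as pd

# Placeholder data standing in for the lists filled while walking the XML tree.
learnerid = ["L1"]
nationality = ["fr"]
grade = ["B"]
level = ["A2"]
topic = ["Holidays"]
text = ["Some essay text"]

# Building from a dict of columns avoids assigning into an empty frame row by row.
df = pd.DataFrame({
    "learnerId": learnerid,
    "nationality": nationality,
    "grade": grade,
    "level": level,
    "topic": topic,
    "text": text,
})
df.to_csv("learners_parsed.csv", index=False)  # hypothetical output name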
''' Author: <NAME> Create Time: 2021-10-14 19:35:38 Copyright: Copyright (c) 2021 <NAME>. See LICENSE for details ''' from OpenFlows.Domain.ModelingElements.NetworkElements import IActiveElementsInput, IBaseLinksInput, IBasePolygonInput, IBasePolygonsInput, INetworkElements, IPointNodesInput from OpenFlows.Water.Domain import IWaterModel from OpenFlows.Water.Domain.ModelingElements.NetworkElements import IBaseDirectedNodesInput, IBaseNodesInput, IBasePumpsInput, IBaseValvesInput, ICheckValveElementsInput, IConventionalTanksInput, ICustomerMetersInput, IDemandNodesInput, IFireFlowNodesInput, IFlowControlValvesInput, IGeneralPurposeValves, IGeneralPurposeValvesInput, IHydrantsInput, IHydroTanksInput, IIsolationValveElementsInput, IJunctionsInput, ILateralsInput, IPhysicalNodeElementsInput, IPipes, IPressureBreakingValves, IPressureBreakingValvesInput, IPressureSustainingValvesInput, IPressureValvesInput, IPumpStations, IPumpStationsInput, IPumps, IPumpsInput, IReservoirs, ISCADAElements, ISCADAElementsInput, ITanks, ITanksInput, ITaps, IThrottleControlValvesInput, IVSPBsInput, IWaterQualityElementsInput, IWaterQualityNodesInput, IWaterZoneableNetworkElementsInput import numpy as np import pandas as pd import networkx as nx from typing import Any, List, Type class NetworkInput: # region Fields __waterModel: IWaterModel # endregion # region Constructor def __init__(self, water_model: IWaterModel) -> None: self.__waterModel = water_model pass def __repr__(self) -> str: line1 = f"{__class__.__name__}: {self.__waterModel}." line2 = f"Pipe Count: {self.__waterModel.Network.Pipes.Count}" line3 = f"Junction Count: {self.__waterModel.Network.Junctions.Count}" return f"{line1} {line2} {line3}." # endregion # region Public Methods def get_networkx_graph(self, laterals: bool = False) -> nx.Graph: columns = ["Id", "Label"] if laterals else [ "Id", "Label", "Diameter", "IsActive"] links_df = self.pipe_df[columns].copy() if laterals: links_df.append(self.lateral_df[columns]) graph: nx.Graph = nx.from_pandas_edgelist( df=links_df, source="StartNodeId", target="StopNodeId", edge_attr=columns) return graph # endregion // Public Methods # region Public Properties (Network Elements DF) @property def pipe_df(self) -> pd.DataFrame: return self.__get_pipe_input(self.__waterModel.Network.Pipes) @property def lateral_df(self) -> pd.DataFrame: return self.__get_lateral_input(self.__waterModel.Network.Laterals) @property def junction_df(self) -> pd.DataFrame: return self.__get_junction_input(self.__waterModel.Network.Junctions) @property def hydrant_df(self) -> pd.DataFrame: return self.__get_hydrant_input(self.__waterModel.Network.Hydrants) @property def tank_df(self) -> pd.DataFrame: return self.__get_tank_input(self.__waterModel.Network.Tanks) @property def reservoir_df(self) -> pd.DataFrame: return self.__get_reservoir_input(self.__waterModel.Network.Reservoirs) @property def tap_df(self) -> pd.DataFrame: return self.__get_tap_input(self.__waterModel.Network.Taps) @property def pump_df(self) -> pd.DataFrame: return self.__get_pump_input(self.__waterModel.Network.Pumps) @property def pump_stn_df(self) -> pd.DataFrame: return self.__get_pump_stn_input(self.__waterModel.Network.PumpStations) @property def customer_meter_df(self) -> pd.DataFrame: return self.__get_customer_meter_input(self.__waterModel.Network.CustomerMeters) @property def scada_elem_df(self) -> pd.DataFrame: return self.__get_scada_elem_input(self.__waterModel.Network.SCADAElements) @property def vspb_df(self) -> pd.DataFrame: return 
self.__get_vspb_input(self.__waterModel.Network.VSPBs) @property def prv_df(self) -> pd.DataFrame: return self.__get_prv_input(self.__waterModel.Network.PRVs) @property def psv_df(self) -> pd.DataFrame: return self.__get_psv_input(self.__waterModel.Network.PSVs) @property def pbv_df(self) -> pd.DataFrame: return self.__get_pbv_input(self.__waterModel.Network.PBVs) @property def fcv_df(self) -> pd.DataFrame: return self.__get_fcv_input(self.__waterModel.Network.FCVs) @property def tcv_df(self) -> pd.DataFrame: return self.__get_tcv_input(self.__waterModel.Network.TCVs) @property def gpv_df(self) -> pd.DataFrame: return self.__get_gpv_input(self.__waterModel.Network.GPVs) @property def iso_valve_df(self) -> pd.DataFrame: return self.__get_iso_valve_input(self.__waterModel.Network.IsolationValves) @property def hydro_tank_df(self) -> pd.DataFrame: return self.__get_hydro_tank_input(self.__waterModel.Network.HydropneumaticTanks) @property def check_valve_df(self) -> pd.DataFrame: return self.__get_check_valve_input(self.__waterModel.Network.CheckValves) # endregion // Public Properties # region Private methods def __dict_to_value(self, series: pd.Series, data_type: Type) -> pd.Series: series = series.apply(lambda d: d.Value) if data_type: if data_type is str: series = series.astype("string") else: series = series.astype(data_type) return series def __get_elements_input(self, elements: INetworkElements) -> pd.DataFrame: df = pd.DataFrame() df["Label"] = elements.Labels() df["Id"] = df["Label"].apply(lambda d: d.Key).astype(pd.Int64Dtype()) df["Label"] = df["Label"].apply(lambda d: d.Value).astype("string") return df def __get_physical_elevation_input(self, elements: IPhysicalNodeElementsInput, df: pd.DataFrame) -> pd.DataFrame: df["Elevation"] = elements.Elevations() df["Elevation"] = self.__dict_to_value(df["Elevation"], float) return df def __get_active_elements_input(self, elements: IActiveElementsInput, df: pd.DataFrame) -> pd.DataFrame: df["IsActive"] = elements.IsActives() df["IsActive"] = self.__dict_to_value(df["IsActive"], bool) return df def __get_zone_elements_input(self, elements: IWaterZoneableNetworkElementsInput, df: pd.DataFrame) -> pd.DataFrame: df["Zone"] = elements.Zones() df["Zone"] = self.__dict_to_value(df["Zone"], None) df["ZoneId"] = df["Zone"].apply(lambda z: z.Id if z else None) df["ZoneLabel"] = df["Zone"].apply( lambda z: z.Label if z else None).astype("string") return df def __get_point_node_input(self, elements: IPointNodesInput, df: pd.DataFrame) -> pd.DataFrame: df["Geometry"] = elements.Geometries() df["Geometry"] = self.__dict_to_value(df["Geometry"], None) x_and_y: List[Any] = df["Geometry"].apply( lambda p: [p.X, p.Y]).tolist() if x_and_y: # TODO: find the type of x&y df[["X", "Y"]] = x_and_y else: df["X"] = None df["Y"] = None return df def __get_polygons_geometry(self, elements: IBasePolygonsInput, df: pd.DataFrame) -> pd.DataFrame: df["Geometry"] = elements.Geometries() df["Geometry"] = self.__dict_to_value(df["Geometry"], None) df["Geometry"] = df["Geometry"].apply( lambda pts: [[p.X, p.Y] for p in pts]).tolist() return df def __get_water_quality_node_input(self, elements: IWaterQualityElementsInput, df: pd.DataFrame) -> pd.DataFrame: df["InitAge"] = elements.InitialAge() df["InitAge"] = self.__dict_to_value(df["InitAge"], float) df["InitConc"] = elements.InitialConcentration() df["InitConc"] = self.__dict_to_value(df["InitConc"], float) df["InitTrace"] = elements.InitialTrace() df["InitTrace"] = self.__dict_to_value(df["InitTrace"], float) return df def 
__get_installation_year_input(self, elements: Any, df: pd.DataFrame) -> pd.DataFrame: df["InstallYr"] = elements.InstallationYears() df["InstallYr"] = self.__dict_to_value( df["InstallYr"], pd.Int64Dtype()) return df def __get_minor_loss_node_input(self, elements: Any, df: pd.DataFrame) -> pd.DataFrame: df["dMLossCoeff"] = elements.DerivedMinorLossCoefficient() df["dMLossCoeff"] = self.__dict_to_value(df["dMLossCoeff"], float) df["IsLocalMLoss"] = elements.SpecifyLocalMinorLoss() df["IsLocalMLoss"] = self.__dict_to_value(df["IsLocalMLoss"], bool) df["LocalMLossCoeff"] = elements.LocalMinorLossCoefficient() df["LocalMLossCoeff"] = self.__dict_to_value( df["LocalMLossCoeff"], float) return df def __get_valve_characerstics_input(self, elements: Any, df: pd.DataFrame) -> pd.DataFrame: df["ValveChrsts"] = elements.ValveCharacteristics() df["ValveChrsts"] = self.__dict_to_value(df["ValveChrsts"], None) return df def __get_hammer_valve_type_input(self, elements: Any, df: pd.DataFrame) -> pd.DataFrame: df["ValveType"] = elements.ValveTypes() df["ValveType"] = self.__dict_to_value( df["ValveType"], pd.Int64Dtype()) return df def __get_demand_node_input(self, elements: IDemandNodesInput) -> pd.DataFrame: df = self.__get_base_node_input(elements) return df def __get_fire_node_input(self, elements: IFireFlowNodesInput) -> pd.DataFrame: df = self.__get_demand_node_input(elements) return df # region Base Node / Link / Polygon Inputs def __get_base_node_input(self, elements: IBaseNodesInput) -> pd.DataFrame: df = self.__get_elements_input(elements) df = self.__get_physical_elevation_input(elements, df) df = self.__get_active_elements_input(elements, df) df = self.__get_zone_elements_input(elements, df) df = self.__get_water_quality_node_input(elements, df) df = self.__get_point_node_input(elements, df) return df def __get_base_link_input(self, elements: IBaseLinksInput) -> pd.DataFrame: df = pd.DataFrame() df = self.__get_elements_input(elements) df = self.__get_active_elements_input(elements, df) df["StartNode"] = elements.StartNodes() df["StartNode"] = self.__dict_to_value(df["StartNode"], None) df["StartNodeId"] = df["StartNode"].apply( lambda n: n.Id).astype(pd.Int64Dtype()) df["StopNode"] = elements.StopNodes() df["StopNode"] = self.__dict_to_value(df["StopNode"], None) df["StopNodeId"] = df["StopNode"].apply( lambda n: n.Id).astype(pd.Int64Dtype()) df["IsUDLength"] = elements.IsUserDefinedLengths() df["IsUDLength"] = self.__dict_to_value(df["IsUDLength"], bool) df["Length"] = elements.Lengths() df["Length"] = self.__dict_to_value(df["Length"], float) df["Geometry"] = elements.Geometries() df["Geometry"] = self.__dict_to_value(df["Geometry"], None) return df def __get_base_polygon_input(self, elements: IBasePolygonsInput) -> pd.DataFrame: df = pd.DataFrame() df = self.__get_elements_input(elements) df = self.__get_active_elements_input(elements, df) df = self.__get_polygons_geometry(elements, df) return df # endregion def __get_associated_elements(self, elements: Any, df: pd.DataFrame) -> pd.DataFrame: df["AssocElem"] = elements.AssociatedElements() df["AssocElem"] = self.__dict_to_value(df["AssocElem"], None) df["AssocElemId"] = df["AssocElem"].apply( lambda n: n.Id if n else None).astype(pd.Int64Dtype()) return df # region Base Elements Input def __get_base_directed_node_input(self, elements: IBaseDirectedNodesInput) -> pd.DataFrame: df = self.__get_base_node_input(elements) df = self.__get_installation_year_input(elements, df) return df def __get_base_pump_node_input(self, elements: IPumpsInput) 
-> pd.DataFrame: df = self.__get_base_directed_node_input(elements) df["InitSpeedFactor"] = elements.InitialRelativeSpeedFactors() df["InitSpeedFactor"] = self.__dict_to_value( df["InitSpeedFactor"], float) df["InitStatus"] = elements.InitialStatus() df["InitStatus"] = self.__dict_to_value(df["InitStatus"], bool) return df def __get_base_valve_node_input(self, elements: IBaseValvesInput) -> pd.DataFrame: df = self.__get_base_directed_node_input(elements) df = self.__get_minor_loss_node_input(elements, df) df["InitStatus"] = elements.InitialStatus() df["InitStatus"] = self.__dict_to_value(df["InitStatus"], None) df["Diameter"] = elements.Diameters() df["Diameter"] = self.__dict_to_value(df["Diameter"], float) return df def __get_base_tank_node_input(self, elements: IConventionalTanksInput) -> pd.DataFrame: df = self.__get_demand_node_input(elements) return df def __get_conventional_tank_node_input(self, elements: IConventionalTanksInput) -> pd.DataFrame: df = self.__get_demand_node_input(elements) df = self.__get_water_quality_node_input(elements, df) df["SectionType"] = elements.TankSection() df["SectionType"] = self.__dict_to_value( df["SectionType"], None) df["ActiveVolFull"] = elements.ActiveVolumeFull() df["ActiveVolFull"] = self.__dict_to_value( df["ActiveVolFull"], float) df["Diameter"] = elements.Diameter() df["Diameter"] = self.__dict_to_value( df["Diameter"], float) df["AvgArea"] = elements.AverageArea() df["AvgArea"] = self.__dict_to_value( df["AvgArea"], float) df["BaseElev"] = elements.BaseElevation() df["BaseElev"] = self.__dict_to_value( df["BaseElev"], float) df["MinLevel"] = elements.MinimumLevel() df["MinLevel"] = self.__dict_to_value( df["MinLevel"], float) df["MaxLevel"] = elements.MaximumLevel() df["MaxLevel"] = self.__dict_to_value( df["MaxLevel"], float) df["InitLevel"] = elements.InitialLevel() df["InitLevel"] = self.__dict_to_value( df["InitLevel"], float) df["UseHighAlarm"] = elements.UseHighAlarm() df["UseHighAlarm"] = self.__dict_to_value( df["UseHighAlarm"], bool) df["HighAlarmLvl"] = elements.HighAlarmLevel() df["HighAlarmLvl"] = self.__dict_to_value( df["HighAlarmLvl"], float) df["UseLowAlarm"] = elements.UseLowAlarm() df["UseLowAlarm"] = self.__dict_to_value( df["UseLowAlarm"], bool) df["LowAlarmLvl"] = elements.LowAlarmLevel() df["LowAlarmLvl"] = self.__dict_to_value( df["LowAlarmLvl"], float) df["InactiveVol"] = elements.InactiveVolume() df["InactiveVol"] = self.__dict_to_value( df["InactiveVol"], float) return df def __get_base_pressure_valve_node_input(self, elements: IPressureValvesInput) -> pd.DataFrame: df = self.__get_base_valve_node_input(elements) df["PressureSettings"] = elements.PressureValveSettings() df["PressureSettings"] = self.__dict_to_value( df["PressureSettings"], float) df["InitSetting"] = elements.InitialSettings() df["InitSetting"] = self.__dict_to_value(df["InitSetting"], None) return df def __get_general_purpose_valve_node_input(self, elements: IGeneralPurposeValvesInput) -> pd.DataFrame: df = self.__get_base_valve_node_input(elements) df["GpvHlCurve"] = elements.GPVHeadlossCurves() df["GpvHlCurve"] = self.__dict_to_value(df["GpvHlCurve"], None) df["ValveChrsts"] = elements.ValveCharacteristics() df["ValveChrsts"] = self.__dict_to_value(df["ValveChrsts"], None) return df def __get_tank_input(self, elements: ITanksInput) -> pd.DataFrame: df = self.__get_conventional_tank_node_input(elements) df = self.__get_valve_characerstics_input(elements, df) df = self.__get_hammer_valve_type_input(elements, df) return df def 
__get_hydro_tank_input(self, elements: IHydroTanksInput) -> pd.DataFrame: df = self.__get_base_tank_node_input(elements) df["InitGasVol"] = elements.InitialVolumeOfGas() df["InitGasVol"] = self.__dict_to_value(df["InitGasVol"], float) df["InletOrifDia"] = elements.TankInletOrificeDiameter() df["InletOrifDia"] = self.__dict_to_value(df["InletOrifDia"], float) df["RatioOfLosses"] = elements.RatioOfLosses() df["RatioOfLosses"] = self.__dict_to_value(df["RatioOfLosses"], float) df["GasLawExponent"] = elements.GasLawExponent() df["GasLawExponent"] = self.__dict_to_value( df["GasLawExponent"], float) df["HasBladder"] = elements.HasBladder() df["HasBladder"] = self.__dict_to_value(df["HasBladder"], bool) df["GasPresetPressure"] = elements.GasPresetPressure() df["GasPresetPressure"] = self.__dict_to_value( df["GasPresetPressure"], float) df["MeanLqdElev"] = elements.MeanLiquidElevation() df["MeanLqdElev"] = self.__dict_to_value(df["MeanLqdElev"], float) df["AirInOrifDia"] = elements.AirInflowOrificeDiameter() df["AirInOrifDia"] = self.__dict_to_value(df["AirInOrifDia"], float) df["AirOutOrifDia"] = elements.AirOutflowOrificeDiameter() df["AirOutOrifDia"] = self.__dict_to_value(df["AirOutOrifDia"], float) df["DippingTubeDia"] = elements.DippingTubeDiameter() df["DippingTubeDia"] = self.__dict_to_value( df["DippingTubeDia"], float) df["CompChamberVol"] = elements.CompressionChamberVolume() df["CompChamberVol"] = self.__dict_to_value( df["CompChamberVol"], float) df["TopElevDippingTube"] = elements.TopElevationDippingTube() df["TopElevDippingTube"] = self.__dict_to_value( df["TopElevDippingTube"], float) df["LevelType"] = elements.LevelType() df["LevelType"] = self.__dict_to_value(df["LevelType"], None) df["HydroTankType"] = elements.HydroTankType() df["HydroTankType"] = self.__dict_to_value(df["HydroTankType"], None) # df["AirOutOrifDia"] = elements.AirOutflowOrificeDiameter() # df["AirOutOrifDia"] = self.__dict_to_value(df["AirOutOrifDia"], float) # df["DippingTubeDia"] = elements.DippingTubeDiameter() # df["DippingTubeDia"] = self.__dict_to_value(df["DippingTubeDia"], float) # df["CompChamberVol"] = elements.CompressionChamberVolume() # df["CompChamberVol"] = self.__dict_to_value(df["CompChamberVol"], float) # df["TopElevDippingTube"] = elements.TopElevationDippingTube() # df["TopElevDippingTube"] = self.__dict_to_value(df["TopElevDippingTube"], float) return df def __get_reservoir_input(self, elements: IReservoirs) -> pd.DataFrame: df = self.__get_base_node_input(elements) return df def __get_tap_input(self, elements: ITaps) -> pd.DataFrame: df = self.__get_elements_input(elements) df = self.__get_point_node_input(elements, df) df = self.__get_associated_elements(elements, df) return df # endregion # region Pipes/Laterals def __get_pipe_input(self, elements: IPipes) -> pd.DataFrame: df = self.__get_base_link_input(elements) df = self.__get_installation_year_input(elements, df) df["Status"] = elements.Input.PipeStatuses() df["Status"] = self.__dict_to_value(df["Status"], bool) df["Diameter"] = elements.Input.Diameters() df["Diameter"] = self.__dict_to_value(df["Diameter"], float) df["Material"] = elements.Input.Materials() df["Material"] = self.__dict_to_value(df["Material"], str) df["FrictionCoeff"] = elements.Input.FrictionCoefficients() df["FrictionCoeff"] = self.__dict_to_value(df["FrictionCoeff"], float) return df def __get_lateral_input(self, elements: ILateralsInput) -> pd.DataFrame: df = self.__get_base_link_input(elements) return df # endregion # region Fireflow Nodes def 
__get_junction_input(self, elements: IJunctionsInput) -> pd.DataFrame: df = self.__get_fire_node_input(elements) return df def __get_hydrant_input(self, elements: IHydrantsInput) -> pd.DataFrame: df = self.__get_fire_node_input(elements) return df # endregion # region Pumps / Pump Stations / VSPB def __get_pump_input(self, elements: IPumpsInput) -> pd.DataFrame: df = self.__get_base_directed_node_input(elements) df["InitSpeedFactor"] = elements.InitialRelativeSpeedFactors() df["InitSpeedFactor"] = self.__dict_to_value( df["InitSpeedFactor"], float) df["InitStatus"] = elements.InitialStatus() df["InitStatus"] = self.__dict_to_value( df["InitStatus"], pd.Int64Dtype()) # TODO: double check the fields return df def __get_pump_stn_input(self, elements: IPumpStationsInput) -> pd.DataFrame: df = self.__get_base_polygon_input(elements) return df def __get_vspb_input(self, elements: IVSPBsInput) -> pd.DataFrame: df = self.__get_base_pump_node_input(elements) df["PumpDefinition"] = elements.PumpDefinitions() df["PumpDefinition"] = self.__dict_to_value( df["PumpDefinition"], None) df["PumpDefinitionId"] = df["PumpDefinition"].apply( lambda p: p.Id if p else None).astype(pd.Int64Dtype()) df["ControlNode"] = elements.ControlNodes() df["ControlNode"] = self.__dict_to_value( df["ControlNode"], None) df["ControlNodeId"] = df["ControlNode"].apply( lambda p: p.Id if p else None).astype(pd.Int64Dtype()) df["TgtHGL"] = elements.TargetHydraulicGrades() df["TgtHGL"] = self.__dict_to_value( df["TgtHGL"], float) df["MaxSpeedFactor"] = elements.MaximumRelativeSpeedFactors() df["MaxSpeedFactor"] = self.__dict_to_value( df["MaxSpeedFactor"], float) df["NumLagPumps"] = elements.NumberOfLagPumps() df["NumLagPumps"] = self.__dict_to_value( df["NumLagPumps"], pd.Int64Dtype()) df["CtrlNodeSucSide"] = elements.ControlNodeOnSuctionSide() df["CtrlNodeSucSide"] = self.__dict_to_value( df["CtrlNodeSucSide"], None) df["CtrlNodeSucSideId"] = df["CtrlNodeSucSide"].apply( lambda p: p.Id if p else None).astype(pd.Int64Dtype()) df["TgtFlow"] = elements.TargetFlows() df["TgtFlow"] = self.__dict_to_value( df["TgtFlow"], float) df["TgtPressure"] = elements.TargetPressures() df["TgtPressure"] = self.__dict_to_value( df["TgtPressure"], float) df["VSPBType"] = elements.VSPBTypes() df["VSPBType"] = self.__dict_to_value( df["VSPBType"], None) df["VSPBFixedHeadType"] = elements.VSPBFixedHeadTypes() df["VSPBFixedHeadType"] = self.__dict_to_value( df["VSPBFixedHeadType"], None) return df # endregion # region Customer Meters def __get_customer_meter_input(self, elements: ICustomerMetersInput) -> pd.DataFrame: df = self.__get_elements_input(elements) df = self.__get_point_node_input(elements, df) df = self.__get_physical_elevation_input(elements, df) df["Demand"] = elements.BaseDemands() df["Demand"] = self.__dict_to_value(df["Demand"], float) df["Pattern"] = elements.DemandPatterns() df["Pattern"] = self.__dict_to_value(df["Pattern"], None) df["PatternId"] = df["Pattern"].apply( lambda p: p.Id if p else None).astype(pd.Int64Dtype()) df["StartDemandDist"] = elements.StartDemandDistributions() df["StartDemandDist"] = self.__dict_to_value( df["StartDemandDist"], float) df["AssocElem"] = elements.AssociatedElements() df["AssocElem"] = self.__dict_to_value(df["AssocElem"], None) df["AssocElemId"] = df["AssocElem"].apply( lambda c: c.Id if c else None).astype(pd.Int64Dtype()) df["UnitDemand"] = elements.UnitDemands() df["UnitDemand"] = self.__dict_to_value(df["UnitDemand"], float) df["UnitDmdPattern"] = elements.UnitDemandPatterns() 
df["UnitDmdPattern"] = self.__dict_to_value(df["UnitDmdPattern"], None) df["UnitDmdPatternId"] = df["UnitDmdPattern"].apply( lambda p: p.Id if p else None).astype(
pd.Int64Dtype()
pandas.Int64Dtype
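The completion above casts ID columns with pandas.Int64Dtype(), the nullable integer extension type. A short standalone sketch of why that dtype is used for IDs that may be missing:

import pandas as pd

# Plain int64 cannot hold missing IDs; Int64 keeps them as <NA> instead of
# silently converting the whole column to float.
ids = pd.Series([101, None, 103])
print(ids.dtype)                        # float64 -- NaN forces a float fallback
nullable = ids.astype(pd.Int64Dtype())  # equivalent to .astype("Int64")
print(nullable.dtype)                   # Int64
print(nullable.isna().tolist())         # [False, True, False]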
''' Created on May 16, 2018 @author: cef significant scripts for calculating damage within the ABMRI framework for secondary data loader scripts, see fdmg.datos.py ''' #=============================================================================== # # IMPORT STANDARD MODS ------------------------------------------------------- #=============================================================================== import logging, os, time, re, math, copy, gc, weakref """ unused sys, imp, """ import pandas as pd import numpy as np import scipy.integrate #=============================================================================== # shortcuts #=============================================================================== #from collections import OrderedDict from hp.dict import MyOrderedDict as OrderedDict from weakref import WeakValueDictionary as wdict from weakref import proxy from hp.basic import OrderedSet #=============================================================================== # IMPORT CUSTOM MODS --------------------------------------------------------- #=============================================================================== import hp.plot import hp.basic import hp.pd import hp.oop import hp.data import fdmg.datos as datos import matplotlib.pyplot as plt import matplotlib #import matplotlib.animation as animation #load the animation module (with the new search path) import udev.scripts mod_logger = logging.getLogger(__name__) mod_logger.debug('initilized') #=============================================================================== #module level defaults ------------------------------------------------------ #=============================================================================== #datapars_cols = [u'dataname', u'desc', u'datafile_tailpath', u'datatplate_tailpath', u'trim_row'] #headers in the data tab datafile_types_list = ['.csv', '.xls'] idx = pd.IndexSlice class Fdmg( #flood damage model hp.sel.Sel_controller, #no init hp.dyno.Dyno_wrap, #add some empty containers hp.plot.Plot_o, #build the label hp.sim.Sim_model, #Sim_wrap: attach the reset_d. Sim_model: inherit attributes hp.oop.Trunk_o, #no init #Parent_cmplx: attach empty kids_sd #Parent: set some defaults hp.oop.Child): """ #=========================================================================== # INPUTS #=========================================================================== pars_path ==> pars_file.xls main external parameter spreadsheet. See description in file for each column dataset parameters tab = 'data'. expected columns: datapars_cols session parameters tab = 'gen'. 
expected rows: sessionpars_rows """ #=========================================================================== # program parameters #=========================================================================== name = 'fdmg' #list of attribute names to try and inherit from the session try_inherit_anl = set(['ca_ltail', 'ca_rtail', 'mind', \ 'dbg_fld_cnt', 'legacy_binv_f', 'gis_area_max', \ 'fprob_mult', 'flood_tbl_nm', 'gpwr_aep', 'dmg_rat_f',\ 'joist_space', 'G_anchor_ht', 'bsmt_opn_ht_code','bsmt_egrd_code', \ 'damp_func_code', 'cont_val_scale', 'hse_skip_depth', \ 'area_egrd00', 'area_egrd01', 'area_egrd02', 'fhr_nm', 'write_fdmg_sum', 'dfeat_xclud_price', 'write_fdmg_sum_fly']) fld_aep_spcl = 100 #special flood to try and include in db runs bsmt_egrd = 'wet' #default value for bsmt_egrd legacy_binv_f = True #flag to indicate that the binv is in legacy format (use indicies rather than column labels) gis_area_max = 3500 #lsit of data o names expected on the fdmg tab #state = 'na' #for tracking what flood aep is currently in the model 'consider allowing the user control of these' gis_area_min = 5 gis_area_max = 5000 write_fdmg_sum_fly = False write_dmg_fly_first = True #start off to signifiy first run #=========================================================================== # debuggers #=========================================================================== beg_hist_df = None #=========================================================================== # user provided values #=========================================================================== #legacy pars floor_ht = 0.0 mind = '' #column to match between data sets and name the house objects #EAD calc ca_ltail ='flat' ca_rtail =2 #aep at which zero value is assumeed. 'none' uses lowest aep in flood set #Floodo controllers gpwr_aep = 100 #default max aep where gridpower_f = TRUE (when the power shuts off) dbg_fld_cnt = 0 #area exposure area_egrd00 = None area_egrd01 = None area_egrd02 = None #Dfunc controllers place_codes = None dmg_types = None flood_tbl_nm = None #name of the flood table to use #timeline deltas 'just keeping this on the fdmg for simplicitly.. no need for flood level heterogenieyt' wsl_delta = 0.0 fprob_mult = 1.0 #needs to be a float for type matching dmg_rat_f = False #Fdmg.House pars joist_space = 0.3 G_anchor_ht = 0.6 bsmt_egrd_code = 'plpm' damp_func_code = 'seep' bsmt_opn_ht_code = '*min(2.0)' hse_skip_depth = -4 #depth to skip house damage calc fhr_nm = '' cont_val_scale = .25 write_fdmg_sum = True dfeat_xclud_price = 0.0 #=========================================================================== # calculation parameters #=========================================================================== res_fancy = None gpwr_f = True #placeholder for __init__ calcs fld_aep_l = None dmg_dx_base = None #results frame for writing plotr_d = None #dictionary of EAD plot workers dfeats_d = dict() #{tag:dfeats}. 
see raise_all_dfeats() fld_pwr_cnt = 0 seq = 0 #damage results/stats dmgs_df = None dmgs_df_wtail = None #damage summaries with damages for the tail logic included ead_tot = 0 dmg_tot = 0 #=========================================================================== # calculation data holders #=========================================================================== dmg_dx = None #container for full run results bdry_cnt = 0 bwet_cnt = 0 bdamp_cnt = 0 def __init__(self,*vars, **kwargs): logger = mod_logger.getChild('Fdmg') #======================================================================= # initilize cascade #======================================================================= super(Fdmg, self).__init__(*vars, **kwargs) #initilzie teh baseclass #======================================================================= # object updates #======================================================================= self.reset_d.update({'ead_tot':0, 'dmgs_df':None, 'dmg_dx':None,\ 'wsl_delta':0}) #update the rest attributes #======================================================================= # pre checks #======================================================================= self.check_pars() #check the data loaded on your tab if not self.session._write_data: self.write_fdmg_sum = False #======================================================================= #setup functions #======================================================================= #par cleaners/ special loaders logger.debug("load_hse_geo() \n") self.load_hse_geo() logger.info('load and clean dfunc data \n') self.load_pars_dfunc(self.session.pars_df_d['dfunc']) #load the data functions to damage type table logger.debug('\n') self.setup_dmg_dx_cols() logger.debug('load_submodels() \n') self.load_submodels() logger.debug('init_dyno() \n') self.init_dyno() #outputting setup if self.write_fdmg_sum_fly: self.fly_res_fpath = os.path.join(self.session.outpath, '%s fdmg_res_fly.csv'%self.session.tag) if self.db_f: if not self.model.__repr__() == self.__repr__(): raise IOError logger.info('Fdmg model initialized as \'%s\' \n'%(self.name)) def check_pars(self): #check your data pars df_raw = self.session.pars_df_d['datos'] #======================================================================= # check mandatory data objects #======================================================================= if not 'binv' in df_raw['name'].tolist(): raise IOError #======================================================================= # check optional data objects #======================================================================= fdmg_tab_nl = ['rfda_curve', 'binv','dfeat_tbl', 'fhr_tbl'] boolidx = df_raw['name'].isin(fdmg_tab_nl) if not np.all(boolidx): raise IOError #passed some unexpected data names return def load_submodels(self): logger = self.logger.getChild('load_submodels') self.state = 'load' #======================================================================= # data objects #======================================================================= 'this is the main loader that builds all teh children as specified on the data tab' logger.info('loading dat objects from \'fdmg\' tab') logger.debug('\n \n') #build datos from teh data tab 'todo: hard code these class types (rather than reading from teh control file)' self.fdmgo_d = self.raise_children_df(self.session.pars_df_d['datos'], #df to raise on kid_class = None) #should raise according to df entry self.session.prof(state='load.fdmg.datos') 'WARNING: fdmgo_d is not set until 
after ALL the children on this tab are raised' #attach special children self.binv = self.fdmgo_d['binv'] """NO! this wont hold resetting updates self.binv_df = self.binv.childmeta_df""" #======================================================================= # flood tables #======================================================================= self.ftblos_d = self.raise_children_df(self.session.pars_df_d['flood_tbls'], #df to raise on kid_class = datos.Flood_tbl) #should raise according to df entry 'initial call which only udpates the binv_df' self.set_area_prot_lvl() if 'fhr_tbl' in self.fdmgo_d.keys(): self.set_fhr() #======================================================================= # dfeats #====================================================================== if self.session.load_dfeats_first_f & self.session.wdfeats_f: logger.debug('raise_all_dfeats() \n') self.dfeats_d = self.fdmgo_d['dfeat_tbl'].raise_all_dfeats() #======================================================================= # raise houses #======================================================================= logger.info('raising houses') logger.debug('\n') self.binv.raise_houses() self.session.prof(state='load.fdmg.houses') 'calling this here so all of the other datos are raised' #self.rfda_curve = self.fdmgo_d['rfda_curve'] """No! we need to get this in before the binv.reset_d['childmeta_df'] is set self.set_area_prot_lvl() #apply the area protectino from teh named flood table""" logger.info('loading floods') logger.debug('\n \n') self.load_floods() self.session.prof(state='load.fdmg.floods') logger.debug("finished with %i kids\n"%len(self.kids_d)) return def setup_dmg_dx_cols(self): #get teh columns to use for fdmg results """ This is setup to generate a unique set of ordered column names with this logic take the damage types add mandatory fields add user provided fields """ logger = self.logger.getChild('setup_dmg_dx_cols') #======================================================================= #build the basic list of column headers #======================================================================= #damage types at the head col_os = OrderedSet(self.dmg_types) #put #basic add ons _ = col_os.update(['total', 'hse_depth', 'wsl', 'bsmt_egrd', 'anchor_el']) #======================================================================= # special logic #======================================================================= if self.dmg_rat_f: for dmg_type in self.dmg_types: _ = col_os.add('%s_rat'%dmg_type) if not self.wsl_delta==0: col_os.add('wsl_raw') """This doesnt handle runs where we start with a delta of zero and then add some later for these, you need to expplicitly call 'wsl_raw' in the dmg_xtra_cols_fat""" #ground water damage if 'dmg_gw' in self.session.outpars_d['Flood']: col_os.add('gw_f') #add the dem if necessary if 'gw_f' in col_os: col_os.add('dem_el') #======================================================================= # set pars based on user provided #======================================================================= #s = self.session.outpars_d[self.__class__.__name__] #extra columns for damage resulst frame if self.db_f or self.session.write_fdmg_fancy: logger.debug('including extra columns in outputs') #clewan the extra cols 'todo: move this to a helper' if hasattr(self.session, 'xtra_cols'): try: dc_l = eval(self.session.xtra_cols) #convert to a list except: logger.error('failed to convert \'xtra_cols\' to a list. 
check formatting') raise IOError else: dc_l = ['wsl_raw', 'gis_area', 'hse_type', 'B_f_height', 'BS_ints','gw_f'] if not isinstance(dc_l, list): raise IOError col_os.update(dc_l) #add these self.dmg_df_cols = col_os logger.debug('set dmg_df_cols as: %s'%self.dmg_df_cols) return def load_pars_dfunc(self, df_raw=None): #build a df from the dfunc tab #======================================================================= # defaults #======================================================================= logger = self.logger.getChild('load_pars_dfunc') dfunc_ecols = ['place_code','dmg_code','dfunc_type','anchor_ht_code'] if df_raw is None: df_raw = self.session.pars_df_d['dfunc'] logger.debug('from df %s: \n %s'%(str(df_raw.shape), df_raw)) #======================================================================= # clean #======================================================================= df1 = df_raw.dropna(axis='columns', how='all') df2 = df1.dropna(axis='index', how='all') #drop rows with all na #column check if not hp.pd.header_check(df2, dfunc_ecols, logger=logger): raise IOError #======================================================================= # custom columns #======================================================================= df3 = df2.copy(deep=True) df3['dmg_type'] = df3['place_code'] + df3['dmg_code'] df3['name'] = df3['dmg_type'] #======================================================================= # data loading #======================================================================= if 'tailpath' in df3.columns: boolidx = ~pd.isnull(df3['tailpath']) #get dfuncs with data requests self.load_raw_dfunc(df3[boolidx]) df3 = df3.drop(['headpath', 'tailpath'], axis = 1, errors='ignore') #drop these columns #======================================================================= # garage checking #======================================================================= boolidx = np.logical_and(df3['place_code'] == 'G', df3['dfunc_type'] == 'rfda') if np.any(boolidx): logger.error('got dfunc_type = rfda for a garage curve (no such thing)') raise IOError #======================================================================= # get special lists #======================================================================= #dmg_types self.dmg_types = df3['dmg_type'].tolist() #damage codes boolidx = df3['place_code'].str.contains('total') self.dmg_codes = df3.loc[~boolidx, 'dmg_code'].unique().tolist() #place_codes place_codes = df3['place_code'].unique().tolist() if 'total' in place_codes: place_codes.remove('total') self.place_codes = place_codes self.session.pars_df_d['dfunc'] = df3 logger.debug('dfunc_df with %s'%str(df3.shape)) #======================================================================= # get slice for houses #======================================================================= #identify all the entries except total boolidx = df3['place_code'] != 'total' self.house_childmeta_df = df3[boolidx] #get this trim """ hp.pd.v(df3) """ def load_hse_geo(self): #special loader for hse_geo dxcol (from tab hse_geo) logger = self.logger.getChild('load_hse_geo') #======================================================================= # load and clean the pars #======================================================================= df_raw = hp.pd.load_xls_df(self.session.parspath, sheetname = 'hse_geo', header = [0,1], logger = logger) df = df_raw.dropna(how='all', axis = 'index') self.session.pars_df_d['hse_geo'] = df 
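        # A minimal, hypothetical sketch of the two-level 'hse_geo' layout this
        # loader expects (place_code over finish_code, with rows for area,
        # height, per and inta). The codes 'M'/'B'/'G' and 'f'/'u' below are
        # illustrative assumptions only; the real codes come from the tab itself:
        #
        #   import pandas as pd
        #   cols = pd.MultiIndex.from_product([['M', 'B', 'G'], ['f', 'u']],
        #                                     names=['place_code', 'finish_code'])
        #   demo = pd.DataFrame(0.0, index=['area', 'height', 'per', 'inta'],
        #                       columns=cols)
        #   demo.loc['area', ('M', 'f')] = 100.0  # e.g. main-floor finished area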
#======================================================================= # build a blank starter for each house to fill #======================================================================= omdex = df.columns #get the original mdex 'probably a cleaner way of doing this' lvl0_values = omdex.get_level_values(0).unique().tolist() lvl1_values = omdex.get_level_values(1).unique().tolist() lvl1_values.append('t') newcols = pd.MultiIndex.from_product([lvl0_values, lvl1_values], names=['place_code','finish_code']) geo_dxcol = pd.DataFrame(index = df.index, columns = newcols) #make the frame self.geo_dxcol_blank = geo_dxcol if self.db_f: if np.any(pd.isnull(df)): raise IOError l = geo_dxcol.index.tolist() if not l == [u'area', u'height', u'per', u'inta']: raise IOError return def load_raw_dfunc(self, meta_df_raw): #load raw data for dfuncs logger = self.logger.getChild('load_raw_dfunc') logger.debug('with df \'%s\''%(str(meta_df_raw.shape))) d = dict() #empty container meta_df = meta_df_raw.copy() #======================================================================= # loop through each row and load the data #======================================================================= for indx, row in meta_df.iterrows(): inpath = os.path.join(row['headpath'], row['tailpath']) df = hp.pd.load_smart_df(inpath, index_col =None, logger = logger) d[row['name']] = df.dropna(how = 'all', axis = 'index') #store this into the dictionaryu logger.info('finished loading raw dcurve data on %i dcurves: %s'%(len(d), d.keys())) self.dfunc_raw_d = d return def load_floods(self): #======================================================================= # defaults #======================================================================= logger = self.logger.getChild('load_floods') logger.debug('setting floods df \n') self.set_floods_df() df = self.floods_df logger.debug('raising floods \n') d = self.raise_children_df(df, #build flood children kid_class = Flood, dup_sibs_f= True, container = OrderedDict) #pass attributes from one tot eh next #======================================================================= # ordered by aep #======================================================================= fld_aep_od = OrderedDict() for childname, childo in d.iteritems(): if hasattr(childo, 'ari'): fld_aep_od[childo.ari] = childo else: raise IOError logger.info('raised and bundled %i floods by aep'%len(fld_aep_od)) self.fld_aep_od = fld_aep_od return def set_floods_df(self): #build the flood meta data logger = self.logger.getChild('set_floods_df') df_raw = self.session.pars_df_d['floods'] df1 = df_raw.sort_values('ari').reset_index(drop=True) df1['ari'] = df1['ari'].astype(np.int) #======================================================================= # slice for debug set #======================================================================= if self.db_f & (not self.dbg_fld_cnt == 'all'): #check that we even have enough to do the slicing if len(df1) < 2: logger.error('too few floods for debug slicing. pass dbg_fld_cnt == all') raise IOError df2 = pd.DataFrame(columns = df1.columns) #make blank starter frame dbg_fld_cnt = int(self.dbg_fld_cnt) logger.info('db_f=TRUE. 
selecting %i (of %i) floods'%(dbg_fld_cnt, len(df1))) #=================================================================== # try to pull out and add the 100yr #=================================================================== try: boolidx = df1.loc[:,'ari'] == self.fld_aep_spcl if not boolidx.sum() == 1: logger.debug('failed to locate 1 flood') raise IOError df2 = df2.append(df1[boolidx]) #add this row to the end df1 = df1[~boolidx] #slice out this row dbg_fld_cnt = max(0, dbg_fld_cnt - 1) #reduce the loop count by 1 dbg_fld_cnt = min(dbg_fld_cnt, len(df1)) #double check in case we are given a very short set logger.debug('added the %s year flood to the list with dbg_fld_cnt %i'%(self.fld_aep_spcl, dbg_fld_cnt)) except: logger.debug('failed to extract the special %i flood'%self.fld_aep_spcl) df2 = df1.copy() #=================================================================== # build list of extreme (low/high) floods #=================================================================== evn_cnt = 0 odd_cnt = 0 for cnt in range(0, dbg_fld_cnt, 1): if cnt % 2 == 0: #evens. pull from front idxr = evn_cnt evn_cnt += 1 else: #odds. pull from end idxr = len(df1) - odd_cnt - 1 odd_cnt += 1 logger.debug('pulling flood with indexer %i'%(idxr)) ser = df1.iloc[idxr, :] #make thsi slice df2 = df2.append(ser) #append this to the end #clean up df = df2.drop_duplicates().sort_values('ari').reset_index(drop=True) logger.debug('built extremes flood df with %i aeps: %s'%(len(df), df.loc[:,'ari'].values.tolist())) if not len(df) == int(self.dbg_fld_cnt): raise IOError else: df = df1.copy() if not len(df) > 0: raise IOError self.floods_df = df return def set_area_prot_lvl(self): #assign the area_prot_lvl to the binv based on your tab #logger = self.logger.getChild('set_area_prot_lvl') """ TODO: Consider moving this onto the binv and making the binv dynamic... Calls: handles for flood_tbl_nm """ logger = self.logger.getChild('set_area_prot_lvl') logger.debug('assigning \'area_prot_lvl\' for \'%s\''%self.flood_tbl_nm) #======================================================================= # get data #======================================================================= ftbl_o = self.ftblos_d[self.flood_tbl_nm] #get the activated flood table object ftbl_o.apply_on_binv('aprot_df', 'area_prot_lvl') """ hp.pd.v(binv_df) type(df.iloc[:, 0]) """ return True def set_fhr(self): #assign the fhz bfe and zone from the fhr_tbl data logger = self.logger.getChild('set_fhr') logger.debug('assigning for \'fhz\' and \'bfe\'') #get the data for this fhr set fhr_tbl_o = self.fdmgo_d['fhr_tbl'] try: df = fhr_tbl_o.d[self.fhr_nm] except: if not self.fhr_nm in fhr_tbl_o.d.keys(): logger.error('could not find selected fhr_nm \'%s\' in the loaded rule sets: \n %s' %(self.fhr_nm, fhr_tbl_o.d.keys())) raise IOError #======================================================================= # loop through each series and apply #======================================================================= """ not the most generic way of handling this... 
todo: add generic method to the binv can take ser or df updates the childmeta_df if before init updates the children if after init """ for hse_attn in ['fhz', 'bfe']: ser = df[hse_attn] if not self.session.state == 'init': #======================================================================= # tell teh binv to update its houses #======================================================================= self.binv.set_all_hse_atts(hse_attn, ser = ser) else: logger.debug('set column \'%s\' onto the binv_df'%hse_attn) self.binv.childmeta_df.loc[:,hse_attn] = ser #set this column in teh binvdf """I dont like this fhr_tbl_o.apply_on_binv('fhz_df', 'fhz', coln = self.fhr_nm) fhr_tbl_o.apply_on_binv('bfe_df', 'bfe', coln = self.fhr_nm)""" return True def get_all_aeps_classic(self): #get the list of flood aeps from the classic flood table format 'kept this special syntax reader separate in case we want to change th eformat of the flood tables' flood_pars_df = self.session.pars_df_d['floods'] #load the data from the flood table fld_aep_l = flood_pars_df.loc[:, 'ari'].values #drop the 2 values and convert to a list return fld_aep_l def run(self, **kwargs): #placeholder for simulation runs logger = self.logger.getChild('run') logger.debug('on run_cnt %i'%self.run_cnt) self.run_cnt += 1 self.state='run' #======================================================================= # prechecks #======================================================================= if self.db_f: if not isinstance(self.outpath, basestring): raise IOError logger.info('\n fdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmgfdmg') logger.info('for run_cnt %i'%self.run_cnt) self.calc_fld_set(**kwargs) return def setup_res_dxcol(self, #setup the results frame fld_aep_l = None, #dmg_type_list = 'all', bid_l = None): #======================================================================= # defaults #======================================================================= if bid_l == None: bid_l = self.binv.bid_l if fld_aep_l is None: fld_aep_l = self.fld_aep_od.keys() #just get all teh keys from the dictionary #if dmg_type_list=='all': dmg_type_list = self.dmg_types #======================================================================= # setup the dxind for writing #======================================================================= lvl0_values = fld_aep_l lvl1_values = self.dmg_df_cols #include extra reporting columns #fold these into a mdex (each flood_aep has all dmg_types) columns = pd.MultiIndex.from_product([lvl0_values, lvl1_values], names=['flood_aep','hse_atts']) dmg_dx = pd.DataFrame(index = bid_l, columns = columns).sort_index() #make the frame self.dmg_dx_base = dmg_dx.copy() if self.db_f: logger = self.logger.getChild('setup_res_dxcol') if not self.beg_hist_df == False: fld_aep_l.sort() columns = pd.MultiIndex.from_product([fld_aep_l, ['bsmt_egrd', 'cond']], names=['flood_aep','bsmt_egrd']) self.beg_hist_df = pd.DataFrame(index=bid_l, columns = columns) logger.info('recording bsmt_egrd history with %s'%str(self.beg_hist_df.shape)) else: self.beg_hist_df = None """ dmg_dx.columns """ return def calc_fld_set(self, #calc flood damage for the flood set fld_aep_l = None, #list of flood aeps to calcluate #dmg_type_list = 'all', #list of damage types to calculate bid_l = None, #list of building names ot calculate wsl_delta = None, #delta value to add to all wsl wtf = None, #optinonal flag to control writing of dmg_dx (otherwise session.write_fdmg_set_dx is 
used) **run_fld): #kwargs to send to run_fld 'we could separate the object creation and the damage calculation' """ #======================================================================= # INPUTS #======================================================================= fld_aep_l: list of floods to calc this can be a custom list built by the user extracted from the flood table (see session.get_ftbl_aeps) loaded from the legacy rfda pars (session.rfda_pars.fld_aep_l)\ bid_l: list of ids (matching the mind varaible set under Fdmg) #======================================================================= # OUTPUTS #======================================================================= dmg_dx: dxcol of flood damage across all dmg_types and floods mdex lvl0: flood aep lvl1: dmg_type + extra cols I wanted to have this flexible, so the dfunc could pass up extra headers couldnt get it to work. instead used a global list and acheck new headers must be added to the gloabl list and Dfunc. index bldg_id #======================================================================= # TODO: #======================================================================= setup to calc across binvs as well """ #======================================================================= # defaults #======================================================================= start = time.time() logger = self.logger.getChild('calc_fld_set') if wtf is None: wtf = self.session.write_fdmg_set_dx if wsl_delta is None: wsl_delta= self.wsl_delta #======================================================================= # setup and load the results frame #======================================================================= #check to see that all of these conditions pass if not np.all([bid_l is None, fld_aep_l is None]): logger.debug('non default run. rebuild the dmg_dx_base') #non default run. rebuild the frame self.setup_res_dxcol( fld_aep_l = fld_aep_l, #dmg_type_list = dmg_type_list, bid_l = bid_l) elif self.dmg_dx_base is None: #probably the first run if not self.run_cnt == 1: raise IOError logger.debug('self.dmg_dx_base is None. 
rebuilding') self.setup_res_dxcol(fld_aep_l = fld_aep_l, #dmg_type_list = dmg_type_list, bid_l = bid_l) #set it up with the defaults dmg_dx = self.dmg_dx_base.copy() #just start witha copy of the base #======================================================================= # finish defaults #======================================================================= 'these are all mostly for reporting' if fld_aep_l is None: fld_aep_l = self.fld_aep_od.keys() #just get all teh keys from the dictionary """ leaving these as empty kwargs and letting floods handle if bid_l == None: bid_l = binv_dato.bid_l if dmg_type_list=='all': dmg_type_list = self.dmg_types """ """ lvl0_values = dmg_dx.columns.get_level_values(0).unique().tolist() lvl1_values = dmg_dx.columns.get_level_values(1).unique().tolist()""" logger.info('calc flood damage (%i) floods w/ wsl_delta = %.2f'%(len(fld_aep_l), wsl_delta)) logger.debug('ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff \n') #======================================================================= # loop and calc eacch flood #======================================================================= fcnt = 0 first = True for flood_aep in fld_aep_l: #lopo through and build each flood #self.session.prof(state='%s.fdmg.calc_fld_set.%i'%(self.get_id(), fcnt)) #memory profiling self.state = flood_aep 'useful for keeping track of what the model is doing' #get teh flood flood_dato = self.fld_aep_od[flood_aep] #pull thsi from the dictionary logger.debug('getting dmg_df for %s'%flood_dato.name) #=================================================================== # run sequence #=================================================================== #get damage for these depths dmg_df = flood_dato.run_fld(**run_fld) #add the damage df to this slice if dmg_df is None: continue #skip this one #=================================================================== # wrap up #=================================================================== dmg_dx[flood_aep] = dmg_df #store into the frame fcnt += 1 logger.debug('for flood_aep \'%s\' on fcnt %i got dmg_df %s \n'%(flood_aep, fcnt, str(dmg_df.shape))) #=================================================================== # checking #=================================================================== if self.db_f: #check that the floods are increasing if first: first = False last_aep = None else: if not flood_aep > last_aep: raise IOError last_aep = flood_aep #======================================================================= # wrap up #======================================================================= self.state = 'na' if wtf: filetail = '%s %s %s %s res_fld'%(self.session.tag, self.simu_o.name, self.tstep_o.name, self.name) filepath = os.path.join(self.outpath, filetail) hp.pd.write_to_file(filepath, dmg_dx, overwrite=True, index=True) #send for writing self.dmg_dx = dmg_dx stop = time.time() logger.info('in %.4f secs calcd damage on %i of %i floods'%(stop - start, fcnt, len(fld_aep_l))) return def get_results(self): #called by Timestep.run_dt() self.state='wrap' logger = self.logger.getChild('get_results') #======================================================================= # optionals #======================================================================= s = self.session.outpars_d[self.__class__.__name__] if (self.session.write_fdmg_fancy) or (self.session.write_fdmg_sum): logger.debug("calc_summaries \n") dmgs_df = self.calc_summaries() self.dmgs_df = dmgs_df.copy() else: dmgs_df = 
None if ('ead_tot' in s) or ('dmg_df' in s): logger.debug('\n') self.calc_annulized(dmgs_df = dmgs_df, plot_f = False) 'this will also run calc_sumamries if it hasnt happened yet' if 'dmg_tot' in s: #get a cross section of the 'total' column across all flood_aeps and sum for all entries self.dmg_tot = self.dmg_dx.xs('total', axis=1, level=1).sum().sum() if ('bwet_cnt' in s) or ('bdamp_cnt' in s) or ('bdry_cnt' in s): logger.debug('get_fld_begrd_cnt') self.get_fld_begrd_cnt() if 'fld_pwr_cnt' in s: logger.debug('calc_fld_pwr_cnt \n') cnt = 0 for aep, obj in self.fld_aep_od.iteritems(): if obj.gpwr_f: cnt +=1 self.fld_pwr_cnt = cnt self.binv.calc_binv_stats() if self.session.write_fdmg_fancy: self.write_res_fancy() if self.write_fdmg_sum_fly: #write the results after each run self.write_dmg_fly() if self.db_f: self.check_dmg_dx() logger.debug('finished \n') def calc_summaries(self, #annualize the damages fsts_l = ['gpwr_f', 'dmg_sw', 'dmg_gw'], #list of additional flood attributes to report in teh summary dmg_dx=None, plot=False, #flag to execute plot_dmgs() at the end. better to do this explicitly with an outputr wtf=None): """ basically dropping dimensions on the outputs and adding annuzlied damages #======================================================================= # OUTPUTS #======================================================================= DROP BINV DIMENSIOn dmgs_df: df with columns: raw damage types, and annualized damage types index: each flood entries: total damage for binv DROP FLOODS DIMENSIOn aad_sum_ser DROP ALL DIMENSIONS ead_tot """ #======================================================================= # defaults #======================================================================= logger = self.logger.getChild('calc_summaries') if dmg_dx is None: dmg_dx = self.dmg_dx.copy() if plot is None: plot = self.session._write_figs if wtf is None: wtf = self.write_fdmg_sum #======================================================================= # #setup frame #======================================================================= #get the columns dmg_types = self.dmg_types + ['total'] #======================================================================= # #build the annualized damage type names #======================================================================= admg_types = [] for entry in dmg_types: admg_types.append(entry+'_a') cols = dmg_types + ['prob', 'prob_raw'] + admg_types + fsts_l self.dmg_df_cols """ hp.pd.v(dmg_dx) """ dmgs_df = pd.DataFrame(columns = cols) dmgs_df['ari'] = dmg_dx.columns.get_level_values(0).unique() dmgs_df = dmgs_df.sort_values('ari').reset_index(drop=True) #======================================================================= # loop through and fill out the data #======================================================================= for index, row in dmgs_df.iterrows(): #loop through an dfill out dmg_df = dmg_dx[row['ari']] #get the fdmg for this aep #sum all the damage types for dmg_type in dmg_types: row[dmg_type] = dmg_df[dmg_type].sum() #sum them all up #calc the probability row['prob_raw'] = 1/float(row['ari']) #inverse of aep row['prob'] = row['prob_raw'] * self.fprob_mult #apply the multiplier #calculate the annualized damages for admg_type in admg_types: dmg_type = admg_type[:-2] #drop the a row[admg_type] = row[dmg_type] * row['prob'] #=================================================================== # get stats from the floodo #=================================================================== floodo = 
self.fld_aep_od[row['ari']]  # get the flood object for this aep
            for attn in fsts_l:
                row[attn] = getattr(floodo, attn)

            #===================================================================
            # add this row back into the frame
            #===================================================================
            dmgs_df.loc[index, :] = row

        #=======================================================================
        # get series totals
        #=======================================================================
        dmgs_df = dmgs_df.sort_values('prob').reset_index(drop=True)

        #=======================================================================
        # closeout
        #=======================================================================
        logger.debug('annualized %i damage types for %i floods'%(len(dmg_types), len(dmgs_df)))

        if wtf:
            filetail = '%s dmg_sumry'%(self.session.state)
            filepath = os.path.join(self.outpath, filetail)
            hp.pd.write_to_file(filepath, dmgs_df, overwrite=True, index=False) #send for writing

        logger.debug('set data with %s and cols: %s'%(str(dmgs_df.shape), dmgs_df.columns.tolist()))

        if plot:
            self.plot_dmgs(wtf=wtf)

        #=======================================================================
        # post check
        #=======================================================================
        if self.db_f:
            #check the sort logic
            if not dmgs_df.loc[:,'prob'].is_monotonic:
                raise IOError

            if not dmgs_df['total'].iloc[::-1].is_monotonic: #flip the order
                logger.warning('bigger floods are not causing more damage')
                'some of the flood tables seem bad...'
                #raise IOError

            #all probabilities should be larger than zero
            if not np.all(dmgs_df.loc[:,'prob'] > 0):
                raise IOError

        return dmgs_df

    def calc_annulized(self, dmgs_df = None,
                       ltail = None, rtail = None,
                       plot_f=None,
                       dx = 0.001): #get the area under the damage curve
        """
        #=======================================================================
        # INPUTS
        #=======================================================================
        ltail: left tail treatment code (low prob, high damage)
            'flat': extend the max damage to the zero probability event
            'none': don't extend the tail

        rtail: right tail treatment (high prob, low damage)
            'none': don't extend
            '2year': extend to zero damage at the 2 year aep
        """
        #=======================================================================
        # defaults
        #=======================================================================
        logger = self.logger.getChild('calc_annulized')
        if ltail is None: ltail = self.ca_ltail
        if rtail is None: rtail = self.ca_rtail
        'plotter ignores passed kwargs here'
        if plot_f is None: plot_f = self.session._write_figs

        #=======================================================================
        # get data
        #=======================================================================
        if dmgs_df is None:
            dmgs_df = self.calc_summaries()
        #df_raw = self.data.loc[:,('total', 'prob', 'ari')].copy().reset_index(drop=True)
        'only slicing columns for testing'

        df = dmgs_df.copy().reset_index(drop=True)

        if len(df) == 1:
            logger.warning('only got one flood entry. 
skipping') self.ead_tot = 0 self.dmgs_df_wtail = df return logger.debug("with ltail = \'%s\', rtail = \'%s\' and df %s"%(ltail, rtail, str(df.shape))) if self.db_f: if len(df) <2: logger.error('didnt get enough flood entries to calcluate EAD') raw_input('press enter to continue any way....') #======================================================================= # left tail treatment #======================================================================= if ltail == 'flat': #zero probability 'assume 1000yr flood is the max damage' max_dmg = df['total'].max()*1.0001 df.loc[-1, 'prob'] = 0 df.loc[-1, 'ari'] = 999999 df.loc[-1, 'total'] = max_dmg logger.debug('ltail == flat. duplicated danage %.2f at prob 0'%max_dmg) elif ltail == 'none': pass else: raise IOError 'todo: add option for value multiplier' #======================================================================= # right tail #======================================================================= if rtail == 'none': pass elif hp.basic.isnum(rtail): rtail_yr = float(rtail) rtail_p = 1.0 / rtail_yr max_p = df['prob'].max() #floor check if rtail_p < max_p: logger.error('rtail_p (%.2f) < max_p (%.2f)'%(rtail_p, max_p)) raise IOError #same elif rtail_p == max_p: logger.debug("rtail_p == min(xl. no changes made") else: logger.debug("adding zero damage for aep = %.1f"%rtail_yr) #zero damage 'assume no damage occurs at the passed rtail_yr' loc = len(df) df.loc[loc, 'prob'] = rtail_p df.loc[loc, 'ari'] = 1.0/rtail_p df.loc[loc, 'total'] = 0 """ hp.pd.view_web_df(self.data) """ else: raise IOError #======================================================================= # clean up #======================================================================= df = df.sort_index() #resort the index if self.db_f: 'these should still hold' if not df.loc[:,'prob'].is_monotonic: raise IOError """see above if not df['total'].iloc[::-1].is_monotonic: raise IOError""" x, y = df['prob'].values.tolist(), df['total'].values.tolist() #======================================================================= # find area under curve #======================================================================= try: #ead_tot = scipy.integrate.simps(y, x, dx = dx, even = 'avg') 'this was giving some weird results' ead_tot = scipy.integrate.trapz(y, x, dx = dx) except: logger.warning('scipy.integrate.trapz failed. 
could not integrate the damage curve')
            ead_tot = 0
            raise IOError

        logger.info('found ead_tot = %.2f $/yr from %i points with tail_codes: \'%s\' and \'%s\''
                    %(ead_tot, len(y), ltail, rtail))

        self.ead_tot = ead_tot

        #=======================================================================
        # checks
        #=======================================================================
        if self.db_f:
            if pd.isnull(ead_tot):
                raise IOError

            if not isinstance(ead_tot, float):
                raise IOError

            if ead_tot <= 0:
                raise IOError

        #=======================================================================
        # update data with tails
        #=======================================================================
        self.dmgs_df_wtail = df.sort_index().reset_index(drop=True)

        #=======================================================================
        # generate plot
        #=======================================================================
        if plot_f:
            self.plot_dmgs(right_nm = None, xaxis = 'prob', logx = False)

        return

    def get_fld_begrd_cnt(self): #tabulate the bsmt_egrd counts from each flood
        logger = self.logger.getChild('get_fld_begrd_cnt')

        #=======================================================================
        # data setup
        #=======================================================================
        dmg_dx = self.dmg_dx.copy()

        #lvl1_values = dmg_dx.columns.get_level_values(0).unique().tolist()

        #get all the basement exposure grade types
        df1 = dmg_dx.loc[:,idx[:, 'bsmt_egrd']] #get a slice by level 2 values

        #get occurrences by value
        d = hp.pd.sum_occurances(df1, logger=logger)

        #=======================================================================
        # loop and calc
        #=======================================================================
        logger.debug('looping through %i bsmt_egrds: %s'%(len(d), d.keys()))
        for bsmt_egrd, cnt in d.iteritems():
            attn = 'b'+bsmt_egrd +'_cnt'

            logger.debug('for \'%s\' got %i'%(attn, cnt))

            setattr(self, attn, cnt)

        logger.debug('finished \n')

    def check_dmg_dx(self): #check logical consistency of the damage results
        logger = self.logger.getChild('check_dmg_dx')

        #=======================================================================
        # data setup
        #=======================================================================
        dmg_dx = self.dmg_dx.copy()

        mdex = dmg_dx.columns

        aep_l = mdex.get_level_values(0).astype(int).unique().values.tolist()
        aep_l.sort()

        #=======================================================================
        # check that each flood increases in damage
        #=======================================================================
        total = None
        aep_last = None
        for aep in aep_l:

            #get this slice
            df = dmg_dx[aep]

            if total is None:
                boolcol = np.isin(df.columns, ['MS', 'MC', 'BS', 'BC', 'GS']) #identify damage columns
                total = df.loc[:,boolcol].sum().sum()

                if not aep == min(aep_l):
                    raise IOError

            else:
                newtot = df.loc[:,boolcol].sum().sum()
                if not newtot >= total:
                    logger.warning('aep %s tot %.2f < aep %s %.2f'%(aep, newtot, aep_last, total))
                    #raise IOError

                #print 'new tot %.2f > oldtot %.2f'%(newtot, total)

                total = newtot

            aep_last = aep

        return

    def wrap_up(self):

        #=======================================================================
        # update asset containers
        #=======================================================================
        """
        #building inventory
        'should be flagged for updating during House.notify()'
        if self.binv.upd_kid_f:
            self.binv.update()"""

        """don't think we need this here any more.. only on udev. 
keeping it just to be save""" self.last_tstep = copy.copy(self.time) self.state='close' def write_res_fancy(self, #for saving results in xls per tab. called as a special outputr dmg_dx=None, include_ins = False, include_raw = False, include_begh = False): """ #======================================================================= # INPUTS #======================================================================= include_ins: whether ot add inputs as tabs. ive left this separate from the 'copy_inputs' flag as it is not a true file copy of the inputs """ #======================================================================= # defaults #======================================================================= logger = self.logger.getChild('write_res_fancy') if dmg_dx is None: dmg_dx = self.dmg_dx if dmg_dx is None: logger.warning('got no dmg_dx. skipping') return #======================================================================= # setup #======================================================================= od = OrderedDict() #======================================================================= # add the parameters #======================================================================= #get the blank frame df = pd.DataFrame(columns = ['par','value'] ) df['par'] = list(self.try_inherit_anl) for indx, row in df.iterrows(): df.iloc[indx, 1] = getattr(self, row['par']) #set this value od['pars'] = df #======================================================================= # try and add damage summary #======================================================================= if not self.dmgs_df is None: od['dmg summary'] = self.dmgs_df #======================================================================= # #get theh dmg_dx decomposed #======================================================================= od.update(hp.pd.dxcol_to_df_set(dmg_dx, logger=self.logger)) #======================================================================= # #add dmg_dx as a raw tab #======================================================================= if include_raw: od['raw_res'] = dmg_dx #======================================================================= # add inputs #======================================================================= if include_ins: for dataname, dato in self.kids_d.iteritems(): if hasattr(dato, 'data') & hp.pd.isdf(dato.data): od[dataname] = dato.data #======================================================================= # add debuggers #======================================================================= if include_begh: if not self.beg_hist_df is None: od['beg_hist'] = self.beg_hist_df #======================================================================= # #write to excel #======================================================================= filetail = '%s %s %s %s fancy_res'%(self.session.tag, self.simu_o.name, self.tstep_o.name, self.name) filepath = os.path.join(self.outpath, filetail) hp.pd.write_dfset_excel(od, filepath, engine='xlsxwriter', logger=self.logger) return def write_dmg_fly(self): #write damage results after each run logger = self.logger.getChild('write_dmg_fly') dxcol = self.dmg_dx #results #======================================================================= # build the resuults summary series #======================================================================= #get all the flood aeps lvl0vals = dxcol.columns.get_level_values(0).unique().astype(int).tolist() #blank holder res_ser = pd.Series(index = lvl0vals) #loop and calc sums for 
each flood
        for aep in lvl0vals:
            res_ser[aep] = dxcol.loc[:,(aep,'total')].sum()

        #add extras
        if not self.ead_tot is None:
            res_ser['ead_tot'] = self.ead_tot

        res_ser['dt'] = self.tstep_o.year
        res_ser['sim'] = self.simu_o.ind

        lindex = '%s.%s'%(self.simu_o.name, self.tstep_o.name)

        hp.pd.write_fly_df(self.fly_res_fpath, res_ser,
                           lindex = lindex,
                           first = self.write_dmg_fly_first,
                           tag = 'fdmg totals',
                           db_f = self.db_f,
                           logger=logger) #write results on the fly

        self.write_dmg_fly_first = False

        return

    def get_plot_kids(self): #raise kids for plotting the damage summaries
        logger = self.logger.getChild('get_plot_kids')

        #=======================================================================
        # get slice of aad_fmt_df matching the aad cols
        #=======================================================================
        aad_fmt_df = self.session.pars_df_d['dmg_sumry_plot'] #pull the formatter pars from the tab

        dmgs_df = self.dmgs_df
        self.data = dmgs_df

        boolidx = aad_fmt_df.loc[:,'name'].isin(dmgs_df.columns) #get just those formatters with data in the aad

        aad_fmt_df_slice = aad_fmt_df[boolidx] #get this slice

        """
        hp.pd.view_web_df(self.data)
        hp.pd.view_web_df(df)
        hp.pd.view_web_df(aad_fmt_df_slice)
        aad_fmt_df_slice.columns
        """

        #=======================================================================
        # formatter kids setup
        #=======================================================================
        """need to run this every time so the data is updated
        TODO: allow some updating here so we don't have to rebuild each time
        if self.plotter_kids_dict is None:"""
        self.plotr_d = self.raise_children_df(aad_fmt_df_slice, kid_class = hp.data.Data_o)

        logger.debug('finished \n')

    def plot_dmgs(self, wtf=None, right_nm = None, xaxis = 'ari', logx = True,
                  ylims = None, #tuple of min/max values for the y-axis
                  ): #plot the aad (EAD-ARI) curve
        """
        see tab 'aad_fmt' to control what is plotted and how it is formatted
        """
        #=======================================================================
        # defaults
        #=======================================================================
        logger = self.logger.getChild('plot_dmgs')
        if wtf == None: wtf = self.session._write_figs

        #=======================================================================
        # prechecks
        #=======================================================================
        if self.db_f:
            if self.dmgs_df is None:
                raise IOError

        #=======================================================================
        # setup
        #=======================================================================
        if not ylims is None:
            try:
                ylims = eval(ylims)
            except:
                pass

        #get the plot workers
        if self.plotr_d is None:
            self.get_plot_kids()

        kids_d = self.plotr_d

        title = '%s-%s-%s EAD-ARI plot on %i objs'%(self.session.tag, self.simu_o.name, self.name, len(self.binv.childmeta_df))

        logger.debug('with \'%s\''%title)

        if not self.tstep_o is None:
            title = title + ' for %s'%self.tstep_o.name

        #=======================================================================
        # update plotters
        #=======================================================================
        logger.debug('updating plotters with my data')

        #get data
        data_og = self.data.copy() #store this for later

        if self.dmgs_df_wtail is None:
            df = self.dmgs_df.copy()
        else:
            df = self.dmgs_df_wtail.copy()

        df = df.sort_values(xaxis, ascending=True)

        #reformat data
        df.set_index(xaxis, inplace = True)

        #re set
        self.data = df

        #tell kids to refresh their data from here
        for gid, obj in kids_d.iteritems():
            obj.data = obj.loadr_vir()

        self.data = data_og #reset the data
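        # Illustrative sketch (assumed values) of the reshaping done just above:
        # the damage summary is sorted by the chosen x-axis and re-indexed on it
        # before each plotter child reloads its series.
        #
        #   import pandas as pd
        #   demo = pd.DataFrame({'ari': [500, 2, 100],
        #                        'total': [7.8e6, 1.2e6, 4.5e6]})
        #   demo = demo.sort_values('ari', ascending=True).set_index('ari')
        #   # demo.index -> [2, 100, 500]; 'total' is now plot-ready against ARI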
#======================================================================= # get annotation #======================================================================= val_str = '$' + "{:,.2f}".format(self.ead_tot/1e6) #val_str = "{:,.2f}".format(self.ead_tot) """ txt = 'total aad: $%s \n tail kwargs: \'%s\' and \'%s\' \n'%(val_str, self.ca_ltail, self.ca_rtail) +\ 'binv.cnt = %i, floods.cnt = %i \n'%(self.binv.cnt, len(self.fld_aep_od))""" txt = 'total EAD = %s'%val_str #======================================================================= #plot the workers #======================================================================= #twinx if not right_nm is None: logger.debug('twinning axis with name \'%s\''%right_nm) title = title + '_twin' # sort children into left/right buckets by name to plot on each axis right_pdb_d, left_pdb_d = self.sort_buckets(kids_d, right_nm) if self.db_f: if len (right_pdb_d) <1: raise IOError #======================================================================= # #send for plotting #======================================================================= 'this plots both bundles by their data indexes' ax1, ax2 = self.plot_twinx(left_pdb_d, right_pdb_d, logx=logx, xlab = xaxis, title=title, annot = txt, wtf=False) 'cant figure out why teh annot is plotting twice' ax2.set_ylim(0, 1) #prob limits legon = False else: logger.debug('single axis') try: del kids_d['prob'] except: pass pdb = self.get_pdb_dict(kids_d.values()) ax1 = self.plot_bundles(pdb, logx=logx, xlab = 'ARI', ylab = 'damage ($ 10^6)', title=title, annot = txt, wtf=False) legon=True #hatch #======================================================================= # post formatting #======================================================================= #set axis limits if xaxis == 'ari': ax1.set_xlim(1, 1000) #aep limits elif xaxis == 'prob': ax1.set_xlim(0, .6) if not ylims is None: ax1.set_ylim(ylims[0], ylims[1]) #ax1.set_ylim(0, ax1.get_ylim()[1]) #$ limits #======================================================================= # format y axis labels #======================================================= ================ old_tick_l = ax1.get_yticks() #get teh old labels # build the new ticks l = [] for value in old_tick_l: new_v = '$' + "{:,.0f}".format(value/1e6) l.append(new_v) #apply the new labels ax1.set_yticklabels(l) """ #add thousands comma ax1.get_yaxis().set_major_formatter( #matplotlib.ticker.FuncFormatter(lambda x, p: '$' + "{:,.2f}".format(x/1e6))) matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))""" if xaxis == 'ari': ax1.get_xaxis().set_major_formatter( matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ','))) if wtf: fig = ax1.figure savepath_raw = os.path.join(self.outpath,title) flag = hp.plot.save_fig(self, fig, savepath_raw=savepath_raw, dpi = self.dpi, legon=legon) if not flag: raise IOError #plt.close() return class Flood( hp.dyno.Dyno_wrap, hp.sim.Sim_o, hp.oop.Parent, #flood object worker hp.oop.Child): #=========================================================================== # program pars #=========================================================================== gpwr_f = False #grid power flag palceholder #=========================================================================== # user defineid pars #=========================================================================== ari = None #loaded from flood table #area exposure grade. control for areas depth decision algorhithim based on the performance of macro structures (e.g. dykes). 
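    # Summary sketch of how the grade codes below are resolved by set_area_egrd();
    # the dict is illustrative only, not an attribute used by the model:
    #
    #   resolve_hint = {'dry': 'use literally', 'damp': 'use literally', 'wet': 'use literally',
    #                   '*ftbl': 'pull area_egrd## from the active flood table object',
    #                   '*model': 'pull area_egrd## from the Fdmg model'}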
area_egrd00 = '' area_egrd01 = '' area_egrd02 = '' area_egrd00_code = None area_egrd01_code = None area_egrd02_code = None #=========================================================================== # calculated pars #=========================================================================== hdep_avg = 0 #average house depth #damate properties total = 0 BS = 0 BC = 0 MS = 0 MC = 0 dmg_gw = 0 dmg_sw = 0 dmg_df_blank =None wsl_avg = 0 #=========================================================================== # data containers #=========================================================================== hdmg_cnt = 0 dmg_df = None dmg_res_df = None #bsmt_egrd counters. see get_begrd_cnt() bdry_cnt = 0 bwet_cnt = 0 bdamp_cnt = 0 def __init__(self, parent, *vars, **kwargs): logger = mod_logger.getChild('Flood') logger.debug('start _init_') #======================================================================= # #attach custom vars #======================================================================= self.inherit_parent_ans=set(['mind', 'dmg_types']) #======================================================================= # initilize cascade #======================================================================= super(Flood, self).__init__(parent, *vars, **kwargs) #initilzie teh baseclass #======================================================================= # common setup #======================================================================= if self.sib_cnt == 0: #update the resets pass #======================================================================= # unique setup #======================================================================= """ handled by the outputr self.reset_d.update({'hdmg_cnt':0})""" self.ari = int(self.ari) self.dmg_res_df = pd.DataFrame() #set as an empty frame for output handling #======================================================================= # setup functions #======================================================================= self.set_gpwr_f() logger.debug('set_dmg_df_blank()') self.set_dmg_df_blank() logger.debug('get your water levels from the selected wsl table \n') self.set_wsl_frm_tbl() logger.debug('set_area_egrd()') self.set_area_egrd() logger.debug('get_info_from_binv()') df = self.get_info_from_binv() #initial run to set blank frame self.set_wsl_from_egrd(df) """ moved into set_wsl_frm_tbl() logger.debug('\n') self.setup_dmg_df()""" self.init_dyno() self.logger.debug('__init___ finished \n') def set_dmg_df_blank(self): logger = self.logger.getChild('set_dmg_df_blank') binv_df = self.model.binv.childmeta_df colns = OrderedSet(self.model.dmg_df_cols.tolist() + ['wsl', 'area_prot_lvl']) 'wsl should be redundant' #get boolean self.binvboolcol = binv_df.columns.isin(colns) #store this for get_info_from_binv() #get teh blank frame self.dmg_df_blank = pd.DataFrame(columns = colns, index = binv_df.index) #get the blank frame 'this still needs the wsl levels attached based on your area exposure grade' logger.debug('set dmg_df_blank with %s'%(str(self.dmg_df_blank.shape))) return def set_gpwr_f(self): #set your power flag if self.is_frozen('gpwr_f'): return True#shortcut for frozen logger = self.logger.getChild('set_gpwr_f') #======================================================================= # get based on aep #======================================================================= min_aep = int(self.model.gpwr_aep) if self.ari < min_aep: gpwr_f = True else: gpwr_f = False logger.debug('for min_aep = %i, set gpwr_f = %s'%(min_aep, 
gpwr_f)) #update handler self.handle_upd('gpwr_f', gpwr_f, proxy(self), call_func = 'set_gpwr_f') return True def set_wsl_frm_tbl(self, #build the raw wsl data from the passed flood table flood_tbl_nm = None, #name of flood table to pull raw data from #bid_l=None, ): """ here we get the raw values these are later modified by teh area_egrd with self.get_wsl_from_egrd() #======================================================================= # INPUTS #======================================================================= flood_tbl_df_raw: raw df of the classic flood table columns:` count, aep, aep, aep, aep....\ real_columns: bldg_id, CPID, depth, depth, depth, etc... index: unique arbitrary wsl_ser: series of wsl for this flood on each bldg_id #======================================================================= # calls #======================================================================= dynp handles Fdmg.flood_tbl_nm """ #======================================================================= # defaults #======================================================================= logger = self.logger.getChild('set_wsl_frm_tbl') if flood_tbl_nm is None: flood_tbl_nm = self.model.flood_tbl_nm #======================================================================= # get data #======================================================================= #pull the raw flood tables ftbl_o = self.model.ftblos_d[flood_tbl_nm] wsl_d = ftbl_o.wsl_d df = pd.DataFrame(index = wsl_d.values()[0].index) #blank frame from teh first entry #======================================================================= # loop and apply for each flood type #======================================================================= for ftype, df1 in wsl_d.iteritems(): #======================================================================= # data checks #======================================================================= if self.db_f: if not ftype in ['wet', 'dry', 'damp']: raise IOError df_raw =df1.copy() if not self.ari in df_raw.columns: logger.error('the flood provided on the \'floods\' tab (\'%s\') does not have a match in the flood table: \n %s'% (self.ari, self.model.ftblos_d[flood_tbl_nm].filepath)) raise IOError #======================================================================= # slice for this flood #======================================================================= boolcol = df1.columns == self.ari #slice for this aep #get the series for this wsl_ser = df1.loc[:, boolcol].iloc[:,0].astype(float) #wsl_ser = wsl_ser.rename(ftype) #rename with the aep 'binv slicing moved to Flood_tbl.clean_data()' #======================================================================= # checks #======================================================================= if self.db_f: if len(wsl_ser) <1: raise IOError """ allowing #check for nuls if np.any(pd.isnull(wsl_ser2)): raise IOError""" #======================================================================= # wrap up report and attach #======================================================================= df[ftype] = wsl_ser logger.debug('from \'%s\' for \'%s\' got wsl_ser %s for aep: %i' %(flood_tbl_nm, ftype, str(wsl_ser.shape), self.ari)) self.wsl_df = df #set this 'notusing dy nps' if self.session.state == 'init': self.reset_d['wsl_df'] = df.copy() return True def set_area_egrd(self): #pull your area exposure grade from somewhere """ #======================================================================= # calls 
#======================================================================= self.__init__() dynp handles: Fdmg.flood_tbl_nm (just in case we are pulling from there """ #======================================================================= # dependency check #======================================================================= if not self.session.state=='init': dep_l = [([self.model], ['set_area_prot_lvl()'])] if self.deps_is_dated(dep_l, method = 'reque', caller = 'set_area_egrd'): return False logger = self.logger.getChild('set_area_egrd') #======================================================================= # steal egrd from elsewhere table if asked #======================================================================= for cnt in range(0,3,1): #loop through each one attn = 'area_egrd%02d'%cnt area_egrd_code = getattr(self, attn + '_code') if area_egrd_code in ['dry', 'damp', 'wet']: area_egrd = area_egrd_code #=================================================================== # pull from teh flood table #=================================================================== elif area_egrd_code == '*ftbl': ftbl_o = self.model.ftblos_d[self.model.flood_tbl_nm] #get the flood tabl object area_egrd = getattr(ftbl_o, attn) #get from teh table #=================================================================== # pull from teh model #=================================================================== elif area_egrd_code == '*model': area_egrd = getattr(self.model, attn) #get from teh table else: logger.error('for \'%s\' got unrecognized area_egrd_code: \'%s\''%(attn, area_egrd_code)) raise IOError #=================================================================== # set these #=================================================================== self.handle_upd(attn, area_egrd, weakref.proxy(self), call_func = 'set_area_egrd') 'this should triger generating a new wsl set to teh blank_dmg_df' logger.debug('set \'%s\' from \'%s\' as \'%s\'' %(attn, area_egrd_code,area_egrd)) if self.db_f: if not area_egrd in ['dry', 'damp', 'wet']: raise IOError return True def set_wsl_from_egrd(self, df = None): #calculate the wsl based on teh area_egrd """ This is a partial results retrival for non damage function results TODO: consider checking for depednency on House.area_prot_lvl #======================================================================= # calls #======================================================================= self.__init__ dynp handles Flood.area_egrd## """ #======================================================================= # check dependencies and frozen #=========================================================== ============ if not self.session.state=='init': dep_l = [([self], ['set_area_egrd()', 'set_wsl_frm_tbl()'])] if self.deps_is_dated(dep_l, method = 'reque', caller = 'set_wsl_from_egrd'): return False #======================================================================= # defaults #======================================================================= logger = self.logger.getChild('set_wsl_from_egrd') #if wsl_delta is None: wsl_delta = self.model.wsl_delta #======================================================================= # get data #======================================================================= if df is None: df = self.get_info_from_binv() 'need to have updated area_prot_lvls' #======================================================================= # precheck #======================================================================= if self.db_f: if 
not isinstance(df, pd.DataFrame): raise IOError if not len(df) > 0: raise IOError #======================================================================= # add the wsl for each area_egrd #======================================================================= for prot_lvl in range(0,3,1): #loop through each one #get your grade fro this prot_lvl attn = 'area_egrd%02d'%prot_lvl area_egrd = getattr(self, attn) #identify the housese for this protection level boolidx = df.loc[:,'area_prot_lvl'] == prot_lvl if boolidx.sum() == 0: continue #give them the wsl corresponding to this grade df.loc[boolidx, 'wsl'] = self.wsl_df.loc[boolidx,area_egrd] #set a tag for the area_egrd if 'area_egrd' in df.columns: df.loc[boolidx, 'area_egrd'] = area_egrd logger.debug('for prot_lvl %i, set %i wsl from \'%s\''%(prot_lvl, boolidx.sum(), area_egrd)) #======================================================================= # set this #======================================================================= self.dmg_df_blank = df #======================================================================= # post check #======================================================================= logger.debug('set dmg_df_blank with %s'%str(df.shape)) if self.session.state=='init': self.reset_d['dmg_df_blank'] = df.copy() if self.db_f: if np.any(pd.isnull(df['wsl'])): logger.error('got some wsl nulls') raise IOError return True """ hp.pd.v(df) hp.pd.v(self.dmg_df_blank) """ def run_fld(self, **kwargs): #shortcut to collect all the functions for a simulation ru n self.run_cnt += 1 dmg_df_blank = self.get_info_from_binv() dmg_df = self.get_dmg_set(dmg_df_blank, **kwargs) if self.db_f: self.check_dmg_df(dmg_df) 'leaving this here for simplicity' self.calc_statres_flood(dmg_df) return dmg_df def get_info_from_binv(self): #======================================================================= # defaults #======================================================================= logger = self.logger.getChild('get_info_from_binv') binv_df = self.model.binv.childmeta_df #pull static values binvboolcol = self.binvboolcol df = self.dmg_df_blank.copy() 'this should have wsl added to it from set_wsl_from_egrd()' if self.db_f: if not len(binvboolcol) == len(binv_df.columns): logger.warning('got length mismatch between binvboolcol (%i) and the binv_df columns (%i)'% (len(binvboolcol), len(binv_df.columns))) 'pandas will handle this mistmatch.. 
just ignores the end' #======================================================================= # #update with values from teh binv #======================================================================= df.update(binv_df.loc[:,binvboolcol], overwrite=True) #update from all the values in teh binv logger.debug('retreived %i values from the binv_df on: %s' %(binv_df.loc[:,binvboolcol].count().count(), binv_df.loc[:,binvboolcol].columns.tolist())) #======================================================================= # macro calcs #======================================================================= if 'hse_depth' in df.columns: df['hse_depth'] = df['wsl'] - df['anchor_el'] #groudn water damage flag if 'gw_f' in df.columns: df.loc[:,'gw_f'] = df['dem_el'] > df['wsl'] #water is below grade if self.db_f: if 'bsmt_egrd' in binv_df.columns: raise IOError return df def get_dmg_set(self, #calcluate the damage for each house dmg_df, #empty frame for filling with damage results #dmg_type_list='all', #bid_l = None, #wsl_delta = None, dmg_rat_f =None, #includt eh damage ratio in results ): """ #======================================================================= # INPUTS #======================================================================= depth_ser: series of depths (for this flood) with index = bldg_id """ #======================================================================= # defaults #======================================================================= logger = self.logger.getChild('get_dmg_set(%s)'%self.get_id()) if dmg_rat_f is None: dmg_rat_f = self.model.dmg_rat_f hse_od = self.model.binv.hse_od #ordred dictionary by bid: hse_dato """ see get_wsl_from_egrd() #======================================================================= # build the dmg_df #======================================================================= bid_ar = self.model.binv.data.loc[:,self.mind].values.astype(np.int) #get everything from teh binv dmg_df = pd.DataFrame(index = bid_ar, columns = self.model.dmg_df_cols)""" #======================================================================= # pre checks #======================================================================= if self.db_f: if not isinstance(dmg_df, pd.DataFrame): raise IOError boolidx = dmg_df.index.isin(hse_od.keys()) if not np.all(boolidx): logger.error('some of the bldg_ids in the wsl_ser were not found in the binv: \n %s' %dmg_df.index[~boolidx]) raise IOError #check the damage columns are empty boolcol = np.isin(dmg_df.columns, ['MS', 'MC', 'BS', 'BC', 'GS', 'total']) #identify damage columns if not np.all(pd.isnull(dmg_df.loc[:,boolcol])): raise IOError #======================================================================= # frame setup #======================================================================= #identify columns containing damage results dmgbool = np.logical_or(dmg_df.columns.isin(self.model.dmg_types), #damages pd.Series(dmg_df.columns).str.contains('_rat').values) #damage ratios #======================================================================= # get teh damage for each house #======================================================================= logger.debug('getting damage for %s entries'%(str(dmg_df.shape))) """ to improve performance, we're only looping through those entries with real flood deths (skin_df) however, the full results frame is still used (non_real entries should equal zero) """ """generally no memory added during these self.session.prof(state='%s.get_dmg_set.loop'%(self.name)) #memory 
profiling""" cnt = 0 first = True for index, row in dmg_df.iterrows(): #loop through each row #=================================================================== # pre-printouts #=================================================================== #self.session.prof(state='%s.get_dmg_set.%i'%(self.name, cnt)) #memory profiling cnt +=1 if cnt%self.session._logstep == 0: logger.info(' (%i/%i)'%(cnt, len(dmg_df))) #=================================================================== # retrive info #=================================================================== hse_obj = hse_od[index] #get this house object by bldg_id hse_obj.floodo = self #let the house know who is flooding it logger.debug('on hse \'%s\' \n'%hse_obj.name) #=================================================================== # add damage results #=================================================================== if row['hse_depth'] < self.model.hse_skip_depth: logger.debug('depth below hse_obj.vuln_el for bldg_id: %i. setting fdmg=0'%index) row[dmgbool] = 0.0 #set all damage to zero #depth significant. calc it else: #runt he house logger.debug('running house \n') dmg_ser = hse_obj.run_hse(row['wsl'], dmg_rat_f = dmg_rat_f) row.update(dmg_ser) #add all these entries #=================================================================== # extract extra attributers from teh house #=================================================================== #find the entries to skip attribute in filling if first: boolar1 = ~np.isin(row.index, ['total']) boolar2 =
pd.isnull(row)
pandas.isnull
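The completion above closes boolar2 = pd.isnull(row), an elementwise missing-value mask over one damage-result row. A minimal, self-contained sketch with invented values, not the flood-damage frame from the snippet:

import numpy as np
import pandas as pd

# a single damage-result row with one missing entry (values are made up)
row = pd.Series({"MS": 1.0, "MC": np.nan, "BS": 0.5})
mask = pd.isnull(row)              # True wherever the value is missing
print(mask.tolist())               # [False, True, False]
print(row.index[mask].tolist())    # ['MC']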
import os, re import argparse import numpy as np import pickle as pl from os import walk from gensim.models import Word2Vec from nltk.tokenize import RegexpTokenizer import pandas as pd from tensorflow.contrib.keras import preprocessing from tqdm import tqdm from konlpy.tag import Twitter twitter = Twitter() from libs import hangle, mongo, analytics seungwon_user_id = '5b856c9d995fc115c6659c04' def build_emb_matrix_and_vocab(embedding_model, keep_in_dict=100000, embedding_size=200): # 0 th element is the default vector for unknowns. emb_matrix = np.zeros((keep_in_dict + 2, embedding_size)) word2index = {} index2word = {} for k in range(1, keep_in_dict + 1): word = embedding_model.wv.index2word[k - 1] # print('word: {}'.format(word)) emb_matrix[k] = embedding_model[word] word2index[word] = k index2word[k] = word word2index['UNK'] = 0 index2word[0] = 'UNK' word2index['STOP'] = keep_in_dict + 1 index2word[keep_in_dict + 1] = 'STOP' return emb_matrix, word2index, index2word def sent2index(sent, word2index): words = sent.strip().split(' ') sent_index = [word2index[word] if word in word2index else 0 for word in words] return sent_index def get_sentence(index2word, sen_index): return ' '.join([index2word[index] for index in sen_index]) def gen_data(word2index, type='tension'): data = [] db = mongo.get_db() videos = mongo.to_dicts(db.video.find({})) pbar = tqdm(total=len(videos)) for video in videos: video = mongo.to_dict(db.video.find_one({'video_id': video['video_id']})) subs = mongo.to_dicts(db.sub.find({'video_id': video['video_id']}).sort('index', 1)) users = mongo.to_dicts(db.user.find({})) video['sub_total'] = len(subs) target_users = [] for user in users: context_no_label_total = db.label.find({ 'video_id': video['video_id'], 'context': 0, 'user_id': user['_id'], }).count() context_yes_label_total = db.label.find({ 'video_id': video['video_id'], 'context': 1, 'user_id': user['_id'], }).count() if video['sub_total'] == context_yes_label_total and context_no_label_total == context_yes_label_total: target_users.append(user) if len(target_users) < 2: pbar.update(1) continue offset = 1 for i, sub in enumerate(subs): sub['category'] = 0 if type == 'tension': for user in target_users: label1 = db['label'].find_one({ 'video_id': video['video_id'], 'context': 0, 'sub_index': sub['index'], 'user_id': user['_id'], }) label2 = db['label'].find_one({ 'video_id': video['video_id'], 'context': 1, 'sub_index': sub['index'], 'user_id': user['_id'], }) sub['category'] = analytics.get_category(label1, label2) elif type == 'sentiment': label = db['label'].find_one({ 'video_id': video['video_id'], 'context': 1, 'sub_index': sub['index'], 'user_id': seungwon_user_id, }) sentiment = label['sentiment_label'] if sentiment == '부정': sub['category'] = 0 elif sentiment == '중립': sub['category'] = 1 elif sentiment == '긍정': sub['category'] = 2 elif type == 'intent': label = db['label'].find_one({ 'video_id': video['video_id'], 'context': 1, 'sub_index': sub['index'], 'user_id': seungwon_user_id, }) sentiment = label['intent_label'] if sentiment == '호응유도': sub['category'] = 0 elif sentiment == '의견': sub['category'] = 1 elif sentiment == '인용': sub['category'] = 2 elif sentiment == '일화': sub['category'] = 3 elif sentiment == '팩트': sub['category'] = 4 def text_to_indexes(text): n_text = hangle.normalize(text, english=True, number=True, punctuation=True) tokens = twitter.pos(n_text, stem=True) indexes = [] for token in tokens: word = '{}/{}'.format(token[0], token[1]) if word in word2index: indexes.append(word2index[word]) 
else: print('no word : {}'.format(word)) indexes.append(0) return indexes doc = [] for j in range(max(0, i - offset), i): doc.append(text_to_indexes(subs[j]['text'])) doc.append(text_to_indexes(sub['text'])) for j in range(i + 1, min(i + offset + 1, len(subs))): doc.append(text_to_indexes(subs[j]['text'])) # print('doc :', doc) data.append({ 'doc': doc, 'start_ts': sub['start_ts'], 'end_ts': sub['end_ts'], 'category': sub['category'], }) pbar.update(1) pbar.close() return data def preprocess_sub(data, sent_length, max_rev_len, keep_in_dict=10000): ## As the result, each review will be composed of max_rev_len sentences. If the original review is longer than that, we truncate it, and if shorter than that, we append empty sentences to it. And each sentence will be composed of sent_length words. If the original sentence is longer than that, we truncate it, and if shorter, we append the word of 'UNK' to it. Also, we keep track of the actual number of sentences each review contains. data_formatted = [] review_lens = [] for i, item in enumerate(data): review = item['doc'] review_formatted = preprocessing.sequence.pad_sequences(review, maxlen=sent_length, padding="post", truncating="post", value=keep_in_dict + 1) review_len = review_formatted.shape[0] review_lens.append(review_len if review_len <= max_rev_len else max_rev_len) lack_len = max_rev_length - review_len review_formatted_right_len = review_formatted if lack_len > 0: # extra_rows = np.zeros([lack_len, sent_length], dtype=np.int32) extra_rows = np.full((lack_len, sent_length), keep_in_dict + 1) review_formatted_right_len = np.append(review_formatted, extra_rows, axis=0) elif lack_len < 0: row_index = [max_rev_length + i for i in list(range(0, -lack_len))] review_formatted_right_len = np.delete(review_formatted, row_index, axis=0) data_formatted.append(review_formatted_right_len) data[i]['doc'] = review_formatted_right_len data[i]['sub_lens'] = review_formatted.shape[0] def divide(data, train_prop): import random # 예시 # x = [1, 2, 3, 4, 5] # y = [0, 0, 1, 0, 0] # train_prop = 0.2 random.seed(1234) # tmp: [3 4 1 0 2], 랜덤순열 random.shuffle(data) train = data[:round(train_prop * len(data))] test = data[-(len(data) - round(train_prop * len(data))):] return train, test def get_df(data): label = [] doc = [] length = [] for item in data: label.append(item['category']) doc.append(item['doc']) length.append(item['sub_lens']) return
pd.DataFrame({'label': label, 'doc': doc, 'length': length})
pandas.DataFrame
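The completed call assembles a frame from three equal-length lists (label, tokenized doc, length). A stand-alone sketch with placeholder values rather than the subtitle data used above:

import pandas as pd

# parallel lists, one entry per labelled subtitle window (placeholder values)
label = [0, 2, 1]
doc = [[[5, 9, 3]], [[7, 1]], [[2, 2, 8]]]
length = [1, 1, 1]

df = pd.DataFrame({"label": label, "doc": doc, "length": length})
print(df.shape)                # (3, 3)
print(df["label"].tolist())    # [0, 2, 1]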
import os import pandas as pd import pytest from pandas.testing import assert_frame_equal from .. import read_sql @pytest.fixture(scope="module") # type: ignore def mysql_url() -> str: conn = os.environ["MYSQL_URL"] return conn def test_mysql_without_partition(mysql_url: str) -> None: query = "select * from test_table limit 3" df = read_sql(mysql_url, query) expected = pd.DataFrame( index=range(3), data={ "test_int": pd.Series([1, 2, 3], dtype="Int64"), "test_float": pd.Series([1.1, 2.2, 3.3], dtype="float64") } ) assert_frame_equal(df, expected, check_names=True) def test_mysql_with_partition(mysql_url: str) -> None: query = "select * from test_table" df = read_sql( mysql_url, query, partition_on="test_int", partition_num=3, ) expected = pd.DataFrame( index=range(6), data={ "test_int": pd.Series([1, 2, 3, 4, 5, 6], dtype="Int64"), "test_float": pd.Series([1.1, 2.2, 3.3, 4.4, 5.5, 6.6], dtype="float64") } ) assert_frame_equal(df, expected, check_names=True) def test_mysql_types(mysql_url: str) -> None: query = "select * from test_types" df = read_sql(mysql_url, query) expected = pd.DataFrame( index=range(3), data={ "test_date": pd.Series(["1999-07-25", "2020-12-31", "2021-01-28"], dtype="datetime64[ns]"), "test_time": pd.Series(["00:00:00", "23:59:59", "12:30:30"], dtype="object"), "test_datetime": pd.Series(["1999-07-25 00:00:00", "2020-12-31 23:59:59", None], dtype="datetime64[ns]"), "test_new_decimal": pd.Series([1.1, None, 3.3], dtype="float"), "test_decimal": pd.Series([1, 2, 3], dtype="float"), "test_varchar": pd.Series([None, "varchar2", "varchar3"], dtype="object"), "test_char": pd.Series(["char1", "char2", "char3"], dtype="object") } ) assert_frame_equal(df, expected, check_names=True) def test_mysql_types_text(mysql_url: str) -> None: query = "select * from test_types" df = read_sql(mysql_url, query, protocol="text") expected = pd.DataFrame( index=range(3), data={ "test_date": pd.Series(["1999-07-25", "2020-12-31", "2021-01-28"], dtype="datetime64[ns]"), "test_time": pd.Series(["00:00:00", "23:59:59", "12:30:30"], dtype="object"), "test_datetime": pd.Series(["1999-07-25 00:00:00", "2020-12-31 23:59:59", None], dtype="datetime64[ns]"), "test_new_decimal": pd.Series([1.1, None, 3.3], dtype="float"), "test_decimal":
pd.Series([1, 2, 3], dtype="float")
pandas.Series
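The expected-result frames in this test lean on pd.Series with an explicit dtype; a minimal sketch of the two dtypes that matter here:

import pandas as pd

ints = pd.Series([1, 2, 3], dtype="Int64")              # pandas nullable integer dtype
floats = pd.Series([1.1, None, 3.3], dtype="float64")   # None is stored as NaN
print(ints.dtype, floats.dtype)    # Int64 float64
print(floats.isna().tolist())      # [False, True, False]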
from glob import glob import pandas as pd import sastvd as svd pd.set_option("display.max_columns", None) # %% Phoenix results = glob(str(svd.outputs_dir() / "phoenix/rq_results/*.csv")) results2 = glob(str(svd.outputs_dir() / "phoenix_new/rq_results_new/*.csv")) results += results2 res_df = pd.concat([
pd.read_csv(i)
pandas.read_csv
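The prompt breaks off inside pd.concat([ and the completion reads each globbed CSV in a comprehension. A sketch of the same read-and-stack pattern against a hypothetical results directory:

from glob import glob
import pandas as pd

# hypothetical result files; each CSV becomes its own frame before stacking
paths = glob("rq_results/*.csv")
frames = [pd.read_csv(p) for p in paths]
res_df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
print(len(frames), res_df.shape)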
# import all the required files i.e. numpy , pandas and math library from graphlib.financialGraph import Data import numpy as np import pandas as pd from pandas import DataFrame , Series import math # All the indicators are defined and arranged in Alphabetical order # ------------------> A <------------------------ # [0] __ Average True Range (ATR) # Moving Average of True Range(TR) def atr(data: DataFrame, period: int = 14) -> Series: TR = tr(data) return pd.Series( TR.rolling(center=False, window=period, min_periods=1).mean(), name=f'{period} ATR' ) # [0] __ Adaptive Price Zone (APZ) # TODO def apz(data: DataFrame,period: int = 21,dev_factor: int = 2, MA: Series = None,adjust: bool = True,) -> DataFrame: if not isinstance(MA, pd.Series): MA = dema(data, period) price_range = pd.Series( (data["high"] - data["low"]).ewm(span=period, adjust=adjust).mean() ) volatility_value = pd.Series( price_range.ewm(span=period, adjust=adjust).mean(), name="vol_val" ) upper_band = pd.Series((volatility_value * dev_factor) + MA, name="UPPER") lower_band = pd.Series(MA - (volatility_value * dev_factor), name="LOWER") return pd.concat([upper_band, lower_band], axis=1) # ------------------> B <------------------------ # [0] __ Bollinger Bands (BBANDS) # TODO def bbands(data: DataFrame,period: int = 20,MA: Series = None, column: str = "close",std_multiplier: float = 2,) -> DataFrame: std = data[column].rolling(window=period).std() if not isinstance(MA, pd.core.series.Series): middle_band = pd.Series(sma(data, period), name="BB_MIDDLE") else: middle_band = pd.Series(MA, name="BB_MIDDLE") upper_bb = pd.Series(middle_band + (std_multiplier * std), name="BB_UPPER") lower_bb = pd.Series(middle_band - (std_multiplier * std), name="BB_LOWER") return pd.concat([upper_bb, middle_band, lower_bb], axis=1) # [0] __ Bollinger Bands Width (BBWidth) # TODO def bbwidth( data: DataFrame, period: int = 20, MA: Series = None, column: str = "close" ) -> Series: BB = bbands(data, period, MA, column) return pd.Series( (BB["BB_UPPER"] - BB["BB_LOWER"]) / BB["BB_MIDDLE"], name="{0} period BBWITH".format(period), ) # ------------------> D <------------------------ # [0] __ Double Exponential Moving Average (DEMA) # 2 * EWMA - ewm(EWMA) def dema(data : DataFrame,period: int = 10,column: str ='close',adjust: bool = True) -> Series: DEMA = ( 2*ema(data,period) - ema(data,period).ewm(span=period , adjust=adjust).mean() ) return pd.Series( DEMA , name = f'{period}_DEMA' ) # [0] __ Directional Movement Index (DMI) # TODO def dmi(data: DataFrame, column: str = "close", adjust: bool = True) -> Series: def _get_time(close): sd = close.rolling(5).std() asd = sd.rolling(10).mean() v = sd / asd t = 14 / v.round() t[t.isna()] = 0 t = t.map(lambda x: int(min(max(x, 5), 30))) return t def _dmi(index): time = t.iloc[index] if (index - time) < 0: subset = data.iloc[0:index] else: subset = data.iloc[(index - time) : index] return rsi(subset, period=time, adjust=adjust).values[-1] dates = Series(data.index) periods = Series(range(14, len(dates)), index=dates.index[14:].values) t = _get_time(data[column]) return periods.map(lambda x: _dmi(x)) # ------------------> E <------------------------ # [0] __ Exponential Weighted Moving Average (EWMA) or Exponential Moving Average(EMA) # Exponential average of prev n day prices def ema(data : DataFrame,period: int = 10,column: str ='close',adjust: bool = True) -> Series: return pd.Series( data[column].ewm(span=period, adjust=adjust).mean(), name = f'{period}_EMA' ) # [0] __ Kaufman Efficiency indicator (KER) or 
(ER) # change in price / volatility Here change and volatility are absolute def er(data : DataFrame,period: int = 10,column: str ='close') -> Series: change = data[column].diff(period).abs() volatility = data[column].diff().abs().rolling(window=period,min_periods=1).sum() return pd.Series(change / volatility, name=f'{period}_ER' ) # [0] __ TODO (EVSTC) # TODO def evstc(data: DataFrame,period_fast: int = 12,period_slow: int = 30, k_period: int = 10,d_period: int = 3,adjust: bool = True) -> Series: ema_slow = evwma(data, period_slow) ema_fast = evwma(data, period_fast) macd = ema_fast - ema_slow STOK = pd.Series(( (macd - macd.rolling(window=k_period).min()) / (macd.rolling(window=k_period).max() - macd.rolling(window=k_period).min()) ) * 100) STOD = STOK.rolling(window=d_period).mean() STOD_DoubleSmooth = STOD.rolling(window=d_period).mean() return pd.Series(STOD_DoubleSmooth, name="{0} period EVSTC".format(k_period)) # [0] __ Elastic Volume Weighted Moving Average (EVWMA) # x is ((volume sum for n period) - volume ) divided by (volume sum for n period) # y is volume * close / (volume sum for n period) def evwma(data, period: int = 20) -> Series: vol_sum = (data["volume"].rolling(window=period,min_periods=1).sum()) x = (vol_sum - data["volume"]) / vol_sum y = (data["volume"] * data["close"]) / vol_sum evwma = [0] for x, y in zip(x.fillna(0).iteritems(), y.iteritems()): if x[1] == 0 or y[1] == 0: evwma.append(0) else: evwma.append(evwma[-1] * x[1] + y[1]) return pd.Series( evwma[1:], index=data.index, name=f'{period}_EVWMA' ) # [0] __ Elastic Volume Weighted Moving average convergence divergence (EV_MACD) # MACD calculation on basis of Elastic Volume Weighted Moving average (EVWMA) def ev_macd(data: DataFrame,period_fast: int = 20,period_slow: int = 40, signal: int = 9,adjust: bool = True,) -> DataFrame: evwma_slow = evwma(data, period_slow) evwma_fast = evwma(data, period_fast) MACD = pd.Series(evwma_fast - evwma_slow, name="EV MACD") MACD_signal = pd.Series( MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL" ) return pd.concat([MACD, MACD_signal], axis=1) # ------------------> F <------------------------ # [0] __ Fisher Transform # TODO def fish(data: DataFrame, period: int = 10, adjust: bool = True) -> Series: from numpy import log, seterr seterr(divide="ignore") med = (data["high"] + data["low"]) / 2 ndaylow = med.rolling(window=period).min() ndayhigh = med.rolling(window=period).max() raw = (2 * ((med - ndaylow) / (ndayhigh - ndaylow))) - 1 smooth = raw.ewm(span=5, adjust=adjust).mean() _smooth = smooth.fillna(0) return pd.Series( (log((1 + _smooth) / (1 - _smooth))).ewm(span=3, adjust=adjust).mean(), name="{0} period FISH.".format(period), ) # [0] __ Fractal Adaptive Moving Average (FRAMA) # TODO def FRAMA(data: DataFrame, period: int = 16, batch: int=10) -> Series: assert period % 2 == 0, print("FRAMA period must be even") c = data.close.copy() window = batch * 2 hh = c.rolling(batch).max() ll = c.rolling(batch).min() n1 = (hh - ll) / batch n2 = n1.shift(batch) hh2 = c.rolling(window).max() ll2 = c.rolling(window).min() n3 = (hh2 - ll2) / window # calculate fractal dimension D = (np.log(n1 + n2) - np.log(n3)) / np.log(2) alp = np.exp(-4.6 * (D - 1)) alp = np.clip(alp, .01, 1).values filt = c.values for i, x in enumerate(alp): cl = c.values[i] if i < window: continue filt[i] = cl * x + (1 - x) * filt[i - 1] return pd.Series(filt, index=data.index, name= f'{period} FRAMA' ) # [0] __ Finite Volume Element (FVE) # TODO def fve(data: DataFrame, period: int = 22, 
factor: int = 0.3) -> Series: hl2 = (data["high"] + data["low"]) / 2 tp_ = tp(data) smav = data["volume"].rolling(window=period).mean() mf = pd.Series((data["close"] - hl2 + tp_.diff()), name="mf") _mf = pd.concat([data["close"], data["volume"], mf], axis=1) def vol_shift(row): if row["mf"] > factor * row["close"] / 100: return row["volume"] elif row["mf"] < -factor * row["close"] / 100: return -row["volume"] else: return 0 _mf["vol_shift"] = _mf.apply(vol_shift, axis=1) _sum = _mf["vol_shift"].rolling(window=period).sum() return pd.Series((_sum / smav) / period * 100) # ------------------> H <------------------------ # [0] __ Hull Moving Average (HMA) # wma of change in wma where change in wma is 2 * (wma half period) - (wma full period) def hma(data, period: int = 16) -> Series: half_length = int(period / 2) sqrt_length = int(math.sqrt(period)) wmaf = wma(data, period=half_length) wmas = wma(data, period=period) data["deltawma"] = 2 * wmaf - wmas hma = wma(data, column="deltawma", period=sqrt_length) return pd.Series(hma, name=f'{period}_HMA') # ------------------> I <------------------------ # [0] __ Ichimoku Cloud # TODO def ichimoku(data: DataFrame,tenkan_period: int = 9,kijun_period: int = 26, senkou_period: int = 52,chikou_period: int = 26,) -> DataFrame: tenkan_sen = pd.Series( ( data["high"].rolling(window=tenkan_period).max() + data["low"].rolling(window=tenkan_period).min() ) / 2, name="TENKAN", ) ## conversion line kijun_sen = pd.Series( ( data["high"].rolling(window=kijun_period).max() + data["low"].rolling(window=kijun_period).min() ) / 2, name="KIJUN", ) ## base line senkou_span_a = pd.Series( ((tenkan_sen + kijun_sen) / 2), name="senkou_span_a" ) .shift(kijun_period) ## Leading span senkou_span_b = pd.Series( ( ( data["high"].rolling(window=senkou_period).max() + data["low"].rolling(window=senkou_period).min() ) / 2 ), name="SENKOU", ).shift(kijun_period) chikou_span = pd.Series( data["close"].shift(-chikou_period), name="CHIKOU", ) return pd.concat( [tenkan_sen, kijun_sen, senkou_span_a, senkou_span_b, chikou_span], axis=1 ) # [0] __ Inverse Fisher Transform (IFTRSI) # TODO def ift_rsi(data: DataFrame,column: str = "close",rsi_period: int = 5, wma_period: int = 9,) -> Series: v1 = pd.Series(0.1 * (rsi(data, rsi_period) - 50), name="v1") d = (wma_period * (wma_period + 1)) / 2 weights = np.arange(1, wma_period + 1) def linear(w): def _compute(x): return (w * x).sum() / d return _compute _wma = v1.rolling(wma_period, min_periods=wma_period) v2 = _wma.apply(linear(weights), raw=True) return pd.Series( ((v2 ** 2 - 1) / (v2 ** 2 + 1)), name="IFT_RSI" ) # ------------------> K <------------------------ # [0] __ Kaufman's Adaptive Moving Average (KAMA) # first KAMA is SMA # Current KAMA = Previous KAMA + smoothing_constant * (Price - Previous KAMA) def kama(data,er_: int = 10,ema_fast: int = 2, ema_slow: int = 30,period: int = 20, column: str ='close') -> Series: er_ = er(data) fast_alpha = 2 / (ema_fast + 1) slow_alpha = 2 / (ema_slow + 1) sc = pd.Series( (er_ * (fast_alpha - slow_alpha) + slow_alpha) ** 2, name="smoothing_constant", ) sma = pd.Series( data[column].rolling(period).mean(), name="SMA" ) kama = [] for s, ma, price in zip( sc.iteritems(), sma.shift().iteritems(), data[column].iteritems() ): try: kama.append(kama[-1] + s[1] * (price[1] - kama[-1])) except (IndexError, TypeError): if pd.notnull(ma[1]): kama.append(ma[1] + s[1] * (price[1] - ma[1])) else: kama.append(None) sma["KAMA"] = pd.Series( kama, index=sma.index, name=f'{period}_KAMA') return sma['KAMA'] # [0] 
__ Keltner Channels (KC) # TODO def kc(ohlc: DataFrame,period: int = 20,atr_period: int = 10, MA: Series = None,kc_mult: float = 2,) -> DataFrame: if not isinstance(MA, pd.core.series.Series): middle = pd.Series(ema(ohlc, period), name="KC_MIDDLE") else: middle = pd.Series(MA, name="KC_MIDDLE") up = pd.Series(middle + (kc_mult * atr(ohlc, atr_period)), name="KC_UPPER") down = pd.Series( middle - (kc_mult * atr(ohlc, atr_period)), name="KC_LOWER" ) return pd.concat([up, down], axis=1) # ------------------> M <------------------------ # [0] __ Moving average convergence divergence (MACD) # MACD is Difference of ema fast and ema slow # Here fast period is 12 and slow period is 26 # MACD Signal is ewm of MACD def macd(data,period_fast: int = 12,period_slow: int = 26, signal: int = 9,column: str = "close",adjust: bool = True ) -> DataFrame: EMA_fast = pd.Series( data[column].ewm(ignore_na=False, span=period_fast, adjust=adjust).mean(), name=f'{period_fast}_EMA_fast') EMA_slow = pd.Series( data[column].ewm(ignore_na=False, span=period_slow, adjust=adjust).mean(), name=f'{period_slow}_EMA_slow') MACD = pd.Series(EMA_fast - EMA_slow,name='MACD') MACD_signal = pd.Series( MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(),name=f'{signal}_SIGNAL' ) DIFF = pd.Series( MACD - MACD_signal, name="diff MACD_MSIGNAL" ) return pd.concat( [DIFF, MACD, MACD_signal ], axis=1 ) # [0] __ Moving Standard Deviation (MSD) # Standard deviation of a given period for the column passed as arguement def msd(data: DataFrame, period: int = 21, column: str = "close") -> Series: return pd.Series(data[column].rolling(period).std(), name="MSD") # Momentum Breakout Bands (MOBO) # TODO def mobo(data: DataFrame,period: int = 10,std_multiplier: float = 0.8, column: str = "close",) -> DataFrame: BB = bbands(data, period=10, std_multiplier=0.8, column=column) return BB # [0] __ Market momentum (MOM) def mom(data: DataFrame, period: int = 10, column: str = "close") -> Series: return pd.Series(data[column].diff(period), name=f'{period}_MOM' ) # [0] __ Moving Volume Weighted Average Price (MVWAP) # SMA of (close * volume ) divided by SMA of volume def mvwap(data: DataFrame, period:int = 9) -> Series: data["cv"] =(data["close"] * data["volume"]) return pd.Series( (sma(data,period = period,column = "cv")/sma(data,period=period,column="volume")), name="MVWAP." 
) # ------------------> P <------------------------ # ------------|| Pivot ||------------------------ # [0] __ Pivot Camarilla # TODO def pivot_camarilla(data: DataFrame) -> DataFrame: df_ = data.shift() pivot = pd.Series(tp(df_), name="pivot") s1 = df_['close']+(1.1*(df_['high']-df_['low'])/12) s2 = df_['close']-(1.1*(df_['high']-df_['low'])/6) s3 = df_['close']-(1.1*(df_['high']-df_['low'])/4) s4 =df_['close']-(1.1*(df_['high']-df_['low'])/2) r1 = df_['close']+(1.1*(df_['high']-df_['low'])/12) r2 = df_['close']+(1.1*(df_['high']-df_['low'])/6) r3 =df_['close']+(1.1*(df_['high']-df_['low'])/4) r4 = df_['close']+(1.1*(df_['high']-df_['low'])/2) return pd.concat( [ pivot, pd.Series(s1, name="s1"), pd.Series(s2, name="s2"), pd.Series(s3, name="s3"), pd.Series(s4, name="s4"), pd.Series(r1, name="r1"), pd.Series(r2, name="r2"), pd.Series(r3, name="r3"), pd.Series(r4, name="r4"), ], axis=1, ) # [0] __ Pivot Classic # TODO def pivot_classic(data: DataFrame) -> DataFrame: df_ = data.shift() pivot = pd.Series(tp(df_), name="pivot") s1 = (pivot * 2) - df_["high"] s2 = pivot - (df_["high"] - df_["low"]) s3 = pivot - 2*(df_["high"] - df_["low"]) s4 = pivot - 3*(df_["high"] - df_["low"]) r1 = (pivot * 2) - df_["low"] r2 = pivot + (df_["high"] - df_["low"]) r3 = pivot + 2*(df_["high"] - df_["low"]) r4 = pivot + 3*(df_["high"] - df_["low"]) return pd.concat( [ pivot, pd.Series(s1, name="s1"), pd.Series(s2, name="s2"), pd.Series(s3, name="s3"), pd.Series(s4, name="s4"), pd.Series(r1, name="r1"), pd.Series(r2, name="r2"), pd.Series(r3, name="r3"), pd.Series(r4, name="r4"), ], axis=1, ) # [0] __ Pivot Demark # TODO def pivot_demark(data: DataFrame) -> DataFrame: df_ = data.shift() pivot,s1,r1=[],[],[] for i in range(len(df_)): if df_['open'][i]==df_['close'][i]: x=df_['high'][i]+df_['low'][i]+2*df_['close'][i] elif df_['close'][i]>df_['open'][i]: x=2*df_['high'][i]+df_['low'][i]+df_['close'][i] else: x=df_['high'][i]+2*df_['low'][i]+df_['close'][i] pivot.append(x/4) s1.append(x/2 - df_["high"][i]) r1.append(x/2 - df_["low"][i]) data_ = pd.DataFrame(pivot,columns=['pivot']) data_['s1']=s1 data_['r1']=r1 return data_ # [0] __ Pivot Fibonacci # TODO def pivot_fibonacci(data: DataFrame) -> DataFrame: df_ = data.shift() pivot = pd.Series(tp(df_), name="pivot") s1 = pivot - ((df_["high"] - df_["low"])*0.382) s2 = pivot - ((df_["high"] - df_["low"])*0.618) s3 = pivot - (df_["high"] - df_["low"]) s4 = pivot + ((df_["high"] - df_["low"])*1.382) r1 = pivot + ((df_["high"] - df_["low"])*0.382) r2 = pivot + ((df_["high"] - df_["low"])*0.618) r3 =pivot + (df_["high"] - df_["low"]) r4 = pivot + (df_["high"] - df_["low"])*1.382 return pd.concat( [ pivot, pd.Series(s1, name="s1"), pd.Series(s2, name="s2"), pd.Series(s3, name="s3"), pd.Series(s4, name="s4"), pd.Series(r1, name="r1"), pd.Series(r2, name="r2"), pd.Series(r3, name="r3"), pd.Series(r4, name="r4"), ], axis=1, ) # [0] __ Pivot Traditional # TODO def pivot_traditional(data: DataFrame) -> DataFrame: df_ = data.shift() pivot = pd.Series(tp(df_), name="pivot") s1 = (pivot * 2) - df_["high"] s2 = pivot - (df_["high"] - df_["low"]) s3 = df_["low"] - (2 * (df_["high"] - pivot)) s4 = df_["low"] - (3 * (df_["high"] - pivot)) s5 = df_["low"] - (4 * (df_["high"] - pivot)) r1 = (pivot * 2) - df_["low"] r2 = pivot + (df_["high"] - df_["low"]) r3 = df_["high"] + (2 * (pivot - df_["low"])) r4 = df_["high"] + (3 * (pivot - df_["low"])) r5 = df_["high"] + (4 * (pivot - df_["low"])) return pd.concat( [ pivot, pd.Series(s1, name="s1"), pd.Series(s2, name="s2"), pd.Series(s3, 
name="s3"), pd.Series(s4, name="s4"), pd.Series(s5, name="s5"), pd.Series(r1, name="r1"), pd.Series(r2, name="r2"), pd.Series(r3, name="r3"), pd.Series(r4, name="r4"), pd.Series(r5, name="r5"), ], axis=1, ) # [0] __ Pivot Woodie # TODO def pivot_woodie(data: DataFrame) -> DataFrame: df_ = data.shift() pivot = pd.Series((df_['high']+df_['low']+2*data['open'])/4, name="pivot") s1 = 2*pivot-df_['high'] s2 = pivot - (df_["high"] - df_["low"]) s3 = df_["low"] - (2 * (pivot - df_["high"])) s4 = s3 - (df_["high"] - df_["low"]) r1 = 2*pivot-df_['low'] r2 = pivot + (df_["high"] - df_["low"]) r3 =df_["high"] + (2 * (pivot - df_["low"])) r4 = r3 + (df_["high"] - df_["low"]) return pd.concat( [ pivot, pd.Series(s1, name="s1"), pd.Series(s2, name="s2"), pd.Series(s3, name="s3"), pd.Series(s4, name="s4"), pd.Series(r1, name="r1"), pd.Series(r2, name="r2"), pd.Series(r3, name="r3"), pd.Series(r4, name="r4"), ], axis=1, ) # [0] __ PPO # TODO def ppo(data: DataFrame,period_fast: int = 12,period_slow: int = 26, signal: int = 9,column: str = "close", adjust: bool = True,) -> DataFrame: EMA_fast = pd.Series( data[column].ewm(ignore_na=False, span=period_fast, adjust=adjust).mean(), name="EMA_fast", ) EMA_slow = pd.Series( data[column].ewm(ignore_na=False, span=period_slow, adjust=adjust).mean(), name="EMA_slow", ) PPO = pd.Series(((EMA_fast - EMA_slow) / EMA_slow) * 100, name="PPO") PPO_signal = pd.Series( PPO.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL" ) PPO_histo = pd.Series(PPO - PPO_signal, name="HISTO") return pd.concat([PPO, PPO_signal, PPO_histo], axis=1) # ------------------> R <------------------------ # [0] __ Relative Strength Index (RSI) # EMA of up and down gives gain and loss # Relative Strength Index is gain / loss def rsi(data: DataFrame, period: int = 14,column: str = "close", adjust: bool = True,) -> Series: delta = data[column].diff() up, down = delta.copy(), delta.copy() up[up < 0] = 0 down[down > 0] = 0 _gain = up.ewm(alpha=1.0 / period, adjust=adjust).mean() _loss = down.abs().ewm(alpha=1.0 / period, adjust=adjust).mean() RS = _gain / _loss return pd.Series(100 - (100 / (1 + RS)), name=f'{period} period RSI' ) # [0] __ Rate of Change (ROC) def roc(data: DataFrame, period: int = 12, column: str = "close") -> Series: return pd.Series( (data[column].diff(period) / data[column].shift(period)) * 100, name="ROC" ) # ------------------> S <------------------------ # [0] __ Stop And Reverse (SAR) # The indicator is below prices when prices are rising and above # prices when prices are falling. 
# TODO def sar(data: DataFrame, af: int = 0.02, amax: int = 0.2) -> Series: high, low = data.high, data.low # Starting values sig0, xpt0, af0 = True, high[0], af _sar = [low[0] - (high - low).std()] for i in range(1, len(data)): sig1, xpt1, af1 = sig0, xpt0, af0 lmin = min(low[i - 1], low[i]) lmax = max(high[i - 1], high[i]) if sig1: sig0 = low[i] > _sar[-1] xpt0 = max(lmax, xpt1) else: sig0 = high[i] >= _sar[-1] xpt0 = min(lmin, xpt1) if sig0 == sig1: sari = _sar[-1] + (xpt1 - _sar[-1]) * af1 af0 = min(amax, af1 + af) if sig0: af0 = af0 if xpt0 > xpt1 else af1 sari = min(sari, lmin) else: af0 = af0 if xpt0 < xpt1 else af1 sari = max(sari, lmax) else: af0 = af sari = xpt0 _sar.append(sari) return pd.Series(_sar, index=data.index) # [0] __ Simple moving average (SMA) or moving average (MA) # Average of prev n day prices def sma(data,period: int = 10,column: str ='close') -> Series: return pd.Series( data[column].rolling(window = period,min_periods= 1).mean(), name = f'{period}_SMA' ) # [0] __ Simple moving median (SMM) or moving median (MM) # median of prev n day prices def smm(data,period: int = 10,column: str ='close') -> Series: return pd.Series( data[column].rolling(window = period,min_periods= 1).median(), name = f'{period}_SMM' ) # [0] __ Simple smoothed moving average (SSMA) or smoothed moving average() # smoothed (exponential + simple) average of prev n day prices def ssma(data,period: int = 10,column: str ='close',adjust: bool = True) -> Series: return pd.Series( data[column].ewm(ignore_na = False, alpha=1.0/period, min_periods=0, adjust=adjust).mean(), name = f'{period}_SSMA' ) # [0] __ The Schaff Trend Cycle (Oscillator) (STC) # TODO def stc(data: DataFrame,period_fast: int = 23,period_slow: int = 50,k_period: int = 10, d_period: int = 3,column: str = "close",adjust: bool = True) -> Series: EMA_fast = pd.Series( data[column].ewm(ignore_na=False, span=period_fast, adjust=adjust).mean(), name="EMA_fast", ) EMA_slow = pd.Series( data[column].ewm(ignore_na=False, span=period_slow, adjust=adjust).mean(), name="EMA_slow", ) MACD = pd.Series((EMA_fast - EMA_slow), name="MACD") STOK = pd.Series(( (MACD - MACD.rolling(window=k_period).min()) / (MACD.rolling(window=k_period).max() - MACD.rolling(window=k_period).min()) ) * 100) STOD = STOK.rolling(window=d_period).mean() STOD_DoubleSmooth = STOD.rolling(window=d_period).mean() # "double smoothed" return pd.Series(STOD_DoubleSmooth, name="{0} period STC".format(k_period)) # [0] __ (SQZMI) # TODO def sqzmi(data: DataFrame, period: int = 20, MA: Series = None) -> DataFrame: if not isinstance(MA, pd.core.series.Series): ma = pd.Series(sma(data, period)) else: ma = None bb = bbands(data, period=period, MA=ma) kc_ = kc(data, period=period, kc_mult=1.5) comb =
pd.concat([bb, kc_], axis=1)
pandas.concat
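pd.concat with axis=1 joins the Bollinger and Keltner band frames column-wise on their shared index. A self-contained sketch with toy values in place of the computed bands:

import pandas as pd

idx = pd.date_range("2021-01-01", periods=3, freq="D")
bb = pd.DataFrame({"BB_UPPER": [11.0, 11.2, 11.4], "BB_LOWER": [9.0, 9.1, 9.2]}, index=idx)
kc_ = pd.DataFrame({"KC_UPPER": [10.8, 11.0, 11.1], "KC_LOWER": [9.3, 9.4, 9.5]}, index=idx)

comb = pd.concat([bb, kc_], axis=1)   # column-wise join on the shared DatetimeIndex
print(comb.columns.tolist())          # ['BB_UPPER', 'BB_LOWER', 'KC_UPPER', 'KC_LOWER']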
import datetime import os import time import numpy as np import pandas as pd from coredotfinance.binance import dataframe_util, datetime_util from coredotfinance.binance.api import ( api_24hr, api_avg_price, api_depth, api_exchange_info, api_klines, ) from coredotfinance.binance.utils import get_date_list def get_symbols() -> list: """Binance의 Symbol List 리턴""" response = api_exchange_info() symbol_list = [ response["symbols"][i]["symbol"] for i in range(len(response["symbols"])) ] return symbol_list def get_current_price(symbol) -> float: """대상 Symbol의 현재 가격 리턴""" print(symbol.upper()) response = api_avg_price(symbol.upper()) return float(response.get("price")) def get_orderbook(symbol, limit=None) -> pd.DataFrame: """대상 Symbol의 호가창(DataFrame) 리턴""" print(symbol.upper()) response = api_depth(symbol.upper(), limit=limit) bids = np.array(response["bids"]) asks = np.array(response["asks"]) concat = np.concatenate((bids, asks), axis=1) df = pd.DataFrame( concat, columns=["bid_price", "bid_volume", "ask_price", "ask_volume"] ) df = dataframe_util.rename_cols2kor(df) return df def get_24hr_all_price() -> pd.DataFrame: """모든 Symbol의 24시간 동안의 가격 정보(DataFrame) 리턴 (거래대금순 내림차순 정렬)""" response = api_24hr() df = pd.DataFrame(response) df["tradingValue"] = df["volume"].astype(float) * df["weightedAvgPrice"].astype( float ) isUSDT = df["symbol"].str.contains(".USDT", regex=True) cols = [ "symbol", "priceChange", "priceChangePercent", "openPrice", "highPrice", "lowPrice", "lastPrice", "volume", "tradingValue", ] df = ( df.loc[isUSDT, cols] .sort_values(by=["tradingValue"], ascending=False) .reset_index(drop=True) ) df = dataframe_util.rename_cols2kor(df) return df def get_ohlcv( symbol: str = "BTCUSDT", interval="1d", start=None, end=None, limit=1000 ) -> pd.DataFrame: """대상 symbol의 가격 정보(DataFrame) 리턴 Parameters ---------- symbol : str, optional Binance Symbol, by default "BTCUSDT" interval : str, optional 조회 간격 설정, by default "1d" (1m, 3m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d, 3d, 1w, 1M) start : str, optional 조회 시작 날짜(YYYYMMDD), by default 최근 날짜 end : str, optional 조회 끝 날짜(YYYYMMDD), by default 최근 날짜 limit : int, optional 조회 개수, by default 1000 Returns ------- pd.DataFrame 대상 symbol의 조회 조건에 맞는 일시별 시가/고가/저가/종가/거래량 DataFrame """ if start: start = datetime_util.convert_date2timestamp_sec(start) * 1000 # s -> ms if end: end = datetime_util.convert_date2timestamp_sec(end) * 1000 # s -> ms ohlcv = api_klines(symbol.upper(), interval, start, end, limit) df = pd.DataFrame(ohlcv).iloc[:, :6] df.columns = ["datetime", "open", "high", "low", "close", "volume"] df["datetime"] =
pd.to_datetime(df["datetime"], unit="ms")
pandas.to_datetime
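Binance klines report the bar open time as a millisecond epoch, which pd.to_datetime(..., unit="ms") converts to timestamps. A minimal sketch with made-up epochs:

import pandas as pd

# millisecond epochs, the format the klines open-time column arrives in
df = pd.DataFrame({"datetime": [1609459200000, 1609545600000]})
df["datetime"] = pd.to_datetime(df["datetime"], unit="ms")
print(df["datetime"].tolist())
# [Timestamp('2021-01-01 00:00:00'), Timestamp('2021-01-02 00:00:00')]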
# -*- coding: utf-8 -*- """ Functions for cleaning and processing the AHBA microarray dataset """ from pkg_resources import resource_filename from nibabel.volumeutils import Recoder import numpy as np import pandas as pd from scipy.spatial.distance import cdist from . import io, utils # AHBA structure IDs corresponding to different brain parts ONTOLOGY = Recoder( (('4008', 'cerebral cortex', 'cortex'), ('4275', 'cerebral nuclei', 'subcortex'), ('4391', 'diencephalon', 'subcortex'), ('9001', 'mesencephalon', 'subcortex'), ('4696', 'cerebellum', 'cerebellum'), ('9131', 'pons', 'brainstem'), ('9512', 'myelencephalon', 'brainstem'), ('9218', 'white matter', 'white matter'), ('9352', 'sulci & spaces', 'other'), ('4219', 'hippocampal formation', 'subcortex')), fields=('id', 'name', 'structure') ) def update_mni_coords(annotation): """ Replaces MNI coords in `annotation` with corrected coords from `alleninf` Parameters ---------- annotation : str Annotation file from Allen Brain Institute. Optimally obtained by calling `abagen.fetch_microarray()` and accessing the `annotation` attribute on the resulting object Returns ------- corrected : pandas.DataFrame Annotation data with corrected MNI coordinates References ---------- Updated MNI coordinates taken from https://github.com/chrisfilo/alleninf, which is licensed under the BSD-3 (reproduced here): Copyright (c) 2018, <NAME> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ coords = resource_filename('abagen', 'data/corrected_mni_coordinates.csv') coords =
pd.read_csv(coords)
pandas.read_csv
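Here read_csv loads a coordinates table bundled with the package via resource_filename. A sketch of the same call against an in-memory stand-in; the column names are assumed for illustration, not the real file's header:

import io
import pandas as pd

# stand-in for corrected_mni_coordinates.csv; columns are assumed for illustration
csv_text = io.StringIO("well_id,mni_x,mni_y,mni_z\n1,-8.0,52.0,10.0\n2,30.0,-22.0,58.0\n")
coords = pd.read_csv(csv_text)
print(coords.shape)              # (2, 4)
print(coords.columns.tolist())   # ['well_id', 'mni_x', 'mni_y', 'mni_z']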
# coding:utf-8 # # The MIT License (MIT) # # Copyright (c) 2016-2020 # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import re from datetime import datetime import json import logging import webbrowser import numpy as np import pandas as pd from czsc.Data.data_fq import data_stock_to_fq from czsc.Fetch.mongo import FACTOR_DATABASE from czsc.Fetch.tdx import get_bar from czsc.Indicator import IndicatorSet from czsc.Utils.echarts_plot import kline_pro from czsc.Utils.logs import util_log_info from czsc.Utils.trade_date import TradeDate, util_get_real_date, util_get_next_day from czsc.Utils.transformer import DataEncoder def identify_direction(v1, v2): if v1 > v2: # 前面几根可能都是包含,这里直接初始赋值-1,上升趋势为正数 direction = 1 else: direction = -1 return direction def update_fx(bars, new_bars: list, fx_list: list, trade_date: list): """更新分型序列 k线中有direction,fx中没有direction字段 分型记对象样例: { 'date': Timestamp('2020-11-26 00:00:00'), 'fx_mark': -1, 低点用—1表示 'value': 138.0, 'fx_start': Timestamp('2020-11-25 00:00:00'), 'fx_end': Timestamp('2020-11-27 00:00:00'), } { 'date': Timestamp('2020-11-26 00:00:00'), 'fx_mark': +1, 高点用+1表示 'value': 150.67, 'fx_start': Timestamp('2020-11-25 00:00:00'), 'fx_end': Timestamp('2020-11-27 00:00:00'), } """ assert len(bars) > 0 bar = bars[-1].copy() if len(trade_date) > 1: if TradeDate(bar['date']) < TradeDate(trade_date[-1]): util_log_info('{} data is older than {} !'.format(bar['date'], trade_date[-1])) return trade_date.append(bar['date']) # 第1根K线没有方向,不需要任何处理 if len(bars) < 2: new_bars.append(bar) return False last_bar = new_bars[-1] cur_h, cur_l = bar['high'], bar['low'] last_h, last_l, last_dt = last_bar['high'], last_bar['low'], last_bar['date'] # 处理过包含关系,只需要用一个值识别趋势 direction = identify_direction(cur_h, last_h) # 第2根K线只需要更新方向 if len(bars) < 3: bar.update(direction=direction) new_bars.append(bar) return False last_direction = last_bar.get('direction') # 没有包含关系,需要进行分型识别,趋势有可能改变 if (cur_h > last_h and cur_l > last_l) or (cur_h < last_h and cur_l < last_l): new_bars.append(bar) # 分型识别 if last_direction * direction < 0: bar.update(direction=direction) if direction < 0: fx = { "date": last_bar['date'], "fx_mark": 1, "value": last_bar['high'], "fx_start": new_bars[-3]['date'], # 记录分型的开始和结束时间 "fx_end": bar['date'], # "direction": bar['direction'], } else: fx = { "date": last_bar['date'], "fx_mark": -1, "value": last_bar['low'], "fx_start": new_bars[-3]['date'], # 记录分型的开始和结束时间 "fx_end": bar['date'], # "direction": bar['direction'], } fx_list.append(fx) return True bar.update(direction=last_direction 
+ np.sign(last_direction)) return False # 有包含关系,不需要进行分型识别,趋势不改变,direction数值增加 bar.update(direction=last_direction + np.sign(last_direction)) new_bars.pop(-1) # 有包含关系的前一根数据被删除,这里是个技巧 # 有包含关系,按方向分别处理,同时需要更新日期 if last_direction > 0: if cur_h < last_h: bar.update(high=last_h, date=last_dt) if cur_l < last_l: bar.update(low=last_l) elif last_direction < 0: if cur_l > last_l: bar.update(low=last_l, date=last_dt) if cur_h > last_h: bar.update(high=last_h) else: logging.error('{} last_direction: {} is wrong'.format(last_dt, last_direction)) raise ValueError new_bars.append(bar) return False class XdList(object): """存放线段""" def __init__(self, bars, indicators, trade_date): # 传入的是地址,不要修改 self.bars = bars self.indicators = indicators self.trade_date = trade_date # item存放数据元素 self.xd_list = [] # 否则指向同一个地址 # 低级别的中枢 self.zs_list = [] self.sig_list = [] # next是低一级别的线段 self.next = None # prev 指向高一级别的线段 self.prev = None def __len__(self): return len(self.xd_list) def __getitem__(self, item): return self.xd_list[item] def __setitem__(self, key, value): self.xd_list[key] = value def append(self, value): self.xd_list.append(value) def update_zs(self): """ { 'zs_start': 进入段的起点 'zs_end': 离开段的终点 'ZG': 中枢高点, 'ZD': 中枢低点, 'GG': 中枢最低点, 'DD': 中枢最高点, 'xd_list': list[dict] 'location': 中枢位置 } """ xd_list = self.xd_list if len(xd_list) < 3: return False zs_list = self.zs_list if len(zs_list) < 1: assert len(xd_list) < 4 zg = xd_list[0] if xd_list[0]['fx_mark'] > 0 else xd_list[1] zd = xd_list[0] if xd_list[0]['fx_mark'] < 0 else xd_list[1] zs = { 'ZG': zg, 'ZD': zd, 'GG': [zg], # 初始用list储存,记录高低点的变化过程,中枢完成时可能会回退 'DD': [zd], # 根据最高最低点的变化过程可以识别时扩散,收敛,向上还是向下的形态 'xd_list': xd_list[:2], 'weight': 1, # 记录中枢中段的数量 'location': 0, # 初始状态为0,说明没有方向, -1 表明下降第1个中枢, +2 表明上升第2个中枢 'real_loc': 0 # 除去只有一段的中枢 } zs_list.append(zs) return False # 确定性的笔参与中枢构建 last_zs = zs_list[-1] xd = xd_list[-2] if TradeDate(last_zs['xd_list'][-1]['date']) >= TradeDate(xd['date']): # 已经计算过中枢 return False if xd['fx_mark'] > 0: # 三卖 ,滞后,实际出现了一买信号 if xd['value'] < last_zs['ZD']['value']: zs_end = last_zs['xd_list'].pop(-1) if zs_end['date'] == last_zs['DD'][-1]['date']: last_zs['DD'].pop(-1) last_zs.update( zs_end=zs_end, weight=last_zs['weight'] - 1, DD=last_zs['DD'], real_loc=last_zs['real_loc'] + 1 if last_zs['weight'] == 2 else last_zs['real_loc'] ) zs = { 'zs_start': xd_list[-4], 'ZG': xd, 'ZD': zs_end, 'GG': [xd], 'DD': [zs_end], 'xd_list': [zs_end, xd], 'weight': 1, 'location': -1 if last_zs['location'] >= 0 else last_zs['location'] - 1, 'real_loc': -1 if last_zs['real_loc'] >= 0 else last_zs['real_loc'] - 1, } zs_list.append(zs) return True elif xd['value'] < last_zs['ZG']['value']: last_zs.update(ZG=xd) # 有可能成为离开段 elif xd['value'] > last_zs['GG'][-1]['value']: last_zs['GG'].append(xd) elif xd['fx_mark'] < 0: # 三买,滞后,实际出现了一卖信号 if xd['value'] > last_zs['ZG']['value']: zs_end = last_zs['xd_list'].pop(-1) if zs_end['date'] == last_zs['GG'][-1]['date']: last_zs['GG'].pop(-1) last_zs.update( zs_end=zs_end, weight=last_zs['weight'] - 1, GG=last_zs['GG'], real_loc=last_zs['real_loc'] - 1 if last_zs['weight'] == 2 else last_zs['real_loc'] ) zs = { 'zs_start': xd_list[-4], 'ZG': zs_end, 'ZD': xd, 'GG': [zs_end], 'DD': [xd], 'xd_list': [zs_end, xd], 'weight': 1, 'location': 1 if last_zs['location'] <= 0 else last_zs['location'] + 1, 'real_loc': 1 if last_zs['real_loc'] <= 0 else last_zs['real_loc'] + 1, } zs_list.append(zs) return True elif xd['value'] > last_zs['ZD']['value']: last_zs.update(ZD=xd) # 有可能成为离开段 elif xd['value'] < last_zs['DD'][-1]['value']: 
last_zs['DD'].append(xd) else: raise ValueError last_zs['xd_list'].append(xd) last_zs['weight'] = last_zs['weight'] + 1 return False def update_xd_eigenvalue(self): trade_date = self.trade_date xd = self.xd_list[-1] last_xd = self.xd_list[-2] # xd.update(pct_change=(xd['value'] - last_xd['value']) / last_xd['value']) # start = trade_date.index(last_xd['date']) end = trade_date.index(xd['date']) kn = end - start + 1 fx_mark = kn * np.sign(xd.get('fx_mark', xd.get('direction', 0))) dif = self.indicators.macd[end]['dif'] macd = sum([x['macd'] for x in self.indicators.macd[start: end + 1] if fx_mark * x['macd'] > 0]) xd.update(fx_mark=fx_mark, dif=dif, macd=macd) # xd.update(fx_mark=fx_mark, dif=dif, avg_macd=macd/kn) def update_xd(self): """更新笔分型序列 分型记对象样例: { 'date': Timestamp('2020-11-26 00:00:00'), 'fx_mark': -8, 低点,负数,表示下降趋势持续的K线根数 'value': 138.0, 'fx_start': Timestamp('2020-11-25 00:00:00'), 'fx_end': Timestamp('2020-11-27 00:00:00'), } { 'date': Timestamp('2020-11-26 00:00:00'), 'fx_mark': 7, 高点, 正数,表示上升趋势持续的根数 'value': 150.67, 'fx_start': Timestamp('2020-11-25 00:00:00'), 'fx_end': Timestamp('2020-11-27 00:00:00'), } """ # 至少3根同类型分型才可能出现线段,最后1根bi不确定,因此最后一段也不确定 if self.next is None: self.next = XdList(self.bars, self.indicators, self.trade_date) bi_list = self.xd_list xd_list = self.next if len(bi_list) < 4: return False if len(xd_list) < 1: # 线段不存在,初始化线段,找4个点的最高和最低点组成线段 bi_list = bi_list[:-1].copy() bi_list = sorted(bi_list, key=lambda x: x['value'], reverse=False) if TradeDate(bi_list[0]['date']) < TradeDate(bi_list[-1]['date']): xd_list.append(bi_list[0]) xd_list.append(bi_list[-1]) else: xd_list.append(bi_list[-1]) xd_list.append(bi_list[0]) xd_list.update_xd_eigenvalue() return True bi3 = bi_list[-3] xd = bi_list[-1].copy() last_xd = xd_list[-1] xd2 = xd_list[-2] # if xd['date'] > pd.to_datetime('2016-07-12'): # print('test') # 非分型结尾段,直接替换成分型, 没有新增段,后续不需要处理,同一个端点确认 if 'direction' in last_xd or xd['date'] == last_xd['date']: xd_list[-1] = xd # 日期相等的情况是否已经在内存中修改过了? 
xd_list.update_xd_eigenvalue() return True # assert xd['date'] > last_xd['date'] if TradeDate(xd['date']) <= TradeDate(last_xd['date']): util_log_info('The {} quotes bar input maybe wrong!'.format(xd['date'])) if bi3['fx_mark'] > 0: # -1和-3笔的方向相同,-1笔由于是未确认笔,可能没有fx_mark字段 # 同向延续 if last_xd['fx_mark'] > 0 and xd['value'] > last_xd['value']: xd_list[-1] = xd xd_list.update_xd_eigenvalue() return True # 反向判断 elif last_xd['fx_mark'] < 0: # 价格判断 if xd['value'] > xd2['value']: xd_list.append(xd) xd_list.update_xd_eigenvalue() return True # 出现三笔破坏线段,连续两笔,一笔比一笔高,寻找段之间的最高点 elif TradeDate(bi3['date']) > TradeDate(last_xd['date']) and xd['value'] > bi3['value']: index = -5 bi = bi_list[index] # # 连续两个高点没有碰到段前面一个低点 # try: # if TradeDate(bi['date']) < TradeDate(last_xd['date']) and \ # bi_list[index - 1]['value'] > bi3['value'] and \ # bi_list[index]['value'] > xd['value']: # return False # except Exception as err: # pass # # util_log_info('Last xd {}:{}'.format(last_xd['date'], err)) while TradeDate(bi['date']) > TradeDate(last_xd['date']): if xd['value'] < bi['value']: xd = bi index = index - 2 bi = bi_list[index] xd_list.append(xd) xd_list.update_xd_eigenvalue() return True elif bi3['fx_mark'] < 0: # 同向延续 if last_xd['fx_mark'] < 0 and xd['value'] < last_xd['value']: xd_list[-1] = xd xd_list.update_xd_eigenvalue() return True # 反向判断 elif last_xd['fx_mark'] > 0: # 价格判断 if xd['value'] < xd2['value']: xd_list.append(xd) xd_list.update_xd_eigenvalue() return True # 出现三笔破坏线段,连续两笔,一笔比一笔低,将最低的一笔作为段的起点,避免出现最低点不是端点的问题 elif TradeDate(bi3['date']) > TradeDate(last_xd['date']) and xd['value'] < bi3['value']: index = -5 bi = bi_list[index] # 连续两个个低点没有碰到段前面一高低点 # try: # if TradeDate(bi['date']) < TradeDate(last_xd['date']) and \ # bi_list[index - 1]['value'] < bi3['value'] and \ # bi_list[index]['value'] < xd['value']: # return False # except Exception as err: # pass # # util_log_info('Last xd {}:{}'.format(last_xd['date'], err)) while TradeDate(bi['date']) > TradeDate(last_xd['date']): if xd['value'] > bi['value']: xd = bi index = index - 2 bi = bi_list[index] xd_list.append(xd) xd_list.update_xd_eigenvalue() return True return False def update_sig(self): """ 线段更新后调用,判断是否出现买点 """ if len(self.zs_list) < 1: return False zs = self.zs_list[-1] xd = self.xd_list[-1] xd_list = zs['xd_list'].copy() if 'zs_start' in zs: xd_list.insert(0, zs['zs_start']) sig = { 'date': self.bars[-1]['date'], 'real_loc': zs['real_loc'], 'location': zs['location'], 'weight': zs['weight'], # 'fx_mark': xd['fx_mark'], # 'last_mark': last_xd['fx_mark'], # 'time_ratio': abs(xd['fx_mark'] / last_xd['fx_mark']) * 100, # 'pct_change': xd['pct_change'] * 100, # 'macd': xd['macd'], # 'avg_macd': xd['avg_macd'], } # if sig['date'] >= pd.to_datetime('2021-07-28'): # print(sig['date']) if xd['fx_mark'] > 0: # 上升趋势 # sig.update(GG_macd=zs['GG'][-1].get('macd', np.nan), GG_avg_macd=zs['GG'][-1].get('avg_macd', np.nan)) # if zs['location'] > 0 and zs.get('zs_start', False): # sig.update(start_macd=zs['zs_start']['macd'], start_avg_macd=zs['zs_start']['avg_macd']) sig.update(boll=self.indicators.boll[-1].get('UB', np.nan) / self.bars[-1]['high'] * 100 - 100) if xd['value'] > zs['GG'][-1]['value']: xd_mark = -1 # 如果weight=1, 背驰,有可能1卖 # resistance = np.nan # support = zs['GG'][-1]['value'] / xd['value'] - 1 elif xd['value'] > zs['ZG']['value']: xd_mark = -2 # 如果weight=1, 背驰,有可能2卖 # resistance = zs['GG'][-1]['value'] / xd['value'] - 1 # support = zs['ZG']['value'] / xd['value'] - 1 elif xd['value'] > zs['ZD']['value']: if sig['weight'] == 1: xd_mark = -2 
else: xd_mark = -2.5 # resistance = zs['ZG']['value'] / xd['value'] - 1 # support = zs['ZD']['value'] / xd['value'] - 1 elif xd['value'] > zs['DD'][-1]['value']: xd_mark = -3 # 三卖 # resistance = zs['ZD']['value'] / xd['value'] - 1 # support = zs['DD'][-1]['value'] / xd['value'] - 1 else: xd_mark = -4 # 三卖 # resistance = zs['DD'][-1]['value'] / xd['value'] - 1 # support = np.nan elif xd['fx_mark'] < 0: # 下降趋势 # sig.update(DD_macd=zs['DD'][-1].get('macd', np.nan), DD_avg_macd=zs['DD'][-1].get('avg_macd', np.nan)) # if zs['location'] < 0 and zs.get('zs_start', False): # sig.update(start_macd=zs['zs_start']['macd'], start_avg_macd=zs['zs_start']['avg_macd']) sig.update(boll=100 - self.indicators.boll[-1].get('LB', np.nan) / self.bars[-1]['low'] * 100) if xd['value'] > zs['GG'][-1]['value']: # >GG的情况不会出现,因为当3买没有确认时,离开段的最高点也归属于当前中枢 xd_mark = 4 # 三买 # resistance = np.nan # support = zs['GG'][-1]['value'] / xd['value'] - 1 elif xd['value'] > zs['ZG']['value']: xd_mark = 3 # resistance = zs['GG'][-1]['value'] / xd['value'] - 1 # support = zs['ZG']['value'] / xd['value'] - 1 elif xd['value'] > zs['ZD']['value']: if sig['weight'] == 1: xd_mark = 2 else: xd_mark = 2.5 # resistance = zs['ZG']['value'] / xd['value'] - 1 # support = zs['ZD']['value'] / xd['value'] - 1 elif xd['value'] >= zs['DD'][-1]['value']: # 如果和中枢最低点的值相同,归为2买,因为段没有升级 xd_mark = 2 # 如果weight=1, 背驰,有可能2买 # resistance = zs['ZD']['value'] / xd['value'] - 1 # support = zs['DD'][-1]['value'] / xd['value'] - 1 else: xd_mark = 1 # 如果weight=1, 背驰,有可能1买 # resistance = zs['DD'][-1]['value'] / xd['value'] - 1 # support = np.nan else: raise ValueError # sig.update(xd_mark=xd_mark, support=support * 100, resistance=resistance * 100) sig.update(xd_mark=xd_mark) start_xd = xd_list[-1] # 当前线段持续的时间和幅度,下跌趋势回撤的比例 sig.update(valueback=(self.bars[-1]['close'] / start_xd['value'] - 1) * 100) sig.update(timeback=xd['fx_mark']) if xd_mark in [3, -3, 4, -4]: # 3买卖点,macd指标比较没有意义 sig.update(start=start_xd['fx_start'], dif=0, macd=0) self.sig_list.append(sig) return direction = np.sign(xd['fx_mark']) xd_list.reverse() # 寻找段的起点,比较背离,一般是中枢+进入段的最高点或者最点 for idx, _xd in enumerate(xd_list[1:]): if idx % 2 == 0: # 同向段 if _xd['value'] * direction > xd['value'] * direction: break else: if _xd['value'] * direction < start_xd['value'] * direction: start_xd = _xd # break sig.update(start=start_xd['fx_start']) index = xd_list.index(start_xd) - 1 if index < 0: # 只有当前一笔,无法比较 sig.update(dif=0, macd=0) self.sig_list.append(sig) return cmp_xd = xd_list[index] compare_dif = cmp_xd.get('dif') compare_macd = cmp_xd.get('macd') dif = xd.get('dif') macd = xd.get('macd') if compare_dif and dif: if dif * direction > compare_dif * direction: sig.update(dif=-1) else: sig.update(dif=1) if compare_macd and macd: if macd * direction > compare_macd * direction: sig.update(macd=-1) else: sig.update(macd=1) self.sig_list.append(sig) def update(self): self.update_zs() # 计算对应买卖点 self.update_sig() return self.update_xd() def update_bi(new_bars: list, fx_list: list, bi_list: XdList, trade_date: list): """更新笔序列 笔标记对象样例:和分型标记序列结构一样 { 'date': Timestamp('2020-11-26 00:00:00'), 'code': code, 'fx_mark': 'd', 'value': 138.0, 'fx_start': Timestamp('2020-11-25 00:00:00'), 'fx_end': Timestamp('2020-11-27 00:00:00'), } { 'date': Timestamp('2020-11-26 00:00:00'), 'code': code, 'fx_mark': 'g', 'value': 150.67, 'fx_start': Timestamp('2020-11-25 00:00:00'), 'fx_end': Timestamp('2020-11-27 00:00:00'), } return: True 笔的数据出现更新,包括新增笔或者笔的延续 """ # 每根k线都要对bi进行判断 bar = new_bars[-1].copy() if TradeDate(bar['date']) < 
TradeDate(trade_date[-1]): # 包含的K线,不会改变bi的状态,不需要处理 return False if len(fx_list) < 2: return False bi = fx_list[-1].copy() # 没有笔时.最开始两个分型作为第一笔,增量更新时从数据库取出两个端点构成的笔时确定的 if len(bi_list) < 1: bi2 = fx_list[-2].copy() bi_list.append(bi2) bi_list.append(bi) bi_list.update_xd_eigenvalue() return False last_bi = bi_list[-1] bar.update(value=bar['high'] if bar['direction'] > 0 else bar['low']) # if bar['date'] > pd.to_datetime('2020-09-08'): # print('error') # k 线确认模式,当前K线的日期比分型K线靠后,说明进来的数据时K线 if TradeDate(bar['date']) > TradeDate(bi['fx_end']): if 'direction' not in last_bi: # bi的结尾是分型 # 趋势延续替代,首先确认是否延续, 由于处理过包含,高低点可能不正确,反趋势的极值点会忽略 # 下一根继续趋势,端点后移,如果继续反趋势,该点忽略 # todo 处理过包含的bar,有一个判断是多余的,直接用bar['value] 参与判断 if (last_bi['fx_mark'] > 0 and bar['high'] > last_bi['value']) \ or (last_bi['fx_mark'] < 0 and bar['low'] < last_bi['value']): bi_list[-1] = bar bi_list.update_xd_eigenvalue() return True try: kn_inside = trade_date.index(bar['date']) - trade_date.index(last_bi['fx_end']) - 1 except: print('error') # todo 至少2根k线, 时间确认必须被和前一笔方向相反,会出现端点不是极值点的情况 if kn_inside > 1 and bar['direction'] * last_bi['fx_mark'] < 0: # 寻找同向的第一根分型 index = -1 while TradeDate(bi['date']) > TradeDate(last_bi['date']): if bar['direction'] * bi['fx_mark'] > 0: break index = index - 1 bi = fx_list[index] if (bar['direction'] * bi['fx_mark'] > 0) \ and (np.sign(bar['direction']) * bar['value'] < bi['fx_mark'] * bi['value']): bi['fx_end'] = bar['date'] # 影响似乎不大? bi_list.append(bi) else: bi_list.append(bar) bi_list.update_xd_eigenvalue() return True # 只有一个端点,没有价格确认 if len(bi_list) < 2: return False # 价格确认 # todo 处理过包含的bar,有一个判断是多余的,直接用bar['value] 参与判断 if (last_bi['fx_mark'] < 0 and bar['high'] > bi_list[-2]['value']) \ or (last_bi['fx_mark'] > 0 and bar['low'] < bi_list[-2]['value']): bi_list.append(bar) bi_list.update_xd_eigenvalue() return True else: # 原有未出现分型笔的延续 assert bar['direction'] * last_bi['direction'] > 0 # if bar['direction'] * last_bi['direction'] < 0: # print('error') # return False bi_list[-1] = bar bi_list.update_xd_eigenvalue() return True return False # 非分型结尾笔,直接替换成分型, 没有新增笔,后续不需要处理,同一个端点确认 if 'direction' in last_bi or bi['date'] == last_bi['date']: bi_list[-1] = bi bi_list.update_xd_eigenvalue() return True # fx_end处理,分型处理完后,因为分型确认滞后,所以还需要对fx_end 也就是当前K线进行处理,否则会出现缺失或者识别滞后的问题 # 由于时分型,只需要判断延续的问题,因此K线的方向要和上一笔一致 def handle_fx_end(): assert bar['date'] == bi['fx_end'] if bar['direction'] * last_bi['fx_mark'] < 0: return False if last_bi['fx_mark'] * bar['value'] > last_bi['fx_mark'] * last_bi['value']: bi_list[-1] = bar bi_list.update_xd_eigenvalue() return True # 分型处理,连续高低点处理,只判断是否后移,没有增加笔 # bi的fx_mark不一定为+1或者-1,因为要用sign函数取符号 # todo 为什么用 and 连接两个 if 结果错误 if last_bi['fx_mark'] * bi['fx_mark'] > 0: if np.sign(last_bi['fx_mark']) * last_bi['value'] < bi['fx_mark'] * bi['value']: bi_list[-1] = bi bi_list.update_xd_eigenvalue() return True else: # 笔确认是条件1、时间破坏,两个不同分型间至少有一根K线,2、价格破坏,向下的一笔破坏了上一笔的低点 kn_inside = trade_date.index(bi['fx_start']) - trade_date.index(last_bi['fx_end']) - 1 if kn_inside > 0: # 两个分型间至少有1根k线,端点有可能不是高低点 index = -2 while TradeDate(fx_list[index]['date']) > TradeDate(last_bi['date']): # 分析的fx_mark取值为-1和+1 if (bi['fx_mark'] * fx_list[index]['fx_mark'] > 0) \ and (bi['fx_mark'] * bi['value'] < fx_list[index]['fx_mark'] * fx_list[index]['value']): bi = fx_list[index].copy() # 分型结尾不变 bi['fx_end'] = fx_list[-1]['fx_end'] index = index - 1 bi_list.append(bi) bi_list.update_xd_eigenvalue() return True # 只有一个端点,没有价格确认 if len(bi_list) < 2: return False # 价格确认 # todo 处理过包含的bar,有一个判断是多余的,直接用bar['value] 参与判断 if 
(bi['fx_mark'] > 0 and bi['value'] > bi_list[-2]['value']) \ or (bi['fx_mark'] < 0 and bi['value'] < bi_list[-2]['value']): bi_list.append(bi) bi_list.update_xd_eigenvalue() return True return handle_fx_end() class CzscBase: def __init__(self): # self.freq = freq # assert isinstance(code, str) # self.code = code.upper() self.trade_date = [] # 用来查找索引 self.bars = [] self.indicators = IndicatorSet(self.bars) # self.indicators = None self.new_bars = [] self.fx_list = [] self.xd_list = XdList(self.bars, self.indicators, self.trade_date) # bi作为线段的head self.sig_list = [] def update(self): # 有包含关系时,不可能有分型出现,不出现分型时才需要 self.indicators.update() try: update_fx(bars=self.bars, new_bars=self.new_bars, fx_list=self.fx_list, trade_date=self.trade_date) except: print('error') if not update_bi( new_bars=self.new_bars, fx_list=self.fx_list, bi_list=self.xd_list, trade_date=self.trade_date ): return # 新增确定性的笔才处理段 xd_list = self.xd_list result = True index = 0 while result: result = xd_list.update() # 计算对应买卖点 if len(xd_list.sig_list) > 0: signal = xd_list.sig_list[-1] # signal.update(xd=index) # self.sig_list.append(signal) if index == 0: signal.update(xd=0) self.sig_list.append(signal) else: # 有趋势或者中枢段升级 if xd_list.zs_list[-1]['location'] != 0 or xd_list.zs_list[-1]['weight'] > 7: last_sig = self.sig_list[-1] last_sig.update(xd=index, xd_mark=signal['xd_mark']) last_sig['real_loc'] = signal['real_loc'] last_sig['location'] = signal['location'] last_sig['weight'] = signal['weight'] last_sig['valueback'] = signal['valueback'] last_sig['timeback'] = signal['timeback'] # if signal['xd_mark'] in [1, -1]: last_sig['dif{}'.format(index)] = signal.get('dif') last_sig['macd{}'.format(index)] = signal.get('macd') # else: # util_log_info('High level xd {} == low level xd {}'.format(index, index - 1)) temp_list = xd_list xd_list = xd_list.next xd_list.prev = temp_list index = index + 1 # 必须实现,每次输入一个行情数据,然后调用update看是否需要更新 def on_bar(self, bar): """ 输入数据格式 Index(['open', 'high', 'low', 'close', 'amount', 'volume', 'date', 'code'], dtype='object') 'date' 未 timestamp volume用来画图 """ raise NotImplementedError class CzscMongo(CzscBase): def __init__(self, code='rul8', data=None, start=None, end=None, freq='day', exchange=None): # 只处理一个品种 super().__init__() self.code = code self.freq = freq self.exchange = exchange # self._bi_list = fetch_future_bi_day(self.code, limit=2, format='dict') self._bi_list = [] self.old_count = len(self._bi_list) if len(self._bi_list) > 0: # self.fx_list = self._bi_list start = self._bi_list[-1]['fx_end'] elif start is None: start = '1990-01-01' if data is None: self.data = get_bar(code, start=start, end=end, freq=freq, exchange=exchange) # self.data = get_bar(code, start, end='2020-12-09', freq=freq, exchange=exchange) else: self.data = data def draw(self, chart_path=None): if len(self.bars) < 1: return chart = kline_pro( kline=self.bars, fx=self.fx_list, bs=[], xd=self.xd_list, # title=self.code + '_' + self.freq, width='1520px', height='580px' title=self.code + '_' + self.freq, width='2540px', height='850px' ) if not chart_path: chart_path = 'E:\\signal\\{}_{}.html'.format(self.code, self.freq) chart.render(chart_path) webbrowser.open(chart_path) def on_bar(self, bar): """ bar 格式 date 默认为 Timestamp,主要时画图函数使用 """ bar = bar.to_dict() # if 'trade' in bar: # bar['vol'] = bar.pop('trade') # bar['date'] = pd.to_datetime(bar['date']) self.bars.append(bar) try: self.update() except Exception as error: util_log_info(error) def run(self, start=None, end=None): if self.data is None or self.data.empty: 
util_log_info('{} {} quote data is empty'.format(self.code, self.freq)) return self.data.apply(self.on_bar, axis=1) # self.save() def save(self, collection=FACTOR_DATABASE.future_bi_day): try: logging.info('Now Saving Future_BI_DAY==== {}'.format(str(self.code))) code = self.code old_count = self.old_count new_count = len(self._bi_list) # 更新的数据,最后一个数据是未确定数据 update_count = new_count - old_count if update_count < 2: return bi_list = self._bi_list[old_count:new_count - 1] start = bi_list[0]['date'] end = bi_list[-1]['date'] logging.info( 'UPDATE_Future_BI_DAY \n Trying updating {} from {} to {}'.format(code, start, end), ) collection.insert_many(bi_list) except Exception as error: print(error) def save_sig(self, collection=FACTOR_DATABASE.czsz_sig_day): try: logging.info('Now Saving CZSC_SIG_DAY==== {}'.format(str(self.code))) code = self.code xd = self.xd_list index = 0 sig = [] while xd: df =
pd.DataFrame(xd.sig_list)
pandas.DataFrame
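# --- Editor's note (added example, not part of the dataset row above) ---
# The row above ends in a pandas.DataFrame call built from xd.sig_list, a list
# of per-signal dicts. Below is a minimal, self-contained sketch of that
# list-of-dicts -> DataFrame pattern; the sample records and values are
# hypothetical, not taken from the original CZSC code.
import pandas as pd

sig_list = [
    {"date": "2021-07-28", "xd_mark": 2, "weight": 3},
    {"date": "2021-08-02", "xd_mark": -1, "weight": 1},
]
df = pd.DataFrame(sig_list)                 # one row per dict, one column per key
df["date"] = pd.to_datetime(df["date"])     # parse the date strings into Timestamps
print(df)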
# -*- coding: utf-8 -*- # Copyright (c) 2018-2021, earthobservations developers. # Distributed under the MIT License. See LICENSE for more info. import logging import operator from abc import abstractmethod from enum import Enum from typing import Dict, Generator, List, Tuple, Union import numpy as np import pandas as pd from pint import Quantity from pytz import timezone from tqdm import tqdm from wetterdienst.core.scalar.result import StationsResult, ValuesResult from wetterdienst.metadata.columns import Columns from wetterdienst.metadata.resolution import Resolution from wetterdienst.metadata.timezone import Timezone from wetterdienst.metadata.unit import REGISTRY, OriginUnit, SIUnit from wetterdienst.util.enumeration import parse_enumeration_from_template from wetterdienst.util.logging import TqdmToLogger log = logging.getLogger(__name__) class ScalarValuesCore: """ Core for sources of point data where data is related to a station """ # Fields for type coercion, needed for separation from fields with actual data # that have to be parsed differently when having data in tabular form @property def _meta_fields(self) -> List[str]: """ Metadata fields that are independent of actual values and should be parsed differently :return: list of strings representing the metadata fields/columns """ if not self.stations.stations.tidy: fields = [ Columns.STATION_ID.value, Columns.DATE.value, ] else: fields = [ Columns.STATION_ID.value, Columns.DATASET.value, Columns.PARAMETER.value, Columns.DATE.value, Columns.VALUE.value, Columns.QUALITY.value, ] return fields # Fields for date coercion _date_fields = [Columns.DATE.value, Columns.FROM_DATE.value, Columns.TO_DATE.value] # TODO: add data type (forecast, observation, ...) # @property # @abstractmethod # def _has_quality(self) -> bool: # """Attribute that tells if a weather service has quality, which otherwise will # have to be set to NaN""" # pass @property def data_tz(self) -> timezone: """ Timezone of the published data """ return timezone(self._data_tz.value) @property @abstractmethod def _data_tz(self) -> Timezone: """ Timezone enumeration of published data. """ pass @property @abstractmethod def _irregular_parameters(self) -> Tuple[str]: """Declaration of irregular parameters which will have to be parsed differently then others e.g. when a parameter is a date.""" pass @property @abstractmethod def _integer_parameters(self) -> Tuple[str]: """ Integer parameters that will be parsed to integers. """ pass @property @abstractmethod def _string_parameters(self) -> Tuple[str]: """ String parameters that will be parsed to integers. 
""" pass @property def _complete_dates(self) -> pd.DatetimeIndex: """ Complete datetime index for the requested start and end date, used for building a complementary pandas DataFrame with the date column on which other DataFrames can be joined on :return: pandas.DatetimeIndex """ start_date, end_date = self.stations.start_date, self.stations.end_date if self.stations.stations.resolution == Resolution.MONTHLY: end_date += pd.Timedelta(days=31) elif self.stations.stations.resolution == Resolution.ANNUAL: end_date += pd.Timedelta(year=366) date_range = pd.date_range( start_date, end_date, freq=self.stations.frequency.value, tz=self.data_tz, ) return date_range @property def _base_df(self) -> pd.DataFrame: """ Base dataframe which is used for creating empty dataframes if no data is found or for merging other dataframes on the full dates :return: pandas DataFrame with a date column with complete dates """ return pd.DataFrame({Columns.DATE.value: self._complete_dates}) def convert_values_to_si(self, df: pd.DataFrame, dataset) -> pd.DataFrame: """ Function to convert values to metric units with help of conversion factors :param df: pandas DataFrame that should be converted to SI units :param dataset: dataset for which the conversion factors are created :return: pandas DataFrame with converted (SI) values """ def _convert_values_to_si(series): """ Helper function to apply conversion factors column wise to a pandas DataFrame :param series: pandas Series that should be converted :return: converted pandas Series """ op, factor = conversion_factors.get(series.name, (None, None)) if not op or not factor: return series return op(series, factor) conversion_factors = self._create_conversion_factors(dataset) df = df.apply(_convert_values_to_si, axis=0) return df def _create_conversion_factors( self, dataset ) -> Dict[str, Tuple[Union[operator.add, operator.mul], float]]: """ Function to create conversion factors based on a given dataset :param dataset: dataset for which conversion factors are created :return: dictionary with conversion factors for given parameter name """ dataset = dataset.name dataset_accessor = self.stations.stations._dataset_accessor if self.stations.stations._unique_dataset: units = self.stations.stations._unit_tree[dataset_accessor] else: units = self.stations.stations._unit_tree[dataset_accessor][dataset] conversion_factors = {} # TODO eventually we may split this into smaller functions for parameter in units: origin_unit, si_unit = parameter.value # Get parameter name parameter = parameter.name if self.stations.stations._unique_dataset: parameter_value = self.stations.stations._dataset_tree[ dataset_accessor ][parameter].value else: parameter_value = self.stations.stations._dataset_tree[ dataset_accessor ][dataset][parameter].value if si_unit == SIUnit.KILOGRAM_PER_SQUARE_METER.value: # Fixed conversion factors to kg / m², as it only applies # for water with density 1 g / cm³ if origin_unit == OriginUnit.MILLIMETER.value: conversion_factors[parameter_value] = (operator.mul, 1) else: raise ValueError( "manually set conversion factor for precipitation unit" ) elif si_unit == SIUnit.DEGREE_KELVIN.value: # Apply offset addition to temperature measurements # Take 0 as this is appropriate for adding on other numbers # (just the difference) degree_offset = Quantity(0, origin_unit).to(si_unit).magnitude conversion_factors[parameter_value] = (operator.add, degree_offset) elif si_unit == SIUnit.PERCENT.value: factor = REGISTRY(str(origin_unit)).to(str(si_unit)).magnitude 
conversion_factors[parameter_value] = (operator.mul, factor) else: # For multiplicative units we need to use 1 as quantity to apply the # appropriate factor conversion_factors[parameter_value] = ( operator.mul, Quantity(1, origin_unit).to(si_unit).magnitude, ) return conversion_factors def __init__(self, stations: StationsResult) -> None: self.stations = stations @classmethod def from_stations(cls, stations: StationsResult): return cls(stations) def __eq__(self, other): """ Equal method of request object """ return ( self.stations.station_id == other.stations.station_id and self.stations.parameter == other.stations.parameter and self.stations.start_date == other.stations.start_date and self.stations.end_date == other.stations.end_date ) pass def __str__(self): """ Str representation of request object """ # TODO: include source # TODO: include data type station_ids_joined = "& ".join( [str(station_id) for station_id in self.stations.station_id] ) parameters_joined = "& ".join( [ parameter.value for parameter, parameter_set in self.stations.stations.parameter ] ) return ", ".join( [ f"station_ids {station_ids_joined}", f"parameters {parameters_joined}", str(self.stations.start_date), str(self.stations.end_date), ] ) pass def _create_empty_station_parameter_df( self, station_id: str, parameter: Enum, dataset: Enum ) -> pd.DataFrame: """ Function to create an empty DataFrame :param station_id: :param parameter: :return: """ dataset_tree = self.stations.stations._dataset_tree resolution = self.stations.stations.resolution # if parameter is a whole dataset, take every parameter from the dataset instead if parameter == dataset: if self.stations.stations._unique_dataset: parameter = [*dataset_tree[resolution.name]] else: parameter = [*dataset_tree[resolution.name][dataset.name]] if self.stations.stations.tidy: if not self.stations.stations.start_date: return pd.DataFrame(None, columns=self._meta_fields) data = [] for par in
pd.Series(parameter)
pandas.Series
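# --- Editor's note (added example, not part of the dataset row above) ---
# The row above ends in pandas.Series wrapping a plain Python list so it can be
# iterated ("for par in pd.Series(parameter)"). Minimal sketch below; the
# parameter names are placeholders, not real wetterdienst enumeration members.
import pandas as pd

parameter = ["temperature_air_200", "precipitation_height", "wind_speed"]
for par in pd.Series(parameter):   # iterating a Series yields its values
    print(par)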
import pandas as pd


def get_df(filepath, filetype='infer', index=None):
    if filetype == 'infer':
        filetype = filepath.split('.')[-1]

    # Read in file as DataFrame
    if filetype == 'csv':
        df = pd.read_csv(filepath)
    elif filetype == 'pickle' or filetype == 'pkl':
        df = pd.read_pickle(filepath)
    else:
        print(f"Error, filetype {filetype} not understood.")
        return -1

    # Set index
    if index:
        df = df.set_index(index)
        df.sort_index(ascending=True, inplace=True)

    return df


def concat_dfs(df1, df2, how='inner'):
    # Assumes DataFrames have the same index
    concatenated_df = 
pd.merge(df1, df2, how=how)
pandas.merge
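# --- Editor's note (added example, not part of the dataset row above) ---
# The row above ends in pandas.merge. Called without an `on=` argument, as in
# the concat_dfs helper, pd.merge joins on the columns the two frames share
# (not on the index). The toy frames below are illustrative only.
import pandas as pd

df1 = pd.DataFrame({"key": ["x", "y", "z"], "a": [1, 2, 3]})
df2 = pd.DataFrame({"key": ["x", "y"], "b": [10, 20]})
merged = pd.merge(df1, df2, how="inner")   # joins on the shared 'key' column
print(merged)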
# Handle Rcat serial io
import serial
import requests
import io
import threading
import time
import pandas as pd
import math
import urllib.parse


class SerialIO:
    def __init__(self):
        self.ser = None
        self.dataBuffer = []
        self.thread = None
        self.active = False
        self.lastcall = 
pd.Timestamp.now()
pandas.Timestamp.now
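# --- Editor's note (added example, not part of the dataset row above) ---
# The row above ends in pandas.Timestamp.now(), used to initialise
# self.lastcall. A plausible (assumed, not confirmed by the snippet) use is
# rate-limiting a polling loop; the 1-second threshold below is invented.
import pandas as pd

lastcall = pd.Timestamp.now()
# ... later, e.g. inside a read loop ...
if pd.Timestamp.now() - lastcall > pd.Timedelta(seconds=1):
    lastcall = pd.Timestamp.now()
    # hypothetical periodic work would go here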
import warnings warnings.simplefilter(action='ignore', category=FutureWarning) import os import sys import threading from queue import Queue import pandas as pd from datetime import datetime, timedelta import time import numpy as np import json import toml import random import names import string import itertools as it import redis import pathos from pathos.pools import ProcessPool # Local from dragg.mpc_calc import MPCCalc, manage_home from dragg.redis_client import RedisClient from dragg.logger import Logger class Aggregator: def __init__(self): self.log = Logger("aggregator") self.data_dir = os.path.expanduser(os.environ.get('DATA_DIR','data')) self.outputs_dir = os.path.join('outputs') if not os.path.isdir(self.outputs_dir): os.makedirs(self.outputs_dir) self.config_file = os.path.join(self.data_dir, os.environ.get('CONFIG_FILE', 'config.toml')) self.ts_data_file = os.path.join(self.data_dir, os.environ.get('SOLAR_TEMPERATURE_DATA_FILE', 'nsrdb.csv')) self.spp_data_file = os.path.join(self.data_dir, os.environ.get('SPP_DATA_FILE', 'spp_data.xlsx')) self.required_keys = { "community": {"total_number_homes"}, "home": { "hvac": {"r_dist", "c_dist", "p_cool_dist", "p_heat_dist", "temp_sp_dist", "temp_deadband_dist"}, "wh": {"r_dist", "c_dist", "p_dist", "sp_dist", "deadband_dist", "size_dist", "waterdraw_file"}, "battery": {"max_rate", "capacity", "cap_bounds", "charge_eff", "discharge_eff", "cons_penalty"}, "pv": {"area", "efficiency"}, "hems": {"prediction_horizon", "discomfort", "disutility"} }, "simulation": {"start_datetime", "end_datetime", "random_seed", "load_zone", "check_type", "run_rbo_mpc"}, # "agg": {"action_horizon", "forecast_horizon", "base_price", "max_rp", "subhourly_steps"} "agg": {"base_price", "subhourly_steps"} } self.timestep = None # Set by redis_set_initial_values self.iteration = None # Set by redis_set_initial_values self.reward_price = None # Set by redis_set_initial_values self.start_hour_index = None # Set by calc_star_hour_index self.agg_load = 0 # Reset after each iteration self.collected_data = {} self.baseline_agg_load_list = [] # Aggregate load at every timestep from the baseline run self.max_agg_load = None # Set after baseline run, the maximum aggregate load over all the timesteps self.max_agg_load_list = [] self.start_dt = None # Set by _set_dt self.end_dt = None # Set by _set_dt self.hours = None # Set by _set_dt self.dt = None # Set by _set_dt self.num_timesteps = None # Set by _set_dt self.all_homes = None # Set by get_homes self.redis_client = RedisClient() self.config = self._import_config() self.check_type = self.config['simulation']['check_type'] # One of: 'pv_only', 'base', 'battery_only', 'pv_battery', 'all' self.thermal_trend = None self.max_daily_temp = None self.max_daily_ghi = None self.min_daily_temp = None self.prev_load = None self.ts_data = self._import_ts_data() # Temp: degC, RH: %, Pressure: mbar, GHI: W/m2 self._set_dt() self.spp_data = self._import_spp_data() # SPP: $/kWh self.tou_data = self._build_tou_price() # TOU: $/kWh self.all_data = self.join_data() self.all_rps = np.zeros(self.num_timesteps) self.all_sps = np.zeros(self.num_timesteps) self.case = "baseline" def _import_config(self): if not os.path.exists(self.config_file): self.log.logger.error(f"Configuration file does not exist: {self.config_file}") sys.exit(1) with open(self.config_file, 'r') as f: data = toml.load(f) d_keys = set(data.keys()) req_keys = set(self.required_keys.keys()) if not req_keys.issubset(d_keys): missing_keys = req_keys - d_keys 
self.log.logger.error(f"{missing_keys} must be configured in the config file.") sys.exit(1) else: for subsystem in self.required_keys.keys(): req_keys = set(self.required_keys[subsystem]) given_keys = set(data[subsystem]) if not req_keys.issubset(given_keys): missing_keys = req_keys - given_keys self.log.logger.error(f"Parameters for {subsystem}: {missing_keys} must be specified in the config file.") sys.exit(1) self.log.logger.info(f"Set the version write out to {data['simulation']['named_version']}") return data def _set_dt(self): """ Convert the start and end datetimes specified in the config file into python datetime objects. Calculate the number of hours for which the simulation will run. :return: """ try: self.start_dt = datetime.strptime(self.config['simulation']['start_datetime'], '%Y-%m-%d %H') self.end_dt = datetime.strptime(self.config['simulation']['end_datetime'], '%Y-%m-%d %H') except ValueError as e: self.log.logger.error(f"Error parsing datetimes: {e}") sys.exit(1) self.hours = self.end_dt - self.start_dt self.hours = int(self.hours.total_seconds() / 3600) self.num_timesteps = int(np.ceil(self.hours * self.dt)) self.log.logger.info(f"Start: {self.start_dt.isoformat()}; End: {self.end_dt.isoformat()}; Number of hours: {self.hours}") def _import_ts_data(self): """ Import timeseries data from file downloaded from NREL NSRDB. The function removes the top two lines. Columns which must be present: ["Year", "Month", "Day", "Hour", "Minute", "Temperature", "GHI"] Renames 'Temperature' to 'OAT' :return: pandas.DataFrame, columns: ts, GHI, OAT """ if not os.path.exists(self.ts_data_file): self.log.logger.error(f"Timeseries data file does not exist: {self.ts_data_file}") sys.exit(1) df = pd.read_csv(self.ts_data_file, skiprows=2) self.dt = int(self.config['agg']['subhourly_steps']) self.dt_interval = 60 // self.dt reps = [np.ceil(self.dt/2) if val==0 else np.floor(self.dt/2) for val in df.Minute] df = df.loc[np.repeat(df.index.values, reps)] interval_minutes = self.dt_interval * np.arange(self.dt) n_intervals = len(df.index) // self.dt x = np.tile(interval_minutes, n_intervals) df.Minute = x df = df.astype(str) df['ts'] = df[["Year", "Month", "Day", "Hour", "Minute"]].apply(lambda x: ' '.join(x), axis=1) df = df.rename(columns={"Temperature": "OAT"}) df["ts"] = df["ts"].apply(lambda x: datetime.strptime(x, '%Y %m %d %H %M')) df = df.filter(["ts", "GHI", "OAT"]) df[["GHI", "OAT"]] = df[["GHI", "OAT"]].astype(int) self.oat = df['OAT'].to_numpy() self.ghi = df['GHI'].to_numpy() df = df.set_index('ts') day_of_year = 0 self.thermal_trend = self.oat[4 * self.dt] - self.oat[0] self.max_daily_temp = max(self.oat[day_of_year*(self.dt*24):(day_of_year+1)*(self.dt*24)]) self.min_daily_temp = min(self.oat[day_of_year*(self.dt*24):(day_of_year+1)*(self.dt*24)]) self.max_daily_ghi = max(self.ghi[day_of_year*(self.dt*24):(day_of_year+1)*(self.dt*24)]) return df def _import_spp_data(self): """ Settlement Point Price (SPP) data as extracted from ERCOT historical DAM Load Zone and Hub Prices. url: http://www.ercot.com/mktinfo/prices. Only keeps SPP data, converts to $/kWh. Subtracts 1 hour from time to be inline with 23 hour day as required by pandas. 
:return: pandas.DataFrame, columns: ts, SPP """ if not self.config['agg']['spp_enabled']: return if not os.path.exists(self.spp_data_file): self.log.logger.error(f"SPP data file does not exist: {self.spp_data_file}") sys.exit(1) df_all = pd.read_excel(self.spp_data_file, sheet_name=None) k1 = list(df_all.keys())[0] df = df_all[k1] for k, v in df_all.items(): if k == k1: pass else: df = df.append(v, ignore_index=True) df = df[df["Settlement Point"] == self.config['simulation']['load_zone']] df["Hour Ending"] = df["Hour Ending"].str.replace(':00', '') df["Hour Ending"] = df["Hour Ending"].apply(pd.to_numeric) df["Hour Ending"] = df["Hour Ending"].apply(lambda x: x - 1) df["Hour Ending"] = df["Hour Ending"].astype(str) df['ts'] = df[["Delivery Date", "Hour Ending"]].apply(lambda x: ' '.join(x), axis=1) df = df.drop(columns=['Delivery Date', 'Hour Ending', 'Repeated Hour Flag', 'Settlement Point']) df = df.rename(columns={"Settlement Point Price": "SPP"}) col_order = ["ts", "SPP"] df = df[col_order] df["ts"] = datetime.strptime(df['ts'], '%m/%d/%Y %H') df["SPP"] = df['SPP'] / 1000 df = df.set_index('ts') return df def _build_tou_price(self): df = pd.DataFrame(index=
pd.date_range(start=self.start_dt, periods=self.hours, freq='H')
pandas.date_range
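# --- Editor's note (added example, not part of the dataset row above) ---
# The row above ends in pandas.date_range, used to give the TOU-price frame an
# hourly DatetimeIndex over the simulation window. Sketch below with made-up
# start date and horizon; only the call pattern mirrors the original.
import pandas as pd

start_dt = pd.Timestamp("2015-01-01 00:00")
hours = 24
idx = pd.date_range(start=start_dt, periods=hours, freq="H")
df_tou = pd.DataFrame(index=idx)
df_tou["tou_price"] = 0.10   # placeholder flat rate in $/kWh, not a real tariff
print(df_tou.head())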
import sklearn from pprint import pprint # Standard Imports (Data Manipulation and Graphics) import numpy as np # Load the Numpy library with alias 'np' import pandas as pd # Load the Pandas library with alias 'pd' import seaborn as sns # Load the Seabonrn, graphics library with alias 'sns' import copy from scipy import stats from scipy import interp from os import listdir; from os.path import isfile, join from itertools import islice from IPython import display import ipywidgets as widgets import itertools import os; import sys # Matplotlib pyplot provides plotting API import matplotlib as mpl from matplotlib import pyplot as plt import chart_studio.plotly.plotly as py import matplotlib.image as mpimg # Preprocessing Imports # from sklearn.preprocessing import StandardScaler from sklearn import preprocessing from sklearn.decomposition import PCA from sklearn.decomposition import KernelPCA from sklearn.model_selection import train_test_split from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import StandardScaler # Standardize data (0 mean, 1 stdev) from sklearn.preprocessing import Normalizer # Normalize data (length of 1) from sklearn.preprocessing import Binarizer # Binarization # Imports for handling Training from sklearn.pipeline import Pipeline from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import GridSearchCV # After Training Analysis Imports from sklearn import metrics from sklearn.metrics import roc_curve, auc # Classifiers Imports # SVMs Classifieres from sklearn.svm import LinearSVC from sklearn.linear_model import SGDClassifier from sklearn import svm # Bayesian Classifieres from sklearn.naive_bayes import MultinomialNB from sklearn.naive_bayes import GaussianNB # Decision Tree Classifieres from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import BaggingClassifier from sklearn.ensemble import RandomForestClassifier # Import scikit-learn classes: Hyperparameters Validation utility functions. from sklearn.model_selection import cross_val_score from sklearn.model_selection import LeavePOut from sklearn.model_selection import LeaveOneOut from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import validation_curve from sklearn.model_selection import learning_curve # Import scikit-learn classes: model's evaluation step utility functions. 
from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.metrics import plot_roc_curve from sklearn.metrics import roc_curve from sklearn.metrics import classification_report # --------------------------------------------------------------------------- # # Confusion Matirx & Roc Curve Custom # --------------------------------------------------------------------------- # def plot_conf_matrix(model, Xtest, ytest, title=None, plot_name="conf_matrix.png", show_figure=False, ax=None): y_model = model.predict(Xtest) mat = confusion_matrix(ytest, y_model) if ax is None: fig = plt.figure() sns.heatmap(mat, square=True, annot=True, cbar=False) plt.xlabel('predicted value') plt.ylabel('true value') if title: plt.title(title) plt.savefig(plot_name) if show_figure is True: plt.show() else: plt.close(fig) else: sns.heatmap(mat, square=True, annot=True, cbar=False, ax=ax) ax.set_xlabel('predicted value') ax.set_ylabel('true value') if title: ax.set_title(title) pass pass def plot_roc_curve_custom(model, X_test, y_test, label=None, title=None, plot_name="roc_curve.png", show_figure=False, ax=None): y_pred = model.predict_proba(X_test) # print('y_test', type(y_test)); print('y_pred', type(y_pred)); # print('y_test', y_test.shape); print('y_pred', y_pred.shape); # print('y_test', y_test[0], 'y_pred', y_pred[0]) # y_test_prob = np.array(list(map(lambda xi: [1, 0] if xi == 0 else [0, 1], y_test))) # fpr, tpr, _ = roc_curve(y_test_prob, y_pred) y_pred = np.argmax(y_pred, axis=1) fpr, tpr, _ = roc_curve(y_test, y_pred) roc_auc = auc(fpr, tpr) if ax is None: fig = plt.figure() plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % (roc_auc,)) plt.plot([0, 1], [0, 1], 'k--') plt.xlabel('False positive rate') plt.ylabel('True positive rate') if title: plt.title('ROC curve: {} | Auc {}'.format(title, f"{roc_auc:.2f}")) else: plt.title('ROC curve') plt.legend(loc='best') plt.savefig(plot_name) # plt.show() if show_figure is True: plt.show() else: plt.close(fig) else: ax.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % (roc_auc,)) ax.plot([0, 1], [0, 1], 'k--') ax.set_xlabel('False positive rate') ax.set_ylabel('True positive rate') if title: ax.set_title('ROC curve: {} | Auc {}'.format(title, f"{roc_auc:.2f}")) else: ax.set_title('ROC curve') ax.legend(loc='best') # plt.savefig(plot_name) # plt.show() pass return roc_auc def show_plots_fit_by_n(clf, kernel, n_components, Xtest, ytest): # Shos some plots if 'show_plot' flag is valued as True plot_roc_curve_custom( clf, Xtest, ytest, 'n_components={} | kernel={}'.format(n_components, kernel)) plot_conf_matrix( clf, Xtest, ytest, title='n_components={} | kernel={}'.format(10, kernel)) pass def add_records(data, cv_list, res_kf, res_loo, res_sscv): # record = list(map(lambda xi: f"{xi[0]:.2f} (+/-) {xi[1]:.2f}", [xi[1:] for xi in res_kf])) record_acc = list(map(lambda xi: f"{xi[1]:.2f}", [xi for xi in res_kf])) record_std = list(map(lambda xi: f"(+/-) {xi[2]:.2f}", [xi for xi in res_kf])) record = list(itertools.chain.from_iterable(list(zip(record_acc, record_std)))) record = record + [f"{res_loo[0]:.2f}"] record = record + [f"(+/-) {res_loo[1]:.2f}"] record = record + [f"{res_sscv[0]:.2f}"] record = record + [f"(+/-) {res_sscv[1]:.2f}"] # print('len record:', len(record)) if len(data) == 0: data = [[]] * (len(cv_list) + 2) for ii in range(0, len(data)): # print([record[ii*2], record[ii*2+1]]) data[ii] = data[ii] + [record[ii*2], record[ii*2+1]] # print(f'len data[{ii}]:', len(data[ii])) # data.append(copy.deepcopy(record)) # 
print(data) pass return data def KernelPCA_transform_data(n_components, kernel, Xtrain, Xtest=None, verbose=0): if verbose == 1: print('KernelPCA') print('-' * 100) # Perform kernel PCA kernel_pca =KernelPCA( \ n_components=n_components, \ kernel=kernel) if verbose == 1: print('KernelPCA - Fit') print('-' * 100) kernel_pca.fit(Xtrain) # Transform data accordingly with current Kernel Pca mode if verbose == 1: print('KernelPCA - Transform') print('-' * 100) Xtrain_transformed = kernel_pca.transform(Xtrain) if Xtest is None: return Xtrain_transformed, None Xtest_transformed = kernel_pca.transform(Xtest) return Xtrain_transformed, Xtest_transformed def prepare_output_df(cv_list, pca_kernels_list, data): # col_names_acc = list(map(lambda xi: f"ACC(cv={xi})", cv_list)) # col_names_st = list(map(lambda xi: f"STD(cv={xi})", cv_list)) # col_names = list(itertools.chain.from_iterable(list(zip(col_names_acc, col_names_st)))) # col_names = col_names + ['ACC(loo)', 'STD(loo)', 'ACC(Stfd-CV)', 'STD(Stfd-CV)'] col_names = list(map(lambda xi: f"CV={xi}".lower(), cv_list)) col_names = col_names + ['loo'.lower(), 'Stfd-CV'.lower()] idx_names = copy.deepcopy(col_names) col_names = [] for kernel in pca_kernels_list: col_names = col_names + [f"{kernel} - ACC".lower().capitalize(), f"{kernel} - STD".lower().capitalize()] # df = pd.DataFrame(data=data, columns=col_names, index=pca_kernels_list) # pprint(data) # pprint(col_names) df = pd.DataFrame(data=data, columns=col_names, index=idx_names) return df def prepare_output_df_baseline_fit(pca_kernels_list, data, estimator_name): col_names = [] for kernel in pca_kernels_list: col_names = col_names + [f"{kernel} - ACC".lower().capitalize(), f"{kernel} - F1".lower().capitalize()] df = pd.DataFrame(data=[data], columns=col_names, index=[estimator_name]) return df def prepare_output_df_grid_search(grid_searchs, pca_kernels, estimator_names, flag_no_computation=False): if flag_no_computation is True: return None, None data, data_auc = [], [] col_params_names = None for _, a_grid_search in enumerate(grid_searchs): tmp_res, tmp_auc = [], [] for _, (a_grid, _, auc, acc_test) in enumerate(a_grid_search): best_params_values = list(map(str, a_grid.best_params_.values())) # best_score = "%.2f" % (a_grid.best_score_,) # tmp_res = ([best_score] + best_params_values) best_score_tst = "%.2f" % (acc_test,) best_score_train = "%.2f" % (a_grid.best_score_,) tmp_res = ([best_score_train, best_score_tst] + best_params_values) tmp_auc.append("%.2f" % (auc,)) col_params_names = list(a_grid.best_params_.keys()) data.append(tmp_res) pass # data.append(tmp_res) data_auc.append(tmp_auc) pass # col_names = [f'{k} Acc' for k in pca_kernels] col_names = ["Acc Train", "Acc Test"] + col_params_names indeces = [] for estimator_name in estimator_names: indeces.extend([f'{estimator_name} {k}' for k in pca_kernels]) df = pd.DataFrame(data=data, columns=col_names, index=indeces) col_names = [f'{k} AUC' for k in pca_kernels] df_auc = pd.DataFrame(data=data_auc, columns=col_names, index=estimator_names) return df, df_auc # --------------------------------------------------------------------------- # # Utilities Functions Custom Stratified Training and Test Set Creation # --------------------------------------------------------------------------- # def get_indices(class_ith_indeces, chunks=2): divisor = len(class_ith_indeces) // chunks max_len = max(len(class_ith_indeces) - divisor, divisor) p1a = class_ith_indeces[:max_len] p2a = class_ith_indeces[max_len:] return [p1a, p2a] def get_data(p_train, 
p_test, X, y): ytrain_ = np.array([y[ii] for ii in p_train]) ytest_ = np.array([y[ii] for ii in p_test]) Xtrain_ = np.array([np.array(X[ii]) for ii in p_train]) Xtest_ = np.array([np.array(X[ii]) for ii in p_test]) assert len(ytrain_) == len(Xtrain_), f"Train {len(ytrain_)} != {len(Xtrain_)} Test {len(ytest_)} ?? {len(Xtest_)}" assert len(ytest_) == len(Xtest_),f"Train {len(ytrain_)} ?? {len(Xtrain_)} Test {len(ytest_)} != {len(Xtest_)}" return Xtrain_, Xtest_, ytrain_, ytest_ def get_stratified_groups(X, y): # Get N-stratified Groups class_0_indeces = list(map(lambda val: val[0], filter(lambda val: val[1] == -1, enumerate(y)))) class_1_indeces = list(map(lambda val: val[0], filter(lambda val: val[1] == 1, enumerate(y)))) p_class0 = get_indices(class_0_indeces) p_class1 = get_indices(class_1_indeces) # ytrain_ = [y[ii]for ii in p1a] + [y[ii]for ii in p1b] # ytest_ = [y[ii]for ii in p2a] + [y[ii]for ii in p2b] p_train = p_class0[0] + p_class1[0] p_test = p_class0[1] + p_class1[1] Xtrain_, Xtest_, ytrain_, ytest_ = get_data(p_train, p_test, X, y) return Xtrain_, Xtest_, ytrain_, ytest_ def create_widget_list_df(df_list, show_widget=False): res_list = [] for df in df_list: if show_widget is True: widget = widgets.Output() with widget: display.display(df); pass res_list.append(widget) else: print(df) if show_widget is True: hbox = widgets.HBox(res_list) return hbox return def create_widget_list_df_vertical(df_list, show_widget=False): res_list = [] for df in df_list: if show_widget is True: widget = widgets.Output() with widget: display.display(df); pass res_list.append(widget) else: print(df) pass if show_widget is True: vbox = widgets.VBox(res_list) return vbox return def merge_dfs_by_common_columns(df1, df2, axis=0, ignore_index=True): if df2 is None: return df1 elif df1 is None: return df2 res = list(set(df1.columns).intersection(set(df2.columns))) df_res = pd.concat([df1[res], df2[res]], axis=axis, ignore_index=ignore_index) if df1.index.equals(df2.index) is False: indeces = pd.Index(list(df1.index) + list(df2.index)) return df_res.set_index(indeces) return df_res def reshape_dfs_acc(list_df, num_col=4, n_cp_list=[2, 9, 11]): assert len(list_df) == len(n_cp_list) updated_list = [] for df, ncp in zip(list_df, n_cp_list): indeces = list(df.index) estimators_names = list(set(list(map(lambda xi: xi.split(" ")[0], indeces)))) columns_names = list(set(list(map(lambda xi: xi.split(" ")[1], indeces)))) data = [] for ii in range(0, df.shape[0], num_col): a_record = df.iloc[ii:(ii+num_col), 0].values data.append(a_record) pass columns_names = list(map(lambda xi: f"{xi}(PCs={ncp})", columns_names)) df = pd.DataFrame(data=data, columns=columns_names, index=estimators_names) updated_list.append(df) return updated_list def show_df_with_mean_at_bottom(df): # show_df_with_mean_at_bottom(df_strfd) # df_strfd.head(df_strfd.shape[0]) def s2f(a_str): if a_str.startswith("("): return float(a_str[5:]) return float(a_str) result = df.applymap(s2f).mean(axis=0) def f2s(a_num): return "%.2f" % (a_num, ) data = np.array(list(map(f2s, result.values))) df_tmp =
pd.DataFrame(data=[data], columns=df.columns, index=["Mean Values"])
pandas.DataFrame
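# --- Editor's note (added example, not part of the dataset row above) ---
# The row above ends in pandas.DataFrame building a one-row "Mean Values"
# summary, mirroring show_df_with_mean_at_bottom above. The accuracy strings
# below are invented inputs used only to make the sketch runnable.
import pandas as pd

df = pd.DataFrame({"RFR": ["0.81", "0.84"], "SVM": ["0.77", "0.79"]})
means = df.astype(float).mean(axis=0)              # column-wise means
data = ["%.2f" % v for v in means.values]          # back to formatted strings
df_mean = pd.DataFrame(data=[data], columns=df.columns, index=["Mean Values"])
print(df_mean)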
import unittest import pandas as pd import pandas.util.testing as pt import tia.util.fmt as fmt def tof(astr): return float(astr.replace(",", "")) class TestFormat(unittest.TestCase): def ae(self, expected, fct, value, **kwargs): cb = fct(**kwargs) actual = cb(value) self.assertEqual(expected, actual) def test_default_formats(self): B = float("-1,250,500,880.76".replace(",", "")) M = B / 1000.0 k = M / 1000.0 p = k / 1000000.0 tests = [ (B, "$(1.3B)", fmt.BillionDollarsFormatter), (B, "(1.3B)", fmt.BillionsFormatter), (M, "$(1.3M)", fmt.MillionDollarsFormatter), (M, "(1.3M)", fmt.MillionsFormatter), (k, "$(1.3k)", fmt.ThousandDollarsFormatter), (k, "(1.3k)", fmt.ThousandsFormatter), (k, "(1,250.50)", fmt.FloatFormatter), (k, "(1,251)", fmt.IntFormatter), # Floats (k, "-1,251", fmt.new_int_formatter(commas=1, parens=False)), (k, "-1251", fmt.new_int_formatter(commas=0, parens=False)), (abs(k), "1251", fmt.new_int_formatter(commas=0, parens=False)), (abs(k), "1,251", fmt.new_int_formatter(commas=1)), (str(k), "-1,251", fmt.new_int_formatter(commas=1, coerce=True, parens=0)), # Ints (k, "-1,251", fmt.new_int_formatter(commas=1, parens=False)), (k, "-1251", fmt.new_int_formatter(commas=0, parens=False)), (abs(k), "1251", fmt.new_int_formatter(commas=0, parens=False)), (abs(k), "1,251", fmt.new_int_formatter(commas=1)), # Percents (0.12433, "12.4%", fmt.new_percent_formatter(commas=1, precision=1)), (0.12433, "12.433%", fmt.new_percent_formatter(commas=1, precision=3)), ( -0.12433, "-12.4%", fmt.new_percent_formatter(commas=1, parens=0, precision=1), ), ( -0.12433, "(12.4%)", fmt.new_percent_formatter(commas=1, parens=1, precision=1), ), ] for val, expected, fct in tests: actual = fct(val) self.assertEqual(expected, actual) # Test if it were a list actual = fct([val] * 5) self.assertEqual([expected] * 5, actual) # Test if it were a series actual = fct(pd.Series([val] * 5)) pt.assert_series_equal(pd.Series([expected] * 5), actual) # Test if it were a DataFrame actual = fct(pd.DataFrame({"a": [val] * 5, "b": [val] * 5})) pt.assert_frame_equal( pd.DataFrame({"a": [expected] * 5, "b": [expected] * 5}), actual ) def test_fmt_datetime(self): self.assertEqual( fmt.new_datetime_formatter("%Y-%m")(pd.to_datetime("1/1/2013")), "2013-01" ) def test_guess_formatter(self): for n, t in (3, "k"), (6, "M"), (9, "B"): m = 10 ** n s = pd.Series([2.1 * m, -20.1 * m, 200.1 * m]) actual = fmt.guess_formatter(s, precision=1)(s) expected = pd.Series(["2.1" + t, "(20.1%s)" % t, "200.1" + t]) pt.assert_series_equal(expected, actual) # percents s = pd.Series([0.024, -0.561, 0.987]) actual = fmt.guess_formatter(s, precision=1, pcts=1)(s) expected = pd.Series(["2.4%", "(56.1%)", "98.7%"]) pt.assert_series_equal(expected, actual) def test_dynamic_formatter(self): kwargs = dict(precision=1, commas=1, parens=1, pcts=1, trunc_dot_zeros=1) byrow = fmt.new_dynamic_formatter("row", **kwargs) bycol = fmt.new_dynamic_formatter("col", **kwargs) bycell = fmt.new_dynamic_formatter("cell", **kwargs) todt = pd.to_datetime f = pd.DataFrame( dict( pcts=[0.1, 0.2343, -0.9234], flt=[123.0, 1234.0, -12345.0], ts=[todt("1/1/2012"), todt("1/1/2013"), todt("1/1/2014")], ) ) # by column expected_bycol = { "pcts": ["10%", "23.4%", "(92.3%)"], "flt": ["123", "1,234", "(12,345)"], "ts": ["2012-01-01", "2013-01-01", "2014-01-01"], } pt.assert_frame_equal(
pd.DataFrame(expected_bycol)
pandas.DataFrame
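# --- Editor's note (added example, not part of the dataset row above) ---
# The row above ends in pandas.DataFrame built from a dict of equal-length
# lists, the pattern the tia formatter test uses for expected_bycol. The
# values below are copied from that dict in the test; the surrounding
# assertion code is omitted.
import pandas as pd

expected_bycol = {
    "pcts": ["10%", "23.4%", "(92.3%)"],
    "flt": ["123", "1,234", "(12,345)"],
    "ts": ["2012-01-01", "2013-01-01", "2014-01-01"],
}
expected_df = pd.DataFrame(expected_bycol)   # dict keys -> columns, lists -> rows
print(expected_df)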
#!/usr/bin/python # -*- coding: utf-8 -*- """ Module to hold core processing/analysis functions for Ocean iodide (Oi!) project Notes ---- ML = Machine Learning target = the value aiming to be estimated or provided in training feature = a induivual conpoinet of a predictor vector assigned to a target ( could be called an attribute ) predictor = vector assigned to a target value Please see Paper(s) for more details: <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>.: A machine learning based global sea-surface iodide distribution, Earth Syst. Sci. Data Discuss., https://doi.org/10.5194/essd-2019-40, in review, 2019. """ import numpy as np import pandas as pd import xarray as xr import matplotlib.pyplot as plt import cartopy import cartopy.crs as ccrs # import AC_tools (https://github.com/tsherwen/AC_tools.git) import AC_tools as AC import sparse2spatial as s2s import sparse2spatial.utils as utils import sparse2spatial.ancillaries2grid_oversample as ancillaries2grid import sparse2spatial.archiving as archiving import sparse2spatial.ancillaries as ancillaries import sparse2spatial.RFRbuild as build import sparse2spatial.archiving as archiving import sparse2spatial.analysis as analysis import sparse2spatial.RFRanalysis as RFRanalysis import sparse2spatial.plotting as plotting #from sparse2spatial.RFRanalysis import get_stats_on_models from sparse2spatial.analysis import add_ensemble_avg_std_to_dataset from sparse2spatial.RFRbuild import get_top_models from sparse2spatial.RFRbuild import build_or_get_models # Get iodide specific functions #from observations import get_dataset_processed4ML import observations as obs import project_misc as misc import plotting_and_analysis as plt_analysis def main(): """ Main driver if run directly from command line. unhash functionality to call. Notes ------- - Calls for the full pipeline used for producing a new sea-surface iodide field are listed below. However, many of these now in the following functions in the same folder: plotting_and_analysis.py process_new_observations.py project_misc.py observations.py emissions.py - simply unhash the functions to be run below. """ # ---- ---- Over-arching settings # target='Iodide' # Setup the data directory structure (only needs to be done once)) # NOTE: the locations of s2s and data are set in script/<target>'s *.rc file # utils.check_or_mk_directory_structure(target=target) # General settings rm_Skagerrak_data = True rebuild = False # rm_Skagerrak_data = False # Use top models from full dataset ( now: nOutliers + nSkagerak RFR_dict = build_or_get_models_iodide( rebuild=rebuild, rm_Skagerrak_data=rm_Skagerrak_data) # RFR_dict = build_or_get_models_iodide( rm_Skagerrak_data=False ) # topmodels = get_top_models(RFR_dict=RFR_dict, vars2exclude=['DOC', 'Prod'], n=10) print(RFR_dict.keys()) # print(topmodels) # Check statistics on prediction # print(stats) # Get the dictionary of models and their features model_feature_dict = utils.get_model_features_used_dict(rtn_dict=True) print(model_feature_dict) # print(model_feature_dict['NO3+DOC+Phos']) # ---- ----- ----- ----- ----- ----- ----- ----- ----- # ----- ----- Evaluating input datasets # General plots of all species # misc.get_diagnostic_plots_analysis4observations() # ---- ----- ----- ----- ----- ----- ----- ----- ----- # ----- ----- Processing of observations (& extraction of ancillaries) # --- Get iodide observations? # df = obs.get_iodide_obs() # --- Re-process file? # df = obs.get_iodide_obs(process_new_iodide_obs_file=True) # --- Re-process ancillaries file? 
# obs.process_iodide_obs_ancillaries_2_csv() # get_core_Chance2014_obs() # --- Process MLD csv files? (Just for ease of use/data munging) # ancillaries.process_MLD_csv2NetCDF() # Check extracted data against observations. # misc.compare_obs_ancillaries_with_extracted_values() # ---- ----- ----- ----- ----- ----- ----- ----- ----- # ----- ----- Build ancillary variable dataset file # --- # Build either a full or low res ancillary NetCDF file # res = '0.125x0.125' # res = '4x5' # low resolution to test extraction etc? # Get indicies to extract for variables in imported NetCDF # ancillaries2grid.mk_array_of_indices4locations4res( res=res ) # Extract the variables to NetCDF # ancillaries2grid.extract_feature_variables2NetCDF( res=res ) # Interpolate the fields at full resolution # ancillaries.interpolate_NaNs_in_feature_variables( res=res ) # ---- ----- ----- ----- ----- ----- ----- ----- ----- # ----- ----- Building new iodide field (inc. Machine learning) # --- # (Re-)Build all models # (random stats means this gives the same answer everytime) # build_or_get_models_iodide(rebuild=True, # rm_Skagerrak_data=rm_Skagerrak_data ) # --- Update the predictor array values # res='4x5' # plt_analysis.set_SAL_and_NIT_above_65N_to_avg(res=res) # --- Predict values globally (only use 0.125) # extra string for NetCDF save name xsave_str = 'TEST_' # make NetCDF predictions from the main array save2NetCDF = True # resolution to use? (full='0.125x0.125', test at lower e.g. '4x5') # res = '0.125x0.125' # res = '4x5' # mk_iodide_predictions_from_ancillaries(None, res=res, RFR_dict=RFR_dict, # use_updated_predictor_NetCDF=False, # save2NetCDF=save2NetCDF, # rm_Skagerrak_data=rm_Skagerrak_data, # topmodels=topmodels, # xsave_str=xsave_str, # add_ensemble2ds=True) # ---- ----- ----- ----- ----- ----- ----- ----- ----- # ----- ----- Sensitivity testing of the new iodide field # --- # make NetCDF predictions from the updated arrays # vars2use = [ # 'WOA_Nitrate', 'WOA_Salinity', 'WOA_Phosphate', 'WOA_TEMP_K', 'Depth_GEBCO', # ] # folder = None # use_updated_predictor_NetCDF = False # # # for res in ['4x5', '2x2.5']: # for res in ['4x5']: # # # for res in ['0.125x0.125',]: # # setup a pool to bulk process # p = Pool( len(vars2use) ) # # for var2use in vars2use: # # now predict the arrays from this. # p.map( partial(mk_iodide_predictions_from_ancillaries, res=res, # RFR_dict=RFR_dict, folder=folder, # use_updated_predictor_NetCDF=use_updated_predictor_NetCDF, # save2NetCDF=save2NetCDF ), # vars2use # ) # # close the pool # p.close() # --- test the updates predictions # res = '0.125x0.125' # folder = None # for var2use in vars2use: # # extr_str = '_INTERP_NEAREST_DERIVED' # extr_str = '_UPDATED_{}'.format( var2use ) # plot_predicted_iodide_vs_lat_figure_ENSEMBLE( RFR_dict=RFR_dict, # extr_str=extr_str, res=res, folder=folder ) # then test if depth is set to # ---- ----- ----- ----- ----- ----- ----- ----- ----- # ----- ----- Plots / Analsis for sea-surface iodide ML paper # Get shared data # RFR_dict = build_or_get_models() # --- 2D analysis # Plot up spatial comparison of obs. 
and params # plt_analysis.plot_up_obs_spatially_against_predictions( RFR_dict=RFR_dict ) # Test which plotting options to use (to display markers) # plt_analysis.plot_up_obs_spatially_against_predictions_options( # RFR_dict=RFR_dict ) # plot up the input variables spatially # res = '0.125x0.125' # res = '4x5' # plt_analysis.plot_up_input_ancillaries_spatially( res=res, RFR_dict=RFR_dict, # save2png=True) # Plot up the 2D differences in predictions # res= '0.125x0.125' # res= '4x5' # plt_analysis.plot_up_spatial_changes_in_predicted_values( res=res, window=True, # f_size=30) # Get stats from the 4x5 and 2x2.5 predictions # analysis.get_stats_on_spatial_predictions_4x5_2x25() # analysis.get_stats_on_spatial_predictions_4x5_2x25_by_lat() # Get stats from the 0.125x0.125 prediction # analysis.get_stats_on_spatial_predictions_0125x0125() # Calculate the average predicted surface conc (only 4x5. 2x2.5 too? ) # plt_analysis.calculate_average_predicted_surface_conc() # AGU calcs at 4x5 # Plot up latitude vs. predicted iodide # plt_analysis.plot_predicted_iodide_vs_lat_figure() # Seasonal prediction of iodide by month # plt_analysis.plot_monthly_predicted_iodide( res='4x5' ) # plt_analysis.plot_monthly_predicted_iodide( res='0.125x0.125' ) # plt_analysis.plot_monthly_predicted_iodide_diff( res='0.125x0.125' ) # explore the extracted data in the arctic and AnatArctic # plt_analysis.explore_extracted_data_in_Oi_prj_explore_Arctic_Antarctic_obs() # Check the sensitivity to input variables >= 65 N # plt_analysis.mk_PDFs_to_show_the_sensitivty_input_vars_65N_and_up( # save_str='TEST_V' ) # --- Point-for-point analysis # build ODR plots for obs. vs. model # plt_analysis.analyse_X_Y_correlations_ODR( RFR_dict=RFR_dict, context='poster' ) # plt_analysis.analyse_X_Y_correlations_ODR( RFR_dict=RFR_dict, context='paper' ) # Analyse the X_Y correlations # plt_analysis.analyse_X_Y_correlations( RFR_dict=RFR_dict ) # Get the importance of individual features for prediction # RFRanalysis.get_feature_importance( RFR_dict=RFR_dict ) # Get general stats on the current models # RFRanalysis.get_stats_on_models( RFR_dict=RFR_dict ) # Get tabulated performance # mk_table_of_point_for_point_performance(RFR_dict=RFR_dict) # mk_table_of_point_for_point_performance_ALL(RFR_dict=RFR_dict) # mk_table_of_point_for_point_performance_TESTSET(RFR_dict=RFR_dict) # Get CDF and PDF plots for test, training, entire, and residual # plt_analysis.plot_up_CDF_and_PDF_of_obs_and_predictions( df=RFR_dict['df'] ) # Plot up various spatial plots for iodide concs + std. # plt_analysis.plot_up_ensemble_avg_and_std_spatially( # rm_Skagerrak_data=rm_Skagerrak_data # ) # --- Spatial analysis for specific locations # explore the observational data in the Arctic # misc.explore_observational_data_in_Arctic_parameter_space( RFR_dict=RFR_dict ) # plot up where decision points are # plt_analysis.plot_spatial_area4core_decisions( res='4x5' ) # plt_analysis.plot_spatial_area4core_decisions( res='0.125x0.125' ) # Explore the sensitivity to data denial # plt_analysis.explore_sensitivity_of_65N2data_denial( res='4x5' ) # plt_analysis.explore_sensitivity_of_65N2data_denial( res='2x2.5' ) # plt_analysis.explore_sensitivity_of_65N2data_denial( res='0.125x0.125' ) # --- Analysis of models build # testset analysis # plt_analysis.test_model_sensitiivty2training_test_split() # driver not in use yet! 
# RFRanalysis.run_tests_on_testing_dataset_split_quantiles() # RFRanalysis.run_tests_on_testing_dataset_split() # selection of variables to build models # hyperparameter tuning of selected models # Analysis of the spatial variance of individual ensemble members # rm_Skagerrak_data = True # rm_Skagerrak_data = False # plt_analysis.analyse_dataset_error_in_ensemble_members( res='0.125x0.125', \ # rebuild_models=False, remake_NetCDFs=False, # rm_Skagerrak_data=rm_Skagerrak_data, # topmodels=topmodels ) # plt_analysis.analyse_dataset_error_in_ensemble_members( res='0.125x0.125', \ # rebuild_models=True, remake_NetCDFs=True, \ # rm_Skagerrak_data=rm_Skagerrak_data, # topmodels=topmodels # ) # Common resolutions # archiving.regrid_output_to_common_res_as_NetCDFs(topmodels=topmodels, # rm_Skagerrak_data=rm_Skagerrak_data) # --- Do tree by tree analysis # Extract trees to .dot files (used make make the single tree figures) # RFRanalysis.extract_trees4models( RFR_dict=RFR_dict, N_trees2output=50 ) # Plot up interpretation of trees # Now in TreeSurgeon - see separate repository on github # https://github.com/wolfiex/TreeSurgeon # analysis of node spliting # RFRanalysis.analyse_nodes_in_models( RFR_dict=RFR_dict ) # analysis of outputted trees # RFRanalysis.analyse_nodes_in_models() # --- Do futher analysis on the impact of the depth variable plt_analysis.do_analysis_processing_linked_to_depth_variable() # plot this up and other figures for the ML paper plt_analysis.plot_spatial_figures_for_ML_paper_with_cartopy() # - pass if no functions are uncommented pass def run_tests_on_testing_dataset_split(model_name=None, n_estimators=500, features_used=None, target='Iodide', df=None): """ Run tests on the sensitivity of model to test/training choices Parameters ------- target (str): Name of the target variable (e.g. iodide) df (pd.DataFrame): dataframe containing target and feature variables n_estimators (int), number of estimators (decision trees) to use features_used (list): list of the features within the model_name model model_name (str): name of model to build Returns ------- (None) """ from sklearn.ensemble import RandomForestRegressor from sklearn.externals import joblib # target='Iodide' # ----- Local variables # Get unprocessed input data at observation points if isinstance(df, type(None)): df = get_processed_df_obs_mod() # NOTE this df contains values >400nM # ---- get the data # Which "features" (variables) to use if isinstance(features_used, type(None)): # model_name = 'ALL' model_name = 'RFR(TEMP+DEPTH+SAL)' features_used = utils.get_model_features_used_dict(model_name) # --- local variables # dictionary of test set variables random_split_var = 'rn. 20%' strat_split_var = 'strat. 
20%' # set a basis for filenames to saved as save_filename_str = 'Oi_prj_test_training_selection' # random_states = [38, 39, 40, 41, 42, 43, 44 ] # random_states = [36, 37, 38, 39, 40, 41, 42, ] # random_states = np.arange(33, 43, 1) random_states = np.arange(25, 45, 1) # Formatted variable name for iodide Iaq = '[I$^{-}_{aq}$]' # --- set testset to evaulte TSETS = {} TSETS_N = {} TSETS_nsplits = {} # - no vals above 400 Tname = 'All' tmp_ts = df[features_used+[target]].copy() TSETS_N[Tname] = tmp_ts.shape[0] TSETS[Tname] = tmp_ts TSETS_nsplits[Tname] = 4 # - no vals above 400 # Tname = '{}<400'.format( Iaq ) # tmp_ts = df.loc[ df['Iodide']<400 ][ features_used+[target] ].copy() # TSETS_N[Tname] = tmp_ts.shape[0] # TSETS[Tname] = tmp_ts # TSETS_nsplits[Tname] = 4 # # - no vals above 450 # Tname = '{}<450'.format( Iaq ) # tmp_ts = df.loc[ df['Iodide']<450 ][ features_used+[target] ].copy() # TSETS_N[Tname] = tmp_ts.shape[0] # TSETS[Tname] = tmp_ts # TSETS_nsplits[Tname] = 4 # - no vals above 350 # Tname = '{}<350'.format( Iaq ) # tmp_ts = df.loc[ df['Iodide']<350 ][ features_used+[target] ].copy() # TSETS_N[Tname] = tmp_ts.shape[0] # TSETS[Tname] = tmp_ts # TSETS_nsplits[Tname] = 4 # - remove estuarine (no values < 30 salinity?) values Tname = 'SAL>=30 \n & no outliers' bool1 = df['WOA_Salinity'] >= 30 # also remove outliers bool2 = df['Iodide'] < utils.get_outlier_value(df=df, var2use='Iodide') tmp_ts = df.loc[bool1 & bool2][features_used+[target]].copy() TSETS_N[Tname] = tmp_ts.shape[0] TSETS[Tname] = tmp_ts TSETS_nsplits[Tname] = 4 # - remove estuarine (no values < 30 salinity?) values # Tname = 'SAL>=30 \n & {}'.format( Iaq ) + '<98$^{th}$' # bool1 = df['WOA_Salinity']>=30 # bool2 = df['Iodide'] < np.percentile( df['Iodide'].values, 98 ) # # also remove values where iodide <400 # tmp_ts = df.loc[ bool1 & bool2 ][ features_used+[target] ].copy() # TSETS_N[Tname] = tmp_ts.shape[0] # TSETS[Tname] = tmp_ts # TSETS_nsplits[Tname] = 4 # - remove estuarine (no values < 30 salinity?) 
values # Tname = 'SAL>=30 \n & {}'.format( Iaq ) + '<98$^{th}$' # bool1 = df['WOA_Salinity']>=30 # bool2 = df['Iodide'] < np.percentile( df['Iodide'].values, 98 ) # # also remove values where iodide <400 # tmp_ts = df.loc[ bool1 & bool2 ][ features_used+[target] ].copy() # TSETS_N[Tname] = tmp_ts.shape[0] # TSETS[Tname] = tmp_ts # TSETS_nsplits[Tname] = 4 # - Just coastal Tname = 'Just coastal\n& no outliers' bool1 = df['Coastal'] == 1 # also remove outliers bool2 = df['Iodide'] < utils.get_outlier_value(df=df, var2use='Iodide') tmp_ts = df.loc[bool1 & bool2][features_used+[target]].copy() TSETS_N[Tname] = tmp_ts.shape[0] TSETS[Tname] = tmp_ts TSETS_nsplits[Tname] = 4 # - Just coastal # Tname = 'Coastal \n & {}'.format( Iaq )+ '<98$^{th}$' # bool1 = df['Coastal'] ==1 # # also remove values where iodide <98 # bool2 = df['Iodide'] < np.percentile( df['Iodide'].values, 98 ) # tmp_ts = df.loc[ bool1 & bool2 ][ features_used+[target] ].copy() # TSETS_N[Tname] = tmp_ts.shape[0] # TSETS[Tname] = tmp_ts # TSETS_nsplits[Tname] = 4 # - non-coastal Tname = 'Just non-coastal\n& no outliers' bool1 = df['Coastal'] == 0 # also remove outliers bool2 = df['Iodide'] < utils.get_outlier_value(df=df, var2use='Iodide') tmp_ts = df.loc[bool1 & bool2][features_used+[target]].copy() TSETS_N[Tname] = tmp_ts.shape[0] TSETS[Tname] = tmp_ts TSETS_nsplits[Tname] = 4 # - non-coastal # Tname = 'Non Coastal \n & {}'.format( Iaq )+ '<98$^{th}$' # bool1 = df['Coastal'] == 0 # # also remove values where iodide <98 # bool2 = df['Iodide'] < np.percentile( df['Iodide'].values, 98 ) # tmp_ts = df.loc[ bool1 & bool2 ][ features_used+[target] ].copy() # TSETS_N[Tname] = tmp_ts.shape[0] # TSETS[Tname] = tmp_ts # TSETS_nsplits[Tname] = 4 # - only that < 98th # Tname = '{} '.format( Iaq ) +'<98$^{th}$' # bool_ = df['Iodide'] < np.percentile( df['Iodide'].values, 98 ) # tmp_ts = df.loc[ bool_ ][ features_used+[target] ].copy() # # also remove values where iodide <400 # tmp_ts = tmp_ts.loc[ df['Iodide']<400 ] # TSETS_N[Tname] = tmp_ts.shape[0] # TSETS[Tname] = tmp_ts # TSETS_nsplits[Tname] = 4 # - only that < 99th # Tname = '{} '.format( Iaq ) + '<99$^{th}$' # bool_ = df['Iodide'] >= np.percentile( df['Iodide'].values, 99 ) # tmp_ts = df.loc[ bool_ ][ features_used+[target] ].copy() # # also remove values where iodide <400 # tmp_ts = tmp_ts.loc[ df['Iodide']<400 ] # TSETS_N[Tname] = tmp_ts.shape[0] # TSETS[Tname] = tmp_ts # TSETS_nsplits[Tname] = 4 # - No Skagerrak # Tname = 'No Skagerrak' # bool_ = df['Data_Key'].values != 'Truesdale_2003_I' # tmp_ts = df.loc[ bool_ ][ features_used+[target] ].copy() # # also remove values where iodide <400 # TSETS_N[Tname] = tmp_ts.shape[0] # TSETS[Tname] = tmp_ts # TSETS_nsplits[Tname] = 4 # - No Skagerrak # Tname = 'No Skagerrak \n or {}'.format( Iaq )+ '>98$^{th}$' # bool1 = df['Data_Key'].values == 'Truesdale_2003_I' # bool2 = df['Iodide'] > np.percentile( df['Iodide'].values, 98 ) # index2drop = df.loc[ bool1 | bool2, : ].index # tmp_ts = df.drop( index2drop )[ features_used+[target] ].copy() # also remove values where iodide <400 TSETS_N[Tname] = tmp_ts.shape[0] TSETS[Tname] = tmp_ts TSETS_nsplits[Tname] = 4 # - No outliers Tname = 'No outliers' bool_ = df['Iodide'] < utils.get_outlier_value(df=df, var2use='Iodide') tmp_ts = df.loc[bool_][features_used+[target]].copy() # also remove values where iodide <400 TSETS_N[Tname] = tmp_ts.shape[0] TSETS[Tname] = tmp_ts TSETS_nsplits[Tname] = 4 # - No Skagerrak Tname = 'No Skagerrak \n or outliers' bool1 = df['Data_Key'].values == 'Truesdale_2003_I' 
bool2 = df['Iodide'] > utils.get_outlier_value(df=df, var2use='Iodide') index2drop = df.loc[bool1 | bool2, :].index tmp_ts = df.drop(index2drop)[features_used+[target]].copy() # also remove values where iodide <400 TSETS_N[Tname] = tmp_ts.shape[0] TSETS[Tname] = tmp_ts TSETS_nsplits[Tname] = 4 # --- build models using testsets # Get RMSE_df = pd.DataFrame() # Now loop TSETS for Tname in TSETS.keys(): # Initialise lists to store data in RMSE_l = [] # get random state to use for random_state in random_states: print('Using: random_state={}'.format(random_state)) # Get testset df_tmp = TSETS[Tname].copy() # force index to be a range df_tmp.index = range(df_tmp.shape[0]) print(Tname, df_tmp.shape) # Stratified split by default, unless random var in name rand_strat = True rand_20_80 = False # get the training and test set returned_vars = mk_iodide_test_train_sets(df=df_tmp, rand_20_80=rand_20_80, random_state=random_state, nsplits=TSETS_nsplits[Tname], rand_strat=rand_strat, features_used=features_used, ) train_set, test_set, test_set_targets = returned_vars # set the training and test sets train_features = df_tmp[features_used].loc[train_set.index] train_labels = df_tmp[[target]].loc[train_set.index] test_features = df_tmp[features_used].loc[test_set.index] test_labels = df_tmp[[target]].loc[test_set.index] # build the model - NOTE THIS MUST BE RE-DONE! # ( otherwise the model is being re-trained ) model = RandomForestRegressor(random_state=random_state, n_estimators=n_estimators, criterion='mse') # Fit the model model.fit(train_features, train_labels) # Predict the values df_tmp[Tname] = model.predict(df_tmp[features_used].values) # Get the stats against the test group df_tmp = df_tmp[[Tname, target]].loc[test_set.index] # Get MSE and RMSE MSE = (df_tmp[target]-df_tmp[Tname])**2 MSE = np.mean(MSE) std = np.std(df_tmp[Tname].values) # Return stats on bias and variance # (just use RMSE and std dev. for now) RMSE_l += [np.sqrt(MSE)] del df_tmp, train_features, train_labels, test_features, test_labels del model # Save the results to a dictionary RMSE_df[Tname] = RMSE_l # --- Get stats on the ensemble values # Get general stats on ensemble RMSE_stats = pd.DataFrame(RMSE_df.describe().copy()).T # ad number of samples RMSE_stats['N'] = pd.Series(TSETS_N) # sort to order by mean RMSE_stats.sort_values(by='mean', inplace=True) # sort the main Dataframe by the magnitude of the mean RMSE_df = RMSE_df[list(RMSE_stats.index)] # work out the deviation from mean of the ensemble pcent_var = '% from mean' means = RMSE_stats['mean'] pcents = ((means - means.mean()) / means.mean() * 100).values RMSE_stats[pcent_var] = pcents # update order of columns first_cols = ['mean', 'N'] order4cols = [i for i in RMSE_stats.columns if i not in first_cols] RMSE_stats = RMSE_stats[first_cols + order4cols] # print to screen print(RMSE_stats) pstr = '{:<13} - mean: {:.2f} (% from ensemble mean: {:.2f})' for col in RMSE_stats.T.columns: vals2print = RMSE_stats.T[col][['mean', pcent_var]].values print(pstr.format(col.replace("\n", ""), *vals2print)) # remove the '\n' symbols etc from the column names RMSE_stats2save = RMSE_stats.copy() RMSE_stats2save.index = [i.replace('\n', '') for i in RMSE_stats.index] # save to csv RMSE_stats2save.to_csv(save_filename_str+'.csv') # ---- Do some further analysis and save this to a text file a = open(save_filename_str+'_analysis.txt', 'w') # Set a header print('This file contains analysis of the training set selection', file=a) print('\n', file=a) # which files are being analysed? 
print('---- Detail range of RMSE values by build', file=a) for test_ in RMSE_stats.T.columns: df_tmp = RMSE_stats.T[test_].T min_ = df_tmp['min'] max_ = df_tmp['max'] range_ = max_ - min_ test_ = test_.replace("\n", "") # print range for test_ ptr_str = "range for '{:<20}' : {:.3g} ({:.5g}-{:.5g})" print(ptr_str.format(test_, range_, min_, max_), file=a) # print this as a % of the mean mean_ = df_tmp['mean'] prange_ = range_ / mean_ * 100 pmin_ = min_ / mean_ * 100 pmax_ = max_ / mean_ * 100 ptr_str = "range as % of mean ({:.3g}) for'{:<20}':" ptr_str += ": {:.3g} % ({:.5g} % -{:.5g} %)" print(ptr_str.format(mean_, test_, prange_, pmin_, pmax_), file=a) a.close() # --- Setup the datafframes for plotting ( long form needed ) RMSE_df = RMSE_df.melt() # rename columns ylabel_str = 'RMSE (nM)' RMSE_df.rename(columns={'value': ylabel_str}, inplace=True) # --- Plot up the test runs CB_color_cycle = AC.get_CB_color_cycle() import seaborn as sns sns.set(color_codes=True) sns.set_context("paper") dpi = 320 # --- plot up the results as violin plots fig, ax = plt.subplots(figsize=(10, 3.5), dpi=dpi) # plot up these values ax = sns.violinplot(x='variable', y=ylabel_str, data=RMSE_df, palette=CB_color_cycle, ax=ax) # remove the variable label from the x axis ax.xaxis.label.set_visible(False) # force yaxis extent ymax = AC.myround(RMSE_df[ylabel_str].max(), base=25, round_up=True) ax.set_ylim(-15, ymax+25+10) # add N value to plot f_size = 10 xlabels = [i.get_text() for i in ax.get_xticklabels()] # set locations for N lael if len(xlabels) == 7: x_l = np.linspace(0.041, 0.9025, len(xlabels)) if len(xlabels) == 6: x_l = np.linspace(0.055, 0.89, len(xlabels)) else: x_l = np.linspace(0.035, 0.9025, len(xlabels)) # loop and add N value for xlabel_n, xlabel in enumerate(xlabels): N = TSETS_N[xlabel] # Set location for label alt_text_x = x_l[xlabel_n] alt_text_y = 0.035 # Setup label and plot alt_text = 'N={}'.format(N) ax.annotate(alt_text, xy=(alt_text_x, alt_text_y), textcoords='axes fraction', ) # Adjust positions of subplot bottom = 0.095 top = 0.975 left = 0.075 right = 0.975 fig.subplots_adjust(bottom=bottom, top=top, left=left, right=right,) # save the plot plt.savefig(save_filename_str+'_sensitivity_violin.png', dpi=dpi) plt.close() # --------------------------------------------------------------------------- # ---------- Functions to generate/predict modelled field ------------------- # --------------------------------------------------------------------------- def mk_iodide_predictions_from_ancillaries(var2use, res='4x5', target='Iodide', models_dict=None, features_used_dict=None, RFR_dict=None, dsA=None, stats=None, folder=None, use_updated_predictor_NetCDF=False, save2NetCDF=False, plot2check=False, models2compare=[], topmodels=None, rm_Skagerrak_data=False, xsave_str='', add_ensemble2ds=False, verbose=True, debug=False): """ Make a NetCDF file of predicted vairables for a given resolution Parameters ------- var2use (str): var to use as main model prediction rm_Skagerrak_data (bool): remove the data from the Skagerrak region RFR_dict (dict): dictionary of core variables and data target (str): Name of the target variable (e.g. iodide) res (str): horizontal resolution of dataset (e.g. 4x5) models_dict (dict): dictionary of models (values) and their names (keys) features_used_dict (dict): dictionary of models (keys) and their features (values) use_updated_predictor_NetCDF (bool): Returns ------- Notes ----- """ # -local variables # extract the models... 
if isinstance(RFR_dict, type(None)): RFR_dict = build_or_get_models( rm_Skagerrak_data=rm_Skagerrak_data ) # set models to always predict values for if (len(models2compare) == 0): models2compare = [ # Ones using all variable options 'RFR(TEMP+DEPTH+SAL+NO3+DOC)', 'RFR(TEMP+DOC+Phos)', 'RFR(TEMP+DEPTH+SAL+Prod)', # ones just using variable options 'RFR(TEMP+SAL+NO3)', 'RFR(TEMP+DEPTH+SAL+Phos)', 'RFR(TEMP+SWrad+NO3+MLD+SAL)', 'RFR(TEMP+DEPTH+SAL)', # Temperature for zeroth order 'RFR(TEMP)', # ones in v8.1 topmodels 'RFR(TEMP+DEPTH+SAL+SWrad)', 'RFR(TEMP+DEPTH+NO3+SWrad)', 'RFR(TEMP+NO3+MLD+SAL)', 'RFR(TEMP+DEPTH+SAL+NO3)', 'RFR(TEMP+DEPTH+SAL+ChlrA)', 'RFR(TEMP+DEPTH+NO3)', 'RFR(TEMP+NO3)', # ones in topmodels_nSkagerrak 'RFR(TEMP+DEPTH+SAL)', 'RFR(SWrad+SAL+DEPTH)', 'RFR(TEMP+SAL)' ] # Make sure the top 10 models are included # ( with derivative variables ) if isinstance(topmodels, type(None)): # Get stats on models in RFR_dict if isinstance(stats, type(None)): stats = get_stats_on_models(RFR_dict=RFR_dict, analysis4coastal=True, verbose=False) topmodels = get_top_models(RFR_dict=RFR_dict, stats=stats, vars2exclude=['DOC', 'Prod']) models2compare += topmodels # Remove any double ups models2compare = list(set(models2compare)) # Get the variables required here if isinstance(models_dict, type(None)): models_dict = RFR_dict['models_dict'] if isinstance(features_used_dict, type(None)): features_used_dict = RFR_dict['features_used_dict'] # Get location to save file and set filename if isinstance(folder, type(None)): folder = utils.get_file_locations('data_root')+'/data/' extr_str = '_INTERP_NEAREST_DERIVED' # Add lines to save strings if use_updated_predictor_NetCDF: xsave_str += '_UPDATED_{}'.format(var2use) extr_str += xsave_str if rm_Skagerrak_data: xsave_str += '_No_Skagerrak' if isinstance(dsA, type(None)): filename = 'Oi_prj_feature_variables_{}.nc'.format(res) dsA = xr.open_dataset(folder + filename) # --- Make a da for each model ds_l = [] for modelname in models2compare: print(modelname) # get model model = models_dict[modelname] # get testinng features features_used = utils.get_model_features_used_dict(modelname) # Make a DataSet of predicted values ds_tmp = utils.mk_da_of_predicted_values(model=model, modelname=modelname, res=res, features_used=features_used, dsA=dsA) # Add attributes to the prediction ds_tmp = add_attrs2iodide_ds(ds_tmp, add_global_attrs=False, varname=modelname) # Savea ds_l += [ds_tmp] # Combine datasets ds = xr.merge(ds_l) # - Also get values for parameterisations # Chance et al (2013) param = u'Chance2014_STTxx2_I' arr = utils.calc_I_Chance2014_STTxx2_I(dsA['WOA_TEMP'].values) ds[param] = ds[modelname] # use existing array as dummy to fill ds[param].values = arr # MacDonald et al (2013) param = 'MacDonald2014_iodide' arr = utils.calc_I_MacDonald2014(dsA['WOA_TEMP'].values) ds[param] = ds[modelname] # use existing array as dummy to fill ds[param].values = arr # Add ensemble to ds too if add_ensemble2ds: print('WARNING: Using topmodels for ensemble as calculated here') ds = add_ensemble_avg_std_to_dataset(ds=ds, RFR_dict=RFR_dict, topmodels=topmodels, res=res, save2NetCDF=False) # - Do a quick diagnostic plot if plot2check: for var_ in ds.data_vars: # plot an annual average arr = ds[var_].mean(dim='time') AC.map_plot(arr, res=res) plt.title(var_) plt.show() # Add global variables ds = add_attrs2iodide_ds(ds, add_varname_attrs=False) # - Save to NetCDF if save2NetCDF: filename = 'Oi_prj_predicted_{}_{}{}.nc'.format(target, res, xsave_str) ds.to_netcdf(filename) 
else: return ds def mk_table_of_point_for_point_performance(RFR_dict=None, df=None, testset='Test set (strat. 20%)', inc_ensemble=False, var2use='RFR(Ensemble)', target='Iodide'): """ Make a table to summarise point-for-point performance Parameters ------- target (str): Name of the target variable (e.g. iodide) var2use (str): variable name to use for ensemble prediction testset (str): Testset to use, e.g. stratified sampling over quartiles for 20%:80% inc_ensemble (bool), include the ensemble (var2use) in the analysis RFR_dict (dict): dictionary of core variables and data df (pd.DataFrame): dataframe containing target and feature variables Returns ------- (None) """ # Get data objects as dictionary and extract dataframe if not provided. if isinstance(RFR_dict, type(None)): RFR_dict = build_or_get_models() if isinstance(df, type(None)): df = RFR_dict['df'] # Get stats on model tuns runs stats = get_stats_on_models(RFR_dict=RFR_dict, df=df, analysis4coastal=True, var2use=var2use, inc_ensemble=inc_ensemble, verbose=False) # Select param values of interest (and give updated title names ) rename_titles = {u'Chance2014_STTxx2_I': 'Chance et al. (2014)', u'MacDonald2014_iodide': 'MacDonald et al. (2014)', var2use : var2use, 'Iodide': 'Obs.', } # Set the stats to use first_columns = [ 'mean', 'std', '25%', '50%', '75%', 'RMSE ({})'.format(testset), 'RMSE (all)', ] stats = stats[first_columns] # Rename columns (50% to median and ... ) cols2rename = { '50%': 'median', 'std': 'std. dev.', 'RMSE ({})'.format(testset): 'RMSE (withheld)' } stats.rename(columns=cols2rename, inplace=True) # Only select params of interest stats = stats.T[rename_titles.values()].T # Rename stats.rename(index=rename_titles, inplace=True) # Set filename and save detail on models csv_name = 'Oi_prj_point_for_point_comp4tabale.csv' stats.round(1).to_csv(csv_name) def mk_table_of_point_for_point_performance_TESTSET(RFR_dict=None, df=None, testset='Test set (strat. 20%)', inc_ensemble=False, var2use='RFR(Ensemble)', target='Iodide'): """ Make a table to summarise point-for-point performance within testset Parameters ------- target (str): Name of the target variable (e.g. iodide) var2use (str): variable name to use for ensemble prediction testset (str): Testset to use, e.g. stratified sampling over quartiles for 20%:80% inc_ensemble (bool), include the ensemble (var2use) in the analysis RFR_dict (dict): dictionary of core variables and data df (pd.DataFrame): dataframe containing target and feature variables Returns ------- (None) """ # Get data objects as dictionary and extract dataframe if not provided. if isinstance(RFR_dict, type(None)): RFR_dict = build_or_get_models() if isinstance(df, type(None)): df = RFR_dict['df'] # Just select the testing dataset df = df.loc[df[testset] == True, :] # Get stats on model tuns runs stats = get_stats_on_models(RFR_dict=RFR_dict, df=df, analysis4coastal=True, inc_ensemble=inc_ensemble, verbose=False) # Select param values of interest (and give updated title names ) rename_titles = {u'Chance2014_STTxx2_I': 'Chance et al. (2014)', u'MacDonald2014_iodide': 'MacDonald et al. (2014)', 'RFR(Ensemble)': 'RFR(Ensemble)', 'Iodide': 'Obs.', } # Set the stats to use for in csv output first_columns = [ 'mean', 'std', '25%', '50%', '75%', 'RMSE ({})'.format(testset), 'RMSE (all)', ] stats = stats[first_columns] # Rename columns (50% to median and ... ) cols2rename = { '50%': 'median', 'std': 'std. 
dev.', 'RMSE ({})'.format(testset): 'RMSE (withheld)' } stats.rename(columns=cols2rename, inplace=True) # Only select params of interest stats = stats.T[rename_titles.values()].T # Rename stats.rename(index=rename_titles, inplace=True) # Set filename and save detail on models csv_name = 'Oi_prj_point_for_point_comp4tabale_TESTSET.csv' stats.round(1).to_csv(csv_name) def mk_table_of_point_for_point_performance_ALL(RFR_dict=None, df=None, testset='Test set (strat. 20%)', var2use='RFR(Ensemble)', inc_ensemble=False, target='Iodide'): """ Make a table to summarise point-for-point performance for all datapoints Parameters ------- target (str): Name of the target variable (e.g. iodide) var2use (str): variable name to use for ensemble prediction testset (str): Testset to use, e.g. stratified sampling over quartiles for 20%:80% inc_ensemble (bool), include the ensemble (var2use) in the analysis RFR_dict (dict): dictionary of core variables and data df (pd.DataFrame): dataframe containing target and feature variables Returns ------- (None) """ # Get data objects as dictionary and extract dataframe if not provided. if isinstance(RFR_dict, type(None)): RFR_dict = build_or_get_models() if isinstance(df, type(None)): df = RFR_dict['df'] # Get stats on model tuns runs stats = get_stats_on_models(RFR_dict=RFR_dict, df=df, analysis4coastal=True, verbose=False) # Select param values of interest (and give updated title names ) rename_titles = {u'Chance2014_STTxx2_I': 'Chance et al. (2014)', u'MacDonald2014_iodide': 'MacDonald et al. (2014)', 'RFR(Ensemble)': 'RFR(Ensemble)', target: 'Obs.', } # Set the stats to use first_columns = [ 'mean', 'std', '25%', '50%', '75%', 'RMSE ({})'.format(testset), 'RMSE (all)', ] stats = stats[first_columns] # Rename columns to more standard names for stats (e.g. 50% to median and ... ) cols2rename = { '50%': 'median', 'std': 'std. dev.', 'RMSE ({})'.format(testset): 'RMSE (withheld)' } stats.rename(columns=cols2rename, inplace=True) # Rename the columns stats.rename(index=rename_titles, inplace=True) # Set filename and save detail on models csv_name = 'Oi_prj_point_for_point_comp4tabale_ALL.csv' stats.round(1).to_csv(csv_name) # Also save a .csv of values without derived values index2use = [i for i in stats.index if all( [ii not in i for ii in derived])] stats = stats.T stats = stats[index2use] stats = stats.T csv_name = 'Oi_prj_point_for_point_comp4tabale_ALL_NO_DERIV.csv' stats.round(1).to_csv(csv_name) def get_dataset_processed4ML(restrict_data_max=False, rm_Skagerrak_data=False, rm_outliers=True, rm_LOD_filled_data=False): """ Get dataset as a DataFrame with standard munging settings Parameters ------- restrict_data_max (bool): restrict the obs. data to a maximum value? rm_Skagerrak_data (bool): remove the data from the Skagerrak region rm_outliers (bool): remove the outliers from the observational dataset Returns ------- (pd.DataFrame) """ from observations import add_extra_vars_rm_some_data from observations import get_processed_df_obs_mod # - Local variables features_used = None target = 'Iodide' # - The following settings are set to False as default # settings for incoming feature data restrict_min_salinity = False use_median4chlr_a_NaNs = False add_modulus_of_lat = False # Apply transforms to data? do_not_transform_feature_data = True # Just use the forest outcomes and do not optimise use_forest_without_optimising = True # KLUDGE - this is for N=85 median_4MLD_when_NaN_or_less_than_0 = False # This is no longer needed? 
# KLUDGE - this is for depth values greater than zero median_4depth_when_greater_than_0 = False # - Get data as a DataFrame df = get_processed_df_obs_mod() # NOTE this df contains values >400nM # Add extra vairables and remove some data. df = add_extra_vars_rm_some_data(df=df, restrict_data_max=restrict_data_max, restrict_min_salinity=restrict_min_salinity, rm_Skagerrak_data=rm_Skagerrak_data, rm_outliers=rm_outliers, rm_LOD_filled_data=rm_LOD_filled_data, ) # add # - Add test and training set assignment to columns # print( 'WARNING - What testing had been done on training set selection?!' ) # Choose a sub set of data to exclude from the input data... # from sklearn.model_selection import train_test_split # targets = df[ [target] ] # # Use a standard 20% test set. # train_set, test_set = train_test_split( targets, test_size=0.2, \ # random_state=42 ) # standard split vars? (values= rand_20_80, rand_strat ) ways2split_data = { 'rn. 20%': (True, False), 'strat. 20%': (False, True), } # Loop training/test split methods for key_ in ways2split_data.keys(): # Get settings rand_20_80, rand_strat = ways2split_data[key_] # Copy a df for splitting # df_tmp = df['Iodide'].copy() # Now split using existing function returned_vars = build.mk_test_train_sets(df=df.copy(), target=target, rand_20_80=rand_20_80, rand_strat=rand_strat, features_used=df.columns.tolist(), ) train_set, test_set, test_set_targets = returned_vars # Now assign the values key_varname = 'Test set ({})'.format(key_) df[key_varname] = False df.loc[test_set.index, key_varname] = True df.loc[train_set.index, key_varname] = False return df # --------------------------------------------------------------------------- # ---------- Wrappers for s2s ------------- # --------------------------------------------------------------------------- def build_or_get_models_iodide(rm_Skagerrak_data=True, rm_LOD_filled_data=False, rm_outliers=True, rebuild=False): """ Wrapper call to build_or_get_models for sea-surface iodide Parameters ------- rm_Skagerrak_data (bool): remove the data from the Skagerrak region rm_LOD_filled_data (bool): remove the observational values below LOD add_modulus_of_lat (bool): add the modulus of lat to dataframe rm_outliers (bool): remove the observational outliers from the dataframe Returns ------- (dict) """ # Get the dictionary of model names and features (specific to iodide) model_feature_dict = utils.get_model_features_used_dict(rtn_dict=True) # Get the observational dataset prepared for ML pipeline df = get_dataset_processed4ML( rm_Skagerrak_data=rm_Skagerrak_data, rm_outliers=rm_outliers, ) # Exclude data from the Skaggerakk data? if rm_Skagerrak_data: model_sub_dir = '/TEMP_MODELS_No_Skagerrak/' else: model_sub_dir = '/TEMP_MODELS/' if rebuild: RFR_dict = build_or_get_models(save_model_to_disk=True, model_feature_dict=model_feature_dict, df=df, read_model_from_disk=False, model_sub_dir=model_sub_dir, delete_existing_model_files=True) else: RFR_dict = build_or_get_models(save_model_to_disk=False, model_feature_dict=model_feature_dict, df=df, read_model_from_disk=True, model_sub_dir=model_sub_dir, delete_existing_model_files=False) return RFR_dict def get_stats_on_models(df=None, testset='Test set (strat. 20%)', target='Iodide', inc_ensemble=False, analysis4coastal=False, var2use='RFR(Ensemble)', plot_up_model_performance=True, RFR_dict=None, add_sklean_metrics=False, verbose=True, debug=False): """ Analyse the stats on of params and obs. 
Parameters ------- analysis4coastal (bool): include analysis of data split by coastal/non-coastal target (str): Name of the target variable (e.g. iodide) testset (str): Testset to use, e.g. stratified sampling over quartiles for 20%:80% inc_ensemble (bool): include the ensemble (var2use) in the analysis var2use (str): var to use as main model prediction debug (bool): print out debugging output? add_sklean_metrics (bool): include core sklearn metrics Returns ------- (pd.DataFrame) """ # --- Get data if isinstance(RFR_dict, type(None)): RFR_dict = build_or_get_models() # select dataframe with observations and predictions in it if isinstance(df, type(None)): df = RFR_dict['df'] # model names model_names = RFR_dict['model_names'] features_used_dict = RFR_dict['features_used_dict'] N_features_used = RFR_dict['N_features_used'] oob_scores = RFR_dict['oob_scores'] # - Evaluate performance of models (e.g. Root Mean Square Error (RMSE) ) # Also evaluate parameterisations param_names = [] if target == 'Iodide': param_names += [u'Chance2014_STTxx2_I', u'MacDonald2014_iodide', # u'Chance2014_Multivariate', ] # Aslo include the ensemble parameters if inc_ensemble: param_names += [var2use] # Calculate performance stats = calc_performance_of_params(df=df, params=param_names+model_names) # Just test on test set df_tmp = df.loc[df[testset] == True, :] stats_sub1 = utils.get_df_stats_MSE_RMSE(params=param_names+model_names, df=df_tmp[[target]+model_names+param_names], dataset_str=testset, target=target, add_sklean_metrics=add_sklean_metrics).T stats2concat = [stats, stats_sub1] if analysis4coastal: # Add testing on coastal dataset_split = 'Coastal' df_tmp = df.loc[(df['Coastal'] == 1), :] stats_sub2 = utils.get_df_stats_MSE_RMSE(params=param_names+model_names, df=df_tmp[[target]+model_names+param_names], target=target, dataset_str=dataset_split, add_sklean_metrics=add_sklean_metrics).T # Add testing on non-coastal dataset_split = 'Non coastal' df_tmp = df.loc[(df['Coastal'] == 0), :] stats_sub3 = utils.get_df_stats_MSE_RMSE(params=param_names+model_names, df=df_tmp[[target]+model_names+param_names], target=target, dataset_str=dataset_split, add_sklean_metrics=add_sklean_metrics).T # Add testing on coastal dataset_split = 'Coastal ({})'.format(testset) df_tmp = df.loc[(df['Coastal'] == 1) & (df[testset] == True), :] stats_sub4 = utils.get_df_stats_MSE_RMSE(params=param_names+model_names, df=df_tmp[[target]+model_names+param_names], target=target, dataset_str=dataset_split, add_sklean_metrics=add_sklean_metrics).T # Add testing on non-coastal dataset_split = 'Non coastal ({})'.format(testset) df_tmp = df.loc[(df['Coastal'] == 0) & (df[testset] == True), :] stats_sub5 = utils.get_df_stats_MSE_RMSE(params=param_names+model_names, df=df_tmp[[target]+model_names+param_names], target=target, dataset_str=dataset_split, add_sklean_metrics=add_sklean_metrics).T # Statistics to concat stats2concat += [stats_sub2, stats_sub3, stats_sub4, stats_sub5, ] # Combine all stats (RMSE and general stats) stats = pd.concat(stats2concat) # Add number of features too stats = stats.T feats =
pd.DataFrame(index=model_names)
pandas.DataFrame
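For reference, a minimal self-contained sketch of the pandas.DataFrame constructor completed in the row above: building an empty frame keyed only by an index and then attaching a column to it. The model names and feature counts here are illustrative placeholders, not values taken from the snippet.

import pandas as pd

# Placeholder model names standing in for the snippet's model_names list
model_names = ["RFR(TEMP)", "RFR(TEMP+SAL)", "RFR(TEMP+DEPTH+SAL)"]

# An index-only DataFrame acts as a container to attach per-model columns to
feats = pd.DataFrame(index=model_names)
feats["n_features_used"] = [1, 2, 3]  # e.g. number of features per model

print(feats)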
# -*- coding: utf-8 -*- # pylint: disable-msg=W0612,E1101 import itertools import warnings from warnings import catch_warnings from datetime import datetime from pandas.types.common import (is_integer_dtype, is_float_dtype, is_scalar) from pandas.compat import range, lrange, lzip, StringIO, lmap from pandas.tslib import NaT from numpy import nan from numpy.random import randn import numpy as np import pandas as pd from pandas import option_context from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice from pandas.core.api import (DataFrame, Index, Series, Panel, isnull, MultiIndex, Timestamp, Timedelta, UInt64Index) from pandas.formats.printing import pprint_thing from pandas import concat from pandas.core.common import PerformanceWarning from pandas.tests.indexing.common import _mklbl import pandas.util.testing as tm from pandas import date_range _verbose = False # ------------------------------------------------------------------------ # Indexing test cases def _generate_indices(f, values=False): """ generate the indicies if values is True , use the axis values is False, use the range """ axes = f.axes if values: axes = [lrange(len(a)) for a in axes] return itertools.product(*axes) def _get_value(f, i, values=False): """ return the value for the location i """ # check agains values if values: return f.values[i] # this is equiv of f[col][row]..... # v = f # for a in reversed(i): # v = v.__getitem__(a) # return v with catch_warnings(record=True): return f.ix[i] def _get_result(obj, method, key, axis): """ return the result for this obj with this key and this axis """ if isinstance(key, dict): key = key[axis] # use an artifical conversion to map the key as integers to the labels # so ix can work for comparisions if method == 'indexer': method = 'ix' key = obj._get_axis(axis)[key] # in case we actually want 0 index slicing try: xp = getattr(obj, method).__getitem__(_axify(obj, key, axis)) except: xp = getattr(obj, method).__getitem__(key) return xp def _axify(obj, key, axis): # create a tuple accessor axes = [slice(None)] * obj.ndim axes[axis] = key return tuple(axes) class TestIndexing(tm.TestCase): _objs = set(['series', 'frame', 'panel']) _typs = set(['ints', 'uints', 'labels', 'mixed', 'ts', 'floats', 'empty', 'ts_rev']) def setUp(self): self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2)) self.frame_ints = DataFrame(np.random.randn(4, 4), index=lrange(0, 8, 2), columns=lrange(0, 12, 3)) self.panel_ints = Panel(np.random.rand(4, 4, 4), items=lrange(0, 8, 2), major_axis=lrange(0, 12, 3), minor_axis=lrange(0, 16, 4)) self.series_uints = Series(np.random.rand(4), index=UInt64Index(lrange(0, 8, 2))) self.frame_uints = DataFrame(np.random.randn(4, 4), index=UInt64Index(lrange(0, 8, 2)), columns=UInt64Index(lrange(0, 12, 3))) self.panel_uints = Panel(np.random.rand(4, 4, 4), items=UInt64Index(lrange(0, 8, 2)), major_axis=UInt64Index(lrange(0, 12, 3)), minor_axis=UInt64Index(lrange(0, 16, 4))) self.series_labels = Series(np.random.randn(4), index=list('abcd')) self.frame_labels = DataFrame(np.random.randn(4, 4), index=list('abcd'), columns=list('ABCD')) self.panel_labels = Panel(np.random.randn(4, 4, 4), items=list('abcd'), major_axis=list('ABCD'), minor_axis=list('ZYXW')) self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8]) self.frame_mixed = DataFrame(np.random.randn(4, 4), index=[2, 4, 'null', 8]) self.panel_mixed = Panel(np.random.randn(4, 4, 4), items=[2, 4, 'null', 8]) self.series_ts = Series(np.random.randn(4), 
index=date_range('20130101', periods=4)) self.frame_ts = DataFrame(np.random.randn(4, 4), index=date_range('20130101', periods=4)) self.panel_ts = Panel(np.random.randn(4, 4, 4), items=date_range('20130101', periods=4)) dates_rev = (date_range('20130101', periods=4) .sort_values(ascending=False)) self.series_ts_rev = Series(np.random.randn(4), index=dates_rev) self.frame_ts_rev = DataFrame(np.random.randn(4, 4), index=dates_rev) self.panel_ts_rev = Panel(np.random.randn(4, 4, 4), items=dates_rev) self.frame_empty = DataFrame({}) self.series_empty = Series({}) self.panel_empty = Panel({}) # form agglomerates for o in self._objs: d = dict() for t in self._typs: d[t] = getattr(self, '%s_%s' % (o, t), None) setattr(self, o, d) def check_values(self, f, func, values=False): if f is None: return axes = f.axes indicies = itertools.product(*axes) for i in indicies: result = getattr(f, func)[i] # check agains values if values: expected = f.values[i] else: expected = f for a in reversed(i): expected = expected.__getitem__(a) tm.assert_almost_equal(result, expected) def check_result(self, name, method1, key1, method2, key2, typs=None, objs=None, axes=None, fails=None): def _eq(t, o, a, obj, k1, k2): """ compare equal for these 2 keys """ if a is not None and a > obj.ndim - 1: return def _print(result, error=None): if error is not None: error = str(error) v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s," "key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" % (name, result, t, o, method1, method2, a, error or '')) if _verbose: pprint_thing(v) try: rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a)) try: xp = _get_result(obj, method2, k2, a) except: result = 'no comp' _print(result) return detail = None try: if is_scalar(rs) and is_scalar(xp): self.assertEqual(rs, xp) elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
pandas.util.testing.assert_series_equal
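For orientation, a short hedged example of the assert_series_equal helper named above. pandas.util.testing is the legacy import location; recent pandas versions expose the same helper publicly as pandas.testing.assert_series_equal, which is what this sketch uses. The Series values are made up.

import pandas as pd
from pandas.testing import assert_series_equal

left = pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"])
right = pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"])

# Passes silently when values, index and dtype all match
assert_series_equal(left, right)

# Any mismatch (here: float64 vs int64) raises an informative AssertionError
try:
    assert_series_equal(left, right.astype("int64"))
except AssertionError as err:
    print("Series differ:\n", err)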
from Modules.appLogger import application_logger
from Modules.DataLoader import predictionDataLoader
from Modules.SaveLoadModel import saveLoadModel
from Modules.DataPreprocessor import dataPreprocessor
import pandas as pd


class predictData:
    """
    Class Name: predictData
    Description: Predicts the rating of a restaurant based on the inputs.
    Input: None
    Output: CSV file containing the ratings of the restaurants given in the input file.
    On Failure: Raise Exception

    Written By: <NAME>
    Version: 1.0
    Revisions: None
    """

    def __init__(self):
        try:
            self.prediction_logs = pd.read_csv('Logs\\Prediction Logs\\prediction_logs.csv')
            self.prediction_logs.drop('Unnamed :0', axis = 1, inplace= True)
        except:
            self.prediction_logs =
pd.DataFrame(columns=['date','time','logs'])
pandas.DataFrame
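A brief illustrative sketch of the fallback pattern completed above: returning an empty pandas.DataFrame with a fixed set of columns when an existing log file cannot be read. The file name and column names are placeholders, not the project's real paths.

import pandas as pd

def load_prediction_logs(path="prediction_logs.csv"):
    """Return existing logs, or an empty frame with the expected columns."""
    try:
        return pd.read_csv(path)
    except (FileNotFoundError, pd.errors.EmptyDataError):
        return pd.DataFrame(columns=["date", "time", "logs"])

logs = load_prediction_logs("does_not_exist.csv")
print(logs.columns.tolist())  # ['date', 'time', 'logs']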
from collections import deque from datetime import datetime import operator import re import numpy as np import pytest import pytz import pandas as pd from pandas import DataFrame, MultiIndex, Series import pandas._testing as tm import pandas.core.common as com from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int # ------------------------------------------------------------------- # Comparisons class TestFrameComparisons: # Specifically _not_ flex-comparisons def test_frame_in_list(self): # GH#12689 this should raise at the DataFrame level, not blocks df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD")) msg = "The truth value of a DataFrame is ambiguous" with pytest.raises(ValueError, match=msg): df in [None] def test_comparison_invalid(self): def check(df, df2): for (x, y) in [(df, df2), (df2, df)]: # we expect the result to match Series comparisons for # == and !=, inequalities should raise result = x == y expected = pd.DataFrame( {col: x[col] == y[col] for col in x.columns}, index=x.index, columns=x.columns, ) tm.assert_frame_equal(result, expected) result = x != y expected = pd.DataFrame( {col: x[col] != y[col] for col in x.columns}, index=x.index, columns=x.columns, ) tm.assert_frame_equal(result, expected) msgs = [ r"Invalid comparison between dtype=datetime64\[ns\] and ndarray", "invalid type promotion", ( # npdev 1.20.0 r"The DTypes <class 'numpy.dtype\[.*\]'> and " r"<class 'numpy.dtype\[.*\]'> do not have a common DType." ), ] msg = "|".join(msgs) with pytest.raises(TypeError, match=msg): x >= y with pytest.raises(TypeError, match=msg): x > y with pytest.raises(TypeError, match=msg): x < y with pytest.raises(TypeError, match=msg): x <= y # GH4968 # invalid date/int comparisons df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"]) df["dates"] = pd.date_range("20010101", periods=len(df)) df2 = df.copy() df2["dates"] = df["a"] check(df, df2) df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"]) df2 = pd.DataFrame( { "a": pd.date_range("20010101", periods=len(df)), "b": pd.date_range("20100101", periods=len(df)), } ) check(df, df2) def test_timestamp_compare(self): # make sure we can compare Timestamps on the right AND left hand side # GH#4982 df = pd.DataFrame( { "dates1": pd.date_range("20010101", periods=10), "dates2": pd.date_range("20010102", periods=10), "intcol": np.random.randint(1000000000, size=10), "floatcol": np.random.randn(10), "stringcol": list(tm.rands(10)), } ) df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"} for left, right in ops.items(): left_f = getattr(operator, left) right_f = getattr(operator, right) # no nats if left in ["eq", "ne"]: expected = left_f(df, pd.Timestamp("20010109")) result = right_f(pd.Timestamp("20010109"), df) tm.assert_frame_equal(result, expected) else: msg = ( "'(<|>)=?' 
not supported between " "instances of 'numpy.ndarray' and 'Timestamp'" ) with pytest.raises(TypeError, match=msg): left_f(df, pd.Timestamp("20010109")) with pytest.raises(TypeError, match=msg): right_f(pd.Timestamp("20010109"), df) # nats expected = left_f(df, pd.Timestamp("nat")) result = right_f(pd.Timestamp("nat"), df) tm.assert_frame_equal(result, expected) def test_mixed_comparison(self): # GH#13128, GH#22163 != datetime64 vs non-dt64 should be False, # not raise TypeError # (this appears to be fixed before GH#22163, not sure when) df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]]) other = pd.DataFrame([["a", "b"], ["c", "d"]]) result = df == other assert not result.any().any() result = df != other assert result.all().all() def test_df_boolean_comparison_error(self): # GH#4576, GH#22880 # comparing DataFrame against list/tuple with len(obj) matching # len(df.columns) is supported as of GH#22800 df = pd.DataFrame(np.arange(6).reshape((3, 2))) expected = pd.DataFrame([[False, False], [True, False], [False, False]]) result = df == (2, 2) tm.assert_frame_equal(result, expected) result = df == [2, 2] tm.assert_frame_equal(result, expected) def test_df_float_none_comparison(self): df = pd.DataFrame( np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"] ) result = df.__eq__(None) assert not result.any().any() def test_df_string_comparison(self): df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}]) mask_a = df.a > 1 tm.assert_frame_equal(df[mask_a], df.loc[1:1, :]) tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :]) mask_b = df.b == "foo" tm.assert_frame_equal(df[mask_b], df.loc[0:0, :]) tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :]) class TestFrameFlexComparisons: # TODO: test_bool_flex_frame needs a better name def test_bool_flex_frame(self): data = np.random.randn(5, 3) other_data = np.random.randn(5, 3) df = pd.DataFrame(data) other = pd.DataFrame(other_data) ndim_5 = np.ones(df.shape + (1, 3)) # Unaligned def _check_unaligned_frame(meth, op, df, other): part_o = other.loc[3:, 1:].copy() rs = meth(part_o) xp = op(df, part_o.reindex(index=df.index, columns=df.columns)) tm.assert_frame_equal(rs, xp) # DataFrame assert df.eq(df).values.all() assert not df.ne(df).values.any() for op in ["eq", "ne", "gt", "lt", "ge", "le"]: f = getattr(df, op) o = getattr(operator, op) # No NAs tm.assert_frame_equal(f(other), o(df, other)) _check_unaligned_frame(f, o, df, other) # ndarray tm.assert_frame_equal(f(other.values), o(df, other.values)) # scalar tm.assert_frame_equal(f(0), o(df, 0)) # NAs msg = "Unable to coerce to Series/DataFrame" tm.assert_frame_equal(f(np.nan), o(df, np.nan)) with pytest.raises(ValueError, match=msg): f(ndim_5) # Series def _test_seq(df, idx_ser, col_ser): idx_eq = df.eq(idx_ser, axis=0) col_eq = df.eq(col_ser) idx_ne = df.ne(idx_ser, axis=0) col_ne = df.ne(col_ser) tm.assert_frame_equal(col_eq, df == pd.Series(col_ser)) tm.assert_frame_equal(col_eq, -col_ne) tm.assert_frame_equal(idx_eq, -idx_ne) tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T) tm.assert_frame_equal(col_eq, df.eq(list(col_ser))) tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0)) tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0)) idx_gt = df.gt(idx_ser, axis=0) col_gt = df.gt(col_ser) idx_le = df.le(idx_ser, axis=0) col_le = df.le(col_ser) tm.assert_frame_equal(col_gt, df > pd.Series(col_ser)) tm.assert_frame_equal(col_gt, -col_le) tm.assert_frame_equal(idx_gt, -idx_le) tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T) idx_ge = df.ge(idx_ser, axis=0) col_ge = 
df.ge(col_ser) idx_lt = df.lt(idx_ser, axis=0) col_lt = df.lt(col_ser) tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser)) tm.assert_frame_equal(col_ge, -col_lt) tm.assert_frame_equal(idx_ge, -idx_lt) tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T) idx_ser = pd.Series(np.random.randn(5)) col_ser = pd.Series(np.random.randn(3)) _test_seq(df, idx_ser, col_ser) # list/tuple _test_seq(df, idx_ser.values, col_ser.values) # NA df.loc[0, 0] = np.nan rs = df.eq(df) assert not rs.loc[0, 0] rs = df.ne(df) assert rs.loc[0, 0] rs = df.gt(df) assert not rs.loc[0, 0] rs = df.lt(df) assert not rs.loc[0, 0] rs = df.ge(df) assert not rs.loc[0, 0] rs = df.le(df) assert not rs.loc[0, 0] def test_bool_flex_frame_complex_dtype(self): # complex arr = np.array([np.nan, 1, 6, np.nan]) arr2 = np.array([2j, np.nan, 7, None]) df = pd.DataFrame({"a": arr}) df2 = pd.DataFrame({"a": arr2}) msg = "|".join( [ "'>' not supported between instances of '.*' and 'complex'", r"unorderable types: .*complex\(\)", # PY35 ] ) with pytest.raises(TypeError, match=msg): # inequalities are not well-defined for complex numbers df.gt(df2) with pytest.raises(TypeError, match=msg): # regression test that we get the same behavior for Series df["a"].gt(df2["a"]) with pytest.raises(TypeError, match=msg): # Check that we match numpy behavior here df.values > df2.values rs = df.ne(df2) assert rs.values.all() arr3 = np.array([2j, np.nan, None]) df3 = pd.DataFrame({"a": arr3}) with pytest.raises(TypeError, match=msg): # inequalities are not well-defined for complex numbers df3.gt(2j) with pytest.raises(TypeError, match=msg): # regression test that we get the same behavior for Series df3["a"].gt(2j) with pytest.raises(TypeError, match=msg): # Check that we match numpy behavior here df3.values > 2j def test_bool_flex_frame_object_dtype(self): # corner, dtype=object df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]}) df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]}) result = df1.ne(df2) exp = pd.DataFrame({"col": [False, True, False]}) tm.assert_frame_equal(result, exp) def test_flex_comparison_nat(self): # GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT, # and _definitely_ not be NaN df = pd.DataFrame([pd.NaT]) result = df == pd.NaT # result.iloc[0, 0] is a np.bool_ object assert result.iloc[0, 0].item() is False result = df.eq(pd.NaT) assert result.iloc[0, 0].item() is False result = df != pd.NaT assert result.iloc[0, 0].item() is True result = df.ne(pd.NaT) assert result.iloc[0, 0].item() is True @pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"]) def test_df_flex_cmp_constant_return_types(self, opname): # GH 15077, non-empty DataFrame df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]}) const = 2 result = getattr(df, opname)(const).dtypes.value_counts() tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)])) @pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"]) def test_df_flex_cmp_constant_return_types_empty(self, opname): # GH 15077 empty DataFrame df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]}) const = 2 empty = df.iloc[:0] result = getattr(empty, opname)(const).dtypes.value_counts() tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)])) def test_df_flex_cmp_ea_dtype_with_ndarray_series(self): ii = pd.IntervalIndex.from_breaks([1, 2, 3]) df = pd.DataFrame({"A": ii, "B": ii}) ser = pd.Series([0, 0]) res = df.eq(ser, axis=0) expected = pd.DataFrame({"A": [False, False], "B": [False, False]}) tm.assert_frame_equal(res, 
expected) ser2 = pd.Series([1, 2], index=["A", "B"]) res2 = df.eq(ser2, axis=1) tm.assert_frame_equal(res2, expected) # ------------------------------------------------------------------- # Arithmetic class TestFrameFlexArithmetic: def test_floordiv_axis0(self): # make sure we df.floordiv(ser, axis=0) matches column-wise result arr = np.arange(3) ser = pd.Series(arr) df = pd.DataFrame({"A": ser, "B": ser}) result = df.floordiv(ser, axis=0) expected = pd.DataFrame({col: df[col] // ser for col in df.columns}) tm.assert_frame_equal(result, expected) result2 = df.floordiv(ser.values, axis=0) tm.assert_frame_equal(result2, expected) @pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed") @pytest.mark.parametrize("opname", ["floordiv", "pow"]) def test_floordiv_axis0_numexpr_path(self, opname): # case that goes through numexpr and has to fall back to masked_arith_op op = getattr(operator, opname) arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100 df = pd.DataFrame(arr) df["C"] = 1.0 ser = df[0] result = getattr(df, opname)(ser, axis=0) expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns}) tm.assert_frame_equal(result, expected) result2 = getattr(df, opname)(ser.values, axis=0) tm.assert_frame_equal(result2, expected) def test_df_add_td64_columnwise(self): # GH 22534 Check that column-wise addition broadcasts correctly dti = pd.date_range("2016-01-01", periods=10) tdi = pd.timedelta_range("1", periods=10) tser = pd.Series(tdi) df = pd.DataFrame({0: dti, 1: tdi}) result = df.add(tser, axis=0) expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi}) tm.assert_frame_equal(result, expected) def test_df_add_flex_filled_mixed_dtypes(self): # GH 19611 dti = pd.date_range("2016-01-01", periods=3) ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]") df = pd.DataFrame({"A": dti, "B": ser}) other = pd.DataFrame({"A": ser, "B": ser}) fill = pd.Timedelta(days=1).to_timedelta64() result = df.add(other, fill_value=fill) expected = pd.DataFrame( { "A": pd.Series( ["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]" ), "B": ser * 2, } ) tm.assert_frame_equal(result, expected) def test_arith_flex_frame( self, all_arithmetic_operators, float_frame, mixed_float_frame ): # one instance of parametrized fixture op = all_arithmetic_operators def f(x, y): # r-versions not in operator-stdlib; get op without "r" and invert if op.startswith("__r"): return getattr(operator, op.replace("__r", "__"))(y, x) return getattr(operator, op)(x, y) result = getattr(float_frame, op)(2 * float_frame) expected = f(float_frame, 2 * float_frame) tm.assert_frame_equal(result, expected) # vs mix float result = getattr(mixed_float_frame, op)(2 * mixed_float_frame) expected = f(mixed_float_frame, 2 * mixed_float_frame) tm.assert_frame_equal(result, expected) _check_mixed_float(result, dtype=dict(C=None)) @pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"]) def test_arith_flex_frame_mixed( self, op, int_frame, mixed_int_frame, mixed_float_frame ): f = getattr(operator, op) # vs mix int result = getattr(mixed_int_frame, op)(2 + mixed_int_frame) expected = f(mixed_int_frame, 2 + mixed_int_frame) # no overflow in the uint dtype = None if op in ["__sub__"]: dtype = dict(B="uint64", C=None) elif op in ["__add__", "__mul__"]: dtype = dict(C=None) tm.assert_frame_equal(result, expected) _check_mixed_int(result, dtype=dtype) # vs mix float result = getattr(mixed_float_frame, op)(2 * mixed_float_frame) expected = f(mixed_float_frame, 2 * 
mixed_float_frame) tm.assert_frame_equal(result, expected) _check_mixed_float(result, dtype=dict(C=None)) # vs plain int result = getattr(int_frame, op)(2 * int_frame) expected = f(int_frame, 2 * int_frame) tm.assert_frame_equal(result, expected) def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame): # one instance of parametrized fixture op = all_arithmetic_operators # Check that arrays with dim >= 3 raise for dim in range(3, 6): arr = np.ones((1,) * dim) msg = "Unable to coerce to Series/DataFrame" with pytest.raises(ValueError, match=msg): getattr(float_frame, op)(arr) def test_arith_flex_frame_corner(self, float_frame): const_add = float_frame.add(1) tm.assert_frame_equal(const_add, float_frame + 1) # corner cases result = float_frame.add(float_frame[:0]) tm.assert_frame_equal(result, float_frame * np.nan) result = float_frame[:0].add(float_frame) tm.assert_frame_equal(result, float_frame * np.nan) with pytest.raises(NotImplementedError, match="fill_value"): float_frame.add(float_frame.iloc[0], fill_value=3) with pytest.raises(NotImplementedError, match="fill_value"): float_frame.add(float_frame.iloc[0], axis="index", fill_value=3) def test_arith_flex_series(self, simple_frame): df = simple_frame row = df.xs("a") col = df["two"] # after arithmetic refactor, add truediv here ops = ["add", "sub", "mul", "mod"] for op in ops: f = getattr(df, op) op = getattr(operator, op) tm.assert_frame_equal(f(row), op(df, row)) tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T) # special case for some reason tm.assert_frame_equal(df.add(row, axis=None), df + row) # cases which will be refactored after big arithmetic refactor tm.assert_frame_equal(df.div(row), df / row) tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T) # broadcasting issue in GH 7325 df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64") expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]]) result = df.div(df[0], axis="index") tm.assert_frame_equal(result, expected) df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64") expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]]) result = df.div(df[0], axis="index") tm.assert_frame_equal(result, expected) def test_arith_flex_zero_len_raises(self): # GH 19522 passing fill_value to frame flex arith methods should # raise even in the zero-length special cases ser_len0 = pd.Series([], dtype=object) df_len0 = pd.DataFrame(columns=["A", "B"]) df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) with pytest.raises(NotImplementedError, match="fill_value"): df.add(ser_len0, fill_value="E") with pytest.raises(NotImplementedError, match="fill_value"): df_len0.sub(df["A"], axis=None, fill_value=3) def test_flex_add_scalar_fill_value(self): # GH#12723 dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float") df = pd.DataFrame({"foo": dat}, index=range(6)) exp = df.fillna(0).add(2) res = df.add(2, fill_value=0) tm.assert_frame_equal(res, exp) class TestFrameArithmetic: def test_td64_op_nat_casting(self): # Make sure we don't accidentally treat timedelta64(NaT) as datetime64 # when calling dispatch_to_series in DataFrame arithmetic ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]") df = pd.DataFrame([[1, 2], [3, 4]]) result = df * ser expected = pd.DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
pandas._testing.assert_frame_equal
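For reference, a compact sketch of the DataFrame comparison helper named above. pandas._testing is an internal module, so this example goes through the public pandas.testing entry point instead; the frames themselves are toy data.

import pandas as pd
from pandas.testing import assert_frame_equal

left = pd.DataFrame({"a": [1, 2], "b": [0.5, 1.5]})
right = pd.DataFrame({"a": [1, 2], "b": [0.5, 1.5]})

# Passes silently; raises AssertionError on any shape, dtype, index or value mismatch
assert_frame_equal(left, right)

# check_like=True ignores the ordering of rows and columns
assert_frame_equal(left, right[["b", "a"]], check_like=True)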
from __future__ import division, print_function import os import click import numpy as np import pandas as pd def load_games(game_data_fname, remove_ties=False): """Load data containing results of each game and return a DataFrame. Parameters ---------- game_data_fname : str, filename of Armchair Analysis GAME table remove_ties : boolean, optional Returns ------- games : DataFrame """ games = pd.read_csv(game_data_fname, index_col=0) # Data from 2000 import is less reliable, omit this season # and use regular season games only. games = (games.query('seas >= 2001 & wk <= 17') .drop(['stad', 'temp', 'humd', 'wspd', 'wdir', 'cond', 'surf'], axis='columns')) games['winner'] = games.apply(winner, axis=1) if remove_ties: games = games[games['winner'] != 'TIE'] return games def winner(row): """Returns the team name that won the game, otherwise returns 'TIE'""" if row.ptsv > row.ptsh: return row.v elif row.ptsh > row.ptsv: return row.h else: return 'TIE' def load_pbp(pbp_data_fname, games, remove_knees=False): """Load the play by play data and return a DataFrame. Parameters ---------- pbp_data_fname : str, location of play by play data games : DataFrame, game-level DataFrame created by load_games remove_knees : boolean, optional Returns ------- pbp : DataFrame """ pbp = pd.read_csv(pbp_data_fname, index_col=1, low_memory=False, usecols=['gid', 'pid', 'off', 'def', 'type', 'qtr', 'min', 'sec', 'kne', 'ptso', 'ptsd', 'timo', 'timd', 'dwn', 'ytg', 'yfog', 'yds', 'fd', 'fgxp', 'good', 'pnet', 'pts', 'detail']) # Remove overtime pbp = pbp[pbp.qtr <= 4] # pid 183134 should have a value of 0 for min, but has "0:00" pbp['min'] = pbp['min'].replace({'0:00': 0}) pbp['min'] = pbp['min'].astype(np.int64) # Restrict to regular season games after 2000 pbp = pbp[pbp.gid.isin(games.index)] if remove_knees: pbp = pbp[pbp.kne.isnull()] return pbp def switch_offense(df): """Swap game state columns for offense & defense dependent variables. The play by play data has some statistics on punts and kickoffs in terms of the receiving team. Switch these to reflect the game state for the kicking team.""" df.loc[(df['type'] == 'PUNT') | (df['type'] == 'KOFF'), ['off', 'def', 'ptso', 'ptsd', 'timo', 'timd']] = df.loc[ (df['type'] == 'PUNT') | (df['type'] == 'KOFF'), ['def', 'off', 'ptsd', 'ptso', 'timd', 'timo']].values # If any points are scored on a PUNT/KOFF, they are given in terms # of the receiving team -- switch this. df.loc[(df['type'] == 'PUNT') | (df['type'] == 'KOFF'), 'pts'] = ( -1 * df.loc[(df['type'] == 'PUNT') | (df['type'] == 'KOFF'), 'pts'].values) return df def code_fourth_downs(df): """Parse all fourth downs and determine if teams intended to go for it, punt, or attempt a field goal. If intent is not clear, do not include the play. """ fourths = df.loc[df.dwn == 4, :].copy() fourths['goforit'] = np.zeros(fourths.shape[0]) fourths['punt'] = np.zeros(fourths.shape[0]) fourths['kick'] = np.zeros(fourths.shape[0]) # Omit false start, delay of game, encroachment, neutral zone infraction # We cannot infer from these plays if the offense was going to # go for it or not. 
omitstring = (r'encroachment|false start|delay of game|neutral zone ' 'infraction') fourths = fourths[-(fourths.detail.str.contains(omitstring, case=False))] # Ran a play fourths.loc[(fourths['type'] == 'RUSH') | (fourths['type'] == 'PASS'), 'goforit'] = 1 fourths.loc[(fourths['type'] == 'RUSH') | (fourths['type'] == 'PASS'), 'punt'] = 0 fourths.loc[(fourths['type'] == 'RUSH') | (fourths['type'] == 'PASS'), 'kick'] = 0 # Field goal attempts and punts fourths.loc[(fourths['type'] == 'FGXP') | (fourths['type'] == 'PUNT'), 'goforit'] = 0 fourths.loc[(fourths['type'] == 'FGXP'), 'kick'] = 1 fourths.loc[(fourths['type'] == 'PUNT'), 'punt'] = 1 # Punted, but penalty on play puntstring = r'punts|out of bounds' fourths.loc[(fourths['type'] == 'NOPL') & (fourths.detail.str.contains(puntstring, case=False)), 'punt'] = 1 # Kicked, but penalty on play kickstring = r'field goal is|field goal attempt' fourths.loc[(fourths['type'] == 'NOPL') & (fourths.detail.str.contains(kickstring, case=False)), 'kick'] = 1 # Went for it, but penalty on play gostring = (r'pass to|incomplete|sacked|left end|up the middle|' 'pass interference|right tackle|right guard|right end|' 'pass intended|left tackle|left guard|pass deep|' 'pass short|up the middle') fourths.loc[(fourths['type'] == 'NOPL') & (fourths.detail.str.contains(gostring, case=False)) & -(fourths.detail.str.contains(puntstring, case=False)) & -(fourths.detail.str.contains(kickstring, case=False)), 'goforit'] = 1 fourths = fourths[fourths[['goforit', 'punt', 'kick']].sum(axis=1) == 1] return fourths def fg_success_rate(fg_data_fname, out_fname, min_pid=473957): """Historical field goal success rates by field position. By default, uses only attempts from >= 2011 season to reflect more improved kicker performance. Returns and writes results to a CSV. NOTE: These are somewhat sparse and irregular at longer FG ranges. This is because kickers who attempt long FGs are not selected at random -- they are either in situations which require a long FG attempt or are kickers with a known long range. The NYT model uses a logistic regression kicking model developed by <NAME> to smooth out these rates. """ fgs = pd.read_csv(fg_data_fname) fgs = fgs.loc[(fgs.fgxp == 'FG') & (fgs.pid >= min_pid)].copy() fgs_grouped = fgs.groupby('dist')['good'].agg( {'N': len, 'average': np.mean}).reset_index() fgs_grouped['yfog'] = 100 - (fgs_grouped.dist - 17) fgs_grouped[['yfog', 'average']].to_csv(out_fname, index=False) return fgs_grouped def nyt_fg_model(fname, outname): """Sub in simple logit for field goal success rates.""" fgs = pd.read_csv(fname) fgs['yfog'] = 100 - (fgs.fg_distance - 17) fgs.to_csv(outname) return fgs def punt_averages(punt_data_fname, out_fname, joined): """Group punts by kicking field position to get average return distance. Currently does not incorporate the possibility of a muffed punt or punt returned for a TD. """ punts =
pd.read_csv(punt_data_fname, index_col=0)
pandas.read_csv
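A minimal hedged sketch of the pandas.read_csv call completed above: reading a CSV with its first column as the index and then aggregating by field position. The in-memory CSV and its column names are invented stand-ins for the Armchair Analysis punt file.

import io
import pandas as pd

# Stand-in data; the real code reads punt_data_fname from disk
csv_text = """pid,yfog,pnet
1,35,42.0
2,35,38.5
3,60,31.0
"""

punts = pd.read_csv(io.StringIO(csv_text), index_col=0)

# Average net punt distance by kicking field position
avg_by_yfog = punts.groupby("yfog")["pnet"].mean()
print(avg_by_yfog)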
import numpy as np import pandas as pd from numpy.testing import assert_array_equal from pandas.testing import assert_frame_equal from nose.tools import (assert_equal, assert_almost_equal, raises, ok_, eq_) from rsmtool.preprocessor import (FeaturePreprocessor, FeatureSubsetProcessor, FeatureSpecsProcessor) class TestFeaturePreprocessor: def setUp(self): self.fpp = FeaturePreprocessor() def test_select_candidates_with_N_or_more_items(self): data = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2 + ['c'], 'sc1': [2, 3, 1, 5, 6, 1]}) df_included_expected = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2, 'sc1': [2, 3, 1, 5, 6]}) df_excluded_expected = pd.DataFrame({'candidate': ['c'], 'sc1': [1]}) (df_included, df_excluded) = FeaturePreprocessor.select_candidates(data, 2) assert_frame_equal(df_included, df_included_expected) assert_frame_equal(df_excluded, df_excluded_expected) def test_select_candidates_with_N_or_more_items_all_included(self): data = pd.DataFrame({'candidate': ['a'] * 2 + ['b'] * 2 + ['c'] * 2, 'sc1': [2, 3, 1, 5, 6, 1]}) (df_included, df_excluded) = FeaturePreprocessor.select_candidates(data, 2) assert_frame_equal(df_included, data) assert_equal(len(df_excluded), 0) def test_select_candidates_with_N_or_more_items_all_excluded(self): data = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2 + ['c'], 'sc1': [2, 3, 1, 5, 6, 1]}) (df_included, df_excluded) = FeaturePreprocessor.select_candidates(data, 4) assert_frame_equal(df_excluded, data) assert_equal(len(df_included), 0) def test_select_candidates_with_N_or_more_items_custom_name(self): data = pd.DataFrame({'ID': ['a'] * 3 + ['b'] * 2 + ['c'], 'sc1': [2, 3, 1, 5, 6, 1]}) df_included_expected = pd.DataFrame({'ID': ['a'] * 3 + ['b'] * 2, 'sc1': [2, 3, 1, 5, 6]}) df_excluded_expected = pd.DataFrame({'ID': ['c'], 'sc1': [1]}) (df_included, df_excluded) = FeaturePreprocessor.select_candidates(data, 2, 'ID') assert_frame_equal(df_included, df_included_expected) assert_frame_equal(df_excluded, df_excluded_expected) def test_rename_no_columns(self): df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length', 'raw', 'candidate', 'feature1', 'feature2']) df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2', 'length', 'raw', 'candidate') assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length', 'raw', 'candidate', 'feature1', 'feature2']) def test_rename_no_columns_some_values_none(self): df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'feature1', 'feature2']) df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2', None, None, None) assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'feature1', 'feature2']) def test_rename_no_used_columns_but_unused_columns_with_default_names(self): df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length', 'feature1', 'feature2']) df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2', None, None, None) assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', '##length##', 'feature1', 'feature2']) def test_rename_used_columns(self): df = pd.DataFrame(columns=['id', 'r1', 'r2', 'words', 'SR', 'feature1', 'feature2']) df = self.fpp.rename_default_columns(df, [], 'id', 'r1', 'r2', 'words', 'SR', None) assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length', 'raw', 'feature1', 'feature2']) def test_rename_used_columns_and_unused_columns_with_default_names(self): df = pd.DataFrame(columns=['id', 'r1', 'r2', 'words', 'raw', 'feature1', 'feature2']) df = self.fpp.rename_default_columns(df, [], 'id', 'r1', 'r2', 
'words', None, None) assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length', '##raw##', 'feature1', 'feature2']) def test_rename_used_columns_with_swapped_names(self): df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'raw', 'words', 'feature1', 'feature2']) df = self.fpp.rename_default_columns(df, [], 'id', 'sc2', 'sc1', 'words', None, None) assert_array_equal(df.columns, ['spkitemid', 'sc2', 'sc1', '##raw##', 'length', 'feature1', 'feature2']) def test_rename_used_columns_but_not_features(self): df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'length', 'feature2']) df = self.fpp.rename_default_columns(df, ['length'], 'id', 'sc1', 'sc2', None, None, None) assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length', 'feature2']) def test_rename_candidate_column(self): df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length', 'apptNo', 'feature1', 'feature2']) df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2', None, None, 'apptNo') assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', '##length##', 'candidate', 'feature1', 'feature2']) def test_rename_candidate_named_sc2(self): df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'question', 'l1', 'score']) df_renamed = self.fpp.rename_default_columns(df, [], 'id', 'sc1', None, None, 'score', 'sc2') assert_array_equal(df_renamed.columns, ['spkitemid', 'sc1', 'candidate', 'question', 'l1', 'raw']) @raises(KeyError) def test_check_subgroups_missing_columns(self): df = pd.DataFrame(columns=['a', 'b', 'c']) subgroups = ['a', 'd'] FeaturePreprocessor.check_subgroups(df, subgroups) def test_check_subgroups_nothing_to_replace(self): df = pd.DataFrame({'a': ['1', '2'], 'b': ['32', '34'], 'd': ['abc', 'def']}) subgroups = ['a', 'd'] df_out = FeaturePreprocessor.check_subgroups(df, subgroups) assert_frame_equal(df_out, df) def test_check_subgroups_replace_empty(self): df = pd.DataFrame({'a': ['1', ''], 'b': [' ', '34'], 'd': ['ab c', ' ']}) subgroups = ['a', 'd'] df_expected = pd.DataFrame({'a': ['1', 'No info'], 'b': [' ', '34'], 'd': ['ab c', 'No info']}) df_out = FeaturePreprocessor.check_subgroups(df, subgroups) assert_frame_equal(df_out, df_expected) def test_filter_on_column(self): bad_df = pd.DataFrame({'spkitemlab': np.arange(1, 9, dtype='int64'), 'sc1': ['00', 'TD', '02', '03'] * 2}) df_filtered_with_zeros = pd.DataFrame({'spkitemlab': [1, 3, 4, 5, 7, 8], 'sc1': [0.0, 2.0, 3.0] * 2}) df_filtered = pd.DataFrame({'spkitemlab': [3, 4, 7, 8], 'sc1': [2.0, 3.0] * 2}) (output_df_with_zeros, output_excluded_df_with_zeros) = self.fpp.filter_on_column(bad_df, 'sc1', 'spkitemlab', exclude_zeros=False) output_df, output_excluded_df = self.fpp.filter_on_column(bad_df, 'sc1', 'spkitemlab', exclude_zeros=True) assert_frame_equal(output_df_with_zeros, df_filtered_with_zeros) assert_frame_equal(output_df, df_filtered) def test_filter_on_column_all_non_numeric(self): bad_df = pd.DataFrame({'sc1': ['A', 'I', 'TD', 'TD'] * 2, 'spkitemlab': range(1, 9)}) expected_df_excluded = bad_df.copy() expected_df_excluded.drop('sc1', axis=1, inplace=True) df_filtered, df_excluded = self.fpp.filter_on_column(bad_df, 'sc1', 'spkitemlab', exclude_zeros=True) ok_(df_filtered.empty) ok_("sc1" not in df_filtered.columns) assert_frame_equal(df_excluded, expected_df_excluded, check_dtype=False) def test_filter_on_column_std_epsilon_zero(self): # Test that the function exclude columns where std is returned as # very low value rather than 0 data = {'id': np.arange(1, 21, dtype='int64'), 'feature_ok': np.arange(1, 21), 'feature_zero_sd': 
[1.5601] * 20} bad_df = pd.DataFrame(data=data) output_df, output_excluded_df = self.fpp.filter_on_column(bad_df, 'feature_zero_sd', 'id', exclude_zeros=False, exclude_zero_sd=True) good_df = bad_df[['id', 'feature_ok']].copy() assert_frame_equal(output_df, good_df) ok_(output_excluded_df.empty) def test_filter_on_column_with_inf(self): # Test that the function exclude columns where feature value is 'inf' data = pd.DataFrame({'feature_1': [1.5601, 0, 2.33, 11.32], 'feature_ok': np.arange(1, 5)}) data['feature_with_inf'] = 1 / data['feature_1'] data['id'] = np.arange(1, 5, dtype='int64') bad_df = data[np.isinf(data['feature_with_inf'])].copy() good_df = data[~np.isinf(data['feature_with_inf'])].copy() bad_df.reset_index(drop=True, inplace=True) good_df.reset_index(drop=True, inplace=True) output_df, output_excluded_df = self.fpp.filter_on_column(data, 'feature_with_inf', 'id', exclude_zeros=False, exclude_zero_sd=True) assert_frame_equal(output_df, good_df) assert_frame_equal(output_excluded_df, bad_df) def test_filter_on_flag_column_empty_flag_dictionary(self): # no flags specified, keep the data frame as is df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'], 'sc1': [1, 2, 1, 3], 'feature': [2, 3, 4, 5], 'flag1': [0, 0, 0, 0], 'flag2': [1, 2, 2, 1]}) flag_dict = {} df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict) assert_frame_equal(df_new, df) eq_(len(df_excluded), 0) def test_filter_on_flag_column_nothing_to_exclude_int_column_and_dict(self): df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'], 'sc1': [1, 2, 1, 3], 'feature': [2, 3, 4, 5], 'flag1': [0, 1, 2, 3]}) flag_dict = {'flag1': [0, 1, 2, 3, 4]} df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict) assert_frame_equal(df_new, df) eq_(len(df_excluded), 0) def test_filter_on_flag_column_nothing_to_exclude_float_column_and_dict(self): df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'], 'sc1': [1, 2, 1, 3], 'feature': [2, 3, 4, 5], 'flag1': [0.5, 1.1, 2.2, 3.6]}) flag_dict = {'flag1': [0.5, 1.1, 2.2, 3.6, 4.5]} df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict) assert_frame_equal(df_new, df) eq_(len(df_excluded), 0) def test_filter_on_flag_column_nothing_to_exclude_str_column_and_dict(self): df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'], 'sc1': [1, 2, 1, 3], 'feature': [2, 3, 4, 5], 'flag1': ['a', 'b', 'c', 'd']}) flag_dict = {'flag1': ['a', 'b', 'c', 'd', 'e']} df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict) assert_frame_equal(df_new, df) eq_(len(df_excluded), 0) def test_filter_on_flag_column_nothing_to_exclude_float_column_int_dict(self): df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'], 'sc1': [1, 2, 1, 3], 'feature': [2, 3, 4, 5], 'flag1': [0.0, 1.0, 2.0, 3.0]}) flag_dict = {'flag1': [0, 1, 2, 3, 4]} df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict) assert_frame_equal(df_new, df) eq_(len(df_excluded), 0) def test_filter_on_flag_column_nothing_to_exclude_int_column_float_dict(self): df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'], 'sc1': [1, 2, 1, 3], 'feature': [2, 3, 4, 5], 'flag1': [0, 1, 2, 3]}) flag_dict = {'flag1': [0.0, 1.0, 2.0, 3.0, 4.5]} df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict) assert_frame_equal(df_new, df) eq_(len(df_excluded), 0) def test_filter_on_flag_column_nothing_to_exclude_str_column_float_dict(self): df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'], 'sc1': [1, 2, 1, 3], 'feature': [2, 3, 4, 5], 'flag1': ['4', '1', '2', '3.5']}) flag_dict = {'flag1': [0.0, 1.0, 2.0, 3.5, 
4.0]} df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict) assert_frame_equal(df_new, df) eq_(len(df_excluded), 0) def test_filter_on_flag_column_nothing_to_exclude_float_column_str_dict(self): df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'], 'sc1': [1, 2, 1, 3], 'feature': [2, 3, 4, 5], 'flag1': [4.0, 1.0, 2.0, 3.5]}) flag_dict = {'flag1': ['1', '2', '3.5', '4', 'TD']} df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict) assert_frame_equal(df_new, df) eq_(len(df_excluded), 0) def test_filter_on_flag_column_nothing_to_exclude_str_column_int_dict(self): df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'], 'sc1': [1, 2, 1, 3], 'feature': [2, 3, 4, 5], 'flag1': ['0.0', '1.0', '2.0', '3.0']}) flag_dict = {'flag1': [0, 1, 2, 3, 4]} df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict) assert_frame_equal(df_new, df) eq_(len(df_excluded), 0) def test_filter_on_flag_column_nothing_to_exclude_int_column_str_dict(self): df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'], 'sc1': [1, 2, 1, 3], 'feature': [2, 3, 4, 5], 'flag1': [0, 1, 2, 3]}) flag_dict = {'flag1': ['0.0', '1.0', '2.0', '3.0', 'TD']} df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict) assert_frame_equal(df_new, df) eq_(len(df_excluded), 0) def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_str_dict(self): df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'], 'sc1': [1, 2, 1, 3], 'feature': [2, 3, 4, 5], 'flag1': [0, '1.0', 2, 3.5]}) flag_dict = {'flag1': ['0.0', '1.0', '2.0', '3.5', 'TD']} df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict) assert_frame_equal(df_new, df) eq_(len(df_excluded), 0) def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_int_dict(self): df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'], 'sc1': [1, 2, 1, 3], 'feature': [2, 3, 4, 5], 'flag1': [0, '1.0', 2, 3.0]}) flag_dict = {'flag1': [0, 1, 2, 3, 4]} df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict) assert_frame_equal(df_new, df) eq_(len(df_excluded), 0) def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_float_dict(self): df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'], 'sc1': [1, 2, 1, 3], 'feature': [2, 3, 4, 5], 'flag1': [0, '1.5', 2, 3.5]}) flag_dict = {'flag1': [0.0, 1.5, 2.0, 3.5, 4.0]} df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict) assert_frame_equal(df_new, df) eq_(len(df_excluded), 0) def test_filter_on_flag_column_nothing_to_exclude_int_column_mixed_type_dict(self): df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'], 'sc1': [1, 2, 1, 3], 'feature': [2, 3, 4, 5], 'flag1': [0, 1, 2, 3]}) flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']} df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
pandas.testing.assert_frame_equal
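The row above ends in a call to pandas.testing.assert_frame_equal, the comparison helper used throughout the rsmtool tests. Below is a minimal, self-contained sketch of how that call behaves; the frame contents are invented for illustration and are not taken from the test suite.

import pandas as pd
from pandas.testing import assert_frame_equal

# Identical frames pass silently; any mismatch raises an AssertionError.
left = pd.DataFrame({"candidate": ["a", "a", "b"], "sc1": [2, 3, 5]})
right = pd.DataFrame({"candidate": ["a", "a", "b"], "sc1": [2, 3, 5]})
assert_frame_equal(left, right)

# check_dtype=False tolerates int/float differences, as some of the tests above do.
assert_frame_equal(left, right.astype({"sc1": "float64"}), check_dtype=False)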
import numpy as np
import pandas as pd

# Compute moving averages across a defined window. Used to compute regimes
# INTERPRETATION: The regime is the short MAV minus the long MAV. A positive value indicates
# a bullish trend, so we want to buy as soon as the regime turns positive.
# Therefore, we want to identify in our data window points where the regime
# transitions from negative to positive (to buy) or from positive to negative (to sell)
def compute_mav_regime(short_interval, long_interval, data):
    # Labels for new columns
    short_label = "%sd_mav" % (str(short_interval))
    long_label = "%sd_mav" % (str(long_interval))

    # Compute the moving averages
    data[short_label] = np.round(data["Close"].rolling(window = short_interval, center = False).mean(), 2)
    data[long_label] = np.round(data["Close"].rolling(window = long_interval, center = False).mean(), 2)

    # Filter out the empty filler data (i.e. data for days needed to compute MAV_0
    # but which itself does not have a MAV value calculated for it)
    data = data.dropna(how = "any")

    regime = (data[short_label] - data[long_label] > 0).apply(lambda x: 1 if x==True else -1)
    return regime
    # regime = data[short_label] - data[long_label] > 0
    # regime = regime.apply(lambda x: 1 if x==True else -1)
    # return regime


# Compute gain/loss days and use to calculate on-balance volume (OBV)
# INTERPRETATION: OBV correlates volume to the stock's ability to appreciate on a day-to-day basis.
# therefore, if we see that OBV is rising and price is not, it's a good time to buy because the rising
# OBV suggests that price is soon to follow.
# Therefore, we want a way to compare OBV and price (maybe MAV?). The higher OBV/MAV, the stronger
# the buy signal is. As that value decreases we will know to sell
def compute_obv(data):
    indicator_col = (data["Close"] - data["Open"] > 0).apply(lambda x: 1 if x==True else -1)
    obv_col = (data["Volume"]*indicator_col).cumsum()
    return obv_col


# Compute moving average convergence-divergence (MACD) as a difference of exponential moving averages
# and also compute signal line, report both signals (MACD sign, as well as MACD against signal line)
# INTERPRETATION: Same as regime, simply using a different scheme of averages
# TODO - Fix these calculations - the EWM return type does not allow for series subtraction
def compute_macd(data):
    exp_26 = np.round(data["Close"].ewm(span = 26).mean(), 2)
    exp_12 = np.round(data["Close"].ewm(span = 12).mean(), 2)
    macd = (exp_12 - exp_26 > 0).apply(lambda x: 1 if x==True else -1)
    macd_signal = (macd - macd.ewm(span = 9).mean() > 0).apply(lambda x: 1 if x==True else -1)
    return macd_signal


################################################
################################################
# TODO: Insert method to do RSI calculations
# See http://www.investopedia.com/terms/r/rsi.asp
################################################
################################################


def pandas_candlestick_ohlc(dat, stick = "day", otherseries = None):
    """
    :param dat: pandas DataFrame object with datetime64 index, and float columns "Open", "High",
        "Low", and "Close", likely created via DataReader from "yahoo"
    :param stick: A string or number indicating the period of time covered by a single candlestick.
        Valid string inputs include "day", "week", "month", and "year", ("day" default), and any
        numeric input indicates the number of trading days included in a period
    :param otherseries: An iterable that will be coerced into a list, containing the columns of dat
        that hold other series to be plotted as lines

    This will show a Japanese candlestick plot for stock data stored in dat, also plotting other series if passed.
    """
    mondays = WeekdayLocator(MONDAY)    # major ticks on the mondays
    alldays = DayLocator()              # minor ticks on the days
    dayFormatter = DateFormatter('%d')  # e.g., 12

    # Create a new DataFrame which includes OHLC data for each period specified by stick input
    transdat = dat.loc[:,["Open", "High", "Low", "Close"]]
    if (type(stick) == str):
        if stick == "day":
            plotdat = transdat
            stick = 1  # Used for plotting
        elif stick in ["week", "month", "year"]:
            if stick == "week":
                transdat["week"] = pd.to_datetime(transdat.index).map(lambda x: x.isocalendar()[1])  # Identify weeks
            elif stick == "month":
                transdat["month"] = pd.to_datetime(transdat.index).map(lambda x: x.month)  # Identify months
            transdat["year"] =
pd.to_datetime(transdat.index)
pandas.to_datetime
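The candlestick helper above completes with pandas.to_datetime applied to the frame's index. A small sketch of that pattern, with an invented date index, is given below to show how the week/month/year grouping keys are derived.

import pandas as pd

# Convert an index of date strings into a DatetimeIndex, then map out
# the grouping keys the helper uses (ISO week, calendar month, year).
idx = pd.Index(["2024-01-02", "2024-01-08", "2024-02-05"])
dates = pd.to_datetime(idx)
weeks = dates.map(lambda x: x.isocalendar()[1])
months = dates.map(lambda x: x.month)
years = dates.map(lambda x: x.year)
print(list(weeks), list(months), list(years))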
import json
import gzip
import argparse

import pandas as pd


def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser()
    parser = argparse.ArgumentParser(description='Parse information from HPA json export and saves as CSV.')
    parser.add_argument('-i', '--input', help='HPA JSON input file name.', required=True, type=str)
    parser.add_argument('-o', '--output', help='Output file name.', required=True, type=str)
    args = parser.parse_args()

    # Get parameters:
    input_file = args.input
    output_file = args.output

    parsed_entries = []
    with open(input_file, "r") as f:
        for line in f:
            entry = json.loads(line)
            parsed_entry = {
                'id': entry['Ensembl'],
                'hpa_subcellular_location': entry['Subcellular location'],
                'hpa_rna_tissue_distribution': entry['RNA tissue distribution'],
                'hpa_rna_tissue_specificity': entry['RNA tissue specificity'],
            }
            parsed_entry['hpa_rna_specific_tissues'] = list(entry['RNA tissue specific NX'].keys()) if entry['RNA tissue specific NX'] is not None else None
            parsed_entries.append(parsed_entry)

    parsed_entries_df =
pd.DataFrame(parsed_entries)
pandas.DataFrame
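The HPA parsing script above finishes by handing its list of per-gene dictionaries to pandas.DataFrame. The sketch below shows that construction pattern with invented records; the column names mirror the script, the values are placeholders.

import pandas as pd

# Each dict becomes one row; keys become columns, missing keys become NaN.
records = [
    {"id": "ENSG-0001", "hpa_subcellular_location": "Nucleoplasm"},
    {"id": "ENSG-0002", "hpa_subcellular_location": None},
]
df = pd.DataFrame(records)
print(df.columns.tolist())   # ['id', 'hpa_subcellular_location']
print(len(df))               # 2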
# -*- coding: utf-8 -*- """ Tests of the `masci_tools.vis.data` module """ import pytest from itertools import product import numpy as np import pandas as pd import copy USE_CDS = True try: from bokeh.models import ColumnDataSource except ImportError: USE_CDS = False def test_normalize_list_or_array(): """ Test of the normalize_list_or_array function """ from masci_tools.vis.data import normalize_list_or_array #Single array x = np.linspace(-1, 1, 10) data = normalize_list_or_array(x, 'x', {}) assert data == {'x': x} y = [np.linspace(-5, 1, 10), [1, 2, 3, 5]] data = normalize_list_or_array(y, 'y', data) assert data == [{'x_0': x, 'y_0': y[0]}, {'x_1': x, 'y_1': y[1]}] z = 5 data = normalize_list_or_array(z, 'z', data) assert data == [{'x_0': x, 'y_0': y[0], 'z_0': z}, {'x_1': x, 'y_1': y[1], 'z_1': 5}] color = ['red', 'blue'] data = normalize_list_or_array(color, 'color', data) assert data == [{ 'x_0': x, 'y_0': y[0], 'z_0': z, 'color_0': 'red' }, { 'x_1': x, 'y_1': y[1], 'z_1': z, 'color_1': 'blue' }] color2 = [pd.Series([1, 2, 3]), pd.Series([4, 5, 6])] data = normalize_list_or_array(color2, 'color', data) assert data == [{ 'x_0': x, 'y_0': y[0], 'z_0': z, 'color_0': color2[0] }, { 'x_1': x, 'y_1': y[1], 'z_1': z, 'color_1': color2[1] }] too_long_data = [np.linspace(0, 1, 2), np.linspace(3, 4, 5), np.linspace(6, 7, 8)] with pytest.raises(ValueError): data = normalize_list_or_array(too_long_data, 'dont_enter_this', data) def test_normalize_list_or_array_flatten_np(): """ Test of the normalize_list_or_array function with flatten_np=True """ from masci_tools.vis.data import normalize_list_or_array x = np.linspace(-1, 1, 10) y = np.linspace(-1, 1, 10) xv, yv = np.meshgrid(x, y) data = normalize_list_or_array(xv, 'x', {}, flatten_np=True) data = normalize_list_or_array(yv, 'y', data, flatten_np=True) assert data['x'].shape == (100,) assert data['y'].shape == (100,) def test_normalize_list_or_array_forbid_split_up(): """ Test of the normalize_list_or_array function with forbid_split_up=True """ from masci_tools.vis.data import normalize_list_or_array x = np.linspace(-1, 1, 10) data = normalize_list_or_array(x, 'x', {}, forbid_split_up=True) assert data == {'x': x} y = [np.linspace(-5, 1, 10), [1, 2, 3, 5]] data = normalize_list_or_array(y, 'y', data, forbid_split_up=True) assert data == {'x': x, 'y': y} ENTRIES = [{ 'x': 'x', 'y': 'y' }, { 'x_values': 'test', 'y': ['y1', 'y2', 'y3'] }, { 'color': ['test', 'x'], 'type': ['y1', 'y2'] }] COLUMNS = [[{ 'x': 'x', 'y': 'y' }], [{ 'x_values': 'test', 'y': 'y1' }, { 'x_values': 'test', 'y': 'y2' }, { 'x_values': 'test', 'y': 'y3' }], [{ 'color': 'test', 'type': 'y1' }, { 'color': 'x', 'type': 'y2' }]] x_data = np.linspace(-10, 10, 101) dict_data = { 'x': x_data, 'test': x_data * 4, 'y': x_data**2, 'y1': 5 * x_data - 10, 'y2': np.cos(x_data), 'y3': np.exp(x_data) } #yapf: disable dict_data_multiple = [[{ 'x': dict_data['x'], 'y': dict_data['y'] }], [{ 'test': dict_data['test'], 'y1': dict_data['y1'] }, { 'test': dict_data['test'], 'y2': dict_data['y2'] }, { 'test': dict_data['test'], 'y3': dict_data['y3'] }], [{ 'test': dict_data['test'], 'y1': dict_data['y1'] }, { 'x': dict_data['x'], 'y2': dict_data['y2'] }]] #yapf: enable SINGLE_SOURCES = [dict_data,
pd.DataFrame(data=dict_data)
pandas.DataFrame
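In the masci_tools fixtures above, the same plotting data is offered both as a plain dict and as pandas.DataFrame(data=dict_data). The sketch below shows that equivalent construction on a smaller invented dict of equal-length arrays.

import numpy as np
import pandas as pd

# A dict of equal-length arrays maps directly onto DataFrame columns.
x = np.linspace(-1.0, 1.0, 5)
data = {"x": x, "y": x**2}
df = pd.DataFrame(data=data)
print(df.shape)         # (5, 2)
print(df["y"].iloc[0])  # 1.0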
from unittest.mock import MagicMock, patch

import numpy as np
import pandas as pd
import pytest
from sklearn.model_selection import StratifiedKFold

from evalml import AutoMLSearch
from evalml.automl.callbacks import raise_error_callback
from evalml.automl.pipeline_search_plots import SearchIterationPlot
from evalml.exceptions import PipelineNotFoundError
from evalml.model_family import ModelFamily
from evalml.objectives import (
    FraudCost,
    Precision,
    PrecisionMicro,
    Recall,
    get_core_objectives,
    get_objective,
)
from evalml.pipelines import (
    BinaryClassificationPipeline,
    MulticlassClassificationPipeline,
    PipelineBase,
    TimeSeriesBinaryClassificationPipeline,
    TimeSeriesMulticlassClassificationPipeline,
)
from evalml.pipelines.components.utils import get_estimators
from evalml.pipelines.utils import make_pipeline
from evalml.preprocessing import TimeSeriesSplit, split_data
from evalml.problem_types import ProblemTypes


def test_init(X_y_binary):
    X, y = X_y_binary

    automl = AutoMLSearch(
        X_train=X, y_train=y, problem_type="binary", max_iterations=1, n_jobs=1
    )
    automl.search()

    assert automl.n_jobs == 1
    assert isinstance(automl.rankings, pd.DataFrame)
    assert isinstance(automl.best_pipeline, PipelineBase)

    automl.best_pipeline.predict(X)

    # test with dataframes
    automl = AutoMLSearch(
pd.DataFrame(X)
pandas.DataFrame
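The evalml test above re-runs the search with the numpy features wrapped as pandas.DataFrame(X). A short sketch of that wrapping step on an invented feature matrix is shown below; the AutoMLSearch call itself is omitted.

import numpy as np
import pandas as pd

# Wrap a numeric feature matrix and target vector in pandas containers.
X = np.random.rand(6, 3)
y = np.array([0, 1, 0, 1, 0, 1])
X_df = pd.DataFrame(X, columns=[f"feat_{i}" for i in range(X.shape[1])])
y_series = pd.Series(y, name="target")
print(X_df.dtypes.tolist(), y_series.shape)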
import numpy as np import pandas as pd from ..master_equation import master_equation as meq #import MSI.master_equation.master_equation as meq import copy import re import cantera as ct class OptMatrix(object): def __init__(self): self.S_matrix = None self.s_matrix = None self.Y_matrix = None self.y_matrix = None self.z_matrix = None self.delta_X = None self.X = None self.sigma = None # #loads one experiment into self.matrix. Decides padding based on previous matrix or handle based on total exp num? def build_Z(self, exp_dict_list:list, parsed_yaml_file_list:list, loop_counter:int = 0, reaction_uncertainty=None, master_equation_uncertainty_df=None, master_equation_reaction_list=[], master_equation_flag = False): ''' Builds the Z vector. Arguments: exp_dic_list -- the dictonary that is built after a simulation that contains things like sensitivity coefficients parsed_yaml_file_list -- a list of dictonaries that contain the information stored in the yaml files. Keyword Arguments: loop_counter -- keeps track of the iteration number for the optimization (default 0) reaction_uncertainty -- a csv file that contains all the reactions in the cti file being used for optimization and their corresponding A,n and Ea uncertainty values (default None) master_equation_uncertainty_df -- a pandas dataframe that contains the reactions being treated with theory paramters along with the associated uncertainty values of those paramters (default None) master_equation_reaction_list -- a list of the reactions being treated with theory paramters (default []) master_equation_flag -- a boolean that indicates if reactions being represented by theory parameters are being used in the optimization (default False) ''' Z = [] Z_data_Frame = [] sigma = [] def jsr_temp_uncertainties(experiment_dict): if 'Relative_Uncertainty' in list(experiment_dict['experimental_data'][0].columns): temp_uncertainties=experiment_dict['experimental_data'][0]['Relative_Uncertainty'].values temp_uncertainties = list(temp_uncertainties) else: temp_uncertainties=experiment_dict['uncertainty']['temperature_relative_uncertainty']*np.ones(np.shape(experiment_dict['experimental_data'][0]['Temperature'].values)) temp_uncertainties = list(temp_uncertainties) return temp_uncertainties def flow_reactor_time_shift_uncertainties(parsed_yaml_file_list,experiment_dict): if len(parsed_yaml_file_list['timeShiftOriginal']) ==1: time_shift_uncertainties = [experiment_dict['uncertainty']['time_shift_uncertainty']] elif len(parsed_yaml_file_list['timeShiftOriginal']) >1: time_shift_uncertainties = [experiment_dict['uncertainty']['time_shift_uncertainty']]*len(parsed_yaml_file_list['timeShiftOriginal']) return time_shift_uncertainties def flow_reactor_temp_uncertainties(experiment_dict): if 'Temperature_Uncertainty' in list(experiment_dict['experimental_data'][0].columns): temp_uncertainties=experiment_dict['experimental_data'][0]['Temperature_Uncertainty'].values temp_uncertainties = list(temp_uncertainties) else: temp_uncertainties=experiment_dict['uncertainty']['temperature_relative_uncertainty']*np.ones(np.shape(experiment_dict['experimental_data'][0]['Temperature'].values)) temp_uncertainties = list(temp_uncertainties) return temp_uncertainties def flame_speed_temp_uncertainties(experiment_dict): if 'Relative_Uncertainty' in list(experiment_dict['experimental_data'][0].columns) and 'Temperature' in list(experiment_dict['experimental_data'][0].columns): temp_uncertainties=experiment_dict['experimental_data'][0]['Relative_Uncertainty'].values temp_uncertainties 
= list(temp_uncertainties) elif 'Temperature' in list(experiment_dict['experimental_data'][0].columns): temp_uncertainties=experiment_dict['uncertainty']['temperature_relative_uncertainty']*np.ones(np.shape(experiment_dict['experimental_data'][0]['Temperature'].values)) temp_uncertainties = list(temp_uncertainties) elif 'Pressure' in list(experiment_dict['experimental_data'][0].columns) or 'Phi' in list(experiment_dict['experimental_data'][0].columns): temp_uncertainties=experiment_dict['uncertainty']['temperature_relative_uncertainty'] temp_uncertainties = list(temp_uncertainties) return temp_uncertainties def flame_speed_press_uncertainties(experiment_dict): if 'Relative_Uncertainty' in list(experiment_dict['experimental_data'][0].columns) and 'Pressure' in list(experiment_dict['experimental_data'][0].columns): press_uncertainties=experiment_dict['experimental_data'][0]['Relative_Uncertainty'].values press_uncertainties = list(press_uncertainties) elif 'Pressure' in list(experiment_dict['experimental_data'][0].columns): press_uncertainties=experiment_dict['uncertainty']['pressure_relative_uncertainty']*np.ones(np.shape(experiment_dict['experimental_data'][0]['Pressure'].values)) press_uncertainties = list(temp_uncertainties) elif 'Temperature' in list(experiment_dict['experimental_data'][0].columns) or 'Phi' in list(experiment_dict['experimental_data'][0].columns): press_uncertainties=experiment_dict['uncertainty']['pressure_relative_uncertainty'] press_uncertainties = list(temp_uncertainties) return press_uncertainties def igdelay_temp_uncertainties(experiment_dict): if 'Relative_Uncertainty' in list(experiment_dict['experimental_data'][0].columns) and 'pressure' in list(experiment_dict['experimental_data'][0].columns) and len(experiment_dict['simulation'].temperatures) == len(experiment_dict['simulation'].pressures) and len(experiment_dict['simulation'].temperatures)>1 and len(experiment_dict['simulation'].pressures)>1: temp_uncertainties=experiment_dict['experimental_data'][0]['Relative_Uncertainty'].values temp_uncertainties = list(temp_uncertainties)*len(experiment_dict['simulation'].temperatures) elif 'Relative_Uncertainty' in list(experiment_dict['experimental_data'][0].columns) and 'temperature' in list(experiment_dict['experimental_data'][0].columns): temp_uncertainties=experiment_dict['experimental_data'][0]['Relative_Uncertainty'].values temp_uncertainties = list(temp_uncertainties) elif 'temperature' in list(experiment_dict['experimental_data'][0].columns): temp_uncertainties=experiment_dict['uncertainty']['temperature_relative_uncertainty']*np.ones(np.shape(experiment_dict['experimental_data'][0]['temperature'].values)) temp_uncertainties = list(temp_uncertainties) #stub this is where we are editing elif 'pressure' in list(experiment_dict['experimental_data'][0].columns) and len(experiment_dict['simulation'].temperatures) == len(experiment_dict['simulation'].pressures) and len(experiment_dict['simulation'].temperatures)>1 and len(experiment_dict['simulation'].pressures)>1 : temp_uncertainties=experiment_dict['uncertainty']['temperature_relative_uncertainty']*np.ones(np.shape(experiment_dict['experimental_data'][0]['pressure'].values)) temp_uncertainties = list(temp_uncertainties)* len(experiment_dict['simulation'].temperatures) elif 'pressure' in list(experiment_dict['experimental_data'][0].columns) and len(experiment_dict['simulation'].temperatures) != len(experiment_dict['simulation'].pressures): 
temp_uncertainties=experiment_dict['uncertainty']['temperature_relative_uncertainty'] temp_uncertainties = list(temp_uncertainties) elif len(experiment_dict['conditions_to_run'])>1 and len(experiment_dict['simulation'].temperatures)>1 and len(experiment_dict['simulation'].pressures)>1 and len(experiment_dict['simulation'].temperatures) == len(experiment_dict['simulation'].pressures): temp_uncertainties=experiment_dict['uncertainty']['temperature_relative_uncertainty'] temp_uncertainties = list(temp_uncertainties) * len(experiment_dict['simulation'].temperatures) elif len(experiment_dict['conditions_to_run'])>1: temp_uncertainties=experiment_dict['uncertainty']['temperature_relative_uncertainty'] temp_uncertainties = list(temp_uncertainties) return temp_uncertainties def igdelay_press_uncertainties(experiment_dict): if 'Relative_Uncertainty' in list(experiment_dict['experimental_data'][0].columns) and 'temperature' in list(experiment_dict['experimental_data'][0].columns) and len(experiment_dict['simulation'].temperatures) == len(experiment_dict['simulation'].pressures) and len(experiment_dict['simulation'].temperatures)>1 and len(experiment_dict['simulation'].pressures)>1: press_uncertainties=experiment_dict['experimental_data'][0]['Relative_Uncertainty'].values press_uncertainties = list(press_uncertainties)*len(experiment_dict['simulation'].temperatures) elif 'Relative_Uncertainty' in list(experiment_dict['experimental_data'][0].columns) and 'pressure' in list(experiment_dict['experimental_data'][0].columns): press_uncertainties=experiment_dict['experimental_data'][0]['Relative_Uncertainty'].values press_uncertainties = list(press_uncertainties) elif 'pressure' in list(experiment_dict['experimental_data'][0].columns): press_uncertainties=experiment_dict['uncertainty']['pressure_relative_uncertainty']*np.ones(np.shape(experiment_dict['experimental_data'][0]['pressure'].values)) press_uncertainties = list(press_uncertainties) elif 'temperature' in list(experiment_dict['experimental_data'][0].columns) and len(experiment_dict['simulation'].temperatures) == len(experiment_dict['simulation'].pressures) and len(experiment_dict['simulation'].temperatures)>1 and len(experiment_dict['simulation'].pressures)>1: press_uncertainties=experiment_dict['uncertainty']['pressure_relative_uncertainty'] press_uncertainties = list(press_uncertainties) * len(experiment_dict['simulation'].temperatures) #stub this is where editing is happening elif 'temperature' in list(experiment_dict['experimental_data'][0].columns) and len(experiment_dict['simulation'].temperatures) != len(experiment_dict['simulation'].pressures): press_uncertainties=experiment_dict['uncertainty']['pressure_relative_uncertainty'] press_uncertainties = list(press_uncertainties) elif len(experiment_dict['conditions_to_run'])>1 and len(experiment_dict['simulation'].temperatures)>1 and len(experiment_dict['simulation'].pressures)>1 and len(experiment_dict['simulation'].temperatures) == len(experiment_dict['simulation'].pressures): press_uncertainties=experiment_dict['uncertainty']['pressure_relative_uncertainty'] press_uncertainties = list(press_uncertainties)* len(experiment_dict['simulation'].temperatures) elif len(experiment_dict['conditions_to_run'])>1: press_uncertainties=experiment_dict['uncertainty']['pressure_relative_uncertainty'] press_uncertainties = list(press_uncertainties) return press_uncertainties def rcm_temp_uncertainties(experiment_dict): if 'Relative_Uncertainty' in list(experiment_dict['experimental_data'][0].columns) and 
'temperature' in list(experiment_dict['experimental_data'][0].columns): temp_uncertainties=experiment_dict['experimental_data'][0]['Relative_Uncertainty'].values temp_uncertainties = list(temp_uncertainties) elif 'temperature' in list(experiment_dict['experimental_data'][0].columns) and len(experiment_dict['simulation'].fullParsedYamlFile['temperatures'])==len(experiment_dict['simulation'].fullParsedYamlFile['pressures']): temp_uncertainties=experiment_dict['uncertainty']['temperature_relative_uncertainty']*np.ones(np.shape(experiment_dict['experimental_data'][0]['temperature'].values)) temp_uncertainties = list(temp_uncertainties) elif 'pressure' in list(experiment_dict['experimental_data'][0].columns): temp_uncertainties=experiment_dict['uncertainty']['temperature_relative_uncertainty'] temp_uncertainties = list(temp_uncertainties) elif len(experiment_dict['conditions_to_run'])>1: temp_uncertainties=experiment_dict['uncertainty']['temperature_relative_uncertainty'] temp_uncertainties = list(temp_uncertainties) return temp_uncertainties def rcm_press_uncertainties(experiment_dict): if 'Relative_Uncertainty' in list(experiment_dict['experimental_data'][0].columns) and 'pressure' in list(experiment_dict['experimental_data'][0].columns): press_uncertainties=experiment_dict['experimental_data'][0]['Relative_Uncertainty'].values press_uncertainties = list(press_uncertainties) elif 'pressure' in list(experiment_dict['experimental_data'][0].columns) and len(experiment_dict['simulation'].fullParsedYamlFile['temperatures'])==len(experiment_dict['simulation'].fullParsedYamlFile['pressures']): press_uncertainties=experiment_dict['uncertainty']['pressure_relative_uncertainty']*np.ones(np.shape(experiment_dict['experimental_data'][0]['pressure'].values)) press_uncertainties = list(press_uncertainties) elif 'temperature' in list(experiment_dict['experimental_data'][0].columns) and len(experiment_dict['simulation'].fullParsedYamlFile['temperatures'])==len(experiment_dict['simulation'].fullParsedYamlFile['pressures']): press_uncertainties=experiment_dict['uncertainty']['pressure_relative_uncertainty']*np.ones(np.shape(experiment_dict['experimental_data'][0]['temperature'].values)) press_uncertainties = list(press_uncertainties) elif len(experiment_dict['conditions_to_run'])>1: press_uncertainties=experiment_dict['uncertainty']['pressure_relative_uncertainty'] press_uncertainties = list(press_uncertainties) return press_uncertainties #need to append to sigma def uncertainty_calc(relative_uncertainty,absolute_uncertainty,data,experimental_data): absolute_uncertainty=float(absolute_uncertainty) length_of_data = data.shape[0] if 'Relative_Uncertainty' in list(experimental_data.columns): x_dependent_uncertainty = experimental_data['Relative_Uncertainty'].values relative_uncertainty_array = copy.deepcopy(x_dependent_uncertainty) relative_uncertainty_array = relative_uncertainty_array.reshape((relative_uncertainty_array.shape[0],1)) elif 'Relative_Uncertainty' not in list(experimental_data.columns): relative_uncertainty_array = np.full((length_of_data,1),relative_uncertainty) relative_uncertainty_array = relative_uncertainty_array.reshape((relative_uncertainty_array.shape[0],1)) if 'Absolute_Uncertainty' in list(experimental_data.columns): x_dependent_a_uncertainty = experimental_data['Absolute_Uncertainty'].values absolute_uncertainty_array = copy.deepcopy(x_dependent_a_uncertainty) #Fix this to deal with 0 data. 
absolute_uncertainty_array = np.divide(absolute_uncertainty_array,data) absolute_uncertainty_array = absolute_uncertainty_array.reshape((absolute_uncertainty_array.shape[0],1)) elif 'Absolute_Uncertainty' not in list(experimental_data.columns): absolute_uncertainty_array = np.divide(absolute_uncertainty,data) absolute_uncertainty_array = absolute_uncertainty_array.reshape((absolute_uncertainty_array.shape[0],1)) total_uncertainty = np.sqrt(np.square(relative_uncertainty_array) + np.square(absolute_uncertainty_array)) un_weighted_uncertainty = copy.deepcopy(total_uncertainty) if 'W' not in list(experimental_data.columns): weighting_factor = (1/length_of_data**.5) total_uncertainty = np.divide(total_uncertainty,weighting_factor) total_uncertainty = total_uncertainty.reshape((total_uncertainty.shape[0],1)) elif 'W' in list(experimental_data.columns): weighting_factor = experimental_data['W'].values weighting_factor = weighting_factor.reshape((weighting_factor.shape[0],1)) total_uncertainty = np.divide(total_uncertainty,weighting_factor) #total_uncertainty = total_uncertainty/weighting_factor return total_uncertainty,un_weighted_uncertainty #tab, start working here tomorrow with how we want to read in csv file for i,exp_dic in enumerate(exp_dict_list): counter = 0 #print(exp_dic) for j,observable in enumerate(exp_dic['mole_fraction_observables']+ exp_dic['concentration_observables']+ exp_dic['flame_speed_observables']+ exp_dic['ignition_delay_observables']): if observable == None: pass else: if observable in exp_dic['mole_fraction_observables']: ## add ppm statment here ? check if it exists? and add concentration statment below just for parcing total_uncertainty,un_weighted_uncertainty = uncertainty_calc(exp_dic['uncertainty']['mole_fraction_relative_uncertainty'][counter], exp_dic['uncertainty']['mole_fraction_absolute_uncertainty'][counter], exp_dic['experimental_data'][counter][observable].values,exp_dic['experimental_data'][counter]) total_uncertainty = total_uncertainty.reshape((total_uncertainty.shape[0],1)) un_weighted_uncertainty = un_weighted_uncertainty.reshape((un_weighted_uncertainty.shape[0], 1)) elif observable in exp_dic['concentration_observables'] and '_ppm' in exp_dic['experimental_data'][counter].columns[1]: total_uncertainty,un_weighted_uncertainty = uncertainty_calc(exp_dic['uncertainty']['concentration_relative_uncertainty'][counter], exp_dic['uncertainty']['concentration_absolute_uncertainty'][counter], exp_dic['experimental_data'][counter][observable+'_ppm'].values,exp_dic['experimental_data'][counter]) total_uncertainty = total_uncertainty.reshape((total_uncertainty.shape[0], 1)) un_weighted_uncertainty = un_weighted_uncertainty.reshape((un_weighted_uncertainty.shape[0], 1)) elif observable in exp_dic['concentration_observables'] and '_mol/cm^3' in exp_dic['experimental_data'][counter].columns[1]: total_uncertainty,un_weighted_uncertainty = uncertainty_calc(exp_dic['uncertainty']['concentration_relative_uncertainty'][counter], exp_dic['uncertainty']['concentration_absolute_uncertainty'][counter], exp_dic['experimental_data'][counter][observable+'_mol/cm^3'].values,exp_dic['experimental_data'][counter]) total_uncertainty = total_uncertainty.reshape((total_uncertainty.shape[0],1)) un_weighted_uncertainty = un_weighted_uncertainty.reshape((un_weighted_uncertainty.shape[0], 1)) elif observable in exp_dic['flame_speed_observables'] and '_cm/s' in exp_dic['experimental_data'][counter].columns[1]: total_uncertainty,un_weighted_uncertainty = 
uncertainty_calc(exp_dic['uncertainty']['flame_speed_relative_uncertainty'][counter], exp_dic['uncertainty']['flame_speed_absolute_uncertainty'][counter], exp_dic['experimental_data'][counter][observable+'_cm/s'].values,exp_dic['experimental_data'][counter]) total_uncertainty = total_uncertainty.reshape((total_uncertainty.shape[0],1)) un_weighted_uncertainty = un_weighted_uncertainty.reshape((un_weighted_uncertainty.shape[0], 1)) elif observable in exp_dic['ignition_delay_observables'] and '_s'in exp_dic['experimental_data'][counter].columns[1]: total_uncertainty,un_weighted_uncertainty = uncertainty_calc(exp_dic['uncertainty']['ignition_delay_relative_uncertainty'][counter], exp_dic['uncertainty']['ignition_delay_absolute_uncertainty'][counter], exp_dic['experimental_data'][counter][observable+'_s'].values,exp_dic['experimental_data'][counter]) total_uncertainty = total_uncertainty.reshape((total_uncertainty.shape[0],1)) un_weighted_uncertainty = un_weighted_uncertainty.reshape((un_weighted_uncertainty.shape[0], 1)) else: raise Exception('We Do Not Have This Unit Installed, Please Use Mole Fraction, ppm, mol/cm^3 or cm/s') Z.append(total_uncertainty) sigma.append(un_weighted_uncertainty) tempList = [observable+'_'+'experiment'+str(i)]*np.shape(total_uncertainty)[0] Z_data_Frame.extend(tempList) #print(Z_data_Frame) counter+=1 if 'absorbance_observables' in list(exp_dic.keys()): wavelengths = parsed_yaml_file_list[i]['absorbanceCsvWavelengths'] for k,wl in enumerate(wavelengths): total_uncertainty,un_weighted_uncertainty = uncertainty_calc(exp_dic['uncertainty']['absorbance_relative_uncertainty'][k], exp_dic['uncertainty']['absorbance_absolute_uncertainty'][k], exp_dic['absorbance_experimental_data'][k]['Absorbance_'+str(wl)].values,exp_dic['absorbance_experimental_data'][k]) total_uncertainty = total_uncertainty.reshape((total_uncertainty.shape[0], 1)) un_weighted_uncertainty = un_weighted_uncertainty.reshape((un_weighted_uncertainty.shape[0], 1)) tempList = [str(wl)+'_'+'experiment'+'_'+str(i)]*np.shape(total_uncertainty)[0] Z_data_Frame.extend(tempList) Z.append(total_uncertainty) sigma.append(un_weighted_uncertainty) Z = np.vstack((Z)) sigma = np.vstack((sigma)) #Here we are adding A,n,and Ea uncertainty #we go do not through an additional step to make sure that the A,N and Ea #values are paired with the correct reactions as in the old code, #because we wrote a function to make the excel sheet which will arrange things in the correct order #We also need to decide if we want to put this in as ln values or not in the spreadsheet active_parameters = [] reaction_uncertainty = pd.read_csv(reaction_uncertainty) #Flatten master equation reaction list flatten = lambda *n: (e for a in n for e in (flatten(*a) if isinstance(a, (tuple, list)) else (a,))) flattened_master_equation_reaction_list = list(flatten(master_equation_reaction_list)) if master_equation_flag: for reaction in flattened_master_equation_reaction_list: index = reaction_uncertainty.loc[reaction_uncertainty['Reaction'] == reaction].index[0] reaction_uncertainty = reaction_uncertainty.drop([index]) #tab fix this correctly, this unit needs to be fixed when we make a decision what the spreadsheet looks like uncertainty_As = reaction_uncertainty['Uncertainty A (unit)'].values uncertainty_As = uncertainty_As.reshape((uncertainty_As.shape[0], 1)) #uncertainty_As = np.log(uncertainty_As) Z = np.vstack((Z,uncertainty_As)) sigma = np.vstack((sigma,uncertainty_As)) for variable in range(uncertainty_As.shape[0]): 
Z_data_Frame.append('A'+'_'+str(variable)) active_parameters.append('A'+'_'+str(variable)) uncertainty_ns = reaction_uncertainty['Uncertainty N (unit)'].values uncertainty_ns = uncertainty_ns.reshape((uncertainty_ns.shape[0], 1)) Z = np.vstack((Z,uncertainty_ns)) sigma = np.vstack((sigma,uncertainty_ns)) for variable in range(uncertainty_ns.shape[0]): Z_data_Frame.append('n'+'_'+str(variable)) active_parameters.append('n'+'_'+str(variable)) uncertainty_Eas = reaction_uncertainty['Uncertainty Ea (unit)'].values uncertainty_Eas = uncertainty_Eas.reshape((uncertainty_Eas.shape[0], 1)) Z = np.vstack((Z,uncertainty_Eas)) sigma = np.vstack((sigma,uncertainty_Eas)) for variable in range(uncertainty_Eas.shape[0]): Z_data_Frame.append('Ea'+'_'+str(variable)) active_parameters.append('Ea'+'_'+str(variable)) if master_equation_flag == True: master_equation_uncertainty = [] for i,reaction in enumerate(master_equation_reaction_list): if type(reaction)==str: master_equation_uncertainty.append(list(master_equation_uncertainty_df[reaction].dropna().values)) elif type(reaction)==tuple: column_headers = master_equation_uncertainty_df.columns.to_list() for sub_reaction in reaction: if sub_reaction in column_headers: master_equation_uncertainty.append(list(master_equation_uncertainty_df[sub_reaction].dropna().values)) # if master_equation_flag ==True: # master_equation_uncertainty = [] # for col in master_equation_uncertainty_df: # master_equation_uncertainty.append(list(master_equation_uncertainty_df[col].dropna().values)) if master_equation_flag == True: for i,reaction in enumerate(master_equation_reaction_list): if type(reaction)==str: for j,paramter in enumerate(master_equation_uncertainty_df[reaction].dropna()): Z_data_Frame.append(str(reaction)+'_'+'P'+'_'+str(j)) active_parameters.append(master_equation_reaction_list[i]+'_P_'+str(j)) elif type(reaction)==tuple: column_headers = master_equation_uncertainty_df.columns.to_list() for sub_reaction in reaction: if sub_reaction in column_headers: for j,paramter in enumerate(master_equation_uncertainty_df[sub_reaction].dropna()): Z_data_Frame.append(str(reaction)+'_'+'P'+'_'+str(j)) active_parameters.append(str(master_equation_reaction_list[i])+'_P_'+str(j)) # for i,reaction in enumerate(master_equation_uncertainty): # for j,uncer in enumerate(reaction): # Z_data_Frame.append('R'+'_'+str(i)+'_'+'P'+str(j)) # #This might not look right in the data frame but we can try # #stub # active_parameters.append(master_equation_reaction_list[i]+'_P_'+str(j)) ##check this master_equation_uncertainty = [item for sublist in master_equation_uncertainty for item in sublist] master_equation_uncertainty = np.array(master_equation_uncertainty) master_equation_uncertainty = master_equation_uncertainty.reshape((master_equation_uncertainty.shape[0], 1)) Z = np.vstack((Z,master_equation_uncertainty)) sigma = np.vstack((sigma,master_equation_uncertainty)) #This is going to have to be simulation specific if exp_dict_list[0]['simulation'].physicalSens ==1: for i, exp_dic in enumerate(exp_dict_list): if re.match('[Ss]hock [Tt]ube',exp_dict_list[i]['simulation_type']) and re.match('[Ss]pecies[- ][Pp]rofile',exp_dict_list[i]['experiment_type']): #for i,exp_dic in enumerate(exp_dict_list): experiment_physical_uncertainty = [] #Temperature Uncertainty experiment_physical_uncertainty.append(exp_dic['uncertainty']['temperature_relative_uncertainty']) Z_data_Frame.append('T'+'_'+'experiment'+'_'+str(i)) active_parameters.append('T'+'_'+'experiment'+'_'+str(i)) #Pressure Uncertainty 
experiment_physical_uncertainty.append(exp_dic['uncertainty']['pressure_relative_uncertainty']) Z_data_Frame.append('P'+'_'+'experiment'+'_'+str(i)) active_parameters.append('P'+'_'+'experiment'+'_'+str(i)) #Species Uncertainty species_uncertainties = exp_dic['uncertainty']['species_relative_uncertainty']['dictonary_of_values'] species_to_loop = exp_dic['uncertainty']['species_relative_uncertainty']['species'] dilluant = ['Ar','AR','ar','HE','He','he','Kr','KR','kr','Xe','XE','xe','NE','Ne','ne'] for specie in species_to_loop: if specie in dilluant: continue experiment_physical_uncertainty.append(species_uncertainties[specie]) Z_data_Frame.append('X'+'_'+str(specie)+'_'+'experiment'+'_'+str(i)) active_parameters.append('X'+'_'+str(specie)+'_'+'experiment'+'_'+str(i)) experiment_physical_uncertainty.append(exp_dic['uncertainty']['time_shift_absolute_uncertainty']) Z_data_Frame.append('Time_shift'+'_'+'experiment'+'_'+str(i)) active_parameters.append('Time_shift'+'_'+'experiment'+'_'+str(i)) experiment_physical_uncertainty = np.array(experiment_physical_uncertainty) experiment_physical_uncertainty = experiment_physical_uncertainty.reshape((experiment_physical_uncertainty.shape[0], 1)) Z = np.vstack((Z,experiment_physical_uncertainty)) sigma = np.vstack((sigma,experiment_physical_uncertainty)) elif re.match('[Jj][Ss][Rr]',exp_dict_list[i]['simulation_type']): #ASK MARK WHAT TO ADD HERE #for i,exp_dic in enumerate(exp_dict_list): experiment_physical_uncertainty = [] #Temperature Uncertainty temp_uncertainties=jsr_temp_uncertainties(exp_dic) experiment_physical_uncertainty=experiment_physical_uncertainty+temp_uncertainties #experiment_physical_uncertainty.append(exp_dic['uncertainty']['temperature_relative_uncertainty']) Z_data_Frame=Z_data_Frame+['T'+'_'+'experiment'+'_'+str(i)]*len(temp_uncertainties) active_parameters=active_parameters+['T'+'_'+'experiment'+'_'+str(i)]*len(temp_uncertainties) #Pressure Uncertainty experiment_physical_uncertainty.append(exp_dic['uncertainty']['pressure_relative_uncertainty']) Z_data_Frame.append('P'+'_'+'experiment'+'_'+str(i)) active_parameters.append('P'+'_'+'experiment'+'_'+str(i)) #Species Uncertainty species_uncertainties = exp_dic['uncertainty']['species_relative_uncertainty']['dictonary_of_values'] species_to_loop = exp_dic['uncertainty']['species_relative_uncertainty']['species'] dilluant = ['Ar','AR','ar','HE','He','he','Kr','KR','kr','Xe','XE','xe','NE','Ne','ne'] for specie in species_to_loop: if specie in dilluant: continue experiment_physical_uncertainty.append(species_uncertainties[specie]) Z_data_Frame.append('X'+'_'+str(specie)+'_'+'experiment'+'_'+str(i)) active_parameters.append('X'+'_'+str(specie)+'_'+'experiment'+'_'+str(i)) experiment_physical_uncertainty.append(exp_dic['uncertainty']['restime_relative_uncertainty']) Z_data_Frame.append('R_experiment_'+str(i)) active_parameters.append('R_experiment_'+str(i)) experiment_physical_uncertainty = np.array(experiment_physical_uncertainty) experiment_physical_uncertainty = experiment_physical_uncertainty.reshape((experiment_physical_uncertainty.shape[0],1)) Z = np.vstack((Z,experiment_physical_uncertainty)) sigma = np.vstack((sigma,experiment_physical_uncertainty)) #print(Z_data_Frame) elif re.match('[Ff]lame[- ][Ss]peed',exp_dict_list[i]['simulation_type']) and re.match('[Oo][Nn][Ee]|[1][ -][dD][ -][Ff]lame',exp_dict_list[i][['experiment_type']]): #for i,exp_dic in enumerate(exp_dict_list): experiment_physical_uncertainty = [] #Temperature Uncertainty 
temp_uncertainties=flame_speed_temp_uncertainties(exp_dic) experiment_physical_uncertainty=experiment_physical_uncertainty+temp_uncertainties Z_data_Frame=Z_data_Frame+['T'+'_'+'experiment'+'_'+str(i)]*len(temp_uncertainties) active_parameters=active_parameters+['T'+'_'+'experiment'+'_'+str(i)]*len(temp_uncertainties) #Pressure Uncertainty press_uncertainties = flame_speed_press_uncertainties(exp_dic) Z_data_Frame.append('P'+'_'+'experiment'+'_'+str(i))*len(press_uncertainties) active_parameters.append('P'+'_'+'experiment'+'_'+str(i))*len(press_uncertainties) #Species Uncertainty conditions = exp_dic['conditions'] species_uncertainties = exp_dic['uncertainty']['species_relative_uncertainty']['dictonary_of_values'] species_to_loop = exp_dic['uncertainty']['species_relative_uncertainty']['species'] list_with_most_species_in_them = [] for specie in species_to_loop: list_with_most_species_in_them.append(len(conditions[specie])) max_species = max(list_with_most_species_in_them) if 'Diluant' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys() or 'diluant' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys(): diluant = exp_dic['uncertainty']['species_relative_uncertainty']['type_dict']['diluant'] for nmbr_of_species_sets in range(max_species): for specie in species_to_loop: if specie in dilluant: continue experiment_physical_uncertainty.append(species_uncertainties[specie]) Z_data_Frame.append('X'+'_'+str(specie)+'_'+'experiment'+'_'+str(i)) active_parameters.append('X'+'_'+str(specie)+'_'+'experiment'+'_'+str(i)) experiment_physical_uncertainty = np.array(experiment_physical_uncertainty) experiment_physical_uncertainty = experiment_physical_uncertainty.reshape((experiment_physical_uncertainty.shape[0], 1)) Z = np.vstack((Z,experiment_physical_uncertainty)) sigma = np.vstack((sigma,experiment_physical_uncertainty)) elif re.match('[Ss]pecies[- ][Pp]rofile',exp_dict_list[i]['experiment_type']) and re.match('[Ff]low[ -][Rr]eactor',exp_dict_list[i]['simulation_type']): #ASK MARK WHAT TO ADD HERE #for i,exp_dic in enumerate(exp_dict_list): experiment_physical_uncertainty = [] #Temperature Uncertainty temp_uncertainties=flow_reactor_temp_uncertainties(exp_dic) experiment_physical_uncertainty=experiment_physical_uncertainty+temp_uncertainties #experiment_physical_uncertainty.append(exp_dic['uncertainty']['temperature_relative_uncertainty']) Z_data_Frame=Z_data_Frame+['T'+'_'+'experiment'+'_'+str(i)]*len(temp_uncertainties) active_parameters=active_parameters+['T'+'_'+'experiment'+'_'+str(i)]*len(temp_uncertainties) #Pressure Uncertainty experiment_physical_uncertainty.append(exp_dic['uncertainty']['pressure_relative_uncertainty']) Z_data_Frame.append('P'+'_'+'experiment'+'_'+str(i)) active_parameters.append('P'+'_'+'experiment'+'_'+str(i)) #Species Uncertainty species_uncertainties = exp_dic['uncertainty']['species_relative_uncertainty']['dictonary_of_values'] species_to_loop = exp_dic['uncertainty']['species_relative_uncertainty']['species'] dilluant = ['Ar','AR','ar','HE','He','he','Kr','KR','kr','Xe','XE','xe','NE','Ne','ne'] for specie in species_to_loop: if specie in dilluant: continue experiment_physical_uncertainty.append(species_uncertainties[specie]) Z_data_Frame.append('X'+'_'+str(specie)+'_'+'experiment'+'_'+str(i)) active_parameters.append('X'+'_'+str(specie)+'_'+'experiment'+'_'+str(i)) time_shift_uncertainties = flow_reactor_time_shift_uncertainties(parsed_yaml_file_list[i],exp_dic) 
experiment_physical_uncertainty=experiment_physical_uncertainty+time_shift_uncertainties Z_data_Frame=Z_data_Frame+['Time_Shift'+'_'+'experiment'+'_'+str(i)]*len(time_shift_uncertainties) active_parameters=active_parameters+['Time_Shift'+'_'+'experiment'+'_'+str(i)]*len(time_shift_uncertainties) experiment_physical_uncertainty = np.array(experiment_physical_uncertainty) experiment_physical_uncertainty = experiment_physical_uncertainty.reshape((experiment_physical_uncertainty.shape[0],1)) Z = np.vstack((Z,experiment_physical_uncertainty)) sigma = np.vstack((sigma,experiment_physical_uncertainty)) elif re.match('[Ss]hock[- ][Tt]ube',exp_dict_list[i]['simulation_type']) and re.match('[Ii]gnition[- ][Dd]elay',exp_dict_list[i]['experiment_type']): #for i,exp_dic in enumerate(exp_dict_list): if len(exp_dic['simulation'].temperatures) == len(exp_dic['simulation'].pressures) and len(exp_dic['simulation'].temperatures) >1 and len(exp_dic['simulation'].pressures) >1: # print('inside z matrix') experiment_physical_uncertainty = [] #Temperature Uncertainty temp_uncertainties=igdelay_temp_uncertainties(exp_dic) experiment_physical_uncertainty=experiment_physical_uncertainty+temp_uncertainties for index in range(len(temp_uncertainties)): Z_data_Frame.append('T'+str(index+1)+'_'+'experiment'+'_'+str(i)) active_parameters.append('T'+str(index+1)+'_'+'experiment'+'_'+str(i)) #Z_data_Frame=Z_data_Frame+['T'+'_'+'experiment'+'_'+str(i)]*len(temp_uncertainties) #active_parameters=active_parameters+['T'+'_'+'experiment'+'_'+str(i)]*len(temp_uncertainties) #Pressure Uncertainty press_uncertainties = igdelay_press_uncertainties(exp_dic) for index in range(len(press_uncertainties)): Z_data_Frame.append('P'+str(index+1)+'_'+'experiment'+'_'+str(i)) active_parameters.append('P'+str(index+1)+'_'+'experiment'+'_'+str(i)) #Z_data_Frame=Z_data_Frame+['P'+'_'+'experiment'+'_'+str(i)]*len(press_uncertainties) #active_parameters=active_parameters+['P'+'_'+'experiment'+'_'+str(i)]*len(press_uncertainties) experiment_physical_uncertainty=experiment_physical_uncertainty+press_uncertainties #print(len(press_uncertainties)) #Species Uncertainty conditions = exp_dic['conditions_dict_list'] species_uncertainties = exp_dic['uncertainty']['species_relative_uncertainty']['dictonary_of_values'] species_to_loop = list(exp_dic['conditions_dict_list'].keys()) list_with_most_species_in_them = [] for specie in species_to_loop: list_with_most_species_in_them.append(len(conditions[specie])) max_species = max(list_with_most_species_in_them) diluent=[] if 'Diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys() or 'diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys(): diluent = exp_dic['uncertainty']['species_relative_uncertainty']['type_dict']['diluent'] singular_species=[] for species in list(exp_dic['simulation'].fullParsedYamlFile['conditions'].keys()): if len(exp_dic['simulation'].fullParsedYamlFile['conditions'][species])==1 and species not in diluent: singular_species.append(species) for x,species in enumerate(exp_dic['simulation'].fullParsedYamlFile['speciesNames']): if species in singular_species and species not in diluent: Z_data_Frame.append('X'+str(x+1)+'_'+species+'_experiment_'+str(i)) experiment_physical_uncertainty.append(species_uncertainties[specie]) active_parameters.append('X'+str(x+1)+'_'+species+'_'+'experiment'+'_'+str(i)) elif species not in singular_species and species not in diluent: for j in 
range(len(exp_dic['simulation'].fullParsedYamlFile['conditions_to_run'])): Z_data_Frame.append('X'+str(x+1)+'_'+species+'_experiment_'+str(i)) experiment_physical_uncertainty.append(species_uncertainties[specie]) active_parameters.append('X'+str(x+1)+'_'+species+'_'+'experiment'+'_'+str(i)) experiment_physical_uncertainty.append(exp_dic['uncertainty']['time_shift_absolute_uncertainty']) Z_data_Frame.append('Time_shift'+'_'+'experiment'+'_'+str(i)) active_parameters.append('Time_shift'+'_'+'experiment'+'_'+str(i)) experiment_physical_uncertainty = np.array(experiment_physical_uncertainty) experiment_physical_uncertainty = experiment_physical_uncertainty.reshape((experiment_physical_uncertainty.shape[0], 1)) Z = np.vstack((Z,experiment_physical_uncertainty)) sigma = np.vstack((sigma,experiment_physical_uncertainty)) else: experiment_physical_uncertainty = [] #Temperature Uncertainty temp_uncertainties=igdelay_temp_uncertainties(exp_dic) experiment_physical_uncertainty=experiment_physical_uncertainty+temp_uncertainties for index in range(len(temp_uncertainties)): Z_data_Frame.append('T'+str(index+1)+'_'+'experiment'+'_'+str(i)) active_parameters.append('T'+str(index+1)+'_'+'experiment'+'_'+str(i)) #Z_data_Frame=Z_data_Frame+['T'+'_'+'experiment'+'_'+str(i)]*len(temp_uncertainties) #active_parameters=active_parameters+['T'+'_'+'experiment'+'_'+str(i)]*len(temp_uncertainties) #Pressure Uncertainty press_uncertainties = igdelay_press_uncertainties(exp_dic) for index in range(len(press_uncertainties)): Z_data_Frame.append('P'+str(index+1)+'_'+'experiment'+'_'+str(i)) active_parameters.append('P'+str(index+1)+'_'+'experiment'+'_'+str(i)) #Z_data_Frame=Z_data_Frame+['P'+'_'+'experiment'+'_'+str(i)]*len(press_uncertainties) #active_parameters=active_parameters+['P'+'_'+'experiment'+'_'+str(i)]*len(press_uncertainties) experiment_physical_uncertainty=experiment_physical_uncertainty+press_uncertainties #print(len(press_uncertainties)) #Species Uncertainty conditions = exp_dic['conditions_dict_list'] species_uncertainties = exp_dic['uncertainty']['species_relative_uncertainty']['dictonary_of_values'] species_to_loop = list(exp_dic['conditions_dict_list'].keys()) list_with_most_species_in_them = [] for specie in species_to_loop: list_with_most_species_in_them.append(len(conditions[specie])) max_species = max(list_with_most_species_in_them) diluent=[] if 'Diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys() or 'diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys(): diluent = exp_dic['uncertainty']['species_relative_uncertainty']['type_dict']['diluent'] singular_species=[] for species in list(exp_dic['simulation'].fullParsedYamlFile['conditions'].keys()): if len(exp_dic['simulation'].fullParsedYamlFile['conditions'][species])==1 and species not in diluent: singular_species.append(species) for x,species in enumerate(exp_dic['simulation'].fullParsedYamlFile['speciesNames']): if species in singular_species and species not in diluent: Z_data_Frame.append('X'+str(x+1)+'_'+species+'_experiment_'+str(i)) experiment_physical_uncertainty.append(species_uncertainties[specie]) active_parameters.append('X'+str(x+1)+'_'+species+'_'+'experiment'+'_'+str(i)) elif species not in singular_species and species not in diluent: for j in range(len(exp_dic['simulation'].fullParsedYamlFile['conditions_to_run'])): Z_data_Frame.append('X'+str(x+1)+'_'+species+'_experiment_'+str(i)) experiment_physical_uncertainty.append(species_uncertainties[specie]) 
active_parameters.append('X'+str(x+1)+'_'+species+'_'+'experiment'+'_'+str(i)) experiment_physical_uncertainty.append(exp_dic['uncertainty']['time_shift_absolute_uncertainty']) Z_data_Frame.append('Time_shift'+'_'+'experiment'+'_'+str(i)) active_parameters.append('Time_shift'+'_'+'experiment'+'_'+str(i)) experiment_physical_uncertainty = np.array(experiment_physical_uncertainty) experiment_physical_uncertainty = experiment_physical_uncertainty.reshape((experiment_physical_uncertainty.shape[0], 1)) Z = np.vstack((Z,experiment_physical_uncertainty)) sigma = np.vstack((sigma,experiment_physical_uncertainty)) elif re.match('[Rr][Cc][Mm]',exp_dict_list[i]['simulation_type']) and re.match('[Ii]gnition[- ][Dd]elay',exp_dict_list[i]['experiment_type']): #for i,exp_dic in enumerate(exp_dict_list): experiment_physical_uncertainty = [] #Temperature Uncertainty temp_uncertainties=rcm_temp_uncertainties(exp_dic) experiment_physical_uncertainty=experiment_physical_uncertainty+temp_uncertainties for index in range(len(temp_uncertainties)): Z_data_Frame.append('T'+str(index+1)+'_'+'experiment'+'_'+str(i)) active_parameters.append('T'+str(index+1)+'_'+'experiment'+'_'+str(i)) #Z_data_Frame=Z_data_Frame+['T'+'_'+'experiment'+'_'+str(i)]*len(temp_uncertainties) #active_parameters=active_parameters+['T'+'_'+'experiment'+'_'+str(i)]*len(temp_uncertainties) #Pressure Uncertainty press_uncertainties = rcm_press_uncertainties(exp_dic) for index in range(len(press_uncertainties)): Z_data_Frame.append('P'+str(index+1)+'_'+'experiment'+'_'+str(i)) active_parameters.append('P'+str(index+1)+'_'+'experiment'+'_'+str(i)) #Z_data_Frame=Z_data_Frame+['P'+'_'+'experiment'+'_'+str(i)]*len(press_uncertainties) #active_parameters=active_parameters+['P'+'_'+'experiment'+'_'+str(i)]*len(press_uncertainties) experiment_physical_uncertainty=experiment_physical_uncertainty+press_uncertainties #print(len(press_uncertainties)) #Species Uncertainty conditions = exp_dic['conditions_dict_list'] species_uncertainties = exp_dic['uncertainty']['species_relative_uncertainty']['dictonary_of_values'] species_to_loop = list(exp_dic['conditions_dict_list'].keys()) list_with_most_species_in_them = [] for specie in species_to_loop: list_with_most_species_in_them.append(len(conditions[specie])) max_species = max(list_with_most_species_in_them) diluent=[] if 'Diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys() or 'diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys(): diluent = exp_dic['uncertainty']['species_relative_uncertainty']['type_dict']['diluent'] singular_species=[] for species in list(exp_dic['simulation'].fullParsedYamlFile['conditions'].keys()): if len(exp_dic['simulation'].fullParsedYamlFile['conditions'][species])==1 and species not in diluent: singular_species.append(species) for x,species in enumerate(exp_dic['simulation'].fullParsedYamlFile['speciesNames']): if species in singular_species and species not in diluent: Z_data_Frame.append('X'+str(x+1)+'_'+species+'_experiment_'+str(i)) experiment_physical_uncertainty.append(species_uncertainties[specie]) active_parameters.append('X'+str(x+1)+'_'+species+'_'+'experiment'+'_'+str(i)) elif species not in singular_species and species not in diluent: for j in range(len(exp_dic['simulation'].fullParsedYamlFile['conditions_to_run'])): Z_data_Frame.append('X'+str(x+1)+'_'+species+'_experiment_'+str(i)) experiment_physical_uncertainty.append(species_uncertainties[specie]) 
active_parameters.append('X'+str(x+1)+'_'+species+'_'+'experiment'+'_'+str(i)) experiment_physical_uncertainty.append(exp_dic['uncertainty']['time_shift_absolute_uncertainty']) Z_data_Frame.append('Time_shift'+'_'+'experiment'+'_'+str(i)) active_parameters.append('Time_shift'+'_'+'experiment'+'_'+str(i)) experiment_physical_uncertainty = np.array(experiment_physical_uncertainty) experiment_physical_uncertainty = experiment_physical_uncertainty.reshape((experiment_physical_uncertainty.shape[0], 1)) Z = np.vstack((Z,experiment_physical_uncertainty)) sigma = np.vstack((sigma,experiment_physical_uncertainty)) #print(exp_dict_list[i]['simulation_type'],exp_dict_list[i]['experiment_type']) #building dictonary to keep track of independtend coupled coefficients count = 0 coef_dict = {} uncertainties_of_coefficents = [] for i,exp_dic in enumerate(exp_dict_list): if 'perturbed_coef' not in exp_dic.keys(): continue dictonary_of_coef_and_uncertainty = exp_dic['uncertainty']['coupled_coef_and_uncertainty'] for x in dictonary_of_coef_and_uncertainty: if x not in coef_dict.keys(): coef_dict[x] = dictonary_of_coef_and_uncertainty[x] for x in coef_dict: for y in coef_dict[x]: if y[0]!=0: #this might cause a problem in the future count+=1 uncertainties_of_coefficents.append(y) Z_data_Frame.append('Sigma'+'_'+str(count)) active_parameters.append('Sigma'+'_'+str(count)) uncertainties_of_coefficents = np.array(uncertainties_of_coefficents) if uncertainties_of_coefficents.any() == True: uncertainties_of_coefficents = uncertainties_of_coefficents.reshape((uncertainties_of_coefficents.shape[0], 1)) Z = np.vstack((Z,uncertainties_of_coefficents)) sigma = np.vstack((sigma,uncertainties_of_coefficents)) #return(Z,Z_data_Frame) #print('THIS IS Z',Z_data_Frame) Z_data_Frame = pd.DataFrame({'value': Z_data_Frame,'Uncertainty': Z.reshape((Z.shape[0],))}) self.z_matrix = Z self.sigma = sigma #print(Z.shape) return Z,Z_data_Frame,sigma,active_parameters def load_Y(self, exp_dict_list:list,parsed_yaml_file_list:list, loop_counter:int = 0, X:dict={}, master_equation_reactions = [], master_equation_uncertainty_df = None, master_equation_flag = False): def natural_log_difference(experiment,model): natural_log_diff = np.log(np.array(experiment)) - np.log(np.array(model)) return natural_log_diff Y = [] Y_data_Frame = [] for i,exp_dic in enumerate(exp_dict_list): counter = 0 for j,observable in enumerate((exp_dic['mole_fraction_observables']+ exp_dic['concentration_observables'] + exp_dic['flame_speed_observables']+ exp_dic['ignition_delay_observables'])): if observable == None: pass else: #if you need to add something with concentration add it here if 'ppm' in exp_dic['experimental_data'][counter].columns.tolist()[1]: if re.match('[Ss]hock [Tt]ube',exp_dict_list[i]['simulation_type']): natural_log_diff = natural_log_difference(exp_dic['experimental_data'][counter][observable+'_ppm'].values, (exp_dic['simulation'].timeHistoryInterpToExperiment[observable].dropna().values)*1e6) natural_log_diff = natural_log_diff.reshape((natural_log_diff.shape[0],1)) if re.match('[Ss]pecies[- ][Pp]rofile',exp_dict_list[i]['experiment_type']) and re.match('[Ff]low[ -][Rr]eactor',exp_dict_list[i]['simulation_type']): natural_log_diff = natural_log_difference(exp_dic['experimental_data'][counter][observable+'_ppm'].values, (exp_dic['simulation'].timeHistories[0][observable].dropna().values)*1e6) natural_log_diff = natural_log_diff.reshape((natural_log_diff.shape[0],1)) if re.match('[Jj][Ss][Rr]',exp_dict_list[i]['simulation_type']): 
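                        # For ppm data the model mole fractions are multiplied by 1e6 so that model and
                        # experiment are compared in the same units before the natural-log difference is taken.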
natural_log_diff = natural_log_difference(exp_dic['experimental_data'][counter][observable+'_ppm'].values, (exp_dic['simulation'].timeHistories[0][observable].dropna().values)*1e6) natural_log_diff = natural_log_diff.reshape((natural_log_diff.shape[0],1)) elif 'mol/cm^3' in exp_dic['experimental_data'][counter].columns.tolist()[1]: if re.match('[Ss]hock [Tt]ube',exp_dict_list[i]['simulation_type']) and re.match('[Ss]pecies[- ][Pp]rofile',exp_dict_list[i]['experiment_type']): concentration = np.true_divide(1,exp_dic['simulation'].pressureAndTemperatureToExperiment[counter]['temperature'].to_numpy())*exp_dic['simulation'].pressureAndTemperatureToExperiment[counter]['pressure'].to_numpy() concentration *= (1/(8.314e6))*exp_dic['simulation'].timeHistoryInterpToExperiment[observable].dropna().to_numpy() natural_log_diff = natural_log_difference(exp_dic['experimental_data'][counter][observable+'_mol/cm^3'].to_numpy(),concentration) natural_log_diff = natural_log_diff.reshape((natural_log_diff.shape[0], 1)) if re.match('[Ss]pecies[- ][Pp]rofile',exp_dict_list[i]['experiment_type']) and re.match('[Ff]low[ -][Rr]eactor',exp_dict_list[i]['simulation_type']): concentration = np.true_divide(1,exp_dic['simulation'].timeHistories[0]['temperature'].to_numpy())*exp_dic['simulation'].timeHistories[0]['pressure'].to_numpy() concentration *= (1/(8.314e6))*exp_dic['simulation'].timeHistories[0][observable].dropna().to_numpy() natural_log_diff = natural_log_difference(exp_dic['experimental_data'][counter][observable+'_mol/cm^3'].to_numpy(),concentration) natural_log_diff = natural_log_diff.reshape((natural_log_diff.shape[0], 1)) if re.match('[Jj][Ss][Rr]',exp_dict_list[i]['simulation_type']): concentration = np.true_divide(1.0,exp_dic['simulation'].pressure*ct.one_atm)*np.array(exp_dic['simulation'].temperatures) concentration *= (1/(8.314e6))*exp_dic['simulation'].timeHistories[0][observable].dropna().to_numpy() natural_log_diff = natural_log_difference(exp_dic['experimental_data'][counter][observable+'_mol/cm^3'].to_numpy(),concentration) natural_log_diff = natural_log_diff.reshape((natural_log_diff.shape[0], 1)) elif 'cm/s' in exp_dic['experimental_data'][counter].columns.tolist()[1]: if re.match('[Ff]lame [Ss]peed',exp_dict_list[i]['simulation_type']) and re.match('[Oo][Nn][Ee]|[1][ -][dD][ -][Ff]lame',exp_dict_list[i]['experiment_type']): natural_log_diff = natural_log_difference(exp_dic['experimental_data'][counter][observable+'_cm/s'].to_numpy(), exp_dic['simulation'].timeHistories[0][observable]) natural_log_diff = natural_log_diff.reshape((natural_log_diff.shape[0], 1)) elif 's' in exp_dic['experimental_data'][counter].columns.tolist()[1]: if re.match('[Ii]gnition[- ][Dd]elay',exp_dict_list[i]['experiment_type']): #check these units would be in seconds of ms? 
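                    # Each Y entry for an observable is ln(experiment) - ln(model), reshaped into a column
                    # vector before being stacked. A minimal sketch of the same operation on placeholder
                    # numbers (illustrative only, not data from any experiment; not executed here):
                    #
                    #   import numpy as np
                    #   experiment = np.array([1.2e-3, 9.8e-4])   # e.g. measured ignition delays in s
                    #   model      = np.array([1.0e-3, 1.1e-3])   # e.g. simulated delays in s
                    #   ln_diff    = np.log(experiment) - np.log(model)
                    #   ln_diff    = ln_diff.reshape((ln_diff.shape[0], 1))   # column vector for np.vstack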
natural_log_diff = natural_log_difference(exp_dic['experimental_data'][counter][observable+'_s'].to_numpy(), exp_dic['simulation'].timeHistories[0]['delay']) natural_log_diff = natural_log_diff.reshape((natural_log_diff.shape[0], 1)) else: if re.match('[Ss]hock [Tt]ube',exp_dict_list[i]['simulation_type']) and re.match('[Ss]pecies[- ][Pp]rofile',exp_dict_list[i]['experiment_type']): natural_log_diff = natural_log_difference(exp_dic['experimental_data'][counter][observable].values, exp_dic['simulation'].timeHistoryInterpToExperiment[observable].dropna().values) natural_log_diff = natural_log_diff.reshape((natural_log_diff.shape[0], 1)) if re.match('[Jj][Ss][Rr]',exp_dict_list[i]['simulation_type']): natural_log_diff = natural_log_difference(exp_dic['experimental_data'][counter][observable].values, exp_dic['simulation'].timeHistories[0][observable].values) natural_log_diff = natural_log_diff.reshape((natural_log_diff.shape[0], 1)) if re.match('[Ss]pecies[- ][Pp]rofile',exp_dict_list[i]['experiment_type']) and re.match('[Ff]low[ -][Rr]eactor',exp_dict_list[i]['simulation_type']): natural_log_diff = natural_log_difference(exp_dic['experimental_data'][counter][observable].values, exp_dic['simulation'].timeHistories[0][observable].values) natural_log_diff = natural_log_diff.reshape((natural_log_diff.shape[0], 1)) tempList = [observable+'_'+'experiment'+str(i)]*np.shape(natural_log_diff)[0] Y_data_Frame.extend(tempList) Y.append(natural_log_diff) counter+=1 if 'absorbance_observables' in list(exp_dic.keys()): wavelengths = parsed_yaml_file_list[i]['absorbanceCsvWavelengths'] for k,wl in enumerate(wavelengths): natural_log_diff = natural_log_difference(exp_dic['absorbance_experimental_data'][k]['Absorbance_'+str(wl)].values,exp_dic['absorbance_model_data'][wl]) natural_log_diff = natural_log_diff.reshape((natural_log_diff.shape[0], 1)) tempList = [str(wl)+'_'+'experiment'+'_'+str(i)]*np.shape(natural_log_diff)[0] Y_data_Frame.extend(tempList) Y.append(natural_log_diff) Y = np.vstack((Y)) #YdataFrame = pd.DataFrame({'value': YdataFrame,'ln_difference': Y}) reactions_in_cti_file = exp_dict_list[0]['simulation'].processor.solution.reaction_equations() #assembling the target values portion of the Y matrix #getting the size of the cti file from the first simulation because #they all use the same cti file and it shouldn't matter # add in a conditional statment for if there is master equation data #which is getting included in the simulation #Flatten master equation reaction list flatten = lambda *n: (e for a in n for e in (flatten(*a) if isinstance(a, (tuple, list)) else (a,))) flattened_master_equation_reaction_list = list(flatten(master_equation_reactions)) if master_equation_flag ==True: A_n_Ea_length = int((len(reactions_in_cti_file) - len(flattened_master_equation_reaction_list))*3) number_of_molecular_parameters_list = [] for col in master_equation_uncertainty_df: number_of_molecular_parameters_list.append(len(master_equation_uncertainty_df[col].dropna().values)) number_of_molecular_parameters = sum(number_of_molecular_parameters_list) #print('we do not have master equation installed yet') #subtract out the necessary target values and add the other ones in else: A_n_Ea_length = len(reactions_in_cti_file)*3 #addint the zeros to the Y array #adding the strings to the dictonary ## making a,n and Ea zero list A_n_Ea_zeros = np.zeros((A_n_Ea_length,1)) if master_equation_flag ==True: molecular_paramter_zeros = np.zeros((number_of_molecular_parameters,1)) for variable in range(A_n_Ea_length//3): 
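        # This loop and the two that follow append the 'A', 'n' and 'Ea' labels in the same order as the
        # corresponding columns of S. The matching rows of Y are the zeros stacked on the first iteration
        # (prior and current rate parameters coincide) and -X on later iterations, so the accumulated
        # departure from the prior is what gets penalized.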
Y_data_Frame.append('A'+'_'+str(variable)) for variable in range(A_n_Ea_length//3): Y_data_Frame.append('n'+'_'+str(variable)) for variable in range(A_n_Ea_length//3): Y_data_Frame.append('Ea'+'_'+str(variable)) #make this the order of master equation list if master_equation_flag == True: for i,reaction in enumerate(master_equation_reactions): if type(reaction)==str: for j,paramter in enumerate(master_equation_uncertainty_df[reaction].dropna()): Y_data_Frame.append(str(reaction)+'_P'+'_'+str(j)) elif type(reaction)==tuple: column_headers = master_equation_uncertainty_df.columns.to_list() for sub_reaction in reaction: if sub_reaction in column_headers: for j,paramter in enumerate(master_equation_uncertainty_df[sub_reaction].dropna()): Y_data_Frame.append(str(reaction)+'_P'+'_'+str(j)) # if master_equation_flag == True: # for i,value in enumerate(number_of_molecular_parameters_list): # for j,parameter in enumerate(range(value)): # Y_data_Frame.append('R'+'_'+str(i)+'P'+'_'+str(j)) if loop_counter == 0: Y = np.vstack((Y,A_n_Ea_zeros)) if master_equation_flag ==True: Y = np.vstack((Y,molecular_paramter_zeros)) else: #print('we do not have loop counter installed yet') #need to check what we would need to do here #should be tottal X ? #clean this part of the code up here temp_array = np.array(X['As_ns_Eas'])*-1 temp_array = temp_array.reshape((temp_array.shape[0], 1)) Y = np.vstack((Y, temp_array)) #clean this part of the code up here #tab if master_equation_flag == True: temp_array = np.array(X['molecular_parameters'])*-1 temp_array = temp_array.reshape((temp_array.shape[0], 1)) Y = np.vstack((Y,temp_array)) #Assembling the phsycial portion of the Y matrix if exp_dict_list[0]['simulation'].physicalSens ==1: #print(exp_dict_list) for i,exp_dic in enumerate(exp_dict_list): if loop_counter ==0: if re.match('[Ss]hock [Tt]ube',exp_dict_list[i]['simulation_type']) and re.match('[Ss]pecies[ -][Pp]rofile',exp_dict_list[i]['experiment_type']): dic_of_conditions = exp_dic['simulation'].conditions #subtract out the dilluant species_in_simulation = len(set(dic_of_conditions.keys()).difference(['Ar','AR','ar','HE','He','he','Kr','KR','kr','Xe','XE','xe','NE','Ne','ne'])) #add two for Temperature and Pressure len_of_phsycial_observables_in_simulation = species_in_simulation + 2 + 1 temp_zeros = np.zeros((len_of_phsycial_observables_in_simulation,1)) #stacking the zeros onto the Y array Y = np.vstack((Y,temp_zeros)) Y_data_Frame.append('T'+'_'+'experiment'+'_'+str(i)) Y_data_Frame.append('P'+'_'+'experiment'+'_'+str(i)) for variable in range(species_in_simulation): Y_data_Frame.append('X'+'_'+str(variable)+'_'+'experiment'+'_'+str(i)) Y_data_Frame.append('Time_shift'+'_'+'experiment'+'_'+str(i)) elif re.match('[Jj][Ss][Rr]',exp_dict_list[i]['simulation_type']) and re.match('[Ss]pecies[ -][Pp]rofile',exp_dict_list[i]['experiment_type']): dict_of_conditions = exp_dic['simulation'].conditions species_in_simulation = len(set(dict_of_conditions.keys()).difference(['Ar','AR','ar','HE','He','he','Kr','KR','kr','Xe','XE','xe','NE','Ne','ne'])) temperatures_in_simulation = len(exp_dic['simulation'].temperatures) pressure_in_simulation = 1 restime_in_simulation = 1 len_of_phsycial_observables_in_simulation = species_in_simulation+temperatures_in_simulation+pressure_in_simulation+restime_in_simulation temp_zeros = np.zeros((len_of_phsycial_observables_in_simulation,1)) Y = np.vstack((Y,temp_zeros)) for value in range(temperatures_in_simulation): Y_data_Frame.append('T'+'_'+'experiment'+'_'+str(i)) 
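                    # For a JSR experiment the physical rows of Y are padded with zeros in the order:
                    # one temperature entry per set point, a single pressure entry, one entry per
                    # non-diluent species, and a residence-time entry ('R'); the labels appended here
                    # follow that same order.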
Y_data_Frame.append('P'+'_'+'experiment'+'_'+str(i)) for variable in range(species_in_simulation): Y_data_Frame.append('X'+'_'+str(variable)+'_'+'experiment'+'_'+str(i)) Y_data_Frame.append('R_experiment_'+str(i)) elif re.match('[Ff]lame [Ss]peed',exp_dict_list[i]['simulation_type']) and re.match('[Oo][Nn][Ee]|[1][ -][dD][ -][Ff]lame',exp_dict_list[i]['experimentType']): conditions = exp_dic['conditions_dict_list'] species_to_loop = exp_dic['uncertainty']['species_relative_uncertainty']['species'] pressures_in_simulation = len(exp_dic['simulation'].pressures) list_with_most_species_in_them = [] for specie in species_to_loop: list_with_most_species_in_them.append(len(conditions[specie])) max_species = max(list_with_most_species_in_them) if 'Diluant' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys() or 'diluant' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys(): diluant = exp_dic['uncertainty']['species_relative_uncertainty']['type_dict']['diluant'] # species_in_simulation = list(exp_dic['conditions_to_run'][0].keys()) species_in_simulation = len(set(species_in_simulation).difference(diluant)) * max_species temperatures_in_simulation = len(exp_dic['simulation'].temperatures) pressure_in_simulation = len(exp_dic['simulation'].pressures) len_of_phsycial_observables_in_simulation = species_in_simulation+temperatures_in_simulation+pressure_in_simulation temp_zeros = np.zeros((len_of_phsycial_observables_in_simulation,1)) Y = np.vstack((Y,temp_zeros)) for value in range(temperatures_in_simulation): Y_data_Frame.append('T'+'_'+'experiment'+'_'+str(i)) for value in range(pressures_in_simulation): Y_data_Frame.append('P'+'_'+'experiment'+'_'+str(i)) for variable in range(species_in_simulation): Y_data_Frame.append('X'+'_'+str(variable)+'_'+'experiment'+'_'+str(i)) elif re.match('[Ss]hock [Tt]ube',exp_dict_list[i]['simulation_type']) and re.match('[Ii]gnition[- ][Dd]elay',exp_dict_list[i]['experiment_type']): conditions = exp_dic['conditions_dict_list'] species_uncertainties = exp_dic['uncertainty']['species_relative_uncertainty']['dictonary_of_values'] species_to_loop = list(exp_dic['conditions_dict_list'].keys()) list_with_most_species_in_them = [] for specie in species_to_loop: list_with_most_species_in_them.append(len(conditions[specie])) max_species = max(list_with_most_species_in_them) diluent=[] if 'Diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys() or 'diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys(): diluent = exp_dic['uncertainty']['species_relative_uncertainty']['type_dict']['diluent'] singular_species=[] for species in list(exp_dic['simulation'].fullParsedYamlFile['conditions'].keys()): if len(exp_dic['simulation'].fullParsedYamlFile['conditions'][species])==1 and species not in diluent: singular_species.append(species) #species_in_simulation = len(set(dict_of_conditions.keys()).difference(diluant)) * max_species species = copy.deepcopy(species_to_loop) species_in_simulation = int(len(singular_species)+((len(set(exp_dic['simulation'].fullParsedYamlFile['speciesNames']).difference(diluent))-len(singular_species))*len(exp_dic['simulation'].fullParsedYamlFile['conditions_to_run']))) temperatures_in_simulation = len(exp_dic['simulation'].temperatures) pressures_in_simulation = len(exp_dic['simulation'].pressures) time_shift_length = 1 #print(species_in_simulation,temperatures_in_simulation,pressures_in_simulation) len_of_phsycial_observables_in_simulation = 
species_in_simulation+temperatures_in_simulation+pressures_in_simulation + time_shift_length temp_zeros = np.zeros((len_of_phsycial_observables_in_simulation,1)) Y = np.vstack((Y,temp_zeros)) for value in range(temperatures_in_simulation): Y_data_Frame.append('T'+str(value+1)+'_'+'experiment'+'_'+str(i)) for value in range(pressures_in_simulation): Y_data_Frame.append('P'+str(value+1)+'_'+'experiment'+'_'+str(i)) diluent=[] if 'Diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys() or 'diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys(): diluent = exp_dic['uncertainty']['species_relative_uncertainty']['type_dict']['diluent'] for x,species in enumerate(exp_dic['simulation'].fullParsedYamlFile['speciesNames']): if species in singular_species and species not in diluent: Y_data_Frame.append('X'+str(x+1)+'_'+species+'_experiment_'+str(i)) elif species not in singular_species and species not in diluent: for j in range(len(exp_dic['simulation'].fullParsedYamlFile['conditions_to_run'])): Y_data_Frame.append('X'+str(x+1)+'_'+species+'_experiment_'+str(i)) Y_data_Frame.append('Time_shift'+'_'+'experiment'+'_'+str(i)) elif re.match('[Rr][Cc][Mm]',exp_dict_list[i]['simulation_type']) and re.match('[Ii]gnition[- ][Dd]elay',exp_dict_list[i]['experiment_type']): conditions = exp_dic['conditions_dict_list'] species_uncertainties = exp_dic['uncertainty']['species_relative_uncertainty']['dictonary_of_values'] species_to_loop = list(exp_dic['conditions_dict_list'].keys()) list_with_most_species_in_them = [] for specie in species_to_loop: list_with_most_species_in_them.append(len(conditions[specie])) max_species = max(list_with_most_species_in_them) diluent=[] if 'Diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys() or 'diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys(): diluent = exp_dic['uncertainty']['species_relative_uncertainty']['type_dict']['diluent'] singular_species=[] for species in list(exp_dic['simulation'].fullParsedYamlFile['conditions'].keys()): if len(exp_dic['simulation'].fullParsedYamlFile['conditions'][species])==1 and species not in diluent: singular_species.append(species) #species_in_simulation = len(set(dict_of_conditions.keys()).difference(diluant)) * max_species species = copy.deepcopy(species_to_loop) species_in_simulation = int(len(singular_species)+((len(set(exp_dic['simulation'].fullParsedYamlFile['speciesNames']).difference(diluent))-len(singular_species))*len(exp_dic['simulation'].fullParsedYamlFile['conditions_to_run']))) temperatures_in_simulation = len(exp_dic['simulation'].temperatures) pressures_in_simulation = len(exp_dic['simulation'].pressures) time_shift_length = 1 #print(species_in_simulation,temperatures_in_simulation,pressures_in_simulation) len_of_phsycial_observables_in_simulation = species_in_simulation+temperatures_in_simulation+pressures_in_simulation + time_shift_length temp_zeros = np.zeros((len_of_phsycial_observables_in_simulation,1)) Y = np.vstack((Y,temp_zeros)) for value in range(temperatures_in_simulation): Y_data_Frame.append('T'+str(value+1)+'_'+'experiment'+'_'+str(i)) for value in range(pressures_in_simulation): Y_data_Frame.append('P'+str(value+1)+'_'+'experiment'+'_'+str(i)) diluent=[] if 'Diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys() or 'diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys(): diluent = 
exp_dic['uncertainty']['species_relative_uncertainty']['type_dict']['diluent'] for x,species in enumerate(exp_dic['simulation'].fullParsedYamlFile['speciesNames']): if species in singular_species and species not in diluent: Y_data_Frame.append('X'+str(x+1)+'_'+species+'_experiment_'+str(i)) elif species not in singular_species and species not in diluent: for j in range(len(exp_dic['simulation'].fullParsedYamlFile['conditions_to_run'])): Y_data_Frame.append('X'+str(x+1)+'_'+species+'_experiment_'+str(i)) Y_data_Frame.append('Time_shift'+'_'+'experiment'+'_'+str(i)) elif re.match('[Ss]pecies[- ][Pp]rofile',exp_dict_list[i]['experiment_type']) and re.match('[Ff]low[ -][Rr]eactor',exp_dict_list[i]['simulation_type']): dict_of_conditions = exp_dic['simulation'].conditions species_in_simulation = len(set(dict_of_conditions.keys()).difference(['Ar','AR','ar','HE','He','he','Kr','KR','kr','Xe','XE','xe','NE','Ne','ne'])) temperatures_in_simulation = len(exp_dic['simulation'].temperatures) time_shift_in_simulation = len(parsed_yaml_file_list[i]['timeShiftOriginal']) pressure_in_simulation = 1 len_of_phsycial_observables_in_simulation = species_in_simulation+temperatures_in_simulation+pressure_in_simulation+time_shift_in_simulation temp_zeros = np.zeros((len_of_phsycial_observables_in_simulation,1)) Y = np.vstack((Y,temp_zeros)) for value in range(temperatures_in_simulation): Y_data_Frame.append('T'+'_'+'experiment'+'_'+str(i)) Y_data_Frame.append('P'+'_'+'experiment'+'_'+str(i)) for variable in range(species_in_simulation): Y_data_Frame.append('X'+'_'+str(variable)+'_'+'experiment'+'_'+str(i)) for variable in range(time_shift_in_simulation): Y_data_Frame.append('Time_shift'+'_'+str(variable)+'_'+'experiment'+'_'+str(i)) else: if re.match('[Ss]hock [Tt]ube',exp_dict_list[i]['simulation_type']) and re.match('[Ss]pecies[- ][Pp]rofile',exp_dict_list[i]['experiment_type']): dic_of_conditions = exp_dic['simulation'].conditions #subtract out the dilluant species_in_simulation = len(set(dic_of_conditions.keys()).difference(['Ar','AR','ar','HE','He','he','Kr','KR','kr','Xe','XE','xe','NE','Ne','ne'])) Y_data_Frame.append('T'+'_'+'experiment'+'_'+str(i)) Y_data_Frame.append('P'+'_'+'experiment'+'_'+str(i)) for variable in range(species_in_simulation): Y_data_Frame.append('X'+'_'+str(variable)+'_'+'experiment'+'_'+str(i)) Y_data_Frame.append('Time_shift'+'_'+'experiment'+'_'+str(i)) elif re.match('[Jj][Ss][Rr]',exp_dict_list[i]['simulation_type']): dict_of_conditions = exp_dic['simulation'].conditions species_in_simulation = len(set(dict_of_conditions.keys()).difference(['Ar','AR','ar','HE','He','he','Kr','KR','kr','Xe','XE','xe','NE','Ne','ne'])) temperatures_in_simulation = len(exp_dic['simulation'].temperatures) pressure_in_simulation = 1 restime_in_simulation = 1 len_of_phsycial_observables_in_simulation = species_in_simulation+temperatures_in_simulation+pressure_in_simulation+restime_in_simulation for value in range(temperatures_in_simulation): Y_data_Frame.append('T'+'_'+'experiment'+'_'+str(i)) Y_data_Frame.append('P'+'_'+'experiment'+'_'+str(i)) for variable in range(species_in_simulation): Y_data_Frame.append('X'+'_'+str(variable)+'_'+'experiment'+'_'+str(i)) Y_data_Frame.append('R_experiment_'+str(i)) elif re.match('[Ff]lame [Ss]peed',exp_dict_list[i]['simulation_type']) and re.match('[Oo][Nn][Ee]|[1][ -][dD][ -][Ff]lame',exp_dict_list[i]['experimentType']): species_to_loop = exp_dic['uncertainty']['species_relative_uncertainty']['species'] list_with_most_species_in_them = [] for specie in 
species_to_loop: list_with_most_species_in_them.append(len(conditions[specie])) max_species = max(list_with_most_species_in_them) if 'Diluant' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys() or 'diluant' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys(): diluant = exp_dic['uncertainty']['species_relative_uncertainty']['type_dict']['diluant'] species_in_simulation = len(set(dict_of_conditions.keys()).difference(diluant)) * max_species temperatures_in_simulation = len(exp_dic['simulation'].temperatures) pressure_in_simulation = len(exp_dic['simulation'].pressures) len_of_phsycial_observables_in_simulation = species_in_simulation+temperatures_in_simulation+pressure_in_simulation for value in range(temperatures_in_simulation): Y_data_Frame.append('T'+'_'+'experiment'+'_'+str(i)) for value in range(pressures_in_simulation): Y_data_Frame.append('P'+'_'+'experiment'+'_'+str(i)) for variable in range(species_in_simulation): Y_data_Frame.append('X'+'_'+str(variable)+'_'+'experiment'+'_'+str(i)) elif re.match('[Ss]pecies[- ][Pp]rofile',exp_dict_list[i]['experiment_type']) and re.match('[Ff]low[ -][Rr]eactor',exp_dict_list[i]['simulation_type']): dict_of_conditions = exp_dic['simulation'].conditions species_in_simulation = len(set(dict_of_conditions.keys()).difference(['Ar','AR','ar','HE','He','he','Kr','KR','kr','Xe','XE','xe','NE','Ne','ne'])) temperatures_in_simulation = len(exp_dic['simulation'].temperatures) time_shift_in_simulation = len(parsed_yaml_file_list[i]['timeShiftOriginal']) pressure_in_simulation = 1 for value in range(temperatures_in_simulation): Y_data_Frame.append('T'+'_'+'experiment'+'_'+str(i)) Y_data_Frame.append('P'+'_'+'experiment'+'_'+str(i)) for variable in range(species_in_simulation): Y_data_Frame.append('X'+'_'+str(variable)+'_'+'experiment'+'_'+str(i)) for variable in range(time_shift_in_simulation): Y_data_Frame.append('Time_shift'+'_'+str(variable)+'_'+'experiment'+'_'+str(i)) elif re.match('[Ss]hock [Tt]ube',exp_dict_list[i]['simulation_type']) and re.match('[Ii]gnition[- ][Dd]elay',exp_dict_list[i]['experiment_type']): conditions = exp_dic['conditions_dict_list'] species_to_loop = list(exp_dic['conditions_dict_list'].keys()) temperatures_in_simulation = len(exp_dic['simulation'].temperatures) pressures_in_simulation = len(exp_dic['simulation'].pressures) list_with_most_species_in_them = [] for specie in species_to_loop: list_with_most_species_in_them.append(len(conditions[specie])) max_species = max(list_with_most_species_in_them) diluant=[] if 'Diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys() or 'diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys(): diluant = exp_dic['uncertainty']['species_relative_uncertainty']['type_dict']['diluent'] for value in range(temperatures_in_simulation): Y_data_Frame.append('T'+str(value+1)+'_'+'experiment'+'_'+str(i)) for value in range(pressures_in_simulation): Y_data_Frame.append('P'+str(value+1)+'_'+'experiment'+'_'+str(i)) diluent=[] if 'Diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys() or 'diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys(): diluent = exp_dic['uncertainty']['species_relative_uncertainty']['type_dict']['diluent'] singular_species=[] for species in list(exp_dic['simulation'].fullParsedYamlFile['conditions'].keys()): if len(exp_dic['simulation'].fullParsedYamlFile['conditions'][species])==1 and species not in diluent: 
singular_species.append(species) for x,species in enumerate(exp_dic['simulation'].fullParsedYamlFile['speciesNames']): if species in singular_species and species not in diluent: Y_data_Frame.append('X'+str(x+1)+'_'+species+'_experiment_'+str(i)) elif species not in singular_species and species not in diluent: for j in range(len(exp_dic['simulation'].fullParsedYamlFile['conditions_to_run'])): Y_data_Frame.append('X'+str(x+1)+'_'+species+'_experiment_'+str(i)) Y_data_Frame.append('Time_shift'+'_'+'experiment'+'_'+str(i)) elif re.match('[Rr][Cc][Mm]',exp_dict_list[i]['simulation_type']) and re.match('[Ii]gnition[- ][Dd]elay',exp_dict_list[i]['experiment_type']): conditions = exp_dic['conditions_dict_list'] species_to_loop = list(exp_dic['conditions_dict_list'].keys()) temperatures_in_simulation = len(exp_dic['simulation'].temperatures) pressures_in_simulation = len(exp_dic['simulation'].pressures) list_with_most_species_in_them = [] for specie in species_to_loop: list_with_most_species_in_them.append(len(conditions[specie])) max_species = max(list_with_most_species_in_them) diluant=[] if 'Diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys() or 'diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys(): diluant = exp_dic['uncertainty']['species_relative_uncertainty']['type_dict']['diluent'] for value in range(temperatures_in_simulation): Y_data_Frame.append('T'+str(value+1)+'_'+'experiment'+'_'+str(i)) for value in range(pressures_in_simulation): Y_data_Frame.append('P'+str(value+1)+'_'+'experiment'+'_'+str(i)) diluent=[] if 'Diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys() or 'diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys(): diluent = exp_dic['uncertainty']['species_relative_uncertainty']['type_dict']['diluent'] singular_species=[] for species in list(exp_dic['simulation'].fullParsedYamlFile['conditions'].keys()): if len(exp_dic['simulation'].fullParsedYamlFile['conditions'][species])==1 and species not in diluent: singular_species.append(species) for x,species in enumerate(exp_dic['simulation'].fullParsedYamlFile['speciesNames']): if species in singular_species and species not in diluent: Y_data_Frame.append('X'+str(x+1)+'_'+species+'_experiment_'+str(i)) elif species not in singular_species and species not in diluent: for j in range(len(exp_dic['simulation'].fullParsedYamlFile['conditions_to_run'])): Y_data_Frame.append('X'+str(x+1)+'_'+species+'_experiment_'+str(i)) Y_data_Frame.append('Time_shift'+'_'+'experiment'+'_'+str(i)) if i==len(exp_dict_list)-1: temp_array = np.array(X['physical_observables'])*-1 temp_array = temp_array.reshape((temp_array.shape[0], 1)) Y = np.vstack((Y,temp_array)) #Assembling the portion of the Y matrix for the absorbance coefficient sensitiviteis pert_coef = {} #build a dict matching pert_coef to their experiment and wavelength. 
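        # A sketch of the intended shape of pert_coef, assuming x[0][2] is the tuple of coupled
        # absorbance coefficients for a perturbation (the key values below are illustrative, not
        # taken from any input file):
        #
        #   pert_coef = {
        #       (225.0, 0.0): [absorbance_data_exp_1, absorbance_data_exp_3],
        #       (190.0, 5.6): [absorbance_data_exp_2],
        #   }
        #
        # so len(pert_coef) counts the independent coupled-coefficient groups ('Sigma' entries).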
        #length of the dict gives padding information
        for exp in exp_dict_list:
            if 'perturbed_coef' not in exp.keys():
                continue
            perturbed_for_exp = exp['perturbed_coef']
            for x in perturbed_for_exp:
                if x[0][2] not in pert_coef.keys():
                    pert_coef[x[0][2]] = [x[1]]
                else:
                    pert_coef[x[0][2]].append(x[1])

        num_ind_pert_coef = len(pert_coef)
        temp_zeros = np.zeros((num_ind_pert_coef,1))
        if loop_counter == 0:
            Y = np.vstack((Y,temp_zeros))
        else:
            if 'absorbance_coefficent_observables' in X.keys():
                #temp_array = np.array(X['absorbance_coefficent_observables'])
                temp_array = X['absorbance_coefficent_observables']
                temp_array = [a for a in temp_array if a != 'null']
                #temp_array = temp_array[temp_array!=0]
                #temp_array = temp_array[temp_array!=0]
                temp_array = np.array(temp_array)
                temp_array = np.array(temp_array)*-1
                temp_array = temp_array.reshape((temp_array.shape[0], 1))
                Y = np.vstack((Y,temp_array))

        for x in range(num_ind_pert_coef):
            Y_data_Frame.append('Sigma'+'_'+str(x))

        Y_data_Frame = pd.DataFrame({'value': Y_data_Frame,'ln_difference': Y.reshape((Y.shape[0],))})
        self.Y_matrix = Y
        #print(Y.shape,'Y matrix without k targets')
        return Y, Y_data_Frame

    def load_S(self, exp_dict_list:list,parsed_yaml_list:list, dk=.01, master_equation_reactions = [], mapped_master_equation_sensitivites=np.array(()), master_equation_uncertainty_df = None, master_equation_flag = False):
        #preprocessing for padding
        num_exp = len(exp_dict_list)
        pert_coef = {}
        #build a dict matching pert_coef to their experiment and wavelength.
        #length of the dict gives padding information
        list_to_keep_order_of_coef = []
        for exp in exp_dict_list:
            if 'perturbed_coef' not in exp.keys():
                continue
            perturbed_for_exp = exp['perturbed_coef']
            for x in perturbed_for_exp:
                if x[0][2] not in pert_coef.keys():
                    pert_coef[x[0][2]] = [x[1]]
                else:
                    pert_coef[x[0][2]].append(x[1])
                if x[0][2] not in list_to_keep_order_of_coef:
                    list_to_keep_order_of_coef.append(x[0][2])

        num_ind_pert_coef = len(pert_coef)
        #print(pert_coef.keys())
        #print(num_ind_pert_coef," sigmas")
        #establish # of independent pert before hand, to proper pad the observables, put in list, make a dict of cc,
        # values will be a list of tabs data?
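        # num_ind_pert_coef is later used to zero-pad each experiment's absorbance-coefficient
        # sensitivity block so that every experiment contributes one column per independent coupled
        # coefficient, in the order recorded in list_to_keep_order_of_coef.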
# use the list to get the padding size k_sens_for_whole_simulation = [] p_sens_for_whole_simulation = [] abs_coef_sens_for_whole_simulation = [] temps = [] for i,exp in enumerate(exp_dict_list): ttl_kinetic_observables_for_exp = [] obs_counter =0 for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']+ exp['ignition_delay_observables']): if observable == None: continue #return exp['ksens']['A'] #print(np.shape(exp['ksens']['A'][obs_counter])) #print(np.shape(exp['ksens']['N'][obs_counter])) #print(np.shape(exp['ksens']['Ea'][obs_counter])) single_obs_matrix = np.hstack((exp['ksens']['A'][obs_counter], exp['ksens']['N'][obs_counter], exp['ksens']['Ea'][obs_counter])) #print(single_obs_matrix) ttl_kinetic_observables_for_exp.append(single_obs_matrix) obs_counter +=1 if 'perturbed_coef' in exp.keys(): wavelengths = parsed_yaml_list[i]['absorbanceCsvWavelengths'] for k,wl in enumerate(wavelengths): single_obs_matrix = np.hstack((exp['absorbance_ksens'][wl][0], exp['absorbance_ksens'][wl][1], exp['absorbance_ksens'][wl][2])) ttl_kinetic_observables_for_exp.append(single_obs_matrix) ttl_kinetic_observables_for_exp = np.vstack((ttl_kinetic_observables_for_exp)) k_sens_for_whole_simulation.append(ttl_kinetic_observables_for_exp) #print(np.shape(k_sens_for_whole_simulation)) ####vstack ttl_kinetic_observables_for_exp and append somwehre else if exp['simulation'].physicalSens ==1: ttl_phsycal_obs_for_exp = [] for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables'] + exp['ignition_delay_observables']): obs_counter = 0 if observable == None: continue if re.match('[Ss]hock [Tt]ube',exp['simulation_type']) and re.match('[Ss]pecies[- ][Pp]rofile',exp['experiment_type']): temperature_sensitivity = exp['temperature'][observable].dropna().values temperature_sensitivity = temperature_sensitivity.reshape((temperature_sensitivity.shape[0], 1)) time_shift_sensitivity = exp['time_shift'][observable].dropna().values time_shift_sensitivity = time_shift_sensitivity.reshape((time_shift_sensitivity.shape[0], 1)) pressure_sensitivity = exp['pressure'][observable].dropna().values pressure_sensitivity = pressure_sensitivity.reshape((pressure_sensitivity.shape[0], 1)) species_sensitivty = [] for df in exp['species']: single_species_sensitivty = df[observable].dropna().values single_species_sensitivty = single_species_sensitivty.reshape((single_species_sensitivty.shape[0] ,1)) species_sensitivty.append(single_species_sensitivty) species_sensitivty = np.hstack((species_sensitivty)) elif re.match('[Jj][Ss][Rr]',exp['simulation_type']): temperature_sensitivity=np.array(exp['temperature'][observable])*np.identity(len(exp['simulation'].temperatures)) pressure_sensitivity = exp['pressure'][observable].dropna().values pressure_sensitivity = pressure_sensitivity.reshape((pressure_sensitivity.shape[0], 1)) restime_sensitivity=exp['restime_sens'][observable].dropna().values restime_sensitivity = restime_sensitivity.reshape((restime_sensitivity.shape[0],1)) species_sensitivty = [] for df in exp['species']: single_species_sensitivty = df[observable].dropna().values single_species_sensitivty = single_species_sensitivty.reshape((single_species_sensitivty.shape[0],1)) species_sensitivty.append(single_species_sensitivty) species_sensitivty = np.hstack((species_sensitivty)) restime_sensitivity=exp['restime_sens'][observable].dropna().values restime_sensitivity = restime_sensitivity.reshape((restime_sensitivity.shape[0],1)) elif re.match('[Ss]pecies[- 
][Pp]rofile',exp_dict_list[i]['experiment_type']) and re.match('[Ff]low[ -][Rr]eactor',exp_dict_list[i]['simulation_type']): temperature_sensitivity=np.array(exp['temperature'][observable])*np.identity(len(exp['simulation'].temperatures)) pressure_sensitivity = exp['pressure'][observable].dropna().values pressure_sensitivity = pressure_sensitivity.reshape((pressure_sensitivity.shape[0], 1)) species_sensitivty = [] for df in exp['species']: single_species_sensitivty = df[observable].dropna().values single_species_sensitivty = single_species_sensitivty.reshape((single_species_sensitivty.shape[0],1)) species_sensitivty.append(single_species_sensitivty) species_sensitivty = np.hstack((species_sensitivty)) if len(parsed_yaml_list[i]['timeShiftOriginal'])>1: time_shift_sensitivity = np.array(exp['time_shift'][observable])*np.identity(len(exp['simulation'].temperatures)) else: time_shift_sensitivity = np.array(exp['time_shift'][observable]) time_shift_sensitivity = time_shift_sensitivity.reshape((time_shift_sensitivity.shape[0], 1)) elif re.match('[Ii]gnition[- ][Dd]elay',exp['experiment_type']) and re.match('[Ss]hock[- ][Tt]ube',exp['simulation_type']): #CHECK HOW MANY SPECIES THERE ARE. conditions = exp['conditions_dict_list'] species_to_loop = list(exp['conditions_dict_list'].keys()) list_with_most_species_in_them = [] for specie in species_to_loop: list_with_most_species_in_them.append(len(conditions[specie])) if len(exp['simulation'].temperatures)>1 and len(exp['simulation'].pressures)==1: temperature_sensitivity=np.array(exp['temperature']['delay'])*np.identity(len(exp['simulation'].temperatures)) pressure_sensitivity = exp['pressure']['delay'].dropna().values pressure_sensitivity = pressure_sensitivity.reshape((pressure_sensitivity.shape[0], 1)) species_sensitivty = [] for df in exp['species']: single_species_sensitivty = df['delay'].dropna().values single_species_sensitivty = single_species_sensitivty.reshape((single_species_sensitivty.shape[0],1)) species_sensitivty.append(single_species_sensitivty) species_sensitivty = np.hstack((species_sensitivty)) time_shift_sensitivity = exp['time_shift']['delay'].dropna().values time_shift_sensitivity = time_shift_sensitivity.reshape((time_shift_sensitivity.shape[0], 1)) #print("INSIDE HERE") elif len(exp['simulation'].pressures)>1 and len(exp['simulation'].temperatures)==1: pressure_sensitivity = np.array(exp['pressure']['delay'])*np.identity(len(exp['simulation'].pressures)) temperature_sensitivity = exp['temperature']['delay'].dropna().values temperature_sensitivity = temperature_sensitivity.reshape((temperature_sensitivity.shape[0], 1)) species_sensitivty = [] for df in exp['species']: single_species_sensitivty = df['delay'].dropna().values single_species_sensitivty = single_species_sensitivty.reshape((single_species_sensitivty.shape[0],1)) species_sensitivty.append(single_species_sensitivty) species_sensitivty = np.hstack((species_sensitivty)) time_shift_sensitivity = exp['time_shift']['delay'].dropna().values time_shift_sensitivity = time_shift_sensitivity.reshape((time_shift_sensitivity.shape[0], 1)) elif len(exp['simulation'].pressures)==1 and len(exp['simulation'].temperatures)==1 and len(list_with_most_species_in_them)>1: pressure_sensitivity = exp['pressure']['delay'].dropna().values pressure_sensitivity = pressure_sensitivity.reshape((pressure_sensitivity.shape[0], 1)) temperature_sensitivity = exp['temperature']['delay'].dropna().values temperature_sensitivity = temperature_sensitivity.reshape((temperature_sensitivity.shape[0], 1)) 
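                        # When a physical parameter takes a different value in each condition set, its
                        # sensitivity column is expanded into a diagonal block so that each data point
                        # responds only to its own condition. A minimal sketch of that pattern
                        # (placeholder values, not from any experiment; not executed here):
                        #
                        #   import numpy as np
                        #   sens  = np.array([0.8, 1.1, 0.9])        # one sensitivity per condition
                        #   block = sens * np.identity(len(sens))    # 3x3 diagonal block for S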
species_sensitivty=[] conditions = exp['conditions_dict_list'] species_to_loop = list(exp['conditions_dict_list'].keys()) list_with_most_species_in_them = [] for specie in species_to_loop: list_with_most_species_in_them.append(len(conditions[specie])) diluent=[] if 'Diluent' in exp['uncertainty']['species_relative_uncertainty']['type_dict'].keys() or 'diluent' in exp['uncertainty']['species_relative_uncertainty']['type_dict'].keys(): diluent = exp['uncertainty']['species_relative_uncertainty']['type_dict']['diluent'] singular_species=[] for species in list(exp['simulation'].fullParsedYamlFile['conditions'].keys()): if len(exp['simulation'].fullParsedYamlFile['conditions'][species])==1 and species not in diluent: singular_species.append(species) for x,species in enumerate(exp['simulation'].fullParsedYamlFile['speciesNames']): if species in singular_species and species not in diluent: single_species_sensitivty = exp['species'][x]['delay'].dropna().values single_species_sensitivty = single_species_sensitivty.reshape((single_species_sensitivty.shape[0],1)) #print(single_species_sensitivty) species_sensitivty.append(single_species_sensitivty) elif species not in singular_species and species not in diluent: single_species_sensitivty = np.array(exp['species'][x]['delay'])*np.identity(len(exp['species'][x]['delay'])) species_sensitivty.append(single_species_sensitivty) species_sensitivty=np.hstack((species_sensitivty)) time_shift_sensitivity = exp['time_shift']['delay'].dropna().values time_shift_sensitivity = time_shift_sensitivity.reshape((time_shift_sensitivity.shape[0], 1)) elif len(exp['simulation'].pressures)>1 and len(exp['simulation'].temperatures)>1 and len(list_with_most_species_in_them)>1 and len(exp['simulation'].pressures)==len(exp['simulation'].temperatures): temperature_sensitivity=np.array(exp['temperature']['delay'])*np.identity(len(exp['simulation'].temperatures)) pressure_sensitivity = np.array(exp['pressure']['delay'])*np.identity(len(exp['simulation'].pressures)) species_sensitivty=[] conditions = exp['conditions_dict_list'] species_to_loop = list(exp['conditions_dict_list'].keys()) list_with_most_species_in_them = [] for specie in species_to_loop: list_with_most_species_in_them.append(len(conditions[specie])) diluent=[] if 'Diluent' in exp['uncertainty']['species_relative_uncertainty']['type_dict'].keys() or 'diluent' in exp['uncertainty']['species_relative_uncertainty']['type_dict'].keys(): diluent = exp['uncertainty']['species_relative_uncertainty']['type_dict']['diluent'] singular_species=[] for species in list(exp['simulation'].fullParsedYamlFile['conditions'].keys()): if len(exp['simulation'].fullParsedYamlFile['conditions'][species])==1 and species not in diluent: singular_species.append(species) for x,species in enumerate(exp['simulation'].fullParsedYamlFile['speciesNames']): if species in singular_species and species not in diluent: single_species_sensitivty = exp['species'][x]['delay'].dropna().values single_species_sensitivty = single_species_sensitivty.reshape((single_species_sensitivty.shape[0],1)) #print(single_species_sensitivty) species_sensitivty.append(single_species_sensitivty) elif species not in singular_species and species not in diluent: single_species_sensitivty = np.array(exp['species'][x]['delay'])*np.identity(len(exp['species'][x]['delay'])) species_sensitivty.append(single_species_sensitivty) species_sensitivty=np.hstack((species_sensitivty)) time_shift_sensitivity = exp['time_shift']['delay'].dropna().values time_shift_sensitivity = 
time_shift_sensitivity.reshape((time_shift_sensitivity.shape[0], 1)) elif len(exp['simulation'].pressures)>1 and len(exp['simulation'].temperatures)>1 and len(exp['simulation'].pressures) == len(exp['simulation'].temperatures): temperature_sensitivity=np.array(exp['temperature']['delay'])*np.identity(len(exp['simulation'].temperatures)) pressure_sensitivity = np.array(exp['pressure']['delay'])*np.identity(len(exp['simulation'].pressures)) species_sensitivty = [] for df in exp['species']: single_species_sensitivty = df['delay'].dropna().values single_species_sensitivty = single_species_sensitivty.reshape((single_species_sensitivty.shape[0],1)) species_sensitivty.append(single_species_sensitivty) species_sensitivty = np.hstack((species_sensitivty)) time_shift_sensitivity = exp['time_shift']['delay'].dropna().values time_shift_sensitivity = time_shift_sensitivity.reshape((time_shift_sensitivity.shape[0], 1)) elif re.match('[Ii]gnition[- ][Dd]elay',exp['experiment_type']) and re.match('[Rr][Cc][Mm]',exp['simulation_type']): if len(exp['simulation'].temperatures)>1 and len(exp['simulation'].pressures)>1: temperature_sensitivity=np.array(exp['temperature']['delay'])*np.identity(len(exp['simulation'].temperatures)) pressure_sensitivity = np.array(exp['pressure']['delay'])*np.identity(len(exp['simulation'].pressures)) species_sensitivty = [] for df in exp['species']: single_species_sensitivty = df['delay'].dropna().values single_species_sensitivty = single_species_sensitivty.reshape((single_species_sensitivty.shape[0],1)) species_sensitivty.append(single_species_sensitivty) species_sensitivty = np.hstack((species_sensitivty)) time_shift_sensitivity = exp['time_shift']['delay'].dropna().values time_shift_sensitivity = time_shift_sensitivity.reshape((time_shift_sensitivity.shape[0], 1)) elif len(exp['simulation'].temperatures)>1: temperature_sensitivity=np.array(exp['temperature']['delay'])*np.identity(len(exp['simulation'].temperatures)) pressure_sensitivity = exp['pressure']['delay'].dropna().values pressure_sensitivity = pressure_sensitivity.reshape((pressure_sensitivity.shape[0], 1)) species_sensitivty = [] for df in exp['species']: single_species_sensitivty = df['delay'].dropna().values single_species_sensitivty = single_species_sensitivty.reshape((single_species_sensitivty.shape[0],1)) species_sensitivty.append(single_species_sensitivty) species_sensitivty = np.hstack((species_sensitivty)) time_shift_sensitivity = exp['time_shift']['delay'].dropna().values time_shift_sensitivity = time_shift_sensitivity.reshape((time_shift_sensitivity.shape[0], 1)) #print("INSIDE HERE") elif len(exp['simulation'].pressures)>1: pressure_sensitivity = np.array(exp['pressure']['delay'])*np.identity(len(exp['simulation'].pressures)) temperature_sensitivity = exp['temperature']['delay'].dropna().values temperature_sensitivity = temperature_sensitivity.reshape((temperature_sensitivity.shape[0], 1)) species_sensitivty = [] for df in exp['species']: single_species_sensitivty = df['delay'].dropna().values single_species_sensitivty = single_species_sensitivty.reshape((single_species_sensitivty.shape[0],1)) species_sensitivty.append(single_species_sensitivty) species_sensitivty = np.hstack((species_sensitivty)) time_shift_sensitivity = exp['time_shift']['delay'].dropna().values time_shift_sensitivity = time_shift_sensitivity.reshape((time_shift_sensitivity.shape[0], 1)) elif len(exp['simulation'].pressures)==1 and len(exp['simulation'].temperatures)==1: pressure_sensitivity = exp['pressure']['delay'].dropna().values 
pressure_sensitivity = pressure_sensitivity.reshape((pressure_sensitivity.shape[0], 1)) temperature_sensitivity = exp['temperature']['delay'].dropna().values temperature_sensitivity = temperature_sensitivity.reshape((temperature_sensitivity.shape[0], 1)) species_sensitivty=[] conditions = exp['conditions_dict_list'] species_to_loop = list(exp['conditions_dict_list'].keys()) list_with_most_species_in_them = [] for specie in species_to_loop: list_with_most_species_in_them.append(len(conditions[specie])) diluent=[] if 'Diluent' in exp['uncertainty']['species_relative_uncertainty']['type_dict'].keys() or 'diluent' in exp['uncertainty']['species_relative_uncertainty']['type_dict'].keys(): diluent = exp['uncertainty']['species_relative_uncertainty']['type_dict']['diluent'] singular_species=[] for species in list(exp['simulation'].fullParsedYamlFile['conditions'].keys()): if len(exp['simulation'].fullParsedYamlFile['conditions'][species])==1 and species not in diluent: singular_species.append(species) for x,species in enumerate(exp['simulation'].fullParsedYamlFile['speciesNames']): if species in singular_species and species not in diluent: single_species_sensitivty = exp['species'][x]['delay'].dropna().values single_species_sensitivty = single_species_sensitivty.reshape((single_species_sensitivty.shape[0],1)) #print(single_species_sensitivty) species_sensitivty.append(single_species_sensitivty) elif species not in singular_species and species not in diluent: single_species_sensitivty = np.array(exp['species'][x]['delay'])*np.identity(len(exp['species'][x]['delay'])) species_sensitivty.append(single_species_sensitivty) species_sensitivty=np.hstack((species_sensitivty)) time_shift_sensitivity = exp['time_shift']['delay'].dropna().values time_shift_sensitivity = time_shift_sensitivity.reshape((time_shift_sensitivity.shape[0], 1)) elif re.match('[Ff]lame[- ][Ss]peed',exp['simulation_type']) and re.match('[Oo][Nn][Ee]|[1][ -][dD][ -][Ff]lame',exp['experiment_type']): len_of_temperature_list = len(exp['simulation'].temperatures) if len_of_temperature_list > 1: temperature_sensitivity=np.array(exp['temperature'][observable])*np.identity(len(exp['simulation'].temperatures)) else: temperature_sensitivity = np.array(exp['temperature'][observable]) temperature_sensitivity = temperature_sensitivity.reshape((temperature_sensitivity.shape[0], 1)) len_of_pressure_list = len(exp['simulation'].pressures) if len_of_pressure_list >1: pressure_sensitivity=np.array(exp['pressure'][observable])*np.identity(len(exp['simulation'].pressures)) else: pressure_sensitivity=np.array(exp['pressure'][observable]) pressure_sensitivity = pressure_sensitivity.reshape((pressure_sensitivity.shape[0], 1)) #FIX THIS #print('FIXXXX') species_sensitivty = [] for df in exp['species']: single_species_sensitivty = df[observable].dropna().values single_species_sensitivty = single_species_sensitivty.reshape((single_species_sensitivty.shape[0] ,1)) species_sensitivty.append(single_species_sensitivty) species_sensitivty = np.hstack((species_sensitivty)) if re.match('[Jj][Ss][Rr]',exp['simulation_type']): single_obs_physical = np.hstack((temperature_sensitivity,pressure_sensitivity,species_sensitivty,restime_sensitivity)) elif re.match('[Ss]hock [Tt]ube',exp['simulation_type']) and re.match('[Ss]pecies[- ][Pp]rofile',exp['experiment_type']): single_obs_physical = np.hstack((temperature_sensitivity,pressure_sensitivity,species_sensitivty,time_shift_sensitivity)) elif re.match('[Ss]pecies[- ][Pp]rofile',exp_dict_list[i]['experiment_type']) and 
re.match('[Ff]low[ -][Rr]eactor',exp_dict_list[i]['simulation_type']): single_obs_physical = np.hstack((temperature_sensitivity,pressure_sensitivity,species_sensitivty,time_shift_sensitivity)) elif re.match('[Ii]gnition[- ][Dd]elay',exp['experiment_type']) and re.match('[Ss]hock[- ][Tt]ube',exp['simulation_type']): single_obs_physical = np.hstack((temperature_sensitivity,pressure_sensitivity,species_sensitivty,time_shift_sensitivity)) #print("INSIDE HERE") elif re.match('[Ii]gnition[- ][Dd]elay',exp['experiment_type']) and re.match('[Rr][Cc][Mm]',exp['simulation_type']): single_obs_physical = np.hstack((temperature_sensitivity,pressure_sensitivity,species_sensitivty,time_shift_sensitivity)) ttl_phsycal_obs_for_exp.append(single_obs_physical) obs_counter +=1 if 'perturbed_coef' in exp.keys(): wavelengths = parsed_yaml_list[i]['absorbanceCsvWavelengths'] for k,wl in enumerate(wavelengths): physical_sens = [] for p_sens in exp['absorbance_psens']: array = p_sens[wl] array = array.reshape((array.shape[0],1)) physical_sens.append(array) for time_sens in exp['absorbance_time_shift']: array2 = p_sens[wl] array2 = array2.reshape((array2.shape[0],1)) physical_sens.append(array2) physical_sens = np.hstack((physical_sens)) ttl_phsycal_obs_for_exp.append(physical_sens) ttl_phsycal_obs_for_exp = np.vstack((ttl_phsycal_obs_for_exp)) p_sens_for_whole_simulation.append(ttl_phsycal_obs_for_exp) ####################################################################################################################################################### if 'perturbed_coef' in exp.keys(): ttl_absorbance_obs_for_exp = [] wavelengths = parsed_yaml_list[i]['absorbanceCsvWavelengths'] for k,wl in enumerate(wavelengths): perturbed_coefficeints = [] index_list = [] for xx in range(len(parsed_yaml_list[i]['coupledCoefficients'])): for yy in range(len(parsed_yaml_list[i]['coupledCoefficients'][xx])): ff = parsed_yaml_list[i]['functionalForm'][xx][yy] #temp = list(parsed_yaml_list[i]['coupledCoefficients'][xx][yy]) for zz in range(len(parsed_yaml_list[i]['coupledCoefficients'][xx][yy])): temp = list(parsed_yaml_list[i]['coupledCoefficients'][xx][yy]) coefficent = parsed_yaml_list[i]['coupledCoefficients'][xx][yy][zz] if coefficent!=0: perturbed_coefficent=coefficent+coefficent*dk if zz==1 and ff =='F': #change back tab perturbed_coefficent = coefficent + .01*coefficent temp[zz] = perturbed_coefficent key = tuple(temp) indx = list_to_keep_order_of_coef.index(key) index_list.append(indx) exp_index_sigma = temps.count(key) temps.append(key) array = pert_coef[key][exp_index_sigma][wl] array = array.reshape((array.shape[0],1)) perturbed_coefficeints.append(array) missing_sigmas = [] for indp_sigma in range(len(list_to_keep_order_of_coef)): if indp_sigma not in index_list: missing_sigmas.append(indp_sigma) perturbed_coefficents_padded_with_zeros = [] count_sigma=0 for indp_sigma in range(len(list_to_keep_order_of_coef)): if indp_sigma in missing_sigmas: zero_array = np.zeros((perturbed_coefficeints[0].shape[0],1)) perturbed_coefficents_padded_with_zeros.append(zero_array) else: perturbed_coefficents_padded_with_zeros.append(perturbed_coefficeints[count_sigma]) count_sigma +=1 perturbed_coefficents_padded_with_zeros = np.hstack((perturbed_coefficents_padded_with_zeros)) ttl_absorbance_obs_for_exp.append(perturbed_coefficents_padded_with_zeros) ttl_absorbance_obs_for_exp = np.vstack((ttl_absorbance_obs_for_exp)) abs_coef_sens_for_whole_simulation.append(ttl_absorbance_obs_for_exp) #vstack ttl_absorbance_obs_for_exp and append 
somehwere else else: abs_coef_sens_for_whole_simulation.append(0) ###################################################################################################################################################### flatten = lambda *n: (e for a in n for e in (flatten(*a) if isinstance(a, (tuple, list)) else (a,))) flattened_master_equation_reaction_list = list(flatten(master_equation_reactions)) #assembling the S matrix from the individual experiments #master_equation = False if master_equation_flag == True: S_ksens = np.vstack((k_sens_for_whole_simulation)) A_k = np.hsplit(S_ksens,3)[0] N_k = np.hsplit(S_ksens,3)[1] Ea_k = np.hsplit(S_ksens,3)[2] number_of_master_equation_reactions = len(flattened_master_equation_reaction_list) A_k = A_k[:,:-number_of_master_equation_reactions] N_k = N_k[:,:-number_of_master_equation_reactions] Ea_k = Ea_k[:,:-number_of_master_equation_reactions] S_ksens = np.hstack((A_k,N_k,Ea_k)) #print(np.shape(S_ksens),'this is the shape of the S matrix before MP') S_ksens = np.hstack((S_ksens,mapped_master_equation_sensitivites)) else: S_ksens = np.vstack((k_sens_for_whole_simulation)) def sum_of_zeros(idx,array,column_list): rows_behind = array.shape[0] rows_infront = array.shape[0] columns_behind = sum(column_list[:idx]) columns_infront = sum(column_list[idx+1:]) behind_tuple = (rows_behind,columns_behind) infront_tuple = (rows_infront,columns_infront) return (behind_tuple,infront_tuple) if exp_dict_list[0]['simulation'].physicalSens ==1: number_of_columns_in_psens_arrays = [] number_of_rows_in_psens_arrays=[] for i,array in enumerate(p_sens_for_whole_simulation): number_of_rows_in_psens_arrays.append(array.shape[0]) number_of_columns_in_psens_arrays.append(array.shape[1]) p_sens_whole_simulation_with_padding = [] for i,array in enumerate(p_sens_for_whole_simulation): zero_array_behind = np.zeros(sum_of_zeros(i,array,number_of_columns_in_psens_arrays)[0]) if zero_array_behind.shape[1] != 0: array = np.hstack((zero_array_behind,array)) zero_array_infront = np.zeros(sum_of_zeros(i,array,number_of_columns_in_psens_arrays)[1]) if zero_array_infront.shape[1] != 0: array = np.hstack((array,zero_array_infront)) p_sens_whole_simulation_with_padding.append(array) S_psens = np.vstack((p_sens_whole_simulation_with_padding)) ############################################################################################## absorb_coef_whole_simulation_with_padding = [] for i,exp in enumerate(exp_dict_list): single_experiment_absorption = [] if exp['mole_fraction_observables'][0] != None or exp['concentration_observables'][0] != None or exp['ignition_delay_observables'][0] != None: if 'perturbed_coef' not in exp.keys(): zero_array_for_observables_padding = np.zeros((number_of_rows_in_psens_arrays[i], num_ind_pert_coef)) single_experiment_absorption.append(zero_array_for_observables_padding) if 'perturbed_coef' in exp.keys(): zero_padded_aborption_coef_array = abs_coef_sens_for_whole_simulation[i] combined = abs_coef_sens_for_whole_simulation[i] if exp['mole_fraction_observables'][0] != None or exp['concentration_observables'][0] != None or exp['ignition_delay_observables'][0] != None: zero_array_for_observables_padding = np.zeros((number_of_rows_in_psens_arrays[i]-zero_padded_aborption_coef_array.shape[0], num_ind_pert_coef)) combined = np.vstack((zero_array_for_observables_padding,zero_padded_aborption_coef_array)) single_experiment_absorption.append(combined) single_experiment_absorption = np.vstack((single_experiment_absorption)) 
absorb_coef_whole_simulation_with_padding.append(single_experiment_absorption) absorb_coef_whole_simulation_with_padding = np.vstack((absorb_coef_whole_simulation_with_padding)) S_abs_coef = absorb_coef_whole_simulation_with_padding #return((S_ksens,S_psens,S_abs_coef)) #print(np.shape(S_ksens),np.shape(S_psens),np.shape(S_abs_coef)) S_matrix = np.hstack((S_ksens,S_psens,S_abs_coef)) shape = np.shape(S_matrix)[1] #append identy matrix identity_matrix = np.identity(shape) # identity_matrix[1,0]=.1 # identity_matrix[0,1]=.1 # identity_matrix[0,20]=.1 # identity_matrix[20,0]=.1 # identity_matrix[39,0]=.1 # identity_matrix[0,39]=.1 ####making edits to this just for masten test S_matrix = np.vstack((S_matrix,identity_matrix)) self.S_matrix = S_matrix S_matrix_wo_k_targets = copy.deepcopy(self.S_matrix) self.S_matrix_wo_k_targets = S_matrix_wo_k_targets #print(S_matrix_wo_k_targets.shape,'S matrix without k targets') S_matrix_df = pd.DataFrame(S_matrix) return S_matrix def grouping_physical_model_parameters(self,exp:list): final_groups=[] for i in exp['simulation'].fullParsedYamlFile['overallDict'].keys(): if not re.match('[dD]iluent',i['type']): final_groups.append(i) def breakup_X(self, X, exp_dict_list:list, exp_uncertainty_dict_list_original:list, loop_counter:int = 0, master_equation_uncertainty_df=None, master_equation_reactions = [], master_equation_flag = False): X_to_subtract_from_Y = {} reactions_in_cti_file = exp_dict_list[0]['simulation'].processor.solution.reaction_equations() number_of_reactions = len(reactions_in_cti_file) ####Grab off updates directly for the CTI file ####need to add master equation reactions ################################################################## if loop_counter !=0: X_new = X else: X_new = X ################################################################## #print('USING BURKE X VALUES') #X = pd.read_csv('MSI/data/test_data/burke_X_values.csv') #X= X['Burke_Value'].values #X = X.reshape(X.shape[0],1) ################################################################ ################################################################## #print('RUNNING TEST') #X_new = np.zeros(np.shape(X_new)) #X_new[79] = .01 # print(X_new) # X_new[847] = -0.007258986471821074 # X_new[848] = -0.07160891432785314 # X_new[849] = -0.038747789992729584 # X_new[850] = -0.09184808671928052 # X_new[851] = -0.13343314153597205 # X_new[852] = 0.0046931837946472 # X_new[853] = -0.007191276020250346 #X= X['Burke_Value'].values #X = X.reshape(X.shape[0],1) #zeros = np.zeros((X_new.shape)) #X_new = zeros # X_new[873,0] = .01 # print("X_NEW") ################################################################ flatten = lambda *n: (e for a in n for e in (flatten(*a) if isinstance(a, (tuple, list)) else (a,))) flattened_master_equation_reaction_list = list(flatten(master_equation_reactions)) X_new = list(X_new.flatten()) if exp_dict_list[0]['simulation'].kineticSens ==1: value1 = 3*(number_of_reactions - len(flattened_master_equation_reaction_list)) AsNsEas = X_new[:value1] X_to_subtract_from_Y['As_ns_Eas'] = AsNsEas #### pickup here dividedBy = int(len(AsNsEas) / 3) def list_slice(S,step): return [S[i::step] for i in range(step)] resortedList = list_slice(AsNsEas,dividedBy) innerDict = ['A','n','Ea'] l = [dict(zip(innerDict,resortedList[x])) for x in range(len(resortedList))] Keys= [] for xx in range(int(value1/3)): Keys.append('r'+str(xx)) deltaXAsNsEas = dict(zip(Keys,l)) innerDictNew = ['A_update','n_update','Ea_update'] ll = [dict(zip(innerDictNew,resortedList[x])) for x in 
range(len(resortedList))] kinetic_paramter_dict = dict(zip(reactions_in_cti_file,ll)) #molecularParams = np.array([.1,.2,.3,.4,.2,.3,.4]).flatten().tolist() # might need to fix this based on how lei is passing me information, check in notebook if master_equation_flag == True: # number_of_molecular_parameters_list = [] # for col in master_equation_uncertainty_df: # number_of_molecular_parameters_list.append(len(master_equation_uncertainty_df[col].dropna().values)) number_of_molecular_parameters_list = [] for i,reaction in enumerate(master_equation_reactions): if type(reaction)==str: number_of_molecular_parameters_list.append(len(list(master_equation_uncertainty_df[reaction].dropna().values))) elif type(reaction)==tuple: column_headers = master_equation_uncertainty_df.columns.to_list() for sub_reaction in reaction: if sub_reaction in column_headers: number_of_molecular_parameters_list.append(len(list(master_equation_uncertainty_df[sub_reaction].dropna().values))) sum_of_moleular_paramters = sum(number_of_molecular_parameters_list) value2 = sum_of_moleular_paramters deltaXmolecularParams = X_new[value1:(value1+value2)] X_to_subtract_from_Y['molecular_parameters'] = deltaXmolecularParams molecular_paramters_by_reaction = [] reaction_numbers = [] start_mp = 0 for r,number in enumerate(number_of_molecular_parameters_list): stop_mp = start_mp + number molecular_paramters_by_reaction.append(deltaXmolecularParams[start_mp:stop_mp]) start_mp = stop_mp reaction_numbers.append('R_'+str(r)) delta_x_molecular_params_by_reaction_dict = dict(zip(master_equation_reactions,molecular_paramters_by_reaction)) list_of_mp = [] for i,reaction in enumerate(molecular_paramters_by_reaction): temp=[] for j,value in enumerate(reaction): temp.append('Paramter_'+str(j)+'_Update') list_of_mp.append(temp) inner_dict_temp = [dict(zip(list_of_mp[x],molecular_paramters_by_reaction[x])) for x in range(len(molecular_paramters_by_reaction))] inner_dict_temp_2 = dict(zip(master_equation_reactions,inner_dict_temp)) kinetic_paramter_dict.update(inner_dict_temp_2) #its possible this kinetic paramters dict might break else: value2 = 0 physical_observables = [] previous_value = 0 physical_observables_for_Y = [] if exp_dict_list[0]['simulation'].physicalSens ==1: for i,exp_dic in enumerate(exp_dict_list): if re.match('[Ss]hock [Tt]ube',exp_dic['simulation_type']) and re.match('[Ss]pecies[- ][Pp]rofile',exp_dic['experiment_type']): dic_of_conditions = exp_dic['simulation'].conditions #subtract out the dilluant species_in_simulation = len(set(dic_of_conditions.keys()).difference(['Ar','AR','ar','HE','He','he','Kr','KR','kr','Xe','XE','xe','NE','Ne','ne'])) #add two for Temperature and Pressure len_of_phsycial_observables_in_simulation = species_in_simulation + 2 +1 new_value = previous_value + len_of_phsycial_observables_in_simulation single_experiment_physical_observables = X_new[(value1+value2+previous_value):(value1+value2+new_value)] physical_observables_for_Y.append(single_experiment_physical_observables) temp_keys = [] #stacking the zeros onto the Y array temp_keys.append('T'+'_'+'experiment'+'_'+str(i)) temp_keys.append('P'+'_'+'experiment'+'_'+str(i)) for variable in range(species_in_simulation): temp_keys.append('X'+'_'+str(variable)+'_'+'experiment'+'_'+str(i)) temp_keys.append('Time_shift'+'_'+'experiment'+'_'+str(i)) temp_dict = dict(zip(temp_keys,single_experiment_physical_observables)) physical_observables.append(temp_dict) ##come back to this and do a test on paper previous_value = new_value elif re.match('[Ss]hock 
[Tt]ube',exp_dic['simulation_type']) and re.match('[Ii]gnition[- ][Dd]elay',exp_dic['experiment_type']): diluent=[] if 'Diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys() or 'diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys(): diluent = exp_dic['uncertainty']['species_relative_uncertainty']['type_dict']['diluent'] singular_species=[] for species in list(exp_dic['simulation'].fullParsedYamlFile['conditions'].keys()): if len(exp_dic['simulation'].fullParsedYamlFile['conditions'][species])==1 and species not in diluent: singular_species.append(species) species_in_simulation = int(len(singular_species)+((len(exp_dic['simulation'].fullParsedYamlFile['speciesNames'])-len(singular_species))*len(exp_dic['simulation'].fullParsedYamlFile['conditions_to_run']))) len_of_phsycial_observables_in_simulation = species_in_simulation + len(exp_dic['simulation'].pressures)+len(exp_dic['simulation'].temperatures)+1 new_value = previous_value + len_of_phsycial_observables_in_simulation single_experiment_physical_observables = X_new[(value1+value2+previous_value):(value1+value2+new_value)] physical_observables_for_Y.append(single_experiment_physical_observables) temp_keys = [] #stacking the zeros onto the Y array for j in range(len(exp_dic['simulation'].temperatures)): temp_keys.append('T'+str(j+1)+'_'+'experiment'+'_'+str(i)) #stacking the zeros onto the Y array for j in range(len(exp_dic['simulation'].pressures)): temp_keys.append('P'+str(j+1)+'_'+'experiment'+'_'+str(i)) for x,species in enumerate(exp_dic['simulation'].fullParsedYamlFile['speciesNames']): if species in singular_species and species not in diluent: temp_keys.append('X'+str(x+1)+'_cond'+str(0)+'_'+species+'_experiment_'+str(i)) elif species not in singular_species and species not in diluent: for j in range(len(exp_dic['simulation'].fullParsedYamlFile['conditions_to_run'])): temp_keys.append('X'+str(x+1)+'_cond'+str(j)+'_'+species+'_experiment_'+str(i)) temp_keys.append('Time_shift'+'_'+'experiment'+'_'+str(i)) temp_dict = dict(zip(temp_keys,single_experiment_physical_observables)) physical_observables.append(temp_dict) ##come back to this and do a test on paper previous_value = new_value #print(temp_dict) elif re.match('[Rc][Cc][Mm]',exp_dic['simulation_type']) and re.match('[Ii]gnition[- ][Dd]elay',exp_dic['experiment_type']): diluent=[] if 'Diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys() or 'diluent' in exp_dic['uncertainty']['species_relative_uncertainty']['type_dict'].keys(): diluent = exp_dic['uncertainty']['species_relative_uncertainty']['type_dict']['diluent'] singular_species=[] for species in list(exp_dic['simulation'].fullParsedYamlFile['conditions'].keys()): if len(exp_dic['simulation'].fullParsedYamlFile['conditions'][species])==1 and species not in diluent: singular_species.append(species) species_in_simulation = int(len(singular_species)+((len(exp_dic['simulation'].fullParsedYamlFile['speciesNames'])-len(singular_species))*len(exp_dic['simulation'].fullParsedYamlFile['conditions_to_run']))) len_of_phsycial_observables_in_simulation = species_in_simulation + len(exp_dic['simulation'].pressures)+len(exp_dic['simulation'].temperatures)+1 new_value = previous_value + len_of_phsycial_observables_in_simulation single_experiment_physical_observables = X_new[(value1+value2+previous_value):(value1+value2+new_value)] physical_observables_for_Y.append(single_experiment_physical_observables) temp_keys = [] #stacking the zeros onto 
the Y array for j in range(len(exp_dic['simulation'].temperatures)): temp_keys.append('T'+str(j+1)+'_'+'experiment'+'_'+str(i)) #stacking the zeros onto the Y array for j in range(len(exp_dic['simulation'].pressures)): temp_keys.append('P'+str(j+1)+'_'+'experiment'+'_'+str(i)) for x,species in enumerate(exp_dic['simulation'].fullParsedYamlFile['speciesNames']): if species in singular_species and species not in diluent: temp_keys.append('X'+str(x+1)+'_cond'+str(0)+'_'+species+'_experiment_'+str(i)) elif species not in singular_species and species not in diluent: for j in range(len(exp_dic['simulation'].fullParsedYamlFile['conditions_to_run'])): temp_keys.append('X'+str(x+1)+'_cond'+str(j)+'_'+species+'_experiment_'+str(i)) temp_keys.append('Time_shift'+'_'+'experiment'+'_'+str(i)) temp_dict = dict(zip(temp_keys,single_experiment_physical_observables)) physical_observables.append(temp_dict) ##come back to this and do a test on paper previous_value = new_value elif re.match('[Jj][Ss][Rr]',exp_dic['simulation_type']): dic_of_conditions = exp_dic['simulation'].conditions #subtract out the dilluant species_in_simulation = len(set(dic_of_conditions.keys()).difference(['Ar','AR','ar','HE','He','he','Kr','KR','kr','Xe','XE','xe','NE','Ne','ne'])) #add two for Temperature and Pressure len_of_phsycial_observables_in_simulation = species_in_simulation + 1+len(exp_dic['simulation'].temperatures)+1 #print(len_of_phsycial_observables_in_simulation) new_value = previous_value + len_of_phsycial_observables_in_simulation single_experiment_physical_observables = X_new[(value1+value2+previous_value):(value1+value2+new_value)] #print(len(single_experiment_physical_observables)) physical_observables_for_Y.append(single_experiment_physical_observables) temp_keys = [] #stacking the zeros onto the Y array for j in range(len(exp_dic['simulation'].temperatures)): temp_keys.append('T'+str(j+1)+'_'+'experiment'+'_'+str(i)) temp_keys.append('P'+'_'+'experiment'+'_'+str(i)) for variable in range(species_in_simulation): temp_keys.append('X'+'_'+str(variable)+'_'+'experiment'+'_'+str(i)) temp_keys.append('R'+'_'+'experiment'+'_'+str(i)) temp_dict = dict(zip(temp_keys,single_experiment_physical_observables)) physical_observables.append(temp_dict) ##come back to this and do a test on paper previous_value = new_value elif re.match('[Ss]pecies[- ][Pp]rofile',exp_dict_list[i]['experiment_type']) and re.match('[Ff]low[ -][Rr]eactor',exp_dict_list[i]['simulation_type']): dic_of_conditions = exp_dic['simulation'].conditions #subtract out the dilluant species_in_simulation = len(set(dic_of_conditions.keys()).difference(['Ar','AR','ar','HE','He','he','Kr','KR','kr','Xe','XE','xe','NE','Ne','ne'])) #add two for Temperature and Pressure time_shift_length = len(exp_dic['simulation'].fullParsedYamlFile['timeShiftOriginal']) len_of_phsycial_observables_in_simulation = species_in_simulation + 1+len(exp_dic['simulation'].temperatures)+time_shift_length #print(len_of_phsycial_observables_in_simulation) new_value = previous_value + len_of_phsycial_observables_in_simulation single_experiment_physical_observables = X_new[(value1+value2+previous_value):(value1+value2+new_value)] #print(len(single_experiment_physical_observables)) physical_observables_for_Y.append(single_experiment_physical_observables) temp_keys = [] #stacking the zeros onto the Y array for j in range(len(exp_dic['simulation'].temperatures)): temp_keys.append('T'+str(j+1)+'_'+'experiment'+'_'+str(i)) temp_keys.append('P'+'_'+'experiment'+'_'+str(i)) for variable in 
range(species_in_simulation): temp_keys.append('X'+'_'+str(variable)+'_'+'experiment'+'_'+str(i)) for j in range(time_shift_length): temp_keys.append('Time_Shift'+str(j+1)+'_'+'experiment'+'_'+str(i)) temp_dict = dict(zip(temp_keys,single_experiment_physical_observables)) physical_observables.append(temp_dict) ##come back to this and do a test on paper previous_value = new_value physical_observables_for_Y = [item for sublist in physical_observables_for_Y for item in sublist] X_to_subtract_from_Y['physical_observables'] = physical_observables_for_Y test_abs = [] absorbance_coefficients_for_Y = [] coef_dict = {} coef_dict_list = [] absorbance_coef_update_dict = {} for i,exp_dic in enumerate(exp_uncertainty_dict_list_original): if 'coupled_coef_and_uncertainty' not in exp_dic.keys(): continue dictonary_of_coef_and_uncertainty = exp_dic['coupled_coef_and_uncertainty'] #tab start working here tomorrow, need to pass in the original version of this dict #dictonary_of_coef_and_uncertainty = {(140000, 0.0): ([0.7], [0.0]), (1270000, 0.0): ([0.7], [0.0])} for x in dictonary_of_coef_and_uncertainty: if x not in coef_dict.keys(): coef_dict[x] = dictonary_of_coef_and_uncertainty[x] if x not in coef_dict_list: coef_dict_list.append(x) start_abs = 0 stop_abs = 1 for i,cof in enumerate(coef_dict_list): temp=[] temp2=[] # counter=1 for value in cof: if value==0: temp.append([0]) temp2.append(['null']) else: temp.append(X_new[(value1+value2+new_value+start_abs):(value1+value2+new_value+stop_abs)]) temp2.append(X_new[(value1+value2+new_value+start_abs):(value1+value2+new_value+stop_abs)]) start_abs = stop_abs stop_abs +=1 temp = [item for sublist in temp for item in sublist] temp2 = [item for sublist in temp2 for item in sublist] absorbance_coef_update_dict[cof] = temp absorbance_coefficients_for_Y.append(temp2) test_abs.append(temp2) # return everything in a dictonary?? 
absorbance_coefficients_for_Y = [item for sublist in absorbance_coefficients_for_Y for item in sublist] X_to_subtract_from_Y['absorbance_coefficent_observables'] = absorbance_coefficients_for_Y # if master_equation_flag == False: return deltaXAsNsEas,physical_observables,absorbance_coef_update_dict,X_to_subtract_from_Y,kinetic_paramter_dict else: return deltaXAsNsEas,physical_observables,absorbance_coef_update_dict,X_to_subtract_from_Y,delta_x_molecular_params_by_reaction_dict,kinetic_paramter_dict def matrix_manipulation(self,runCounter,S_matrix,Y_matrix,z_matrix,XLastItteration = np.array(()),active_parameters=[]): #RUnning test to link up to paramters ################################################## #s_temp = np.zeros((1,S_matrix.shape[1])) #s_temp[0,886]=1 #s_temp[0,888]=-1 #y_temp = np.zeros((1,1)) #y_temp[0,0]=0 #z_temp=np.zeros((1,1)) #z_temp[0,0]=.00001 #S_matrix=np.vstack((S_matrix,s_temp)) #Y_matrix = np.vstack((Y_matrix,y_temp)) #z_matrix = np.vstack((z_matrix,z_temp)) ################################################## # print("ONLY CONSIDERING RATE CONSTANT TARGETS") # for value in np.arange(0,401): # z_matrix[value,0] =1000000 ################################################## one_over_z = np.true_divide(1,z_matrix) #print(Y_matrix) y_matrix = Y_matrix * one_over_z s_matrix = S_matrix * (one_over_z.flatten()[:,np.newaxis]) self.y_matrix = y_matrix sTimesZ = S_matrix * (z_matrix.flatten())[:,np.newaxis] #calculate covariance matrix shape = np.shape(self.S_matrix_wo_k_targets) s_wo_k_targets = s_matrix[:shape[0],:shape[1]] identity_matrix = s_wo_k_targets[shape[0]-len(active_parameters):,:] #try: if runCounter==0: c = np.dot(np.transpose(identity_matrix),identity_matrix) c = np.linalg.inv(c) prior_diag = np.diag(c) prior_sigmas = np.sqrt(prior_diag) covariance_prior_df = pd.DataFrame(c) if active_parameters: covariance_prior_df.columns = active_parameters covariance_prior_df.reindex(labels = active_parameters) prior_diag_df = pd.DataFrame({'parameter': active_parameters,'value': prior_diag.reshape((prior_diag.shape[0],))}) sorted_prior_diag = prior_diag_df.sort_values(by=['value']) prior_sigmas_df = pd.DataFrame({'parameter': active_parameters,'value': prior_sigmas.reshape((prior_sigmas.shape[0],))}) else: c = np.dot(np.transpose(s_matrix),s_matrix) c = np.linalg.inv(c) covariance_posterior_df = pd.DataFrame(c) if active_parameters: covariance_posterior_df.columns = active_parameters covariance_posterior_df.reindex(labels = active_parameters) posterior_diag = np.diag(c) posterior_sigmas = np.sqrt(posterior_diag) posterior_sigmas_df = pd.DataFrame({'parameter': active_parameters,'value': posterior_sigmas.reshape((posterior_sigmas.shape[0],))}) posterior_diag_df = pd.DataFrame({'parameter': active_parameters,'value': posterior_diag.reshape((posterior_diag.shape[0],))}) sorted_posterior_diag = posterior_diag_df.sort_values(by=['value']) # except: # #stub # print('WE ARE IN THE EXCEPT STATMENT') # if runCounter==0: # c = -1 # c = -1 # prior_diag = -1 # prior_sigmas = -1 # covariance_prior_df = -1 # prior_diag_df = -1 # sorted_prior_diag = -1 # prior_sigmas_df = -1 # else: # c = -1 # c =-1 # covariance_posterior_df = -1 # posterior_diag = -1 # posterior_sigmas = -1 # posterior_sigmas_df = -1 # posterior_diag_df = -1 # sorted_posterior_diag = -1 self.covariance = c self.s_matrix = s_matrix psudoInverse = np.linalg.pinv(s_matrix) delta_X = np.dot(psudoInverse,y_matrix) self.delta_X = delta_X if runCounter == 0: XlastItteration = np.zeros(np.shape(delta_X)) else: XlastItteration = 
XLastItteration X = XlastItteration + delta_X #STUB THIS IS FOR A TESTING ITTERATION ##################################################################### #X = np.zeros(np.shape(delta_X)) # X[564] = .01 ##################################################################### self.X = X #STUB THIS try: X_data_frame = pd.DataFrame({'value': active_parameters,'Parameter': X.reshape((X.shape[0],))}) except: X_data_frame = -1 if runCounter==0: return X,c,s_matrix,y_matrix,delta_X,z_matrix,X_data_frame,prior_diag,prior_diag_df,sorted_prior_diag,covariance_prior_df,prior_sigmas_df else: return X,c,s_matrix,y_matrix,delta_X,z_matrix,X_data_frame,posterior_diag,posterior_diag_df,sorted_posterior_diag,covariance_posterior_df,posterior_sigmas_df class Adding_Target_Values(meq.Master_Equation): def __init__(self,S_matrix,Y_matrix,z_matrix,sigma,Y_data_Frame,z_data_Frame): self.S_matrix = S_matrix self.Y_matrix = Y_matrix self.z_matrix = z_matrix self.sigma = sigma self.Y_data_Frame = Y_data_Frame self.z_data_Frame = z_data_Frame meq.Master_Equation.__init__(self) def target_values_Y(self,target_value_csv, exp_dict_list:list, Y_data_Frame, master_equation_reactions): import cantera as ct Y_df_list = [] Y_values = [] #make sure we put the reactions into the file in the units cantera uses target_value_csv = pd.read_csv(target_value_csv) target_reactions = target_value_csv['Reaction'] target_temp = target_value_csv['temperature'] target_press = target_value_csv['pressure'] target_k = target_value_csv['k'] bath_gas = target_value_csv['M'] reactions_in_cti_file = exp_dict_list[0]['simulation'].processor.solution.reaction_equations() gas = ct.Solution(exp_dict_list[0]['simulation'].processor.cti_path) diff_in_ks_for_Y = [] def check_if_M_in_reactants(list_to_append_to, gas, reactants_in_target_reactions, reverse_reactants_in_target_reaction): if reverse_reactants_in_target_reaction !=None: for reaction_number_in_cti_file in range(gas.n_reactions): if (gas.reactants(reaction_number_in_cti_file) == reactants_in_target_reactions or gas.reactants(reaction_number_in_cti_file) == reverse_reactants_in_target_reaction or gas.reactants(reaction_number_in_cti_file) == reactants_in_target_reactions + ' (+M)' or gas.reactants(reaction_number_in_cti_file) == reverse_reactants_in_target_reaction + ' (+M)' or gas.reactants(reaction_number_in_cti_file) == reactants_in_target_reactions + ' + M' or gas.reactants(reaction_number_in_cti_file) == reverse_reactants_in_target_reaction + ' + M') : list_to_append_to.append(reactions_in_cti_file[reaction_number_in_cti_file]) elif reverse_reactants_in_target_reaction==None: for reaction_number_in_cti_file in range(gas.n_reactions): if (gas.reactants(reaction_number_in_cti_file) == reactants_in_target_reactions or gas.reactants(reaction_number_in_cti_file) == reverse_reactants_in_target_reaction or gas.reactants(reaction_number_in_cti_file) == reactants_in_target_reactions + ' (+M)' or gas.reactants(reaction_number_in_cti_file) == reactants_in_target_reactions + ' + M'): list_to_append_to.append(reactions_in_cti_file[reaction_number_in_cti_file]) return list_to_append_to for i,reaction in enumerate(target_reactions): #ask about the mixture composition #if reaction not in flattened_linked_channel_reactions: if '*' not in reaction and reaction != 'More Complex Combination Rule' and '(+)' not in reaction: index_in_cti_file = gas.reaction_equations().index(reaction) units_reaction_types=['ElementaryReaction', 'PlogReaction', 'ChebyshevReaction', 'ThreeBodyReaction', 'FalloffReaction'] 
coeff_sum = sum(gas.reaction(index_in_cti_file).reactants.values()) if target_press[i] == 0: pressure = 1e-9 else: pressure = target_press[i] if bath_gas[i] !=0: gas.TPX = target_temp[i],pressure*101325,{'H2O':.013,'O2':.0099,'H':.0000007,'Ar':.9770993} else: gas.TPX = target_temp[i],pressure*101325,{'Ar':.99} reaction_number_in_cti = reactions_in_cti_file.index(reaction) k = gas.forward_rate_constants[reaction_number_in_cti] if coeff_sum==1: k = k elif coeff_sum==2: k=k*1000 elif coeff_sum==3: k=k*1000000 #check and make sure we are subtracting in the correct order difference = np.log(target_k[i]) - np.log(k) diff_in_ks_for_Y.append(difference) Y_df_list.append(reaction) Y_values.append(difference) #elif reaction in flattened_linked_channel_reactions: elif '*' in reaction and reaction != 'More Complex Combination Rule' and '/' not in reaction: reactions_in_cti_file_with_these_reactants = [] #might be a more comprehensive way to do this reactants_in_target_reactions = reaction.split('<=>')[0].rstrip() reverse_reactants_in_target_reaction=None if len(reactants_in_target_reactions.split('+'))>1: reverse_reactants_in_target_reaction = reactants_in_target_reactions.split('+') temp = reverse_reactants_in_target_reaction[1] + ' '+ '+' +' '+ reverse_reactants_in_target_reaction[0] temp = temp.lstrip() temp = temp.rstrip() reverse_reactants_in_target_reaction = temp for reaction_number_in_cti_file in range(gas.n_reactions): if gas.reactants(reaction_number_in_cti_file) == reactants_in_target_reactions or gas.reactants(reaction_number_in_cti_file) == reverse_reactants_in_target_reaction: reactions_in_cti_file_with_these_reactants.append(reactions_in_cti_file[reaction_number_in_cti_file]) reactions_in_cti_file_with_these_reactants = check_if_M_in_reactants(reactions_in_cti_file_with_these_reactants, gas, reactants_in_target_reactions, reactants_in_target_reactions) if target_press[i] == 0: pressure = 1e-9 else: pressure = target_press[i] if bath_gas[i] !=0: gas.TPX = target_temp[i],pressure*101325,{'H2O':.013,'O2':.0099,'H':.0000007,'Ar':.9770993} else: gas.TPX = target_temp[i],pressure*101325,{'Ar':.99} tottal_k = [] for secondary_reaction in reactions_in_cti_file_with_these_reactants: reaction_number_in_cti = reactions_in_cti_file.index(secondary_reaction) coeff_sum = sum(gas.reaction(reaction_number_in_cti).reactants.values()) k = gas.forward_rate_constants[reaction_number_in_cti] if coeff_sum==1: k=k elif coeff_sum==2: k = k*1000 elif coeff_sum==3: k= k*1000000 tottal_k.append(k) #check and make sure we are subtracting in the correct order k=sum(tottal_k) difference = np.log(target_k[i]) - np.log(k) diff_in_ks_for_Y.append(difference) #I guess i could append the tuple Y_df_list.append(reaction) Y_values.append(difference) elif '/' in reaction: reactants_in_numerator = reaction.split('/')[0].rstrip() reactants_in_numerator = reactants_in_numerator.lstrip() reactants_in_denominator = reaction.split('/')[1].rstrip() reactants_in_denominator = reactants_in_denominator.lstrip() reactions_in_cti_file_with_these_reactants_numerator = [] reactions_in_cti_file_with_these_reactants_denominator = [] #take back here if '*' in reactants_in_numerator: reactants_in_target_reactions_numerator = reactants_in_numerator.split('<=>')[0].rstrip() reverse_reactants_in_target_reaction_in_numerator=None if len(reactants_in_target_reactions_numerator.split('+'))>1: reverse_reactants_in_target_reaction_in_numerator = reactants_in_target_reactions_numerator.split('+') temp = 
reverse_reactants_in_target_reaction_in_numerator[1] + ' '+ '+' +' '+ reverse_reactants_in_target_reaction_in_numerator[0] temp = temp.lstrip() temp = temp.rstrip() reverse_reactants_in_target_reaction_in_numerator = temp for reaction_number_in_cti_file in range(gas.n_reactions): if gas.reactants(reaction_number_in_cti_file) == reactants_in_target_reactions_numerator or gas.reactants(reaction_number_in_cti_file) == reverse_reactants_in_target_reaction_in_numerator: reactions_in_cti_file_with_these_reactants_numerator.append(reactions_in_cti_file[reaction_number_in_cti_file]) reactions_in_cti_file_with_these_reactants_numerator = check_if_M_in_reactants(reactions_in_cti_file_with_these_reactants_numerator, gas, reactants_in_target_reactions_numerator, reverse_reactants_in_target_reaction_in_numerator) else: #need to figure out how to split addition of reactions if '(+)' not in reactants_in_numerator: reactions_in_cti_file_with_these_reactants_numerator.append(reactants_in_numerator) else: list_of_reactions_in_numerator = reactants_in_numerator.split('(+)') list_of_reactions_in_numerator_cleaned=[] for reaction in list_of_reactions_in_numerator: reaction = reaction.rstrip() reaction = reaction.lstrip() list_of_reactions_in_numerator_cleaned.append(reaction) reactions_in_cti_file_with_these_reactants_numerator = list_of_reactions_in_numerator_cleaned if '*' in reactants_in_denominator: reactants_in_target_reactions_denominator = reactants_in_denominator.split('<=>')[0].rstrip() reverse_reactants_in_target_reaction_in_denominator=None if len(reactants_in_target_reactions_denominator.split('+'))>1: reverse_reactants_in_target_reaction_in_denominator = reactants_in_target_reactions_denominator.split('+') temp = reverse_reactants_in_target_reaction_in_denominator[1] + ' '+ '+' +' '+ reverse_reactants_in_target_reaction_in_denominator[0] temp = temp.lstrip() temp = temp.rstrip() reverse_reactants_in_target_reaction_in_denominator = temp for reaction_number_in_cti_file in range(gas.n_reactions): if gas.reactants(reaction_number_in_cti_file) == reactants_in_target_reactions_denominator or gas.reactants(reaction_number_in_cti_file) == reverse_reactants_in_target_reaction_in_denominator: reactions_in_cti_file_with_these_reactants_denominator.append(reactions_in_cti_file[reaction_number_in_cti_file]) reactions_in_cti_file_with_these_reactants_denominator = check_if_M_in_reactants(reactions_in_cti_file_with_these_reactants_denominator, gas, reactants_in_target_reactions_denominator, reverse_reactants_in_target_reaction_in_denominator) else: #need to figure out how to split addition of reactions if '(+)' not in reactants_in_denominator: reactions_in_cti_file_with_these_reactants_denominator.append(reactants_in_denominator) else: list_of_reactions_in_denominator = reactants_in_denominator.split('(+)') list_of_reactions_in_denominator_cleaned=[] for reaction in list_of_reactions_in_denominator: reaction = reaction.rstrip() reaction = reaction.lstrip() list_of_reactions_in_denominator_cleaned.append(reaction) reactions_in_cti_file_with_these_reactants_denominator = list_of_reactions_in_denominator_cleaned if target_press[i] == 0: pressure = 1e-9 else: pressure = target_press[i] if bath_gas[i] !=0: gas.TPX = target_temp[i],pressure*101325,{'H2O':.013,'O2':.0099,'H':.0000007,'Ar':.9770993} else: gas.TPX = target_temp[i],pressure*101325,{'Ar':.99} tottal_k_numerator = [] for secondary_reaction in reactions_in_cti_file_with_these_reactants_numerator: reaction_number_in_cti = 
reactions_in_cti_file.index(secondary_reaction) coeff_sum = sum(gas.reaction(reaction_number_in_cti).reactants.values()) k = gas.forward_rate_constants[reaction_number_in_cti] if coeff_sum==1: k=k elif coeff_sum==2: k = k*1000 elif coeff_sum==3: k = k*1000000 tottal_k_numerator.append(k) #check and make sure we are subtracting in the correct order k_numerator=sum(tottal_k_numerator) tottal_k_denominator = [] for secondary_reaction in reactions_in_cti_file_with_these_reactants_denominator: reaction_number_in_cti = reactions_in_cti_file.index(secondary_reaction) coeff_sum = sum(gas.reaction(reaction_number_in_cti).reactants.values()) k = gas.forward_rate_constants[reaction_number_in_cti] if coeff_sum==1: k=k elif coeff_sum==2: k = k*1000 elif coeff_sum==3: k = k*1000000 tottal_k_denominator.append(k) k_denominator=sum(tottal_k_denominator) k = k_numerator/k_denominator difference = np.log(target_k[i]) - np.log(k) #print(k_numerator,k_denominator) ##print(target_k[i],k) diff_in_ks_for_Y.append(difference) #I guess i could append the tuple Y_df_list.append(reaction) Y_values.append(difference) elif '(+)' in reaction and '/' not in reaction and '*' not in reaction: list_of_reactions = reaction.split('(+)') list_of_reactions_cleaned=[] for reaction in list_of_reactions: reaction = reaction.rstrip() reaction = reaction.lstrip() list_of_reactions_cleaned.append(reaction) reactions_in_cti_file_with_these_reactants = list_of_reactions_cleaned if target_press[i] == 0: pressure = 1e-9 else: pressure = target_press[i] if bath_gas[i] !=0: gas.TPX = target_temp[i],pressure*101325,{'H2O':.013,'O2':.0099,'H':.0000007,'Ar':.9770993} else: gas.TPX = target_temp[i],pressure*101325,{'Ar':.99} tottal_k = [] for secondary_reaction in reactions_in_cti_file_with_these_reactants: reaction_number_in_cti = reactions_in_cti_file.index(secondary_reaction) coeff_sum = sum(gas.reaction(reaction_number_in_cti).reactants.values()) k = gas.forward_rate_constants[reaction_number_in_cti] if coeff_sum==1: k=k elif coeff_sum==2: k = k*1000 elif coeff_sum==3: k= k*1000000 tottal_k.append(k) #check and make sure we are subtracting in the correct order k=sum(tottal_k) difference = np.log(target_k[i]) - np.log(k) diff_in_ks_for_Y.append(difference) #I guess i could append the tuple Y_df_list.append(reaction) Y_values.append(difference) elif reaction == 'More Complex Combination Rule': print('do someting else ') k_targets_for_y = np.array(diff_in_ks_for_Y) k_targets_for_y = k_targets_for_y.reshape((k_targets_for_y.shape[0],1)) Y_values = np.array(Y_values) Y_df_temp = pd.DataFrame({'value': Y_df_list,'ln_difference': Y_values.reshape((Y_values.shape[0],))}) Y_data_Frame = Y_data_Frame.append(Y_df_temp, ignore_index=True) #print(k_targets_for_y.shape,'k targets for y') return k_targets_for_y,Y_data_Frame def target_values_for_Z(self,target_value_csv,z_data_Frame): z_over_w = [] sigma = [] target_value_csv =
pd.read_csv(target_value_csv)
pandas.read_csv
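A minimal, self-contained sketch of the pattern this row's completion relies on: pandas.read_csv on the rate-constant target table, followed by the per-column access that target_values_Y performs. The column names (Reaction, temperature, pressure, k, M) come from the prompt; the in-memory CSV text and the model rate constant k_model are invented stand-ins, not values from the source.

import io
import numpy as np
import pandas as pd

# Hypothetical stand-in for the rate-constant target CSV; the column names
# match what target_values_Y reads (Reaction, temperature, pressure, k, M).
csv_text = """Reaction,temperature,pressure,k,M
H + O2 <=> O + OH,1500,1.0,2.3e11,0
H + O2 (+M) <=> HO2 (+M),1200,0.5,4.5e12,1
"""

target_value_csv = io.StringIO(csv_text)
target_value_df = pd.read_csv(target_value_csv)

target_reactions = target_value_df['Reaction']
target_temp = target_value_df['temperature']
target_press = target_value_df['pressure']
target_k = target_value_df['k']
bath_gas = target_value_df['M']

# The Y entries in the prompt are built as differences of log rate constants;
# k_model stands in for the value the kinetics model would return.
k_model = 1.9e11
difference = np.log(target_k[0]) - np.log(k_model)
print(target_reactions[0], target_temp[0], target_press[0], bath_gas[0], difference)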
# pragma pylint: disable=missing-docstring,W0212,C0103 from datetime import datetime from pathlib import Path from unittest.mock import MagicMock, PropertyMock import pandas as pd import pytest from arrow import Arrow from filelock import Timeout from freqtrade import OperationalException from freqtrade.data.converter import parse_ticker_dataframe from freqtrade.data.history import load_tickerdata_file from freqtrade.optimize import setup_configuration, start_hyperopt from freqtrade.optimize.default_hyperopt import DefaultHyperOpt from freqtrade.optimize.default_hyperopt_loss import DefaultHyperOptLoss from freqtrade.optimize.hyperopt import Hyperopt from freqtrade.resolvers.hyperopt_resolver import (HyperOptLossResolver, HyperOptResolver) from freqtrade.state import RunMode from freqtrade.strategy.interface import SellType from tests.conftest import (get_args, log_has, log_has_re, patch_exchange, patched_configuration_load_config_file) @pytest.fixture(scope='function') def hyperopt(default_conf, mocker): default_conf.update({'spaces': ['all']}) patch_exchange(mocker) return Hyperopt(default_conf) @pytest.fixture(scope='function') def hyperopt_results(): return pd.DataFrame( { 'pair': ['ETH/BTC', 'ETH/BTC', 'ETH/BTC'], 'profit_percent': [-0.1, 0.2, 0.3], 'profit_abs': [-0.2, 0.4, 0.6], 'trade_duration': [10, 30, 10], 'sell_reason': [SellType.STOP_LOSS, SellType.ROI, SellType.ROI] } ) # Functions for recurrent object patching def create_trials(mocker, hyperopt, testdatadir) -> None: """ When creating trials, mock the hyperopt Trials so that *by default* - we don't create any pickle'd files in the filesystem - we might have a pickle'd file so make sure that we return false when looking for it """ hyperopt.trials_file = testdatadir / 'optimize/ut_trials.pickle' mocker.patch.object(Path, "is_file", MagicMock(return_value=False)) stat_mock = MagicMock() stat_mock.st_size = PropertyMock(return_value=1) mocker.patch.object(Path, "stat", MagicMock(return_value=False)) mocker.patch.object(Path, "unlink", MagicMock(return_value=True)) mocker.patch('freqtrade.optimize.hyperopt.dump', return_value=None) return [{'loss': 1, 'result': 'foo', 'params': {}}] def test_setup_hyperopt_configuration_without_arguments(mocker, default_conf, caplog) -> None: patched_configuration_load_config_file(mocker, default_conf) args = [ '--config', 'config.json', 'hyperopt' ] config = setup_configuration(get_args(args), RunMode.HYPEROPT) assert 'max_open_trades' in config assert 'stake_currency' in config assert 'stake_amount' in config assert 'exchange' in config assert 'pair_whitelist' in config['exchange'] assert 'datadir' in config assert log_has('Using data directory: {} ...'.format(config['datadir']), caplog) assert 'ticker_interval' in config assert not log_has_re('Parameter -i/--ticker-interval detected .*', caplog) assert 'position_stacking' not in config assert not log_has('Parameter --enable-position-stacking detected ...', caplog) assert 'timerange' not in config assert 'runmode' in config assert config['runmode'] == RunMode.HYPEROPT def test_setup_hyperopt_configuration_with_arguments(mocker, default_conf, caplog) -> None: patched_configuration_load_config_file(mocker, default_conf) mocker.patch( 'freqtrade.configuration.configuration.create_datadir', lambda c, x: x ) args = [ '--config', 'config.json', '--datadir', '/foo/bar', 'hyperopt', '--ticker-interval', '1m', '--timerange', ':100', '--enable-position-stacking', '--disable-max-market-positions', '--epochs', '1000', '--spaces', 'all', '--print-all' ] 
config = setup_configuration(get_args(args), RunMode.HYPEROPT) assert 'max_open_trades' in config assert 'stake_currency' in config assert 'stake_amount' in config assert 'exchange' in config assert 'pair_whitelist' in config['exchange'] assert 'datadir' in config assert config['runmode'] == RunMode.HYPEROPT assert log_has('Using data directory: {} ...'.format(config['datadir']), caplog) assert 'ticker_interval' in config assert log_has('Parameter -i/--ticker-interval detected ... Using ticker_interval: 1m ...', caplog) assert 'position_stacking' in config assert log_has('Parameter --enable-position-stacking detected ...', caplog) assert 'use_max_market_positions' in config assert log_has('Parameter --disable-max-market-positions detected ...', caplog) assert log_has('max_open_trades set to unlimited ...', caplog) assert 'timerange' in config assert log_has('Parameter --timerange detected: {} ...'.format(config['timerange']), caplog) assert 'epochs' in config assert log_has('Parameter --epochs detected ... Will run Hyperopt with for 1000 epochs ...', caplog) assert 'spaces' in config assert log_has('Parameter -s/--spaces detected: {}'.format(config['spaces']), caplog) assert 'print_all' in config assert log_has('Parameter --print-all detected ...', caplog) def test_hyperoptresolver(mocker, default_conf, caplog) -> None: patched_configuration_load_config_file(mocker, default_conf) hyperopt = DefaultHyperOpt delattr(hyperopt, 'populate_buy_trend') delattr(hyperopt, 'populate_sell_trend') mocker.patch( 'freqtrade.resolvers.hyperopt_resolver.HyperOptResolver._load_hyperopt', MagicMock(return_value=hyperopt(default_conf)) ) x = HyperOptResolver(default_conf, ).hyperopt assert not hasattr(x, 'populate_buy_trend') assert not hasattr(x, 'populate_sell_trend') assert log_has("Hyperopt class does not provide populate_sell_trend() method. " "Using populate_sell_trend from the strategy.", caplog) assert log_has("Hyperopt class does not provide populate_buy_trend() method. 
" "Using populate_buy_trend from the strategy.", caplog) assert hasattr(x, "ticker_interval") def test_hyperoptresolver_wrongname(mocker, default_conf, caplog) -> None: default_conf.update({'hyperopt': "NonExistingHyperoptClass"}) with pytest.raises(OperationalException, match=r'Impossible to load Hyperopt.*'): HyperOptResolver(default_conf, ).hyperopt def test_hyperoptlossresolver(mocker, default_conf, caplog) -> None: hl = DefaultHyperOptLoss mocker.patch( 'freqtrade.resolvers.hyperopt_resolver.HyperOptLossResolver._load_hyperoptloss', MagicMock(return_value=hl) ) x = HyperOptLossResolver(default_conf, ).hyperoptloss assert hasattr(x, "hyperopt_loss_function") def test_hyperoptlossresolver_wrongname(mocker, default_conf, caplog) -> None: default_conf.update({'hyperopt_loss': "NonExistingLossClass"}) with pytest.raises(OperationalException, match=r'Impossible to load HyperoptLoss.*'): HyperOptLossResolver(default_conf, ).hyperopt def test_start_not_installed(mocker, default_conf, caplog, import_fails) -> None: start_mock = MagicMock() patched_configuration_load_config_file(mocker, default_conf) mocker.patch('freqtrade.optimize.hyperopt.Hyperopt.start', start_mock) patch_exchange(mocker) args = [ '--config', 'config.json', 'hyperopt', '--epochs', '5' ] args = get_args(args) with pytest.raises(OperationalException, match=r"Please ensure that the hyperopt dependencies"): start_hyperopt(args) def test_start(mocker, default_conf, caplog) -> None: start_mock = MagicMock() patched_configuration_load_config_file(mocker, default_conf) mocker.patch('freqtrade.optimize.hyperopt.Hyperopt.start', start_mock) patch_exchange(mocker) args = [ '--config', 'config.json', 'hyperopt', '--epochs', '5' ] args = get_args(args) start_hyperopt(args) assert log_has('Starting freqtrade in Hyperopt mode', caplog) assert start_mock.call_count == 1 def test_start_no_data(mocker, default_conf, caplog) -> None: patched_configuration_load_config_file(mocker, default_conf) mocker.patch('freqtrade.optimize.hyperopt.load_data', MagicMock(return_value={})) mocker.patch( 'freqtrade.optimize.hyperopt.get_timeframe', MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) ) patch_exchange(mocker) args = [ '--config', 'config.json', 'hyperopt', '--epochs', '5' ] args = get_args(args) start_hyperopt(args) assert log_has('No data found. 
Terminating.', caplog) def test_start_filelock(mocker, default_conf, caplog) -> None: start_mock = MagicMock(side_effect=Timeout(Hyperopt.get_lock_filename(default_conf))) patched_configuration_load_config_file(mocker, default_conf) mocker.patch('freqtrade.optimize.hyperopt.Hyperopt.start', start_mock) patch_exchange(mocker) args = [ '--config', 'config.json', 'hyperopt', '--epochs', '5' ] args = get_args(args) start_hyperopt(args) assert log_has("Another running instance of freqtrade Hyperopt detected.", caplog) def test_loss_calculation_prefer_correct_trade_count(default_conf, hyperopt_results) -> None: hl = HyperOptLossResolver(default_conf).hyperoptloss correct = hl.hyperopt_loss_function(hyperopt_results, 600) over = hl.hyperopt_loss_function(hyperopt_results, 600 + 100) under = hl.hyperopt_loss_function(hyperopt_results, 600 - 100) assert over > correct assert under > correct def test_loss_calculation_prefer_shorter_trades(default_conf, hyperopt_results) -> None: resultsb = hyperopt_results.copy() resultsb.loc[1, 'trade_duration'] = 20 hl = HyperOptLossResolver(default_conf).hyperoptloss longer = hl.hyperopt_loss_function(hyperopt_results, 100) shorter = hl.hyperopt_loss_function(resultsb, 100) assert shorter < longer def test_loss_calculation_has_limited_profit(default_conf, hyperopt_results) -> None: results_over = hyperopt_results.copy() results_over['profit_percent'] = hyperopt_results['profit_percent'] * 2 results_under = hyperopt_results.copy() results_under['profit_percent'] = hyperopt_results['profit_percent'] / 2 hl = HyperOptLossResolver(default_conf).hyperoptloss correct = hl.hyperopt_loss_function(hyperopt_results, 600) over = hl.hyperopt_loss_function(results_over, 600) under = hl.hyperopt_loss_function(results_under, 600) assert over < correct assert under > correct def test_sharpe_loss_prefers_higher_profits(default_conf, hyperopt_results) -> None: results_over = hyperopt_results.copy() results_over['profit_percent'] = hyperopt_results['profit_percent'] * 2 results_under = hyperopt_results.copy() results_under['profit_percent'] = hyperopt_results['profit_percent'] / 2 default_conf.update({'hyperopt_loss': 'SharpeHyperOptLoss'}) hl = HyperOptLossResolver(default_conf).hyperoptloss correct = hl.hyperopt_loss_function(hyperopt_results, len(hyperopt_results), datetime(2019, 1, 1), datetime(2019, 5, 1)) over = hl.hyperopt_loss_function(results_over, len(hyperopt_results), datetime(2019, 1, 1), datetime(2019, 5, 1)) under = hl.hyperopt_loss_function(results_under, len(hyperopt_results), datetime(2019, 1, 1), datetime(2019, 5, 1)) assert over < correct assert under > correct def test_onlyprofit_loss_prefers_higher_profits(default_conf, hyperopt_results) -> None: results_over = hyperopt_results.copy() results_over['profit_percent'] = hyperopt_results['profit_percent'] * 2 results_under = hyperopt_results.copy() results_under['profit_percent'] = hyperopt_results['profit_percent'] / 2 default_conf.update({'hyperopt_loss': 'OnlyProfitHyperOptLoss'}) hl = HyperOptLossResolver(default_conf).hyperoptloss correct = hl.hyperopt_loss_function(hyperopt_results, len(hyperopt_results), datetime(2019, 1, 1), datetime(2019, 5, 1)) over = hl.hyperopt_loss_function(results_over, len(hyperopt_results), datetime(2019, 1, 1), datetime(2019, 5, 1)) under = hl.hyperopt_loss_function(results_under, len(hyperopt_results), datetime(2019, 1, 1), datetime(2019, 5, 1)) assert over < correct assert under > correct def test_log_results_if_loss_improves(hyperopt, capsys) -> None: hyperopt.current_best_loss 
= 2 hyperopt.total_epochs = 2 hyperopt.log_results( { 'loss': 1, 'current_epoch': 1, 'results_explanation': 'foo.', 'is_initial_point': False } ) out, err = capsys.readouterr() assert ' 2/2: foo. Objective: 1.00000' in out def test_no_log_if_loss_does_not_improve(hyperopt, caplog) -> None: hyperopt.current_best_loss = 2 hyperopt.log_results( { 'loss': 3, } ) assert caplog.record_tuples == [] def test_save_trials_saves_trials(mocker, hyperopt, testdatadir, caplog) -> None: trials = create_trials(mocker, hyperopt, testdatadir) mock_dump = mocker.patch('freqtrade.optimize.hyperopt.dump', return_value=None) hyperopt.trials = trials hyperopt.save_trials() trials_file = testdatadir / 'optimize' / 'ut_trials.pickle' assert log_has(f"Saving 1 evaluations to '{trials_file}'", caplog) mock_dump.assert_called_once() def test_read_trials_returns_trials_file(mocker, hyperopt, testdatadir, caplog) -> None: trials = create_trials(mocker, hyperopt, testdatadir) mock_load = mocker.patch('freqtrade.optimize.hyperopt.load', return_value=trials) hyperopt_trial = hyperopt.read_trials() trials_file = testdatadir / 'optimize' / 'ut_trials.pickle' assert log_has(f"Reading Trials from '{trials_file}'", caplog) assert hyperopt_trial == trials mock_load.assert_called_once() def test_roi_table_generation(hyperopt) -> None: params = { 'roi_t1': 5, 'roi_t2': 10, 'roi_t3': 15, 'roi_p1': 1, 'roi_p2': 2, 'roi_p3': 3, } assert hyperopt.custom_hyperopt.generate_roi_table(params) == {0: 6, 15: 3, 25: 1, 30: 0} def test_start_calls_optimizer(mocker, default_conf, caplog, capsys) -> None: dumper = mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock()) mocker.patch('freqtrade.optimize.hyperopt.load_data', MagicMock()) mocker.patch( 'freqtrade.optimize.hyperopt.get_timeframe', MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) ) parallel = mocker.patch( 'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', MagicMock(return_value=[{'loss': 1, 'results_explanation': 'foo result', 'params': {'buy': {}, 'sell': {}, 'roi': {}, 'stoploss': 0.0}}]) ) patch_exchange(mocker) default_conf.update({'config': 'config.json.example', 'epochs': 1, 'timerange': None, 'spaces': 'all', 'hyperopt_jobs': 1, }) hyperopt = Hyperopt(default_conf) hyperopt.backtesting.strategy.tickerdata_to_dataframe = MagicMock() hyperopt.custom_hyperopt.generate_roi_table = MagicMock(return_value={}) hyperopt.start() parallel.assert_called_once() out, err = capsys.readouterr() assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out assert dumper.called # Should be called twice, once for tickerdata, once to save evaluations assert dumper.call_count == 2 assert hasattr(hyperopt.backtesting.strategy, "advise_sell") assert hasattr(hyperopt.backtesting.strategy, "advise_buy") assert hasattr(hyperopt, "max_open_trades") assert hyperopt.max_open_trades == default_conf['max_open_trades'] assert hasattr(hyperopt, "position_stacking") def test_format_results(hyperopt): # Test with BTC as stake_currency trades = [ ('ETH/BTC', 2, 2, 123), ('LTC/BTC', 1, 1, 123), ('XPR/BTC', -1, -2, -246) ] labels = ['currency', 'profit_percent', 'profit_abs', 'trade_duration'] df = pd.DataFrame.from_records(trades, columns=labels) result = hyperopt.format_results(df) assert result.find(' 66.67%') assert result.find('Total profit 1.00000000 BTC') assert result.find('2.0000Σ %') # Test with EUR as stake_currency trades = [ ('ETH/EUR', 2, 2, 123), ('LTC/EUR', 1, 1, 123), ('XPR/EUR', -1, -2, -246) ] df = pd.DataFrame.from_records(trades, 
columns=labels) result = hyperopt.format_results(df) assert result.find('Total profit 1.00000000 EUR') def test_has_space(hyperopt): hyperopt.config.update({'spaces': ['buy', 'roi']}) assert hyperopt.has_space('roi') assert hyperopt.has_space('buy') assert not hyperopt.has_space('stoploss') hyperopt.config.update({'spaces': ['all']}) assert hyperopt.has_space('buy') def test_populate_indicators(hyperopt, testdatadir) -> None: tick = load_tickerdata_file(testdatadir, 'UNITTEST/BTC', '1m') tickerlist = {'UNITTEST/BTC': parse_ticker_dataframe(tick, '1m', pair="UNITTEST/BTC", fill_missing=True)} dataframes = hyperopt.backtesting.strategy.tickerdata_to_dataframe(tickerlist) dataframe = hyperopt.custom_hyperopt.populate_indicators(dataframes['UNITTEST/BTC'], {'pair': 'UNITTEST/BTC'}) # Check if some indicators are generated. We will not test all of them assert 'adx' in dataframe assert 'mfi' in dataframe assert 'rsi' in dataframe def test_buy_strategy_generator(hyperopt, testdatadir) -> None: tick = load_tickerdata_file(testdatadir, 'UNITTEST/BTC', '1m') tickerlist = {'UNITTEST/BTC': parse_ticker_dataframe(tick, '1m', pair="UNITTEST/BTC", fill_missing=True)} dataframes = hyperopt.backtesting.strategy.tickerdata_to_dataframe(tickerlist) dataframe = hyperopt.custom_hyperopt.populate_indicators(dataframes['UNITTEST/BTC'], {'pair': 'UNITTEST/BTC'}) populate_buy_trend = hyperopt.custom_hyperopt.buy_strategy_generator( { 'adx-value': 20, 'fastd-value': 20, 'mfi-value': 20, 'rsi-value': 20, 'adx-enabled': True, 'fastd-enabled': True, 'mfi-enabled': True, 'rsi-enabled': True, 'trigger': 'bb_lower' } ) result = populate_buy_trend(dataframe, {'pair': 'UNITTEST/BTC'}) # Check if some indicators are generated. We will not test all of them assert 'buy' in result assert 1 in result['buy'] def test_generate_optimizer(mocker, default_conf) -> None: default_conf.update({'config': 'config.json.example'}) default_conf.update({'timerange': None}) default_conf.update({'spaces': 'all'}) default_conf.update({'hyperopt_min_trades': 1}) trades = [ ('TRX/BTC', 0.023117, 0.000233, 100) ] labels = ['currency', 'profit_percent', 'profit_abs', 'trade_duration'] backtest_result =
pd.DataFrame.from_records(trades, columns=labels)
pandas.DataFrame.from_records
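A short, self-contained sketch of this row's completion, pandas.DataFrame.from_records, reusing the trade tuples and column labels that already appear in the prompt's test_format_results; the aggregate prints at the end are only illustrative.

import pandas as pd

# Trade records in the shape the tests use:
# (currency, profit_percent, profit_abs, trade_duration)
trades = [
    ('ETH/BTC', 2, 2, 123),
    ('LTC/BTC', 1, 1, 123),
    ('XPR/BTC', -1, -2, -246),
]
labels = ['currency', 'profit_percent', 'profit_abs', 'trade_duration']

backtest_result = pd.DataFrame.from_records(trades, columns=labels)

# Quick sanity checks of the kind the hyperopt result formatting reports.
print(backtest_result['profit_abs'].sum())       # total absolute profit
print(backtest_result['profit_percent'].mean())  # mean profit per trade
print(backtest_result['trade_duration'].mean())  # mean trade duration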
import numpy as np import shutil import pandas as pd import os import json import re from sklearn.model_selection import StratifiedKFold RANDOM_SEED = 2018 # Set seed for reproduction datapath = "./kkbox-music-recommendation-challenge/" # !!! Directly using pd.read_csv() leads an error: #rows < 2296833 # songs_df = pd.read_csv(os.path.join(datapath, "songs.csv"), encoding="utf-8", dtype=object) song_list = [] song_header = [] with open(os.path.join(datapath, "songs.csv"), 'r', encoding="utf-8") as fid: k = 0 for line in fid: k += 1 splits = line.strip().split(",") if len(splits) != 7: print(line) splits = splits[0:7] # correction if k == 1: print("headers", splits) song_header = splits else: song_list.append(splits) songs_df = pd.DataFrame(song_list, columns=song_header) print("songs_df shape", songs_df.shape) songs_df['language'] = songs_df['language'].map(lambda x: str(int(float(x))) if not pd.isnull(x) else "") songs_df['genre_ids'] = songs_df['genre_ids'].map(lambda x: x.replace("|", " ") if not pd.isnull(x) else "") song_ids = set(songs_df['song_id'].dropna().unique()) person_names = set(songs_df['artist_name'].dropna().unique()) | set(songs_df['composer'].dropna().unique())\ | set(songs_df['lyricist'].dropna().unique()) def name_tokenize(name_str): persons = re.split(r"[\|\\/&;]", name_str) return [x.replace("\"", "").strip() for x in persons if x.replace("\"", "").strip() != ""] person_set = [] for name_str in person_names: person_set += name_tokenize(name_str) person_set = set(person_set) person_set = sorted(list(person_set)) # sort for reproduction person_dict = dict(list(zip(person_set, range(1, len(person_set) + 1)))) with open("person_id.json", "w", encoding="utf-8") as fout: person_index = dict(list(zip(range(1, len(person_set) + 1), person_set))) json.dump(person_index, fout, indent=4, ensure_ascii=False) del person_index def encode_name(name_str): names = name_tokenize(name_str) names = [str(person_dict[x]) for x in names] return " ".join(names) songs_df['artist_name'] = songs_df['artist_name'].map(lambda x: encode_name(x) if not pd.isnull(x) else "") songs_df['composer'] = songs_df['composer'].map(lambda x: encode_name(x) if not pd.isnull(x) else "") songs_df['lyricist'] = songs_df['lyricist'].map(lambda x: encode_name(x) if not pd.isnull(x) else "") # !!! 
Directly using pd.read_csv() leads an error: #rows < 2296869 # song_extra_info_df = pd.read_csv(os.path.join(datapath, "song_extra_info.csv"), encoding="utf-8") song_extra_list = [] song_extra_header = [] with open(os.path.join(datapath, "song_extra_info.csv"), 'r', encoding="utf-8") as fid: k = 0 for line in fid: k += 1 splits = line.strip().split(",") if len(splits) != 3: print(line) if k == 1: song_extra_header = splits else: song_extra_list.append(splits) print(k - 1, "lines in song_extra_info.csv") song_extra_info_df = pd.DataFrame(song_extra_list, columns=song_extra_header) print("song_extra_info_df shape", song_extra_info_df.shape) song_ids = song_ids | set(song_extra_info_df['song_id'].dropna().unique()) song_names = set(song_extra_info_df['name'].dropna().unique()) song_names = sorted(list(song_names)) song_name_dict = dict(list(zip(song_names, range(1, len(song_names) + 1)))) song_extra_info_df["name"] = song_extra_info_df["name"].map(lambda x: song_name_dict[x] if not pd.isnull(x) else "") with open("song_name.json", "w", encoding="utf-8") as fout: song_name_index = dict(list(zip(range(1, len(song_names) + 1), song_names))) json.dump(song_name_index, fout, indent=4, ensure_ascii=False) del song_name_index with open(os.path.join(datapath, "members.csv"), 'r') as fid: print(sum(1 for line in fid) - 1, "lines in members.csv") members_df = pd.read_csv(os.path.join(datapath, "members.csv")) print("members_df shape", members_df.shape) user_ids = set(members_df['msno'].dropna().unique()) with open(os.path.join(datapath, "train.csv"), 'r') as fid: print(sum(1 for line in fid) - 1, "lines in train.csv") train_df = pd.read_csv(os.path.join(datapath, "train.csv")) print("train_df shape", train_df.shape) song_ids = sorted(list(song_ids | set(train_df['song_id'].dropna().unique()))) user_ids = sorted(list(user_ids | set(train_df['msno'].dropna().unique()))) song_dict = dict(list(zip(song_ids, range(1, len(song_ids) + 1)))) user_dict = dict(list(zip(user_ids, range(1, len(user_ids) + 1)))) with open("user_id.json", "w") as fout: user_index = dict(list(zip(range(1, len(user_ids) + 1), user_ids))) json.dump(user_index, fout, indent=4) del user_index with open("song_id.json", "w") as fout: song_index = dict(list(zip(range(1, len(song_ids) + 1), song_ids))) json.dump(song_index, fout, indent=4) del song_index train_with_user = pd.merge(train_df, right=members_df, on="msno", how="left") train_with_user_song =
pd.merge(train_with_user, right=songs_df, on="song_id", how="left")
pandas.merge
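The completed statement above is simply a second left join: `pandas.merge` attaches song metadata onto the train/member table the same way member metadata was attached onto train. A minimal, self-contained sketch of that pattern on toy data (the column values below are invented for illustration, not taken from the KKBox files):

# Sketch of the chained left-join pattern; toy frames, hypothetical values.
import pandas as pd

train = pd.DataFrame({"msno": ["u1", "u2"], "song_id": ["s1", "s2"], "target": [1, 0]})
members = pd.DataFrame({"msno": ["u1", "u2"], "city": [1, 13]})
songs = pd.DataFrame({"song_id": ["s1", "s2"], "language": ["3", "52"]})

train_with_user = pd.merge(train, right=members, on="msno", how="left")
train_with_user_song = pd.merge(train_with_user, right=songs, on="song_id", how="left")
print(train_with_user_song.shape)  # (2, 5): every train row kept, user and song columns attached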
import datetime from collections import OrderedDict import pandas as pd from google.cloud import bigquery CLIENT = None PROJECT_ID = None def insert_date_range(sql, date_range): start, end = date_range if start is None and end is None: return sql if start is None: return sql + ' WHERE `date` <= DATE("%s")' % end if end is None: return sql + ' WHERE `date` >= DATE("%s")' % start return sql + ' WHERE DATE("%s") <= `date` AND `date` <= DATE("%s")' % (start, end) # define helper fns: def query_covariate_df_from_gbq(pid, date_range, covariate): """ Query a table from Google BigQuery, via SQL. :param pid: patient id (str) :param covariate: `heartrate`, `step`, `sleep` """ assert covariate in ['heartrate', 'steps', 'sleep'] columns = ['Date', 'Time', 'Source', 'Value'] if covariate != 'sleep': sql = """ SELECT date, time, device, value FROM `%s.%s.%s` """ % (PROJECT_ID, pid, covariate) else: sql = """ SELECT date, time, device, type, value FROM `%s.%s.%s` """ % (PROJECT_ID, pid, covariate) columns = ['Date', 'Time', 'Source', 'Value', 'n_sleep_seconds'] sql = insert_date_range(sql, date_range) df = CLIENT.query(sql).to_dataframe() df.columns = columns try: df['date_time'] = pd.to_datetime(df['date_time']) except KeyError: # if there is SHIT it in the db df['date_time'] = df['date_time'] = ['%s %s' % (d, t) for d, t in zip(df['Date'].values, df['Time'].values)] df['date_time'] = pd.to_datetime(df['date_time']) df.drop(['Date', 'Time'], inplace=True, axis=1) # df = df.set_index('date_time').drop('Test', axis=0).reset_index() # df['date_time'] = pd.to_datetime(df['date_time']) df['UserID'] = pid if covariate == 'sleep': df = df[['UserID', 'Source', 'Value', 'n_sleep_seconds', 'date_time']] df['n_sleep_seconds'] = pd.to_numeric(df['n_sleep_seconds']) else: df = df[['UserID', 'Source', 'Value', 'date_time']] df['Value'] = pd.to_numeric(df['Value']) return df def preprocess_covariate_df(pid, pid_df, covariate): """ Preprocess a covariate dataframe: - expand data to 1 min resolution - expand sleep data :param covariate: `heartrate`, `steps` or `sleep` :return: """ pid_df_expanded = [] # do the following per device and concatenate afterwards. for device, ddf in pid_df.groupby('Source'): if covariate == 'sleep': # apple hk data if any(['InBed' in ddf['Value'].unique(), 'Asleep' in ddf['Value'].unique()]): ddf.columns = ['uid', 'device', 'sleep', 'date_time'] elif ddf.empty: ddf.columns = ['uid', 'device', 'sleep', 'date_time'] ddf = ddf.set_index('date_time').resample('T').median().reset_index() ddf['sleep'] = 0. # fitbit data elif any(['rem' in ddf['Value'].unique(), 'awake' in ddf['Value'].unique(), 'wake' in ddf['Value'].unique(), 'deep' in ddf['Value'].unique(), 'restless' in ddf['Value'].unique(), 'alseep' in ddf['Value'].unique(), 'unknown' in ddf['Value'].unique(), ]): # we need to expand: expanded_dfs = [] for i, r in ddf.iterrows(): n_mins = r['n_sleep_seconds'] // 60 df = pd.DataFrame([r['Value']] * n_mins, index=pd.date_range(r['date_time'].round(freq='T'), periods=n_mins, freq='T')) df['uid'] = r['UserID'] expanded_dfs.append(df) ddf = pd.concat(expanded_dfs, sort=True, axis=0) # delete dublicate indices: ddf = ddf.loc[~ddf.index.duplicated(keep='first')] ddf.reset_index(inplace=True) ddf.columns = ['date_time', 'sleep', 'uid'] # sort out the user ID else: # corrupted fitbit data ddf.columns = ['uid', 'device', 'sleep', 'date_time'] uid = ddf['uid'].unique()[0] ddf['sleep'] = 0. 
ddf = ddf.set_index('date_time').resample('T').median().reset_index() ddf['uid'] = uid ddf['device'] = device ddf = ddf[['uid', 'device', 'sleep', 'date_time']] ddf['sleep'] = ddf['sleep'].astype(float) elif covariate == 'steps': ddf.columns = ['uid', 'device', 'steps', 'date_time'] ddf['steps'] = ddf['steps'].astype(float) ddf = ddf.set_index('date_time').resample('T').mean().reset_index() elif covariate == 'heartrate': ddf.columns = ['uid', 'device', 'heart_rate', 'date_time'] ddf['heart_rate'] = ddf['heart_rate'].astype(float) ddf = ddf.set_index('date_time').resample('T').median().reset_index() ddf['uid'] = pid ddf['device'] = device ddf = ddf.loc[~ddf.index.duplicated(keep='first')] pid_df_expanded.append(ddf) try: pid_df = pd.concat(pid_df_expanded, axis=0) except ValueError: raise OSError('Empty input files!') pid_df = pid_df.set_index(['device', 'date_time']).sort_index() return pid_df def get_PID_df_per_device(pid, dfs, devices=['fitbit'], ndays=1000): """ This returns a pid_df per device in the input .csvs or .jsons Possible Devices: ['FB-Fitbit', # Fitbit 'HK-Connect', # Garmin 'HK-Health', # ?? 'HK-iPhone', # Phone -> Steps only 'HK-Motiv', # motiv ring 'HK-Apple', # apple watch 'HK-Biostrap' # Biostrap ] :param pid: :return: """ data_per_device = OrderedDict() for d in devices: p_dfs = [] for covariate in dfs.keys(): try: p_dfs.append(dfs[covariate].xs(d, level='device', drop_level=True).drop('uid', axis=1)) except KeyError: print('No %s data found for %s' % (covariate, d)) pdf = pd.DataFrame(columns=[covariate]) pdf.index.name = 'date_time' p_dfs.append(pdf) device_df = p_dfs[0].join(p_dfs[1], how='outer') device_df = device_df.join(p_dfs[2], how='outer') try: last_timestamp = device_df.index.values[-1] limit = last_timestamp - pd.Timedelta(days=ndays) device_df = device_df.loc[limit:last_timestamp] except IndexError: pass device_df['uid'] = pid if device_df.index.name != 'date_time': device_df.reset_index(inplace=True) device_df.set_index('date_time', inplace=True) device_df.dropna(subset=['heart_rate', 'steps', # 'sleep' ], axis=0, thresh=1, inplace=True) device_df[['heart_rate', 'steps']] = device_df[['heart_rate', 'steps']].astype(float) data_per_device[d] = device_df return data_per_device def impute_PID_df(in_df, slen, granularity, **kwargs): """ The main preprocessing function. IMPORTANT: As we reasample, we need to binarize the sleep before doing this. :param in_df: :return: """ uid = in_df['uid'].unique() assert len(uid) == 1, 'There must be exactly 1 ID per user.' in_df.drop('uid', axis=1) in_df = in_df[in_df['heart_rate'] >= 20] # hard cut-off for HR as HR of 20 is non-realistic # binarize the sleep: in_df['sleep'] = in_df['sleep'].map(dict([('awake', 0), ('wake', 0), ('unknown', 1), ('light', 1), ('deep', 1), ('restless', 1), ('rem', 1), ('asleep', 1), ('Asleep', 1), ('InBed', 0), ('NaN', 0)])) sleep_df = in_df.copy() sleep_df.loc[~sleep_df[['heart_rate', 'steps']].isnull().all(axis=1), 'sleep'] = sleep_df.loc[ ~sleep_df[['heart_rate', 'steps']].isnull().all(axis=1), 'sleep'].fillna(0.) 
# resample in_df = in_df.resample(granularity).median() in_df['sleep'] = sleep_df.resample(granularity).max() # set the steps to 0, where we have sleep == 1 in_df.loc[in_df['sleep'] == 1, 'steps'] = 0 # now extend the index of days that have x% of slen, and fill the nans w/ the average in sleep stratification in_df.dropna(thresh=1, axis=0, inplace=True) days = [] for n, d in in_df.groupby(pd.Grouper(freq='D')): exclusioncounter = 0 if len(d.index.values) >= .5 * slen: # get the date and reindex: date = d.index[0].date() # create full range: full_day_index = pd.date_range(date, periods=slen, freq=granularity) d = d.reindex(full_day_index) days.append(d) else: exclusioncounter += 1 try: in_df = pd.concat(days) except ValueError: return pd.DataFrame({'Empty': []}) in_df, _, _ = fill_nans_w_stratified_average(in_df, slen, granularity) # This dropna is very important: Drop the hours for which we did not have data!! in_df.dropna(axis=0, inplace=True) in_df = in_df.groupby(pd.Grouper(freq='D')).filter(lambda x: len(x.index.values) == slen) # binarize the sleep: s = in_df['sleep'] in_df.loc[:, 'sleep'] = s.where(s == 0., 1.).values assert in_df.shape[0] / slen == float(in_df.shape[0] // slen) in_df['uid'] = uid[0] # ensure numeric: in_df[[c for c in in_df.columns if c != 'uid']] = in_df[[c for c in in_df.columns if c != 'uid']].apply( pd.to_numeric) return in_df def get_average_per_granularity(df): """ Calculate the hourly medians and return a df that holds there values. :param df: the input df to calculate the hourly medians with :return: the df holding the hourly medians """ # median for HR and steps, mean for sleep, is later binarized. median_df = df.resample('30T').median() median_df.index = [h.time() for h in median_df.index] median_df.index.name = 'time_unit' median_df = median_df.groupby('time_unit').median() # here always median return median_df def get_stratified_average_per_granularity(df, slen, granularity, **kwargs): """ Calculate the medians/means per granularity STRATIFIED BY SLEEP and return a df that holds these values. :param df: the input df to calculate the hourly medians with :return: the df holding the hourly medians """ # stratify by sleep: dfs = dict() nulls = [] for n, g in df.groupby('sleep'): if pd.isnull(n): continue # resample (will introduce 'NaNs' if no values res_df = g.resample('30T').mean() res_df.index = [h.time() for h in res_df.index] res_df.index.name = 'time_unit' # after the median NaNs migth be reduced but not resolved. res_df = res_df.groupby('time_unit').mean() # here always median # now assert that res_df has all hours: if res_df.shape[0] < slen: time_units = [] for i in range(0, 24): time_units.extend([ datetime.time(i, j) for j in range(0, 60, int(granularity.strip('T'))) ]) res_df = res_df.reindex(pd.Index(time_units)) res_df.index.name = 'time_unit' nulls.append(sum(res_df.isnull().sum())) # fill whats left with the median of the res_df (as this is stratified as well) res_df = res_df.fillna(res_df.mean()) assert sum(res_df.isnull().sum()) == 0 dfs[n] = res_df return dfs, nulls def fill_nans_w_stratified_average(df, slen, granularity, **kwargs): """ Fills the NaNs by sleep distribution. 
""" df = df.astype('float') impute_count = 0 # ensure that sleep is binary: dfs, nulls = get_stratified_average_per_granularity(df.copy(), slen, granularity) imputed = [] for n, g_df in df.groupby('sleep'): if pd.isnull(n): imputed.append(g_df) complete_missing = g_df.loc[g_df[['steps', 'heart_rate']].isnull().all(axis=1)].index for t_idx in complete_missing: impute_count += 2 # as we fill 3 values h = t_idx.time() g_df.loc[t_idx, ['steps', 'heart_rate']] = dfs[n].loc[h, ['steps', 'heart_rate']] # now fill the remaining NaNs (we might have had NaNs in the average_df:) for c in [c for c in g_df.columns if c != 'sleep']: for t in g_df.loc[g_df[c].isnull(), c].index: h = t.time() g_df.loc[t, c] = dfs[n].loc[h, c] imputed.append(g_df) imputed.append(df[df['sleep'].isnull()]) del df df = pd.concat(imputed, axis=0) del imputed df.sort_index(inplace=True) # now, where sleep is missing, we fill by the median over the complete data including sleep: df = df.astype('float') average_df = get_average_per_granularity(df) daily_median_df = df.groupby(pd.Grouper(freq='D')).median() # the medians per day complete_missing = df.loc[df[df.columns].isnull().all(axis=1)].index for t_idx in complete_missing: impute_count += 3 # as we fill 3 values h = roundtime(t_idx.to_pydatetime(), 60 * 30).time() df.loc[t_idx, :] = average_df.loc[h] for c in df.columns: for t in df.loc[df[c].isnull(), c].index: # h = round_time(t.time(), 30*60) h = roundtime(t.to_pydatetime(), 60 * 30).time() d = t.date() if c != 'sleep': if not pd.isnull(average_df.loc[h, c]): df.loc[t, c] = average_df.loc[h, c] else: df.loc[t, c] = daily_median_df.loc[d, c] return df, impute_count, nulls def roundtime(dt=None, roundTo=60): """Round a datetime object to any time laps in seconds dt : datetime.datetime object, default now. roundTo : Closest number of seconds to round to, default 1 minute. Author: <NAME> 2012 - Use it as you want but don't blame me. """ if dt == None: dt = datetime.datetime.now() seconds = (dt - dt.min).seconds # // is a floor division, not a comment on following line: rounding = (seconds + roundTo / 2) // roundTo * roundTo return dt + datetime.timedelta(0, rounding - seconds, -dt.microsecond) def upload_to_gpq(df, pid): """ Upload a df of preprocessed data for pid to gbq """ # This pandas implementation is slow! @Diego: rewriting to native GBQ would be much faster! df.index.name = 'date_time' df.reset_index(inplace=True) df.to_gbq('%s.preprocessed' % pid, project_id='phd-project', chunksize=None, if_exists='replace') def main(pid, date_range, slen, granularity, **kwargs): if slen is None: slen = 288 if granularity is None: granularity = '5T' covariate_dfs = OrderedDict() for covariate in ['heartrate', 'steps', 'sleep']: try: covariate_df = query_covariate_df_from_gbq(pid, date_range, covariate) covariate_df = preprocess_covariate_df(pid, covariate_df, covariate) covariate_dfs[covariate] = covariate_df except NotImplementedError: covariate_dfs[covariate] =
pd.DataFrame(columns=['uid', covariate])
pandas.DataFrame
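The completion above supplies an empty frame with the expected columns when a covariate cannot be retrieved, so the downstream per-device join still sees the `uid`/covariate columns. A small sketch of that fallback, assuming a hypothetical `fetch_covariate` stand-in for the BigQuery call (not part of the original module):

# Sketch of the empty-DataFrame fallback; `fetch_covariate` is a made-up placeholder.
import pandas as pd

def fetch_covariate(pid, covariate):
    raise NotImplementedError  # pretend the query is unavailable

covariate_dfs = {}
for covariate in ['heartrate', 'steps', 'sleep']:
    try:
        covariate_dfs[covariate] = fetch_covariate('patient-001', covariate)
    except NotImplementedError:
        # keep the column contract even when no data comes back
        covariate_dfs[covariate] = pd.DataFrame(columns=['uid', covariate])

print(covariate_dfs['steps'].columns.tolist())  # ['uid', 'steps']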
"""Python library for GCCR002""" from contextlib import contextmanager from datetime import datetime import hashlib from io import StringIO from IPython.display import display as _display from itertools import chain, product, combinations_with_replacement import joblib import json import logging import matplotlib.pyplot as plt from matplotlib import gridspec from matplotlib.ticker import MultipleLocator, FormatStrFormatter import networkx as nx import numpy as np import pandas as pd import pathlib import pickle import pingouin import re from scipy.special import logit from scipy.stats import ks_2samp, mannwhitneyu, wilcoxon, gaussian_kde, chi2_contingency, entropy, norm import seaborn as sns from sklearn.decomposition import PCA, NMF from sklearn.ensemble import RandomForestClassifier from sklearn.feature_selection import RFE, RFECV from sklearn.linear_model import LinearRegression, RidgeClassifier, RidgeClassifierCV, LogisticRegression, LogisticRegressionCV from sklearn.metrics import auc, roc_curve, roc_auc_score, plot_roc_curve, confusion_matrix from sklearn.metrics import precision_score, recall_score, get_scorer, make_scorer, SCORERS from sklearn.model_selection import ShuffleSplit, GroupShuffleSplit, LeaveOneOut, cross_validate, cross_val_score, cross_val_predict from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.utils.class_weight import compute_sample_weight from statsmodels.api import add_constant from statsmodels.discrete.discrete_model import Logit import sys sys.path.append('/home/rgerkin/dev/pyvenn') #TODO: Turn pyvenn into a pip-installable package from tqdm.auto import tqdm, trange import urllib from venn import venn3, venn4, venn5, get_labels import warnings import zipfile sns.set(font_scale=1.1) sns.set_style('whitegrid') logger = logging.Logger('GCCR002') known_md5s = {'GCCR002_complete_database.csv': 'd476f67b081dd9d8d8cf1ee0481ad4e8', 'GCCR002_DATA_COVID_TimeStamp.xlsx': 'aa016d9208fbb44ffd8ce4a2dfe908a4', 'GCCR002_DATA_COVID_TimeStamp_plusdataJuly.csv': '56922f025047e379bf5cfc8ff2ceed91'} DATA = pathlib.Path('data') YOUGOV_CUTOFF_DATE = '2020-07-03' # In order to guarantee a match to the published figures, we must remove YouGov reported after the manuscript submission date. # This corresponds to week 11. To update this figure with new data (collected by YouGov after manuscript submission), # change max_week to a higher value (e.g. the present day)""" # For each type (e.g. 
categorical), a list of regular expressions for features considered to be that type dtype_ontology = {'categorical': ['Gender', 'GCCR', 'Referred', 'Test_Name'], 'discrete': ['Age', 'Days_since_onset', 'Onset_day', 'Completion_day', 'Recovery'], 'binary': ['Changes', 'Symptoms', 'Prior_conditions', 'cigarette(!=_f)', 'cigarette_use', 'Resp'], 'continuous': ['(?<!did_)(before_)', 'during_', 'after_', 'change_', 'recovery_', 'frequency', 'cigarette(?!_use)'], } feature_ontology = {'incidental': ['GCCR', 'Test_Name', 'Completion_', 'Referred'], 'chemosensory': ['Changes_in', 'Taste', 'Smell', 'Cheme', '_food', '_smell'], 'demographic': ['Gender', 'Age', 'Country'], 'history': ['Prior_conditions', 'cigarette'], 'typical': ['Symptoms', 'Resp', 'Recovery', 'Blocked', 'Onset_', 'Days_'] } timing_ontology = {'incidental': ['GCCR', 'Test_Name', 'Day', '_day', 'Referred'], 'demographic': ['Gender', 'Age', 'Country'], 'before': ['Prior_conditions', 'before_illness', 'cigarette'], 'during': ['Changes_in', 'change_illness', 'during_illness', 'Resp', 'Symptoms'], 'after': ['Recovery', 'after_illness', 'recovery_illness']} # Color scheme colors = pd.Series( index=pd.MultiIndex.from_tuples([], names=["diagnosis", "sense"]), dtype="object" ) colors.loc["C19+", "Smell"] = "#6699CD" colors.loc["C19-", "Smell"] = "#a5bcd4" colors.loc["C19+", "Taste"] = "#ff9900" colors.loc["C19-", "Taste"] = "#ffce85" colors.loc["C19+", "Chemesthesis"] = "#009999" colors.loc["C19-", "Chemesthesis"] = "#5fc7c7" colors.loc["C19+", "Blocked_nose"] = "#996600" colors.loc["C19-", "Blocked_nose"] = "#d1a752" @contextmanager def all_logging_disabled(highest_level=logging.CRITICAL): """ A context manager that will prevent any logging messages triggered during the body from being processed. :param highest_level: the maximum logging level in use. This would only need to be changed if a custom level greater than CRITICAL is defined. """ # two kind-of hacks here: # * can't get the highest logging level in effect => delegate to the user # * can't get the current module-level override => use an undocumented # (but non-private!) 
interface previous_level = logging.root.manager.disable logging.disable(highest_level) try: yield finally: logging.disable(previous_level) def get_hash(x): return joblib.hash(x) def load_all(): # All of the content loaded here was produced in pre-analysis.ipynb with open(DATA / 'processed' / 'data-types.json') as f: dtypes = json.load(f) df = pd.read_csv(DATA / 'processed' / 'data-clean.csv', dtype=dtypes, index_col=0) Xu = pd.read_csv(DATA / 'processed' / 'X-raw.csv', index_col=0).astype('float') Xn = pd.read_csv(DATA / 'processed' / 'X-normalized.csv', index_col=0).astype('float') #Xu.index = Xu.index.astype(int) #Xn.index = Xu.index.astype(int) with open(DATA / 'processed' / 'targets.json') as f: targets = json.load(f) sets = {name: set(ids) for name, ids in targets.items()} with open(DATA / 'processed' / 'classes.json') as f: classes = json.load(f) return df, Xu, Xn, dtypes, sets, classes def load_raw(): #file_name = 'GCCR002_DATA_COVID_TimeStamp.xlsx' #file_name = 'GCCR002_DATA_COVID_TimeStamp_plusdataJuly.csv' #assert_md5(file_name) # Check that the MD5 hash of the file is as expected #if file_name.endswith('.xlsx'): # df = pd.read_excel(file_name) # Pandas takes forever to load Excel files #elif file_name.endswith('.csv'): # df = pd.read_csv(file_name) df_ORIGINAL = pd.read_csv(DATA / 'raw' / 'GCCR002_DATA_COVID_TimeStamp.csv') df_JULY = pd.read_csv(DATA / 'raw' / 'GCCR002_julydatabase_timestamp_Countryclean_labelscorrect.csv') to_drop = ['UniqueID.1', 'UniqueID_1', 'Unnamed: 0', 'Unnamed: 2', 'Country_clean'] for df_ in [df_ORIGINAL, df_JULY]: df_.drop(to_drop, axis=1, errors='ignore', inplace=True) df_['Date_of_onset'] = pd.to_datetime(df_['Date_of_onset']) df_['Year_of_birth_Time_Stamp'] = pd.to_datetime(df_['Year_of_birth_Time_Stamp']) assert not set(df_ORIGINAL['UniqueID']).intersection(set(df_JULY['UniqueID'])) df = pd.concat([df_ORIGINAL, df_JULY[df_ORIGINAL.columns]]) df = df.rename(columns={'Chemethesis_before_illness': 'Chemesthesis_before_illness'}) assert len(set(df['UniqueID'])) == df.shape[0] df = df.set_index('UniqueID') df = df.drop('UniqueID.1', errors='ignore') report_size(df, 'loading') return df def get_md5(file_name): """Get MD5 hash of file""" with open(file_name, 'rb') as f: # read contents of the file data = f.read() # pipe contents of the file through md5 = hashlib.md5(data).hexdigest() return md5 def assert_md5(file_name): md5 = get_md5(file_name) assert md5 == known_md5s[file_name], "MD5 hashes do not match; file may have been changed." 
def date_to_integer_day(series): series = series.dt.dayofyear series = series.fillna(-1).astype(int) return series def display(x): if isinstance(x, str): print(x) else: _display(x) def interp_index(array1, array2, threshold): i = np.searchsorted(array1, threshold) a1 = array1[i-1] b1 = array1[i] a2 = array2[i-1] b2 = array2[i] return a2 + (b2-a2)*(threshold-a1)/(b1-a1) def plot_roc(clf, X, y, cv, cv_kwargs=None, weights=None, concat=True, ax=None, name=None, title=None): # Plot ROC curve roc_aucs = [] n = cv.get_n_splits() cv_kwargs = {} if cv_kwargs is None else cv_kwargs if ax is None: plt.figure(figsize=(4,4)) ax = plt.gca() y_score = [] y_true = [] all_weights = [] sample_weight_ = get_weights(X, y, weights) for i, (train, test) in enumerate(cv.split(X, **cv_kwargs)): #sample_weight = get_weights(X.iloc[train], y.iloc[train], weights) sample_weight = sample_weight_.iloc[train] clf.fit(X.iloc[train, :], y.iloc[train], sample_weight=sample_weight) #sample_weight = get_weights(X.iloc[test], y.iloc[test], weights) sample_weight = sample_weight_.iloc[test] if hasattr(clf, 'predict_proba'): y_score_ = clf.predict_proba(X.iloc[test, :])[:, 1] else: y_score_ = clf.decision_function(X.iloc[test, :]) if not concat: curve = plot_roc_curve(clf, X.iloc[test, :], y.iloc[test], alpha=(1/np.sqrt(n)), ax=ax, sample_weight=sample_weight, name='Split %d' % i) roc_aucs.append(curve.roc_auc) else: auc = roc_auc_score(y.iloc[test], y_score_) roc_aucs.append(auc) y_score += list(y_score_) y_true += list(y.iloc[test]) all_weights += list(sample_weight) score = np.mean(roc_aucs) if concat: fpr, tpr, thresholds = roc_curve(y_true, y_score, sample_weight=all_weights) #score = roc_auc_score(y_true, y_score, sample_weight=all_weights) if not name: name = clf.__class__.__name__.replace('Classifier','').replace('Ridge', 'Linear') sens_half = interp_index(fpr, tpr, 0.5) spec_half = 1-interp_index(tpr, fpr, 0.5) print("%s: Sens50 = %.3g, Spec50 = %.3g" % (name, sens_half, spec_half)) label = '%s: %.3g' % (name, score) if name else '%.3g' % score ax.plot(fpr, tpr, label=label) else: ax.set_title('AUC = %.3f +/- %.3f' % (score, np.std(roc_aucs)/np.sqrt(n))) ax.plot([0, 1], [0, 1], 'k--') ax.set_xlabel('False Positive Rate') ax.set_ylabel('True Positive Rate') if title: ax.set_title(title) if n <= 10 or concat: ax.legend(fontsize=12, loc=4) return score def rank_features(clf, X): # Rank the features identified by the classifier from most to least important key_features = pd.Series(clf.feature_importances_, index=X.columns).sort_values(ascending=False) # Show the 20 most important key_features.index = nicify(list(key_features.index)) return key_features.to_frame(name='Importance') def rank_coefs(clf, X, nicify_=True): key_features = pd.Series(clf.coef_.ravel(), index=X.columns) if hasattr(clf, 'intercept_') and clf.intercept_: key_features['Intercept'] = clf.intercept_[0] kf = key_features.to_frame(name='Value') kf['Magnitude'] = kf['Value'].abs().round(3) kf['Sign'] = ['+' if x>=0 else '-' for x in kf['Value']] kf = kf.sort_values('Magnitude', ascending=False) kf = kf.drop('Value', axis=1) kf = kf[kf['Magnitude']>0] if nicify_: kf.index = nicify(list(kf.index)) return kf def compute_score(clf, X, y, cv): # Apply cross-validation using this splitter, and check the following fitness metrics results = cross_validate(clf, X, y, scoring=['roc_auc'], cv=cv) for key in results: print(key, results[key].mean()) def cardinality_filter(X, n, dtype=None): cols = [] for col in X: if dtype is None or X[col].dtype == dtype: u = 
X[col].unique() if len(u)>=n: cols.append(col) return cols def ontology_to_classes(df, ontology, invert=False, add=None): if add is None: add = [] unassigned_cols = list(df.drop('id', errors='ignore')) if invert: classes = {x:[] for x in ontology} else: classes = {} for key, patterns in ontology.items(): for pattern in patterns: r = re.compile(pattern) cols = list(filter(r.search, list(df))) for col in cols: if col in unassigned_cols: if invert: classes[key].append(col) else: classes[col] = key unassigned_cols.remove(col) assert len(unassigned_cols)==0, "%s were unassigned." % unassigned_cols for kind in add: # The above ontology maps each feature to a single class. # Additiomal feature_classes below can reuse these features. if kind == 'CDC9': classes[kind] = ['Symptoms_%s' % x for x in ['changes_in_smell', 'changes_in_food_flavor', 'fever', 'muscle_aches', 'runny_nose', 'dry_cough', 'diarrhea', 'fatigue', 'difficulty_breathing_/_shortness_of_breath']] if kind == 'CDC7': classes[kind] = ['Symptoms_%s' % x for x in ['fever', 'muscle_aches', 'runny_nose', 'dry_cough', 'diarrhea', 'fatigue', 'difficulty_breathing_/_shortness_of_breath']] if kind == 'CDC3': classes[kind] = ['Symptoms_%s' % x for x in ['fever', 'dry_cough', 'difficulty_breathing_/_shortness_of_breath']] elif kind == 'chemosensory-binary': classes[kind] = [x for x in classes['chemosensory'] if 'illness' not in x] return classes def get_rccv_score(clf, X, y, feature_classes, classes, weights='balanced'): sample_weight = get_weights(X, y, weights) features = list(chain(*[feature_classes[x] for x in classes])) clf.fit(X[features], y, sample_weight=sample_weight) return clf.best_score_.round(3) def roc(clf, X, y, feature_classes, classes, cv, weights=None, concat=True, ax=None, with_name=True, title=False): features = list(chain(*[feature_classes[x] for x in classes])) if with_name: name = '%s' % '+'.join(classes) score = plot_roc(clf, X[features], y, cv, weights=weights, concat=concat, ax=ax, name=name) if ax and title: if title is True: title = '%s' % '+'.join(classes) ax.set_title(title) return score def country_weights(X, y): test_names = [col for col in X if 'Test_' in col] sample_weight = y.copy() sample_weight[:] = 1 for test_name in test_names: m = X[test_name].mean() # Allows this to work even on standardized data index = X[X[test_name]>m].index if len(index): weight = compute_sample_weight('balanced', y.loc[index]) sample_weight.loc[index] = weight return sample_weight def feature_weights(X, y, feature): sample_weight = y.copy() sample_weight[:] = 1 m = X[feature].mean() # Allows this to work even on standardized data index = X[X[feature]>m].index if len(index): weight = compute_sample_weight('balanced', y.loc[index]) sample_weight.loc[index] = weight return sample_weight def get_weights(X, y, kind): if isinstance(kind, pd.Series): sample_weight = kind elif kind == 'balanced-by-country': sample_weight = country_weights(X, y) elif kind == 'balanced': sample_weight = compute_sample_weight('balanced', y) elif kind: sample_weight = compute_sample_weight('balanced', X[kind]) else: sample_weight = compute_sample_weight(None, y) sample_weight = pd.Series(sample_weight, index=X.index) return sample_weight def table_summarize(X, y, feature): y.name = 'COVID status' summary = X.join(y).groupby([feature, 'COVID status']).count().sum(axis=1).to_frame().unstack('COVID status')[0] return summary.div(summary.sum()).round(2) def hist_summarize(X, y, feature): plt.hist(X.loc[y==1, feature], color='r', bins=30, alpha=0.3, density=True, 
label='COVID+'); plt.hist(X.loc[y==0, feature], color='g', bins=30, alpha=0.3, density=True, label='COVID-'); plt.legend() def report_size(df, action): print("Data after %s has %d subjects and %d features" % (action, *df.shape)) def qq_plot(X, y, feature): x_minus = X[y==0][feature].quantile(np.linspace(0, 1, 101)) x_plus = X[y==1][feature].quantile(np.linspace(0, 1, 101)) ax = sns.lineplot(x_minus, x_plus) ax.set_xlabel('%s (COVID -)' % feature.replace('_',' ')) ax.set_ylabel('%s (COVID +)' % feature.replace('_',' ')) ax.plot([0, max(x_minus)], [0, max(x_plus)], '--') def pp_plot(X, y, feature, label=True, stabilized=False, ax=None): x_minus = X[y==0][feature] x_plus = X[y==1][feature] minn = min(x_minus.min(), x_plus.min()) maxx = max(x_minus.max(), x_plus.max()) s_minus = pd.Series(index=np.linspace(minn-0.001, maxx+0.001, 200), dtype=float) s_plus = pd.Series(index=np.linspace(minn-0.001, maxx+0.001, 200), dtype=float) s_minus[:] = s_minus.index.map(lambda x: (x_minus<=x).mean()) s_plus[:] = s_plus.index.map(lambda x: (x_plus<=x).mean()) if stabilized: s_minus = (2/np.pi)*np.arcsin(np.sqrt(s_minus)) s_plus = (2/np.pi)*np.arcsin(np.sqrt(s_plus)) D, p = ks_2samp(x_minus, x_plus) #S, p = mannwhitneyu(x_minus, x_plus) sign = (s_plus - s_minus).mean() > 0 #print(sign) ax = sns.lineplot(s_minus, s_plus, ax=ax, label='%s (D=%.2f)' % (feature.replace('_', ' ').title(), D if sign>0 else -D)) ax.set_xlabel('COVID -') ax.set_ylabel('COVID +') ax.plot([0, 1], [0, 1], 'k--') ax.legend(fontsize=11) def nicify(name, line_break=False): if isinstance(name, list): return list(map(nicify, name)) s = name.replace('_', ' ').title().replace('Symptoms ', '').split('/')[0].strip().replace('Gccr', 'GCCR v')\ .replace('Illness Y', 'Illness').replace('Before Illness', 'Before').replace('After Illness', 'After')\ .replace('Prior Conditions None', 'No Prior Conditions')\ .replace('Basic Tastes ', '').replace('Recovery Y', 'Recovered').replace('Prior Conditions ', '').split('(')[0]\ .replace('Changes In Smell I Cannot Smell At All', 'Anosmia/Hyposmia')\ .replace('Changes In Smell Smells Smell Different Than They Did Before', 'Parosmia')\ .replace("Changes In Smell I Can Smell Things That Aren'T There", 'Phantosmia')\ .replace('Changes In Smell Sense Of Smell Fluctuates', 'Smell Fluctuation')\ .replace('During Illness', 'During').replace(' Illness', '')\ .replace(' That Required Chemotherapy Or Radiation', '+Chemo/Radiation')\ .replace('Combustible Cigarette', 'Cigarette')\ .replace('E-Cigarette 30 Day', 'E-Cigarette')\ .replace(' That Did Not Require Chemotherapy Or Radiation', '-Chemo/Radiation')\ .replace('Results','').replace('Final','').replace('!','').replace('Version','').split('[')[0]\ .replace('Const', 'Intercept')\ .replace(' ', ' ').strip() if line_break: x = s.rfind(' ') s = s[:x] + '\n' + s[x+1:] return s def nicify_labels(ax, x=True, y=True, line_break=True): for xy in ['x', 'y']: if locals()[xy]: # Fix axis labels z = getattr(ax, 'get_%slabel' % xy)() new = nicify(z, line_break=line_break) getattr(ax, 'set_%slabel' % xy)(new) # Fix tick labels z = getattr(ax, 'get_%sticklabels' % xy)() new = [nicify(zi.get_text(), line_break=line_break) if not zi.get_text().isnumeric() else zi.get_text() for zi in z] getattr(ax, 'set_%sticklabels' % xy)(new) def fill_impute(df, feature_dtypes, copy=True): if copy: df = df.copy() # Apply the following missing data handling and recasting rules. 
for col, dtype in feature_dtypes.items(): if dtype == 'categorical': df[col] = df[col].fillna('Missing').astype('object') elif dtype == 'discrete': df[col] = df[col].fillna(df[col].median()).astype(int) elif dtype == 'binary': df[col] = df[col].fillna(0.5).astype('float') elif dtype == 'continuous': df[col] = df[col].fillna(df[col].median()).astype('float') return df def plot_violin(X, y, feature, ax): y.name = "COVID status" Xy = X.join(y) sns.violinplot(x="COVID status", y=feature, data=Xy, ax=ax, alpha=0.2) ax.set_xlabel('') ax.set_xticklabels(['COVID -', 'COVID +'], fontweight='bold') ax.set_ylabel(nicify(feature), fontweight='bold') def rescale(X): # Create a version of X for which every column has mean 0, variance 1. X_st = X.copy() std_sclr = StandardScaler() X_st[:] = std_sclr.fit_transform(X) assert np.allclose(X_st.mean(), 0) assert all(np.isclose(X_st.var(ddof=0), 1) + np.isclose(X_st.var(ddof=0), 0)) # Create a version of X for which every column has min 0, max 1. mm_sclr = MinMaxScaler() X_nm = X.copy() X_nm[:] = mm_sclr.fit_transform(X) return X_st, std_sclr, X_nm, mm_sclr def lrcv_check(lrcv, X, y, features): sample_weight = get_weights(X, y, 'balanced-by-country') lrcv.fit(X[features], y, sample_weight=sample_weight) return pd.DataFrame(lrcv.scores_[True].mean(axis=0).round(3), index=pd.Series(lrcv.Cs_, name='C'), columns=pd.Series(lrcv.l1_ratios_, name='L1 Ratio')) def rccv_check(rccv, X, y, features): sample_weight = get_weights(X, y, 'balanced-by-country') rccv.fit(X[features], y, sample_weight=sample_weight) return rccv.best_score_.round(3), rccv.alpha_ def raw_hist(X, y, feature, cumul=False): minn = X[feature].min() maxx = X[feature].max() diff = maxx - minn bins = np.linspace(minn-diff*0.01, maxx+diff*0.01, 30) X.loc[y==1, feature].hist(density=True, cumulative=cumul, bins=bins, alpha=0.3, label='+') X.loc[y==0, feature].hist(density=True, cumulative=cumul, bins=bins, alpha=0.3, label='-') plt.legend() plt.title(nicify(feature)) def contingency(X, features, verbose=True): z = pd.crosstab(*[X[f] for f in features]) z.index.name = nicify(z.index.name) z.columns.name = nicify(z.columns.name) n = z.sum().sum() chi2, p, _, _ = chi2_contingency(z) k = min(*z.shape) if n and k>1: v = np.sqrt(chi2/(n*(k-1))) else: v = None if min(z.shape) >= 2 and z.iloc[0, 1] and z.iloc[1, 1]: num = (z.iloc[0, 0] / z.iloc[0, 1]) denom = (z.iloc[1, 0] / z.iloc[1, 1]) oddsr = num / denom else: oddsr = None if verbose: print('p = %.2g' % p) return z, p, chi2, v, oddsr def plot_coefs(clf, X, title=''): x = rank_coefs(clf, X) #x = x.drop('Intercept', errors='ignore') threshold = x.drop('Intercept', errors='ignore')['Magnitude'].max()/10 x = x[x['Magnitude'] > threshold] x = x.sort_values('Magnitude', ascending=True) x['Pos'] = x.apply(lambda z: z['Magnitude'] if z['Sign']=='+' else None, axis=1) x['Neg'] = x.apply(lambda z: z['Magnitude'] if z['Sign']=='-' else None, axis=1)*-1 try: x['Pos'].plot(kind='barh', color='r', label='+') except: pass try: x['Neg'].plot(kind='barh', color='b', label='-') except: pass plt.xlabel('Coefficient Magnitude') plt.title(title) plt.tight_layout() def plot_pb_given_a_(X, b, a, restrict=None, ax=None, title=None, color='k', scale=1000, ticks=None): if restrict is not None: data = X.loc[restrict, [a, b]] else: data = X[[a, b]] data = data.dropna() kde = gaussian_kde(data.T) if ticks is None: ticks = np.linspace(-100, 100, 9) a_ticks = [t for t in ticks if (t>=X[a].min() and t<=X[a].max())] b_ticks = [t for t in ticks if (t>=X[b].min() and t<=X[b].max())] a_support 
= a_ticks b_support = np.linspace(b_ticks[0], b_ticks[-1], 100) aa, bb = np.meshgrid(a_support, b_support) pab = kde([aa.ravel(), bb.ravel()]).reshape(len(b_support), len(a_support)) pab = pd.DataFrame(pab, index=b_support, columns=a_support) kde = gaussian_kde(data[a]) pa = pd.Series(kde(a_support), index=a_support) pbga = pab.div(pa) if ax is None: ax = plt.gca() for a_tick in a_ticks: l2d = ax.plot(b_support, a_tick + scale*(pbga[a_tick]), label=a_tick, color=color) color = l2d[0].get_color() ax.plot(b_support, np.ones_like(b_support)*a_tick, '--', color=color) ax.set_yticks(a_ticks) ax.set_xticks(b_ticks) ax.tick_params(reset=True, axis='y', length=5, width=1) ax.set_xlim(b_ticks[0], b_ticks[-1]) ax.set_xlabel(nicify(b)) ax.set_ylabel(nicify(a)) if title: ax.set_title(title) #ax.legend() return pab, pa def plot_difference(pab_0, pa_0, pab_1, pa_1, ax=None, crange=(-1, 1), scale=10): pbga_0 = pab_0.div(pa_0) pbga_1 = pab_1.div(pa_1) assert np.allclose(pbga_0.index, pbga_1.index) assert np.allclose(pbga_0.columns, pbga_1.columns) log2_odds = np.log2(pbga_1 / pbga_0) from matplotlib.cm import get_cmap from matplotlib.colors import Normalize from matplotlib.colorbar import ColorbarBase norm = Normalize(crange[0], crange[1], True) cmap = get_cmap('RdBu_r') for a_tick in log2_odds.columns: color = cmap(norm(log2_odds[a_tick].values)) l2d = ax.scatter(log2_odds.index, a_tick + (scale*pa_0[a_tick]*2**log2_odds[a_tick]), label=a_tick, c=color, s=1) #color = l2d[0].get_color() ax.plot(log2_odds.index, np.ones_like(log2_odds.index)*a_tick, '--', color='k') cb = plt.colorbar(l2d) cb.outline.set_visible(False) cb1 = ColorbarBase(cb.ax, cmap=cmap, norm=norm) cticks = np.linspace(*crange, 5) cb1.set_ticks(cticks) cb1.set_ticklabels(['%.2g' % (2**x) for x in cticks]) cb1.set_label('Odds Ratio') #cb.remove() ax.set_title('Ratio') def plot_conditionals(X, y, b, a, restrict=None, crange=(-2, 2), scale=10): covid = {0: y[y==0].index, 1: y[y==1].index} fig, ax = plt.subplots(1, 3, sharey=True, figsize=(15, 4)) if restrict is None: restrict = y.index restrict_0 = covid[0] & restrict restrict_1 = covid[1] & restrict pba_0, pa_0 = plot_pb_given_a_(X, b, a, restrict=restrict_0, ax=ax[0], title='COVID-', color='b') pba_1, pa_1 = plot_pb_given_a_(X, b, a, restrict=restrict_1, ax=ax[1], title='COVID+', color='r') ax[1].set_ylabel('') ax[2].set_xlabel(ax[1].get_xlabel()) plot_difference(pba_0, pa_0, pba_1, pa_1, ax=ax[2], crange=crange, scale=scale) plt.tight_layout() return pba_0, pba_1 def get_matches(X, match_list): return [x for x in X if any([m in x for m in match_list])] def check_lr(X, y, cv, sample_weight=None): from sklearn.linear_model import LogisticRegressionCV lrcv = LogisticRegressionCV(penalty='elasticnet', l1_ratios = np.linspace(0, 1, 5), Cs = np.logspace(-3, 3, 7), solver = 'saga', scoring = 'roc_auc', cv = cv, max_iter=10000) lrcv.fit(X, y, sample_weight=sample_weight) return pd.DataFrame(lrcv.scores_[True].mean(axis=0), index=lrcv.Cs_, columns=lrcv.l1_ratios_) def venn_covid(X, restrict, features, label, figsize=(5, 5)): indices = {} for feature in features: z = X.loc[restrict, feature] indices[feature] = set(z[z==1].index) labels = get_labels([indices[feature] for feature in features], fill='percent') labels = {k: v.replace('(','').replace(')','') for k, v in labels.items()} venn3(labels, names=nicify(features), figsize=figsize, fontsize=9) plt.gca().get_legend().remove() z = X.loc[restrict, features] z = z[z.sum(axis=1)==0].shape[0] / z.shape[0] plt.title('%s; None of the three = %.1f%%' % 
(label, z*100)) def kde_plot(df, x, restrict, label, color, ax=None, title=None, **kwargs): sns.set_style('whitegrid') data = df.loc[restrict, x].dropna() x_range = (np.min(df[x]), np.max(df[x])) ax = sns.kdeplot(data, clip=x_range, color=color, alpha=0.5, label=label, ax=ax, **kwargs) ax.set_xlim(*x_range) ax.set_xlabel(nicify(x), fontweight='bold') ax.set_ylabel('Probabilty density', fontweight='bold') if ax: ax.set_title(nicify(x) if title is None else title) return ax def joint_plot(df, x, y, restrict, label, maxx=1e-3, cmap='Reds', cbar=False, ax=None): sns.set_style('whitegrid') data = df.loc[restrict, [x, y]].dropna() x_range = (np.min(df[x]), np.max(df[x])) y_range = (np.min(df[y]), np.max(df[y])) ax = sns.kdeplot(data[x], data[y], shade=True, clip=[x_range, y_range], vmin=0, vmax=maxx, cmap=cmap, shade_lowest=True, alpha=0.5, ax=ax, n_levels=100, cbar=True, cbar_kws={'format': '%.2g', 'label': 'Probability density (x1000)', 'shrink': 0.8}) cax = plt.gcf().axes[-1] if cbar: cbar_ticks = cax.get_yticks() cax.set_yticklabels((cbar_ticks*1000).round(2)) else: cax.remove() ax.set_xlim(*x_range) ax.set_ylim(*y_range) ax.set_xlabel(nicify(x), fontweight='bold') ax.set_ylabel(nicify(y), fontweight='bold') ax.set_title(label, fontweight='bold') return ax def feature_hist(df, categories, feature, drop=None, bw=5, cut=0, ax=None, title=None, colors='rbgmck'): for i, (label, indices) in enumerate(categories.items()): ax = kde_plot(df, feature, indices, label, colors[i], lw=3, bw=bw, cut=cut, ax=ax, title=title) ax.legend(fontsize=9); def feature_contingency(df, categories, feature, drop=None, normalize=None, verbose=True): z = df[[feature]].copy() for label, indices in categories.items(): z.loc[indices, 'Group'] = label if drop: z = z[~z[feature].isin(drop)] c = contingency(z, [feature, 'Group'], verbose=verbose)[0] if normalize is not None: c = c.div(c.sum(axis=normalize), axis=1-normalize).round(2) try: c.index = [x.replace('\n', ' ') for x in c.index] except: pass try: c.columns = [x.replace('\n', ' ') for x in c.columns] except: pass c = c.rename(index={'M': 'Men', 'F': 'Women'}) return c def feature_compare(df, categories, feature): z = pd.DataFrame(index=list(categories), columns=pd.MultiIndex.from_product([categories, ['Δ', 'σ', 'seΔ', 'D', 'p']])) z.index.name = nicify(feature) for d1 in categories: for d2 in categories: x1 = df.loc[categories[d1], feature] x2 = df.loc[categories[d2], feature] delta = x1.mean() - x2.mean() d = cohen_d(x1, x2) p = mannwhitneyu(x1, x2).pvalue z.loc[d1, (d2, 'Δ')] = "%.2g" % delta z.loc[d1, (d2, 'σ')] = "%.2g" % (0 if not d>0 else delta/d) z.loc[d1, (d2, 'seΔ')] = "%.2g" % (delta/np.sqrt(len(x1)+len(x2))) z.loc[d1, (d2, 'D')] = "%.2g" % d z.loc[d1, (d2, 'p')] = "%.2g" % p if len(categories)==2: d1 = list(categories)[0] d2 = list(categories)[1] z = z.loc[d1, d2] z.name = nicify(feature) return z def features_compare(df, categories, features): assert len(categories) == 2 zs = [feature_compare(df, categories, feature) for feature in features] return pd.concat(zs, axis=1) def hist_or_contingency(df, categories, feature, drop=None, normalize=None): if (df.dtypes[feature] != 'object') and (df[feature].max() > 5 or df[feature].min() < -5): f = feature_hist else: f = feature_contingency return f(df, categories, feature, drop=None, normalize=None) def describe_three_clusters(df, feature, s, drop=None, normalize=None): smell_loss = (df['Smell_change_illness']<-80) smell_recovery = (df['Smell_recovery_illness']>30) r = (df['Recovery_y/n']==2) & 
df.index.to_series().isin(s['covid']) categories = {'Recovered Smell': r & smell_loss & smell_recovery, 'Nonrecovered Smell': r & smell_loss & ~smell_recovery, 'Intact Smell': r & ~smell_loss} return hist_or_contingency(df, categories, feature, drop=drop, normalize=normalize) def get_set(df, query): return set(df.query(query, engine='python').index) def diagnosis_joint_plots(df, feature1, feature2, r, s, maxx=3e-4): if r is None: r = set(df.index) fig, ax = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(6, 11.5)) for i, (diagnosis, label, cmap) in enumerate([('lab-covid', 'C19+', 'Reds'), ('non-covid', 'C19-', 'Reds')]): joint_plot(df, feature1, feature2, r & s[diagnosis], label, cmap=cmap, maxx=maxx, ax=ax[i], cbar=(i==0)) return ax def statsmodels_to_df(results, plot=False, title='', figsize=(10, 5), scale=None): summ = results.summary() df = pd.read_csv(StringIO(summ.tables[1].as_csv()), index_col=0) df.columns = df.columns.str.strip() df['abs_coef'] = df['coef'].abs() df.index = df.index.str.strip() df = df.sort_values('abs_coef', ascending=False) df = df.round(2) df['P>|z|'] = results.pvalues#.apply(lambda x: '%.1g'%x) df = df[df['abs_coef']>0] if scale is not None and scale is not False: df['coef'] /= scale try: df['std err'] /= scale df['[0.025'] /= scale df['0.975]'] /= scale except: pass df.index = nicify(list(df.index)) if plot: plt.figure(figsize=figsize) dfp = df.drop('Intercept') ax = dfp.sort_values('abs_coef', ascending=True).plot.barh(y='coef', xerr='std err', legend=None, capsize=4, ax=plt.gca()) labels = ax.get_yticklabels() ax.set_yticklabels('%s\n(p=%.1g)' % (label.get_text(), df['P>|z|'].iloc[-i-2]) for i, label in enumerate(labels)) ax.set_xlabel('Regression Coefficient', fontweight='bold') ax.set_title(title, fontweight='bold') df = df.drop('abs_coef', axis=1) for col in df: def fill(x): try: return '%.2g' % float(x) except: return None df[col] = df[col].apply(fill) return df def pooled_sd(x1, x2): n1 = len(x1) n2 = len(x2) s1 = np.std(x1) s2 = np.std(x2) num = (n1-1)*(s1**2) + (n2-1)*(s2**2) denom = n1 + n2 - 2 return np.sqrt(num/denom) def cohen_d(x1, x2): return (np.mean(x1) - np.mean(x2)) / pooled_sd(x1, x2) def sequential_features(clf, X, y, features, cv, Cs=np.logspace(-1, 3, 5)): """Return feature that maximizes cross-validated ROC AUC, then feature that maximizes it given inclusion of the first feature, and so ob""" roc_aucs = pd.DataFrame(columns=pd.MultiIndex.from_product([Cs, ['Rank', 'AUC']]), dtype='float') bar0 = tqdm(Cs) bar1 = trange(len(features)) bar2 = trange(len(features)) for C in bar0: clf.C = C features_used = [] features_remaining = features.copy() bar1.reset() for i in range(len(features)): bar1.update(1) best_auc = 0 best_feature = None bar2.reset() z = pd.Series(index=features_remaining) for j in range(len(features)): bar2.update(1) feature = features[j] if feature in features_remaining: auc = cross_val_score(clf, X[features_used + [feature]], y, cv=cv, scoring='roc_auc', n_jobs=cv.n_splits).mean() #auc += 0.003*('savory' in feature.lower()) z[feature] = auc if auc > best_auc: best_feature = feature best_auc = auc features_used.append(best_feature) features_remaining.remove(best_feature) #print(z.sort_values(ascending=False)) roc_aucs.loc[best_feature, (C, 'Rank')] = i+1 roc_aucs.loc[best_feature, (C, 'AUC')] = best_auc return roc_aucs def status_map(df, mapping, name): status = {} for k, v in mapping.items(): status.update({key: k for key in v}) df[name] = df.index.map(status.get) return df def get_tuple_feature_aucs(clf, X, y, n, 
sample_weight=None, only_binary=False, nicify_=True, add_to=None): if only_binary: symptoms = [x for x in X if 'Symptoms_' in x] else: symptoms = list(X) if n > 1: tuples = combinations_with_replacement(symptoms, n) else: tuples = symptoms s = pd.Series(index=tuples, dtype='float') for tup in tqdm(s.index): if n > 1: tup_ = list(set(tup)) # Get rid of duplicates else: tup_ = [tup] if add_to: tup_+= list(set(add_to)) clf.fit(X[tup_], y, sample_weight=sample_weight) s.loc[tup] = clf.scores_[True].mean() if n>1: s.index = pd.MultiIndex.from_tuples(s.index) if nicify_: s.index = s.index.map(lambda x: ' + '.join([nicify(xi) for xi in x])) else: if nicify_: s.index = nicify(list(s.index)) s = s.sort_values(ascending=False).round(3) s.index.name = 'Symptom set' df = s.to_frame('ROC AUC') return df def yg_week(df_gccr, offset=0, how='Onset_day'): days = (datetime.strptime('2020/04/01', '%Y/%m/%d') - datetime.strptime('2020/01/01', '%Y/%m/%d')).days days += offset return 1 + ((df_gccr[how].astype(int) - days)/7).astype(int).clip(0, 9999) def download_unzip_df(url): filehandle, _ = urllib.request.urlretrieve(url) zip_file_object = zipfile.ZipFile(filehandle, 'r') first_file = zip_file_object.namelist()[0] file = zip_file_object.open(first_file) return pd.read_csv(file, encoding='latin1', dtype='object') def download_yougov(): url = 'https://raw.githubusercontent.com/YouGov-Data/covid-19-tracker/master' yg_countries = pd.read_csv('%s/countries.csv' % url, header=None)[0] path = pathlib.Path('data/yougov') path.mkdir(parents=True, exist_ok=True) for country in tqdm(yg_countries): file_url = '%s/data/%s.csv' % (url, country.replace(' ', '-').replace('emerites', 'emirates')) #print(file_name) try: yg = pd.read_csv(file_url, encoding='latin1', dtype='object') except: try: zip_file_url = file_url[:-4]+'.zip' print(zip_file_url) yg = download_unzip_df(zip_file_url) except: raise Exception("Could not download or read %s" % file_name) yg.to_csv(path / ('yougov_%s.csv' % country)) return yg_countries def fix_yougov(yg): yg = yg[yg['qweek'].str.contains('week')].copy()#.dropna(subset=['qweek']) yg['week'] = yg['qweek'].apply(lambda x: x.split(' ')[1]).astype(int) try: yg['endtime'] = pd.to_datetime(yg['endtime'], format='%d/%m/%Y %H:%M') except: yg['endtime'] = pd.to_datetime(yg['endtime'], format='%Y-%m-%d %H:%M:%S') yg = yg[yg['endtime']<=YOUGOV_CUTOFF_DATE] return yg def get_yougov(df_gccr, countries): df = pd.DataFrame(index=countries) for country in countries: yg = pd.read_csv('data/yougov/yougov_%s.csv' % country, dtype='object') yg = fix_yougov(yg) country = 'usa' if country == 'united-states' else country country = 'uk' if country == 'united-kingdom' else country weights = df_gccr[df_gccr['Country_of_Residence']==country]['yg_week'].value_counts() for week in (set(weights.index) | set(yg['week'])): if week not in (set(weights.index) & set(yg['week'])): weights.loc[week] = 0 weight = weights[yg['week']] z = pd.get_dummies(yg[['i3_health', 'i4_health']]) p = z[[x for x in z.columns if 'positive' in x]] n = z[[x for x in z.columns if 'negative' in x]] total = p.sum().sum() + n.sum().sum() p = p.mul(weight.values, axis=0).sum().sum() n = n.mul(weight.values, axis=0).sum().sum() if p+n: df.loc[country, 'YG_fp'] = p/(p+n) else: df.loc[country, 'YG_fp'] = None df.loc[country, 'YG_N'] = total df = df.drop(['united-states', 'united-kingdom']) df.index.name = 'Country' df = df.sort_values('YG_fp') return df def compare_yougov(df_gccr, df_yg, s): df_gccr['status'] = -1 #df_gccr.loc[s['lab-covid'] | 
s['clinical-covid'], 'status'] = 1 df_gccr.loc[s['lab-covid'], 'status'] = 1 df_gccr.loc[s['non-covid'], 'status'] = 0 df_gccr = df_gccr[df_gccr['status'] >= 0] #df_gccr['status'] = df_gccr['status'].astype(int) df_gccr = df_gccr.groupby('Country_of_Residence').agg({'status': ['mean', 'count']})['status'] df_gccr.columns = ['GCCR_fp', 'GCCR_N'] df_gccr = df_gccr[df_gccr['GCCR_N']>=10] df = df_gccr.join(df_yg, how='outer') return df def plot_fp(fp, drop=None, plot=True, verbose=False, ax=None, break_axis=False): if ax is None and plot: ax = plt.gca() if drop: fp = fp.copy() fp = fp.drop(drop) for kind in ['GCCR', 'YG']: p = fp['%s_fp' % kind] n = fp['%s_N' % kind] fp['%s_se' % kind] = np.sqrt(p*(1-p)/n) fp['%s_logodds_fp' % kind] = np.log(p/(1-p)) if plot: ax.errorbar(fp['GCCR_fp'], fp['YG_fp'], xerr=fp['GCCR_se'], yerr=fp['YG_se'], marker='o', ls='none', alpha=0.5); ax.set_xlabel('GCCR Fraction of COVID Tests Positive') ax.set_ylabel('YouGov Fraction of\nCOVID Tests Positive'); fp_ = fp.dropna() #lr = LinearRegression() #lr.fit(fp_[['GCCR_logodds_fp']], fp_[['YG_logodds_fp']], sample_weight=1/fp_['GCCR_se']**2) #x = np.linspace(-10, 10, 1000) #y = lr.predict(x.reshape(-1, 1)) #from scipy.special import expit #plt.plot(expit(x), expit(y), '--') from scipy.stats import spearmanr, pearsonr def pearson_spearman(x, y): return pearsonr(x, y)[0], spearmanr(x, y)[0] if verbose: print("Log-Odds R = %.3g; Rho=%.3g" % pearson_spearman(fp_['GCCR_logodds_fp'], fp_['YG_logodds_fp'])) print("Raw R = %.3g; Rho=%.3g" % pearson_spearman(fp_['GCCR_fp'], fp_['YG_fp'])) return pearson_spearman(fp_['GCCR_fp'], fp_['YG_fp'])[0] def cluster_summary(df, clusters, s, feature): z = pd.DataFrame(index=list(clusters), columns=['female', 'male']) for cluster in z.index: for gender in z.columns: restrict = clusters[cluster] & s[gender] mean = df.loc[restrict, feature].mean() std = df.loc[restrict, feature].std() z.loc[cluster, gender] = '%.3g +/- %.2g' % (mean, std) z.index = [x.replace('\n', ' ') for x in z.index] z.index.name = nicify(feature) z.columns = ['Women', 'Men'] return z def exclusive_best_tuples(tuple_feature_aucs): z = tuple_feature_aucs.copy() z.index = pd.MultiIndex.from_tuples( list(tuple_feature_aucs.index.map(lambda x: x.split(' + ')))) i = 0 while True: index = z.index[i] z = z.drop([x for x in z.index[i+1:] if x[0] in index or x[1] in index]) i += 1 if i >= z.shape[0]: break return z def compare_lr_model_roc_aucs(X, y, cv, feature_sets, Cs): z = pd.DataFrame(index=list(Cs), columns=feature_sets.keys()) z.index.name = 'C' for C in Cs: # Use the same model again lr = LogisticRegression(penalty='elasticnet', solver='saga', C=C, l1_ratio=1, max_iter=10000, random_state=0) for label, features in feature_sets.items(): z.loc[C, label] = cross_val_score(lr, X[features], y, scoring='roc_auc', cv=cv, n_jobs=cv.n_splits).mean().round(3) return z def single_and_cumulative_plot(single_aucs, single_xrange, cumul_aucs, cumul_xrange, n_features, classes, C=10, figsize=(14, 6)): # Figure layout #sns.set_style('whitegrid') fig = plt.figure(figsize=figsize) width_ratios = [single_xrange[1]-single_xrange[0], cumul_xrange[1]-cumul_xrange[0]] spec = gridspec.GridSpec(ncols=2, nrows=1, width_ratios=width_ratios) # First panel ax0 = fig.add_subplot(spec[0]) feature_names = list(single_aucs.index) ax0.plot(single_aucs, feature_names, 'ko', markersize=10) ax0.hlines(y=range(n_features), xmin=single_xrange[0], xmax=single_aucs, color='gray', alpha=0.2, linewidth=5) ax0.set_ylim(n_features-0.5, -0.5) 
ax0.set_xlim(*single_xrange) ax0.set_xlabel('ROC Area Under Curve') #ax0.set_title('Top single-feature models') ax0.set_yticklabels(feature_names) def fix_ticklabels(axes): for ticklabel in axes.get_yticklabels(): text = ticklabel.get_text() ticklabel.set_weight("normal") if text in classes['features']['chemosensory']: ticklabel.set_weight("bold") if classes['dtypes'].get(text, '') not in ['binary', 'categorical']: ticklabel.set_style("oblique") fix_ticklabels(ax0) ax0.set_yticklabels(nicify(feature_names)) # Second panel if cumul_xrange[0] == cumul_xrange[1]: return 0 ax1 = fig.add_subplot(spec[1]) z = cumul_aucs[C].sort_values('Rank') z['Rank'] = np.arange(1, z.shape[0]+1) z.plot(x='AUC', y='Rank', color='k', marker='o', ax=ax1) ax1.set_ylim(n_features+0.5, 0.5) ax1.set_ylabel('Cumulative # Features') ax1.set_xlabel('ROC AUC') ax1.set_xlim(*cumul_xrange) ax1.set_yticks(range(1, n_features+1)) ax1.legend().remove() #ax1.set_title('Top cumulative features model') twinax = ax1.twinx() twinax.set_ylim(n_features+0.5, 0.5) twinax.set_yticks(range(1, n_features+1)) feature_names = list(cumul_aucs[C].sort_values('Rank').index) twinax.set_yticklabels(feature_names) fix_ticklabels(twinax) feature_names = nicify(feature_names) twinax.set_yticklabels(['+%s' % f if i else f for i, f in enumerate(feature_names)]); plt.tight_layout() def single_plot(single_aucs, single_xrange, n_features, classes, figsize=(10, 6), ax=None, delta=False): if ax is None: fig = plt.figure(figsize=figsize) ax = plt.gca() feature_names = list(single_aucs.index) ax.plot(single_aucs, feature_names, 'ko', markersize=10) ax.hlines(y=range(n_features), xmin=single_xrange[0], xmax=single_xrange[1], color='gray', alpha=0.2, linewidth=5) ax.set_ylim(n_features-0.5, -0.5) ax.set_xlim(*single_xrange) if delta: ax.set_xlabel('Δ ROC AUC') else: ax.set_xlabel('ROC AUC') ax.set_yticklabels(feature_names) def fix_ticklabels(axes): for ticklabel in axes.get_yticklabels(): text = ticklabel.get_text() ticklabel.set_weight("normal") if text in classes['features']['chemosensory']: ticklabel.set_weight("bold") if classes['dtypes'].get(text, '') not in ['binary', 'categorical']: ticklabel.set_style("oblique") fix_ticklabels(ax) ax.set_yticklabels(nicify(feature_names)) return ax def plot_cumul_roc_aucs(roc_aucs): for C in roc_aucs.columns.levels[0]: roc_aucs[C].sort_values('Rank')['AUC'].plot(label='C=%.3g' % C) plt.legend() ns = range(0, roc_aucs.shape[0], 5) plt.xticks(ns, ['%d' % (1+x) for x in ns]) plt.xlabel('# Features') def fix_quartile_lines(ax): for i, l in enumerate(ax.lines): if i % 3 != 1: l.set_linestyle('--') else: l.set_linestyle('-') l.set_linewidth(2) if int(i/3) % 2 == 0: l.set_color('white') else: l.set_color('black') def mutual_info(contingency): pxy = (contingency+1e-25) / contingency.sum().sum() px = pxy.sum(axis=1) py = pxy.sum(axis=0) I = 0 for i in range(len(px)): for j in range(len(py)): I += pxy.iloc[i, j] * np.log2(pxy.iloc[i, j] / (px.iloc[i]*py.iloc[j])) return I def check_splits(clf, X, y, features, s, set_keys): for train_keys in combinations_with_replacement(set_keys, len(set_keys)-1): train_keys = set(train_keys) train = set.union(*[s[x] for x in train_keys]) for test_keys in combinations_with_replacement(set_keys, len(set_keys)-1): test_keys = set(test_keys) if not train_keys.intersection(test_keys): test = set.union(*[s[x] for x in test_keys]) clf.fit(X.loc[train, features], y[train]) auc = roc_auc_score(y[test], clf.predict_proba(X.loc[test, features])[:, 1]) print( "Trained on: %s; Tested on: %s; AUC=%.3g" % 
(train_keys, test_keys, auc) ) def odds_ratio(df, sets, feature): df_c = feature_contingency(df, sets, feature, verbose=False) return (df_c.loc[1, "C19+"] / df_c.loc[1, "C19-"]) / ( df_c.loc[0, "C19+"] / df_c.loc[0, "C19-"] ) def compute_gccr_yougov_corr(df, s, yg_countries): country_of_residence =
pd.read_csv("data/processed/country-of-residence.csv", index_col=0)
pandas.read_csv
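The completed call above is a plain `pandas.read_csv` with the first column used as the index (presumably the respondent `UniqueID`). A minimal illustration of that pattern using invented CSV text rather than the real data/processed/country-of-residence.csv file:

# Sketch of read_csv with index_col=0; the CSV text is invented for illustration.
from io import StringIO
import pandas as pd

csv_text = "UniqueID,Country_of_Residence\n1,italy\n2,uk\n"
country_of_residence = pd.read_csv(StringIO(csv_text), index_col=0)
print(country_of_residence.loc[2, "Country_of_Residence"])  # uk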
""" Module containing the core system of encoding and creation of understandable dataset for the recommender system. """ import joblib import pandas as pd from recipe_tagger import recipe_waterfootprint as wf from recipe_tagger import util from sklearn import cluster from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.cluster import KMeans from sklearn.preprocessing import MinMaxScaler from stop_words import get_stop_words from tqdm import tqdm from cf_recommender import CFRecommender from configuration import load_configuration class Encoder: """ Class that contains all the encoding and the creation of data files from the dataset provided by the user in the input directory and the mapping specified into the configuration file. :param language: the language of the dataset. """ def __init__(self, language): """ Constructor method for the class. It loads all the necessary path for the files provided into the configuration file. """ config = load_configuration() self.language = language self.path_orders = config["path_orders"] self.path_recipes = config["path_recipes"] self.path_embedding = config["path_embedding"] self.path_user_scores = config["path_user_scores"] self.input_path_orders = config["input_path_orders"] self.input_path_recipes = config["input_path_recipes"] print(f">> Initialize encoder, language: {self.language} <<") def __process_ingredients(self, ingredients): """ Process the provided ingredients string. :param ingredients: a string composed by ingredients comma separated. :return: a list containing the processed ingredients. """ return [ util.process_ingredients(ing, language=self.language, stem=False) for ing in ingredients.split(",") ] def __generate_ingredients_embedding(self, column_name="ingredient"): """ Generate the ingredients embedding TF-IDF matrix and save it to a pickle file in the default folder. :param column_name: the name of the column that contains the ingredients in the recipe dataset. :return: None """ recipes_df = pd.read_csv(self.input_path_recipes)[[column_name]] recipes_df[column_name] = recipes_df[column_name].apply( self.__process_ingredients ) recipes_df[column_name] = recipes_df[column_name].apply(", ".join) tfidf = TfidfVectorizer(stop_words=get_stop_words(self.language)) matrix = tfidf.fit_transform(recipes_df[column_name]) joblib.dump(matrix, self.path_embedding) def __get_user_order_quantity(self, df): """ Reformat a dataset containing order history into a dictionary containing user ratings for recipes that he has ordered. The rating is computed based on how many times he has ordered the recipe compared with the recipes with most orders. :param df: the dataframe containing user orders. :return: a dictionary containing user recipes ratings. """ data = {"user_id": [], "item_id": [], "rating": []} for user in df["user_id"].unique(): user_df = df.query(f"user_id == {user}") user_df = user_df.groupby("item_id").count().reset_index() max_rating = user_df["user_id"].max() user_df["rating"] = user_df.apply( lambda x: int((x["user_id"] * 4) / max_rating) + 1, axis=1 ) data["user_id"].extend([user] * user_df.shape[0]) data["item_id"].extend(user_df["item_id"]) data["rating"].extend(user_df["rating"]) return data def __get_wf(self, ingredients, quantities): """ Return the total water footprint of a single recipe based on its ingredients and their quantities. :param ingredients: a list containing all the ingredients. :param quantities: a list containing all ingredients quantities. :return: the water footprint of the recipe. 
""" while len(ingredients) > len(quantities): quantities.append("5ml") return wf.get_recipe_waterfootprint( ingredients, quantities, online_search=False, language=self.language ) def __get_recipe_category(self, index, total): """ Return the category of a recipe based on their position on the sorted dataset. :param index: the index of the recipe in the dataset. :param total: the total number of recipes in the dataset. :return: the category of the recipe. (A, B, C, D, E) """ categories = ["A", "B", "C", "D", "E"] threshold = total / len(categories) return categories[int(index / threshold)] def __get_dataset_reduced(self, df, min_user_orders=5, min_recipe_orders=3): """ Return the dataset without recipes and orders that don't match the restrictions. Restrictions are on minimum orders made by user and minimum orders for a recipe. :param df: the dataframe containing all the orders. :param min_user_orders: the minimum number of user orders. Default is 5. :param min_user_orders: the minimum number of recipe orders. Default is 3. :return: a dataframe without orders that don't match guidelines. """ filter_recipe = df["item_id"].value_counts() > min_recipe_orders filter_recipe = filter_recipe[filter_recipe].index.tolist() filter_user = df["user_id"].value_counts() > min_user_orders filter_user = filter_user[filter_user].index.tolist() return df[ (df["user_id"].isin(filter_user)) & (df["item_id"].isin(filter_recipe)) ] def __generate_orders( self, columns_map, rating=True, ): """ Generate and save to pickle file the new orders dataset, formatted and reduced following the previous guidelines. If the input dataframe doesn't contains yet the user ratings it will transform it on a rating dataset. :param columns_map: a dictionary containing the mapping of the column in the input dataset. :param rating: the presence or not of user ratings. :return: None """ df = pd.read_csv(self.input_path_orders) df = df.rename(columns={v: k for k, v in columns_map.items()}) if rating: df = df[["user_id", "item_id", "rating"]] else: df = df[["user_id", "item_id"]] df = pd.DataFrame(self.__get_user_order_quantity(df)) df = self.__get_dataset_reduced(df) df = df.rename(columns={"item_id": "id"}) df.to_pickle(self.path_orders) def __generate_recipes_wf_category(self, columns_map): """ Generate and save to pickle file the new recipes dataset, with formatted ingredients and quantities, with every water footprint and category of single recipes. :param columns_map: a dictionary containing the mapping of the column in the input dataset. :return: None """ if columns_map is None: columns_map = { "id": "id", "name": "name", "ingredients": "ingredients", "quantity": "ingredients_quantity", } tqdm.pandas() df = pd.read_csv(self.input_path_recipes) df = df.rename(columns={v: k for k, v in columns_map.items()}) # df = df[["id", "name", "ingredients", "quantity", "wf"]] df = df[["id", "name", "ingredients", "quantity"]] df["ingredients"] = df["ingredients"].apply(self.__process_ingredients) df["quantity"] = df["quantity"].apply( lambda x: [q.strip() for q in x.split(",")] ) df["wf"] = df.progress_apply( lambda x: self.__get_wf(x["ingredients"], x["quantity"]), axis=1 ) df = df.sort_values(by="wf", ascending=True).reset_index(drop=True) df["category"] = df.apply( lambda x: self.__get_recipe_category(x.name, df.shape[0]), axis=1 ) df.to_pickle(self.path_recipes) def __generate_collaborative_filtering_model(self): """ Generate and save as a pickle file the collaborative filtering model. 
:return: None """ cf_recommender = CFRecommender() cf_recommender.create_cf_model(save=True) def __generate_user_score_data(self): """ Generate and save to pickle file the score of the users. The embedding contains the id of the user with the associated score provided by a KMeans clustering algorithm on normalized and weighted user data history orders. :return None: """ orders = pd.read_pickle(self.path_orders) recipes = pd.read_pickle(self.path_recipes) df =
pd.merge(orders, recipes, on="id")
pandas.merge
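The record above completes with pd.merge(orders, recipes, on="id"), joining the pickled ratings and recipe frames on their shared key. A minimal, self-contained sketch of that join pattern, using hypothetical toy frames rather than the project's real data:

import pandas as pd

# Toy stand-ins for the orders/recipes frames joined on a shared "id" column.
orders = pd.DataFrame({"user_id": [1, 1, 2], "id": [10, 11, 10], "rating": [5, 3, 4]})
recipes = pd.DataFrame({"id": [10, 11], "name": ["pasta", "salad"], "wf": [120.5, 80.0]})

# Default how="inner": keep only ids present in both frames; columns from both
# sides are carried into the result, so each rating row gains its recipe name
# and water footprint.
merged = pd.merge(orders, recipes, on="id")
print(merged[["user_id", "id", "rating", "name", "wf"]])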
from datetime import datetime import logging import typing import hydra import pandas as pd from fetcher.utils import Fields from fetcher.source_utils import fetch_source, process_source_responses from fetcher.sources import build_sources # Indices TS = 'TIMESTAMP' STATE = Fields.STATE.name class Fetcher: def __init__(self, cfg): '''Initialize source information''' self.dataset = cfg.dataset # store dataset config self.sources = build_sources( cfg.dataset.sources_file, cfg.dataset.mapping_file, cfg.dataset.extras_module) def has_state(self, state): return state in self.sources def fetch_all(self, states): results = {} success = 0 failures = [] for state in states: if not self.has_state(state): # Nothing to do for a state without sources continue try: res, data = self.fetch_state(state) if res: if data: results[state] = data success += 1 else: # failed parsing logging.warning("Failed parsing %s", state) failures.append(state) except Exception: logging.error("Failed to fetch %s", state, exc_info=True) failures.append(state) logging.info("Fetched data for {} states".format(success)) if failures: logging.info("Failed to fetch: %r", failures) return results def fetch_state(self, state): ''' Fetch data for a single state, returning a tuple of (fetched_result, parsed_data) If there's no query for the state: return (None, _) ''' logging.debug("Fetching: %s", state) res = None source = self.sources.get(state) if not source or not source.queries: return res, {} results = fetch_source(source) data = process_source_responses(source, results) return results, data def _fix_index_and_columns(index, columns): index = index if isinstance(index, str) else list(index) if isinstance(index, list) and len(index) == 1: index = index[0] # make sure all index columns are also in columns if isinstance(index, str) and index not in columns: columns.insert(0, index) elif isinstance(index, list): for c in index: if c not in columns: columns.insert(0, c) return index def build_dataframe(results, states_to_index, dataset_cfg, output_date_format, filename=None): # TODO: move file generation out of here # results is a *dict*: state -> [] if not results: return {} # need to prepare the index and preparing the data, and the columns # data: a list of dicts # index: a string or a list of len 2+ # columns: add state even if not listed, if it's in index index = dataset_cfg.index columns = dataset_cfg.fields index = _fix_index_and_columns(index, columns) items = [] for _, v in results.items(): if isinstance(v, typing.List): items.extend(v) elif isinstance(v, typing.Dict): items.append(v) else: logging.warning("This shouldnt happen: %r", v) df = pd.DataFrame(items, columns=columns) # special casing here, because of groupby+dropna bug if isinstance(index, list) and len(index) > 1: for c in index: df[c] = df[c].fillna('n/a') df = df.set_index(index) df = df.groupby(level=df.index.names, dropna=False).last() # Notice: Reindexing and then sorting means that we're always sorting # the index, and not using the order that comes from configuration if not isinstance(index, list): # Reindex based on the given states, when we don't have # additional columns to index to (i.e., do not do it for backfill) df = df.reindex(
pd.Series(states_to_index, name=STATE)
pandas.Series
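The completion above reindexes the assembled frame by a named Series of states, so every requested state gets a row even when no source returned data for it. A small sketch of that behaviour, with illustrative values and names loosely borrowed from the record (STATE, states_to_index):

import pandas as pd

STATE = "STATE"
df = pd.DataFrame({"positive": [10, 20]},
                  index=pd.Index(["CA", "WA"], name=STATE))

# Reindexing by a named Series keeps the index name and inserts NaN rows
# for states that were requested but absent from the fetched results.
states_to_index = ["CA", "OR", "WA"]
df = df.reindex(pd.Series(states_to_index, name=STATE))
print(df)  # CA and WA keep their counts; OR appears as a NaN row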
# Author: <NAME> # Email: <EMAIL> import sklearn.utils as sk import pandas as pd import numpy as np import matplotlib.pyplot as plt import pickle import os from glob import glob from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import train_test_split, GridSearchCV from collections import defaultdict from sklearn.inspection import plot_partial_dependence from sklearn.inspection import partial_dependence from sklearn.inspection import permutation_importance from mpl_toolkits.mplot3d import axes3d from Python_Files.hydrolibs import rasterops as rops from Python_Files.hydrolibs import model_analysis as ma def create_dataframe(input_file_dir, input_gw_file, output_dir, label_attr, column_names=None, pattern='*.tif', exclude_years=(), exclude_vars=(), make_year_col=True, ordering=False, load_gw_info=False, remove_na=True): """ Create dataframe from file list :param input_file_dir: Input directory where the file names begin with <Variable>_<Year>, e.g, ET_2015.tif :param input_gw_file: Input GMD (Kansas) or AMA/INA (Arizona) shape file :param output_dir: Output directory :param label_attr: Label attribute present in the GW shapefile. Set 'GMD_label' for Kansas and 'NAME_ABBR' for Arizona :param column_names: Dataframe column names, these must be df headers :param pattern: File pattern to look for in the folder :param exclude_years: Exclude these years from the dataframe :param exclude_vars: Exclude these variables from the dataframe :param make_year_col: Make a dataframe column entry for year :param ordering: Set True to order dataframe column names :param load_gw_info: Set True to load previously created GWinfo raster containing the name of the GMD (Kansas) or AMA/INA (Arizona) regions :param remove_na: Set False to disable NA removal :return: GMD Numpy array :return: Pandas dataframe """ raster_file_dict = defaultdict(lambda: []) for f in glob(input_file_dir + pattern): sep = f.rfind('_') variable, year = f[f.rfind(os.sep) + 1: sep], f[sep + 1: f.rfind('.')] if variable not in exclude_vars and int(year) not in exclude_years: raster_file_dict[int(year)].append(f) raster_dict = {} flag = False years = sorted(list(raster_file_dict.keys())) df = None raster_arr = None gw_arr = rops.get_gw_info_arr(raster_file_dict[years[0]][0], input_gw_file, output_dir=output_dir, label_attr=label_attr, load_gw_info=load_gw_info) gw_arr = gw_arr.ravel() for year in years: file_list = raster_file_dict[year] for raster_file in file_list: raster_arr = rops.read_raster_as_arr(raster_file, get_file=False) raster_arr = raster_arr.reshape(raster_arr.shape[0] * raster_arr.shape[1]) variable = raster_file[raster_file.rfind(os.sep) + 1: raster_file.rfind('_')] raster_dict[variable] = raster_arr if make_year_col: raster_dict['YEAR'] = [year] * raster_arr.shape[0] if not flag: df = pd.DataFrame(data=raster_dict) flag = True else: df = df.append(pd.DataFrame(data=raster_dict)) df['GW_NAME'] = gw_arr.tolist() * len(years) if remove_na: df = df.dropna(axis=0) df = reindex_df(df, column_names=column_names, ordering=ordering) out_df = output_dir + 'raster_df.csv' df.to_csv(out_df, index=False) return df def reindex_df(df, column_names, ordering=False): """ Reindex dataframe columns :param df: Input dataframe :param column_names: Dataframe column names, these must be df headers :param ordering: Set True to apply ordering :return: Reindexed dataframe """ if not column_names: column_names = df.columns ordering = True if ordering: column_names = sorted(column_names) return df.reindex(column_names, 
axis=1) def get_rf_model(rf_file): """ Get existing RF model object :param rf_file: File path to RF model :return: RandomForestRegressor """ return pickle.load(open(rf_file, mode='rb')) def split_data_train_test_ratio(input_df, pred_attr='GW', shuffle=True, random_state=0, test_size=0.2, outdir=None, test_year=None, test_gw=None, use_gw=False): """ Split data based on train-test percentage :param input_df: Input dataframe :param pred_attr: Prediction attribute name :param shuffle: Default True for shuffling :param random_state: Random state used during train test split :param test_size: Test data size percentage (0<=test_size<=1) :param outdir: Set path to store intermediate files :param test_year: Build test data from only this year :param test_gw: Build test data from only this GMD or AMA/INA region, use_gw must be set to True :param use_gw: Set True to build test data from only test_gmd :return: X_train, X_test, y_train, y_test """ years = set(input_df['YEAR']) gws = set(input_df['GW_NAME']) x_train_df = pd.DataFrame() x_test_df = pd.DataFrame() y_train_df = pd.DataFrame() y_test_df = pd.DataFrame() flag = False if (test_year in years) or (use_gw and test_gw in gws): flag = True selection_var = years selection_label = 'YEAR' test_var = test_year if use_gw: selection_var = gws selection_label = 'GW_NAME' test_var = test_gw for svar in selection_var: selected_data = input_df.loc[input_df[selection_label] == svar] y = selected_data[pred_attr] x_train, x_test, y_train, y_test = train_test_split(selected_data, y, shuffle=shuffle, random_state=random_state, test_size=test_size) x_train_df = x_train_df.append(x_train) if (flag and test_var == svar) or not flag: x_test_df = x_test_df.append(x_test) y_test_df = pd.concat([y_test_df, y_test]) y_train_df = pd.concat([y_train_df, y_train]) if outdir: x_train_df.to_csv(outdir + 'X_Train.csv', index=False) x_test_df.to_csv(outdir + 'X_Test.csv', index=False) y_train_df.to_csv(outdir + 'Y_Train.csv', index=False) y_test_df.to_csv(outdir + 'Y_Test.csv', index=False) return x_train_df, x_test_df, y_train_df[0].ravel(), y_test_df[0].ravel() def split_data_attribute(input_df, pred_attr='GW', outdir=None, test_years=(2016, ), test_gws=('DIN',), use_gws=False, shuffle=True, random_state=0, spatio_temporal=False): """ Split data based on a particular attribute like year or GMD :param input_df: Input dataframe :param pred_attr: Prediction attribute name :param outdir: Set path to store intermediate files :param test_years: Build test data from only these years :param test_gws: Build test data from only these GMDs or AMA/INA regions, use_gws must be set to True :param use_gws: Set True to build test data from only test_gws :param shuffle: Set False to stop data shuffling :param random_state: Seed for PRNG :param spatio_temporal: Set True to build test from both test_years and test_gws :return: X_train, X_test, y_train, y_test """ years = set(input_df['YEAR']) gws = set(input_df['GW_NAME']) x_train_df = pd.DataFrame() x_test_df = pd.DataFrame() selection_var = years selection_label = 'YEAR' test_vars = test_years if use_gws: selection_var = gws selection_label = 'GW_NAME' test_vars = test_gws for svar in selection_var: selected_data = input_df.loc[input_df[selection_label] == svar] x_t = selected_data if svar not in test_vars: x_train_df = x_train_df.append(x_t) else: x_test_df = x_test_df.append(x_t) if spatio_temporal and use_gws: for year in test_years: x_test_new = x_train_df.loc[x_train_df['YEAR'] == year] x_test_df = x_test_df.append(x_test_new) 
x_train_df = x_train_df.loc[x_train_df['YEAR'] != year] y_train_df = x_train_df[pred_attr] y_test_df = x_test_df[pred_attr] if shuffle: x_train_df = sk.shuffle(x_train_df, random_state=random_state) y_train_df = sk.shuffle(y_train_df, random_state=random_state) x_test_df = sk.shuffle(x_test_df, random_state=random_state) y_test_df = sk.shuffle(y_test_df, random_state=random_state) if outdir: x_train_df.to_csv(outdir + 'X_Train.csv', index=False) x_test_df.to_csv(outdir + 'X_Test.csv', index=False) y_train_df.to_csv(outdir + 'Y_Train.csv', index=False) y_test_df.to_csv(outdir + 'Y_Test.csv', index=False) return x_train_df, x_test_df, y_train_df.to_numpy().ravel(), y_test_df.to_numpy().ravel() def create_pdplots(x_train, rf_model, outdir, plot_3d=False, descriptive_labels=False): """ Create partial dependence plots :param x_train: Training set :param rf_model: Random Forest model :param outdir: Output directory for storing partial dependence data :param plot_3d: Set True for creating pairwise 3D plots :param descriptive_labels: Set True to get descriptive labels :return: None """ print('Plotting...') feature_names = x_train.columns.values.tolist() plot_labels = {'AGRI': 'AGRI', 'URBAN': 'URBAN', 'SW': 'SW', 'SSEBop': 'ET (mm)', 'P': 'P (mm)', 'Crop': 'CC', 'WS_PA': 'WS_PA', 'WS_PA_EA': 'WS_PA_EA'} if descriptive_labels: plot_labels = {'AGRI': 'Agriculture density', 'URBAN': 'Urban density', 'SW': 'Surface water density', 'ET': 'Evapotranspiration (mm)', 'P': 'Precipitation (mm)'} feature_indices = range(len(feature_names)) feature_dict = {} if plot_3d: x_train = x_train[:500] for fi in feature_indices: for fj in feature_indices: feature_check = (fi != fj) and ((fi, fj) not in feature_dict.keys()) and ((fj, fi) not in feature_dict.keys()) if feature_check: print(feature_names[fi], feature_names[fj]) feature_dict[(fi, fj)] = True feature_dict[(fj, fi)] = True f_pefix = outdir + 'PDP_' + feature_names[fi] + '_' + feature_names[fj] saved_files = glob(outdir + '*' + feature_names[fi] + '_' + feature_names[fj] + '*') if not saved_files: pdp, axes = partial_dependence(rf_model, x_train, features=(fi, fj)) x, y = np.meshgrid(axes[0], axes[1]) z = pdp[0].T np.save(f_pefix + '_X', x) np.save(f_pefix + '_Y', y) np.save(f_pefix + '_Z', z) else: x = np.load(f_pefix + '_X.npy') y = np.load(f_pefix + '_Y.npy') z = np.load(f_pefix + '_Z.npy') fig = plt.figure() ax = axes3d.Axes3D(fig) surf = ax.plot_surface(x, y, z, cmap='viridis', edgecolor='k') ax.set_xlabel(plot_labels[feature_names[fi]]) ax.set_ylabel(plot_labels[feature_names[fj]]) ax.set_zlabel('GW Pumping (mm)') plt.colorbar(surf, shrink=0.3, aspect=5) plt.show() else: fnames = [] for name in feature_names: fnames.append(plot_labels[name]) plot_partial_dependence(rf_model, features=feature_indices, X=x_train, feature_names=fnames, n_jobs=-1) plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0) plt.show() def rf_regressor(input_df, out_dir, n_estimators=500, random_state=0, bootstrap=True, max_features=None, test_size=0.2, pred_attr='GW', shuffle=True, plot_graphs=False, plot_3d=False, plot_dir=None, drop_attrs=(), test_case='', test_year=None, test_gw=None, use_gw=False, split_attribute=True, load_model=True, calc_perm_imp=False, spatio_temporal=False): """ Perform random forest regression :param input_df: Input pandas dataframe :param out_dir: Output file directory for storing intermediate results :param n_estimators: RF hyperparameter :param random_state: RF hyperparameter :param bootstrap: RF hyperparameter :param max_features: RF hyperparameter :param 
test_size: Required only if split_yearly=False :param pred_attr: Prediction attribute name in the dataframe :param shuffle: Set False to stop data shuffling :param plot_graphs: Plot Actual vs Prediction graph :param plot_3d: Plot pairwise 3D partial dependence plots :param plot_dir: Directory for storing PDP data :param drop_attrs: Drop these specified attributes :param test_case: Used for writing the test case number to the CSV :param test_year: Build test data from only this year. Use tuple of years to split train and test data using #split_data_attribute :param test_gw: Build test data from only this GMD (Kansas) or AMA/INA (Arizona) region, use_gw must be set to True. Use tuple of years to split train and test data using #split_data_attribute :param use_gw: Set True to build test data from only test_gw :param split_attribute: Split train test data based on a particular attribute like year or GMD :param load_model: Load an earlier pre-trained RF model :param calc_perm_imp: Set True to get permutation importances on train and test data :param spatio_temporal: Set True to build test from both test_years and test_gws :return: Random forest model """ saved_model = glob(out_dir + '*rf_model*') if load_model and saved_model: regressor = get_rf_model(saved_model[0]) x_train = pd.read_csv(out_dir + 'X_Train.csv') y_train = pd.read_csv(out_dir + 'Y_Train.csv') x_test = pd.read_csv(out_dir + 'X_Test.csv') y_test =
pd.read_csv(out_dir + 'Y_Test.csv')
pandas.read_csv
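The record above reloads previously saved train/test splits with pd.read_csv. A minimal round-trip sketch (the file name is hypothetical, and dtypes are re-inferred from the CSV text on load):

import pandas as pd

y_test = pd.DataFrame({"GW": [12.3, 45.6, 7.8]})
y_test.to_csv("Y_Test.csv", index=False)  # persist without the RangeIndex

loaded = pd.read_csv("Y_Test.csv")  # parses the header row and infers float64 for GW
assert loaded.shape == (3, 1)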
import numpy as np import pandas as pd import pytest from pandas.testing import assert_frame_equal @pytest.fixture def df_checks(): """fixture dataframe""" return pd.DataFrame( { "famid": [1, 1, 1, 2, 2, 2, 3, 3, 3], "birth": [1, 2, 3, 1, 2, 3, 1, 2, 3], "ht1": [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], "ht2": [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9], } ) @pytest.fixture def df_multi(): """MultiIndex dataframe fixture.""" return pd.DataFrame( { ("name", "a"): {0: "Wilbur", 1: "Petunia", 2: "Gregory"}, ("names", "aa"): {0: 67, 1: 80, 2: 64}, ("more_names", "aaa"): {0: 56, 1: 90, 2: 50}, } ) def test_column_level_wrong_type(df_multi): """Raise TypeError if wrong type is provided for column_level.""" with pytest.raises(TypeError): df_multi.pivot_longer(index="name", column_level={0}) @pytest.mark.xfail(reason="checking is done within _select_columns") def test_type_index(df_checks): """Raise TypeError if wrong type is provided for the index.""" with pytest.raises(TypeError): df_checks.pivot_longer(index=2007) @pytest.mark.xfail(reason="checking is done within _select_columns") def test_type_column_names(df_checks): """Raise TypeError if wrong type is provided for column_names.""" with pytest.raises(TypeError): df_checks.pivot_longer(column_names=2007) def test_type_names_to(df_checks): """Raise TypeError if wrong type is provided for names_to.""" with pytest.raises(TypeError): df_checks.pivot_longer(names_to={2007}) def test_subtype_names_to(df_checks): """ Raise TypeError if names_to is a sequence and the wrong type is provided for entries in names_to. """ with pytest.raises(TypeError, match="1 in names_to.+"): df_checks.pivot_longer(names_to=[1]) def test_duplicate_names_to(df_checks): """Raise error if names_to contains duplicates.""" with pytest.raises(ValueError, match="y is duplicated in names_to."): df_checks.pivot_longer(names_to=["y", "y"], names_pattern="(.+)(.)") def test_both_names_sep_and_pattern(df_checks): """ Raise ValueError if both names_sep and names_pattern is provided. """ with pytest.raises( ValueError, match="Only one of names_pattern or names_sep should be provided.", ): df_checks.pivot_longer( names_to=["rar", "bar"], names_sep="-", names_pattern="(.+)(.)" ) def test_name_pattern_wrong_type(df_checks): """Raise TypeError if the wrong type provided for names_pattern.""" with pytest.raises(TypeError, match="names_pattern should be one of.+"): df_checks.pivot_longer(names_to=["rar", "bar"], names_pattern=2007) def test_name_pattern_no_names_to(df_checks): """Raise ValueError if names_pattern and names_to is None.""" with pytest.raises(ValueError): df_checks.pivot_longer(names_to=None, names_pattern="(.+)(.)") def test_name_pattern_groups_len(df_checks): """ Raise ValueError if names_pattern and the number of groups differs from the length of names_to. """ with pytest.raises( ValueError, match="The length of names_to does not match " "the number of groups in names_pattern.+", ): df_checks.pivot_longer(names_to=".value", names_pattern="(.+)(.)") def test_names_pattern_wrong_subtype(df_checks): """ Raise TypeError if names_pattern is a list/tuple and wrong subtype is supplied. """ with pytest.raises(TypeError, match="1 in names_pattern.+"): df_checks.pivot_longer( names_to=["ht", "num"], names_pattern=[1, "\\d"] ) def test_names_pattern_names_to_unequal_length(df_checks): """ Raise ValueError if names_pattern is a list/tuple and wrong number of items in names_to. 
""" with pytest.raises( ValueError, match="The length of names_to does not match " "the number of regexes in names_pattern.+", ): df_checks.pivot_longer( names_to=["variable"], names_pattern=["^ht", ".+i.+"] ) def test_names_pattern_names_to_dot_value(df_checks): """ Raise Error if names_pattern is a list/tuple and .value in names_to. """ with pytest.raises( ValueError, match=".value is not accepted in names_to " "if names_pattern is a list/tuple.", ): df_checks.pivot_longer( names_to=["variable", ".value"], names_pattern=["^ht", ".+i.+"] ) def test_name_sep_wrong_type(df_checks): """Raise TypeError if the wrong type is provided for names_sep.""" with pytest.raises(TypeError, match="names_sep should be one of.+"): df_checks.pivot_longer(names_to=[".value", "num"], names_sep=["_"]) def test_name_sep_no_names_to(df_checks): """Raise ValueError if names_sep and names_to is None.""" with pytest.raises(ValueError): df_checks.pivot_longer(names_to=None, names_sep="_") def test_values_to_wrong_type(df_checks): """Raise TypeError if the wrong type is provided for `values_to`.""" with pytest.raises(TypeError, match="values_to should be one of.+"): df_checks.pivot_longer(values_to={"salvo"}) def test_values_to_wrong_type_names_pattern(df_checks): """ Raise TypeError if `values_to` is a list, and names_pattern is not. """ with pytest.raises( TypeError, match="values_to can be a list/tuple only " "if names_pattern is a list/tuple.", ): df_checks.pivot_longer(values_to=["salvo"]) def test_values_to_names_pattern_unequal_length(df_checks): """ Raise ValueError if `values_to` is a list, and the length of names_pattern does not match the length of values_to. """ with pytest.raises( ValueError, match="The length of values_to does not match " "the number of regexes in names_pattern.+", ): df_checks.pivot_longer( values_to=["salvo"], names_pattern=["ht", r"\d"], names_to=["foo", "bar"], ) def test_values_to_names_seq_names_to(df_checks): """ Raise ValueError if `values_to` is a list, and intersects with names_to. """ with pytest.raises( ValueError, match="salvo in values_to already exists in names_to." ): df_checks.pivot_longer( values_to=["salvo"], names_pattern=["ht"], names_to="salvo" ) def test_sub_values_to(df_checks): """Raise error if values_to is a sequence, and contains non strings.""" with pytest.raises(TypeError, match="1 in values_to.+"): df_checks.pivot_longer( names_to=["x", "y"], names_pattern=[r"ht", r"\d"], values_to=[1, "salvo"], ) def test_duplicate_values_to(df_checks): """Raise error if values_to is a sequence, and contains duplicates.""" with pytest.raises(ValueError, match="salvo is duplicated in values_to."): df_checks.pivot_longer( names_to=["x", "y"], names_pattern=[r"ht", r"\d"], values_to=["salvo", "salvo"], ) def test_values_to_exists_in_columns(df_checks): """ Raise ValueError if values_to already exists in the dataframe's columns. """ with pytest.raises(ValueError): df_checks.pivot_longer(index="birth", values_to="birth") def test_values_to_exists_in_names_to(df_checks): """ Raise ValueError if values_to is in names_to. """ with pytest.raises(ValueError): df_checks.pivot_longer(values_to="num", names_to="num") def test_column_multiindex_names_sep(df_multi): """ Raise ValueError if the dataframe's column is a MultiIndex, and names_sep is present. 
""" with pytest.raises(ValueError): df_multi.pivot_longer( column_names=[("names", "aa")], names_sep="_", names_to=["names", "others"], ) def test_column_multiindex_names_pattern(df_multi): """ Raise ValueError if the dataframe's column is a MultiIndex, and names_pattern is present. """ with pytest.raises(ValueError): df_multi.pivot_longer( index=[("name", "a")], names_pattern=r"(.+)(.+)", names_to=["names", "others"], ) def test_index_tuple_multiindex(df_multi): """ Raise ValueError if index is a tuple, instead of a list of tuples, and the dataframe's column is a MultiIndex. """ with pytest.raises(ValueError): df_multi.pivot_longer(index=("name", "a")) def test_column_names_tuple_multiindex(df_multi): """ Raise ValueError if column_names is a tuple, instead of a list of tuples, and the dataframe's column is a MultiIndex. """ with pytest.raises(ValueError): df_multi.pivot_longer(column_names=("names", "aa")) def test_sort_by_appearance(df_checks): """Raise error if sort_by_appearance is not boolean.""" with pytest.raises(TypeError): df_checks.pivot_longer( names_to=[".value", "value"], names_sep="_", sort_by_appearance="TRUE", ) def test_ignore_index(df_checks): """Raise error if ignore_index is not boolean.""" with pytest.raises(TypeError): df_checks.pivot_longer( names_to=[".value", "value"], names_sep="_", ignore_index="TRUE" ) def test_names_to_index(df_checks): """ Raise ValueError if there is no names_sep/names_pattern, .value not in names_to and names_to intersects with index. """ with pytest.raises( ValueError, match=r".+in names_to already exist as column labels.+", ): df_checks.pivot_longer( names_to="famid", index="famid", ) def test_names_sep_pattern_names_to_index(df_checks): """ Raise ValueError if names_sep/names_pattern, .value not in names_to and names_to intersects with index. """ with pytest.raises( ValueError, match=r".+in names_to already exist as column labels.+", ): df_checks.pivot_longer( names_to=["dim", "famid"], names_sep="_", index="famid", ) def test_dot_value_names_to_columns_intersect(df_checks): """ Raise ValueError if names_sep/names_pattern, .value in names_to, and names_to intersects with the new columns """ with pytest.raises( ValueError, match=r".+in names_to already exist in the new dataframe\'s columns.+", ): df_checks.pivot_longer( index="famid", names_to=(".value", "ht"), names_pattern="(.+)(.)" ) def test_values_to_seq_index_intersect(df_checks): """ Raise ValueError if values_to is a sequence, and intersects with the index """ match = ".+values_to already exist as column labels assigned " match = match + "to the dataframe's index parameter.+" with pytest.raises(ValueError, match=rf"{match}"): df_checks.pivot_longer( index="famid", names_to=("value", "ht"), names_pattern=["ht", r"\d"], values_to=("famid", "foo"), ) def test_dot_value_names_to_index_intersect(df_checks): """ Raise ValueError if names_sep/names_pattern, .value in names_to, and names_to intersects with the index """ match = ".+already exist as column labels assigned " match = match + "to the dataframe's index parameter.+" with pytest.raises( ValueError, match=rf"{match}", ): df_checks.rename(columns={"famid": "ht"}).pivot_longer( index="ht", names_to=(".value", "num"), names_pattern="(.+)(.)" ) def test_names_pattern_list_empty_any(df_checks): """ Raise ValueError if names_pattern is a list, and not all matches are returned. 
""" with pytest.raises( ValueError, match="No match was returned for the regex.+" ): df_checks.pivot_longer( index=["famid", "birth"], names_to=["ht"], names_pattern=["rar"], ) def test_names_pattern_no_match(df_checks): """Raise error if names_pattern is a regex and returns no matches.""" with pytest.raises( ValueError, match="Column labels .+ could not be matched with any .+" ): df_checks.pivot_longer( index="famid", names_to=[".value", "value"], names_pattern=r"(rar)(.)", ) def test_names_pattern_incomplete_match(df_checks): """ Raise error if names_pattern is a regex and returns incomplete matches. """ with pytest.raises( ValueError, match="Column labels .+ could not be matched with any .+" ): df_checks.pivot_longer( index="famid", names_to=[".value", "value"], names_pattern=r"(ht)(.)", ) def test_names_sep_len(df_checks): """ Raise error if names_sep, and the number of matches returned is not equal to the length of names_to. """ with pytest.raises(ValueError): df_checks.pivot_longer(names_to=".value", names_sep="(\\d)") def test_pivot_index_only(df_checks): """Test output if only index is passed.""" result = df_checks.pivot_longer( index=["famid", "birth"], names_to="dim", values_to="num", ) actual = df_checks.melt( ["famid", "birth"], var_name="dim", value_name="num" ) assert_frame_equal(result, actual) def test_pivot_column_only(df_checks): """Test output if only column_names is passed.""" result = df_checks.pivot_longer( column_names=["ht1", "ht2"], names_to="dim", values_to="num", ignore_index=False, ) actual = df_checks.melt( ["famid", "birth"], var_name="dim", value_name="num", ignore_index=False, ) assert_frame_equal(result, actual) def test_pivot_sort_by_appearance(df_checks): """Test output if sort_by_appearance is True.""" result = df_checks.pivot_longer( column_names="ht*", names_to="dim", values_to="num", sort_by_appearance=True, ) actual = ( df_checks.melt( ["famid", "birth"], var_name="dim", value_name="num", ignore_index=False, ) .sort_index() .reset_index(drop=True) ) assert_frame_equal(result, actual) def test_names_pat_str(df_checks): """ Test output when names_pattern is a string, and .value is present. """ result = ( df_checks.pivot_longer( column_names="ht*", names_to=(".value", "age"), names_pattern="(.+)(.)", sort_by_appearance=True, ) .reindex(columns=["famid", "birth", "age", "ht"]) .astype({"age": int}) ) actual = pd.wide_to_long( df_checks, stubnames="ht", i=["famid", "birth"], j="age" ).reset_index() assert_frame_equal(result, actual) def test_multiindex_column_level(df_multi): """ Test output from MultiIndex column, when column_level is provided. """ result = df_multi.pivot_longer( index="name", column_names="names", column_level=0 ) expected_output = df_multi.melt( id_vars="name", value_vars="names", col_level=0 ) assert_frame_equal(result, expected_output) def test_multiindex(df_multi): """ Test output from MultiIndex column, where column_level is not provided, and there is no names_sep/names_pattern. """ result = df_multi.pivot_longer(index=[("name", "a")]) expected_output = df_multi.melt(id_vars=[("name", "a")]) assert_frame_equal(result, expected_output) def test_multiindex_names_to(df_multi): """ Test output from MultiIndex column, where column_level is not provided, there is no names_sep/names_pattern, and names_to is provided as a sequence. 
""" result = df_multi.pivot_longer( index=[("name", "a")], names_to=["variable_0", "variable_1"] ) expected_output = df_multi.melt(id_vars=[("name", "a")]) assert_frame_equal(result, expected_output) def test_multiindex_names_to_length_mismatch(df_multi): """ Raise error if the length of names_to does not match the number of column levels. """ with pytest.raises(ValueError): df_multi.pivot_longer( index=[("name", "a")], names_to=["variable_0", "variable_1", "variable_2"], ) def test_multiindex_incomplete_level_names(df_multi): """ Raise error if not all the levels have names. """ with pytest.raises(ValueError): df_multi.columns.names = [None, "a"] df_multi.pivot_longer(index=[("name", "a")]) def test_multiindex_index_level_names_intersection(df_multi): """ Raise error if level names exist in index. """ with pytest.raises(ValueError): df_multi.columns.names = [None, "a"] df_multi.pivot_longer(index=[("name", "a")]) def test_no_column_names(df_checks): """ Test output if all the columns are assigned to the index parameter. """ assert_frame_equal( df_checks.pivot_longer(df_checks.columns).rename_axis(columns=None), df_checks, ) @pytest.fixture def test_df(): """Fixture DataFrame""" return pd.DataFrame( { "off_loc": ["A", "B", "C", "D", "E", "F"], "pt_loc": ["G", "H", "I", "J", "K", "L"], "pt_lat": [ 100.07548220000001, 75.191326, 122.65134479999999, 124.13553329999999, 124.13553329999999, 124.01028909999998, ], "off_lat": [ 121.271083, 75.93845266, 135.043791, 134.51128400000002, 134.484374, 137.962195, ], "pt_long": [ 4.472089953, -144.387785, -40.45611048, -46.07156181, -46.07156181, -46.01594293, ], "off_long": [ -7.188632000000001, -143.2288569, 21.242563, 40.937416999999996, 40.78472, 22.905889000000002, ], } ) def test_names_pattern_str(test_df): """Test output for names_pattern and .value.""" result = test_df.pivot_longer( column_names="*_*", names_to=["set", ".value"], names_pattern="(.+)_(.+)", sort_by_appearance=True, ) actual = test_df.copy() actual.columns = actual.columns.str.split("_").str[::-1].str.join("_") actual = ( pd.wide_to_long( actual.reset_index(), stubnames=["loc", "lat", "long"], sep="_", i="index", j="set", suffix=r".+", ) .reset_index("set") .reset_index(drop=True) ) assert_frame_equal(result, actual) def test_names_sep(test_df): """Test output for names_sep and .value.""" result = test_df.pivot_longer( names_to=["set", ".value"], names_sep="_", sort_by_appearance=True ) actual = test_df.copy() actual.columns = actual.columns.str.split("_").str[::-1].str.join("_") actual = ( pd.wide_to_long( actual.reset_index(), stubnames=["loc", "lat", "long"], sep="_", i="index", j="set", suffix=".+", ) .reset_index("set") .reset_index(drop=True) ) assert_frame_equal(result, actual) def test_names_pattern_list(): """Test output for names_pattern if list/tuple.""" df = pd.DataFrame( { "Activity": ["P1", "P2"], "General": ["AA", "BB"], "m1": ["A1", "B1"], "t1": ["TA1", "TB1"], "m2": ["A2", "B2"], "t2": ["TA2", "TB2"], "m3": ["A3", "B3"], "t3": ["TA3", "TB3"], } ) result = df.pivot_longer( index=["Activity", "General"], names_pattern=["^m", "^t"], names_to=["M", "Task"], sort_by_appearance=True, ).loc[:, ["Activity", "General", "Task", "M"]] actual = ( pd.wide_to_long( df, i=["Activity", "General"], stubnames=["t", "m"], j="number" ) .set_axis(["Task", "M"], axis="columns") .droplevel(-1) .reset_index() ) assert_frame_equal(result, actual) @pytest.fixture def not_dot_value(): """Fixture DataFrame""" return pd.DataFrame( { "country": ["United States", "Russia", "China"], 
"vault_2012": [48.1, 46.4, 44.3], "floor_2012": [45.4, 41.6, 40.8], "vault_2016": [46.9, 45.7, 44.3], "floor_2016": [46.0, 42.0, 42.1], } ) def test_not_dot_value_sep(not_dot_value): """Test output when names_sep and no dot_value""" result = not_dot_value.pivot_longer( "country", names_to=("event", "year"), names_sep="_", values_to="score", sort_by_appearance=True, ) result = result.sort_values( ["country", "event", "year"], ignore_index=True ) actual = not_dot_value.set_index("country") actual.columns = actual.columns.str.split("_", expand=True) actual.columns.names = ["event", "year"] actual = ( actual.stack(["event", "year"]) .rename("score") .sort_index() .reset_index() ) assert_frame_equal(result, actual) def test_not_dot_value_sep2(not_dot_value): """Test output when names_sep and no dot_value""" result = not_dot_value.pivot_longer( "country", names_to="event", names_sep="/", values_to="score", ) actual = not_dot_value.melt( "country", var_name="event", value_name="score" ) assert_frame_equal(result, actual) def test_not_dot_value_pattern(not_dot_value): """Test output when names_pattern is a string and no dot_value""" result = not_dot_value.pivot_longer( "country", names_to=("event", "year"), names_pattern=r"(.+)_(.+)", values_to="score", sort_by_appearance=True, ) result = result.sort_values( ["country", "event", "year"], ignore_index=True ) actual = not_dot_value.set_index("country") actual.columns = actual.columns.str.split("_", expand=True) actual.columns.names = ["event", "year"] actual = ( actual.stack(["event", "year"]) .rename("score") .sort_index() .reset_index() ) assert_frame_equal(result, actual) def test_not_dot_value_sep_single_column(not_dot_value): """ Test output when names_sep and no dot_value for a single column. """ A = not_dot_value.loc[:, ["country", "vault_2012"]] result = A.pivot_longer( "country", names_to=("event", "year"), names_sep="_", values_to="score", ) result = result.sort_values( ["country", "event", "year"], ignore_index=True ) actual = A.set_index("country") actual.columns = actual.columns.str.split("_", expand=True) actual.columns.names = ["event", "year"] actual = ( actual.stack(["event", "year"]) .rename("score") .sort_index() .reset_index() ) assert_frame_equal(result, actual) def test_multiple_dot_value(): """Test output for multiple .value.""" df = pd.DataFrame( { "x_1_mean": [1, 2, 3, 4], "x_2_mean": [1, 1, 0, 0], "x_1_sd": [0, 1, 1, 1], "x_2_sd": [0.739, 0.219, 1.46, 0.918], "y_1_mean": [1, 2, 3, 4], "y_2_mean": [1, 1, 0, 0], "y_1_sd": [0, 1, 1, 1], "y_2_sd": [-0.525, 0.623, -0.705, 0.662], "unit": [1, 2, 3, 4], } ) result = df.pivot_longer( index="unit", names_to=(".value", "time", ".value"), names_pattern=r"(x|y)_([0-9])(_mean|_sd)", ).astype({"time": int}) actual = df.set_index("unit") cols = [ent.split("_") for ent in actual.columns] actual.columns = [f"{start}_{end}{middle}" for start, middle, end in cols] actual = ( pd.wide_to_long( actual.reset_index(), stubnames=["x_mean", "y_mean", "x_sd", "y_sd"], i="unit", j="time", ) .sort_index(axis=1) .reset_index() ) assert_frame_equal(result, actual) @pytest.fixture def single_val(): """fixture dataframe""" return pd.DataFrame( { "id": [1, 2, 3], "x1": [4, 5, 6], "x2": [5, 6, 7], } ) def test_multiple_dot_value2(single_val): """Test output for multiple .value.""" result = single_val.pivot_longer( index="id", names_to=(".value", ".value"), names_pattern="(.)(.)" )
assert_frame_equal(result, single_val)
pandas.testing.assert_frame_equal
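The pytest module above leans on pandas.testing.assert_frame_equal to compare pivot_longer output against frames built with stock pandas reshaping. A minimal sketch of the assertion itself, with toy frames that are not part of the test suite:

import pandas as pd
from pandas.testing import assert_frame_equal

left = pd.DataFrame({"id": [1, 2], "x1": [4.0, 5.0]})
right = pd.DataFrame({"id": [1, 2], "x1": [4, 5]})

# By default the check is strict (dtypes included), so float64 vs int64 raises
# an AssertionError; relaxing check_dtype compares the values only and passes.
assert_frame_equal(left, right, check_dtype=False)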
""" Module encapsulating Selection's method for Pivoting output rows Copyright (C) 2016 ERT Inc. """ import ast import pandas import numpy def count(collection): """ module alias, to rename Python 'len' builtin for pandas aggregation """ return len(collection) def get_result(results_generator, pivot_variable_names, fact_variables): """ Reformat data result rows into a wider list of Pivoted headings Keyword Parameters: results_generator -- list generator, representing Warehouse query result header & data rows pivot_variable_names -- list of Strings, identifying which result fields are to be represented as newly generated Pivot columns instead of simply being represented as traditional data rows with 1x field heading. fact_variables -- list of DWSupport variable objects, defining what aggregation method should be used on fact's Measured/Calculated data fields. >>> from pprint import pprint >>> variables = ['date_dim$year','product_dim$flavor','sales'] >>> def data_generator1(): ... yield variables #Header row ... yield ['1998', 'red', 4] ... yield ['1998', 'blue', 53] ... yield ['2001', 'orange', 7] ... yield ['2001', 'red', 45] ... yield ['2001', 'blue', 106] >>> columns = ['product_dim$flavor'] #Generate pivot columns,for all flavors >>> dwsupport_fact_variables = [{ 'column': 'sales' ... ,'python_type': 'int'}] >>> output_data = get_result(data_generator1(), columns, dwsupport_fact_variables) >>> pprint([row for row in output_data]) [['date_dim$year', 'sales(sum) product_dim$flavor(blue)', 'sales(sum) product_dim$flavor(orange)', 'sales(sum) product_dim$flavor(red)'], ('1998', 53.0, nan, 4.0), ('2001', 106.0, 7.0, 45.0)] >>> variables = ['product_dim$flavor', 'sales'] >>> def data_generator2(): ... yield variables #Header row ... yield ['red', 49] ... yield ['blue', 159] ... yield ['orange', 7] >>> columns = ['product_dim$flavor'] >>> dwsupport_fact_variables = [{ 'column': 'sales' ... ,'python_type': 'int'}] >>> output_data = get_result(data_generator2(), columns ... ,dwsupport_fact_variables) >>> pprint([row for row in output_data]) [['sales(sum) product_dim$flavor(blue)', 'sales(sum) product_dim$flavor(orange)', 'sales(sum) product_dim$flavor(red)'], (159, 7, 49)] >>> variables = ['date_dim$year','product_dim$flavor','sales','sales_person_dim$name'] >>> def data_generator3(): ... yield variables #Header row ... yield ['1998', 'red', 4, 'pat'] ... yield ['1998', 'blue', 28, 'pat'] ... yield ['1998', 'blue', 6, 'pat'] ... yield ['1998', 'blue', 53, 'tay'] ... yield ['2001', 'orange', 7, 'tay'] ... yield ['2001', 'red', 45, 'pat'] ... 
yield ['2001', 'blue', 106, 'tay'] >>> columns = ['product_dim$flavor', 'sales_person_dim$name'] #Generate pivot columns,for flavor+salesmen >>> output_data = get_result(data_generator3(), columns, dwsupport_fact_variables) >>> pprint([row for row in output_data]) [['date_dim$year', 'sales(sum) product_dim$flavor(blue) sales_person_dim$name(pat)', 'sales(sum) product_dim$flavor(blue) sales_person_dim$name(tay)', 'sales(sum) product_dim$flavor(orange) sales_person_dim$name(tay)', 'sales(sum) product_dim$flavor(red) sales_person_dim$name(pat)'], ('1998', 34.0, 53.0, nan, 4.0), ('2001', nan, 106.0, 7.0, 45.0)] """ variable_names = next(results_generator) is_not_dim_or_role = lambda n: '$' not in n #assume all names with $ are: {dim_name|role_name}${field} data_headings = [name for name in variable_names if is_not_dim_or_role(name)] nonpivot_variable_headings = [name for name in variable_names if name not in data_headings+pivot_variable_names] make = PivotMaker(pivot_variable_names, nonpivot_variable_headings ,data_headings, fact_variables) final_results_generator = make.pivot(variable_names, results_generator) return final_results_generator class PivotMaker: """ Utility class, coupling pivot implementation w/ required data inputs """ artificial_index_name = 'PivotMaker_default_index' # String, representing column added to results without pivot Index artificial_index_value = 'uniform_value_added_to_each_row' # String, representing the value default-Index column row will have def __init__(self, pivot_variable_names, nonpivot_variable_headings ,data_headings, fact_variables): """ encapsulate the four data inputs, required for pivot generation Keyword Parameters: pivot_variable_names -- list of Strings, identifying which result fields are to be represented as newly generated Pivot columns instead of simply being represented as traditional data rows with 1x field heading. nonpivot_variable_names -- list of Strings, identifying additional dimensional result fields on which no pivot is to be performed. data_headings -- list of Strings, identifying fact data variables. Fact data is grouped/aggregated based on the pivot field values the fact data is related to. fact_variables -- list of DWSupport variable objects, defining what aggregation method should be used on fact's Measured/Calculated data fields. """ self.pivot_variable_names = pivot_variable_names self.nonpivot_variable_headings = nonpivot_variable_headings self.data_headings = data_headings self.fact_variables = fact_variables def pivot(self, input_header_row, data_rows_generator): """ Return tuple generator, encapsulating the pivot creation process Keyword Parameters: input_header_row -- list of strings, representing Warehouse result headings data_rows_generator -- list generator, representing Warehouse data result """ df =
pandas.DataFrame(data_rows_generator, columns=input_header_row)
pandas.DataFrame
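The final record builds a DataFrame straight from the warehouse row generator plus an explicit header list, which is what the subsequent pivot logic operates on. A self-contained sketch using the same shape of toy data as the module's doctest:

import pandas as pd

def data_rows_generator():
    # Stand-in for the warehouse result rows (year, flavor, sales).
    yield ['1998', 'red', 4]
    yield ['2001', 'blue', 53]

input_header_row = ['date_dim$year', 'product_dim$flavor', 'sales']

# The constructor materializes the generator and labels the columns from the header row.
df = pd.DataFrame(data_rows_generator(), columns=input_header_row)
print(df.shape)  # (2, 3)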