| prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90) |
|---|---|---|
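Each record below pairs a Python code `prompt` with the held-out pandas call as its `completion` and the fully qualified `api` name; in this flattened preview the completion and api appear inline at the end of each prompt, e.g. `df = | pd.read_csv(dataset_csv) | pandas.read_csv |`.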
import numpy as np
import matplotlib.pyplot as plt
import h5py
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import os
import datetime as dt
import pandas as pd
import imageio
def average_grid(val_data, val_long, val_lat, long, lat, flipped=True, cropped=True):
count = np.zeros((lat.shape[0] - 1, long.shape[0] - 1))
average = np.zeros((lat.shape[0] - 1, long.shape[0] - 1))
n = len(val_data)
for i in range(n):
if not np.isnan(val_data[i]) and val_data[i] > 0:  # "!= np.nan" is always True; NaN must be tested with np.isnan
if long[0] < val_long[i] < long[-1] and lat[0] < val_lat[i] < lat[-1]:
index_long = np.digitize(val_long[i], long) - 1
index_lat = np.digitize(val_lat[i], lat) - 1
average[index_lat, index_long] += val_data[i]
count[index_lat, index_long] += 1
valid = count != 0
average[valid] = average[valid] / count[valid]
if cropped:
cropping = np.where(count == 0)
average[cropping] = np.nan
if flipped:
return np.flip(average, axis=0), np.flip(count, axis=0)
else:
return average, count
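# --- Added usage sketch (synthetic values, not part of the original script) ---
# average_grid bins scattered (value, lon, lat) samples onto a regular grid whose
# edges are given by `long` and `lat`; the returned matrices have shape
# (len(lat) - 1, len(long) - 1), with empty cells set to NaN when cropped=True.
def _demo_average_grid():
    grid_long = np.linspace(-125.0, -100.0, 26)   # hypothetical 1-degree grid edges
    grid_lat = np.linspace(30.0, 50.0, 21)
    rng = np.random.default_rng(0)
    vals = rng.uniform(50e-9, 200e-9, 500)        # fake CO mixing ratios
    lons = rng.uniform(-125.0, -100.0, 500)
    lats = rng.uniform(30.0, 50.0, 500)
    avg, cnt = average_grid(vals, lons, lats, grid_long, grid_lat)
    return avg.shape, cnt.sum()                   # -> (20, 25) grid, ~500 samples binned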
def h5_MOPITT_loader(dir_path, extent, size, averaging=1, n_pressure=2):
averages, counts = [], []
long = np.linspace(extent[0], extent[1], size[0])
lat = np.linspace(extent[2], extent[3], size[1])
DATAFIELD_NAME = '/HDFEOS/SWATHS/MOP02/Data Fields/RetrievedCOMixingRatioProfile'
GEO_DATA = '/HDFEOS/SWATHS/MOP02/Geolocation Fields'
files = sorted(f for f in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, f)))  # sort for a deterministic snapshot order
n_snaps = int(len(files) / averaging)
for i in range(n_snaps):
values, longitudes, latitudes = [], [], []
for j in range(averaging):
index = i * averaging + j
path = os.path.join(dir_path, files[index])
with h5py.File(path, mode='r') as file:
# Extract Datasets
data_var = file[DATAFIELD_NAME]
data_lat = file[GEO_DATA + '/Latitude']
data_lon = file[GEO_DATA + '/Longitude']
# Read Values
val_lat = data_lat[:]
val_lon = data_lon[:]
val_data = data_var[:, n_pressure, 0]
longitudes += val_lon.tolist()
latitudes += val_lat.tolist()
values += val_data.tolist()
average, count = average_grid(values, longitudes, latitudes, long, lat)
averages.append(average)
counts.append(count)
return averages, counts
def simple_plot_map(matrix, extent, borderlines="white"):
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_extent(extent)
ax.coastlines(color=borderlines)
ax.add_feature(cfeature.STATES, zorder=1, linewidth=1.5, edgecolor=borderlines)
ax.imshow(matrix, transform=ccrs.PlateCarree(), extent=extent, cmap='inferno')
plt.show()
def csv_MODIS_loader(file_path, extent, size, averaging=1, beginning="2020-08-01"):
long = np.linspace(extent[0], extent[1], size[0])
lat = np.linspace(extent[2], extent[3], size[1])
date_format = "%Y-%m-%d"
df = pd.read_csv(file_path)
df["acq_date"] = | pd.to_datetime(df["acq_date"], format=date_format) | pandas.to_datetime |
import os
import glob
import random
import numpy as np
import pandas as pd
from sklearn import preprocessing
import torch
from torch.utils.data.sampler import Sampler
from torch.utils.data import DataLoader,Dataset
import torch.nn as nn
min_max_scaler = preprocessing.MinMaxScaler()
class GTExTaskMem(object):
# This class is for task generation for both meta training and meta testing.
# For meta training, we use all 20 samples without valid set (empty here).
# For meta testing, we use 1 or 5 shot samples for training, while using the same number of samples for validation.
# If set num_samples = 20 and character_folders = metatrain_character_folders, we generate tasks for meta training
# If set num_samples = 1 or 5 and character_folders = metatest_character_folders, we generate tasks for meta testing
def __init__(self, whole_dic,data_root_dir, num_classes, train_num, test_num):
self.character_folders = list(whole_dic.keys())  # materialise: random.sample below needs a sequence, not a dict view
self.num_classes = num_classes
self.train_num = train_num
self.test_num = test_num
class_folders = random.sample(self.character_folders,self.num_classes)
labels = np.array(range(len(class_folders)))
labels = dict(zip([i[len(data_root_dir):] for i in class_folders], labels))
self.train_roots = []
self.test_roots = []
self.train_labels = []
self.test_labels = []
for types in class_folders:
this_tmp = whole_dic[types]
len_col = this_tmp.shape[1]
col_nu = list(range(len_col) )
random.shuffle(col_nu)
self.train_roots.append(col_nu[:train_num])
self.test_roots.append( col_nu[train_num:test_num + train_num])
self.train_labels.append([labels[types]] * train_num)
self.test_labels.append([labels[types]] * test_num)
self.label_set = set(labels.keys())  # stripped label strings, mirroring GTExTask.label_set
self.label_list = list(self.label_set)
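# --- Added usage sketch (synthetic data, illustrative only) -----------------
# whole_dic maps a tissue/class name to a genes-x-samples DataFrame; with
# data_root_dir='' the dictionary keys are used directly as label strings.
def _demo_gtex_task_mem():
    rng = np.random.default_rng(0)
    whole_dic = {t: pd.DataFrame(rng.random((50, 30))) for t in ["liver", "lung", "heart"]}
    task = GTExTaskMem(whole_dic, data_root_dir="", num_classes=2, train_num=5, test_num=5)
    # train_roots/test_roots hold per-class column indices into the DataFrames,
    # train_labels/test_labels the matching integer class labels.
    return task.train_roots, task.train_labels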
def sample_test_split(geo, num_of_class_test, num_of_example, num_of_testing, string_set, tr):
class_folders = list(geo.keys())  # materialise so random.sample and positional indexing work
class_folders = random.sample(class_folders, num_of_class_test)
labels_converter = np.array(range(len(class_folders)))
labels_converter = dict(zip(class_folders, labels_converter))
labels_to_text = {value:key for key, value in labels_converter.items()}
example_set = pd.DataFrame()
test_set = pd.DataFrame()
example_label = []
test_label = []
# balance sampler
for ith in range(len(class_folders)):
subtype = class_folders[ith]
this_exp = geo[subtype]
if(tr == True):
this_exp = this_exp.transpose()
total_colno = (this_exp.shape)[1]
col_nu = list(range(total_colno) )
random.shuffle(col_nu)
assert(len(col_nu) > num_of_example+num_of_testing), subtype
example_ids = col_nu[0 : num_of_example]
ex = this_exp.iloc[:,example_ids]
test_ids = col_nu[num_of_example : num_of_example + num_of_testing]
te = this_exp.iloc[:,test_ids]
ex.sort_index(inplace=True)
te.sort_index(inplace=True)
example_set = pd.concat([example_set,ex],axis=1)
test_set = pd.concat([test_set, te],axis=1)
example_label += [labels_converter[subtype]] * num_of_example
test_label += [labels_converter[subtype]] * num_of_testing
if string_set is not None:
example_set = example_set[~example_set.index.duplicated(keep='first')]
example_set = example_set.transpose()
example_set = example_set.filter(items=string_set)
example_set = example_set.transpose()
test_set = test_set[~test_set.index.duplicated(keep='first')]
test_set = test_set.transpose()
test_set = test_set.filter(items=string_set)
test_set = test_set.transpose()
out_ex = pd.DataFrame(index=string_set)
out_ex = pd.concat([out_ex, example_set],axis=1)
out_ex = out_ex.replace(np.nan,0)
#out_ex.sort_index(inplace=True)
test_set = test_set.transpose()
test_set['label'] = test_label
test_set = test_set.sample(frac=1)
test_label = test_set['label']
test_set = test_set.drop(columns='label')
test_set = test_set.transpose()
out_te = pd.DataFrame(index=string_set)
out_te = pd.concat([out_te,test_set], axis=1)
#out_te.sort_index(inplace=True)
out_te = out_te.replace(np.nan,0)
out_ex = min_max_scaler.fit_transform(out_ex)
out_te = min_max_scaler.fit_transform(out_te)
return out_ex, example_label, out_te, test_label, labels_to_text
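# --- Added usage sketch (synthetic data, illustrative only) -----------------
# sample_test_split draws one balanced episode: `geo` maps class names to
# genes-x-samples DataFrames, and `string_set` fixes the gene order of the
# min-max-scaled support (out_ex) and query (out_te) matrices.
def _demo_sample_test_split():
    genes = ["g%02d" % i for i in range(20)]
    rng = np.random.default_rng(0)
    geo = {c: pd.DataFrame(rng.random((20, 12)), index=genes) for c in ["liver", "lung", "heart"]}
    ex, ex_lab, te, te_lab, idx_to_name = sample_test_split(
        geo, num_of_class_test=2, num_of_example=5, num_of_testing=5,
        string_set=genes, tr=False)
    return ex.shape, te.shape                     # -> (20, 10), (20, 10)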
##### relation learning implementation
class GTExTask(object):
# This class is for task generation for both meta training and meta testing.
# For meta training, we use all 20 samples without valid set (empty here).
# For meta testing, we use 1 or 5 shot samples for training, while using the same number of samples for validation.
# If set num_samples = 20 and character_folders = metatrain_character_folders, we generate tasks for meta training
# If set num_samples = 1 or 5 and character_folders = metatest_character_folders, we generate tasks for meta testing
def __init__(self, character_folders, data_root_dir, num_classes, train_num, test_num):
self.character_folders = character_folders
self.num_classes = num_classes
self.train_num = train_num
self.test_num = test_num
class_folders = random.sample(self.character_folders,self.num_classes)
labels = np.array(range(len(class_folders)))
labels = dict(zip([i[len(data_root_dir):] for i in class_folders], labels))
self.train_roots = []
self.test_roots = []
self.train_labels = []
self.test_labels = []
label_set = set()
for c in class_folders:
label_string = c[len(data_root_dir):]
label_set.add(label_string)
label_file_list = glob.glob(c + "/*")
label_file_list = random.sample(label_file_list,len(label_file_list))
self.train_roots += label_file_list[:train_num]
self.test_roots += label_file_list[train_num:test_num + train_num]
self.train_labels += [labels[label_string]] * train_num
self.test_labels += [labels[label_string]] * test_num
self.label_set = label_set
#label size dic done
self.label_list = list(self.label_set)
def get_class(self, sample):
return os.path.join(*sample.split('/')[:-1])
class ClassBalancedSampler(Sampler):
''' Samples 'num_inst' examples each from 'num_cl' pools
of examples of size 'num_per_class' '''
def __init__(self, num_cl, num_inst,shuffle=True):
self.num_cl = num_cl
self.num_inst = num_inst
self.shuffle = shuffle
def __iter__(self):
# return a single list of indices, assuming that items will be grouped by class
if self.shuffle:
batches = [[i+j*self.num_inst for i in torch.randperm(self.num_inst)] for j in range(self.num_cl)]
else:
batches = [[i+j*self.num_inst for i in range(self.num_inst)] for j in range(self.num_cl)]
batches = [[batches[j][i] for j in range(self.num_cl)] for i in range(self.num_inst)]
if self.shuffle:
random.shuffle(batches)
for sublist in batches:
random.shuffle(sublist)
batches = [item for sublist in batches for item in sublist]
return iter(batches)
def __len__(self):
return 1
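# Worked example of the interleaving above (shuffle=False): with num_cl=2 classes
# whose items are stored class-by-class as indices 0..5 (num_inst=3 per class),
#   list(ClassBalancedSampler(num_cl=2, num_inst=3, shuffle=False))
# yields [0, 3, 1, 4, 2, 5], i.e. one item of each class at a time.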
class ClassBalancedSamplerOld(Sampler):
''' Samples 'num_inst' examples each from 'num_cl' pools
of examples of size 'num_per_class' '''
def __init__(self, num_per_class, num_cl, num_inst,shuffle=True):
self.num_per_class = num_per_class
self.num_cl = num_cl
self.num_inst = num_inst
self.shuffle = shuffle
def __iter__(self):
# return a single list of indices, assuming that items will be grouped by class
if self.shuffle:
batch = [[i+j*self.num_inst for i in torch.randperm(self.num_inst)[:self.num_per_class]] for j in range(self.num_cl)]
else:
batch = [[i+j*self.num_inst for i in range(self.num_inst)[:self.num_per_class]] for j in range(self.num_cl)]
batch = [item for sublist in batch for item in sublist]
if self.shuffle:
random.shuffle(batch)
return iter(batch)
def __len__(self):
return 1
class ClassBalancedSamplerMem(Sampler):
''' Samples 'num_inst' examples each from 'num_cl' pools
of examples of size 'num_per_class' '''
def __init__(self, num_cl, num_inst, num_set):
self.num_cl = num_cl
self.num_inst = num_inst
self.num_set = num_set #[(0,10), (1,10),(2,10)]
print(self.num_set)
def __iter__(self):
# return a single list of indices, assuming that items will be grouped by class
picked = random.sample(range(len(self.num_set)), self.num_cl)
batches = [[ int(i * 10000 + j) for j in random.sample(range(self.num_set[i]), self.num_inst)] for i in picked]
batches = [item for sublist in batches for item in sublist]
random.shuffle(batches)
return iter(batches)
def __len__(self):
return 1
# need to split file list
class GTExGepDatasetMem(torch.utils.data.Dataset):
def __init__(self, geo_dic, gene_set):
super(GTExGepDatasetMem, self).__init__()
self.geo_dic = geo_dic
self.keys = []
self.num_per_classes = []
for i in self.geo_dic:
self.keys.append(i)
self.num_per_classes.append((geo_dic[i].shape)[1])
self.gene_set = gene_set
def __len__(self):
return len(self.keys)
def __getitem__(self, idx):
print('>')
print(idx)
print('<')
i = idx//10000
j = idx%10000
ki = self.keys[i]
gep = self.geo_dic[ki].iloc[:,j]
return gep, ki
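# Note on the shared index encoding: ClassBalancedSamplerMem emits indices of the
# form class_index * 10000 + sample_index, which __getitem__ above decodes with
# idx // 10000 and idx % 10000 -- so each class is assumed to hold fewer than
# 10000 samples.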
def get_gtex_loader_mem(tr_data, num_class, num_per_class=5, gene_set=None):
# NOTE: batch size here is # instances PER CLASS
#normalize = transforms.Normalize(mean=[0.92206, 0.92206, 0.92206], std=[0.08426, 0.08426, 0.08426])
dataset = GTExGepDatasetMem(tr_data, gene_set)
sampler = ClassBalancedSamplerMem(num_class, num_per_class, dataset.num_per_classes)
loader = DataLoader(dataset, batch_size=num_per_class*num_class, sampler=sampler)
return loader
# need to split file list
class GTExGepDataset(torch.utils.data.Dataset):
def __init__(self, task, split, gene_set, headers):
if (split == "train"):
self.file_list = task.train_roots
self.label_list = task.train_labels
else:
self.file_list = task.test_roots
self.label_list = task.test_labels
self.gene_set = gene_set
self.headers = headers
def __len__(self):
return len(self.label_list)  # one item per (file, label) pair
def __getitem__(self, idx):
file_name = self.file_list[idx]
data = pd.read_csv(file_name, sep='\t' ,index_col=0, header=None)
data = np.log(data+1.0)
#data = np.clip(data, 1, np.max(data)[1])
data = data.transpose()
data.columns = [i.split(".")[0] for i in data.columns]
data = data.filter(items=self.gene_set)
data = data.transpose()
out_ex = pd.DataFrame(index=self.gene_set)
out_ex = pd.concat([out_ex, data],axis=1)
out_ex = out_ex.replace(np.nan,0)
out_ex = np.array(out_ex).reshape(-1,1)
#data.sort_index(inplace = True)
#data = data.astype('float')
data_label = self.label_list[idx]
data_label = np.array(data_label)
data_label = data_label.astype('int')
return out_ex, data_label
def get_gtex_loader(task, num_per_class=1, split='train',shuffle=True, gene_set=None, order=None):
# NOTE: batch size here is # instances PER CLASS
#normalize = transforms.Normalize(mean=[0.92206, 0.92206, 0.92206], std=[0.08426, 0.08426, 0.08426])
if split == 'train':
sampler = ClassBalancedSamplerOld(num_per_class, task.num_classes, task.train_num,shuffle=shuffle)
dataset = GTExGepDataset(task, split, gene_set, order)
else:
sampler = ClassBalancedSampler(task.num_classes, task.test_num,shuffle=shuffle)
dataset = GTExGepDataset(task, split, gene_set, order)
loader = DataLoader(dataset, batch_size=num_per_class*task.num_classes, sampler=sampler)
return loader
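# --- Added usage sketch (folders, root dir and gene_set are placeholders) ----
# One meta-learning episode: sample a task over `num_classes` tissue folders,
# then build a support ('train') loader and a query ('test') loader for it.
def _demo_episode(metatrain_folders, data_root_dir, gene_set):
    task = GTExTask(metatrain_folders, data_root_dir, num_classes=5, train_num=1, test_num=5)
    support_loader = get_gtex_loader(task, num_per_class=1, split='train', gene_set=gene_set)
    query_loader = get_gtex_loader(task, num_per_class=5, split='test', gene_set=gene_set)
    return support_loader, query_loader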
def GTEx_divide_train_test(root_dir, num_class, seed_num):
metatrain_character_folders = glob.glob(root_dir + "training_gtex/*")
metaval_character_folders = glob.glob(root_dir + "test_gtex/*")
print ("We are training :", metatrain_character_folders)
print ("We are testing :", metaval_character_folders)
return metatrain_character_folders, metaval_character_folders
def GTEx_divide_train_test_all(root_dir, num_class, seed_num):
tissue_folder = glob.glob(root_dir + "*")
random.seed(seed_num)
random.shuffle(tissue_folder)
metatrain_character_folders = tissue_folder[num_class:]
metaval_character_folders = tissue_folder[:num_class] #first num_class is for test
print ("We are training :", metatrain_character_folders)
print ("We are testing :", metaval_character_folders)
return metatrain_character_folders, metaval_character_folders
def GTEx_divide_train_test_all_in_memory(root_dir, num_class, seed_num):
tissue_folder = glob.glob(root_dir + "sub_*")
#random.seed(seed_num)
random.shuffle(tissue_folder)
metatrain_character_folders = tissue_folder[num_class:]
metaval_character_folders = tissue_folder[:num_class] #first num_class is for test
print ("We are training :", metatrain_character_folders)
print ("We are testing :", metaval_character_folders)
meta_train = dict()
meta_test = dict()
for type_data in metatrain_character_folders:
this_pd = | pd.read_csv(type_data,sep="\t",index_col=0, header=None) | pandas.read_csv |
"""
OBJECT RECOGNITION USING A SPIKING NEURAL NETWORK.
* The data preparation module.
@author: atenagm1375
"""
import os
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
import cv2
class CaltechDataset(Dataset):
"""
CaltechDataset class.
Attributes
----------
caltech_dataset_loader : utils.data.CaltechDatasetLoader
An instance of CaltechDatasetLoader.
train : bool, optional
Defines whether to load the train instances or the test. The default
is True.
Keyword Arguments
-----------------
size_low : int
The size of first GaussianBlur filter.
size_high : int
The size of second GaussianBlur filter.
"""
def __init__(self, caltech_dataset_loader, train=True, **kwargs):
self._cdl = caltech_dataset_loader
if kwargs:
self._cdl.apply_DoG(kwargs.get("size_low", 0),
kwargs.get("size_high", 0))
self.dataframe = self._cdl.data_frame.iloc[
self._cdl.train_idx] if train else \
self._cdl.data_frame.iloc[self._cdl.test_idx]
def __len__(self):
"""
Get number of instances in the dataset.
Returns
-------
int
number of instances in the dataset.
"""
return len(self.dataframe)
def __getitem__(self, index):
"""
Get value(s) at the described index.
Returns the image matrix and one-hot encoded label of the instance(s)
at location index.
Parameters
----------
index : int
The index to return values of.
Returns
-------
tuple of two numpy.arrays
The tuple of image matrix and the label array.
"""
return self.dataframe["x"].iloc[index].astype(np.float32), \
self.dataframe[self._cdl.classes].iloc[index].values.astype(
np.float32)
class CaltechDatasetLoader:
"""
Loads the Caltech dataset.
Attributes
----------
path : str
Path to Caltech image folders.
classes: list of str
List of classes.
image_size: tuple, optional
The input image size. All images are resized to the specified size.
The default is (100, 100).
"""
def __init__(self, path, classes, image_size=(100, 100)):
self.classes = classes
self.n_classes = len(classes)
self.data_frame = pd.DataFrame()
self.train_idx = []
self.test_idx = []
x = []
y = []
for obj in classes:
cls_path = path + ("/" if path[-1] != "/" else "") + obj + "/"
for img_path in os.listdir(cls_path):
img = cv2.imread(cls_path + img_path, 0)
img = cv2.resize(img, image_size,
interpolation=cv2.INTER_CUBIC)
x.append(img.reshape((1, *image_size)))
y.append(obj)
self.n_samples = len(y)
self.data_frame = | pd.DataFrame({"x": x, "y": y}, columns=["x", "y"]) | pandas.DataFrame |
'''
@author : <NAME>
ML model for foreign exchange prediction
'''
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
import joblib
def getFxRatesForPairs(pairName):
df = pd.read_csv("C:\\Users\\Srivastava_Am\\PycharmProjects\\exchange-rate-prediction\\data_source\\fx_rates_aud-USD.csv")
df = df.replace('ND', np.nan)
df = df.dropna().reset_index(drop=True)
df.isna().sum()
for col in df.columns[1:]:
df[col] = pd.to_numeric(df[col], errors='coerce')
df['Time Series'] = pd.to_datetime(df['Time Series'])
df['month'] = df['Time Series'].dt.month
df['year'] = df['Time Series'].dt.year
df['month_year'] = df['Time Series'].dt.to_period('M')
return df.groupby('month_year').AUD_USD.mean().reset_index()
def getIrdData(pairName):
ir_df = pd.read_csv("C:\\Users\\Srivastava_Am\\PycharmProjects\\exchange-rate-prediction\\data_source\\aud-usd-ird.csv")
ir_df = ir_df[(ir_df['Date'] >= '2016-03-01') &
(ir_df['Date'] <= '2020-04-02')]
ir_df = ir_df['Long Carry'].astype(str)
ir_df = ir_df.reindex(index=ir_df.index[::-1])
ir_df = ir_df.replace({'%': ''}, regex=True)
ir_df = ir_df.astype(float)
return np.array(ir_df).reshape(-1, 1)
def getGdpDiff(pairName):
aus_gdp = pd.read_csv("C:\\Users\\Srivastava_Am\\PycharmProjects\\exchange-rate-prediction\\data_source\\aus-gdp-rate.csv")
usa_gdp = pd.read_csv("C:\\Users\\Srivastava_Am\\PycharmProjects\\exchange-rate-prediction\\data_source\\usd-gdp-rate.csv")
aus_gdp['DATE'] = pd.to_datetime(aus_gdp['DATE']).dt.to_period('M')
aus_gdp = aus_gdp.set_index('DATE').resample('M').interpolate()
aus_gdp['month_year'] = aus_gdp.index
usa_gdp['DATE'] = pd.to_datetime(usa_gdp['DATE']).dt.to_period('M')
usa_gdp = usa_gdp.set_index('DATE').resample('M').interpolate()
usa_gdp['month_year'] = usa_gdp.index
aus_gdp = aus_gdp.rename(columns={'GDP': 'AUS_GDP'})
aus_usa_gdp = pd.merge(aus_gdp, usa_gdp, on="month_year", how="inner")
aus_usa_gdp = aus_usa_gdp.rename(columns={'GDP': 'USA_GDP'})
aus_usa_gdp['GDP_diff'] = aus_usa_gdp['AUS_GDP'] - aus_usa_gdp['USA_GDP']
aus_usa_gdp = aus_usa_gdp[(aus_usa_gdp['month_year'] >= '2016-03') &
(aus_usa_gdp['month_year'] <= '2020-04')].reset_index(drop=True)
gdp_diff = ["%.4f" % num for num in aus_usa_gdp['GDP_diff']]
return gdp_diff
def getCPIDiff(pairName):
aus_cpi = pd.read_csv("C:\\Users\\Srivastava_Am\\PycharmProjects\\exchange-rate-prediction\\data_source\\AUS-CPI.csv")
usa_cpi = | pd.read_csv("C:\\Users\\Srivastava_Am\\PycharmProjects\\exchange-rate-prediction\\data_source\\USA-CPI.csv") | pandas.read_csv |
import numpy as np
import monai
import porchio
from porchio import Queue
from torchvision import datasets, transforms, models
from torch.utils.data import DataLoader, Dataset
from PIL import Image
import pandas as pd
import os
import argparse
import torchvision
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from model.model import nnUNet
import random
from ignite.handlers import EarlyStopping
from model.metric import DiceLoss
import glob
import time
import monai.visualize.img2tensorboard as img2tensorboard
import sys
sys.path.append('/home/pedro/over9000')
from over9000 import RangerLars
class BadDataset:
def __init__(self, df, transform):
self.df = df
self.loader = porchio.ImagesDataset
self.transform = transform
self.sampler = porchio.data.UniformSampler(patch_size=80)
def __getitem__(self, index):
# These names are arbitrary
MRI = 'mri'
SEG = 'seg'
PHYSICS = 'physics'
subjects = []
for (image_path, label_path, subject_physics) in zip(self.df.Filename, self.df.Label_Filename,
self.df.subject_physics):
subject_dict = {
MRI: porchio.ScalarImage(image_path),
SEG: porchio.LabelMap(label_path),
PHYSICS: subject_physics
}
subject = porchio.Subject(subject_dict)
subjects.append(subject)
this_dataset = self.loader(subjects, self.transform)
patches_dataset = porchio.Queue(
subjects_dataset=this_dataset,
max_length=queue_length,
samples_per_volume=samples_per_volume,
sampler=porchio.sampler.UniformSampler(patch_size),
shuffle_subjects=False,
shuffle_patches=False,
)
return patches_dataset
def __len__(self):
return self.df.shape[0]
def BespokeDataset(df, transform, patch_size, batch_seed):
loader = porchio.ImagesDataset
sampler = porchio.data.UniformSampler(patch_size=patch_size, batch_seed=batch_seed)
# These names are arbitrary
MRI = 'mri'
SEG = 'seg'
PHYSICS = 'physics'
subjects = []
for (image_path, label_path, subject_physics) in zip(df.Filename, df.Label_Filename, df.subject_physics):
subject_dict = {
MRI: porchio.ScalarImage(image_path),
SEG: porchio.LabelMap(label_path),
PHYSICS: subject_physics
}
subject = porchio.Subject(subject_dict)
subjects.append(subject)
this_dataset = loader(subjects, transform)
patches_dataset = porchio.Queue(
subjects_dataset=this_dataset,
max_length=queue_length,
samples_per_volume=samples_per_volume,
sampler=sampler,
shuffle_subjects=False,
shuffle_patches=False,
num_workers=24,
)
return patches_dataset
# Not enough to shuffle batches, shuffle WITHIN batches!
# Take original csv, shuffle between subjects!
def reshuffle_csv(og_csv, batch_size):
# Calculate some necessary variables
batch_reshuffle_csv = pd.DataFrame({})
num_images = len(og_csv)
batch_numbers = list(np.array(range(num_images // batch_size)) * batch_size)
num_unique_subjects = og_csv.subject_id.nunique()
unique_subject_ids = og_csv.subject_id.unique()
# First, re-order within subjects so batches don't always contain same combination of physics parameters
for sub_ID in unique_subject_ids:
batch_reshuffle_csv = batch_reshuffle_csv.append(og_csv[og_csv.subject_id == sub_ID].sample(frac=1).
reset_index(drop=True), ignore_index=True)
# Set up empty lists for appending re-ordered entries
new_subject_ids = []
new_filenames = []
new_label_filenames = []
new_physics = []
new_folds = []
for batch in range(num_images // batch_size):
# Randomly sample a batch ID
batch_id = random.sample(batch_numbers, 1)[0]
# Find those images/ labels/ params stipulated by the batch ID
transferred_subject_ids = batch_reshuffle_csv.subject_id[batch_id:batch_id + batch_size]
transferred_filenames = batch_reshuffle_csv.Filename[batch_id:batch_id + batch_size]
transferred_label_filenames = batch_reshuffle_csv.Label_Filename[batch_id:batch_id + batch_size]
transferred_physics = batch_reshuffle_csv.subject_physics[batch_id:batch_id + batch_size]
transferred_folds = batch_reshuffle_csv.fold[batch_id:batch_id + batch_size]
# Append these to respective lists
new_subject_ids.extend(transferred_subject_ids)
new_filenames.extend(transferred_filenames)
new_label_filenames.extend(transferred_label_filenames)
new_physics.extend(transferred_physics)
new_folds.extend(transferred_folds)
# Remove batch number used to reshuffle certain batches
batch_numbers.remove(batch_id)
altered_basic_csv = pd.DataFrame({
'subject_id': new_subject_ids,
'Filename': new_filenames,
'subject_physics': new_physics,
'fold': new_folds,
'Label_Filename': new_label_filenames
})
return altered_basic_csv
def visualise_batch_patches(loader, bs, ps, comparisons=2):
print('Calculating tester...')
assert comparisons <= bs
next_data = next(iter(loader))
batch_samples = random.sample(list(range(bs)), comparisons)
import matplotlib.pyplot as plt
# Set up figure for ALL intra-batch comparisons
f, axarr = plt.subplots(3, comparisons)
for comparison in range(comparisons):
# print(f'Label shape is {next_data["seg"]["data"].shape}')
# print(f'Data shape is {next_data["mri"]["data"].shape}')
example_batch_patch = np.squeeze(next_data['mri']['data'][batch_samples[comparison], ..., int(ps/2)])
# For segmentation need to check that all classes (in 4D) have same patch that ALSO matches data
example_batch_patch2 = np.squeeze(next_data['seg']['data'][batch_samples[comparison], 0, ..., int(ps/2)])
example_batch_patch3 = np.squeeze(next_data['seg']['data'][batch_samples[comparison], 1, ..., int(ps/2)])
axarr[0, comparison].imshow(example_batch_patch)
axarr[0, comparison].axis('off')
axarr[1, comparison].imshow(example_batch_patch2)
axarr[1, comparison].axis('off')
axarr[2, comparison].imshow(example_batch_patch3)
axarr[2, comparison].axis('off')
plt.show()
# Stratification specific functions
def feature_loss_func(volume1, volume2):
if type(volume2) == np.ndarray:
return np.mean((volume1 - volume2) ** 2)
else:
return torch.mean((volume1 - volume2) ** 2).item()
def stratification_checker(input_volume):
# Will only work for batch size 4 for now, but that comprises most experiments
return int(torch.sum(input_volume[0, ...] + input_volume[3, ...] - input_volume[1, ...] - input_volume[2, ...]))
def calc_feature_loss(input_volume):
feature_loss1 = feature_loss_func(
volume1=input_volume[0, ...],
volume2=input_volume[1, ...])
feature_loss2 = feature_loss_func(
volume1=input_volume[0, ...],
volume2=input_volume[2, ...])
feature_loss3 = feature_loss_func(
volume1=input_volume[0, ...],
volume2=input_volume[3, ...])
feature_loss4 = feature_loss_func(
volume1=input_volume[1, ...],
volume2=input_volume[2, ...])
feature_loss5 = feature_loss_func(
volume1=input_volume[1, ...],
volume2=input_volume[3, ...])
feature_loss6 = feature_loss_func(
volume1=input_volume[2, ...],
volume2=input_volume[3, ...])
total_feature_loss = np.mean([feature_loss1,
feature_loss2,
feature_loss3,
feature_loss4,
feature_loss5,
feature_loss6])
return total_feature_loss
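# Note: calc_feature_loss averages the 6 = C(4, 2) pairwise feature distances
# between the four items of a (stratified) batch, matching the batch size of 4
# assumed by stratification_checker above.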
def normalise_image(array):
return (array - np.min(array)) / (np.max(array) - np.min(array))
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
torch.cuda.empty_cache()
# Writer will output to ./runs/ directory by default
log_dir = f'/home/pedro/PhysicsPyTorch/logger/preliminary_tests_physics'
if not os.path.exists(log_dir):
os.makedirs(log_dir)
SAVE_PATH = os.path.join(f'/home/pedro/PhysicsPyTorch/logger/preliminary_tests/models')
if not os.path.exists(SAVE_PATH):
os.makedirs(SAVE_PATH)
SAVE = True
LOAD = True
patch_test = False
val_test = False
# Physics specific parameters
physics_flag = True
physics_experiment_type = 'MPRAGE'
physics_input_size = {'MPRAGE': 2,
'SPGR': 6}
def physics_preprocessing(physics_input, experiment_type):
if experiment_type == 'MPRAGE':
expo_physics = torch.exp(-physics_input)
overall_physics = torch.stack((physics_input, expo_physics), dim=1)
elif experiment_type == 'SPGR':
TR_expo_params = torch.unsqueeze(torch.exp(-physics_input[:, 0]), dim=1)
TE_expo_params = torch.unsqueeze(torch.exp(-physics_input[:, 1]), dim=1)
FA_sin_params = torch.unsqueeze(torch.sin(physics_input[:, 2] * 3.14159265 / 180), dim=1)
overall_physics = torch.cat((physics_input, TR_expo_params, TE_expo_params, FA_sin_params), dim=1)  # assumes a 2-D (batch, 3) physics_input, giving the 6 SPGR features
return overall_physics
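# Worked example (MPRAGE branch, hypothetical inversion times in seconds): for a
# batch physics_input = torch.tensor([0.9, 1.2]), the stacked output pairs each
# value with its exponential decay, the 2 features per subject expected by
# physics_input_size['MPRAGE']:
#   tensor([[0.9000, 0.4066],
#           [1.2000, 0.3012]])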
# Check if SAVE_PATH is empty
file_list = os.listdir(path=SAVE_PATH)
num_files = len(file_list)
# Hyper-parameter loading: General parameters so doesn't matter which model file is loaded exactly
if LOAD and num_files > 0:
model_files = glob.glob(os.path.join(SAVE_PATH, '*.pth'))
latest_model_file = max(model_files, key=os.path.getctime)
checkpoint = torch.load(latest_model_file, map_location=torch.device('cuda:0'))
print(f'Loading {latest_model_file}!')
loaded_epoch = checkpoint['epoch']
loss = checkpoint['loss']
running_iter = checkpoint['running_iter']
EPOCHS = 100
# Memory related variables
batch_size = checkpoint['batch_size']
queue_length = batch_size
patch_size = checkpoint['patch_size']
samples_per_volume = 1
else:
running_iter = 0
loaded_epoch = -1
EPOCHS = 100
# Memory related variables
patch_size = 16
batch_size = 4
queue_length = batch_size
samples_per_volume = 1
# Stratification
training_modes = ['standard', 'stratification']
training_mode = 'stratification'
stratification_epsilon = 0.05
# Some necessary variables
dataset_csv = '/home/pedro/PhysicsPyTorch/local_physics_csv.csv'
# img_dir = '/data/MPRAGE_subjects_121T/Train_121T' # '/nfs/home/pedro/COVID/Data/KCH_CXR_JPG'
# label_dir = '/data/Segmentation_MPRAGE_121T/All_labels' # '/nfs/home/pedro/COVID/Labels/KCH_CXR_JPG.csv'
img_dir = '/data/Resampled_Data/Images/SS_GM_Images' # '/nfs/home/pedro/COVID/Data/KCH_CXR_JPG'
label_dir = '/data/Resampled_Data/Labels/GM_Labels' # '/nfs/home/pedro/COVID/Labels/KCH_CXR_JPG.csv'
print(img_dir)
print(label_dir)
val_batch_size = 4
# Read csv + add directory to filenames
df = | pd.read_csv(dataset_csv) | pandas.read_csv |
from Heuristic import CPH
from joblib import Parallel, delayed
from datetime import datetime
import pandas as pd
import numpy as np
import pickle
import csv
def run_heuristic(tree_set=None, tree_set_newick=None, inst_num=0, lengths=True, repeats=1, time_limit=None,
progress=True, reduce_trivial=False, pick_lowest_cherry=False, pick_ml=False, model_name=None,
str_features=None, problem_type=""):
# READ TREE SET
now = datetime.now().time()
if progress:
print(f"Instance {inst_num} {problem_type}: Start at {now}")
if tree_set is None and tree_set_newick is not None:
# Empty set of inputs
inputs = []
# Read each line of the input file with name set by "option_file_argument"
f = open(tree_set_newick, "rt")
reader = csv.reader(f, delimiter='~', quotechar='|')
for row in reader:
inputs.append(str(row[0]))
f.close()
# Make the set of inputs usable for all algorithms: use the CPH class
tree_set = CPH.Input_Set(newick_strings=inputs, instance=inst_num)
# RUN HEURISTIC CHERRY PICKING SEQUENCE
# Run the heuristic to find a cherry-picking sequence `seq' for the set of input trees.
# Arguments are set as given by the terminal arguments
seq, df_pred = tree_set.CPSBound(repeats=repeats,
progress=progress,
time_limit=time_limit,
reduce_trivial=reduce_trivial,
pick_lowest_cherry=pick_lowest_cherry,
pick_ml=pick_ml,
model_name=model_name,
str_features=str_features)
# Output the computation time for the heuristic
now = datetime.now().time()
if progress:
print(f"Instance {inst_num} {problem_type}: Finish at {now}")
print(f"Instance {inst_num} {problem_type}: Computation time heuristic: {tree_set.CPS_Compute_Time}")
print(f"Instance {inst_num} {problem_type}: Reticulation number = {min(tree_set.RetPerTrial)}")
if pick_ml:
return tree_set.RetPerTrial, tree_set.DurationPerTrial, seq, df_pred
else:
return tree_set.RetPerTrial, tree_set.DurationPerTrial, seq
def run_main(i, model_name, repeats=20, progress=False, str_features=None, time_limit=None, name=None, tree_size="med",
forest_size=10, partial=False):
# save results
score = pd.DataFrame(
index=pd.MultiIndex.from_product([[i], ["RetNum", "Time"], np.arange(repeats)]),
columns=["ML", "LowestCherry", "TrivialRandom", "Random"], dtype=float)
df_seq = pd.DataFrame()
if partial:
tree_set_newick = f"../DataGen/LGT/Test/TreeSetsNewick/tree_set_newick_{tree_size}_T{forest_size}_part_{i}_LGT.txt"
test_leaves = "part"
else:
tree_set_newick = f"../DataGen/LGT/Test/TreeSetsNewick/tree_set_newick_{tree_size}_T{forest_size}_{i}_LGT.txt"
test_leaves = "all"
# ML HEURISTIC
score.loc[i, "RetNum"]["ML"], score.loc[i, "Time"]["ML"], seq_ml, df_pred = run_heuristic(tree_set_newick=tree_set_newick,
inst_num=i, repeats=1,
time_limit=time_limit,
pick_ml=True,
model_name=model_name,
str_features=str_features,
progress=progress,
problem_type="ML")
ml_time = score.loc[i, "Time", 0]["ML"]
ml_ret = int(score.loc[i, "RetNum"]["ML"][0])
df_seq = pd.concat([df_seq, pd.Series(seq_ml)], axis=1)
# PICK LOWEST CHERRY HEURISTIC
score.loc[i, "RetNum"]["LowestCherry"], score.loc[i, "Time"]["LowestCherry"], seq_lc = run_heuristic(
tree_set_newick=tree_set_newick, inst_num=i, repeats=repeats,
time_limit=ml_time, pick_lowest_cherry=True, progress=progress, problem_type="LC")
lc_ret = int(min(score.loc[i, "RetNum"]["LowestCherry"]))
df_seq = pd.concat([df_seq, pd.Series(seq_lc)], axis=1)
# RANDOM WITH PREPROCESSING HEURISTIC
score.loc[i, "RetNum"]["TrivialRandom"], score.loc[i, "Time"]["TrivialRandom"], seq_tr = run_heuristic(
tree_set_newick=tree_set_newick, inst_num=i, repeats=repeats,
time_limit=ml_time, reduce_trivial=True, progress=progress, problem_type="TR")
tr_ret = int(min(score.loc[i, "RetNum"]["TrivialRandom"]))
df_seq = pd.concat([df_seq, pd.Series(seq_tr)], axis=1)
# RANDOM HEURISTIC
score.loc[i, "RetNum"]["Random"], score.loc[i, "Time"]["Random"], seq_ra = run_heuristic(tree_set_newick=tree_set_newick,
inst_num=i, repeats=repeats,
time_limit=ml_time,
progress=progress,
problem_type="RA")
ra_ret = int(min(score.loc[i, "RetNum"]["Random"]))
df_seq = pd.concat([df_seq, | pd.Series(seq_ra) | pandas.Series |
# pylint: disable=E1101
from datetime import datetime, timedelta
from pandas.compat import range, lrange, zip, product
import numpy as np
from pandas import Series, TimeSeries, DataFrame, Panel, isnull, notnull, Timestamp
from pandas.tseries.index import date_range
from pandas.tseries.offsets import Minute, BDay
from pandas.tseries.period import period_range, PeriodIndex, Period
from pandas.tseries.resample import DatetimeIndex, TimeGrouper
import pandas.tseries.offsets as offsets
import pandas as pd
import unittest
import nose
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal)
import pandas.util.testing as tm
bday = BDay()
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest
class TestResample(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
def test_custom_grouper(self):
dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10))
s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')
b = TimeGrouper(Minute(5))
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
b = TimeGrouper(Minute(5), closed='right', label='right')
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
self.assertEquals(g.ngroups, 2593)
self.assert_(notnull(g.mean()).all())
# construct expected val
arr = [1] + [5] * 2592
idx = dti[0:-1:5]
idx = idx.append(dti[-1:])
expect = Series(arr, index=idx)
# GH2763 - return in put dtype if we can
result = g.agg(np.sum)
assert_series_equal(result, expect)
df = DataFrame(np.random.rand(len(dti), 10), index=dti, dtype='float64')
r = df.groupby(b).agg(np.sum)
self.assertEquals(len(r.columns), 10)
self.assertEquals(len(r.index), 2593)
def test_resample_basic(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=date_range('1/1/2000', periods=4, freq='5min'))
assert_series_equal(result, expected)
self.assert_(result.index.name == 'index')
result = s.resample('5min', how='mean', closed='left', label='right')
expected = Series([s[:5].mean(), s[5:10].mean(), s[10:].mean()],
index=date_range('1/1/2000 00:05', periods=3,
freq='5min'))
assert_series_equal(result, expected)
s = self.series
result = s.resample('5Min', how='last')
grouper = TimeGrouper(Minute(5), closed='left', label='left')
expect = s.groupby(grouper).agg(lambda x: x[-1])
assert_series_equal(result, expect)
def test_resample_basic_from_daily(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to weekly
result = s.resample('w-sun', how='last')
self.assertEquals(len(result), 3)
self.assert_((result.index.dayofweek == [6, 6, 6]).all())
self.assertEquals(result.irow(0), s['1/2/2005'])
self.assertEquals(result.irow(1), s['1/9/2005'])
self.assertEquals(result.irow(2), s.irow(-1))
result = s.resample('W-MON', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [0, 0]).all())
self.assertEquals(result.irow(0), s['1/3/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-TUE', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [1, 1]).all())
self.assertEquals(result.irow(0), s['1/4/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-WED', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [2, 2]).all())
self.assertEquals(result.irow(0), s['1/5/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-THU', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [3, 3]).all())
self.assertEquals(result.irow(0), s['1/6/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
result = s.resample('W-FRI', how='last')
self.assertEquals(len(result), 2)
self.assert_((result.index.dayofweek == [4, 4]).all())
self.assertEquals(result.irow(0), s['1/7/2005'])
self.assertEquals(result.irow(1), s['1/10/2005'])
# to biz day
result = s.resample('B', how='last')
self.assertEquals(len(result), 7)
self.assert_((result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all())
self.assertEquals(result.irow(0), s['1/2/2005'])
self.assertEquals(result.irow(1), s['1/3/2005'])
self.assertEquals(result.irow(5), s['1/9/2005'])
self.assert_(result.index.name == 'index')
def test_resample_frame_basic(self):
df = tm.makeTimeDataFrame()
b = TimeGrouper('M')
g = df.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
result = df.resample('A')
assert_series_equal(result['A'], df['A'].resample('A'))
result = df.resample('M')
assert_series_equal(result['A'], df['A'].resample('M'))
df.resample('M', kind='period')
df.resample('W-WED', kind='period')
def test_resample_loffset(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right',
loffset=timedelta(minutes=1))
idx = date_range('1/1/2000', periods=4, freq='5min')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=idx + timedelta(minutes=1))
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset='1min')
assert_series_equal(result, expected)
expected = s.resample(
'5min', how='mean', closed='right', label='right',
loffset=Minute(1))
assert_series_equal(result, expected)
self.assert_(result.index.freq == Minute(5))
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
ser = Series(np.random.rand(len(dti)), dti)
# to weekly
result = ser.resample('w-sun', how='last')
expected = ser.resample('w-sun', how='last', loffset=-bday)
self.assertEqual(result.index[0] - bday, expected.index[0])
def test_resample_upsample(self):
# from daily
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D', name='index')
s = Series(np.random.rand(len(dti)), dti)
# to minutely, by padding
result = s.resample('Min', fill_method='pad')
self.assertEquals(len(result), 12961)
self.assertEquals(result[0], s[0])
self.assertEquals(result[-1], s[-1])
self.assert_(result.index.name == 'index')
def test_upsample_with_limit(self):
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t', fill_method='ffill', limit=2)
expected = ts.reindex(result.index, method='ffill', limit=2)
assert_series_equal(result, expected)
def test_resample_ohlc(self):
s = self.series
grouper = TimeGrouper(Minute(5))
expect = s.groupby(grouper).agg(lambda x: x[-1])
result = s.resample('5Min', how='ohlc')
self.assertEquals(len(result), len(expect))
self.assertEquals(len(result.columns), 4)
xs = result.irow(-2)
self.assertEquals(xs['open'], s[-6])
self.assertEquals(xs['high'], s[-6:-1].max())
self.assertEquals(xs['low'], s[-6:-1].min())
self.assertEquals(xs['close'], s[-2])
xs = result.irow(0)
self.assertEquals(xs['open'], s[0])
self.assertEquals(xs['high'], s[:5].max())
self.assertEquals(xs['low'], s[:5].min())
self.assertEquals(xs['close'], s[4])
def test_resample_ohlc_dataframe(self):
df = (pd.DataFrame({'PRICE': {Timestamp('2011-01-06 10:59:05', tz=None): 24990,
Timestamp('2011-01-06 12:43:33', tz=None): 25499,
Timestamp('2011-01-06 12:54:09', tz=None): 25499},
'VOLUME': {Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
Timestamp('2011-01-06 12:54:09', tz=None): 100000000}})
).reindex_axis(['VOLUME', 'PRICE'], axis=1)
res = df.resample('H', how='ohlc')
exp = pd.concat([df['VOLUME'].resample('H', how='ohlc'),
df['PRICE'].resample('H', how='ohlc')],
axis=1,
keys=['VOLUME', 'PRICE'])
assert_frame_equal(exp, res)
df.columns = [['a', 'b'], ['c', 'd']]
res = df.resample('H', how='ohlc')
exp.columns = pd.MultiIndex.from_tuples([('a', 'c', 'open'), ('a', 'c', 'high'),
('a', 'c', 'low'), ('a', 'c', 'close'), ('b', 'd', 'open'),
('b', 'd', 'high'), ('b', 'd', 'low'), ('b', 'd', 'close')])
assert_frame_equal(exp, res)
# dupe columns fail atm
# df.columns = ['PRICE', 'PRICE']
def test_resample_dup_index(self):
# GH 4812
# dup columns with resample raising
df = DataFrame(np.random.randn(4,12),index=[2000,2000,2000,2000],columns=[ Period(year=2000,month=i+1,freq='M') for i in range(12) ])
df.iloc[3,:] = np.nan
result = df.resample('Q',axis=1)
expected = df.groupby(lambda x: int((x.month-1)/3),axis=1).mean()
expected.columns = [ Period(year=2000,quarter=i+1,freq='Q') for i in range(4) ]
assert_frame_equal(result, expected)
def test_resample_reresample(self):
dti = DatetimeIndex(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10),
freq='D')
s = Series(np.random.rand(len(dti)), dti)
bs = s.resample('B', closed='right', label='right')
result = bs.resample('8H')
self.assertEquals(len(result), 22)
tm.assert_isinstance(result.index.freq, offsets.DateOffset)
self.assert_(result.index.freq == offsets.Hour(8))
def test_resample_timestamp_to_period(self):
ts = _simple_ts('1/1/1990', '1/1/2000')
result = ts.resample('A-DEC', kind='period')
expected = ts.resample('A-DEC')
expected.index = period_range('1990', '2000', freq='a-dec')
assert_series_equal(result, expected)
result = ts.resample('A-JUN', kind='period')
expected = ts.resample('A-JUN')
expected.index = period_range('1990', '2000', freq='a-jun')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period')
expected = ts.resample('M')
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
def test_ohlc_5min(self):
def _ohlc(group):
if isnull(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
rng = date_range('1/1/2000 00:00:00', '1/1/2000 5:59:50',
freq='10s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', how='ohlc', closed='right',
label='right')
self.assert_((resampled.ix['1/1/2000 00:00'] == ts[0]).all())
exp = _ohlc(ts[1:31])
self.assert_((resampled.ix['1/1/2000 00:05'] == exp).all())
exp = _ohlc(ts['1/1/2000 5:55:01':])
self.assert_((resampled.ix['1/1/2000 6:00:00'] == exp).all())
def test_downsample_non_unique(self):
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(5).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
result = ts.resample('M', how='mean')
expected = ts.groupby(lambda x: x.month).mean()
self.assertEquals(len(result), 2)
assert_almost_equal(result[0], expected[1])
assert_almost_equal(result[1], expected[2])
def test_asfreq_non_unique(self):
# GH #1077
rng = date_range('1/1/2000', '2/29/2000')
rng2 = rng.repeat(2).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
self.assertRaises(Exception, ts.asfreq, 'B')
def test_resample_axis1(self):
rng = date_range('1/1/2000', '2/29/2000')
df = DataFrame(np.random.randn(3, len(rng)), columns=rng,
index=['a', 'b', 'c'])
result = df.resample('M', axis=1)
expected = df.T.resample('M').T
tm.assert_frame_equal(result, expected)
def test_resample_panel(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', axis=1)
def p_apply(panel, f):
result = {}
for item in panel.items:
result[item] = f(panel[item])
return Panel(result, items=panel.items)
expected = p_apply(panel, lambda x: x.resample('M'))
tm.assert_panel_equal(result, expected)
panel2 = panel.swapaxes(1, 2)
result = panel2.resample('M', axis=2)
expected = p_apply(panel2, lambda x: x.resample('M', axis=1))
tm.assert_panel_equal(result, expected)
def test_resample_panel_numpy(self):
rng = date_range('1/1/2000', '6/30/2000')
n = len(rng)
panel = Panel(np.random.randn(3, n, 5),
items=['one', 'two', 'three'],
major_axis=rng,
minor_axis=['a', 'b', 'c', 'd', 'e'])
result = panel.resample('M', how=lambda x: x.mean(1), axis=1)
expected = panel.resample('M', how='mean', axis=1)
tm.assert_panel_equal(result, expected)
panel = panel.swapaxes(1, 2)
result = panel.resample('M', how=lambda x: x.mean(2), axis=2)
expected = panel.resample('M', how='mean', axis=2)
tm.assert_panel_equal(result, expected)
def test_resample_anchored_ticks(self):
# If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
# "anchor" the origin at midnight so we get regular intervals rather
# than starting from the first timestamp which might start in the middle
# of a desired interval
rng = date_range('1/1/2000 04:00:00', periods=86400, freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
freqs = ['t', '5t', '15t', '30t', '4h', '12h']
for freq in freqs:
result = ts[2:].resample(freq, closed='left', label='left')
expected = ts.resample(freq, closed='left', label='left')
assert_series_equal(result, expected)
def test_resample_base(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 02:00', freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', base=2)
exp_rng = date_range('12/31/1999 23:57:00', '1/1/2000 01:57',
freq='5min')
self.assert_(resampled.index.equals(exp_rng))
def test_resample_daily_anchored(self):
rng = date_range('1/1/2000 0:00:00', periods=10000, freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
result = ts[2:].resample('D', closed='left', label='left')
expected = ts.resample('D', closed='left', label='left')
assert_series_equal(result, expected)
def test_resample_to_period_monthly_buglet(self):
# GH #1259
rng = date_range('1/1/2000', '12/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('M', kind='period')
exp_index = period_range('Jan-2000', 'Dec-2000', freq='M')
self.assert_(result.index.equals(exp_index))
def test_resample_empty(self):
ts = _simple_ts('1/1/2000', '2/1/2000')[:0]
result = ts.resample('A')
self.assert_(len(result) == 0)
self.assert_(result.index.freqstr == 'A-DEC')
result = ts.resample('A', kind='period')
self.assert_(len(result) == 0)
self.assert_(result.index.freqstr == 'A-DEC')
xp = DataFrame()
rs = xp.resample('A')
assert_frame_equal(xp, rs)
def test_weekly_resample_buglet(self):
# #1327
rng = date_range('1/1/2000', freq='B', periods=20)
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('W')
expected = ts.resample('W-SUN')
assert_series_equal(resampled, expected)
def test_monthly_resample_error(self):
# #1451
dates = date_range('4/16/2012 20:00', periods=5000, freq='h')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
result = ts.resample('M')
def test_resample_anchored_intraday(self):
# #1471, #1458
rng = date_range('1/1/2012', '4/1/2012', freq='100min')
df = DataFrame(rng.month, index=rng)
result = df.resample('M')
expected = df.resample('M', kind='period').to_timestamp(how='end')
tm.assert_frame_equal(result, expected)
result = df.resample('M', closed='left')
exp = df.tshift(1, freq='D').resample('M', kind='period')
exp = exp.to_timestamp(how='end')
| tm.assert_frame_equal(result, exp) | pandas.util.testing.assert_frame_equal |
from sklearn.tree import DecisionTreeClassifier
import pytest
import numpy as np
import pandas as pd
from probatus.interpret import ShapModelInterpreter
from unittest.mock import patch
@pytest.fixture(scope='function')
def X_train():
return pd.DataFrame({'col_1': [1, 1, 1, 1],
'col_2': [0, 0, 0, 0],
'col_3': [1, 0, 1, 0]}, index=[1, 2, 3, 4])
@pytest.fixture(scope='function')
def y_train():
return pd.Series([1, 0, 1, 0], index=[1, 2, 3, 4])
@pytest.fixture(scope='function')
def X_test():
return pd.DataFrame({'col_1': [1, 1, 1, 1],
'col_2': [0, 0, 0, 0],
'col_3': [1, 0, 1, 0]}, index=[5, 6, 7, 8])
@pytest.fixture(scope='function')
def y_test():
return pd.Series([0, 0, 1, 0], index=[5, 6, 7, 8])
@pytest.fixture(scope='function')
def fitted_tree(X_train, y_train):
return DecisionTreeClassifier(max_depth=1, random_state=1).fit(X_train, y_train)
@pytest.fixture(scope='function')
def expected_feature_importance():
return pd.DataFrame({
'mean_abs_shap_value': [0.5, 0., 0.],
'mean_shap_value': [0., 0., 0.]}, index=['col_3', 'col_1', 'col_2'])
def test_shap_interpret(fitted_tree, X_train, y_train, X_test, y_test, expected_feature_importance):
class_names = ['neg', 'pos']
shap_interpret = ShapModelInterpreter(fitted_tree)
shap_interpret.fit(X_train, X_test, y_train, y_test, class_names=class_names)
# Check parameters
assert shap_interpret.fitted == True
shap_interpret._check_if_fitted()
assert shap_interpret.class_names == class_names
assert shap_interpret.auc_train == 1
assert shap_interpret.auc_test == pytest.approx(0.833, 0.01)
# Check expected shap values
assert (np.mean(np.abs(shap_interpret.shap_values), axis=0) == [0, 0, 0.5]).all()
importance_df = shap_interpret.compute()
| pd.testing.assert_frame_equal(expected_feature_importance, importance_df) | pandas.testing.assert_frame_equal |
from autodesk.states import INACTIVE, ACTIVE, DOWN
from pandas import Timedelta
import numpy as np
import pandas as pd
def enumerate_hours(start, end):
time = start
while time < end:
yield (time.weekday(), time.hour)
time = time + | Timedelta(hours=1) | pandas.Timedelta |
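# Worked example: enumerate_hours(Timestamp('2018-01-01 10:30'), Timestamp('2018-01-01 13:00'))
# yields (0, 10), (0, 11), (0, 12) -- (weekday, hour) pairs for Monday, hours 10 through 12.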
import os
import pandas as pd
import numpy as np
import datetime
from sklearn import linear_model
from scipy.special import erfinv
import scipy as sp
import matplotlib.pyplot as plt
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")
np.random.seed(7)
def load_data():
'''Load the input Excel files from AQUA, for necessary template you could ask Longxing'''
starttime = datetime.datetime.now()
print("-------------Data Loading--------------")
production_data,repair_data = pd.DataFrame(),pd.DataFrame()
Production_path = os.path.join(os.getcwd(), '01-Production')
Repair_path=os.path.join(os.getcwd(),'02-Repair')
dateparse = lambda x: pd.datetime.strptime(str(x), '%m/%d/%Y')
for file in os.listdir(Production_path):
File_name = os.path.join(Production_path, file)
Date_col=['Production date', 'Initial registration date','Engine production date']
File_data = | pd.read_excel(File_name, parse_dates=Date_col,date_parser=dateparse) | pandas.read_excel |
import pandas as pd
coverage = {'source': [], 'count': [], 'percentage': []}
coverage_df = | pd.DataFrame(coverage) | pandas.DataFrame |
import time
from collections import Counter
import warnings; warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from algorithms import ShapeletTransformer
from extractors.extractor import GeneticExtractor, MultiGeneticExtractor, SAXExtractor, LearningExtractor
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from tslearn.shapelets import grabocka_params_to_shapelet_size_dict
from tslearn.shapelets import ShapeletModel
from mstamp.mstamp_stomp import mstamp as mstamp_stomp
from mstamp.mstamp_stamp import mstamp as mstamp_stamp
# Prepare all the datasets
datasets = []
meta_info = [
#('ItalyPowerDemand', None, None, None, 13, 18), # (Acc: 0.93, Time: 35s)
#('SonyAIBORobotSurface1', None, None, None, 17, 54), # (Acc: 0.8519, Time: 218s)
#('SonyAIBORobotSurface2', None, None, None, 32, 63), # (Acc: 0.8519, Time: 131s)
#('MoteStrain', None, None, None, 16, 33), # (Acc: 0.8458, Time: xx)
('Beef', None, None, None, 15, 128), # (Acc: 0.90, Time: xx)
]
"""
Short runs:
------------
0 1 2
0 ItalyPowerDemand 36.171110 0.896016
1 SonyAIBORobotSurface1 26.520345 0.883527
2 SonyAIBORobotSurface2 33.203732 0.838405
3 MoteStrain 43.249932 0.802716
4 Beef 111.535088 0.533333
Longer runs:
------------
0 ItalyPowerDemand 186.922645 0.952381
1 SonyAIBORobotSurface1 132.779138 0.921797
2 SonyAIBORobotSurface2 164.222865 0.803778
3 MoteStrain 228.977391 0.753195
4 Beef 351.845052 0.500000
"""
"""
ItalyPowerDemand
SonyAIBORobotSurface1
SonyAIBORobotSurface2
MoteStrain
TwoLeadECG
ECGFiveDays
CBF
GunPoint
ECG200
DiatomSizeReduction
"""
def grabocka_params_to_shapelet_size_dict(n_ts, ts_sz, n_shapelets, l, r):
base_size = int(l * ts_sz)
d = {}
for sz_idx in range(r):
shp_sz = base_size * (sz_idx + 1)
d[shp_sz] = n_shapelets
return d
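# Worked example: with ts_sz=100, l=0.1 and r=3 the override above returns
# {10: n_shapelets, 20: n_shapelets, 30: n_shapelets}, i.e. the same number of
# shapelets at each of r multiples of the base length int(l * ts_sz); n_ts is unused.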
def estimate_min_max(X, y, extractor, min_perc=25, max_perc=75, min_len=3, max_len=None, iterations=5):
shapelet_lengths = []
for _ in range(iterations):
rand_idx = np.random.choice(range(len(X)), size=10, replace=False)
X_sub = X[rand_idx, :]
y_sub = y[rand_idx]
map_dict = {}
for j, c in enumerate(np.unique(y_sub)):
map_dict[c] = j
y_sub = np.vectorize(map_dict.get)(y_sub)
shapelet_lengths += [len(x) for x in extractor.extract(X_sub, y_sub, min_len=min_len, max_len=max_len, nr_shapelets=10)]
_min = int(np.percentile(shapelet_lengths, min_perc))
_max = int(np.percentile(shapelet_lengths, max_perc))
if _min == _max:
_max += 1
print('Estimated a minimum and maximum length:', _min, _max)
return _min, _max
result_vectors = []
for dataset_name, start_idx, end_idx, samples_per_class, min_len, max_len in meta_info:
print(dataset_name)
train_path = '/home/giles/Projects/pyShapelets/pyshapelets/data/partitioned/{}/{}_train.csv'.format(dataset_name, dataset_name)
test_path = '/home/giles/Projects/pyShapelets/pyshapelets/data/partitioned/{}/{}_test.csv'.format(dataset_name, dataset_name)
train_df = | pd.read_csv(train_path) | pandas.read_csv |
import os
from datetime import datetime
import pandas as pd
from read import clean_read
def relate_gauges_to_storms(storm_file, storm_effect_folder, ext='.txt'):
"""
Finds what dates correspond to a hurricane landfall for gauges
Args:
storm_file: a csv that relates storm names to landfall dates, with at minimum headers 'HURRICANE'
and 'LANDFALL'
storm_effect_folder: a folder full of csvs where each csv is titled after a storm name and only
contains the gauges that the storm affected. Headerless.
ext: the extension for the files in storm_effect_folder
Returns: a dictionary where each key is a gauge number, and the value is a dict that relates a storm name
to a date
"""
storms = | pd.read_csv(storm_file) | pandas.read_csv |
import pandas as pd
import numpy as np
from sklearn.metrics import roc_curve, auc, confusion_matrix, precision_score, recall_score, f1_score
from sklearn.metrics import average_precision_score, precision_recall_curve
from ._woe_binning import woe_binning, woe_binning_2, woe_binning_3
class Metrics:
def __init__(self, df, actual, prediction):
self.df = df
self.target = actual
self.actual = df[actual]
self.prediction = df[prediction]
self.gains = self.calculate_gains()
self.ks = self.ks()
self.gini = self.gini()
self.tn, self.fp, self.fn, self.tp, self.precision, self.recall, self.f1_score = self.precision_recall_f1_score()
def calculate_gains(self):
"""Returns a pandas dataframe with gains along with KS and Gini calculated"""
self.df['scaled_score'] = (self.df['positive_probability']*1000000).round(0)
gains = self.df.groupby('scaled_score')[self.target].agg(['count','sum'])
gains.columns = ['total','responders']
gains.reset_index(inplace=True)
gains = gains.sort_values(by='scaled_score', ascending=False)
gains['non_responders'] = gains['total'] - gains['responders']
gains['cum_resp'] = gains['responders'].cumsum()
gains['cum_non_resp'] = gains['non_responders'].cumsum()
gains['total_resp'] = gains['responders'].sum()
gains['total_non_resp'] = gains['non_responders'].sum()
gains['perc_resp'] = (gains['responders']/gains['total_resp'])*100
gains['perc_non_resp'] = (gains['non_responders']/gains['total_non_resp'])*100
gains['perc_cum_resp'] = gains['perc_resp'].cumsum()
gains['perc_cum_non_resp'] = gains['perc_non_resp'].cumsum()
gains['k_s'] = gains['perc_cum_resp'] - gains['perc_cum_non_resp']
return gains
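# The gains table drives the KS statistic: k_s is the gap between the cumulative % of responders and the
# cumulative % of non-responders per score bucket, and KS is reported as max(k_s) over the table (see ks()).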
def get_threshold(self):
"""Returns a pandas dataframe with y_pred based on threshold from roc_curve."""
fpr, tpr, threshold = roc_curve(self.actual, self.prediction)
threshold_cutoff_df = pd.DataFrame({'fpr': fpr, 'tpr': tpr, 'threshold': threshold})
threshold_cutoff_df['distance'] = ((threshold_cutoff_df['fpr']-0)**2+(threshold_cutoff_df['tpr']-1)**2)**0.5
threshold_cutoff_df['distance_diff'] = abs(threshold_cutoff_df['distance'].diff(periods=1))
for index, rows in threshold_cutoff_df.iterrows():
if index != 0 and index != threshold_cutoff_df.shape[0]-1:
curr_val = threshold_cutoff_df.loc[index, 'distance_diff']
prev_val = threshold_cutoff_df.loc[index-1, 'distance_diff']
next_val = threshold_cutoff_df.loc[index+1, 'distance_diff']
if curr_val>prev_val and curr_val>next_val:
threshold_cutoff = threshold_cutoff_df.loc[index, 'threshold']
break
return threshold_cutoff
def gini(self):
fpr, tpr, threshold = roc_curve(self.actual, self.prediction)
auroc = auc(fpr, tpr)
gini = 2*auroc -1
return gini
def ks(self):
gains = self.gains
return gains['k_s'].max()
def precision_recall_f1_score(self):
threshold_cutoff = self.get_threshold()
self.y_pred = np.where(self.prediction>=threshold_cutoff,1,0)
self.df['y_pred'] = self.y_pred
tn, fp, fn, tp = confusion_matrix(self.actual, self.y_pred).ravel()
precision = precision_score(self.actual, self.y_pred)
recall = recall_score(self.actual, self.y_pred)
f1 = f1_score(self.actual, self.y_pred)
return tn, fp, fn, tp, precision, recall, f1
def to_dict(self):
return {'ks': self.ks, 'gini': self.gini, 'tn': self.tn, 'tp': self.tp, 'fn': self.fn, 'fp': self.fp, 'precision': self.precision, 'recall': self.recall, 'f1_score': self.f1_score}
def standard_metrics(df, target_col, prediction_col):
"""Returns a dict with all metrics - Gini, KS, Precision, Recall, F1 Score, True Negative, True Positive, False Positive, False Negative."""
metrics = Metrics(df, target_col, prediction_col)
return metrics.to_dict()
def quick_psi(dev, val):
"""Calculate PSI from 2 arrays - dev and val"""
return sum([(a-b)*np.log(a/b) for (a,b) in zip(dev,val)])
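# Illustrative usage (hypothetical decile shares, each list summing to 1):
#   dev_shares = [0.10] * 10
#   val_shares = [0.12, 0.11, 0.10, 0.10, 0.09, 0.10, 0.10, 0.09, 0.10, 0.09]
#   quick_psi(dev_shares, val_shares)  # small value; a common rule of thumb reads <0.1 as stable,
#                                      # 0.1-0.25 as a moderate shift and >0.25 as a major shift.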
def psi(dev, val, target='positive_probability', n_bins=10):
"""
Returns a pandas dataframe with psi column (Population Stability Index) after creating 10 deciles.
Code includes creating a score calculated as round(500 - 30*log2(100*(p/(1-p))), 0), where p is the predicted probability.
We need to pass both dev and val at same time to apply same bins created on dev dataframe.
"""
dev['score'] = dev[target].apply(lambda x: round(500-30*np.log2(100*(x/(1-x))), 0))
val['score'] = val[target].apply(lambda x: round(500-30*np.log2(100*(x/(1-x))), 0))
_, bins = pd.qcut(dev.score, n_bins, retbins=True, precision=0)
bins = [int(i) if abs(i)!=np.inf else i for i in bins]
dev['bins'] = pd.cut(dev.score, bins)
val['bins'] = pd.cut(val.score, bins)
dev_bins = dev.bins.value_counts(sort=False, normalize=True)
val_bins = val.bins.value_counts(sort=False, normalize=True)
psi_ = pd.concat([dev_bins, val_bins], axis=1)
psi_.columns = ['dev', 'val']
psi_['psi'] = (psi_.dev - psi_.val)*np.log(psi_.dev/psi_.val)
return psi_
def gsi(data, col='GENDER', col_val='F', target='positive_probability', n_bins=10):
"""
Returns a pandas dataframe with gsi columns (Group Stability Index) after creating n bins.
Args:
data: pandas dataframe
col: Columns on which GSI has to be calculated (ex: Gender column)
col_val: selected value will be compared with rest of the values (ex: F vs Rest)
target: score column
n_bins: number of bins to be created (Default=10)
"""
df = data.copy()
df['decile'] = pd.qcut(df[target], n_bins, labels=False)
df.loc[df[col]!=col_val, col] = 'Rest'
pivot_ = df.groupby(['decile', col])[target].count().unstack()
pivot = pivot_.div(pivot_.sum(axis=0), axis=1)
pivot['gsi'] = (pivot[col_val]-pivot['Rest'])*np.log(pivot[col_val]/pivot['Rest'])
return pivot
def iv(df, suffix='_dev'):
"""Returns a pandas dataframe with calculated fields - resp_rate, perc_dist, perc_non_resp, perc_resp, raw_odds, ln_odds, iv, exp_resp, exp_non_resp, chi_square."""
df['resp_rate'+suffix] = (df['responders'+suffix]*100)/df['total'+suffix]
df['perc_dist'+suffix] = (df['total'+suffix]*100)/df.groupby('var_name')['total'+suffix].transform('sum')
df['perc_non_resp'+suffix] = (df['non_responders'+suffix]*100)/df.groupby('var_name')['non_responders'+suffix].transform('sum')
df['perc_resp'+suffix] = (df['responders'+suffix]*100)/df.groupby('var_name')['responders'+suffix].transform('sum')
df['raw_odds'+suffix] = df.apply(lambda r: 0 if r['perc_resp'+suffix]==0 else r['perc_non_resp'+suffix]/r['perc_resp'+suffix], axis=1)
df['ln_odds'+suffix] = df['raw_odds'+suffix].apply(lambda x: 0 if abs(np.log(x))==np.inf else np.log(x))
df['iv'+suffix] = (df['perc_non_resp'+suffix]-df['perc_resp'+suffix])*df['ln_odds'+suffix]/100
df['exp_resp'+suffix] = df['total'+suffix]*df.groupby('var_name')['responders'+suffix].transform('sum')/df.groupby('var_name')['total'+suffix].transform('sum')
df['exp_non_resp'+suffix] = df['total'+suffix]*df.groupby('var_name')['non_responders'+suffix].transform('sum')/df.groupby('var_name')['total'+suffix].transform('sum')
df['chi_square'+suffix] = (((df['responders'+suffix]-df['exp_resp'+suffix])**2)/df['exp_resp'+suffix]) + (((df['non_responders'+suffix]-df['exp_non_resp'+suffix])**2)/df['exp_non_resp'+suffix])
return df
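# In WOE terms, the per-bin contribution computed above is (%non_responders - %responders) * ln(%non_responders / %responders),
# divided by 100 because the distributions are held as percentages; summing the 'iv' column per variable gives that
# variable's information value (see iv_var below).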
def iv_var(df, var_name, resp_name, suffix='_dev', var_cuts=None):
"""Returns IV of a variable"""
summ_df, _ = woe_bins(df, var_name, resp_name, suffix, var_cuts)
iv_ = iv(summ_df, suffix)
return iv_, iv_['iv'+suffix].sum()
def woe_bins(df, var_name, resp_name, suffix='_dev', var_cuts=None):
"""
Returns a pandas dataframe, var_cuts after creating bins.
Returns:
df: pandas dataframe has var_cuts_string, total, responders, non_responders, var_name (with _dev or _val suffix)
var_cuts: list of Interval items to be used on val file.
"""
df1 = df[[resp_name, var_name]]
if (np.issubdtype(df1[var_name].dtype, np.number)):
n = df1[var_name].nunique()
if var_cuts is None:
suffix = '_dev'
var_cuts = woe_binning_3(df1, resp_name, var_name, 0.05, 0.00001, 0, 50, 'bad', 'good')
var_cuts = list(set(var_cuts))
var_cuts.sort()
df1.loc[:,'var_binned'] = pd.cut(df[var_name], var_cuts, right=True, labels=None, retbins=False, precision=10, include_lowest=False)
var_min = float(df1[var_name].min())
var_max = float(df1[var_name].max())
summ_df = df1.groupby('var_binned')[resp_name].agg(['count','sum']).reset_index()
summ_df['delta'] = summ_df['count'] - summ_df['sum']
summ_df['var_name'] = var_name
summ_df.columns = ['var_cuts', 'total'+suffix, 'responders'+suffix, 'non_responders'+suffix, 'var_name']
summ_df['var_cuts_string'+suffix] = summ_df.var_cuts.apply(lambda x: str(x.left if x.left!=-np.inf else var_min)+' To '+str(x.right if x.right!=np.inf else var_max))
else:
df1[var_name].fillna('Blank', inplace=True)
summ_df = df1.groupby(var_name)[resp_name].agg(['count','sum']).reset_index()
summ_df['delta'] = summ_df['count'] - summ_df['sum']
summ_df['var_name'] = var_name
summ_df.columns = ['var_cuts_string'+suffix, 'total'+suffix, 'responders'+suffix, 'non_responders'+suffix, 'var_name']
summ_df['var_cuts'] = summ_df['var_cuts_string'+suffix]
return summ_df[summ_df['total'+suffix]!=0], var_cuts
def csi(dev_df, val_df, var_list, resp_name):
"""Returns a pandas dataframe with csi, csi_var, perc_csi columns (Charecteristic Stability Index) calculated based on both dev and val dataframes."""
dev_df.fillna(0, inplace=True)
val_df.fillna(0, inplace=True)
dev_dfs = []
var_cuts = {}
for var_name in var_list:
summ_df, cut = woe_bins(dev_df, var_name, resp_name, '_dev')
dev_dfs.append(summ_df)
var_cuts[var_name] = cut
dev = pd.concat(dev_dfs, axis=0)
dev = iv(dev, '_dev')
val_dfs = []
val_cuts = {}
for var_name in var_list:
val_summ_df, val_cut = woe_bins(val_df, var_name, resp_name, '_val', var_cuts[var_name])
val_dfs.append(val_summ_df)
val_cuts[var_name] = val_cut
val = | pd.concat(val_dfs, axis=0) | pandas.concat |
# %%
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
from pymatgen.core import Composition
from scipy.stats import sem
plt.rcParams.update({"font.size": 20})
plt.rcParams["axes.linewidth"] = 2.5
plt.rcParams["lines.linewidth"] = 3.5
plt.rcParams["xtick.major.size"] = 7
plt.rcParams["xtick.major.width"] = 2.5
plt.rcParams["xtick.minor.size"] = 5
plt.rcParams["xtick.minor.width"] = 2.5
plt.rcParams["ytick.major.size"] = 7
plt.rcParams["ytick.major.width"] = 2.5
plt.rcParams["ytick.minor.size"] = 5
plt.rcParams["ytick.minor.width"] = 2.5
plt.rcParams["legend.fontsize"] = 20
# %%
fig, ax = plt.subplots(1, figsize=(10, 9))
markers = [
"o",
"v",
"^",
"H",
"D",
"",
]
tars = []
df_hull_list = []
df_list = []
df_list_cgcnn = []
df_list_rel = []
for i, m in enumerate(markers):
offsets = 1
title = f"Batch-{i+offsets}"
if i < 5:
df_cgcnn = pd.read_csv(
f"results/manuscript/step_{i+offsets}_cgcnn_org.csv",
comment="#",
na_filter=False,
)
df_rel = pd.read_csv(
f"results/manuscript/step_{i+offsets}_cgcnn_cse.csv",
comment="#",
na_filter=False,
)
df = pd.read_csv(
f"results/manuscript/step_{i+offsets}_wren_org.csv",
comment="#",
na_filter=False,
)
df_hull = pd.read_csv(
f"datasets/wbm-ehull/step-{i+offsets}-e_hull.csv",
comment="#",
na_filter=False,
)
df_hull_list.append(df_hull)
df_list.append(df)
df_list_cgcnn.append(df_cgcnn)
df_list_rel.append(df_rel)
continue
else:
df_hull = | pd.concat(df_hull_list) | pandas.concat |
import pandas as pd
import pvlib
import re
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from Find_Panels_DB import panels_iguais_df
def module_name(mod):
part = mod.split('_')
name = {}
for i in range(len(part)):
if part[i].isdigit():
year_int = int(part[i])
if 2000 <= year_int <= 2100:
name[i] = year_int
break
else:
name[i] = year_int
else:
name[i] = part[i]
name_final = ''
for i in range(len(name)):
if name[i] == '':
pass
else:
if i > 0:
if name[i - 1] == ' ' and name[i] == ' ':
pass
else:
name_final = f'{name_final} {name[i]}'
else:
name_final = f'{name_final} {name[i]}'
return name_final
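# module_name collapses the underscore-separated database identifier into a spaced string and stops at the
# first numeric token in the range 2000-2100 (treated as the year); any tokens after it are dropped.
# Illustrative example (hypothetical identifier):
#   module_name('SunPower_SPR_X21_345_2018') -> ' SunPower SPR X21 345 2018'   # note the leading space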
def searched_deco(panel):
mod = module_name(panel).split(' ')
array = re.findall(r'[0-9]+', module_name(panel))
Power = {}
for i in range(len(array)):
tw = int(array[i])
if 999 > tw > 30:
try:
Power = array[i]
except:
pass
brand = mod[1]
year = mod[len(mod) - 1]
panel_nameinfo = {'Brand': brand, 'Year': year, 'Power': Power}
panel_nameinfo = pd.Series(panel_nameinfo)
i = 2
factor = {}
for i in range(2, len(mod)):
factor[i - 2] = mod[i]
i = i + 1
factor = pd.Series(factor)
panel_nameinfo = panel_nameinfo.append(factor)
return panel_nameinfo
def plot_styling():
plt.style.use('dark_background')
plt.gca().yaxis.grid(True, color='gray')
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Ubuntu'
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 10
plt.rcParams['axes.labelsize'] = 10
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['xtick.labelsize'] = 8
plt.rcParams['ytick.labelsize'] = 8
plt.rcParams['legend.fontsize'] = 10
plt.rcParams['figure.titlesize'] = 12
for spine in plt.gca().spines.values():
spine.set_visible(False)
def plot_PowervsPrice(df, regr, ransac, x, W):
plot_styling()
df.plot(x='Power', y='Price', style='o')
plt.title(f'{searched_brand} Power vs Price interpolation for {W}W')
plt.xlabel('Power [W]')
plt.ylabel('Price [$]')
df_aux = df.describe()
min_price = df_aux.loc['min']['Price']
min_power = df_aux.loc['min']['Power']
max_price = df_aux.loc['max']['Price']
max_power = df_aux.loc['max']['Power']
plt.ylim((min_price*0.5, max_price + min_price*0.5))
plt.xlim((min_power * 0.5, max_power + min_power * 0.5))
line_x = np.arange(min_power * 0.7, max_power + min_power * 0.3)[:, np.newaxis]
line_y = regr.predict(line_x)
line_y_ransac = ransac.predict(line_x)
plt.plot(line_x, line_y_ransac,
color='teal',
alpha=0.4,
linewidth=10,
label='RANSAC Regressor',
solid_capstyle = "round",
zorder=0)
i_x = W
i_y = round(ransac.predict([[W]])[0][0], 2)
plt.annotate(f'${i_y} for {i_x}W',
xy=(i_x, i_y),
arrowprops=dict(arrowstyle='->'),
xytext=(i_x * 0.7, i_y * 1))
plt.plot(x, regr.predict(x),
color='salmon',
linewidth=2,
ls='-',
label='Linear Regressor',
zorder=1)
plt.legend(loc='upper left', frameon=False)
plt.gca().axes.get_yaxis().set_visible(True)
plt.savefig(f'C:\\Users\\<NAME>\\Desktop\\perkier tech\\Energy\\CODE\\Plots\\{searched_brand}_{W}Watts_plot.png')
def interpolating(df, W):
df = df.astype(float)
length = len(df)
x = df['Power'].values.reshape(length, 1)
y = df['Price'].values.reshape(length, 1)
regr = linear_model.LinearRegression()
regr.fit(x, y)
ransac = linear_model.RANSACRegressor()
ransac.fit(x, y)
inlier_mask = ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
prediction_ransac = ransac.predict([[W]])
plot_PowervsPrice(df, regr, ransac, x, W)
return prediction_ransac
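# Design note: both a plain least-squares fit and a RANSAC fit are trained on the power/price points, but only
# the RANSAC prediction is returned, on the assumption that individual mispriced listings act as outliers that
# would otherwise skew the interpolation; the linear fit is kept for comparison in the plot.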
def data_2():
prices_reader = open('C:\\Users\\<NAME>\\Desktop\\perkier tech\\Energy\\CODE\\Google API\\prices_solar_2.csv', 'rb')
prices_read = pd.read_csv(prices_reader, encoding='latin1')
database_name = str(prices_reader).split("'")
db_len = len(database_name)
database_name = database_name[db_len - 2]
database_name = database_name.split('\\')
db_len = len(database_name)
database_name = database_name[db_len - 1]
prices_reader.close()
j = 0
i = 0
searching_nameinfo = pd.DataFrame()
for i in range(len(prices_read)):
searching_brand_splited = prices_read.iloc[i][0].split(' ')[0]
if searching_brand_splited == searched_brand.split(' ')[0]:
df2 = {'Brand': prices_read.iloc[i][0],
'Model': prices_read.iloc[i][1],
'Price': prices_read.iloc[i][3],
'Size': prices_read.iloc[i][4],
'Power': prices_read.iloc[i][2],
'Weight': prices_read.iloc[i][5],
'Country': prices_read.iloc[i][6],
'Database': database_name}
searching_nameinfo = searching_nameinfo.append(df2, ignore_index=True)
j = j+1
i = i+1
else:
i = i+1
return searching_nameinfo, j
def data_1():
prices_reader = open('C:\\Users\\<NAME>\\Desktop\\perkier tech\\Energy\\CODE\\Google API\\prices.csv', 'rb')
database_name = str(prices_reader).split("'")
db_len = len(database_name)
database_name = database_name[db_len - 2]
database_name = database_name.split('\\')
db_len = len(database_name)
database_name = database_name[db_len - 1]
prices_read = pd.read_csv(prices_reader, encoding='latin1')
prices_reader.close()
j = 0
searching_nameinfo = pd.DataFrame()
for i in range(len(prices_read)):
searching_brand_splited = prices_read.iloc[i]['Brand'].split(' ')[0]
if searching_brand_splited == searched_brand.split(' ')[0]:
df2 = {'Brand': prices_read.iloc[i]['Brand'],
'Model': prices_read.iloc[i]['Solar Panel'],
'Price': prices_read.iloc[i]['$ per Panel'],
'Power': prices_read.iloc[i]['Factory'],
'Warranty': prices_read.iloc[i]['Warranty'],
'Country': prices_read.iloc[i]['Country'],
'Database': database_name}
searching_nameinfo = searching_nameinfo.append(df2, ignore_index=True)
j = j + 1
else:
i = i + 1
return searching_nameinfo, j
def data_set(searched_brand):
try:
searching_nameinfo_2, j_2 = data_2()
except:
searching_nameinfo_2, j_2 = pd.DataFrame(), 0
try:
searching_nameinfo_1, j_1 = data_1()
except:
searching_nameinfo_1, j_1 = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
"""
06/08/18
* Determine a null distributiom in order to set an appropriate p-value threshold
Steps
-----
For each mutant line
get n number of homs
relabel n wildtypes as mutants
run organ volume LM organ_volume ~ genotype + 'staging metric'
Notes
-----
The p- and t-values returned by lm(), which calls R's lm(), usually have specimen-level values appended to the
matrix.
This does not happen here, as mutants must be labelled 'mutant' for that to occur (see Rscrips/lmFast.R).
TODO: If the relabelled baselines were labelled 'mutant' instead of 'hom' or 'synthetic_hom', the specimen-level
p-values and t-values could be obtained from the same call to lm() as in the standard stats module.
"""
from os.path import expanduser
from typing import Union, Tuple, List
import random
from pathlib import Path
import math
from collections import Counter
import pandas as pd
import numpy as np
from scipy.special import comb
import statsmodels.formula.api as smf
from joblib import Parallel, delayed
import datetime
from logzero import logger
from tqdm import tqdm
import random
import itertools
from lama.stats.linear_model import lm_r, lm_sm
home = expanduser('~')
def generate_random_combinations(data: pd.DataFrame, num_perms):
logger.info('generating permutations')
data = data.drop(columns='staging', errors='ignore')
line_specimen_counts = get_line_specimen_counts(data)
result = {}
# now for each label calcualte number of combinations we need for each
for label in line_specimen_counts:
label_indices_result = []
counts = line_specimen_counts[label].value_counts()
number_of_lines = counts[counts.index != 0].sum() # Drop the lines with zero labels (have been qc'd out)
ratios = counts[counts.index != 0] / number_of_lines
num_combs = num_perms * ratios
# get wt data for label
label_data = data[[label, 'line']]
label_data = label_data[label_data.line == 'baseline']
label_data = label_data[~label_data[label].isna()]
# Sort out the numbers
records = []
for n, n_combs_to_try in num_combs.items():
n_combs_to_try = math.ceil(n_combs_to_try)
max_combs = int(comb(len(label_data), n))
# logger.info(f'max_combinations for n={n} and wt_n={len(label_data)} = {max_combs}')
records.append([n, n_combs_to_try, max_combs])
df = pd.DataFrame.from_records(records, columns=['n', 'num_combs', 'max_combs'], index='n').sort_index(ascending=True)
# test whether it's possible to have this number of permutations with data structure
print(f'Max combinations for label {label} is {df.max_combs.sum()}')
if num_perms > df.max_combs.sum():
raise ValueError(f'Max number of combinations is {df.max_combs.sum()}, you requested {num_perms}')
# Now spread the overflow from any ns to other groups
while True:
df['overflow'] = df.num_combs - df.max_combs # Get the 'overflow': requested permutations over the maximum unique combinations
groups_full = df[df.overflow >= 0].index
df['overflow'][df['overflow'] < 0] = 0
extra = df[df.overflow > 0].overflow.sum()
df.num_combs -= df.overflow
if extra < 1: # All combination amounts have been distributed
break
num_non_full_groups = len(df[df.overflow >= 0])
top_up_per_group = math.ceil(extra / num_non_full_groups)
for n, row in df.iterrows():
if n in groups_full:
continue
# Add the topup amount
row.num_combs += top_up_per_group
# now generate the indices
indx = label_data.index
for n, row in df.iterrows():
combs_gen = itertools.combinations(indx, n)
for i, combresult in enumerate(combs_gen):
if i == row.num_combs:
break
label_indices_result.append(combresult)
result[label] = label_indices_result
return result
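# The returned structure maps each label column to a list of index tuples, e.g. (hypothetical ids)
#   {'x3': [(wt_id_1, wt_id_4, wt_id_9), (wt_id_2, wt_id_7, wt_id_8), ...], ...}
# where each tuple names the baseline specimens to relabel as synthetic mutants for one permutation.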
def max_combinations(num_wts: int, line_specimen_counts: dict) -> int:
"""
num_wts
Total number of wild types
lines_n
{line_n: number of lines}
Returns
-------
Maximum number or permutations
"""
# Calculate the maximum number of permutations allowed given the WT n and the mutant n line structure.
results = {}
counts = line_specimen_counts.iloc[:,0].value_counts()
for n, num_lines in counts.items():
# Total number of combinations given WT n and this mut n
total_combs_for_n = int(comb(num_wts, n))
# Now weight based on how many lines have this n
total_combs_for_n /= num_lines
results[n] = total_combs_for_n
return int(min(results.values()))
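# Worked example (hypothetical n numbers): with 20 wild types and mutant lines of size {3: two lines, 5: one line},
# comb(20, 3) = 1140 shared between two lines gives 570, comb(20, 5) = 15504 for one line, so the cap is 570.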
def get_line_specimen_counts(input_data: pd.DataFrame) -> pd.DataFrame:
"""
For each mutant line get the number of specimens per label. Does not include specimen labels that are NaN, thereby
accounting for QC'd out labels
Parameters
----------
input_data
index: specimen_id
cols: label numbers (prefixed with x for use with statsmodels) e.g. x1, x2
line id e.g baseline
Returns
-------
index: line_id
cols: label numbers (prefixed with x for use with statsmodels) e.g. x1, x2
"""
if 'line' in input_data:
col = 'line'
elif 'genotype' in input_data:
col = 'genotype'
line_specimen_counts = input_data[input_data[col] != 'baseline'].groupby(col).count()
return line_specimen_counts
def null(input_data: pd.DataFrame,
num_perm: int,) -> Tuple[pd.DataFrame, pd.DataFrame, List]:
"""
Generate null distributions for line and specimen-level data
Parameters
----------
input_data
columns
staging
line
then multiple columns each one a label (organ)
Baselines must be labelled 'baseline' in the line column
num_perm
number of permutations
Returns
-------
line-level null distribution
specimen-level null distribution
Notes
-----
Labels must not start with a digit as R will throw a wobbly
"""
random.seed(999)
# Use the generic staging label from now on
input_data.rename(columns={'crl': 'staging', 'volume': 'staging'}, inplace=True)
label_names = input_data.drop(['staging', 'line'], axis='columns').columns
# Store p-value and t-value results. One tuple (len==num labels) per iteration
spec_p = []
# Create synthetic specimens by iteratively relabelling each baseline as synthetic mutant
baselines = input_data[input_data['line'] == 'baseline']
# Get the line specimen n numbers. Keep the first column
# line_specimen_counts = get_line_specimen_counts(input_data)
# Pregenerate all the combinations
wt_indx_combinations = generate_random_combinations(input_data, num_perm)
# Split data into a numpy array of raw data and a dataframe of staging and genotype for the LM code
data = baselines.drop(columns=['staging', 'line']).values
info = baselines[['staging', 'line']]
# Get the specimen-level null distribution, i.e. the distribution of p-values obtained from relabelling each
# baseline once. Loop over each specimen and set to 'synth_hom'
for index, _ in info.iterrows():
info.loc[:, 'genotype'] = 'wt' # Set all genotypes to WT
info.loc[[index], 'genotype'] = 'synth_hom' # Set the ith baseline to synth hom
row = data[info.index.get_loc(index), :]
# Get columns (labels) where the mutant specimen (as it's line level)
# has a null value (i.e. This specimen is QC-flagged at these labels)
# Set all values to zero
labels_to_skip = np.isnan(row)
if any(labels_to_skip):
# Set the whole label column to zero. R:lm() will return NaN for this column
d = np.copy(data)
d[:, labels_to_skip] = 0.0
else:
d = data
# Get a p-value for each organ
p, t = lm_sm(d, info) # TODO: move this to statsmodels
# Check that the number of p-values matches the number of data columns (labels)
if len(p) != data.shape[1]:
    raise ValueError(f'The number of p-values: {len(p)} does not match the number of input data columns: {data.shape[1]}')
spec_p.append(p)
spec_df = pd.DataFrame.from_records(spec_p, columns=label_names)
line_df = null_line(wt_indx_combinations, baselines, num_perm)
return strip_x([line_df, spec_df])
def null_line(wt_indx_combinations: dict,
data: pd.DataFrame,
num_perms=1000) -> pd.DataFrame:
"""
Generate pvalue null distributions for all labels in 'data'
NaN values are excluded, potentially resulting in different sets of specimens for each label. This makes it tricky to
use Rs vectorised lm() function, so we use statsmodels here and just loop over the data, with each label in its own
process using joblib.
Parameters
----------
wt_indx_combinations
Pre-generated combinations of wild type specimen indices per label. Used to choose which baselines become
synthetic mutants so the null matches our data in terms of sample number
data
Label data in each column except last 2 which are 'staging' and 'genotype'
num_perms
Usually about 10000
Returns
-------
DataFrame of null distributions. Each label in a column
Notes
-----
If QC has been applied to the data, we may have some NANs
"""
def prepare(label):
return data[[label, 'staging', 'genotype']]
data = data.rename(columns={'line': 'genotype'})
starttime = datetime.datetime.now()
cols = list(data.drop(['staging', 'genotype'], axis='columns').columns)
# Run each label on a thread
pdists = Parallel(n_jobs=-1)(delayed(_null_line_thread)
(prepare(i), num_perms, wt_indx_combinations, i) for i in tqdm(cols))
line_pdsist_df = pd.DataFrame(pdists).T
line_pdsist_df.columns = cols
endtime = datetime.datetime.now()
elapsed = endtime - starttime
print(f'Time taken for null distribution calculation: {elapsed}')
return line_pdsist_df
def _null_line_thread(*args) -> List[float]:
"""
Create a null distribution for a single label. This can put put onto a thread or process
Returns
-------
pvalue distribution
"""
data, num_perms, wt_indx_combinations, label = args
print('Generating null for', label)
label = data.columns[0]
data = data.astype({label: float,
                    'staging': float})
synthetics_sets_done = []
line_p = []
perms_done = 0
# Get combinations of WT indices for current label
indxs = wt_indx_combinations[label]
for comb in indxs:
data.loc[:, 'genotype'] = 'wt'
data.loc[data.index.isin(comb), 'genotype'] = 'synth_hom'
# _label_synthetic_mutants(data, n, synthetics_sets_done)
perms_done += 1
model = smf.ols(formula=f'{label} ~ C(genotype) + staging', data=data, missing='drop')
fit = model.fit()
p = fit.pvalues['C(genotype)[T.wt]']
line_p.append(p)
return line_p
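# For reference, each permutation above fits an ordinary least squares model via the statsmodels formula API,
#   <label> ~ C(genotype) + staging
# and records the p-value attached to the genotype term ('C(genotype)[T.wt]'), i.e. the wild-type vs
# synthetic-hom difference in organ volume after adjusting for the staging covariate.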
def _label_synthetic_mutants(info: pd.DataFrame, n: int, sets_done: List) -> bool:
"""
Given a dataframe of wild type data, relabel n baselines as synthetic mutant in place.
Keep track of combinations done in sets_done and do not duplicate
Parameters
----------
info
columns
label_num with 'x' prefix e.g. 'x1'
staging
genotype
n
how many specimens to relabel
sets_done
Contains Sets of previously selected specimen IDs
Returns
-------
True if able to find a new combination of mutant and wildtype
False if no more combinations are available for that specific n
"""
# Set all to wt genotype
info.loc[:, 'genotype'] = 'wt'
# label n baselines as mutants, sampled from the maximum number of combinations
max_comb = int(comb(len(info), n))
for i in range(max_comb):
    synthetics_mut_indices = random.sample(range(0, len(info)), n)
    if set(synthetics_mut_indices) not in sets_done:
        break
if i > max_comb - 1:
    msg = f"""Cannot find unique combinations of wild type baselines to relabel as synthetic mutants
    with a baseline n of {len(info)}. Choosing {n} synthetics.
    Try increasing the number of baselines or reducing the number of permutations"""
    logger.warning(msg)
    raise ValueError(msg)
sets_done.append(set(synthetics_mut_indices))
# info.ix is deprecated, so use .loc with positional index lookup instead
info.loc[info.index[synthetics_mut_indices], 'genotype'] = 'synth_hom'
return True
def strip_x(dfs):
for df in dfs:
df.columns = [x.strip('x') for x in df.columns]
yield df
def alternative(input_data: pd.DataFrame,
plot_dir: Union[None, Path] = None,
boxcox: bool = False) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""
Generate alternative (mutant) distributions for line- and specimen-level data
Parameters
----------
input_data
plot_dir
boxcox
Returns
-------
alternative distribution dataframes with either line or specimen as index.
0: line-level p values
1: specimen-level p values
2: line-level t-values
3: specimen-level t-values
"""
# Group by line and run sequentially
info_columns = ['staging', 'line', 'genotype'] # Columns of non-organ volumes in input_data
line_groupby = input_data.groupby('line')
label_names = list(input_data.drop(['staging', 'line'], axis='columns').columns)
baseline = input_data[input_data['line'] == 'baseline']
baseline.loc[:, 'genotype'] = 'wt'
alt_line_pvalues = []
alt_spec_pvalues = []
alt_line_t = []
alt_spec_t = []
# Get line-level alternative distributions
for line_id, line_df in line_groupby:
if line_id == 'baseline':
continue
line_df.loc[:, 'genotype'] = 'hom'
line_df.drop(['line'], axis=1)
df_wt_mut = pd.concat([baseline, line_df])
# LM code needs datapoints in a numpy array and genotype+staging in a dataframe
data_df = df_wt_mut.drop(columns=info_columns)
# Get columns (labels) where all specimens have null values (i.e. the whole line is QC-flagged at these labels)
labels_to_skip = [col for col, isany in line_df.any().iteritems() if not isany]
if labels_to_skip:
# Set the whole label column to zero. R:lm() will return NaN for this column
data_df[labels_to_skip] = 0.0
# Get a numpy array of the organ volumes
data = data_df.values
info = df_wt_mut[['staging', 'line', 'genotype']]
p: np.array
t: np.array
p, t = lm_sm(data, info) # returns p_values for all organs, 1 iteration
res_p = [line_id] + list(p) # line_name, label_1, label_2 ......
alt_line_pvalues.append(res_p)
res_t = [line_id] + list(t)
alt_line_t.append(res_t)
### Get specimen-level alternative distributions ###
mutants = input_data[input_data['line'] != 'baseline']
# baselines = input_data[input_data['line'] == 'baseline']
for specimen_id, row in mutants.iterrows():
row['genotype'] = 'hom'
line_id = row['line']
df_wt_mut = baseline.append(row)
data_df = df_wt_mut.drop(columns=['line', 'genotype', 'staging'])
# Get columns (labels) where the mutant specimen (as it's line level)
# has a null value (i.e. This specimen is QC-flagged at these labels)
labels_to_skip = [col for col, isany in | pd.DataFrame(row) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 10:31:02 2020
@author: mazal
"""
"""
=========================================
Support functions of imageio
=========================================
Purpose: Create support functions for the pydicom project.
"""
"""
Test mode 1 | Basics
testMode = True
reportMode = False
Test mode 2 | Function Report
testMode = False
reportMode = True
Commisionning mode
testMode = False
reportMode = False
"""
testMode = True
"""
=========================================
Function 1: Compute space index that depicts FVC status | Index type 1
=========================================
Purpose: Get space index
Code Source: The index prototype is available at generic_imageio.py
Raw code reference (see Tester.py): Test 18
"""
def IndexType1(productType,splitType,ID,interpolationMethod,testMode,reportMode):
# Conditioning | Phase -1: Set root based on: (1) productType; (2) splitType; (3) ID; (4) interpolation method
import os
import pandas as pd
import numpy as np
if productType == 'population':
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
if productType == 'prototype':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
if productType == 'sampling':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Sampling)/'
if splitType == 'test':
if(interpolationMethod == None):interpolationMethod = 'None'
imageioDataFrame_path_input = path_ProductType + 'outcome/' + 'jpgImageProcessing/' + 'test/' + ID + '/'+ interpolationMethod + '/'
else:
if(interpolationMethod == None):interpolationMethod = 'None'
imageioDataFrame_path_input = path_ProductType + 'outcome/' + 'jpgImageProcessing/' + 'train/' + ID + '/' + interpolationMethod + '/'
# Conditioning | Phase 0: Set input data: (1) input path; (2) filename list.
path_input = imageioDataFrame_path_input
os.chdir(path_input)
fileNameList = os.listdir(path_input)
# Computing | Index Type 1
indexTriad_List = []
for i in fileNameList:
filename = i
# Get CSV file
rawFile_DataFrame = pd.read_csv(filename)
rawFile_DataFrame = rawFile_DataFrame.drop(columns=['Unnamed: 0'])
# Get triad list and DataFrame
triadNumberElements_index = list(rawFile_DataFrame.index)
triadNumberElements = list(rawFile_DataFrame.columns)
triadNumberElements_range = int(len(list(triadNumberElements))/3)
triad_list = []
for i in triadNumberElements_index:
constant_growth = 3
variable_growth = 0 #start value
for j in range(0,triadNumberElements_range):
# Column-j series
## cg:constant growth (+3)
## cv:variable growth (+1 iteration)
## j:0 ## j=0 + cg=3 * cv=0 -> 0
## j:0 ## j=1 + cg=3 * cv=0 -> 1
## j:0 ## j=2 + cg=3 * cv=0 -> 2
## j:1 ## j=0 + cg=3 * cv=1 -> 3
## j:1 ## j=1 + cg=3 * cv=1 -> 4
## j:1 ## j=2 + cg=3 * cv=1 -> 5
## j:2 ## j=0 + cg=3 * cv=2 -> 6
## j:2 ## j=1 + cg=3 * cv=2 -> 7
## j:2 ## j=2 + cg=3 * cv=2 -> 8
## ...
## ...
## ...
## j:99 ## j=0 + cg=3 * cv=99 -> 297
## j:99 ## j=1 + cg=3 * cv=99 -> 298
## j:99 ## j=2 + cg=3 * cv=99 -> 299
variable_growth = j
x_value = 0 + constant_growth * variable_growth
y_value = 1 + constant_growth * variable_growth
z_value = 2 + constant_growth * variable_growth
x_col = str(x_value)
y_col = str(y_value)
z_col = str(z_value)
triadIndex = int(i)
x_triad = rawFile_DataFrame[x_col][triadIndex]
y_triad = rawFile_DataFrame[y_col][triadIndex]
z_triad = rawFile_DataFrame[z_col][triadIndex]
triad = np.array([x_triad,y_triad,z_triad])
#print(triad)
triad_list = triad_list + [triad]
####triad_dictionary = {'triad':triad_list}
####triad_DataFrame = pd.DataFrame(data=triad_dictionary)
# Define RGB Scale boundaries based on a grey scale
## RGB | Grey Scale |
### (1) 255,255,255
### (2) 224,224,224
### (3) 192,192,192
### (4) 160,160,160
### (5) 128,128,128
### (6) 96,96,96
### (7) 64,64,64
### (8) 32,32,32
### (9) 0,0,0
boundary9 = np.array([255,255,255])
boundary8 = np.array([224,224,224])
boundary7 = np.array([192,192,192])
boundary6 = np.array([160,160,160])
boundary5 = np.array([128,128,128])
boundary4 = np.array([96,96,96])
boundary3 = np.array([64,64,64])
boundary2 = np.array([32,32,32])
boundary1 = np.array([0,0,0])
GreyScaleBoundaries = [boundary1,boundary2,boundary3,boundary4,boundary5,boundary6,boundary7,boundary8,boundary9]
# Get new frequency group by RGB scale boundaries
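# Binning direction: each pixel triad is counted under the first boundary (scanning dark to light) whose max
# channel strictly exceeds the triad's max, so e.g. a triad with max 100 falls into the '[128, 128, 128]' bucket,
# and pure white triads (max 255) are never counted because no boundary exceeds them.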
frequencyRGB_Dictionary = {}
for i in triad_list:
for j in GreyScaleBoundaries:
# print("Iteration: ")
# print("Triads to evaluate")
# print("Triads of concern: ",i)
# print("Triad Boundary: ",j)
if max(i) >= max(j):
None
else:
key_frequencyRGB_Dictionary_list = list(frequencyRGB_Dictionary.keys())
keyToValidate = str(list(j))
keyOfConcern = str(list(j))
if(keyToValidate in key_frequencyRGB_Dictionary_list):
frequencyRGB_Dictionary[keyOfConcern] = [frequencyRGB_Dictionary[keyOfConcern][0] + 1]
break
else:
frequencyRGB_Dictionary[keyOfConcern] = [1]
break
frequencyRGB_DataFrame = pd.DataFrame(data = frequencyRGB_Dictionary)
# Image typing | Phase (1): Get frequency DataFrame group by triad
frequency_dictionary = {}
#### indexList_frequency_DataFrame = []
#### indexNumber = 0
for i in triad_list:
keyToValidate = str(list(i))
keyList = list(frequency_dictionary.keys())
if(keyToValidate not in keyList):
array_frequency_dictionary = i
key_frequency_dictionary = str(list(array_frequency_dictionary))
frequency_dictionary[key_frequency_dictionary] = [1]
else:
keyToInclude = keyToValidate
array_frequency_dictionary = i
key_frequency_dictionary = str(list(array_frequency_dictionary))
newFrequency = frequency_dictionary[keyToInclude][0] + 1
frequency_dictionary[keyToInclude] = [newFrequency]
#### frequency_DataFrame = pd.DataFrame(data = frequency_dictionary)
# Image typing | Phase (2): Image typing identifier
## Types 2: 'ID00007637202177411956430' | blackTriadFrequency = 1896 -> 2000
## Types 3: 'ID00009637202177434476278'| 5075 -> 5500
## Types 1: 'ID00014637202177757139317', 'ID00419637202311204720264' | 457 -> 1000
## Criterion: black color spacing - RGB triad: frequency below RGB triad (32,32,32)
try:
triadFrequencyToEvaluate = frequencyRGB_DataFrame['[32, 32, 32]'][0]
except KeyError:
triadFrequencyToEvaluate = 0
imageType1 = 1000
imageType2 = 2000
imageType3 = 5500
imageTypeBoundaries = [imageType1, imageType2, imageType3]
imageTypeBoundaries_label = ['1','2','3']
comparisonResults = []
for i in imageTypeBoundaries:
if(triadFrequencyToEvaluate >= i):
comparisonResults = comparisonResults + [True]
else:
comparisonResults = comparisonResults + [False]
iteration = 0
for i in comparisonResults:
if i is False:
imageType = imageTypeBoundaries_label[iteration]
break
else:
iteration = iteration + 1
if comparisonResults == [True, True, True]: imageType = imageTypeBoundaries_label[2]
# Image typing | Phase (3): Differ state Inhalation / Exhalation and get index
## Step 0: Set column Label list
#### labelList = ['[224, 224, 224]','[192, 192, 192]','[160, 160, 160]','[128, 128, 128]','[96, 96, 96]','[64, 64, 64]','[32, 32, 32]']
## Step 1: Get the state given the type of image (i.e. inhalation or exhalation)
if (imageType == '1'):
# Set RGB triads to differ 'inhalation' from 'exhalation'
## RGB triads to build a pattern: ['[224, 224, 224]','[192, 192, 192]']
## Boundary | Exhalation: Summation(RGB triads) <= 7000 (6715)
## Boundary | Inhalation: 8000 (7840) <= Summation(RGB triads)
labelList_imageState = ['[224, 224, 224]','[192, 192, 192]']
labelList_exhalation_dark = ['[160, 160, 160]','[128, 128, 128]','[96, 96, 96]','[64, 64, 64]','[32, 32, 32]']
labelList_exhalation_tenous = ['[224, 224, 224]','[192, 192, 192]']
labelList_inhalation_dark = ['[64, 64, 64]','[32, 32, 32]']
labelList_inhalation_tenous = ['[224, 224, 224]','[192, 192, 192]','[160, 160, 160]','[128, 128, 128]','[96, 96, 96]']
stateBoundaryExhalation = 7000
if (imageType == '2'):
# Set RGB triads to differ 'inhalation' from 'exhalation'
## RGB triads to build a pattern: ['[224, 224, 224]','[192, 192, 192]']
## Boundary | Exhalation: Summation(RGB triads) <= 3500 (3215)
## Boundary | Inhalation: 5000 (5195) <= Summation(RGB triads)
labelList_imageState = ['[224, 224, 224]','[192, 192, 192]']
labelList_exhalation_dark = ['[128, 128, 128]','[96, 96, 96]','[64, 64, 64]','[32, 32, 32]']
labelList_exhalation_tenous = ['[224, 224, 224]','[192, 192, 192]','[160, 160, 160]']
labelList_inhalation_dark = ['[64, 64, 64]','[32, 32, 32]']
labelList_inhalation_tenous = ['[224, 224, 224]','[192, 192, 192]','[160, 160, 160]','[128, 128, 128]','[96, 96, 96]']
stateBoundaryExhalation = 3500
if (imageType == '3'):
# Set RGB triads to differ 'inhalation' from 'exhalation'
## RGB triads to build a pattern: ['[128, 128, 128]','[96, 96, 96]']
## Boundary | Exhalation: Summation(RGB triads) <= 3500 (3426)
## Boundary | Inhalation: 6500 (6720) <= Summation(RGB triads)
labelList_imageState = ['[128, 128, 128]','[96, 96, 96]']
labelList_exhalation_dark = ['[32, 32, 32]']
labelList_exhalation_tenous = ['[224, 224, 224]','[192, 192, 192]','[160, 160, 160]','[128, 128, 128]','[96, 96, 96]','[64, 64, 64]']
labelList_inhalation_dark = ['[32, 32, 32]']
labelList_inhalation_tenous = ['[224, 224, 224]','[192, 192, 192]','[160, 160, 160]','[128, 128, 128]','[96, 96, 96]','[64, 64, 64]']
stateBoundaryExhalation = 3500
## Step 2: Get ciphers to compute the index
stateTriads_List = []
for i in labelList_imageState:
try:
elementToInclude = frequencyRGB_DataFrame[i][0]
stateTriads_List = stateTriads_List + [elementToInclude]
except KeyError:
stateTriads_List = stateTriads_List + [0]
if sum(stateTriads_List) <= stateBoundaryExhalation:
imageState = 'Exhalation'
DarkGreyCipherLabel = labelList_exhalation_dark
TenousGreyCipherLabel = labelList_exhalation_tenous
else:
imageState = 'Inhalation'
DarkGreyCipherLabel = labelList_inhalation_dark
TenousGreyCipherLabel = labelList_inhalation_tenous
DarkGreyCipher_list = []
for i in DarkGreyCipherLabel:
try:
elementToInclude = frequencyRGB_DataFrame[i][0]
DarkGreyCipher_list = DarkGreyCipher_list + [elementToInclude]
except KeyError:
DarkGreyCipher_list = DarkGreyCipher_list + [0]
TenousGreyCipher_list = []
for i in TenousGreyCipherLabel:
try:
elementToInclude = frequencyRGB_DataFrame[i][0]
TenousGreyCipher_list = TenousGreyCipher_list + [elementToInclude]
except KeyError:
DarkGreyCipher_list = DarkGreyCipher_list + [0]
# Image typing | Phase (4): Get index value
numerator = sum(TenousGreyCipher_list)
denominator = sum(DarkGreyCipher_list)
index = round(numerator / denominator,6)
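# The index is therefore the ratio of lighter-grey to darker-grey pixel counts for the detected breathing state;
# presumably a larger ratio reflects more aerated (less dense) lung area in the slice, which is what makes it a
# candidate proxy for FVC status.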
## Step 7: Get index triad
indexTriad_List = indexTriad_List + [[ID, index, imageState, imageType]]
# Computing | Index Type 1 | Build DataFrame using indexTriad_List
indexTriad_Array = np.array(indexTriad_List)
indexTriad_DataFrame = pd.DataFrame(indexTriad_Array,columns=['Patient','indexType1','ImageState','ImageType'])
# Computing | Index Type 1 | Get average values for indexType1 regarding the ImageState
exhalationValues = []
inhalationValues = []
for i in indexTriad_DataFrame.index:
itemToInclude = float(indexTriad_DataFrame.indexType1[i])
if(indexTriad_DataFrame.ImageState[i] == 'Exhalation'): exhalationValues = exhalationValues + [itemToInclude]
if(indexTriad_DataFrame.ImageState[i] == 'Inhalation'): inhalationValues = inhalationValues + [itemToInclude]
exhalationAverage = round(np.mean(exhalationValues),6)
inhalationAverage = round(np.mean(inhalationValues),6)
exhalationTriad = np.array([ID,exhalationAverage,'Exhalation',indexTriad_DataFrame.ImageType[0]])
inhalationTriad = np.array([ID,inhalationAverage,'Inhalation',indexTriad_DataFrame.ImageType[0]])
# Closing | Get Function Result
import time
processTime = time.process_time()
FunctionResult = indexTriad_DataFrame,exhalationTriad,inhalationTriad,processTime
if reportMode == True:
print("=========================================")
print("Function Report")
print("=========================================")
print("Inputs")
print(" Product type: ",ProductType)
print(" Split type: ",splitType)
print(" Patient ID: ",ID)
print(" Interpolation Method: ",interpolationMethod)
print("=========================================")
print("Process")
print(" Time: ", FunctionResult[3])
print("=========================================")
print("Outputs")
print(" Number of DataFrames: ", 1)
print(" Number of processed images", len(FunctionResult[0]))
print(" Exhalation triad: ",FunctionResult[1][1:])
print(" Inhalation triad: ",FunctionResult[2][1:])
print("=========================================")
print("Test result Function 1: Success")
print("=========================================")
return FunctionResult
if testMode == True:
#ProductType = 'prototype'
ProductType = 'population'
splitType = 'train'
#ID = 'ID00007637202177411956430' # Image type 1
#ID = 'ID00011637202177653955184'
#ID = 'ID00009637202177434476278' # Image type 2
#ID = 'ID00014637202177757139317' # Image type 3
#ID = 'ID00009637202177434476278'
#ID = 'ID00134637202223873059688'
ID ='ID00135637202224630271439'
interpolationMethod = None
reportMode = True
FunctionResult1 = IndexType1(ProductType,splitType,ID,interpolationMethod,testMode,reportMode)
"""
=========================================
Function 2: Compute space index that depicts FVC status | Index type 1 (Scaling mode)
=========================================
Purpose: Get space index under scaling mode
Code Source: -
Raw code reference (see Tester.py): -
"""
def GetIndexType1_ScalingMode(productType,interpolationMethod,testMode,reportMode):
import os
import pandas as pd
import numpy as np
import distutils.dir_util
# Create datasets | Phase -1: Get output path
if productType == 'population':
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
if productType == 'prototype':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
if productType == 'sampling':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Sampling)/'
path_output_train = path_ProductType + 'outcome/' + 'indexType1/train/'
path_output_test = path_ProductType + 'outcome/' + 'indexType1/test/'
path_output = path_ProductType + 'outcome/'
try:
os.chdir(path_output_train)
except FileNotFoundError:
distutils.dir_util.mkpath(path_output_train)
try:
os.chdir(path_output_test)
except FileNotFoundError:
distutils.dir_util.mkpath(path_output_test)
# Conditionning | Phase 0: Get ID lists
## Train and test paths
train_path = 'Y:/Kaggle_OSIC/2-Data/train/'
test_path = 'Y:/Kaggle_OSIC/2-Data/test/'
## Train and test lists
trainList = os.listdir(train_path)
testList = os.listdir(test_path)
## Set parameters
testMode = False
reportMode = True
# Create datasets | Phase 1: Test dataset
tetrad_test_List = []
try:
tetrad_test_List_output = os.listdir(path_output_test)
except FileNotFoundError:
tetrad_test_List_output = []
splitType = 'test'
for i in testList:
try:
# Verify if process is done
filenameToVerify = i + '.csv'
if (filenameToVerify in tetrad_test_List_output):
print("=========================================")
print(splitType," ",filenameToVerify, " -> Done")
print("=========================================")
else:
# Process
ID = i
FunctionResult = IndexType1(productType,splitType,ID,interpolationMethod,testMode,reportMode)
exhalationTetrad = list(FunctionResult[1])
inhalationTetrad = list(FunctionResult[2])
tetrad_test_List = [exhalationTetrad] + [inhalationTetrad]
tetrad_test_array = np.array(tetrad_test_List)
# Get unitary DataFrame
indexType1_test_DataFrame_unitary = pd.DataFrame(tetrad_test_array, columns=['Patient','indexType1','ImageState','ImageType'])
# Create CSV File
filename_test_output_unitary = i + '.csv'
indexType1_test_DataFrame_unitary.to_csv(path_output_test+filename_test_output_unitary)
except FileNotFoundError:
break
# Create datasets | Phase 2: Train dataset
tetrad_train_List = []
try:
tetrad_train_List_output = os.listdir(path_output_train)
except FileNotFoundError:
tetrad_train_List_output = []
splitType = 'train'
for j in trainList:
try:
# Verify if process is done
filenameToVerify = j + '.csv'
if (filenameToVerify in tetrad_train_List_output):
print("=========================================")
print(splitType," ",filenameToVerify, " -> Done")
print("=========================================")
else:
# Process
ID = j
FunctionResult = IndexType1(productType,splitType,ID,interpolationMethod,testMode,reportMode)
exhalationTetrad = list(FunctionResult[1])
inhalationTetrad = list(FunctionResult[2])
tetrad_train_List = [exhalationTetrad] + [inhalationTetrad]
tetrad_train_array = np.array(tetrad_train_List)
# Get unitary DataFrame
indexType1_train_DataFrame_unitary = pd.DataFrame(tetrad_train_array, columns=['Patient','indexType1','ImageState','ImageType'])
# Create CSV File
filename_train_output_unitary = j + '.csv'
indexType1_train_DataFrame_unitary.to_csv(path_output_train+filename_train_output_unitary)
except FileNotFoundError:
break
# Create datasets | Phase 2: Create DataFarme and CSV file
## Train dataset case
unitary_Dataframe = []
trainList_output = os.listdir(path_output_train)
if (len(trainList) == len(trainList_output)):
unitary_Dataframe = []
for i in trainList_output:
filename = i
if(unitary_Dataframe != []):
unitary_DataframeToInclude = | pd.read_csv(path_output_train+filename) | pandas.read_csv |
import fast_to_sql.fast_to_sql as fts
from fast_to_sql import errors
import datetime
import pandas as pd
import unittest
import pyodbc
import numpy as np
# Tests
class FastToSQLTests(unittest.TestCase):
conn = None
# Intentionally included weird column names
TEST_DF = pd.DataFrame({
"T1;'": [1,2,3],
"[(Add)]": ["hello's","My","name"],
"This is invalid": [True, False, False]
})
@classmethod
def setUpClass(cls):
cls.conn = pyodbc.connect("Driver={ODBC Driver 17 for SQL Server};Server=localhost;Database=test;UID=sa;PWD=<PASSWORD>;")
@classmethod
def tearDown(self):
tables = ["test_table1","test_table2","test_table3","test_table4","test_table5","testy1","testy2"]
for t in tables:
self.conn.execute(f"DROP TABLE IF EXISTS {t}")
self.conn.commit()
def test_clean_cols(self):
clean_cols = [fts._clean_col_name(c) for c in list(self.TEST_DF.columns)]
self.assertEqual(["[T1;']", '[Add]', '[This_is_invalid]'],clean_cols)
def test_dups(self):
TEST_DF_NEW = self.TEST_DF.copy()
TEST_DF_NEW["t1;'"] = 1
def should_fail():
fts._check_duplicate_cols(TEST_DF_NEW)
self.assertRaises(errors.DuplicateColumns,should_fail)
def test_custom_dtype_error(self):
TEST_DF_c = self.TEST_DF.copy()
columns = [fts._clean_col_name(c) for c in list(TEST_DF_c.columns)]
TEST_DF_c.columns = columns
def should_fail():
fts._clean_custom(TEST_DF_c, {"[(Add)]":"INT PRIMARY KEY","fail":"DATE"})
self.assertRaises(errors.CustomColumnException,should_fail)
def test_dtypes(self):
TEST_DF_c = self.TEST_DF.copy()
columns = [fts._clean_col_name(c) for c in list(TEST_DF_c.columns)]
TEST_DF_c.columns = columns
custom = fts._clean_custom(TEST_DF_c, {"[(Add)]":"INT PRIMARY KEY"})
data_types = fts._get_data_types(TEST_DF_c, custom)
self.assertEqual({"[T1;']": 'int', '[Add]': 'INT PRIMARY KEY', '[This_is_invalid]': 'bit'},data_types)
def test_clean_name(self):
table_name = "this isn't valid"
self.assertEqual("this isn''t valid",fts._clean_table_name(table_name))
def test_get_schema(self):
cur = self.conn.cursor()
name = "dbo.test"
schema, name = fts._get_schema(cur, name)
self.assertEqual("dbo",schema)
self.assertEqual("test",name)
name = "test"
schema, name = fts._get_schema(cur, name)
self.assertEqual("dbo", schema)
self.assertEqual("test", name)
def test_check_exists(self):
name = "dbo.test"
cur = self.conn.cursor()
schema, name = fts._get_schema(cur, name)
cur.execute("IF EXISTS(SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'test' and TABLE_SCHEMA = SCHEMA_NAME()) DROP TABLE dbo.test")
cur.execute("create table dbo.test (t varchar(1))")
res = fts._check_exists(cur, schema, name, False)
self.assertEqual(1, res)
cur.execute("drop table dbo.test")
res = fts._check_exists(cur, schema, name, False)
self.assertEqual(0, res)
cur.close()
def test_check_parameter_if_exists(self):
def should_fail():
fts._check_parameter_if_exists("should_fail")
self.assertRaises(errors.WrongParam,should_fail)
def test_generate_create_statement(self):
df = pd.DataFrame({"A":[1,2,3],"B":["a","b","c"],"C":[True,False,True]})
data_types = fts._get_data_types(df, {})
create_statement = fts._generate_create_statement("dbo","test3",data_types,"")
with open("tests/test_create.sql","r") as f:
compare = f.read()
self.assertEqual(compare, create_statement)
def test_big_numbers(self):
cur = self.conn.cursor()
with open("tests/test_data.dat", "r") as f:
data = f.read()
data = data.split("\n")
data = {i.split("|")[0]: [i.split("|")[1]] for i in data}
data = pd.DataFrame(data)
fts.fast_to_sql(data, "testy1", self.conn, if_exists="replace", temp=False)
self.conn.commit()
test_val = int(cur.execute("select M from testy1").fetchall()[0][0])
self.assertEqual(352415214754234,test_val)
def test_null_values(self):
cur = self.conn.cursor()
data = pd.read_csv("tests/test_data2.csv")
fts.fast_to_sql(data, "testy2", self.conn, if_exists="replace", temp=False)
self.conn.commit()
output = cur.execute("select * from testy2").fetchall()
self.assertIsNone(output[0][1])
self.assertIsNone(output[1][2])
def test_fast_to_sql(self):
"""Test main fast_to_sql function
"""
cur = self.conn.cursor()
# Simple test table
df = pd.DataFrame({"A":[1,2,3],"B":["a","b","c"],"C":[True,False,True]})
fts.fast_to_sql(df, "dbo.test_table2", self.conn, "replace", None, False)
self.assertEqual((1, 'a', True),tuple(cur.execute("select * from dbo.test_table2").fetchall()[0]))
# Series
s = pd.Series([1,2,3])
fts.fast_to_sql(s, "seriesTest", self.conn, "replace", None, False)
self.assertEqual(1,cur.execute("select * from seriesTest").fetchall()[0][0])
# Temp table
df = | pd.DataFrame({"A":[1,2,3],"B":["a","b","c"],"C":[True,False,True]}) | pandas.DataFrame |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
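    # 0b00101101 sets bits 0, 2, 3 and 5, so rows 1 and 4 become null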
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
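    # exactly 20 randomly placed positions are now NaN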
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
    # Check the default, which hashes all columns
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
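    # expand the packed bitmask into one byte per row for host-side validity checks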
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
    if (dtype1 != dtype2 and "datetime" in dtype1) or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # cudf.DataFrame.to_pandas() upcasts numerical columns to float and
    # casts NaN to 0 in non-float numerical columns, so normalize the
    # result before comparing
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
    # For some reason PyArrow's to_pandas() converts to a NumPy array here,
    # which has better type compatibility for this comparison
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
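            # map the NumPy datetime unit to the matching Arrow timestamp type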
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
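    # build a stdlib array from the memoryview to exercise construction
    # from a plain Python buffer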
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
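    # the +1.0 offset avoids zeros, which would make mod and floordiv ill-defined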
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
q = q if isinstance(q, list) else [q]
assert_eq(
            pdf.quantile(q, numeric_only=False),
gdf.quantile(q, numeric_only=False),
)
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's string hash is randomized per process, which sometimes makes
    # enc_with_name_arr and enc_arr come out the same. There is no reliable
    # way to force hash() to return the same value for a string name, so use
    # an integer name to get a constant value back from hash.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
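    # np.digitize expects monotonically increasing bins, hence the sort + unique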
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguious():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
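    # columns of a frame built from a 2-D array are strided views, hence not C-contiguous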
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Cast explicitly on the pandas side because None values would otherwise
    # infer `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=0
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
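    # pandas >= 1.1 compares DatetimeIndex `freq` in its assert helpers;
    # cudf does not track frequency, so disable that check there.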
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(gdf_new_name, pdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_multiindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas only supports ignore_index in sort_index from v1.0, so it is
    # applied manually to the expected result below
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
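    # A zero-row head() must still preserve each column's original dtype.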
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_dataframe_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
    for c in gdf.columns:
        assert np.array_equal(
            gdf[c].nullmask.to_array(), result[c].nullmask.to_array()
        )
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
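        # cudf Series do not implement any() along axis=1 and should raise.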
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
    index = list(range(rows)) if indexed else None
    gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
    # The DataFrame's reported size should equal the size of its index plus
    # the sizes of all of its columns.
    cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
    assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
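    # Empty inputs should default to float64, mirroring the pandas default.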
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
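    # A scalar `values` argument should raise equivalent errors in both
    # pandas and cudf.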
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
        except ValueError as e:
            # Older pandas raises "Lengths must match." here; see
            # https://github.com/pandas-dev/pandas/issues/34256
            if str(e) == "Lengths must match." and not PANDAS_GE_110:
                pytest.xfail(
                    "https://github.com/pandas-dev/pandas/issues/34256"
                )
            raise
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
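    # Pick sample data that fits the source dtype: no negatives for unsigned
    # ints, NaN only for floats.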
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
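    # var/std are compared with ddof=0 so both libraries use the same
    # normalization.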
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
# Special Case: Assume RangeIndex size == 0
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas uses NaN and typecasts to float64 if there are missing values on
    # alignment, so we need to typecast to float64 for the equality comparison
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
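    # Compare .where()/.mask() behaviour between pandas and cudf for Series and
    # DataFrames, with conditions supplied as pandas objects, lists, numba device
    # arrays or cupy arrays, with and without `other`/`inplace`, and check that
    # invalid inputs raise matching exceptions.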
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
if pd.api.types.is_categorical_dtype(expect_where):
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
assert_exceptions_equal(
lfunc=ps_where.where,
rfunc=gs_where.where,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False
if error is NotImplementedError
else True,
)
assert_exceptions_equal(
lfunc=ps_mask.mask,
rfunc=gs_mask.mask,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False,
)
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
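    # Same where/mask comparison as above, but focused on string and categorical
    # columns, with and without a replacement value for `other`.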
ps = data
gs = cudf.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
pdf = pd.DataFrame({"one_col": data})
if error == NotImplementedError:
with pytest.raises(error):
cudf.from_pandas(data)
with pytest.raises(error):
cudf.Series(data)
with pytest.raises(error):
cudf.from_pandas(pdf)
with pytest.raises(error):
cudf.DataFrame(pdf)
else:
df = cudf.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = cudf.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
data = [np.nan, 2.0, 3.0]
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = expected.set_index(index)
got = cudf.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = cudf.Series(column.as_column(data, nan_as_null=nan_as_null))
got = cudf.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
assert_exceptions_equal(
lfunc=psr.astype,
rfunc=gsr.astype,
lfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
rfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
expect = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(data, columns=columns)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
expect = pd.DataFrame(data, columns=None)
actual = cudf.DataFrame(data, columns=None)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
gd_data = data
if isinstance(data, cupy.core.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
# additional test for building DataFrame with gpu array whose
# cuda array interface has no `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
pdf = pd.DataFrame(pd_data, columns=cols, index=index)
gdf = cudf.DataFrame(gd_data, columns=cols, index=index)
assert_eq(pdf, gdf, check_dtype=False)
# verify with columns
pdf = pd.DataFrame(pd_data, columns=cols)
gdf = cudf.DataFrame(gd_data, columns=cols)
assert_eq(pdf, gdf, check_dtype=False)
pdf = pd.DataFrame(pd_data)
gdf = cudf.DataFrame(gd_data)
assert_eq(pdf, gdf, check_dtype=False)
if numba_data is not None:
gdf = cudf.DataFrame(numba_data)
assert_eq(pdf, gdf, check_dtype=False)
@pytest.mark.parametrize(
"col_data",
[
range(5),
["a", "b", "x", "y", "z"],
[1.0, 0.213, 0.34332],
["a"],
[1],
[0.2323],
[],
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar(col_data, assign_val):
pdf = pd.DataFrame({"a": col_data})
gdf = cudf.DataFrame({"a": col_data})
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"col_data",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar_with_scalar_cols(col_data, assign_val):
pdf = pd.DataFrame(
{
"a": cupy.asnumpy(col_data)
if isinstance(col_data, cupy.ndarray)
else col_data
},
index=["dummy_mandatory_index"],
)
gdf = cudf.DataFrame({"a": col_data}, index=["dummy_mandatory_index"])
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
def test_dataframe_info_basic():
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 10 entries, a to 1111
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 0 10 non-null float64
1 1 10 non-null float64
2 2 10 non-null float64
3 3 10 non-null float64
4 4 10 non-null float64
5 5 10 non-null float64
6 6 10 non-null float64
7 7 10 non-null float64
8 8 10 non-null float64
9 9 10 non-null float64
dtypes: float64(10)
memory usage: 859.0+ bytes
"""
)
df = pd.DataFrame(
np.random.randn(10, 10),
index=["a", "2", "3", "4", "5", "6", "7", "8", "100", "1111"],
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
def test_dataframe_info_verbose_mem_usage():
buffer = io.StringIO()
df = pd.DataFrame({"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]})
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Columns: 2 entries, a to b
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=False)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]},
index=["sdfdsf", "sdfsdfds", "dsfdf"],
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 3 entries, sdfdsf to dsfdf
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 91.0 bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True, memory_usage="deep")
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0 bytes
"""
)
df.info(buf=buffer, verbose=True, memory_usage="deep")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
def test_dataframe_info_null_counts():
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Dtype
--- ------ -----
0 int_col int64
1 text_col object
2 float_col float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
"""
)
df.info(buf=buffer, verbose=True, null_counts=False)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, verbose=True, max_cols=0)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 0 entries
Empty DataFrame"""
)
df.info(buf=buffer, verbose=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame(
{
"a": [1, 2, 3, None, 10, 11, 12, None],
"b": ["a", "b", "c", "sd", "sdf", "sd", None, None],
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Dtype
--- ------ -----
0 a int64
1 b object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
pd.options.display.max_info_rows = 2
df.info(buf=buffer, max_cols=2, null_counts=None)
pd.reset_option("display.max_info_rows")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 6 non-null int64
1 b 6 non-null object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
df.info(buf=buffer, max_cols=2, null_counts=None)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, null_counts=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
@pytest.mark.parametrize(
"data1",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize("rtol", [0, 0.01, 1e-05, 1e-08, 5e-1, 50.12])
@pytest.mark.parametrize("atol", [0, 0.01, 1e-05, 1e-08, 50.12])
def test_cudf_isclose(data1, data2, rtol, atol):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, rtol=rtol, atol=atol))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(data1, data2, rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = cudf.isclose(
cupy.array(data1), cupy.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
np.array(data1), np.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
pd.Series(data1), pd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data1",
[
[
-1.9876543,
-2.9876654,
np.nan,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
np.nan,
-21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
np.nan,
np.nan,
np.nan,
21.1212,
],
],
)
@pytest.mark.parametrize("equal_nan", [True, False])
def test_cudf_isclose_nulls(data1, data2, equal_nan):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, equal_nan=equal_nan))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), equal_nan=equal_nan
)
assert_eq(expected, actual, check_dtype=False)
actual = cudf.isclose(data1, data2, equal_nan=equal_nan)
assert_eq(expected, actual, check_dtype=False)
def test_cudf_isclose_different_index():
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 3, 4, 2],
)
expected = cudf.Series([True] * 6, index=s1.index)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 10, 4, 2],
)
expected = cudf.Series(
[True, True, True, False, True, True], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[100, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 100, 10, 4, 2],
)
expected = cudf.Series(
[False, True, True, False, True, False], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
def test_dataframe_to_dict_error():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [9, 5, 3]})
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df.to_dict()
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df["a"].to_dict()
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]}),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
}
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=[10, 20, 30, 40, 50, 60],
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=["a", "b", "c", "d", "e", "f"],
),
pd.DataFrame(index=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(columns=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(index=[10, 11, 12]),
pd.DataFrame(columns=[10, 11, 12]),
pd.DataFrame(),
pd.DataFrame({"one": [], "two": []}),
pd.DataFrame({2: [], 1: []}),
pd.DataFrame(
{
0: [1, 2, 3, 4, 5, 10],
1: ["abc", "def", "ghi", "xyz", "pqr", "abc"],
100: ["a", "b", "b", "x", "z", "a"],
},
index=[10, 20, 30, 40, 50, 60],
),
],
)
def test_dataframe_keys(df):
gdf = cudf.from_pandas(df)
assert_eq(df.keys(), gdf.keys())
@pytest.mark.parametrize(
"ps",
[
pd.Series([1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]),
pd.Series(["abc", "def", "ghi", "xyz", "pqr", "abc"]),
pd.Series(
[1, 2, 3, 4, 5, 10],
index=["abc", "def", "ghi", "xyz", "pqr", "abc"],
),
pd.Series(
["abc", "def", "ghi", "xyz", "pqr", "abc"],
index=[1, 2, 3, 4, 5, 10],
),
pd.Series(index=["a", "b", "c", "d", "e", "f"], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
pd.Series(dtype="float64"),
pd.Series([], dtype="float64"),
],
)
def test_series_keys(ps):
gds = cudf.from_pandas(ps)
if len(ps) == 0 and not isinstance(ps.index, pd.RangeIndex):
assert_eq(ps.keys().astype("float64"), gds.keys())
else:
assert_eq(ps.keys(), gds.keys())
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
pd.DataFrame(),
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
pd.DataFrame([]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([], index=[100]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = cudf.from_pandas(other)
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({12: [], 22: []}),
pd.DataFrame([[1, 2], [3, 4]], columns=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[0, 1], index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[1, 0], index=[7, 8]),
pd.DataFrame(
{
23: [315.3324, 3243.32432, 3232.332, -100.32],
33: [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
0: [315.3324, 3243.32432, 3232.332, -100.32],
1: [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.Series([10, 11, 23, 234, 13]),
pytest.param(
pd.Series([10, 11, 23, 234, 13], index=[11, 12, 13, 44, 33]),
marks=pytest.mark.xfail(
reason="pandas bug: "
"https://github.com/pandas-dev/pandas/issues/35092"
),
),
{1: 1},
{0: 10, 1: 100, 2: 102},
],
)
@pytest.mark.parametrize("sort", [False, True])
def test_dataframe_append_series_dict(df, other, sort):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
if isinstance(other, pd.Series):
other_gd = cudf.from_pandas(other)
else:
other_gd = other
expected = pdf.append(other_pd, ignore_index=True, sort=sort)
actual = gdf.append(other_gd, ignore_index=True, sort=sort)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_series_mixed_index():
df = cudf.DataFrame({"first": [], "d": []})
sr = cudf.Series([1, 2, 3, 4])
with pytest.raises(
TypeError,
match=re.escape(
"cudf does not support mixed types, please type-cast "
"the column index of dataframe and index of series "
"to same dtypes."
),
):
df.append(sr, ignore_index=True)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
[pd.DataFrame([[5, 6], [7, 8]], columns=list("AB"))],
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
],
[pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()],
[
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
],
[
            pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]})
import os
import torch
import torch.nn as nn
import numpy as np
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
import pandas as pd
from mmd_metric import polynomial_mmd
import argparse
# Hinge Loss
def loss_hinge_dis_real(dis_real):
loss_real = torch.mean(F.relu(1. - dis_real))
return loss_real
def loss_hinge_dis_fake(dis_fake):
loss_fake = torch.mean(F.relu(1. + dis_fake))
return loss_fake
def loss_hinge_gen(dis_fake):
loss = -torch.mean(dis_fake)
return loss
# Vanilla
def loss_bce_dis_real(dis_output):
target = torch.tensor(1.).cuda().expand_as(dis_output)
loss = F.binary_cross_entropy_with_logits(dis_output, target)
return loss
def loss_bce_dis_fake(dis_output):
target = torch.tensor(0.).cuda().expand_as(dis_output)
loss = F.binary_cross_entropy_with_logits(dis_output, target)
return loss
def loss_bce_gen(dis_fake):
target_real = torch.tensor(1.).cuda().expand_as(dis_fake)
loss = F.binary_cross_entropy_with_logits(dis_fake, target_real)
return loss
def plot_density(flights,binwidth=0.1):
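    """Plot a histogram of `flights`, deriving the bin count from `binwidth`
    (assumes a value range of roughly 180 units)."""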
ax = plt.subplot(1,1,1)
# Draw the plot
ax.hist(flights, bins=int(180 / binwidth),
color='blue', edgecolor='black')
# Title and labels
    ax.set_title('Histogram with Binwidth = %.2f' % binwidth, size=30)
ax.set_xlabel('Delay (min)', size=22)
ax.set_ylabel('Flights', size=22)
plt.tight_layout()
plt.show()
class G_guassian(nn.Module):
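    """Conditional generator for the 1D toy task: the class label is embedded,
    concatenated with the noise vector z and decoded by a small MLP into a
    single scalar sample."""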
def __init__(self, nz, num_classes=3):
super(G_guassian, self).__init__()
self.embed = nn.Embedding(num_embeddings=num_classes, embedding_dim=nz)
self.decode = nn.Sequential(
nn.Linear(nz*2, 10),
nn.Tanh(),
nn.Linear(10, 10),
nn.Tanh(),
nn.Linear(10, 10),
# nn.Tanh(),
nn.Linear(10, 1),
)
self.__initialize_weights()
def forward(self, z, label, output=None):
input = torch.cat([z, self.embed(label)], dim=1)
x = input.view(input.size(0), -1)
output = self.decode(x)
return output
def __initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, 0.02)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class D_guassian(nn.Module):
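    """Discriminator with three heads: a real/fake score (gan_linear), an
    auxiliary class predictor (aux_linear) and a reverse/MI class predictor
    (mi_linear). With AC=False a label-projection term is added to the
    real/fake score instead (projection-discriminator style)."""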
def __init__(self, num_classes=3, AC=True, dis_mlp=False):
super(D_guassian, self).__init__()
self.AC = AC
self.encode = nn.Sequential(
nn.Linear(1, 10),
nn.Tanh(),
nn.Linear(10, 10),
nn.Tanh(),
nn.Linear(10, 10),
# nn.Tanh(),
)
if dis_mlp:
self.gan_linear = nn.Sequential(
nn.Linear(10, 10),
nn.Tanh(),
nn.Linear(10, 1)
)
else:
self.gan_linear = nn.Linear(10, 1)
self.aux_linear = nn.Linear(10, num_classes)
self.mi_linear = nn.Linear(10, num_classes)
if not self.AC:
self.projection = nn.Embedding(num_embeddings=num_classes, embedding_dim=10)
self.sigmoid = nn.Sigmoid()
self.__initialize_weights()
def forward(self, input, y=None):
x = self.encode(input)
x = x.view(-1, 10)
c = self.aux_linear(x)
mi = self.mi_linear(x)
s = self.gan_linear(x)
if not self.AC:
s += (self.projection(y)*x).sum(dim=1, keepdim=True)
return s.squeeze(1), c.squeeze(1), mi.squeeze(1)
def __initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, 0.02)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def train(data1, data2, data3, nz, G, D, optd, optg, AC=True, MI=True, gan_loss='bce'):
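    """Alternating GAN training on three 1D Gaussians (class labels 0, 1, 2).
    Every iteration takes one discriminator step on a mixed real batch drawn
    from data1/data2/data3 plus a generated batch; a generator step is taken
    every 10 iterations. With AC=True an auxiliary classification loss is
    added, and with MI=True a mutual-information penalty on fake samples is
    added to the D loss and subtracted from the G loss.
    """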
if gan_loss == 'hinge':
loss_dis_real, loss_dis_fake, loss_gen = loss_hinge_dis_real, loss_hinge_dis_fake, loss_hinge_gen
elif gan_loss == 'bce':
loss_dis_real, loss_dis_fake, loss_gen = loss_bce_dis_real, loss_bce_dis_fake, loss_bce_gen
else:
raise NotImplementedError
bs = 384
for _ in range(20):
for i in range(1000):
#####D step
for _ in range(1):
data = torch.cat(
[data1[128 * i:128 * i + 128], data2[128 * i:128 * i + 128], data3[128 * i:128 * i + 128]],
dim=0).unsqueeze(dim=1)
label = torch.cat([torch.ones(128).cuda().long()*0, torch.ones(128).cuda().long()*1, torch.ones(128).cuda().long()*2], dim=0)
###D
d_real, c, _ = D(data, label)
z = torch.randn(bs, nz).cuda()
fake_label = torch.LongTensor(bs).random_(3).cuda()
fake_data = G(z, label=fake_label)
d_fake, _, mi = D(fake_data, fake_label)
# D_loss = F.binary_cross_entropy(d_real, torch.ones(384).cuda()) \
# + F.binary_cross_entropy(d_fake, torch.zeros(bs).cuda())
D_loss = loss_dis_real(d_real) + loss_dis_fake(d_fake)
if AC:
D_loss += F.cross_entropy(c, label)
if MI:
D_loss += F.cross_entropy(mi, fake_label)
optd.zero_grad()
D_loss.backward()
optd.step()
#####G step
if i % 10 == 0:
z = torch.randn(bs, nz).cuda()
fake_label = torch.LongTensor(bs).random_(3).cuda()
fake_data = G(z, label=fake_label)
d_fake, c, mi = D(fake_data, fake_label)
# G_loss = F.binary_cross_entropy(d_fake, torch.ones(bs).cuda())
G_loss = loss_gen(d_fake)
if AC:
G_loss += F.cross_entropy(c, fake_label)
if MI:
G_loss -= F.cross_entropy(mi, fake_label)
optg.zero_grad()
G_loss.backward()
optg.step()
def get_start_id(args):
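    """Return 0 when resuming; otherwise count the existing MOG/1D result
    directories for this configuration and return max(0, count - 1)."""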
if args.resume:
return 0
else:
cnt = 0
suffix = '_mlp' if not args.suffix and args.dis_mlp else args.suffix
for i in os.listdir(os.path.join('MOG', '1D')):
if i.startswith(f'{args.distance}_{args.gan_loss}{suffix}_'):
cnt += 1
return max(0, cnt - 1)
def multi_results(distance, gan_loss='bce', dis_mlp=False, run_id=0, suffix='', no_graph=False):
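    """Run one toy experiment: build G/D and their optimizers, rescale
    `distance` to space the three Gaussians, create the MOG/1D output
    directory and sample the three training distributions."""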
if not suffix and dis_mlp:
suffix = '_mlp'
# time.sleep(distance*3)
nz = 2
G = G_guassian(nz=nz, num_classes=3).cuda()
D = D_guassian(num_classes=3, AC=True, dis_mlp=dis_mlp).cuda()
optg = optim.Adam(G.parameters(), lr=0.002, betas=(0.5, 0.999))
optd = optim.Adam(D.parameters(), lr=0.002, betas=(0.5, 0.999))
distance = (distance + 2) / 2
if os.path.exists(os.path.join('MOG', '1D', f'{distance}_{gan_loss}{suffix}_{run_id}')):
pass
else:
os.makedirs(os.path.join('MOG', '1D', f'{distance}_{gan_loss}{suffix}_{run_id}'))
save_path = os.path.join('MOG', '1D', f'{distance}_{gan_loss}{suffix}_{run_id}')
data1 = torch.randn(128000).cuda()
data2 = torch.randn(128000).cuda() * 2 + distance
data3 = torch.randn(128000).cuda() * 3 + distance * 2
df1 = pd.DataFrame()
    df2 = pd.DataFrame()
# Script to plot Figures 4 (A, B and C)
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
# Prepare the dataframe containing all variation data. MERGED_prio1_prio2.csv is a dataframe with all germline variation found in actionable genes (known and novel)
df = pd.read_csv('/path/to/MERGED_prio1_prio2.csv',sep='\t') # '/mnt/64716603-5b56-4f9a-b195-c11560647a3a/Projects/PHARMACOGENETICS/PGx_project/CNIO_jLanillos/Tier1_PharmGKB/samples'
#Filtering the "consequence" column for those variants of interest. I have previously checked all possible consequences annotated and chosen the following ones:
df = df.loc[df['annonimous_ANNOTATION'].str.contains('missense|frameshift|stop|start_lost|splice')] # len(df) = 3448
mask = (df['annonimous_GENE'].str.contains(','))
df_valid = df[mask]
df['SYMBOL'] = df['annonimous_GENE']
# Some variants affect overlapping genes, and I just want to get the info related to the genes of interest. Create an auxiliary 'aux' column which contains the index location of the gene of interest. E.g. UGT1A8,UGT1A4,UGT1A1
# aux = 2, to retrieve later the gene UGT1A1 from that list (which is in the "annonimous_GENE" col)
genesdf = pd.read_csv('/path/to/bioMart_transcripts_length.csv',sep='\t')
genes = list(genesdf['Gene name'])
df.loc[mask, 'aux'] = df_valid.apply(lambda x: str([i for i, j in enumerate(x['annonimous_GENE'].split(',')) if j == [z for z in genes if z in x['annonimous_GENE']][0]]).replace('[','').replace(']',''), axis=1)
mask = (df['annonimous_GENE'].str.contains(','))
df_valid = df[mask]
df.loc[mask, 'SYMBOL'] = df_valid.apply(lambda x: x['annonimous_GENE'].split(',')[int(x['aux'])],axis = 1)
# Since we know the 'aux' column, we can apply the same principle to columns with multiple annotations and get the right term into "ANNOTATION" column
df['ANNOTATION'] = df['annonimous_ANNOTATION']
df.loc[mask, 'ANNOTATION'] = df_valid.apply(lambda x: x['annonimous_ANNOTATION'].split(',')[int(x['aux'])],axis = 1)
#Filter by consequence again on the newly created "ANNOTATION" column. That column may contain a consequence we did not want
df = df.loc[df['ANNOTATION'].str.contains('missense|frameshift|stop|start_lost|splice')] # len(df) = 3387
df = df.loc[~(df['ANNOTATION'].str.contains('synonymous SNV'))] # len(df) = 3352
# More filtering criteria? No, not at the moment
# Splice variants. I have summarized all variants containing "splice" and checked their distance to the nearest splice site (NearestSS): df.loc[df['ANNOTATION'].str.contains('splice')].groupby(['ANNOTATION','distNearestSS'])['ID'].count()
# I will filter out variants labeled as "splice_acceptor_variant&splice_region_variant&intron_variant" and "splice_donor_variant&splice_region_variant&intron_variant"
# Variants in those two categories are located further from the SS (as far as -11 and 13 bp)
df['distNearestSS_aux'] = df['distNearestSS']
mask = (df['annonimous_GENE'].str.contains(','))
df_valid = df[mask]
df.loc[mask, 'distNearestSS_aux'] = df_valid.apply(lambda x: x['distNearestSS_aux'].split(',')[int(x['aux'])],axis = 1)
# Filtering splice_acceptor_variant&splice_region_variant&intron_variant in the splicing canonical regions (-2 and -1 bp from the start of the exon)
dfaux = df.loc[df['ANNOTATION'] == 'splice_acceptor_variant&splice_region_variant&intron_variant'] #len(dfaux) = 114
dfaux = dfaux.loc[dfaux['distNearestSS_aux'].astype(float)>-3] #len(dfaux) = 1
df = df.loc[~(df['ANNOTATION'] == 'splice_acceptor_variant&splice_region_variant&intron_variant')] # len(df) = 3238; these variants are further than 3 bp from the canonical splice site
df = pd.concat([df,dfaux]) #len(df) = 3239
# Filtering splice_donor_variant&splice_region_variant&intron_variant in the splicing canonical regions (+2 and +1 bp from the end of the exon)
dfaux = df.loc[df['ANNOTATION'] == 'splice_donor_variant&splice_region_variant&intron_variant'] #len(dfaux) = 114
dfaux = dfaux.loc[dfaux['distNearestSS_aux'].astype(float)<3] #len(dfaux) = 1
df = df.loc[~(df['ANNOTATION'] == 'splice_donor_variant&splice_region_variant&intron_variant')] # len(df) = 3238; these variants are further than 3 bp from the canonical splice site
df = pd.concat([df,dfaux]) #len(df) = 3127
# Filtering out all variants which are spliceACCEPTOR/DONOR&intron_variant with close distance to NearestSS (-3,3)
dfaux = df.loc[~df['ANNOTATION'].str.contains('splice_acceptor_variant&intron_variant')]
dfaux = dfaux.loc[~dfaux['ANNOTATION'].str.contains('splice_donor_variant&intron_variant')]
dff = df.loc[(df['ANNOTATION'].str.contains('splice_acceptor_variant&intron_variant')) & (df['distNearestSS_aux'].astype(float)>-3) ]
dff2 = df.loc[(df['ANNOTATION'].str.contains('splice_donor_variant&intron_variant')) & (df['distNearestSS_aux'].astype(float)<3) ]
df = pd.concat([dfaux,dff, dff2])
from mlxtend.frequent_patterns import apriori, fpgrowth, association_rules
from mlxtend.preprocessing import TransactionEncoder
import pandas as pd
from functools import wraps
from constants import *
from helpers import DataCleaner, ResponseParser
def _can_export(f):
"""
Decorator for AssociationMiner methods that return Rules.
If AssociationMiner is set to export to CSV, then exporting occurs.
:param f: Method
:return: Method
"""
@wraps(f)
def wrapper(self, *args, **kwargs):
name_map = {
"mine_favorite_characters": "overall-favorite-characters",
"mine_favorite_band_members": "favorite-characters-in-band",
"mine_favorite_character_reasons": "reasons-for-liking-characters",
"mine_age_favorite_characters": "age-and-favorite-characters",
"mine_gender_favorite_characters": "gender-and-favorite-characters",
"mine_region_favorite_characters": "region-and-favorite-characters",
"mine_age_favorite_band_chara": "age-and-favorite-band-for-characters",
"mine_gender_favorite_band_chara": "gender-and-favorite-band-for-characters",
"mine_region_favorite_band_chara": "region-and-favorite-band-for-characters",
"mine_region_favorite_seiyuu": "region-and-favorite-seiyuu"
}
res = f(self, *args, **kwargs) # Rules object
if self.export_to_csv:
name = name_map[f.__name__]
if args:
name += "." + "".join(args)
if kwargs:
name += "." + "".join(kwargs.values())
if res.table_organized is not None:
res.table_organized.to_csv(f"{name}.organized.csv")
res.table.to_csv(f"{name}.csv")
return res
return wrapper
class AssociationMiner:
"""
http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/apriori/
http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/association_rules/
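
    Example usage (hypothetical TSV path; `Rules.table` exposes the mined
    rules as a DataFrame):

        miner = AssociationMiner("survey_responses.tsv")
        rules = miner.mine_favorite_characters()
        print(rules.table.head())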
"""
def __init__(
self,
tsv_path,
export_to_csv=False
):
self.df = DataCleaner.prepare_data_frame(tsv_path)
self.export_to_csv = export_to_csv
def mine(
self,
columns,
column_values,
min_frequency=0.01, # ~25 responses
metric="confidence",
metric_threshold=0.3
):
"""
Generic function to mine rules from responses. Default metric is confidence > 30%.
If confidence is too high, rules are too specific to individual people, as opinions vary quite a bit.
:param columns: List of column names to consider.
:param column_values: List of column values each column can have (one list per column).
:param min_frequency: threshold frequency for itemset to be considered "frequent"
:param metric: "confidence" or "lift"
:param metric_threshold: Float, [0, 1]
:return: Rules
"""
raw_itemsets = self._generate_frequent_itemsets(columns, column_values, min_frequency)
return self._generate_association_rules(raw_itemsets, metric, metric_threshold)
@_can_export
def mine_favorite_characters(self):
"""
Mines for rules regarding all favorite characters.
:return Rules
"""
return self.mine([CHARACTERS], [ALL_CHARACTERS])
@_can_export
def mine_favorite_band_members(self):
"""
Mines for rules regarding favorite character in each band.
:return Rules
"""
return self.mine(
[CHARACTER_POPIPA,
CHARACTER_AFTERGLOW,
CHARACTER_GURIGURI,
CHARACTER_HHW,
CHARACTER_PASUPARE,
CHARACTER_ROSELIA],
            [ALL_CHARACTERS] * 7  # list of 7 CHARACTER lists
)
@_can_export
def mine_favorite_character_reasons(
self,
antecedent="all"
):
"""
Mines for rules involving favorite characters and reasons for liking those characters.
:param antecedent: "character", "reason", or "all"
:return: Rules
"""
if antecedent not in ["all", "character", "reason"]:
raise ValueError("invalid antecedent argument: must be 'all', 'character', or 'reason'")
rules = self.mine([CHARACTERS, CHARACTER_REASONS], [ALL_CHARACTERS, ALL_CHARACTER_REASONS])
rules = Rules(rules.search(one_of=ALL_CHARACTER_REASONS))
if antecedent == "all":
return rules
elif antecedent == "character":
return Rules(rules.search(one_of=ALL_CHARACTERS, location="antecedents"))
elif antecedent == "reason":
return Rules(rules.search(one_of=ALL_CHARACTER_REASONS, location="antecedents"))
@_can_export
def mine_age_favorite_characters(self):
"""
Mines for rules that predict age from favorite characters.
The reverse doesn't seem to predict anything useful (the predictions are just popular characters).
        Since this is predicting age, the predictions are overwhelmingly 20-24yrs and 14-19yrs: confidence
        must be >30%, and less common age groups wouldn't meet this threshold.
:return Rules
"""
age_values = DataCleaner.filter_age(self.df)[AGE].unique().tolist()
table = self.mine(
[CHARACTERS, AGE], [ALL_CHARACTERS, age_values]
).search(
one_of=age_values,
location="consequents"
)
return Rules(table)
@_can_export
def mine_gender_favorite_characters(self):
"""
Mines for rules that predict gender from favorite characters.
:return Rules
"""
gender_values = DataCleaner.filter_gender(self.df)[GENDER].unique().tolist()
table = self.mine(
[CHARACTERS, GENDER], [ALL_CHARACTERS, gender_values]
).search(
one_of=gender_values,
location="consequents"
)
return Rules(table)
@_can_export
def mine_region_favorite_characters(self):
"""
Mines for rules that predict region from favorite characters.
:return Rules
"""
region_values = DataCleaner.filter_region(self.df, keep_all_legal=True)[REGION].unique().tolist()
table = self.mine(
[CHARACTERS, REGION], [ALL_CHARACTERS, region_values]
).search(
one_of=region_values,
location="consequents"
)
return Rules(table)
@_can_export
def mine_age_favorite_band_chara(self):
"""
:return: Rules
"""
values = DataCleaner.filter_age(self.df)[AGE].unique().tolist()
table = self.mine(
[BANDS_CHARA, AGE], [ALL_BANDS, values]
).search(one_of=values)
return Rules(table)
@_can_export
def mine_gender_favorite_band_chara(self):
"""
:return: Rules
"""
values = DataCleaner.filter_gender(self.df)[GENDER].unique().tolist()
table = self.mine(
[BANDS_CHARA, GENDER], [ALL_BANDS, values]
).search(one_of=values)
return Rules(table)
@_can_export
def mine_region_favorite_band_chara(self):
"""
:return: Rules
"""
values = DataCleaner.filter_region(self.df)[REGION].unique().tolist()
table = self.mine(
[BANDS_CHARA, REGION], [ALL_BANDS, values]
).search(one_of=values)
return Rules(table)
@_can_export
def mine_region_favorite_seiyuu(self):
"""
Note: "I don't have a favorite seiyuu" is a possible answer that isn't ignored in mining.
Note: The "Other" answer for favorite seiyuu is ignored.
:return: Rules
"""
df = DataCleaner.filter_region(self.df)
regions = df[REGION].unique().tolist()
seiyuu = ResponseParser.unique_answers(df, SEIYUU)
seiyuu.remove("Other") # both regions and seiyuu have "Other" answer, so drop one of them
table = self.mine(
[REGION, SEIYUU], [regions, seiyuu]
).search(one_of=regions)
return Rules(table)
def _generate_frequent_itemsets(
self,
columns,
column_values,
min_frequency
):
"""
Uses the values of columns to generate frequent itemsets for association rule mining.
:param columns: List of column names to use
:param column_values: List; each element is itself a list, holding the legal values of the column
:param min_frequency: threshold frequency for set to be considered "frequent"
:return DataFrame
"""
lists = self._reduce(self.df, columns, column_values)
one_hot_df = self._transform_to_one_hot(lists)
return self._find_sets(one_hot_df, min_frequency=min_frequency)
def _generate_association_rules(
self,
itemsets,
metric,
metric_threshold
):
"""
Uses frequent itemsets to generate rules with 1 antecedent and sorted by lift.
:param itemsets: DataFrame
:param metric: "confidence" or "lift"
:param metric_threshold: Float, [0, 1]
:return: Rules
"""
rules = self._find_rules(itemsets, metric, metric_threshold)
rules.organize(max_antecedents=1, sort_by=["lift"], sort_ascending=[False])
return rules
@staticmethod
def _find_sets(
one_hot_df,
min_frequency
):
"""
Finds frequent itemsets.
:param min_frequency: Float; threshold occurrence for a set to be considered "frequent"
:return DataFrame
"""
itemsets = apriori(one_hot_df, min_support=min_frequency, use_colnames=True)
return itemsets.sort_values(by=["support"], ascending=False)
@staticmethod
def _filter_itemsets(
itemsets,
remove_single_items=True
):
"""
Filters itemsets based on flags.
:param itemsets: DataFrame
:param remove_single_items: Bool; whether to remove sets with one item or not
:return DataFrame
"""
# Remove all sets with one item, because those are just the most popular choices
if remove_single_items:
itemsets = itemsets.copy()
itemsets["length"] = itemsets["itemsets"].apply(lambda x: len(x))
return itemsets[itemsets["length"] > 1]
else:
return itemsets
@staticmethod
def _find_rules(
itemsets,
metric,
metric_threshold
):
"""
Uses itemsets attribute to find rules.
:param metric: "confidence" or "lift"
:param metric_threshold: Float, [0, 1]
:return Rules
"""
return Rules(association_rules(itemsets, metric=metric, min_threshold=metric_threshold))
@staticmethod
def _reduce(
df,
column_list,
column_values_list
):
"""
Reduces a DataFrame to lists, where each list holds the values of the columns listed in column_list.
:param df: DataFrame
:param column_list: A list of columns to reduce to
:param column_values_list: A list parallel to column_list that lists values to look for in each column
:return List of Lists
"""
rows = []
# remove rows with invalids in a column
for column in column_list:
df = DataCleaner.filter_invalids(df, column)
df.reset_index(drop=True, inplace=True) # needed for enumeration/iteration to work
# Make rows
for _ in range(len(df)):
rows.append([])
# Populate rows
for col_i, column in enumerate(column_list):
for row_i, column_value in df[column].items():
# Find all values in multi-response
legal_values = column_values_list[col_i]
found_values = []
for v in legal_values:
if v in column_value:
found_values.append(v)
rows[row_i].extend(found_values)
return rows
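    # For instance, _reduce(df, [CHARACTERS, AGE], [ALL_CHARACTERS, age_values]) yields one list
    # per respondent, e.g. ["<favorite character>", "<age group>"], containing every legal value
    # found in that respondent's (possibly multi-select) answers.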
def _transform_to_one_hot(
self,
itemset_list
):
"""
Converts itemset list into one-hot encoded DataFrame,
which is required for frequent itemset mining.
:param itemset_list: A list of lists
:return DataFrame
"""
encoder = TransactionEncoder()
array = encoder.fit(itemset_list).transform(itemset_list)
df = pd.DataFrame(array)
# rename columns
columns = self._parse_columns(encoder.columns_)
df.rename(columns={k: v for k, v in enumerate(columns)}, inplace=True)
return df
@staticmethod
def _parse_columns(
columns
):
"""
Remove quotes in column names, because Pandas doesn't like them
"""
res = []
for column in columns:
res.append(column.replace('"', ''))
return res
class Rules:
"""
Represents a set of association rules.
"""
def __init__(
self,
df
):
"""
:param df: DataFrame; original table that is always retained
"""
self._df = df
self._organized_df = None
self._sort_by = ["lift"]
self._sort_ascending = [False]
@property
def table(self):
"""
:return: DataFrame
"""
return self._df
@property
def table_organized(self):
"""
:return: DataFrame
"""
return self._organized_df
def search(
self,
one_of,
location="all",
use_organized=True
):
"""
Filters out rules that don't match search condition.
E.g. if one_of=["Chisato", "Hina"], all rules with "Chisato" or "Hina" in antecedents or consequents
will be returned.
:param one_of: List; each element is search term, with entire list being a disjunction/OR
:param location: "antecedents", "consequents", or "all"; where to look for search terms
:param use_organized: Bool; whether to use organized table or not
:return: DataFrame with results
"""
if location not in ["all", "antecedents", "consequents"]:
raise ValueError("invalid location argument: must be 'all', 'antecedents', or 'consequents'")
if use_organized and self._organized_df is not None:
rules = self._organized_df.copy()
else:
rules = self._df.copy()
res = None
filter_partials = []
for term in one_of:
# Do filtering/search of term at specified locations
if location == "all":
filter_partials.append(rules[rules["antecedents"].astype(str).str.contains(term)])
filter_partials.append(rules[rules["consequents"].astype(str).str.contains(term)])
else:
filter_partials.append(rules[rules[location].astype(str).str.contains(term)])
# Union partial filter results to get final result
if res is not None:
filter_partials.append(res)
            res = pd.concat(filter_partials)
# -*- coding: utf-8 -*-
# + {}
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
import matplotlib as mpl
import numba
import squarify
import numpy as np
from math import pi
from sklearn.decomposition import PCA
from sklearn.mixture import GaussianMixture as GMM
from umap import UMAP
from sklearn.cluster import KMeans
from scipy.spatial.distance import cdist
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from datetime import date
from warnings import filterwarnings
import os
import community
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras import regularizers
from keras.utils import np_utils
from keras.metrics import categorical_accuracy
from keras.layers import Dropout
import keras.backend as K
filterwarnings('ignore')
# +
def get_gene_data(data, gene_name_column, test_gene_list):
"""Extract data from specific genes given a larger dataframe.
Inputs
* data: large dataframe from where to filter
* gene_name_column: column to filter from
* test_gene_list : a list of genes you want to get
Output
* dataframe with the genes you want
"""
gene_profiles = pd.DataFrame()
for gene in data[gene_name_column].values:
if gene in test_gene_list:
df_ = data[(data[gene_name_column] == gene)]
gene_profiles = pd.concat([gene_profiles, df_])
gene_profiles.drop_duplicates(inplace = True)
return gene_profiles
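# Minimal usage sketch for get_gene_data (hypothetical toy data):
#   expr_df = pd.DataFrame({"gene name": ["lacz", "arac", "crp"],
#                           "condition_1": [1.2, -0.3, 0.8]})
#   subset = get_gene_data(expr_df, "gene name", ["lacz", "crp"])
#   # -> only the lacz and crp rows, with duplicate rows dropped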
# ---------PANDAS FUNCTIONS FOR DATA EXPLORATION -------------------------
def count_feature_types(data):
"""
Get the dtype counts for a dataframe's columns.
"""
df_feature_type = data.dtypes.sort_values().to_frame('feature_type')\
.groupby(by='feature_type').size().to_frame('count').reset_index()
return df_feature_type
def get_df_missing_columns(data):
'''
Get a dataframe of the missing values in each column with its
corresponding dtype.
'''
# Generate a DataFrame with the % of missing values for each column
df_missing_values = (data.isnull().sum(axis = 0) / len(data) * 100)\
.sort_values(ascending = False)\
.to_frame('% missing_values').reset_index()
# Generate a DataFrame that indicated the data type for each column
df_feature_type = data.dtypes.to_frame('feature_type').reset_index()
# Merge frames
missing_cols_df = pd.merge(df_feature_type, df_missing_values, on = 'index',
how = 'inner')
missing_cols_df.sort_values(['% missing_values', 'feature_type'], inplace = True)
return missing_cols_df
def find_constant_features(data):
"""
Get a list of the constant features in a dataframe.
"""
const_features = []
for column in list(data.columns):
if data[column].unique().size < 2:
const_features.append(column)
return const_features
def duplicate_columns(frame):
'''
Get a list of the duplicate columns in a pandas dataframe.
'''
groups = frame.columns.to_series().groupby(frame.dtypes).groups
dups = []
for t, v in groups.items():
cs = frame[v].columns
vs = frame[v]
lcs = len(cs)
for i in range(lcs):
ia = vs.iloc[:,i].values
for j in range(i+1, lcs):
ja = vs.iloc[:,j].values
if np.array_equal(ia, ja):
dups.append(cs[i])
break
return dups
def get_duplicate_columns(df):
"""
Returns a list of duplicate columns
"""
groups = df.columns.to_series().groupby(df.dtypes).groups
dups = []
for t, v in groups.items():
cs = df[v].columns
vs = df[v]
lcs = len(cs)
for i in range(lcs):
ia = vs.iloc[:,i].values
for j in range(i+1, lcs):
ja = vs.iloc[:,j].values
if np.array_equal(ia, ja):
dups.append(cs[i])
break
return dups
def get_df_stats(df):
"""
Wrapper for dataframe stats.
Output: missing_cols_df, const_feats, dup_cols_list
"""
missing_cols_df = get_df_missing_columns(df)
const_features_list = find_constant_features(df)
dup_cols_list = duplicate_columns(df)
return missing_cols_df, const_features_list, dup_cols_list
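# Quick usage sketch (raw_df is any hypothetical DataFrame):
#   missing_df, const_cols, dup_cols = get_df_stats(raw_df)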
def test_missing_data(df, fname):
"""Look for missing entries in a DataFrame."""
assert np.all(df.notnull()), fname + ' contains missing data'
def col_encoding(df, column):
"""
Returns a one hot encoding of a categorical colunmn of a DataFrame.
------------------------------------------------------------------
Params
-------
-df:
-column: name of the column to be one-hot-encoded in string format.
Returns
---------
- hot_encoded: one-hot-encoding in matrix format.
"""
le = LabelEncoder()
label_encoded = le.fit_transform(df[column].values)
hot = OneHotEncoder(sparse = False)
hot_encoded = hot.fit_transform(label_encoded.reshape(len(label_encoded), 1))
return hot_encoded
def one_hot_df(df, cat_col_list):
"""
Make one hot encoding on categoric columns.
Returns a dataframe for the categoric columns provided.
-------------------------
inputs
- df: original input DataFrame
- cat_col_list: list of categorical columns to encode.
outputs
- df_hot: one hot encoded subset of the original DataFrame.
"""
df_hot = pd.DataFrame()
for col in cat_col_list:
encoded_matrix = col_encoding(df, col)
df_ = pd.DataFrame(encoded_matrix,
columns = [col+ ' ' + str(int(i))\
for i in range(encoded_matrix.shape[1])])
df_hot = pd.concat([df_hot, df_], axis = 1)
return df_hot
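# Usage sketch for the one-hot helpers (DataFrame and column names are hypothetical):
#   df_hot = one_hot_df(survey_df, cat_col_list=["gender", "region"])
#   # each categorical column becomes a block of 0/1 columns named "gender 0", "gender 1", ...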
# OTHER FUNCTIONS
def plot_kmeans(kmeans, X, n_clusters=4, rseed=0, ax=None):
"""
Wrapper from JakeVDP data analysis handbook
"""
labels = kmeans.fit_predict(X)
# plot the input data
ax = ax or plt.gca()
ax.axis('equal')
ax.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis', zorder=2)
# plot the representation of the KMeans model
centers = kmeans.cluster_centers_
radii = [cdist(X[labels == i], [center]).max()
for i, center in enumerate(centers)]
for c, r in zip(centers, radii):
ax.add_patch(plt.Circle(c, r, fc='#CCCCCC', lw=3, alpha=0.5, zorder=1))
@numba.jit(nopython=True)
def draw_bs_sample(data):
"""
Draw a bootstrap sample from a 1D data set.
Wrapper from J. Bois' BeBi103 course.
"""
return np.random.choice(data, size=len(data))
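# Example: bootstrap replicates of the mean (a sketch with synthetic data):
#   data = np.random.normal(0, 1, size=200)
#   bs_means = np.array([np.mean(draw_bs_sample(data)) for _ in range(1000)])
#   ci_low, ci_high = np.percentile(bs_means, [2.5, 97.5])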
def net_stats(G):
'''Get basic network stats and plots. Specifically degree and clustering coefficient distributions.'''
net_degree_distribution= []
for i in list(G.degree()):
net_degree_distribution.append(i[1])
print("Number of nodes in the network: %d" %G.number_of_nodes())
print("Number of edges in the network: %d" %G.number_of_edges())
print("Avg node degree: %.2f" %np.mean(list(net_degree_distribution)))
print('Avg clustering coefficient: %.2f'%nx.cluster.average_clustering(G))
print('Network density: %.2f'%nx.density(G))
fig, axes = plt.subplots(1,2, figsize = (16,4))
axes[0].hist(list(net_degree_distribution), bins=20, color = 'lightblue')
axes[0].set_xlabel("Degree $k$")
#axes[0].set_ylabel("$P(k)$")
axes[1].hist(list(nx.clustering(G).values()), bins= 20, color = 'lightgrey')
axes[1].set_xlabel("Clustering Coefficient $C$")
#axes[1].set_ylabel("$P(k)$")
axes[1].set_xlim([0,1])
def get_network_hubs(ntw):
"""
input: NetworkX ntw
    output: a list of the top 10 (node, eigenvector centrality score) pairs, sorted by centrality
"""
eigen_cen = nx.eigenvector_centrality(ntw)
hubs = sorted(eigen_cen.items(), key = lambda cc:cc[1], reverse = True)[:10]
return hubs
def get_network_clusters(network_lcc, n_clusters):
"""
    input = the network's largest connected component (with 'modularity' node attributes) and the number of clusters
    output = a list with the network's clusters
"""
cluster_list = []
for i in range(n_clusters):
cluster_lcc = [n for n in network_lcc.nodes()\
if network_lcc.node[n]['modularity'] == i]
cluster_list.append(cluster_lcc)
return cluster_list
def download_and_preprocess_data(org, data_dir = None, variance_ratio = 0.8,
output_path = '~/Downloads/'):
"""
General function to download and preprocess dataset from Colombos.
    It may not work as-is on Windows because it shells out to wget/unzip; on Windows,
    consider downloading the dataset with urllib instead.
Params
-------
    data_dir (str): path to directory + filename. If None, the dataset is downloaded
        from the internet.
org (str) : Organism to work with. Available datasets are E. coli (ecoli),
B.subtilis (bsubt), P. aeruginosa (paeru), M. tb (mtube), etc.
Source: http://colombos.net/cws_data/compendium_data/
    variance_ratio (float): fraction of the explained variance to keep for the PCA denoising.
    output_path (str): directory where the denoised CSV is written.
Returns
--------
denoised (pd.DataFrame)
"""
#Check if dataset is in directory
if data_dir is None:
download_cmd = 'wget http://colombos.net/cws_data/compendium_data/'\
+ org + '_compendium_data.zip'
unzip_cmd = 'unzip '+org +'_compendium_data.zip'
os.system(download_cmd)
os.system(unzip_cmd)
df = pd.read_csv('colombos_'+ org + '_exprdata_20151029.txt',
sep = '\t', skiprows= np.arange(6))
df.rename(columns = {'Gene name': 'gene name'}, inplace = True)
df['gene name'] = df['gene name'].apply(lambda x: x.lower())
else:
df = pd.read_csv(data_dir, sep = '\t', skiprows= np.arange(6))
try :
df.rename(columns = {'Gene name': 'gene name'}, inplace = True)
except:
pass
annot = df.iloc[:, :3]
data = df.iloc[:, 3:]
preprocess = make_pipeline(SimpleImputer(strategy = 'median'),
StandardScaler(), )
scaled_data = preprocess.fit_transform(data)
# Initialize PCA object
pca = PCA(variance_ratio, random_state = 42).fit(scaled_data)
# Project to PCA space
projected = pca.fit_transform(scaled_data)
# Reconstruct the dataset using 80% of the variance of the data
reconstructed = pca.inverse_transform(projected)
# Save into a dataframe
reconstructed_df = pd.DataFrame(reconstructed, columns = data.columns.to_list())
# Concatenate with annotation data
denoised_df = pd.concat([annot, reconstructed_df], axis = 1)
denoised_df['gene name'] = denoised_df['gene name'].apply(lambda x: x.lower())
# Export dataset
    denoised_df.to_csv(output_path + 'denoised_' + org + '.csv', index = False)
    return denoised_df
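# Usage sketch (downloads the compendium and shells out to wget/unzip, so Linux/macOS only):
#   denoised = download_and_preprocess_data("ecoli", variance_ratio=0.8,
#                                           output_path="~/Downloads/")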
def annot_data_trn(
tf_tf_net_path=None,
trn_path=None,
denoised_data_path=None,
org="ecoli",
output_path= "~/Downloads/"):
"""
Annotate the preprocessed dataset with network clusters as a one-hot-matrix.
Performs the operation on E. coli by default.
Params
-------
Returns
--------
"""
# Load TF-TF net and TRN
if tf_tf_net_path is None and org is None:
os.system(
"wget http://regulondb.ccg.unam.mx/menu/download/datasets/files/network_tf_tf.txt"
)
tf_trn = pd.read_csv(
"network_tf_tf.txt",
delimiter="\t",
comment="#",
names=["TF", "TG", "regType", "ev", "confidence", "unnamed"],
usecols=np.arange(5),
)
else:
try:
tf_trn = pd.read_csv(tf_tf_net_path)
except:
tf_trn = pd.read_csv(
tf_tf_net_path,
delimiter="\t",
comment="#",
names=["TF", "TG", "regType", "ev", "confidence", "unnamed"],
usecols=np.arange(5),
)
if trn_path is None:
os.system(
"wget http://regulondb.ccg.unam.mx/menu/download/datasets/files/network_tf_gene.txt"
)
trn = pd.read_csv(
"network_tf_gene.txt",
delimiter="\t",
comment="#",
names=["TF", "TG", "regType", "ev", "confidence", "unnamed"],
usecols=np.arange(5),
)
else:
try:
trn = pd.read_csv(trn_path)
except:
trn = pd.read_csv(
trn_path,
delimiter="\t",
comment="#",
names=["TF", "TG", "regType", "ev", "confidence", "unnamed"],
usecols=np.arange(5),
)
# Lowercase gene names for both datasets
tf_trn.TF = tf_trn.TF.apply(lambda x: x.lower())
tf_trn.TG = tf_trn.TG.apply(lambda x: x.lower())
trn.TF = trn.TF.apply(lambda x: x.lower())
trn.TG = trn.TG.apply(lambda x: x.lower())
# Turn the TF TRN dataframe into a graph object
net = nx.from_pandas_edgelist(
df=tf_trn, source="TF", target="TG"
)
# Compute the LCC
net = max(nx.connected_component_subgraphs(net), key=len)
# Cluster TF net
communities = community.best_partition(net)
# Get number of clusters
n_clusters_tf = max(communities.values())
# Embed cluster annotation in net
nx.set_node_attributes(net, values=communities, name="modularity")
# Get np.array of TF clusters
cluster_list = np.array(get_network_clusters(net, n_clusters_tf))
# Get cluster sizes
cluster_sizes = np.array([len(clus) for clus in cluster_list])
# Select only the clusters with more than 5 TFs
clus_list = cluster_list[cluster_sizes > 5]
# Get a DataFrame of the TGs in each cluster
tgs_ = pd.DataFrame()
for ix, clus in enumerate(clus_list):
clus_trn = get_gene_data(trn, "TF", clus)
clus_tgs = list(set(clus_trn["TG"].values))
tgs_df = pd.DataFrame({"TGs": clus_tgs})
tgs_df["cluster"] = ix + 1
tgs_ = pd.concat([tgs_, tgs_df])
# -----Start constructing the annotated dataset ------
if denoised_data_path is None:
try:
denoised = pd.read_csv("denoised_coli.csv")
except:
            denoised = download_and_preprocess_data(org)
else:
try:
denoised = pd.read_csv(denoised_data_path + 'denoised_'+ org + '.csv')
except:
            raise IOError('Could not load denoised dataset. Check file name input.')
# Get nrows of denoised data
nrows_data = denoised.shape[0]
# Initialize one-hot-matrix
one_hot_mat = np.zeros((nrows_data, n_clusters_tf))
# Populate one-hot-matrix
for ix, gene in enumerate(denoised["gene name"]):
gene_clus = tgs_[tgs_["TGs"] == gene]
if gene_clus.shape[0] > 0:
clusters = gene_clus.cluster.values
clus_ix = [clus - 1 for clus in clusters]
one_hot_mat[ix, clus_ix] = 1
else:
pass
# Make one-hot-matrix into a dataframe
one_hot_df = pd.DataFrame(
one_hot_mat,
columns=["cluster " + str(i) for i in np.arange(1, n_clusters_tf + 1)],
)
# Get the n_samples of smallest cluster
clus_samples = one_hot_mat.sum(axis=0)
min_clus_samples = min(clus_samples)
# Join the denoised dataset and one hot matrix
denoised_hot = pd.concat([denoised, one_hot_df], axis=1)
# add a column corresponding to genes that are TGs
one_hot_sum = one_hot_mat.sum(axis=1) # helper indicator array
denoised_hot["TG"] = [1 if val > 0 else 0 for i, val in enumerate(one_hot_sum)]
    if output_path == "~/Downloads/":
denoised_hot.to_csv("~/Downloads/denoised_hot_" + org + ".csv", index=False)
else:
denoised_hot.to_csv( output_path + "denoised_hot_" + org + ".csv", index=False)
def train_keras_multilabel_nn(X_train,
y_train,
partial_x_train,
partial_y_train,
x_val= None,
y_val= None,
n_units=64,
epochs=20,
n_deep_layers=1,
batch_size=128):
'''
Trains a Keras model.
Assumes there are a X_train, y_train, x_val, y_val datasets.
Params
-------
n_units: number of neurons per deep layer.
epochs: number of epochs to train the net.
n_deep_layers: number of layers in the deep neural net.
batch_size : batch size to train the net with.
Returns
--------
nn (Keras model): neural net model
history (pd.DataFrame): history of the training procedure, includes
training and validation accuracy and loss.
'''
nn = Sequential()
#initial layer
nn.add(Dense(n_units, activation='relu', input_shape=(X_train.shape[1],)))
#extra deep layers
for i in range(n_deep_layers):
nn.add(Dense(n_units, activation='relu',
kernel_regularizer=regularizers.l2(0.001))
)
nn.add(Dropout(0.25))
#add final output layer
nn.add(Dense(y_train.shape[1], activation='softmax'))
nn.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
#print neural net architecture
nn.summary()
#fit and load history
    if x_val is None or y_val is None:
history = nn.fit(X_train, y_train, epochs=epochs,
batch_size= batch_size,
verbose = 0)
else:
history = nn.fit(partial_x_train, partial_y_train, epochs=epochs,
batch_size= batch_size, validation_data=(x_val, y_val),
verbose = 0)
    history_df = pd.DataFrame(history.history)
    return nn, history_df
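# Example call (a sketch; assumes pre-split numpy arrays with one-hot encoded targets):
#   nn, history_df = train_keras_multilabel_nn(X_train, y_train,
#                                              partial_x_train, partial_y_train,
#                                              x_val=x_val, y_val=y_val,
#                                              n_units=128, epochs=30)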
# %%
import os
import pandas as pd
import numpy as np
import datetime
from googletrans import Translator
from vininfo import Vin
# %%
motocicleta_p2 = pd.read_excel(r'D:\Basededatos\Origen\MOTOCICLETAS-COLOMBIA\MOTOCICLETA_P2.xlsx', engine='openpyxl')
# =================================================================
# IMPORT REQUIRED LIBRARIES
# =================================================================
import os
import pandas as pd
# =================================================================
# READ DATA
# =================================================================
data_location = os.path.join(os.path.abspath(""), '2020/day-05-binary-boarding/')
# with open(os.path.join(data_location, 'input_small.txt'), 'r') as f:
with open(os.path.join(data_location, 'input.txt'), 'r') as f:
data = f.read().splitlines()
# print(data)
# =================================================================
# LOGIC - PART ONE
# =================================================================
def find_result_part_one():
maxSeatID = 0
maxSeat = ""
for aSeat in data:
rowInstruction = aSeat[0:7]
colInstruction = aSeat[-3:]
lower = 0
upper = 127
for letter in rowInstruction:
if letter == 'F':
upper = (lower + upper) // 2
# print(aSeat + " " + letter + " lower, upper: " + str(lower) + " " + str(upper))
else:
lower = ((lower + upper) // 2) + 1
# print(aSeat + " " + letter + " lower, upper: " + str(lower) + " " + str(upper))
finalRow = min(lower, upper)
lower = 0
upper = 7
for letter in colInstruction:
if letter == 'L':
upper = (lower + upper) // 2
# print(aSeat + " " + letter + " lower, upper: " + str(lower) + " " + str(upper))
else:
lower = ((lower + upper) // 2) + 1
# print(aSeat + " " + letter + " lower, upper: " + str(lower) + " " + str(upper))
finalCol = min(lower, upper)
# # print(str(finalRow) + " " + str(finalCol))
seatID = (finalRow * 8) + finalCol
if seatID > maxSeatID:
maxSeatID = seatID
maxSeat = aSeat
# print(str(aSeat) + " " + str(seatID)
return [maxSeatID, maxSeat]
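# The same decoding can be done by reading the instructions as binary (F/L -> 0, B/R -> 1),
# e.g. for the puzzle's sample seat:
#   seat = "FBFBBFFRLR"
#   row = int(seat[:7].translate(str.maketrans("FB", "01")), 2)   # 44
#   col = int(seat[-3:].translate(str.maketrans("LR", "01")), 2)  # 5
#   seat_id = row * 8 + col                                       # 357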
# =================================================================
# LOGIC - PART TWO
# =================================================================
def getSeatInfo():
seatData = []
for aSeat in data:
rowInstruction = aSeat[0:7]
colInstruction = aSeat[-3:]
lower = 0
upper = 127
for letter in rowInstruction:
if letter == 'F':
upper = (lower + upper) // 2
# print(aSeat + " " + letter + " lower, upper: " + str(lower) + " " + str(upper))
else:
lower = ((lower + upper) // 2) + 1
# print(aSeat + " " + letter + " lower, upper: " + str(lower) + " " + str(upper))
finalRow = min(lower, upper)
lower = 0
upper = 7
for letter in colInstruction:
if letter == 'L':
upper = (lower + upper) // 2
# print(aSeat + " " + letter + " lower, upper: " + str(lower) + " " + str(upper))
else:
lower = ((lower + upper) // 2) + 1
# print(aSeat + " " + letter + " lower, upper: " + str(lower) + " " + str(upper))
finalCol = min(lower, upper)
# # print(str(finalRow) + " " + str(finalCol))
seatData.append([finalRow, finalCol])
# print(str(aSeat) + " " + str(seatID)
return seatData
def find_result_part_two():
seatData = getSeatInfo()
    seat_df = pd.DataFrame(seatData, columns=['row', 'col'])
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core import ops
from pandas.errors import NullFrequencyError
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = pd.timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
            Series([2, 3, 4]) + tdser
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.1.2
# kernelspec:
# display_name: PyCharm (ocean_alzheimers_demo)
# language: python
# name: pycharm-55ce45ad
# ---
# %% {"_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5", "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19", "pycharm": {"is_executing": false}}
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import Imputer
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from xgboost import XGBRegressor
import xgboost
print('xgboost', xgboost.__version__)
# %% {"pycharm": {"name": "#%%\n", "is_executing": false}}
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
from pathlib import Path
print(os.listdir("./input"))
# Any results you write to the current directory are saved as output.
#data = pd.read_csv("../input/oasis_cross-sectional.csv")
data = pd.read_csv('./input/oasis_longitudinal.csv')
### Required libraries
import luigi
import luigi.contrib.s3
from luigi import Event, Task, build # Utilities for actions after a successful or failed task
from luigi.contrib.postgres import CopyToTable, PostgresQuery
import boto3
from datetime import date, datetime
import getpass # Usada para obtener el usuario
from io import BytesIO
import socket #import publicip
import requests
import os, subprocess, ast
import pandas as pd
import psycopg2
from psycopg2 import extras
from zipfile import ZipFile
from pathlib import Path
### Libraries for cleaning
### Imports from the dpa_rita project directory
## Credentials
from src import(
MY_USER,
MY_PASS,
MY_HOST,
MY_PORT,
MY_DB,
)
## Utilities
#from src.orquestadores.tasks.load_test import *
from src.utils.db_utils import save_rds
from src.utils.metadatos_utils import Linaje_load
from src.orquestadores.tasks.load_test import Load_Testing
# ======================================================
# Load stage
# ======================================================
MiLinaje = Linaje_load()
meta_load = [] # list used to collect metadata records
class Load(luigi.Task):
'''
    Loads the files in the data folder into RDS
'''
def requires(self):
return Load_Testing()
    # Collect the current date and user for the metadata record
MiLinaje.fecha = datetime.now()
MiLinaje.usuario = getpass.getuser()
def run(self):
        # EC2 IP address for metadata
MiLinaje.ip_ec2 = str(socket.gethostbyname(socket.gethostname()))
        # Upload the csv files
extension_csv = ".csv"
dir_name="./src/data/"
for item in os.listdir(dir_name):
if item.endswith(extension_csv):
table_name = "raw.rita"
MiLinaje.nombre_archivo = item
                # Number of columns and rows for metadata
                df = pd.read_csv(dir_name + item, low_memory=False)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import xgboost as xgb
from sklearn.model_selection import train_test_split
import statsmodels.api as sm
# all data
prec = pd.read_csv('../data/MH25_vaisalawxt520prec_2017.csv')
prec['time'] = pd.to_datetime(prec['time'])
wind = pd.read_csv('../data/MH25_vaisalawxt520windpth_2017.csv')
wind['time'] = pd.to_datetime(wind['time'])
temp = pd.read_csv('../data/MH30_temperature_rock_2017.csv')
temp['time'] = pd.to_datetime(temp['time'])
radio = pd.read_csv('../data/MH15_radiometer__conv_2017.csv')
radio['time'] = pd.to_datetime(radio['time'])
from pandas import DataFrame, read_csv, to_datetime
from datetime import datetime
import time, sys, os, argparse, pendulum
# Get report related arguments from the command line
parser = argparse.ArgumentParser()
parser.add_argument("-sd","--start_date", help="Enter date in YYYYMMDD format ONLY!", type=str)
parser.add_argument("-ed","--end_date", help="Enter date in YYYYMMDD format ONLY!", type=str)
parser.add_argument("-r","--report", help="Just leave it blank and current week's report will be generated", action='store_true')
#parser.add_argument("-ws","--week_start", help="Enter date in YYYYMMDD format ONLY!", type=str)
args = vars(parser.parse_args())
# Automatically retrieve the working directory
work_dir = os.path.dirname(os.path.realpath(__file__))
# Get the timesheet file path
timesheet_file = work_dir + r'\timesheet.csv'
last_record_file = work_dir + r'\last_record.csv' # This way it's faster to read the last task and write to the timesheet file without reading it completely
# Read the projects list
projects = read_csv(work_dir + r'\projects.csv')
# Get the last record in the timesheet
last_record = read_csv(last_record_file)
import json
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objs as go
def update_graph(
graph_id,
graph_title,
y_train_index,
y_val_index,
run_log_json,
yaxis_title,
):
def smooth(scalars, weight=0.6):
last = scalars[0]
smoothed = list()
for point in scalars:
smoothed_val = last * weight + (1 - weight) * point
smoothed.append(smoothed_val)
last = smoothed_val
return smoothed
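    # smooth() is an exponential moving average (TensorBoard-style smoothing); for example
    #   smooth([0, 1, 1, 1], weight=0.6) -> [0, 0.4, 0.64, 0.784]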
trace_train = go.Scatter()
trace_val = go.Scatter()
layout = go.Layout(
template="plotly_dark",
title_text=graph_title,
paper_bgcolor="rgb(16, 24, 32)",
plot_bgcolor="rgb(16, 24, 32)",
margin=dict(l=40, r=25, b=40, t=40),
)
fig = go.Figure(data=[trace_train, trace_val], layout=layout)
if run_log_json:
run_log_df = pd.read_json(run_log_json, orient="split")
if len(run_log_df["batch"]) != 0:
step = run_log_df["step"]
y_train = run_log_df[y_train_index]
if y_val_index in run_log_df:
y_val = run_log_df[y_val_index]
else:
y_val = pd.Series(dtype=object)
if not y_train.isnull().values.any():
y_train = smooth(y_train)
trace_train = go.Scatter(
x=step,
y=y_train,
mode="lines",
name="Training",
showlegend=True,
)
if y_val.isnull().values.any():
y_val = y_val.dropna()
# y_val = smooth(y_val)
trace_val = go.Scatter(
x=y_val.index,
y=y_val,
mode="lines",
name="Validation",
showlegend=True,
)
fig = go.Figure(data=[trace_train, trace_val], layout=layout)
fig.update_xaxes(range=[0, step.iloc[-1] * 1.1])
if len(y_train) > 1:
fig.update_yaxes(
range=[
max(min(y_train[max(-10, -len(y_train)) : -1]) - 0.1, -0.01),
y_train[-1] + 0.1,
]
)
fig.add_shape(
type="line",
x0=0,
y0=y_train[-1],
x1=step.iloc[-1] * 1.1,
y1=y_train[-1],
line=dict(color="blue", dash="dot", width=1),
xref="x",
yref="y",
)
fig.add_annotation(
x=0,
y=y_train[-1],
text=f"{y_train[-1]:.4f}",
showarrow=False,
yshift=11,
xshift=22,
font=dict(),
bgcolor="rgb(50,50,150)",
)
if not y_val.empty:
fig.update_yaxes(
range=[
max(min(y_train[-1], y_val.iloc[-1]) - 0.1, -0.01),
min(max(y_train[-1], y_val.iloc[-1]) + 0.1, 1.01),
]
)
fig.add_shape(
type="line",
x0=0,
y0=y_val.iloc[-1],
x1=step.iloc[-1] * 1.1,
y1=y_val.iloc[-1],
line=dict(color="red", dash="dot", width=1),
xref="x",
yref="y",
)
fig.add_annotation(
x=0,
y=y_val.iloc[-1],
text=f"{y_val.iloc[-1]:.4f}",
showarrow=False,
yshift=-11,
xshift=22,
font=dict(),
bgcolor="rgb(150,50,50)",
)
return dcc.Graph(
id=graph_id,
config={
"displayModeBar": False,
"scrollZoom": True,
},
figure=fig,
)
return dcc.Graph(
id=graph_id,
config={
"displayModeBar": False,
"scrollZoom": True,
},
figure=fig,
)
def update_current_value(value_train, value_validation, value_title, run_log_json):
if run_log_json:
run_log_df = pd.read_json(run_log_json, orient="split")
if run_log_df["epoch"].last_valid_index():
last_val_index = run_log_df["epoch"].last_valid_index()
val_div = (
html.Div(
f"Validation: \
{run_log_df[value_validation].iloc[last_val_index]:.4f}"
),
)
return [
html.P(
f"Current {value_title}:",
style={
"font-weight": "bold",
"margin-top": "10px",
"margin-bottom": "0px",
},
),
html.Div(f"Training: {run_log_df[value_train].iloc[-1]:.4f}"),
val_div[0],
]
if len(run_log_df["batch"]) != 0:
return [
html.P(
f"Current {value_title}:",
style={
"font-weight": "bold",
"margin-top": "10px",
"margin-bottom": "0px",
},
),
html.Div(f"Training: {run_log_df[value_train].iloc[-1]:.4f}"),
]
def get_input_layer_info(summary):
if "0" in summary["config"]:
config = json.loads(summary["config"]["0"])
layer_info = {
"class_name": config["layers"][0]["class_name"],
"name": config["layers"][0]["config"]["name"],
"input_shape": config["layers"][0]["config"]["batch_input_shape"],
}
return layer_info
def get_layers(summary):
layers = []
if "0" in summary["config"]:
config = json.loads(summary["config"]["0"])
def get_layer_info(layer):
layer_info = {
"Type": layer["class_name"],
"name": layer["config"]["name"],
}
if layer["class_name"] == "Dense":
layer_info["units"] = layer["config"]["units"]
layer_info["activation"] = layer["config"]["activation"]
return layer_info
for i, layer in enumerate(config["layers"]):
layers.append(get_layer_info(layer))
return layers
def update_interval_log(interval_rate):
if interval_rate == "fast":
return 500
elif interval_rate == "regular":
return 1000
elif interval_rate == "slow":
return 5 * 1000
elif interval_rate == "no":
return 24 * 60 * 60 * 1000
def update_progress_bars(run_log_json, model_params):
if run_log_json:
        run_log_df = pd.read_json(run_log_json, orient="split")
# hst.py
import os
import numpy as np
import pandas as pd
from ..io.read_hst import read_hst
class Hst:
def read_hst(self, savdir=None, merge_mhd=True, force_override=False):
"""Function to read hst and convert quantities to convenient units
"""
# Create savdir if it doesn't exist
if savdir is None:
savdir = os.path.join(self.savdir, 'hst')
if not os.path.exists(savdir):
os.makedirs(savdir)
force_override = True
fpkl = os.path.join(savdir,
os.path.basename(self.files['hst']) + '.mod.p')
# Check if the original history file is updated
if not force_override and os.path.exists(fpkl) and \
os.path.getmtime(fpkl) > os.path.getmtime(self.files['hst']):
self.logger.info('[read_hst]: Reading from existing pickle.')
hst = pd.read_pickle(fpkl)
self.hst = hst
return hst
else:
self.logger.info('[read_hst]: Reading from original hst dump.')
# If we are here, force_override is True or history file is updated.
# Need to convert units and define new columns.
u = self.u
domain = self.domain
# volume of resolution element
dvol = domain['dx'].prod()
# total volume of domain
vol = domain['Lx'].prod()
# Area of domain
LxLy = domain['Lx'][0]*domain['Lx'][1]
# Read original history dump file
hst = read_hst(self.files['hst'], force_override=force_override)
# delete the first row
hst.drop(hst.index[:1], inplace=True)
# Time in code units
hst['time_code'] = hst['time']
# Time in Myr
hst['time'] *= u.Myr
# Total gas mass in Msun
hst['mass'] *= vol*u.Msun
# Gas surface density in Msun/pc^2
hst['Sigma_gas'] = hst['mass']/LxLy
if self.par['configure']['ionrad'] == 'ON':
# Neutral gas mass in Msun
#hst['Mneu'] = hst['scalar{:d}'.format(domain['IHI'])]*vol*u.Msun
hst['Mneu'] = hst['scalar0']*vol*u.Msun
# Ionized gas mass in Msun
hst['Mion'] *= vol*u.Msun
# Collisionally ionized gas (before ray tracing) in Msun
hst['Mion_coll'] *= vol*u.Msun
# Total photoionization rate [#/sec]
hst['Qiphot'] *= vol*(u.length**3).cgs
# Total collisional ionization rate [#/sec]
hst['Qicoll'] *= vol*(u.length**3).cgs
# Total dust absorption rate [#/sec]
hst['Qidust'] *= vol*(u.length**3).cgs
# Mass fraction ionized gas
hst['mf_ion'] = hst['Mion']/hst['mass']
hst['mf_ion_coll'] = hst['Mion_coll']/hst['mass']
for f in range(self.par['radps']['nfreq']):
# Total luminosity [Lsun]
hst['Ltot_cl{:d}'.format(f)] *= vol*u.Lsun
hst['Ltot_ru{:d}'.format(f)] *= vol*u.Lsun
hst['Ltot{:d}'.format(f)] = \
hst['Ltot_cl{:d}'.format(f)] + hst['Ltot_ru{:d}'.format(f)]
# Total luminosity included in simulation
hst['L_cl{:d}'.format(f)] *= vol*u.Lsun
hst['L_ru{:d}'.format(f)] *= vol*u.Lsun
hst['L{:d}'.format(f)] = \
hst['L_cl{:d}'.format(f)] + hst['L_ru{:d}'.format(f)]
# Luminosity that escaped boundary
hst['Lesc{:d}'.format(f)] *= vol*u.Lsun
# Luminosity lost due to dmax
hst['Llost{:d}'.format(f)] *= vol*u.Lsun
# Escape fraction, lost fraction
# Estimation of true escape fraction estimation (upper bound)
hst['fesc{:d}'.format(f)] = hst['Lesc{:d}'.format(f)] / \
hst['L{:d}'.format(f)]
hst['flost{:d}'.format(f)] = hst['Llost{:d}'.format(f)] / \
hst['L{:d}'.format(f)]
hst['fesc{:d}_est'.format(f)] = hst['fesc{:d}'.format(f)] + \
hst['flost{:d}'.format(f)]
hst['fesc{:d}_cum_est'.format(f)] = \
(hst['Lesc{:d}'.format(f)] + hst['Llost{:d}'.format(f)]).cumsum() / \
hst['L{:d}'.format(f)].cumsum()
# midplane radiation energy density in cgs units
hst['Erad{:d}_mid'.format(f)] *= u.energy_density
if self.par['configure']['ionrad'] == 'ON':
# Scale heights of [warm] ionized gas, nesq
# Check if columns exist
# nesq
if 'H2nesq' in hst.columns and 'nesq' in hst.columns:
hst['H_nesq'] = np.sqrt(hst['H2nesq'] / hst['nesq'])
hst.drop(columns=['H2nesq', 'nesq'], inplace=True)
# Warm nesq
if 'H2wnesq' in hst.columns and 'wnesq' in hst.columns:
hst['H_wnesq'] = np.sqrt(hst['H2wnesq'] / hst['wnesq'])
hst.drop(columns=['H2wnesq', 'wnesq'], inplace=True)
# For warm medium,
# append _ to distinguish from mhd history variable
if 'H2w' in hst.columns and 'massw' in hst.columns:
hst['H_w_'] = np.sqrt(hst['H2w'] / hst['massw'])
hst['Mw_'] = hst['massw']*vol*u.Msun
hst['mf_w_'] = hst['Mw_']/hst['mass']
hst.drop(columns=['H2w', 'massw'], inplace=True)
# Warm ionized
if 'H2wi' in hst.columns and 'masswi' in hst.columns:
hst['H_wi'] = np.sqrt(hst['H2wi'] / hst['masswi'])
hst['Mwion'] = hst['masswi']*vol*u.Msun
hst['mf_wion'] = hst['Mwion']/hst['mass']
hst.drop(columns=['H2wi', 'masswi'], inplace=True)
##########################
# With ionizing radiation
##########################
if self.par['radps']['nfreq'] == 2 and \
self.par['radps']['nfreq_ion'] == 1:
hnu0 = self.par['radps']['hnu[0]']/u.eV
hnu1 = self.par['radps']['hnu[1]']/u.eV
# Total luminosity
hst['Qitot_cl'] = hst['Ltot_cl0']/u.Lsun/hnu0/u.s
hst['Qitot_ru'] = hst['Ltot_ru0']/u.Lsun/hnu0/u.s
hst['Qitot'] = hst['Qitot_ru'] + hst['Qitot_cl']
# Total Q included as source
hst['Qi_cl'] = hst['L_cl0']/u.Lsun/hnu0/u.s
hst['Qi_ru'] = hst['L_ru0']/u.Lsun/hnu0/u.s
hst['Qi'] = hst['Qi_ru'] + hst['Qi_cl']
hst['Qiesc'] = hst['Lesc0']/u.Lsun/hnu0/u.s
hst['Qilost'] = hst['Llost0']/u.Lsun/hnu0/u.s
hst['Qiesc_est'] = hst['Qilost'] + hst['Qiesc']
else:
self.logger.error('Unrecognized option nfreq={0:d}, nfreq_ion={1:d}'.\
format(self.par['radps']['nfreq'],
self.par['radps']['nfreq_ion']))
hst.index = hst['time_code']
#hst.index.name = 'index'
# Merge with mhd history dump
if merge_mhd:
hst_mhd = self.read_hst_mhd()
hst = hst_mhd.reindex(hst.index, method='nearest',
tolerance=0.1).combine_first(hst)
try:
hst.to_pickle(fpkl)
except IOError:
self.logger.warning('[read_hst]: Could not pickle hst to {0:s}.'.format(fpkl))
self.hst = hst
return self.hst
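    # Illustrative usage (a sketch; `sim` stands for whatever simulation-loader object mixes in
    # this Hst class and provides files/par/u/domain):
    #   hst = sim.read_hst(merge_mhd=True)
    #   hst[['time', 'mass', 'Sigma_gas']].head()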
def read_hst_mhd(self):
# Read original mhd history dump from /tigress/changgoo
hst = read_hst('/tigress/changgoo/{0:s}/hst/{0:s}.hst'.\
format(self.problem_id))
u = self.u
domain = self.par['domain1']
Lx = domain['x1max'] - domain['x1min']
Ly = domain['x2max'] - domain['x2min']
Lz = domain['x3max'] - domain['x3min']
Nx = domain['Nx1']
Ny = domain['Nx2']
Nz = domain['Nx3']
Ntot = Nx*Ny*Nz
vol = Lx*Ly*Lz
LxLy = Lx*Ly
dz = Lz/Nz
Omega = self.par['problem']['Omega']
time_orb = 2*np.pi/Omega*u.Myr # Orbital time in Myr
if 'x1Me' in hst:
mhd = True
else:
mhd = False
        h = pd.DataFrame()
import itertools
import pandas as pd
from twobitreader import TwoBitFile
from typing import Union
from sys import stdout
import numpy as np
from .utils import compl, get_true_snps_from_maf, get_dnps_from_maf
from .context import context96, context1536, context78, context83, context_composite, context_polymerase, context_polymerase_id
def get_spectra_from_maf(
maf: pd.DataFrame,
hgfile: Union[str,None] = None,
reference: str = 'cosmic2',
real_snps: bool = False,
):
"""
Attaches context categories to maf and gets counts of contexts for each sample
---------------------------
Args:
* maf: Pandas DataFrame of maf
* hgfile: path to 2bit genome build file for computing reference context
        * reference: reference signature set to build contexts for
          (e.g. 'cosmic2', 'cosmic3', 'cosmic3_exome', 'pcawg_SBS', 'cosmic3_DBS', 'cosmic3_ID')
        * real_snps: if False, adjacent SNPs are filtered out via get_true_snps_from_maf
Returns:
* Pandas DataFrame of maf with context category attached
* Pandas DataFrame of counts with samples as columns and context as rows
"""
maf = maf.copy()
if 'Start_Position' in list(maf):
maf = maf.rename(columns={'Start_Position':'Start_position'})
maf['sample'] = maf['Tumor_Sample_Barcode']
if reference in ['cosmic2', 'cosmic3', 'cosmic3_exome', 'pcawg_SBS']:
# Context type
if reference in ['cosmic2', 'cosmic3', 'cosmic3_exome']: context_num, context_form, context_use = 'context96.num', 'context96.word', context96
else: context_num, context_form, context_use = 'context1536.num', 'context1536.arrow', context1536
# Subset to SNPs
if 'Variant_Type' in maf.columns:
maf = maf.loc[maf['Variant_Type'] == 'SNP']
else:
maf = maf.loc[maf['Reference_Allele'].apply(lambda k: len(k) == 1 and k != '-') & \
maf['Tumor_Seq_Allele2'].apply(lambda k: len(k) == 1 and k != '-')]
if not real_snps:
# Filter out adjacent SNPs
maf = get_true_snps_from_maf(maf)
ref = maf['Reference_Allele'].str.upper()
alt = maf['Tumor_Seq_Allele2'].str.upper()
if 'ref_context' in list(maf):
context = maf['ref_context'].str.upper()
else:
assert hgfile is not None, 'Please provide genome build file.'
try:
hg = TwoBitFile(hgfile)
except:
raise Exception("{} not a valid 2bit file.".format(hgfile))
chr_contig = all('chr{}'.format(i) in hg for i in list(range(1, 23)) + ['X', 'Y'])
# Map contexts
_contexts = list()
maf_size = maf.shape[0]
for idx,(pos,chromosome) in enumerate(zip(maf["Start_position"].astype(int), maf["Chromosome"].astype(str))):
stdout.write("\r * Mapping contexts: {} / {}".format(idx, maf_size))
# Double check version
if chromosome == '23':
chromosome = 'X'
elif chromosome == '24':
chromosome = 'Y'
elif chromosome == 'MT':
chromosome = 'M'
if chr_contig and not chromosome.startswith('chr'):
chromosome = 'chr' + chromosome
if not chr_contig and chromosome.startswith('chr'):
chromosome = chromosome[3:]
# 96 context, get reference [pos-1, pos, pos+1]
if reference != 'pcawg_SBS':
_contexts.append(hg[chromosome][pos-2:pos+1].lower())
                # 1536 context, get reference [pos-2, pos-1, pos, pos+1, pos+2]
else:
_contexts.append(hg[chromosome][pos-3:pos+2].lower())
maf['ref_context'] = _contexts
stdout.write("\n")
context = maf['ref_context'].str.upper()
n_context = context.str.len()
mid = n_context // 2
if reference != 'pcawg_SBS':
contig = pd.Series([r + a + c[m - 1] + c[m + 1] if r in 'AC' \
else compl(r + a + c[m + 1] + c[m - 1]) \
for r, a, c, m in zip(ref, alt, context, mid)], index=maf.index)
else:
contig = pd.Series([c[m-2:m] + "[" + r + ">" + a + "]" + c[m+1:m+3] if r in 'TC' \
else compl(c[::-1][m-2:m] + "[" + r + ">" + a + "]" + c[::-1][m+1:m+3]) \
for r, a, c, m in zip(ref, alt, context, mid)], index=maf.index)
try:
maf[context_num] = contig.apply(context_use.__getitem__)
except KeyError as e:
raise KeyError('Unusual context: ' + str(e))
maf[context_form] = contig
spectra = maf.groupby([context_form, 'sample']).size().unstack().fillna(0).astype(int)
for c in context_use:
if c not in spectra.index:
spectra.loc[c] = 0
spectra = spectra.loc[context_use]
elif reference == 'cosmic3_DBS':
# Subset to DNPs
if 'Variant_Type' not in maf.columns:
ref_alt = maf['Reference_Allele'] + '>' + maf['Tumor_Seq_Allele2']
def get_variant_type(ra):
r, a = ra.split('>')
if len(r) == 1 and r != '-' and len(a) == 1 and a != '-':
return 'SNP'
if len(r) == 2 and len(a) == 2:
return 'DNP'
maf['Variant_Type'] = ref_alt.apply(get_variant_type)
if maf['Variant_Type'].str.contains('DNP').any():
maf = maf.loc[maf['Variant_Type'] == 'DNP']
else:
maf = get_dnps_from_maf(maf)
ref = maf['Reference_Allele'].str.upper()
alt = maf['Tumor_Seq_Allele2'].str.upper()
contig = pd.Series([r + '>' + a if r + '>' + a in context78
else compl(r, reverse=True) + '>' + compl(a, reverse=True)
for r, a in zip(ref, alt)], index=maf.index)
try:
maf['context78.num'] = contig.apply(context78.__getitem__)
except KeyError as e:
raise KeyError('Unusual context: ' + str(e))
maf['context78.word'] = contig
spectra = maf.groupby(['context78.word', 'sample']).size().unstack().fillna(0).astype(int)
for c in context78:
if c not in spectra.index:
spectra.loc[c] = 0
spectra = spectra.loc[context78]
elif reference == 'cosmic3_ID':
maf = maf.loc[(maf['Reference_Allele'] == '-') ^ (maf['Tumor_Seq_Allele2'] == '-')]
ref = maf['Reference_Allele'].str.upper()
alt = maf['Tumor_Seq_Allele2'].str.upper()
assert hgfile is not None, 'Please provide genome build file.'
try:
hg = TwoBitFile(hgfile)
except:
raise Exception("{} not a valid 2bit file.".format(hgfile))
chr_contig = all('chr{}'.format(i) in hg for i in list(range(1, 23)) + ['X', 'Y'])
# Map contexts
contig = list()
maf_size = maf.shape[0]
for idx,(pos,chromosome,r,a) in enumerate(zip(maf["Start_position"].astype(int),
maf["Chromosome"].astype(str), ref, alt)):
stdout.write("\r * Mapping contexts: {} / {}".format(idx, maf_size))
# Double check version
if chromosome == '23':
chromosome = 'X'
elif chromosome == '24':
chromosome = 'Y'
elif chromosome == 'MT':
chromosome = 'M'
if chr_contig and not chromosome.startswith('chr'):
chromosome = 'chr' + chromosome
if not chr_contig and chromosome.startswith('chr'):
chromosome = chromosome[3:]
if a == '-':
del_len = len(r)
_context = hg[chromosome][pos - 1 + del_len:pos - 1 + del_len * 6].upper()
_context_list = [_context[n: n + del_len] for n in range(0, 5 * del_len, del_len)]
n_repeats = 1
for c in _context_list:
if c == r:
n_repeats += 1
else:
break
microhomology = 0
if n_repeats == 1:
for b1, b2 in zip(r, _context_list[0]):
if b1 == b2:
microhomology += 1
else:
break
prev_context = hg[chromosome][pos - 1 - del_len: pos - 1].upper()
for b1, b2 in zip(reversed(r), reversed(prev_context)):
if b1 == b2:
microhomology += 1
else:
break
if del_len == 1:
pre = 'C' if r in 'CG' else 'T'
elif del_len >= 5:
pre = '5+'
else:
pre = str(del_len)
if microhomology >= 5:
post = 'm5+'
elif microhomology:
post = 'm' + str(microhomology)
elif n_repeats == 6:
post = '6+'
else:
post = str(n_repeats)
contig.append(pre + 'del' + post)
elif r == '-':
ins_len = len(a)
_context = hg[chromosome][pos:pos + ins_len * 5].upper()
_context_list = [_context[n: n + ins_len] for n in range(0, 5 * ins_len, ins_len)]
n_repeats = 0
for c in _context_list:
if c == a:
n_repeats += 1
else:
break
if ins_len == 1:
pre = 'C' if a in 'CG' else 'T'
elif ins_len >= 5:
pre = '5+'
else:
pre = str(ins_len)
if n_repeats == 5:
post = '5+'
else:
post = str(n_repeats)
contig.append(pre + 'ins' + post)
maf['context83.word'] = contig
try:
maf['context83.num'] = maf['context83.word'].apply(context83.__getitem__)
except KeyError as e:
raise KeyError('Unusual context: ' + str(e))
spectra = maf.groupby(['context83.word', 'sample']).size().unstack().fillna(0).astype(int)
for c in context83:
if c not in spectra.index:
spectra.loc[c] = 0
spectra = spectra.loc[context83]
stdout.write("\n")
elif reference in ["pcawg_COMPOSITE","pcawg_COMPOSITE96"]:
"""
Concatenate 1536 or 96 SBS, DBS, and ID spectra
"""
maf_dbs,dbs_df = get_spectra_from_maf(maf,hgfile, 'cosmic3_DBS')
maf_id,id_df = get_spectra_from_maf(maf,hgfile,'cosmic3_ID')
if reference == "pcawg_COMPOSITE":
maf_sbs,sbs_df = get_spectra_from_maf(maf,hgfile,'pcawg_SBS',real_snps)
maf = pd.concat([maf_sbs,maf_dbs,maf_id])
maf['context.pcawg'] = maf['context1536.arrow'].fillna('') + maf['context78.word'].fillna('') + maf['context83.word'].fillna('')
else:
maf_sbs,sbs_df = get_spectra_from_maf(maf,hgfile,'cosmic3_exome',real_snps)
maf = pd.concat([maf_sbs,maf_dbs,maf_id])
maf['context.pcawg'] = maf['context96.word'].fillna('') + maf['context78.word'].fillna('') + maf['context83.word'].fillna('')
# concatenate spectra
spectra = | pd.concat([sbs_df,dbs_df,id_df]) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# # ReEDS Scenarios on PV ICE Tool STATES
# To explore different scenarios for future installation projections of PV (or any technology), ReEDS output data can be useful in providing standard scenarios. ReEDS installation projections are used in this journal as input data to the PV ICE tool.
#
# Current sections include:
#
# <ol>
# <li> Reading a standard ReEDS output file and saving it in a PV ICE input format </li>
# <li> Reading scenarios of interest and running PV ICE tool </li>
# <li> Plotting </li>
# <li> GeoPlotting </li>
# </ol>
# Notes:
#
# Scenarios of Interest:
# o Ref.Mod,
# o 95-by-35.Adv, and
# o 95-by-35+Elec.Adv+DR ones
#
# In[1]:
import PV_ICE
import numpy as np
import pandas as pd
import os,sys
import matplotlib.pyplot as plt
from IPython.display import display
plt.rcParams.update({'font.size': 22})
plt.rcParams['figure.figsize'] = (12, 8)
# In[2]:
import os
from pathlib import Path
testfolder = str(Path().resolve().parent.parent.parent / 'PV_ICE' / 'TEMP' / 'SF_States')
statedatafolder = str(Path().resolve().parent.parent.parent / 'PV_ICE' / 'TEMP' / 'STATEs')
print ("Your simulation will be stored in %s" % testfolder)
# In[3]:
PV_ICE.__version__
# ### Reading REEDS original file to get list of SCENARIOs, PCAs, and STATEs
# In[4]:
r"""
reedsFile = str(Path().resolve().parent.parent.parent / 'December Core Scenarios ReEDS Outputs Solar Futures v2a.xlsx')
print ("Input file is stored in %s" % reedsFile)
rawdf = pd.read_excel(reedsFile,
sheet_name="UPV Capacity (GW)")
#index_col=[0,2,3]) #this casts scenario, PCA and State as levels
#now set year as an index in place
#rawdf.drop(columns=['State'], inplace=True)
rawdf.drop(columns=['Tech'], inplace=True)
rawdf.set_index(['Scenario','Year','PCA', 'State'], inplace=True)
scenarios = list(rawdf.index.get_level_values('Scenario').unique())
PCAs = list(rawdf.index.get_level_values('PCA').unique())
STATEs = list(rawdf.index.get_level_values('State').unique())
simulationname = scenarios
simulationname = [w.replace('+', '_') for w in simulationname]
simulationname
SFscenarios = [simulationname[0], simulationname[4], simulationname[8]]
"""
# ### Reading GIS inputs
# In[5]:
r"""
GISfile = str(Path().resolve().parent.parent.parent.parent / 'gis_centroid_n.xlsx')
GIS = pd.read_excel(GISfile)
GIS = GIS.set_index('id')
GIS.head()
GIS.loc['p1'].long
"""
# ### Create Scenarios in PV_ICE
# #### Downselect to Solar Future scenarios of interest
#
# Scenarios of Interest:
# <li> Ref.Mod
# <li> 95-by-35.Adv
# <li> 95-by-35+Elec.Adv+DR
# In[6]:
SFscenarios = ['Reference.Mod', '95-by-35.Adv', '95-by-35_Elec.Adv_DR']
SFscenarios
# In[7]:
STATEs = ['WA', 'CA', 'VA', 'FL', 'MI', 'IN', 'KY', 'OH', 'PA', 'WV', 'NV', 'MD',
'DE', 'NJ', 'NY', 'VT', 'NH', 'MA', 'CT', 'RI', 'ME', 'ID', 'MT', 'WY', 'UT', 'AZ', 'NM',
'SD', 'CO', 'ND', 'NE', 'MN', 'IA', 'WI', 'TX', 'OK', 'OR', 'KS', 'MO', 'AR', 'LA', 'IL', 'MS',
'AL', 'TN', 'GA', 'SC', 'NC']
# ### Create the 3 Scenarios and assign Baselines
#
# Keeping track of each scenario as its own PV ICE Object.
# In[8]:
MATERIALS = ['glass', 'silicon', 'silver','copper','aluminium','backsheet','encapsulant']
# In[9]:
#for ii in range (0, 1): #len(scenarios):
i = 0
r1 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(STATEs)):
filetitle = SFscenarios[i]+'_'+STATEs[jj]+'.csv'
filetitle = os.path.join(statedatafolder, filetitle)
r1.createScenario(name=STATEs[jj], file=filetitle)
r1.scenario[STATEs[jj]].addMaterials(MATERIALS, baselinefolder=r'..\..\baselines\SolarFutures_2021', nameformat=r'\baseline_material_{}_Reeds.csv')
i = 1
r2 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(STATEs)):
filetitle = SFscenarios[i]+'_'+STATEs[jj]+'.csv'
filetitle = os.path.join(statedatafolder, filetitle)
r2.createScenario(name=STATEs[jj], file=filetitle)
r2.scenario[STATEs[jj]].addMaterials(MATERIALS, baselinefolder=r'..\..\baselines\SolarFutures_2021', nameformat=r'\baseline_material_{}_Reeds.csv')
i = 2
r3 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(STATEs)):
filetitle = SFscenarios[i]+'_'+STATEs[jj]+'.csv'
filetitle = os.path.join(statedatafolder, filetitle)
r3.createScenario(name=STATEs[jj], file=filetitle)
r3.scenario[STATEs[jj]].addMaterials(MATERIALS, baselinefolder=r'..\..\baselines\SolarFutures_2021', nameformat=r'\baseline_material_{}_Reeds.csv')
# # Calculate Mass Flow
# In[10]:
r1.scenMod_noCircularity()
r2.scenMod_noCircularity()
r3.scenMod_noCircularity()
IRENA= False
PERFECTMFG = False
ELorRL = 'RL'
if IRENA:
r1.scenMod_IRENIFY(ELorRL=ELorRL)
r2.scenMod_IRENIFY(ELorRL=ELorRL)
r3.scenMod_IRENIFY(ELorRL=ELorRL)
if PERFECTMFG:
r1.scenMod_PerfectManufacturing()
r2.scenMod_PerfectManufacturing()
r3.scenMod_PerfectManufacturing()
# In[11]:
r1.calculateMassFlow()
r2.calculateMassFlow()
r3.calculateMassFlow()
# In[12]:
print("STATEs:", r1.scenario.keys())
print("Module Keys:", r1.scenario[STATEs[jj]].data.keys())
print("Material Keys: ", r1.scenario[STATEs[jj]].material['glass'].materialdata.keys())
# # OPEN EI
# In[13]:
kk=0
SFScenarios = [r1, r2, r3]
SFScenarios[kk].name
# In[14]:
# WORK ON THIS FOR OPENEI
keyw=['mat_Virgin_Stock','mat_Total_EOL_Landfilled','mat_Total_MFG_Landfilled', 'mat_Total_Landfilled',
'new_Installed_Capacity_[MW]','Installed_Capacity_[W]']
keywprint = ['VirginMaterialDemand','EOLMaterial', 'ManufacturingScrap','ManufacturingScrapAndEOLMaterial',
'NewInstalledCapacity','InstalledCapacity']
keywunits = ['MetricTonnes', 'MetricTonnes', 'MetricTonnes', 'MetricTonnes',
'MW','MW']
keywdcumneed = [True,True,True,True,
True,False]
keywdlevel = ['material','material','material','material',
'module','module']
keywscale = [1000000, 1000000, 1000000, 1000000,
1,1e6]
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
for zz in range (0, len(STATEs)):
foo = pd.DataFrame()
for jj in range (0, len(keyw)):
if keywdlevel[jj] == 'material':
for ii in range (0, len(materials)):
sentit = '@value|'+keywprint[jj]+'|'+materials[ii].capitalize() +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyw[jj]]/keywscale[jj]
if keywdcumneed[jj]:
for ii in range (0, len(materials)):
sentit = '@value|Cumulative'+keywprint[jj]+'|'+materials[ii].capitalize() +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyw[jj]].cumsum()/keywscale[jj]
else:
sentit = '@value|'+keywprint[jj]+'|'+'PV' +'#'+keywunits[jj]
#sentit = '@value|'+keywprint[jj]+'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]]/keywscale[jj]
if keywdcumneed[jj]:
sentit = '@value|Cumulative'+keywprint[jj]+'|'+'PV' +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]].cumsum()/keywscale[jj]
foo['@states'] = STATEs[zz]
foo['@scenario|Solar Futures'] = SFScenarios[kk].name
foo['@timeseries|Year'] = SFScenarios[kk].scenario[STATEs[zz]].data.year
scenariolist = scenariolist.append(foo)
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
#scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE OpenEI.csv', index=False)
print("Done")
# In[15]:
# WORK ON THIS FOR OPENEI
keyw=['mat_Virgin_Stock','mat_Total_EOL_Landfilled','mat_Total_MFG_Landfilled', 'mat_Total_Landfilled',
'new_Installed_Capacity_[MW]','Installed_Capacity_[W]']
keywprint = ['VirginMaterialDemand','EOLMaterial', 'ManufacturingScrap','ManufacturingScrapAndEOLMaterial',
'NewInstalledCapacity','InstalledCapacity']
keywunits = ['MetricTonnes', 'MetricTonnes', 'MetricTonnes', 'MetricTonnes',
'MW','MW']
keywdcumneed = [True,True,True,True,
True,False]
keywdlevel = ['material','material','material','material',
'module','module']
keywscale = [1000000, 1000000, 1000000, 1000000,
1,1e6]
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
for zz in range (0, len(STATEs)):
foo = pd.DataFrame()
for jj in range (0, len(keyw)):
if keywdlevel[jj] == 'material':
for ii in range (0, len(materials)):
sentit = '@value|'+keywprint[jj]+'|'+materials[ii].capitalize() +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyw[jj]]/keywscale[jj]
else:
sentit = '@value|'+keywprint[jj]+'|'+'PV' +'#'+keywunits[jj]
#sentit = '@value|'+keywprint[jj]+'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]]/keywscale[jj]
foo['@states'] = STATEs[zz]
foo['@scenario|Solar Futures'] = SFScenarios[kk].name
foo['@timeseries|Year'] = SFScenarios[kk].scenario[STATEs[zz]].data.year
scenariolist = scenariolist.append(foo)
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
#scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE OpenEI Yearly Only.csv', index=False)
print("Done")
# In[16]:
# WORK ON THIS FOR OPENEI
keyw=['mat_Virgin_Stock','mat_Total_EOL_Landfilled','mat_Total_MFG_Landfilled', 'mat_Total_Landfilled',
'new_Installed_Capacity_[MW]','Installed_Capacity_[W]']
keywprint = ['VirginMaterialDemand','EOLMaterial', 'ManufacturingScrap','ManufacturingScrapAndEOLMaterial',
'NewInstalledCapacity','InstalledCapacity']
keywunits = ['MetricTonnes', 'MetricTonnes', 'MetricTonnes', 'MetricTonnes',
'MW','MW']
keywdcumneed = [True,True,True,True,
True,False]
keywdlevel = ['material','material','material','material',
'module','module']
keywscale = [1000000, 1000000, 1000000, 1000000,
1,1e6]
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
for zz in range (0, len(STATEs)):
foo = pd.DataFrame()
for jj in range (0, len(keyw)):
if keywdlevel[jj] == 'material':
if keywdcumneed[jj]:
for ii in range (0, len(materials)):
sentit = '@value|Cumulative'+keywprint[jj]+'|'+materials[ii].capitalize() +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyw[jj]].cumsum()/keywscale[jj]
else:
if keywdcumneed[jj]:
sentit = '@value|Cumulative'+keywprint[jj]+'|'+'PV' +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]].cumsum()/keywscale[jj]
foo['@states'] = STATEs[zz]
foo['@scenario|Solar Futures'] = SFScenarios[kk].name
foo['@timeseries|Year'] = SFScenarios[kk].scenario[STATEs[zz]].data.year
scenariolist = scenariolist.append(foo)
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
#scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE OpenEI Cumulatives Only.csv', index=False)
print("Done")
# In[ ]:
# WORK ON THIS FOR OPENEI
# SCENARIO DIFFERENCES
keyw=['new_Installed_Capacity_[MW]','Installed_Capacity_[W]']
keywprint = ['NewInstalledCapacity','InstalledCapacity']
sfprint = ['Reference','Grid Decarbonization', 'High Electrification']
keywunits = ['MW','MW']
keywdcumneed = [True,False]
keywdlevel = ['module','module']
keywscale = [1,1e6]
materials = []
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for zz in range (0, len(STATEs)):
foo = pd.DataFrame()
for jj in range (0, len(keyw)):
# kk -- scenario
for kk in range(0, 3):
sentit = '@value|'+keywprint[jj]+'|'+sfprint[kk]+'#'+keywunits[jj]
#sentit = '@value|'+keywprint[jj]+'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]]/keywscale[jj]
if keywdcumneed[jj]:
sentit = '@value|Cumulative'+keywprint[jj]+'|'+sfprint[kk]+'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]].cumsum()/keywscale[jj]
# foo['@value|scenario|Solar Futures'] = SFScenarios[kk].name
foo['@states'] = STATEs[zz]
foo['@timeseries|Year'] = SFScenarios[kk].scenario[STATEs[zz]].data.year
scenariolist = scenariolist.append(foo)
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
#scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE OpenEI ScenarioDifferences.csv', index=False)
print("Done")
# In[ ]:
scenariolist.head()
# # SAVE DATA FOR BILLY: STATES
# In[ ]:
#for 3 significant numbers rounding
N = 2
# SFScenarios[kk].scenario[PCAs[zz]].data.year
#
# Index 20 --> 2030
#
# Index 30 --> 2040
#
# Index 40 --> 2050
# In[ ]:
idx2030 = 20
idx2040 = 30
idx2050 = 40
print("index ", idx2030, " is year ", r1.scenario[STATEs[0]].data['year'].iloc[idx2030])
print("index ", idx2040, " is year ", r1.scenario[STATEs[0]].data['year'].iloc[idx2040])
print("index ", idx2050, " is year ", r1.scenario[STATEs[0]].data['year'].iloc[idx2050])
# #### 6 - STATE Cumulative Virgin Needs by 2050
#
# In[ ]:
keyword='mat_Virgin_Stock'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium', 'encapsulant', 'backsheet']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = []
for ii in range (0, len(materials)):
keywordsum = []
for zz in range (0, len(STATEs)):
keywordsum.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword].sum())
materiallist.append(keywordsum)
df = pd.DataFrame (materiallist,columns=STATEs, index = materials)
df = df.T
df = df.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , df], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE 6 - STATE Cumulative2050 VirginMaterialNeeds_tons.csv')
# #### 7 - STATE Cumulative EoL Only Waste by 2050
# In[ ]:
keyword='mat_Total_EOL_Landfilled'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium', 'encapsulant', 'backsheet']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = []
for ii in range (0, len(materials)):
keywordsum = []
for zz in range (0, len(STATEs)):
keywordsum.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword].sum())
materiallist.append(keywordsum)
df = pd.DataFrame (materiallist,columns=STATEs, index = materials)
df = df.T
df = df.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , df], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE 7 - STATE Cumulative2050 Waste_EOL_tons.csv')
# #### 8 - STATE Yearly Virgin Needs 2030 2040 2050
# In[ ]:
keyword='mat_Virgin_Stock'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium', 'encapsulant', 'backsheet']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = pd.DataFrame()
for ii in range (0, len(materials)):
keywordsum2030 = []
keywordsum2040 = []
keywordsum2050 = []
for zz in range (0, len(STATEs)):
keywordsum2030.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2030])
keywordsum2040.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2040])
keywordsum2050.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2050])
yearlylist = pd.DataFrame([keywordsum2030, keywordsum2040, keywordsum2050], columns=STATEs, index = [2030, 2040, 2050])
yearlylist = yearlylist.T
yearlylist = yearlylist.add_prefix(materials[ii]+'_')
materiallist = | pd.concat([materiallist, yearlylist], axis=1) | pandas.concat |
# Module for plotting and fitting EIS data
# (C) <NAME> 2020
import os
import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.optimize import minimize, basinhopping, differential_evolution, curve_fit, least_squares
from datetime import datetime, timedelta
import itertools
import re
from scipy.stats import iqr, mode
from scipy.special import binom
import inspect
from copy import copy
import warnings
from io import StringIO
#------------------
# Misc functions
#------------------
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def camel_case_split(identifier):
# from https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python
matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z0-9])|(?<=[A-Z0-9])(?=[A-Z0-9][a-z])|$)', identifier)
return [m.group(0) for m in matches]
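# Example (illustrative): camel_case_split('DryH2Mix') returns ['Dry', 'H2', 'Mix'],
# splitting on lower-to-upper transitions while keeping tokens like 'H2' intact.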
def fit_r_squared(x,y,fit,weights=None):
"""
Calculate r squared for polynomial fit
Args:
x: x values
y: y values
fit: numpy polyfit output, or array of coefficients
weights: sample weights
"""
y_hat = np.polyval(fit,x)
return r_squared(y,y_hat,weights)
def r_squared(y,y_hat,weights=None):
"""
	Calculate r squared for predicted values
Args:
y: y values
y_hat: predicted y values
weights: sample weights
"""
if weights is None:
ss_res = np.sum((y_hat-y)**2)#np.var(y_hat-y)
ss_tot = np.sum((y - np.mean(y))**2) #np.var(y)
else:
ss_res = np.sum(weights*(y_hat-y)**2)
ss_tot = np.sum(weights*(y-np.average(y,weights=weights))**2)
return 1-(ss_res/ss_tot)
def reg_degree_polyfit(x,y,alpha,min_r2=0,weights=None,verbose=False):
"""
Regularized-degree polynomial fit. L2 regularization penalty applied to polynomial degree
Args:
x: x values
y: y values
alpha: regularization strength
min_r2: minimum r2. If specified, degree will be increased until this min value is achieved, even if overall score decreases
weights: weights for fit
verbose: if True, print info about best fit, plus previous and next degree fits
"""
best_score = -np.inf
deg = 1
r2 = -np.inf
while deg < len(x):
fit = np.polyfit(x,y,deg=deg,w=weights)
last_r2 = r2
r2 = fit_r_squared(x,y,fit,weights=weights)
score = r2 - alpha*deg**2
if score > best_score:# or r2 < min_r2:
#print(f'Deg {deg}, Case 1,','r2={},last r2={}'.format(round(r2,5),round(last_r2,5)))
best_fit = fit
best_score = score
best_deg = deg
deg += 1
elif last_r2 < min_r2:# and r2 >= min_r2:
#print(f'Deg {deg}, Case 2,','r2={},last r2={}'.format(round(r2,5),round(last_r2,5)))
best_fit = fit
best_score = score
best_deg = deg
deg += 1
#break
else:
break
if verbose==True:
		print('Best score: degree={}, r2={}, score={}'.format(best_deg,round(fit_r_squared(x,y,best_fit,weights),5), round(best_score,5)))
if best_deg > 1:
			prev_r2 = fit_r_squared(x,y,np.polyfit(x,y,deg=deg-2,w=weights),weights)
prev_score = prev_r2 - alpha*(deg-2)**2
print('Previous degree: degree={}, r2={}, score={}'.format(deg-2,round(prev_r2,5), round(prev_score,5)))
print('Next degree: degree={}, r2={}, score={}'.format(deg,round(fit_r_squared(x,y,fit,weights),5), round(score,5)))
return best_fit
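# Usage sketch (illustrative; x_demo and y_demo are made-up data, not part of this module):
# x_demo = np.linspace(0, 1, 30)
# y_demo = 2*x_demo**2 - x_demo + np.random.normal(0, 0.05, x_demo.shape)
# best_fit = reg_degree_polyfit(x_demo, y_demo, alpha=1e-3, min_r2=0.9, verbose=True)
# Larger alpha values penalize degree more heavily, so the returned fit stays low-order
# unless additional terms clearly improve r squared.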
#---------------------
# File loading
#---------------------
def source_extension(source):
"""Get file extension for source"""
extensions = {'gamry':'.DTA','zplot':'.z'}
return extensions[source]
def get_file_source(file):
"""Determine file source"""
try:
with open(file,'r') as f:
txt = f.read()
except UnicodeDecodeError:
with open(file,'r',encoding='latin1') as f:
txt = f.read()
# determine format
if txt.split('\n')[0]=='EXPLAIN':
source = 'gamry'
elif txt.split('\n')[0]=='ZPLOT2 ASCII':
source='zplot'
return source
def get_timestamp(file):
"""Get experiment start timestamp from file"""
try:
with open(file,'r') as f:
txt = f.read()
except UnicodeDecodeError:
with open(file,'r',encoding='latin1') as f:
txt = f.read()
source = get_file_source(file)
if source=='gamry':
date_start = txt.find('DATE')
date_end = txt[date_start:].find('\n') + date_start
date_line = txt[date_start:date_end]
date = date_line.split('\t')[2]
time_start = txt.find('TIME')
time_end = txt[time_start:].find('\n') + time_start
time_line = txt[time_start:time_end]
time = time_line.split('\t')[2]
timestr = date + ' ' + time
dt = datetime.strptime(timestr,"%m/%d/%Y %H:%M:%S")
elif source=='zplot':
date_start = txt.find('Date')
date_end = txt[date_start:].find('\n') + date_start
date_line = txt[date_start:date_end]
date = date_line.split()[1]
time_start = txt.find('Time')
time_end = txt[time_start:].find('\n') + time_start
time_line = txt[time_start:time_end]
time = time_line.split()[1]
timestr = date + ' ' + time
dt = datetime.strptime(timestr,"%m-%d-%Y %H:%M:%S")
return dt
def get_file_info(file,sequence=['file_type','T','aflow','cflow']):
"""
Get information from filename
Args:
file: filename (basename or full path)
sequence: list of identifiers in the order that they appear in the filename (separated by _)
"""
fname = os.path.basename(file).replace('.DTA','')
info = dict(zip(sequence,fname.split('_')))
info['T'] = int(info['T'][:info['T'].find('C')])
for flow in ('aflow','cflow'):
try:
if info[flow].find('sccm') > 0:
rate,gas = info[flow].split('sccm')
gas = ' '.join(camel_case_split(gas))
info[flow] = ' '.join([rate,'SCCM',gas])
else:
info[flow] = ' '.join(camel_case_split(info[flow]))
except KeyError:
pass
return info
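# Example (illustrative; the file name below is hypothetical). With the default sequence,
# get_file_info('EIS_550C_100sccmAir_50sccmDryH2.DTA') returns
# {'file_type': 'EIS', 'T': 550, 'aflow': '100 SCCM Air', 'cflow': '50 SCCM Dry H2'}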
def read_eis_zdata(file):
"""read EIS zcurve data from Gamry .DTA file"""
try:
with open(file,'r') as f:
txt = f.read()
except UnicodeDecodeError:
with open(file,'r',encoding='latin1') as f:
txt = f.read()
source = get_file_source(file)
if source=='gamry':
#find start of zcurve data
zidx = txt.find('ZCURVE')
#check for experiment aborted flag
if txt.find('EXPERIMENTABORTED') > -1:
skipfooter = len(txt[txt.find('EXPERIMENTABORTED'):].split('\n')) - 1
else:
skipfooter = 0
#preceding text
pretxt = txt[:zidx]
#zcurve data
ztable = txt[zidx:]
#column headers are next line after ZCURVE TABLE line
header_start = ztable.find('\n') + 1
header_end = header_start + ztable[header_start:].find('\n')
header = ztable[header_start:header_end].split('\t')
#units are next line after column headers
unit_end = header_end + 1 + ztable[header_end + 1:].find('\n')
units = ztable[header_end + 1:unit_end].split('\t')
#determine # of rows to skip by counting line breaks in preceding text
skiprows = len(pretxt.split('\n')) + 2
# if table is indented, ignore empty left column
if header[0] == '':
usecols = header[1:]
else:
usecols = header
# if extra tab at end of data rows, add an extra column to header to match (for Igor data)
first_data_row = ztable[unit_end+1: unit_end+ 1 + ztable[unit_end+1:].find('\n')]
if first_data_row.split('\t')[-1]=='':
header = header + ['extra_tab']
#read data to DataFrame
#python engine required to use skipfooter
data = pd.read_csv(file,sep='\t',skiprows=skiprows,header=None,names=header,usecols=usecols,skipfooter=skipfooter,engine='python')
elif source=='zplot':
#find start of zcurve data
zidx = txt.find('End Comments')
#preceding text
pretxt = txt[:zidx]
#z data
ztable = txt[zidx:]
#column headers are in line above "End Comments"
header = pretxt.split('\n')[-2].strip().split('\t')
#determine # of rows to skip by counting line breaks in preceding text
skiprows = len(pretxt.split('\n'))
# if table is indented, ignore empty left column
if header[0] == '':
usecols = header[1:]
else:
usecols = header
# read data to DataFrame
data = pd.read_csv(file,sep='\t',skiprows=skiprows,header=None,names=header,usecols=usecols)
# rename to standard format
rename = {"Z'(a)":"Zreal","Z''(b)":"Zimag","Freq(Hz)":"Freq"}
data = data.rename(rename,axis=1)
# calculate Zmod and Zphz
Zmod, Zphz = bode_from_complex(data)
data['Zmod'] = Zmod
data['Zphz'] = Zphz
return data
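# Typical usage (illustrative; the path is hypothetical):
# df = read_eis_zdata('EIS_550C_100sccmAir_50sccmDryH2.DTA')
# df[['Freq', 'Zreal', 'Zimag', 'Zmod', 'Zphz']].head()
# Gamry and ZPlot files are both returned with this standard column naming.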
def read_nleis_data(file):
try:
with open(file,'r') as f:
txt = f.read()
except UnicodeDecodeError:
with open(file,'r',encoding='latin1') as f:
txt = f.read()
source = get_file_source(file)
if source=='gamry':
# get number of points measured
num_freq_start = txt.find('NUMFREQ')
num_freq_end = txt.find('\n',num_freq_start+1)
num_freq_line = txt[num_freq_start:num_freq_end]
num_freq = int(num_freq_line.split('\t')[2])
frequency_data = {}
for n in range(num_freq):
fra_start = txt.find(f'FREQUENCY {n}')
if n==num_freq-1:
fra_end = txt.find('ZCURVE')
else:
fra_end = txt.find('FREQUENCY {}'.format(n+1))
fra_txt = txt[fra_start:fra_end]
# get frequency
freq_line = fra_txt[:fra_txt.find('\n')]
requested_freq = float(freq_line.split('\t')[1].replace('Requested Freq (Hz):','').strip())
actual_freq = float(freq_line.split('\t')[2].replace('Actual Freq (Hz):','').strip())
# get header
header_start = fra_txt.find('\n',fra_txt.find('\n')+1) + 1
header_end = fra_txt.find('\n',header_start)
header = fra_txt[header_start:header_end].split('\t')
# if table is indented, ignore empty left column
if header[0] == '':
usecols = header[1:]
else:
usecols = header
fra_table = fra_txt[fra_txt.find('\n',header_end+1) + 1:]
fra_data = pd.read_csv(StringIO(fra_table),sep='\t',header=None,names=header,usecols=usecols)
frequency_data[n] = {'requested_freq':requested_freq,'actual_freq':actual_freq,'data':fra_data}
return frequency_data
def read_jv_data(file,source='gamry'):
"""read from manual jV txt file"""
try:
with open(file,'r') as f:
txt = f.read()
except UnicodeDecodeError:
with open(file,'r',encoding='latin1') as f:
txt = f.read()
if source=='manual':
"""Manually created j-V txt file"""
jv_idx = txt.find('Current')
pretxt = txt[:jv_idx]
skiprows = len(pretxt.split('\n'))-1
data = pd.read_csv(file,sep='\t',skiprows=skiprows)
elif source=='gamry':
#find start of curve data
cidx = txt.find('CURVE\tTABLE')
#preceding text
pretxt = txt[:cidx]
#curve data
ctable = txt[cidx:]
#column headers are next line after ZCURVE TABLE line
header_start = ctable.find('\n') + 1
header_end = header_start + ctable[header_start:].find('\n')
header = ctable[header_start:header_end].split('\t')
#units are next line after column headers
unit_end = header_end + 1 + ctable[header_end + 1:].find('\n')
units = ctable[header_end + 1:unit_end].split('\t')
#determine # of rows to skip by counting line breaks in preceding text
skiprows = len(pretxt.split('\n')) + 2
#if table is indented, ignore empty left column
if header[0] == '':
usecols = header[1:]
else:
usecols = header
#read data to DataFrame
data = pd.read_csv(file,sep='\t',skiprows=skiprows,header=None,names=header,usecols=usecols)
else:
		raise ValueError(f'Invalid source {source}. Options are "gamry", "manual"')
return data
def read_ocv_data(file,file_type='auto'):
"""
read OCV data from Gamry .DTA file
Args:
file: file to read
file_type: file type. Options are 'ocv','eis'
"""
try:
with open(file,'r') as f:
txt = f.read()
except UnicodeDecodeError:
with open(file,'r',encoding='latin1') as f:
txt = f.read()
if file_type=='auto':
file_type = os.path.basename(file).split('_')[0].lower()[:3]
#find start (and end, if needed) of ocv data
if file_type=='ocv':
cidx = txt.find('CURVE\tTABLE')
skipfooter = 0
elif file_type=='eis':
cidx = txt.find('OCVCURVE\tTABLE')
post_txt = txt[txt.find('EOC\tQUANT'):]
skipfooter = len(post_txt.split('\n')) - 1
if cidx==-1:
		# couldn't find OCV curve data in file
# return empty dataframe
return pd.DataFrame([])
else:
#preceding text
pretxt = txt[:cidx]
#ocv curve data
ctable = txt[cidx:]
#column headers are next line after ZCURVE TABLE line
header_start = ctable.find('\n') + 1
header_end = header_start + ctable[header_start:].find('\n')
header = ctable[header_start:header_end].split('\t')
#units are next line after column headers
unit_end = header_end + 1 + ctable[header_end + 1:].find('\n')
units = ctable[header_end + 1:unit_end].split('\t')
#determine # of rows to skip by counting line breaks in preceding text
skiprows = len(pretxt.split('\n')) + 2
#if table is indented, ignore empty left column
if header[0] == '':
usecols = header[1:]
else:
usecols = header
#read data to DataFrame
data = pd.read_csv(file,sep='\t',skiprows=skiprows,skipfooter=skipfooter,header=None,names=header,usecols=usecols,engine='python')
#get timestamp
dt = get_timestamp(file)
#time_col = np.intersect1d(['Time','T'],data.columns) # EIS files in Repeating jv-EIS files have column named 'Time' instead of 'T'
data['timestamp'] = [dt + timedelta(seconds=t) for t in data['T']]
return data
def read_gen_curve_data(file):
"""
read generic curve data from Gamry .DTA file
Args:
file: file to read
"""
try:
with open(file,'r') as f:
txt = f.read()
except UnicodeDecodeError:
with open(file,'r',encoding='latin1') as f:
txt = f.read()
#find start of curve data
cidx = txt.find('CURVE\tTABLE')
skipfooter = 0
if cidx==-1:
		# couldn't find curve data in file
# return empty dataframe
return pd.DataFrame([])
else:
#preceding text
pretxt = txt[:cidx]
#ocv curve data
ctable = txt[cidx:]
#column headers are next line after ZCURVE TABLE line
header_start = ctable.find('\n') + 1
header_end = header_start + ctable[header_start:].find('\n')
header = ctable[header_start:header_end].split('\t')
#units are next line after column headers
unit_end = header_end + 1 + ctable[header_end + 1:].find('\n')
units = ctable[header_end + 1:unit_end].split('\t')
#determine # of rows to skip by counting line breaks in preceding text
skiprows = len(pretxt.split('\n')) + 2
#if table is indented, ignore empty left column
if header[0] == '':
usecols = header[1:]
else:
usecols = header
#read data to DataFrame
data = | pd.read_csv(file,sep='\t',skiprows=skiprows,skipfooter=skipfooter,header=None,names=header,usecols=usecols,engine='python') | pandas.read_csv |
import json
from tabulate import tabulate
import pandas as pd
fmt="psql" #"pipe" #"psql"
with open('out.json') as f:
d = json.load(f) # --> dict
print("# Moves\n")
for _d in d['moves']:
df = pd.DataFrame(_d)
a = tabulate(df,headers="keys", tablefmt=fmt)
print(a, end="\n\n")
for _d in d['energy'][0]['hamiltonian'][1]["nonbonded"]:
df = | pd.DataFrame(_d) | pandas.DataFrame |
import os
import uuid
from datetime import datetime
from time import sleep
import fsspec
import pandas as pd
import pytest
import v3iofs
from storey import EmitEveryEvent
import mlrun
import mlrun.feature_store as fs
from mlrun import store_manager
from mlrun.datastore.sources import CSVSource, ParquetSource
from mlrun.datastore.targets import CSVTarget, NoSqlTarget, ParquetTarget
from mlrun.features import Entity
from tests.system.base import TestMLRunSystem
@TestMLRunSystem.skip_test_if_env_not_configured
# Marked as enterprise because of v3io mount and remote spark
@pytest.mark.enterprise
class TestFeatureStoreSparkEngine(TestMLRunSystem):
project_name = "fs-system-spark-engine"
spark_service = ""
pq_source = "testdata.parquet"
csv_source = "testdata.csv"
spark_image_deployed = (
False # Set to True if you want to avoid the image building phase
)
test_branch = "" # For testing specific branch. e.g.: "https://github.com/mlrun/mlrun.git@development"
@classmethod
def _init_env_from_file(cls):
env = cls._get_env_from_file()
cls.spark_service = env["MLRUN_SYSTEM_TESTS_DEFAULT_SPARK_SERVICE"]
def get_local_pq_source_path(self):
return os.path.relpath(str(self.assets_path / self.pq_source))
def get_remote_pq_source_path(self, without_prefix=False):
path = "v3io://"
if without_prefix:
path = ""
path += "/bigdata/" + self.pq_source
return path
def get_local_csv_source_path(self):
return os.path.relpath(str(self.assets_path / self.csv_source))
def get_remote_csv_source_path(self, without_prefix=False):
path = "v3io://"
if without_prefix:
path = ""
path += "/bigdata/" + self.csv_source
return path
def custom_setup(self):
from mlrun import get_run_db
from mlrun.run import new_function
from mlrun.runtimes import RemoteSparkRuntime
self._init_env_from_file()
if not self.spark_image_deployed:
store, _ = store_manager.get_or_create_store(
self.get_remote_pq_source_path()
)
store.upload(
self.get_remote_pq_source_path(without_prefix=True),
self.get_local_pq_source_path(),
)
store, _ = store_manager.get_or_create_store(
self.get_remote_csv_source_path()
)
store.upload(
self.get_remote_csv_source_path(without_prefix=True),
self.get_local_csv_source_path(),
)
if not self.test_branch:
RemoteSparkRuntime.deploy_default_image()
else:
sj = new_function(
kind="remote-spark", name="remote-spark-default-image-deploy-temp"
)
sj.spec.build.image = RemoteSparkRuntime.default_image
sj.with_spark_service(spark_service="dummy-spark")
sj.spec.build.commands = ["pip install git+" + self.test_branch]
sj.deploy(with_mlrun=False)
get_run_db().delete_function(name=sj.metadata.name)
self.spark_image_deployed = True
def test_basic_remote_spark_ingest(self):
key = "patient_id"
measurements = fs.FeatureSet(
"measurements",
entities=[fs.Entity(key)],
timestamp_key="timestamp",
engine="spark",
)
source = ParquetSource("myparquet", path=self.get_remote_pq_source_path())
fs.ingest(
measurements,
source,
return_df=True,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
assert measurements.status.targets[0].run_id is not None
def test_basic_remote_spark_ingest_csv(self):
key = "patient_id"
name = "measurements"
measurements = fs.FeatureSet(
name,
entities=[fs.Entity(key)],
engine="spark",
)
source = CSVSource(
"mycsv", path=self.get_remote_csv_source_path(), time_field="timestamp"
)
fs.ingest(
measurements,
source,
return_df=True,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
features = [f"{name}.*"]
vec = fs.FeatureVector("test-vec", features)
resp = fs.get_offline_features(vec)
df = resp.to_dataframe()
assert type(df["timestamp"][0]).__name__ == "Timestamp"
def test_error_flow(self):
df = pd.DataFrame(
{
"name": ["Jean", "Jacques", "Pierre"],
"last_name": ["Dubois", "Dupont", "Lavigne"],
}
)
measurements = fs.FeatureSet(
"measurements",
entities=[fs.Entity("name")],
engine="spark",
)
with pytest.raises(mlrun.errors.MLRunInvalidArgumentError):
fs.ingest(
measurements,
df,
return_df=True,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
def test_ingest_to_csv(self):
key = "patient_id"
csv_path_spark = "v3io:///bigdata/test_ingest_to_csv_spark"
csv_path_storey = "v3io:///bigdata/test_ingest_to_csv_storey.csv"
measurements = fs.FeatureSet(
"measurements_spark",
entities=[fs.Entity(key)],
timestamp_key="timestamp",
engine="spark",
)
source = ParquetSource("myparquet", path=self.get_remote_pq_source_path())
targets = [CSVTarget(name="csv", path=csv_path_spark)]
fs.ingest(
measurements,
source,
targets,
spark_context=self.spark_service,
run_config=fs.RunConfig(local=False),
)
csv_path_spark = measurements.get_target_path(name="csv")
measurements = fs.FeatureSet(
"measurements_storey",
entities=[fs.Entity(key)],
timestamp_key="timestamp",
)
source = ParquetSource("myparquet", path=self.get_remote_pq_source_path())
targets = [CSVTarget(name="csv", path=csv_path_storey)]
fs.ingest(
measurements,
source,
targets,
)
csv_path_storey = measurements.get_target_path(name="csv")
read_back_df_spark = None
file_system = fsspec.filesystem("v3io")
for file_entry in file_system.ls(csv_path_spark):
filepath = file_entry["name"]
if not filepath.endswith("/_SUCCESS"):
read_back_df_spark = pd.read_csv(f"v3io://{filepath}")
break
assert read_back_df_spark is not None
read_back_df_storey = None
for file_entry in file_system.ls(csv_path_storey):
filepath = file_entry["name"]
read_back_df_storey = pd.read_csv(f"v3io://{filepath}")
break
assert read_back_df_storey is not None
assert read_back_df_spark.sort_index(axis=1).equals(
read_back_df_storey.sort_index(axis=1)
)
@pytest.mark.parametrize("partitioned", [True, False])
def test_schedule_on_filtered_by_time(self, partitioned):
name = f"sched-time-{str(partitioned)}"
now = datetime.now()
path = "v3io:///bigdata/bla.parquet"
fsys = fsspec.filesystem(v3iofs.fs.V3ioFS.protocol)
pd.DataFrame(
{
"time": [
| pd.Timestamp("2021-01-10 10:00:00") | pandas.Timestamp |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : <NAME>
# @Contact : <EMAIL>
import category_encoders.utils as util
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from autoflow.utils.logging_ import get_logger
class BaseImputer(BaseEstimator, TransformerMixin):
def __init__(
self,
categorical_feature=None,
numerical_feature=None,
copy=True,
missing_rate=0.4
):
self.missing_rate = missing_rate
self.numerical_feature = numerical_feature
self.copy = copy
self.categorical_feature = categorical_feature
self.logger = get_logger(self)
def fit(self, X, y=None, categorical_feature=None, numerical_feature=None, **kwargs):
X = util.convert_input(X)
if categorical_feature is not None:
self.categorical_feature = categorical_feature
if numerical_feature is not None:
self.numerical_feature = numerical_feature
        # automatically find categorical features
if self.categorical_feature is None and self.numerical_feature is None:
self.logger.warning(
f"You didn't declare numerical_feature or categorical_feature in {self.__class__.__name__}, "
f"program will auto find these by dtypes.")
self.categorical_feature = X.select_dtypes(include=["object", "category"]).columns
self.numerical_feature = X.select_dtypes(exclude=["object", "category"]).columns
else:
if self.categorical_feature is None:
if self.numerical_feature is not None:
self.categorical_feature = X.columns.difference(self.numerical_feature)
else:
self.categorical_feature = np.array([])
            if self.numerical_feature is None:
if self.categorical_feature is not None:
self.numerical_feature = X.columns.difference(self.categorical_feature)
else:
self.numerical_feature = np.array([])
        # todo: compute the missing rate of each column and drop columns whose rate is too high
missing_rates = np.count_nonzero( | pd.isna(X) | pandas.isna |
# import app components
from app import app, data
from flask_cors import CORS
CORS(app) # enable CORS for all routes
# import libraries
from flask import request
import pandas as pd
import re
from datetime import datetime
from functools import reduce
# define functions
## process date args
def date_arg(arg):
try:
arg = datetime.strptime(arg, '%d-%m-%Y')
except:
try:
arg = datetime.strptime(arg, '%Y-%m-%d')
except:
arg = None
return arg
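## e.g. date_arg('15-01-2021') and date_arg('2021-01-15') both parse to datetime(2021, 1, 15);
## strings matching neither format return None (illustrative values)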
## process missing arg
def missing_arg(missing):
if missing == 'na':
missing_val = 'NA'
elif missing == 'empty':
missing_val = ''
elif missing == 'nan':
missing_val = 'NaN'
else:
missing_val = 'NULL'
return(missing_val)
## get date column
def get_date_col(df):
return list(filter(re.compile('^date_.*').search, df.columns.values))[0]
# list of datasets by location
data_canada = ['cases_timeseries_canada',
'mortality_timeseries_canada',
'recovered_timeseries_canada',
'testing_timeseries_canada',
'active_timeseries_canada',
'vaccine_administration_timeseries_canada',
'vaccine_distribution_timeseries_canada',
'vaccine_completion_timeseries_canada']
data_prov = ['cases_timeseries_prov',
'mortality_timeseries_prov',
'recovered_timeseries_prov',
'testing_timeseries_prov',
'active_timeseries_prov',
'vaccine_administration_timeseries_prov',
'vaccine_distribution_timeseries_prov',
'vaccine_completion_timeseries_prov']
data_hr = ['cases_timeseries_hr',
'mortality_timeseries_hr']
data_names = ['cases',
'mortality',
'recovered',
'testing',
'active',
'avaccine',
'dvaccine',
'cvaccine']
data_sknew = ['sk_new_cases_timeseries_hr_combined',
'sk_new_mortality_timeseries_hr_combined']
data_names_dates = {
'date_report': 'cases',
'date_death_report': 'mortality',
'date_recovered': 'recovered',
'date_testing': 'testing',
'date_active': 'active',
'date_vaccine_administered': 'avaccine',
'date_vaccine_distributed': 'dvaccine',
'date_vaccine_completed': 'cvaccine'
}
data_other = {
'prov': 'prov_map',
'hr': 'hr_map',
'age_cases': 'age_map_cases',
'age_mortality': 'age_map_mortality'
}
@app.route('/')
@app.route('/index')
def index():
# initialize response
response = {}
# subset dataframes
dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_canada}
# rename date columns
for df in dfs.values():
df.columns = df.columns.str.replace('^date_.*', 'date', regex = True)
# subset active dataframe to avoid duplicate columns
dfs['active_timeseries_canada'] = dfs['active_timeseries_canada'].drop(columns=['cumulative_cases',
'cumulative_recovered',
'cumulative_deaths'])
# merge dataframes
df = reduce(lambda left, right: pd.merge(left, right, on=['date', 'province'], how='outer'), dfs.values())
# convert date column and filter to most recent date
df['date'] = pd.to_datetime(df['date'], dayfirst=True)
df = df.loc[df['date'] == data.version['date']]
# format output
df['date'] = df['date'].dt.strftime('%d-%m-%Y')
df = df.fillna('NULL')
response['summary'] = df.to_dict(orient='records')
# add version to response
response['version'] = data.version['version']
# return response
return response
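# illustrative response shape for GET / (field values depend on the loaded data):
# {"summary": [{"date": "dd-mm-YYYY", "province": ..., "cases": ..., ...}], "version": ...}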
@app.route('/timeseries')
def timeseries():
# initialize response
response = {}
# read arguments
stat = request.args.get('stat')
loc = request.args.get('loc')
date = request.args.get('date')
after = request.args.get('after')
before = request.args.get('before')
ymd = request.args.get('ymd')
missing = request.args.get('missing')
version = request.args.get('version')
# process date arguments
if date:
date = date_arg(date)
if after:
after = date_arg(after)
if before:
before = date_arg(before)
# process other arguments
missing_val = missing_arg(missing)
if not loc:
loc = 'prov'
# get dataframes
if loc == 'canada':
if stat == 'cases':
data_name = data_canada[0]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'mortality':
data_name = data_canada[1]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'recovered':
data_name = data_canada[2]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'testing':
data_name = data_canada[3]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'active':
data_name = data_canada[4]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'avaccine':
data_name = data_canada[5]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'dvaccine':
data_name = data_canada[6]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'cvaccine':
data_name = data_canada[7]
dfs = [pd.read_csv(data.ccodwg[data_name])]
else:
dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_canada}
dfs = list(dfs.values()) # convert to list
elif loc == 'prov' or loc in data.keys_prov.keys():
if stat == 'cases':
data_name = data_prov[0]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'mortality':
data_name = data_prov[1]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'recovered':
data_name = data_prov[2]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'testing':
data_name = data_prov[3]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'active':
data_name = data_prov[4]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'avaccine':
data_name = data_prov[5]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'dvaccine':
data_name = data_prov[6]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'cvaccine':
data_name = data_prov[7]
dfs = [pd.read_csv(data.ccodwg[data_name])]
else:
dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_prov}
dfs = list(dfs.values()) # convert to list
elif loc == 'hr' or loc in data.keys_hr.keys():
if stat == 'cases':
data_name = data_hr[0]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'mortality':
data_name = data_hr[1]
dfs = [pd.read_csv(data.ccodwg[data_name])]
else:
dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_canada}
dfs = list(dfs.values()) # convert to list
else:
return "Record not found", 404
# filter by location
if loc in data.keys_prov.keys():
for i in range(len(dfs)):
dfs[i] = dfs[i].loc[dfs[i]['province'] == data.keys_prov[loc]['province']]
elif loc in data.keys_hr.keys():
for i in range(len(dfs)):
dfs[i] = dfs[i].loc[dfs[i]['health_region'] == data.keys_hr[loc]['health_region']]
if loc != '9999':
dfs[i] = dfs[i].loc[dfs[i]['province'] == data.keys_hr[loc]['province']]
# convert date column
for i in range(len(dfs)):
col_date = get_date_col(dfs[i])
dfs[i][col_date] = pd.to_datetime(dfs[i][col_date], dayfirst=True)
# filter by date
for i in range(len(dfs)):
col_date = get_date_col(dfs[i])
if date:
dfs[i] = dfs[i].loc[dfs[i][col_date] == date]
if after:
dfs[i] = dfs[i].loc[dfs[i][col_date] >= after]
if before:
dfs[i] = dfs[i].loc[dfs[i][col_date] <= before]
# format output
for i in range(len(dfs)):
col_date = get_date_col(dfs[i])
if ymd == 'true':
dfs[i][col_date] = dfs[i][col_date].dt.strftime('%Y-%m-%d')
else:
dfs[i][col_date] = dfs[i][col_date].dt.strftime('%d-%m-%Y')
dfs[i] = dfs[i].fillna(missing_val)
# determine response name and add dataframe to response
resp_name = data_names_dates[col_date]
response[resp_name] = dfs[i].to_dict(orient='records')
# add version to response
if version == 'true':
response['version'] = data.version['version']
# return response
return response
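# example query (illustrative; assumes 'ON' is a key in data.keys_prov):
# GET /timeseries?stat=cases&loc=ON&after=2021-01-01&ymd=true
# returns {"cases": [...]} restricted to Ontario reports from 2021-01-01 onward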
@app.route('/sknew')
def sknew():
# initialize response
response = {}
# read arguments
stat = request.args.get('stat')
loc = request.args.get('loc')
date = request.args.get('date')
after = request.args.get('after')
before = request.args.get('before')
ymd = request.args.get('ymd')
missing = request.args.get('missing')
version = request.args.get('version')
# process date arguments
if date:
date = date_arg(date)
if after:
after = date_arg(after)
if before:
before = date_arg(before)
# process other arguments
missing_val = missing_arg(missing)
# get dataframes
if stat == 'cases':
data_name = data_sknew[0]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'mortality':
data_name = data_sknew[1]
dfs = [pd.read_csv(data.ccodwg[data_name])]
else:
dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_sknew}
dfs = list(dfs.values()) # convert to list
# filter by location
if loc in data.keys_prov.keys():
for i in range(len(dfs)):
dfs[i] = dfs[i].loc[dfs[i]['province'] == data.keys_prov[loc]['province']]
elif loc in data.keys_hr.keys():
for i in range(len(dfs)):
dfs[i] = dfs[i].loc[dfs[i]['health_region'] == data.keys_hr[loc]['health_region']]
if loc != '9999':
dfs[i] = dfs[i].loc[dfs[i]['province'] == data.keys_hr[loc]['province']]
# convert date column
for i in range(len(dfs)):
col_date = get_date_col(dfs[i])
dfs[i][col_date] = pd.to_datetime(dfs[i][col_date], dayfirst=True)
# filter by date
for i in range(len(dfs)):
col_date = get_date_col(dfs[i])
if date:
dfs[i] = dfs[i].loc[dfs[i][col_date] == date]
if after:
dfs[i] = dfs[i].loc[dfs[i][col_date] >= after]
if before:
dfs[i] = dfs[i].loc[dfs[i][col_date] <= before]
# format output
for i in range(len(dfs)):
col_date = get_date_col(dfs[i])
if ymd == 'true':
dfs[i][col_date] = dfs[i][col_date].dt.strftime('%Y-%m-%d')
else:
dfs[i][col_date] = dfs[i][col_date].dt.strftime('%d-%m-%Y')
dfs[i] = dfs[i].fillna(missing_val)
# determine response name and add dataframe to response
resp_name = data_names_dates[col_date]
response[resp_name] = dfs[i].to_dict(orient='records')
# add version to response
if version == 'true':
response['version'] = data.version['version']
# return response
return response
@app.route('/summary')
def summary():
# initialize response
response = {}
# read arguments
loc = request.args.get('loc')
date = request.args.get('date')
after = request.args.get('after')
before = request.args.get('before')
ymd = request.args.get('ymd')
missing = request.args.get('missing')
version = request.args.get('version')
# process date arguments
if date:
date = date_arg(date)
if after:
after = date_arg(after)
if before:
before = date_arg(before)
if not date and not after and not before:
date = data.version['date']
# process other arguments
missing_val = missing_arg(missing)
if not loc:
loc = 'prov'
# get dataframes and subset by location
if loc == 'canada':
dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_canada}
elif loc == 'prov' or loc in data.keys_prov.keys():
dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_prov}
elif loc == 'hr' or loc in data.keys_hr.keys():
dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_hr}
else:
return "Record not found", 404
# rename date columns
for df in dfs.values():
        df.columns = df.columns.str.replace('^date_.*', 'date', regex=True)
# subset active dataframe to avoid duplicate columns
if loc == 'canada':
dfs['active_timeseries_canada'] = dfs['active_timeseries_canada'].drop(columns=['cumulative_cases',
'cumulative_recovered',
'cumulative_deaths'])
elif loc == 'prov' or loc in data.keys_prov.keys():
dfs['active_timeseries_prov'] = dfs['active_timeseries_prov'].drop(columns=['cumulative_cases',
'cumulative_recovered',
'cumulative_deaths'])
# merge dataframes
if loc == 'hr' or loc in data.keys_hr.keys():
df = reduce(lambda left, right: pd.merge(left, right, on=['date', 'province', 'health_region'], how='outer'), dfs.values())
else:
df = reduce(lambda left, right: pd.merge(left, right, on=['date', 'province'], how='outer'), dfs.values())
# convert dates column
df['date'] = pd.to_datetime(df['date'], dayfirst=True)
# filter by location
if loc in data.keys_prov.keys():
df = df.loc[df['province'] == data.keys_prov[loc]['province']]
elif loc in data.keys_hr.keys():
df = df.loc[df['health_region'] == data.keys_hr[loc]['health_region']]
print("HI")
if loc != '9999':
df = df.loc[df['province'] == data.keys_hr[loc]['province']]
# filter by date
if date:
df = df.loc[df['date'] == date]
if after:
df = df.loc[df['date'] >= after]
if before:
df = df.loc[df['date'] <= before]
# format output
if ymd == 'true':
df['date'] = df['date'].dt.strftime('%Y-%m-%d')
else:
df['date'] = df['date'].dt.strftime('%d-%m-%Y')
df = df.fillna(missing_val)
response['summary'] = df.to_dict(orient='records')
# add version to response
if version == 'true':
response['version'] = data.version['version']
# return response
return response
@app.route('/individual')
def individual():
return "Individual level data are retired. Archived data may be downloaded from GitHub: https://github.com/ccodwg/Covid19Canada", 404
@app.route('/other')
def other():
# initialize response
response = {}
# read arguments
stat = request.args.get('stat')
missing = request.args.get('missing')
version = request.args.get('version')
# process other arguments
missing_val = missing_arg(missing)
# get dataframes
if stat:
if (stat == 'prov'):
dfs = pd.read_csv(data.ccodwg[data_other[stat]])
elif (stat == 'hr'):
dfs = pd.read_csv(data.ccodwg[data_other[stat]])
elif (stat == 'age_cases'):
dfs = pd.read_csv(data.ccodwg[data_other[stat]])
elif (stat == 'age_mortality'):
dfs = | pd.read_csv(data.ccodwg[data_other[stat]]) | pandas.read_csv |
"""
This module implements dynamic visualizations for EOPatch
Credits:
Copyright (c) 2017-2022 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
Copyright (c) 2017-2022 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
Copyright (c) 2017-2022 <NAME> (Sinergise)
Copyright (c) 2017-2019 <NAME>, <NAME> (Sinergise)
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
import dataclasses
import datetime as dt
from typing import Optional, List, cast
import numpy as np
import pandas as pd
from geopandas import GeoDataFrame
from shapely.geometry import Polygon
try:
import xarray as xr
import holoviews as hv
import geoviews as gv
import hvplot # pylint: disable=unused-import
import hvplot.xarray # pylint: disable=unused-import
import hvplot.pandas # pylint: disable=unused-import
from cartopy import crs as ccrs
except ImportError as exception:
raise ImportError(
"This module requires an installation of dynamic plotting package extension. It can be installed with:\n"
"pip install eo-learn-visualization[HVPLOT]"
) from exception
from sentinelhub import BBox, CRS
from eolearn.core import EOPatch, FeatureType, FeatureTypeSet
from eolearn.core.utils.parsing import parse_feature
from .xarray import array_to_dataframe, get_new_coordinates, string_to_variable
from ..eopatch_base import BasePlotConfig, BaseEOPatchVisualization
@dataclasses.dataclass
class HvPlotConfig(BasePlotConfig):
"""Additional advanced configurations for `hvplot` visualization.
:param plot_width: Width of the plot.
:param plot_height: Height of the plot.
:param plot_per_pixel: Whether to plot data for each pixel (line), for `FeatureType.DATA` and `FeatureType.MASK`.
:param vdims: Value dimensions for plotting a `GeoDataFrame`.
"""
plot_width: int = 800
plot_height: int = 500
plot_per_pixel: bool = False
vdims: Optional[str] = None
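# Illustrative configuration (values are arbitrary): a wider canvas with per-pixel plotting disabled.
# custom_config = HvPlotConfig(plot_width=900, plot_height=450, plot_per_pixel=False)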
class HvPlotVisualization(BaseEOPatchVisualization):
"""EOPatch visualization using `HvPlot` framework."""
def __init__(
self, eopatch: EOPatch, feature, *, mask_feature=None, config: Optional[HvPlotConfig] = None, **kwargs
):
"""
:param eopatch: An EOPatch with a feature to plot.
:param feature: A feature from the given EOPatch to plot.
:param mask_feature: A mask feature to be applied as a mask to the feature that is being plotted
"""
config = config or HvPlotConfig()
super().__init__(eopatch, feature, config=config, **kwargs)
self.config = cast(HvPlotConfig, self.config)
self.mask_feature = parse_feature(mask_feature) if mask_feature else None
def plot(self):
"""Creates a `hvplot` of the feature from the given `EOPatch`."""
feature_type, _ = self.feature
data, timestamps = self.collect_and_prepare_feature()
eopatch = self.eopatch.copy()
eopatch[self.feature] = data
eopatch.timestamp = timestamps
if self.config.plot_per_pixel and feature_type in FeatureTypeSet.RASTER_TYPES_4D:
vis = self._plot_pixel(eopatch)
elif feature_type in (FeatureType.MASK, *FeatureTypeSet.RASTER_TYPES_3D):
vis = self._plot_raster(eopatch)
elif feature_type is FeatureType.DATA:
vis = self._plot_data(eopatch)
elif feature_type is FeatureType.VECTOR:
vis = self._plot_vector(eopatch)
elif feature_type is FeatureType.VECTOR_TIMELESS:
vis = self._plot_vector_timeless(eopatch)
else:
vis = self._plot_scalar_label(eopatch)
return vis.opts(plot=dict(width=self.config.plot_width, height=self.config.plot_height))
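    # Illustrative usage (assumes an EOPatch carrying a DATA feature named "NDVI"):
    # vis = HvPlotVisualization(eopatch, (FeatureType.DATA, "NDVI"))
    # dmap = vis.plot()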
def _plot_data(self, eopatch: EOPatch):
"""Plots the FeatureType.DATA of eopatch."""
crs = eopatch.bbox.crs
crs = CRS.POP_WEB if crs is CRS.WGS84 else crs
data_da = array_to_dataframe(eopatch, self.feature, crs=crs)
if self.mask_feature:
data_da = self._mask_data(data_da, eopatch)
timestamps = eopatch.timestamp
crs = eopatch.bbox.crs
if not self.rgb:
return data_da.hvplot(x="x", y="y", crs=ccrs.epsg(crs.epsg))
_, feature_name = self.feature
data_rgb = self._eopatch_da_to_rgb(data_da, feature_name, crs)
rgb_dict = {timestamp_: self._plot_rgb_one(data_rgb, timestamp_) for timestamp_ in timestamps}
return hv.HoloMap(rgb_dict, kdims=["time"])
@staticmethod
def _plot_rgb_one(eopatch_da: xr.DataArray, timestamp: dt.datetime):
"""Returns visualization for one timestamp for FeatureType.DATA"""
return eopatch_da.sel(time=timestamp).drop("time").hvplot(x="x", y="y")
def _plot_raster(self, eopatch: EOPatch):
"""Makes visualization for raster data (except for FeatureType.DATA)"""
crs = eopatch.bbox.crs
crs = CRS.POP_WEB if crs is CRS.WGS84 else crs
data_da = array_to_dataframe(eopatch, self.feature, crs=crs)
data_min = data_da.values.min()
data_max = data_da.values.max()
data_levels = len(np.unique(data_da))
data_levels = 11 if data_levels > 11 else data_levels
data_da = data_da.where(data_da > 0).fillna(-1)
vis = data_da.hvplot(x="x", y="y", crs=ccrs.epsg(crs.epsg)).opts(
clim=(data_min, data_max), clipping_colors={"min": "transparent"}, color_levels=data_levels
)
return vis
def _plot_vector(self, eopatch: EOPatch):
"""A visualization for vector feature"""
crs = eopatch.bbox.crs
timestamps = eopatch.timestamp
data_gpd = self._fill_vector(eopatch)
if crs is CRS.WGS84:
crs = CRS.POP_WEB
data_gpd = data_gpd.to_crs(crs.pyproj_crs())
shapes_dict = {timestamp_: self._plot_shapes_one(data_gpd, timestamp_, crs) for timestamp_ in timestamps}
return hv.HoloMap(shapes_dict, kdims=["time"])
def _fill_vector(self, eopatch: EOPatch) -> GeoDataFrame:
"""Adds timestamps from eopatch to GeoDataFrame."""
vector = eopatch[self.feature].copy()
vector["valid"] = True
eopatch_timestamps = eopatch.timestamp
vector_timestamps = set(vector[self.config.timestamp_column])
blank_timestamps = [timestamp for timestamp in eopatch_timestamps if timestamp not in vector_timestamps]
dummy_geometry = self._create_dummy_polygon(eopatch.bbox, 0.0000001)
temp_df = self._create_dummy_dataframe(vector, blank_timestamps=blank_timestamps, dummy_geometry=dummy_geometry)
final_vector = GeoDataFrame(pd.concat((vector, temp_df), ignore_index=True))
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:annorxiver]
# language: python
# name: conda-env-annorxiver-py
# ---
# # Generate BioRxiv Document Embeddings
# This notebook is designed to generate document embeddings for every article in bioRxiv.
# +
from pathlib import Path
import re
import sys
from gensim.models import Word2Vec
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from tqdm import tqdm_notebook
import umap
from annorxiver_modules.document_helper import (
generate_doc_vector,
DocIterator,
dump_article_text,
)
# -
journal_map_df = pd.read_csv(
"../exploratory_data_analysis/output/biorxiv_article_metadata.tsv", sep="\t"
)
journal_map_df.head()
biorxiv_xpath_str = (
"//abstract/p|//abstract/title|//body/sec//p|//body/sec//title|//body/p"
)
# # Output Documents to File
# This section dumps all of the bioRxiv text into a single file, which makes training the word2vec model straightforward.
# +
# Only use the most current version of the documents
latest_journal_version = journal_map_df.groupby("doi").agg(
{"document": "first", "doi": "last"}
)
if not Path("output/word2vec_input/biorxiv_text.txt").exists():
with open("output/word2vec_input/biorxiv_text.txt", "w") as f:
for article in tqdm_notebook(latest_journal_version.document.tolist()):
document_text = dump_article_text(
file_path=f"../biorxiv_articles/{article}",
xpath_str=biorxiv_xpath_str,
remove_stop_words=True,
)
f.write("\n".join(document_text))
f.write("\n\n")
# -
# # Train Word2Vec
# This section trains the word2vec model (continuous bag of words [CBOW]). Since the number of dimensions can vary, I decided to train multiple models: 150, 250, 300. Each model is saved into its own respective directory.
word_embedding_sizes = [150, 250, 300]
for size in word_embedding_sizes:
# Create save path
word_path = Path(f"output/word2vec_models/{size}")
word_path.mkdir(parents=True, exist_ok=True)
# If model exists don't run again
if Path(f"{str(word_path.resolve())}/biorxiv_{size}.model").exists():
continue
# Run Word2Vec
words = Word2Vec(
DocIterator("output/word2vec_input/biorxiv_text.txt"),
size=size,
iter=20,
seed=100,
)
# Save the model for future use
words.save(f"{str(word_path.resolve())}/biorxiv_{size}.model")
# # Generate Document Embeddings
# After training the word2vec models, the next step is to generate document embeddings. For this experiment each document embedding is generated via an average of all word vectors contained in the document. There are better approaches towards doing this, but this can serve as a simple baseline.
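# As a rough sketch of that averaging baseline (an illustration only -- the actual logic
# lives in `generate_doc_vector`, imported above, and the helper below is hypothetical):
#
#     def naive_doc_vector(word_model, tokens):
#         vecs = [word_model.wv[tok] for tok in tokens if tok in word_model.wv]
#         return sum(vecs) / len(vecs) if vecs else [0.0] * word_model.vector_size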
for word_model_path in Path().rglob("output/word2vec_models/*/*.model"):
model_dim = word_model_path.parents[0].stem
word_model = Word2Vec.load(str(word_model_path.resolve()))
biorxiv_document_map = {
document: generate_doc_vector(
word_model,
document_path=f"../biorxiv_articles/{document}",
xpath=biorxiv_xpath_str,
)
for document in tqdm_notebook(journal_map_df.document.tolist())
}
biorxiv_vec_df = (
pd.DataFrame.from_dict(biorxiv_document_map, orient="index")
.rename(columns={col: f"feat_{col}" for col in range(int(model_dim))})
.rename_axis("document")
.reset_index()
)
biorxiv_vec_df.to_csv(
f"output/word2vec_output/biorxiv_all_articles_{model_dim}.tsv.xz",
sep="\t",
index=False,
compression="xz",
)
# # UMAP the Documents
# After generating document embeddings, the next step is to visualize all the documents. In order to visualize the embeddings a low dimensional representation is needed. UMAP is an algorithm that can generate this representation, while grouping similar embeddings together.
random_state = 100
n_neighbors = journal_map_df.category.unique().shape[0]
n_components = 2
for biorxiv_doc_vectors in Path().rglob(
"output/word2vec_output/biorxiv_all_articles*.tsv.xz"
):
model_dim = int(re.search(r"(\d+)", biorxiv_doc_vectors.stem).group(1))
biorxiv_articles_df = pd.read_csv(str(biorxiv_doc_vectors.resolve()), sep="\t")
reducer = umap.UMAP(
n_components=n_components, n_neighbors=n_neighbors, random_state=random_state
)
embedding = reducer.fit_transform(
biorxiv_articles_df[[f"feat_{idx}" for idx in range(model_dim)]].values
)
umapped_df = (
pd.DataFrame(embedding, columns=["umap1", "umap2"])
.assign(document=biorxiv_articles_df.document.values.tolist())
.merge(journal_map_df[["category", "document", "doi"]], on="document")
)
umapped_df.to_csv(
f"output/embedding_output/umap/biorxiv_umap_{model_dim}.tsv",
sep="\t",
index=False,
)
# # TSNE the Documents
# After generating document embeddings, the next step is to visualize all the documents. In order to visualize the embeddings a low dimensional representation is needed. TSNE is another algorithm (besides UMAP) that can generate this representation, while grouping similar embeddings together.
n_components = 2
random_state = 100
for biorxiv_doc_vectors in Path().rglob(
"output/word2vec_output/biorxiv_all_articles*.tsv.xz"
):
model_dim = int(re.search(r"(\d+)", biorxiv_doc_vectors.stem).group(1))
biorxiv_articles_df = pd.read_csv(str(biorxiv_doc_vectors.resolve()), sep="\t")
reducer = TSNE(n_components=n_components, random_state=random_state)
embedding = reducer.fit_transform(
biorxiv_articles_df[[f"feat_{idx}" for idx in range(model_dim)]].values
)
tsne_df = pd.DataFrame(embedding, columns=["tsne1", "tsne2"])
#%%
"""
Analyze model:
Meant to analyze each model and its performance
"""
import h5py
import matplotlib.pyplot as plt
import seaborn
import numpy as np
import pandas as pd
import os
from keras.models import load_model
path = os.path.abspath(os.curdir)
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
#%%
df = pd.read_csv('{}/csv-data/movie-data.csv'.format(path))
df.drop(columns='Unnamed: 0', inplace=True)
action_model = load_model('{}/model_version/n_most/action_model.h5'.format(path))
adventure_model = load_model('{}/model_version/n_most/adventure_model.h5'.format(path))
comedy_model = load_model('{}/model_version/n_most/comedy_model.h5'.format(path))
crime_model = load_model('{}/model_version/n_most/crime_model.h5'.format(path))
family_model = load_model('{}/model_version/n_most/family_model.h5'.format(path))
mystery_model = load_model('{}/model_version/n_most/mystery_model.h5'.format(path))
romance_model = load_model('{}/model_version/n_most/romance_model.h5'.format(path))
thriller_model = load_model('{}/model_version/n_most/thriller_model.h5'.format(path))
#%%
print(df.head())
#%%
features = df['plot'].values
n_most_common_words = 10000
max_len = 500
tokenizer = Tokenizer(num_words=n_most_common_words, lower=True)
tokenizer.fit_on_texts(features)
sequences = tokenizer.texts_to_sequences(features)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
# print(word_index)
X = pad_sequences(sequences, maxlen=500)
# print(X)
#%%
models = {"Action": action_model,
"Adventure": adventure_model,
'Comedy': comedy_model,
"Crime": crime_model,
"Family": family_model,
"Mystery": mystery_model,
"Romance": romance_model,
"Thriller": thriller_model}
for genre, model in models.items():
y = df[genre]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=True)
prediction = model.predict(X_test)
cf_m = confusion_matrix(y_test.values.tolist(), np.rint(prediction.flatten()).tolist())
plt.title('Actual')
seaborn.set(font_scale=1.1) # for label size
rep = pd.DataFrame(cf_m, index=['N {}'.format(genre), genre],
columns=['N {}'.format(genre), genre])
sb = seaborn.heatmap(rep, annot=True, fmt='g').xaxis.set_ticks_position('top')
plt.ylabel('Predicted')
plt.xlabel('Bidirectional LSTM')
plt.show()
#%%
print(path)
df_co = pd.read_csv('{}/csv-data/movie-data-cleaned.csv'.format(path))
df_co.drop(['Unnamed: 0'], axis=1, inplace=True)
df_im = pd.read_csv('{}/csv-data/movies_genres.csv'.format(path), delimiter='\t')
df_im.head()
imdb_genres = df_im.drop(['plot', 'title', 'Sci-Fi','Documentary', 'Reality-TV', 'Animation'], axis=1)
counts = []
categories = list(imdb_genres.columns.values)
for i in categories:
counts.append((i, imdb_genres[i].sum()))
df_stats_imdb = pd.DataFrame(counts, columns=['genre', '#movies'])
"""
Base and utility classes for tseries type pandas objects.
"""
from __future__ import annotations
from datetime import datetime
from typing import (
TYPE_CHECKING,
Any,
Callable,
Sequence,
TypeVar,
cast,
final,
)
import warnings
import numpy as np
from pandas._libs import (
NaT,
Timedelta,
lib,
)
from pandas._libs.tslibs import (
BaseOffset,
Resolution,
Tick,
parsing,
to_offset,
)
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
cache_readonly,
doc,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_dtype_equal,
is_integer,
is_list_like,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
_index_shared_docs,
)
from pandas.core.indexes.extension import (
NDArrayBackedExtensionIndex,
inherit_names,
)
from pandas.core.indexes.range import RangeIndex
from pandas.core.tools.timedeltas import to_timedelta
if TYPE_CHECKING:
from pandas import CategoricalIndex
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_T = TypeVar("_T", bound="DatetimeIndexOpsMixin")
_TDT = TypeVar("_TDT", bound="DatetimeTimedeltaMixin")
@inherit_names(
["inferred_freq", "_resolution_obj", "resolution"],
DatetimeLikeArrayMixin,
cache=True,
)
@inherit_names(["mean", "asi8", "freq", "freqstr"], DatetimeLikeArrayMixin)
class DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex):
"""
Common ops mixin to support a unified interface datetimelike Index.
"""
_is_numeric_dtype = False
_can_hold_strings = False
_data: DatetimeArray | TimedeltaArray | PeriodArray
freq: BaseOffset | None
freqstr: str | None
_resolution_obj: Resolution
# ------------------------------------------------------------------------
@cache_readonly
def hasnans(self) -> bool:
return self._data._hasna
def equals(self, other: Any) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
elif other.dtype.kind in ["f", "i", "u", "c"]:
return False
elif not isinstance(other, type(self)):
should_try = False
inferable = self._data._infer_matches
if other.dtype == object:
should_try = other.inferred_type in inferable
elif is_categorical_dtype(other.dtype):
other = cast("CategoricalIndex", other)
should_try = other.categories.inferred_type in inferable
if should_try:
try:
other = type(self)(other)
except (ValueError, TypeError, OverflowError):
# e.g.
# ValueError -> cannot parse str entry, or OutOfBoundsDatetime
# TypeError -> trying to convert IntervalIndex to DatetimeIndex
# OverflowError -> Index([very_large_timedeltas])
return False
if not is_dtype_equal(self.dtype, other.dtype):
# have different timezone
return False
return np.array_equal(self.asi8, other.asi8)
@Appender(Index.__contains__.__doc__)
def __contains__(self, key: Any) -> bool:
hash(key)
try:
self.get_loc(key)
except (KeyError, TypeError, ValueError):
return False
return True
def _convert_tolerance(self, tolerance, target):
tolerance = np.asarray(to_timedelta(tolerance).to_numpy())
return super()._convert_tolerance(tolerance, target)
# --------------------------------------------------------------------
# Rendering Methods
def format(
self,
name: bool = False,
formatter: Callable | None = None,
na_rep: str = "NaT",
date_format: str | None = None,
) -> list[str]:
"""
Render a string representation of the Index.
"""
header = []
if name:
header.append(
ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
if self.name is not None
else ""
)
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, na_rep=na_rep, date_format=date_format)
def _format_with_header(
self, header: list[str], na_rep: str = "NaT", date_format: str | None = None
) -> list[str]:
# matches base class except for whitespace padding and date_format
return header + list(
self._format_native_types(na_rep=na_rep, date_format=date_format)
)
@property
def _formatter_func(self):
return self._data._formatter()
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value).
"""
attrs = super()._format_attrs()
for attrib in self._attributes:
# iterating over _attributes prevents us from doing this for PeriodIndex
if attrib == "freq":
freq = self.freqstr
if freq is not None:
freq = repr(freq) # e.g. D -> 'D'
attrs.append(("freq", freq))
return attrs
@Appender(Index._summary.__doc__)
def _summary(self, name=None) -> str:
result = super()._summary(name=name)
if self.freq:
result += f"\nFreq: {self.freqstr}"
return result
# --------------------------------------------------------------------
# Indexing Methods
@final
def _can_partial_date_slice(self, reso: Resolution) -> bool:
# e.g. test_getitem_setitem_periodindex
# History of conversation GH#3452, GH#3931, GH#2369, GH#14826
return reso > self._resolution_obj
# NB: for DTI/PI, not TDI
def _parsed_string_to_bounds(self, reso: Resolution, parsed):
raise NotImplementedError
def _parse_with_reso(self, label: str):
# overridden by TimedeltaIndex
parsed, reso_str = parsing.parse_time_string(label, self.freq)
reso = Resolution.from_attrname(reso_str)
return parsed, reso
def _get_string_slice(self, key: str):
# overridden by TimedeltaIndex
parsed, reso = self._parse_with_reso(key)
try:
return self._partial_date_slice(reso, parsed)
except KeyError as err:
raise KeyError(key) from err
@final
def _partial_date_slice(
self,
reso: Resolution,
parsed: datetime,
):
"""
Parameters
----------
reso : Resolution
parsed : datetime
Returns
-------
slice or ndarray[intp]
"""
if not self._can_partial_date_slice(reso):
raise ValueError
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
vals = self._data._ndarray
unbox = self._data._unbox
if self.is_monotonic_increasing:
if len(self) and (
(t1 < self[0] and t2 < self[0]) or (t1 > self[-1] and t2 > self[-1])
):
# we are out of range
raise KeyError
# TODO: does this depend on being monotonic _increasing_?
# a monotonic (sorted) series can be sliced
left = vals.searchsorted(unbox(t1), side="left")
right = vals.searchsorted(unbox(t2), side="right")
return slice(left, right)
else:
lhs_mask = vals >= unbox(t1)
rhs_mask = vals <= unbox(t2)
# try to find the dates
return (lhs_mask & rhs_mask).nonzero()[0]
def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):
"""
If label is a string, cast it to scalar type according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'} or None
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ["loc", "getitem", None, lib.no_default]
self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound")
if isinstance(label, str):
try:
parsed, reso = self._parse_with_reso(label)
except ValueError as err:
# DTI -> parsing.DateParseError
# TDI -> 'unit abbreviation w/o a number'
# PI -> string cannot be parsed as datetime-like
raise self._invalid_indexer("slice", label) from err
lower, upper = self._parsed_string_to_bounds(reso, parsed)
return lower if side == "left" else upper
elif not isinstance(label, self._data._recognized_scalars):
raise self._invalid_indexer("slice", label)
return label
# --------------------------------------------------------------------
# Arithmetic Methods
def shift(self: _T, periods: int = 1, freq=None) -> _T:
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.DatetimeIndex
Shifted index.
See Also
--------
Index.shift : Shift values of Index.
PeriodIndex.shift : Shift values of PeriodIndex.
"""
arr = self._data.view()
arr._freq = self.freq
result = arr._time_shift(periods, freq=freq)
return type(self)._simple_new(result, name=self.name)
# --------------------------------------------------------------------
@doc(Index._maybe_cast_listlike_indexer)
def _maybe_cast_listlike_indexer(self, keyarr):
try:
res = self._data._validate_listlike(keyarr, allow_object=True)
except (ValueError, TypeError):
if not isinstance(keyarr, ExtensionArray):
# e.g. we don't want to cast DTA to ndarray[object]
res = com.asarray_tuplesafe(keyarr)
# TODO: com.asarray_tuplesafe shouldn't cast e.g. DatetimeArray
else:
res = keyarr
return Index(res, dtype=res.dtype)
class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin):
"""
Mixin class for methods shared by DatetimeIndex and TimedeltaIndex,
but not PeriodIndex
"""
_data: DatetimeArray | TimedeltaArray
_comparables = ["name", "freq"]
_attributes = ["name", "freq"]
# Compat for frequency inference, see GH#23789
_is_monotonic_increasing = Index.is_monotonic_increasing
_is_monotonic_decreasing = Index.is_monotonic_decreasing
_is_unique = Index.is_unique
_join_precedence = 10
def _with_freq(self, freq):
arr = self._data._with_freq(freq)
return type(self)._simple_new(arr, name=self._name)
def is_type_compatible(self, kind: str) -> bool:
warnings.warn(
f"{type(self).__name__}.is_type_compatible is deprecated and will be "
"removed in a future version.",
FutureWarning,
stacklevel=find_stack_level(),
)
return kind in self._data._infer_matches
@property
def values(self) -> np.ndarray:
# NB: For Datetime64TZ this is lossy
return self._data._ndarray
# --------------------------------------------------------------------
# Set Operation Methods
@cache_readonly
def _as_range_index(self) -> RangeIndex:
# Convert our i8 representations to RangeIndex
# Caller is responsible for checking isinstance(self.freq, Tick)
freq = cast(Tick, self.freq)
tick = freq.delta.value
rng = range(self[0].value, self[-1].value + tick, tick)
return RangeIndex(rng)
import pandas as pd
import matplotlib.pyplot as plt
import data
import testing_data
import statistics
import numpy as np
pd.set_option('display.max_columns', None)
def findWaitingTime(arrival_time, processes, total_processes, burst_time, waiting_time, quantum):
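    # Round Robin simulation: repeatedly cycle over the processes, giving each arrived
    # process at most `quantum` time units of CPU; when a process finishes, its waiting
    # time is completion_time - burst_time - arrival_time. Loop until every remaining
    # burst time has reached zero.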
rem_bt = [0] * total_processes
for i in range(total_processes):
rem_bt[i] = burst_time[i]
t = 0
while (1):
done = True
for i in range(total_processes):
var = True
if (rem_bt[i] > 0):
done = False
if (arrival_time[i] <= t):
if (rem_bt[i] > quantum):
t += quantum
rem_bt[i] -= quantum
else:
t = t + rem_bt[i]
waiting_time[i] = t - burst_time[i] - arrival_time[i]
rem_bt[i] = 0
else:
t += 1
var = False
if var == False:
break
if (done == True):
break
def fairnessFunc(waiting_time):
    largest_diff = 0
    for i in range(len(waiting_time) - 1):
        diff = abs(waiting_time[i] - waiting_time[i + 1])
        if diff > largest_diff:
            largest_diff = diff
    return largest_diff
def findTurnAroundTime(arrival_time, processes, total_processes, burst_time, waiting_time, turnaroundtime):
for i in range(total_processes):
turnaroundtime[i] = burst_time[i] + waiting_time[i]
def findavgTime(arrival_time, processes, total_processes, burst_time, quantum):
waiting_time = [0] * total_processes
turnaroundtime = [0] * total_processes
findWaitingTime(arrival_time, processes, total_processes, burst_time, waiting_time, quantum)
findTurnAroundTime(arrival_time, processes, total_processes, burst_time, waiting_time, turnaroundtime)
total_waitingtime = []
total_turnaroundtime = []
total_wt = 0
total_tat = 0
for i in range(total_processes):
total_wt = total_wt + waiting_time[i]
total_tat = total_tat + turnaroundtime[i]
total_waitingtime.append(total_wt)
total_turnaroundtime.append(total_tat)
avg_wt = total_wt / total_processes
avg_tat = total_tat / total_processes
process_df = pd.DataFrame()
# process_df['process_id'] = processes
process_df['burst_time'] = burst_time
process_df['arrival_time'] = arrival_time
process_df['total_waitingtime'] = total_waitingtime
process_df['total_turnarounftime'] = total_turnaroundtime
#####
diff_list = []
# largest_diff_list = []
for i in range(len(total_waitingtime)-1):
diff = abs(total_waitingtime[i] - total_waitingtime[i + 1])
diff_list.append(diff)
# process_df['diff_waiting_time'] = diff_list
largest_diff = max(diff_list)
# largest_diff_list.append(largest_diff)
# print(largest_diff_list)
return process_df, avg_tat, avg_wt, largest_diff
def plotGraphs(quantum_df, i):
quantum_df = quantum_df.sort_values('quantum')
plt.plot('quantum', 'fairness_score', data=quantum_df, color='magenta', label = 'fair_wt')
plt.plot('quantum', 'average_waitingtime', data=quantum_df, color='blue', label ="avg_wt")
plt.legend()
plt.title('train_set_' + str(i))
plt.grid()
plt.xlabel('quantum value')
plt.ylabel('time')
plt.tight_layout()
plt.savefig('train_set_'+ str(i) +'.png')
plt.show()
# quantum_df.plot.scatter(x = 'quantum', y = 'fair_waitingtime')
# plt.show()
quantum_assignment_df = pd.DataFrame()
quantum_df = pd.DataFrame()
quantum_df_1 = pd.DataFrame()
list_dataframes = []
# Driver code
i = 1
if __name__ == "__main__":
for train_set, at_set in zip(data.training, data.arrival_time):
len_set = len(train_set)
process_id = [i for i in range(0, len_set)]
total_processes = len(process_id)
burst_time = train_set
burst_time = list(map(int, burst_time))
arrival_time = at_set
arrival_time = list(map(int, at_set))
#quantum_list = [i for i in range(2, 100)]
quantum_list_1 = [i for i in range(2, 9)]
quantum_list_2 = [i for i in range(2, 90)]
quantum_list = []
quantum_list.append(list(quantum_list_1))
quantum_list.append(list(quantum_list_2))
avg_wt_list = []
avg_tat_list = []
largest_diff_list = []
if train_set == data.training[0]:
for quantum in quantum_list[0]:
print('-------------------------------Quantum Value: '+str(quantum))
process_df, avg_tat, avg_wt, largest_diff = findavgTime(arrival_time, process_id, total_processes, burst_time, quantum)
largest_diff_list.append(largest_diff)
avg_wt_list.append(avg_wt)
avg_tat_list.append(avg_tat)
print(process_df)
quantum_df['quantum'] = quantum_list[0]
quantum_df['average_waitingtime'] = avg_wt_list
fair_waitingtime_list = [abs(avg_wt - largest_diff) for avg_wt, largest_diff in zip(avg_wt_list, largest_diff_list) ]
quantum_df['fairness_score'] = fair_waitingtime_list
quantum_df['average_turnaroundtime'] = avg_tat_list
quantum_df = quantum_df.sort_values('fairness_score')
print(quantum_df)
list_dataframes.append(quantum_df)
train_set_names = ['train_set_1', 'train_set_2', 'train_set_3', 'train_set_4', 'train_set_5', 'train_set_6', 'train_set_7', 'train_set_8', 'train_set_9', 'train_set_10', 'train_set_11', 'train_set_12', 'train_set_13', 'train_set_14', 'train_set_15', 'train_set_16']
quantum_assignment_df = quantum_assignment_df.append(quantum_df.iloc[0], ignore_index=True)
plotGraphs(quantum_df, i)
i += 1
else:
for quantum in quantum_list[1]:
print('-------------------------------Quantum Value: ' + str(quantum))
process_df, avg_tat, avg_wt, largest_diff = findavgTime(arrival_time, process_id, total_processes, burst_time, quantum)
largest_diff_list.append(largest_diff)
avg_wt_list.append(avg_wt)
avg_tat_list.append(avg_tat)
print(process_df)
quantum_df_1['quantum'] = quantum_list[1]
quantum_df_1['average_waitingtime'] = avg_wt_list
fair_waitingtime_list = [abs(avg_wt - largest_diff) for avg_wt, largest_diff in
zip(avg_wt_list, largest_diff_list)]
quantum_df_1['fairness_score'] = fair_waitingtime_list
quantum_df_1['average_turnaroundtime'] = avg_tat_list
quantum_df_1 = quantum_df_1.sort_values('fairness_score')
print(quantum_df_1)
list_dataframes.append(quantum_df_1)
train_set_names = ['train_set_1', 'train_set_2', 'train_set_3', 'train_set_4', 'train_set_5', 'train_set_6', 'train_set_7', 'train_set_8', 'train_set_9', 'train_set_10', 'train_set_11', 'train_set_12', 'train_set_13', 'train_set_14', 'train_set_15', 'train_set_16']
quantum_assignment_df = quantum_assignment_df.append(quantum_df_1.iloc[0], ignore_index = True)
plotGraphs(quantum_df_1, i)
i+=1
quantum_assignment_df.index = ['train_set_1', 'train_set_2', 'train_set_3', 'train_set_4', 'train_set_5', 'train_set_6', 'train_set_7', 'train_set_8', 'train_set_9', 'train_set_10', 'train_set_11', 'train_set_12', 'train_set_13', 'train_set_14', 'train_set_15', 'train_set_16' ]
print(quantum_assignment_df)
test_process = testing_data.testing_set_b
test_process_atd = testing_data.atd_set_testa
train_processes = data.training
test_processes = []
test_processes_atd = []
for _ in range(16):
test_processes.append(list(test_process))
for _ in range(16):
test_processes_atd.append(list(test_process_atd))
quantum_assignment_df.reset_index(inplace=True)
stats_df = pd.DataFrame()
stats_df['train_bt'] = train_processes
stats_df['test_bt'] = test_processes
stats_df['train_bt'] = stats_df['train_bt'].apply(lambda x: list(map(int, x)))
stats_df['test_bt'] = stats_df['test_bt'].apply(lambda x: list(map(int, x)))
#######
stats_df['train_atd'] = data.diff_at
stats_df['test_atd'] = test_processes_atd
stats_df['train_atd'] = stats_df['train_atd'].apply(lambda x: list(map(int, x)))
stats_df['test_atd'] = stats_df['test_atd'].apply(lambda x: list(map(int, x)))
########
stats_df['train_mean_bt'] = stats_df['train_bt'].apply(lambda x: statistics.mean(x))
stats_df['test_mean_bt'] = stats_df['test_bt'].apply(lambda x: statistics.mean(x))
########
stats_df['train_mean_atd'] = stats_df['train_atd'].apply(lambda x: statistics.mean(x))
stats_df['test_mean_atd'] = stats_df['test_atd'].apply(lambda x: statistics.mean(x))
########
stats_df['allocated_quantum'] = quantum_assignment_df['quantum']
stats_df['mean_difference_bt'] = stats_df.apply(lambda x: np.linalg.norm(x['train_mean_bt'] - x['test_mean_bt']), axis=1)
stats_df['mean_difference_at'] = stats_df.apply(lambda x: np.linalg.norm(x['train_mean_atd'] - x['test_mean_atd']), axis=1)
stats_df['min_pair'] = stats_df['mean_difference_at'] + stats_df['mean_difference_bt']
stats_df = stats_df.sort_values(by='min_pair')
stats_df = stats_df.reset_index(drop = True)
print(stats_df)
###FINAL RESULT###
print('RESULTING DATAFRAME')
result_df = pd.DataFrame(columns = ['test_set', 'allocated_quantum'])
result_df.loc[0,['test_set','allocated_quantum']] = stats_df.loc[0,['test_bt','allocated_quantum']].values
print(result_df)
##########################AT##############################
#------------------Plot-------------------#
data_boxplot_bt = data.box_plot_bt
test_set_bt = testing_data.testing_set_b
data_boxplot_bt.append(test_set_bt)
plot_df_bt = pd.DataFrame(data_boxplot_bt)
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
from sklearn.preprocessing import LabelEncoder
#import data set file
data = pd.read_csv('500_Person_Gender_Height_Weight_Index.csv')
###############################################################################################
#### Initialization
import pandas as pd
import numpy as np
df = pd.read_csv(filename, header=None, names=col_names, na_values={'col_name':['-1']}, \
parse_dates=[[0, 1, 2]], index_col='Date')
# if the first 3 columns are 'year','month','day', then the dataframe would have a single col named
# 'year_month_day' of datatype 'datatime64[ns]'
# Can use df.index = df['year_month_day'] to reassign this col as the index of df
## EDA == Exploratory Data Analysis
###############################################################################################
#### Basic data exploration
df.shape # shape of dataframe
df.head(7) # print the head part of dataset
df.tail(5) # print the tail part of dataset
df.info() # return data type of each column, and number of non-null values
df.count() # count items for each column
df.describe() # summary stat of numerical data
# df.mean(), df.median(), df.std(), df.quantile([0.25, 0.75]), df.min(), df.max()
df['one_col_name'].unique() # unique values in a column
df['one_col_name'].value_counts(dropna=False) # return frequency counts of a column
df['one_col_name'].value_counts(dropna=False).head() # note the result of prev line is a pandas Series
df.idxmax(axis=0) # Or use axis='index'
df.idxmin(axis=1) # Or use axis='columns'
# indexes of max/min vals for each column/row
###############################################################################################
#### Row & column index manipulation
df.columns # names of all the columns, usually class of Index
# can be assigned with a list of new names.
df.index # row indexes; can be assigned with a list of new indexes. Can be of class Index or DatetimeIndex
df.index = df.index.map(str.lower) # use map to transform the index with a function
# pandas Index objects are immutable. Must reset the whole indexes of df at once
df = df.set_index(['col1', 'col2']) # change to multiple index (index being of class MultiIndex)
df = df.sort_index() # change multiple index to hierarchical index
# use tuple to slice multiple index
# use slice(None) to indicate ":" in the tuple
# more advanced manipulation of multiple indexes = stacking and unstacking
# please refer to datacamp course "Manipulating DataFrames with pandas"
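# e.g. slicing a hierarchical (multi) index -- illustrative, assumes index levels ('col1', 'col2'):
df.loc[('lv0_value', 'lv1_value')] # single row selected by the full index tuple
df.loc[(slice(None), 'lv1_value'), :] # ":" on level 0, a fixed value on level 1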
df.reindex(ordered_index) # order rows by original index with the order in ordered_index
# ordered_index = somehow ordered list of original df indices
# if some item in ordered_index is not in orig_df_indices, there would be a row with that index but NA values
df.sort_index()
###############################################################################################
#### Data visualization for inspection
# use Bar plots for discrete data counts
# use Histograms for continuous data counts
df['one_col_name'].plot('hist')
import matplotlib.pyplot as plt
plt.show()
df.boxplot(column='one_numerical_col', by='one_categorical_col') # two columns are involved
df.boxplot(column='population', by='continent') # example of above
###############################################################################################
#### Data extraction & assignment (general)
## direct column access by column name
df["country"] # This is 1D labeled array (class: pandas.core.series.Series)
df[["country"]] # This is dataframe (class: pandas.core.frame.DataFrame)
## row/column access by (built-in) numerircal indexes
df[1:2] # single row as a dataframe...
# Note: row slicing cannot use a single number, which would be regarded as a col name
df.iloc[1] # row as pandas Series
df.iloc[[1, 2, 3]]
df.iloc[[1,2,3], [0, 1]]
df.iloc[:, [0,1]]
## row/column access by labels
df.loc["RU"] # row as Pandas Series
df.loc[["RU", "IN", "CH"]] # row as Pandas dataframe
df.loc[["RU", "IN", "CH"], ["country", "capital"]]
df.loc[:, ["country", "capital"]]
## filtering
df[df["area"] > 8]
df[np.logical_and(df["area"] > 8, df["area"] < 10)] # or use the next line
df[(df["area"] > 8 & df["area"] < 10)]
df[np.logical_or(df["area"] < 8, df["area"] > 10)] # or use the next line
df[(df["area"] < 8 | df["area"] > 10)]
## extract df values as ndarrays
data_array = df.values # extract the values as ndarray
col_array = df['col_name'].values # extract column values as ndarray
np.concatenate([arr1, arr2], axis=1)
## create new columns
df['new_col'] = df['existing_col'].str[0] # extract 1st char of 'existing_col' and save as 'new_col' in df
# note that 'str' here is an attribute name
df['str_split'] = df['existing_col'].str.split('_') # split string with '_' and save as 'str_split' col
df['new_col0'] = df['str_split'].str.get(0)
df['new_col1'] = df['str_split'].str.get(1)
df['new_col'] = df['col_name'].str.upper()
df['new_mask_col'] = df['col_name'].str.contains('given_substring') # Boolean data
for label, row in df.iterrows():
df.loc[label, "new_col"] = len(row["country"]) # added a new column "new_col" as function of existing data
df["new_col"] = df["country"].apply(len)
df['new_col'] = 0.0 # assign values with broadcasting
## create new copies of existing dataframes
df2 = df.copy()
sorted_df = df.sort_values('col_name') # sort rows (including index) by values in col 'col_name'
## modify existing entries
df.iloc[::3, -1] = np.nan # assign values with broadcasting
## delete row/column
del df['col_name']
df.drop(['col_name1', 'col_name2'], axis=1)
df.drop([1, 2]) # delete rows by numerical indexes
df.drop(index='row_ind') # delete rows by row index
## manage data types
df['treatment b'] = df['treatment b'].astype(str)
df['sex'] = df['sex'].astype('category')
df['treatment a'] = pd.to_numeric(df['treatment a'], errors='coerce') # force conversion
## manage duplicate rows
df = df.drop_duplicates() # drop duplicate rows
## manage missing data (NA/null/NaN)
df_dropped = df.dropna(how='any') # drop rows with NaN values
df['sex'] = df['sex'].fillna(obj_to_fill) # in 'sex' column, fill NaN with obj_to_fill (e.g. mean value)
checker_df = df.notnull() # boolean for each entry of the dataframe
checker_df_reverse = df.isnull() # boolean for each entry of the dataframe
checker_each_col = df.notnull().all() # aggregated for each column
checker_each_col_reverse = df.isnull().any() # aggregated for each column
checker_col = df.one_col_name.notnull() # boolean for the col "one_col_name"
###############################################################################################
#### tidy data
# tidy data principle: rows contain observations, columns form variables
# pd.melt(): solve the problem of columns (names) containing values, instead of variables
# ... by turning columns into rows
new_df = pd.melt(frame=df, id_vars=list_names_cols, value_vars=['treatment a', 'treatment b'], \
var_name='treatment', value_name='result')
# the columns in list_names_cols remain unchanged
# the 'treatment a' and 'treatment b' cols become values of a new col called 'treatment'
# the original table values are collected as values of a new col called 'result'
# pivot: opposite of melting
# ... by taking unique values from a column and create new columns
weather_tidy = weather.pivot(index='date', columns='element', values='value')
# the levels in 'element' column become new col names
# if the values are not specified or multiple, the new columns would become hierarchical index
# if there is duplicate conflict, use aggregate function
weather_tidy = weather.pivot_table(index='date', columns='element', values='value', aggfunc=np.mean)
# more advanced manipulation of multiple indexes = stacking and unstacking
# please refer to datacamp course "Manipulating DataFrames with pandas"
###############################################################################################
#### Data (table) joining/concatenation (like in SQL)
## concatenate dataframes
vertical_stacked = df1.append(df2) # indices are also stacked
vertical_stacked.reset_index(drop=True) # result would be the same as the following line
vertical_stacked = pd.concat([df1, df2], axis=0, ignore_index=True) # new indexes range from 0 to n_tot
hori_cat = pd.concat([df1, df2], axis=1, join='outer') # rows with the same index would be merged to single row. cols are stacked
hori_cat = pd.concat([df1, df2], axis=1, join='inner') # only return rows with index in both df1 and df2
df1.join(df2, how='inner/outer/left/right') # join by index
## concatenate lots of tables
import glob
csv_files = glob.glob('*.csv')
list_data = [pd.read_csv(filename) for filename in csv_files]
pd.concat(list_data)
## merge data (index is usually ignored)
pd.merge(left=df_state_populations, right=df_state_codes, on=None, left_on='state', right_on='name')
pd.merge(df_bronze, df_gold, on=['NOC', 'Country'], suffixes=['_bronze', '_gold'])
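# choose the join type with how= (illustrative; default is how='inner')
pd.merge(df1, df2, on='key_col', how='inner') # keep only keys present in both frames
pd.merge(df1, df2, on='key_col', how='outer') # keep all keys, missing values become NaN
pd.merge(df1, df2, on='key_col', how='left') # keep every row of df1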
from __future__ import division
from functools import wraps
import pandas as pd
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventually be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = pd.Series([], dtype='object')
self.com_name = pd.Series([], dtype='object')
self.taxa = pd.Series([], dtype='object')
self.order = pd.Series([], dtype='object')
self.usfws_id = pd.Series([], dtype='object')
self.body_wgt = pd.Series([], dtype='object')
self.diet_item = pd.Series([], dtype='object')
self.h2o_cont = pd.Series([], dtype='float')
def read_species_properties(self):
# this is a temporary method to initiate the species/diet food items lists (this will be replaced with
# a method to access a SQL database containing the properties
#filename = './ted/tests/TEDSpeciesProperties.csv'
filename = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filename,'rt') as csvfile:
# csv.DictReader uses first line in file for column headings by default
dr = pd.read_csv(csvfile) # comma is default delimiter
except csv.Error as e:
sys.exit('file: %s, %s' % (filename, e))
print(dr)
self.sci_name = dr.loc[:, 'Scientific Name']
self.com_name = dr.loc[:, 'Common Name']
self.taxa = dr.loc[:, 'Taxa']
self.order = dr.loc[:, 'Order']
self.usfws_id = dr.loc[:, 'USFWS Species ID (ENTITY_ID)']
self.body_wgt = dr.loc[:, 'BW (g)']
self.diet_item = dr.loc[:, 'Food item']
self.h2o_cont = dr.loc[:, 'Water content of diet']
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
self.chemical_name = pd.Series([], dtype="object", name="chemical_name")
# application parameters for min/max application scenarios
self.crop_min = pd.Series([], dtype="object", name="crop")
self.app_method_min = pd.Series([], dtype="object", name="app_method_min")
self.app_rate_min = pd.Series([], dtype="float", name="app_rate_min")
self.num_apps_min = pd.Series([], dtype="int", name="num_apps_min")
self.app_interval_min = pd.Series([], dtype="int", name="app_interval_min")
self.droplet_spec_min = pd.Series([], dtype="object", name="droplet_spec_min")
self.boom_hgt_min = pd.Series([], dtype="object", name="droplet_spec_min")
self.pest_incorp_depth_min = pd.Series([], dtype="object", name="pest_incorp_depth")
self.crop_max = pd.Series([], dtype="object", name="crop")
self.app_method_max = pd.Series([], dtype="object", name="app_method_max")
self.app_rate_max = pd.Series([], dtype="float", name="app_rate_max")
self.num_apps_max = pd.Series([], dtype="int", name="num_app_maxs")
self.app_interval_max = pd.Series([], dtype="int", name="app_interval_max")
self.droplet_spec_max = pd.Series([], dtype="object", name="droplet_spec_max")
self.boom_hgt_max = pd.Series([], dtype="object", name="droplet_spec_max")
self.pest_incorp_depth_max = pd.Series([], dtype="object", name="pest_incorp_depth")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = pd.Series([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = pd.Series([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = pd.Series([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = pd.Series([], dtype="float", name="frac_retained_birds")
self.log_kow = pd.Series([], dtype="float", name="log_kow")
self.koc = pd.Series([], dtype="float", name="koc")
self.solubility = pd.Series([], dtype="float", name="solubility")
self.henry_law_const = pd.Series([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_mean = pd.Series([], dtype="float", name="aq_plant_algae_bcf_mean")
self.aq_plant_algae_bcf_upper = pd.Series([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_mean = pd.Series([], dtype="float", name="inv_bcf_mean")
self.inv_bcf_upper = pd.Series([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_mean = pd.Series([], dtype="float", name="fish_bcf_mean")
self.fish_bcf_upper = pd.Series([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = pd.Series([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = pd.Series([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# naming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = pd.Series([], dtype="float", name="dbt_mamm_low_ld50")
self.dbt_mamm_rat_oral_ld50 = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_rat_derm_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = pd.Series([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_low_ld50_wgt")
self.dbt_mamm_rat_oral_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_direct_wgt")
self.dbt_mamm_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect_wgt")
# dose based toxicity(dbt): birds (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_bird_1inmill_mort = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort")
self.dbt_bird_1inten_mort = pd.Series([], dtype="float", name="dbt_bird_1inten_mort")
self.dbt_bird_low_ld50 = pd.Series([], dtype="float", name="dbt_bird_low_ld50")
self.dbt_bird_hc05 = pd.Series([], dtype="float", name="dbt_bird_hc05")
self.dbt_bird_hc50 = pd.Series([], dtype="float", name="dbt_bird_hc50")
self.dbt_bird_hc95 = pd.Series([], dtype="float", name="dbt_bird_hc95")
self.dbt_bird_sub_direct = pd.Series([], dtype="float", name="dbt_bird_sub_direct")
self.dbt_bird_sub_indirect = pd.Series([], dtype="float", name="dbt_bird_sub_indirect")
self.mineau_sca_fact = pd.Series([], dtype="float", name="mineau_sca_fact")
self.dbt_bird_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort_wgt")
self.dbt_bird_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inten_mort_wgt")
self.dbt_bird_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_bird_low_ld50_wgt")
self.dbt_bird_hc05_wgt = pd.Series([], dtype="float", name="dbt_bird_hc05_wgt")
self.dbt_bird_hc50_wgt = pd.Series([], dtype="float", name="dbt_bird_hc50_wgt")
self.dbt_bird_hc95_wgt = pd.Series([], dtype="float", name="dbt_bird_hc95_wgt")
self.dbt_bird_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_direct_wgt")
self.dbt_bird_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_indirect_wgt")
self.mineau_sca_fact_wgt = pd.Series([], dtype="float", name="mineau_sca_fact_wgt")
# dose based toxicity(dbt): reptiles, terrestrial-phase amphibians (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort")
self.dbt_reptile_1inten_mort = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort")
self.dbt_reptile_low_ld50 = pd.Series([], dtype="float", name="dbt_reptile_low_ld50")
self.dbt_reptile_sub_direct = pd.Series([], dtype="float", name="dbt_reptile_sub_direct")
self.dbt_reptile_sub_indirect = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect")
self.dbt_reptile_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort_wgt")
self.dbt_reptile_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort_wgt")
self.dbt_reptile_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_reptile_low_ld50_wgt")
self.dbt_reptile_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_direct_wgt")
self.dbt_reptile_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect_wgt")
# concentration-based toxicity (cbt) : mammals (mg-pest/kg-diet food)
self.cbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="cbt_mamm_1inmill_mort")
self.cbt_mamm_1inten_mort = pd.Series([], dtype="float", name="cbt_mamm_1inten_mort")
self.cbt_mamm_low_lc50 = pd.Series([], dtype="float", name="cbt_mamm_low_lc50")
self.cbt_mamm_sub_direct = pd.Series([], dtype="float", name="cbt_mamm_sub_direct")
self.cbt_mamm_grow_noec = pd.Series([], dtype="float", name="cbt_mamm_grow_noec")
self.cbt_mamm_grow_loec = pd.Series([], dtype="float", name="cbt_mamm_grow_loec")
self.cbt_mamm_repro_noec = pd.Series([], dtype="float", name="cbt_mamm_repro_noec")
self.cbt_mamm_repro_loec = pd.Series([], dtype="float", name="cbt_mamm_repro_loec")
self.cbt_mamm_behav_noec = pd.Series([], dtype="float", name="cbt_mamm_behav_noec")
self.cbt_mamm_behav_loec = pd.Series([], dtype="float", name="cbt_mamm_behav_loec")
self.cbt_mamm_sensory_noec = pd.Series([], dtype="float", name="cbt_mamm_sensory_noec")
self.cbt_mamm_sensory_loec = pd.Series([], dtype="float", name="cbt_mamm_sensory_loec")
self.cbt_mamm_sub_indirect = pd.Series([], dtype="float", name="cbt_mamm_sub_indirect")
# concentration-based toxicity (cbt) : birds (mg-pest/kg-diet food)
self.cbt_bird_1inmill_mort = pd.Series([], dtype="float", name="cbt_bird_1inmill_mort")
self.cbt_bird_1inten_mort = pd.Series([], dtype="float", name="cbt_bird_1inten_mort")
self.cbt_bird_low_lc50 = pd.Series([], dtype="float", name="cbt_bird_low_lc50")
self.cbt_bird_sub_direct = pd.Series([], dtype="float", name="cbt_bird_sub_direct")
self.cbt_bird_grow_noec = pd.Series([], dtype="float", name="cbt_bird_grow_noec")
self.cbt_bird_grow_loec = pd.Series([], dtype="float", name="cbt_bird_grow_loec")
self.cbt_bird_repro_noec = pd.Series([], dtype="float", name="cbt_bird_repro_noec")
self.cbt_bird_repro_loec = pd.Series([], dtype="float", name="cbt_bird_repro_loec")
self.cbt_bird_behav_noec = pd.Series([], dtype="float", name="cbt_bird_behav_noec")
self.cbt_bird_behav_loec = pd.Series([], dtype="float", name="cbt_bird_behav_loec")
self.cbt_bird_sensory_noec = pd.Series([], dtype="float", name="cbt_bird_sensory_noec")
self.cbt_bird_sensory_loec = pd.Series([], dtype="float", name="cbt_bird_sensory_loec")
self.cbt_bird_sub_indirect = pd.Series([], dtype="float", name="cbt_bird_sub_indirect")
# concentration-based toxicity (cbt) : reptiles, terrestrial-phase amphibians (mg-pest/kg-diet food)
self.cbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="cbt_reptile_1inmill_mort")
self.cbt_reptile_1inten_mort = pd.Series([], dtype="float", name="cbt_reptile_1inten_mort")
self.cbt_reptile_low_lc50 = pd.Series([], dtype="float", name="cbt_reptile_low_lc50")
self.cbt_reptile_sub_direct = pd.Series([], dtype="float", name="cbt_reptile_sub_direct")
self.cbt_reptile_grow_noec = pd.Series([], dtype="float", name="cbt_reptile_grow_noec")
self.cbt_reptile_grow_loec = pd.Series([], dtype="float", name="cbt_reptile_grow_loec")
self.cbt_reptile_repro_noec = pd.Series([], dtype="float", name="cbt_reptile_repro_noec")
self.cbt_reptile_repro_loec = pd.Series([], dtype="float", name="cbt_reptile_repro_loec")
self.cbt_reptile_behav_noec = pd.Series([], dtype="float", name="cbt_reptile_behav_noec")
self.cbt_reptile_behav_loec = pd.Series([], dtype="float", name="cbt_reptile_behav_loec")
self.cbt_reptile_sensory_noec = pd.Series([], dtype="float", name="cbt_reptile_sensory_noec")
self.cbt_reptile_sensory_loec = pd.Series([], dtype="float", name="cbt_reptile_sensory_loec")
self.cbt_reptile_sub_indirect = pd.Series([], dtype="float", name="cbt_reptile_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body weight (mg-pest/kg-bw(ww))
self.cbt_inv_bw_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inmill_mort")
self.cbt_inv_bw_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inten_mort")
self.cbt_inv_bw_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_bw_low_lc50")
self.cbt_inv_bw_sub_direct = pd.Series([], dtype="float", name="cbt_inv_bw_sub_direct")
self.cbt_inv_bw_grow_noec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_noec")
self.cbt_inv_bw_grow_loec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_loec")
self.cbt_inv_bw_repro_noec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_noec")
self.cbt_inv_bw_repro_loec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_loec")
self.cbt_inv_bw_behav_noec = pd.Series([], dtype="float", name="cbt_inv_bw_behav_noec")
self.cbt_inv_bw_behav_loec = pd.Series([], dtype="float", name="cbt_inv_bw_behav_loec")
self.cbt_inv_bw_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_bw_sensory_noec")
self.cbt_inv_bw_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_bw_sensory_loec")
self.cbt_inv_bw_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_bw_sub_indirect")
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import os
from string import ascii_letters
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.testing._utils import DATETIME_TYPES, NUMERIC_TYPES, assert_eq
try:
import tables # noqa F401
except ImportError:
pytest.skip(
"PyTables is not installed and is required for HDF reading/writing",
allow_module_level=True,
)
@pytest.fixture(params=[0, 1, 10, 100])
def pdf(request):
types = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
renamer = {
"C_l0_g" + str(idx): "col_" + val for (idx, val) in enumerate(types)
}
typer = {"col_" + val: val for val in types}
ncols = len(types)
nrows = request.param
# Create a pandas dataframe with random data of mixed types
test_pdf = pd._testing.makeCustomDataframe(
nrows=nrows, ncols=ncols, data_gen_f=lambda r, c: r, r_idx_type="i"
)
# Delete the name of the column index, and rename the row index
test_pdf.columns.name = None
test_pdf.index.name = "test_index"
# Cast all the column dtypes to objects, rename them, and then cast to
# appropriate types
test_pdf = (
test_pdf.astype("object")
.rename(renamer, axis=1)
.astype(typer)
.rename({"col_datetime64[ms]": "col_datetime64"}, axis=1)
)
# Create non-numeric categorical data otherwise may be typecasted
data = [ascii_letters[np.random.randint(0, 52)] for i in range(nrows)]
test_pdf["col_category"] = pd.Series(data, dtype="category")
return (test_pdf, nrows)
@pytest.fixture
def gdf(pdf):
pdf, nrows = pdf
return (cudf.DataFrame.from_pandas(pdf), nrows)
@pytest.fixture(params=["fixed", "table"])
def hdf_files(request, tmp_path_factory, pdf):
pdf, nrows = pdf
if request.param == "fixed":
pdf = pdf.drop("col_category", axis=1)
fname_df = tmp_path_factory.mktemp("hdf") / "test_df.hdf"
pdf.to_hdf(fname_df, "hdf_df_tests", format=request.param)
fname_series = {}
for column in pdf.columns:
fname_series[column] = (
tmp_path_factory.mktemp("hdf") / "test_series.hdf"
)
pdf[column].to_hdf(
fname_series[column], "hdf_series_tests", format=request.param
)
return (fname_df, fname_series, request.param, nrows)
@pytest.mark.filterwarnings("ignore:Using CPU")
@pytest.mark.filterwarnings("ignore:Strings are not yet supported")
@pytest.mark.parametrize(
"columns",
[["col_int8"], ["col_category"], ["col_int32", "col_float32"], None],
)
def test_hdf_reader(hdf_files, columns):
hdf_df_file, hdf_series, format, nrows = hdf_files
if format == "fixed" and columns is not None:
pytest.skip("Can't use columns with format 'fixed'")
if format == "table" and nrows == 0:
pytest.skip("Can't read 0 row table with format 'table'")
expect_df = pd.read_hdf(hdf_df_file, columns=columns)
got_df = cudf.read_hdf(hdf_df_file, columns=columns)
assert_eq(
expect_df, got_df, check_categorical=False, check_index_type=False
)
for column in hdf_series.keys():
expect_series = pd.read_hdf(hdf_series[column])
got_series = cudf.read_hdf(hdf_series[column])
assert_eq(expect_series, got_series, check_index_type=False)
@pytest.mark.parametrize("format", ["fixed", "table"])
@pytest.mark.parametrize("complib", ["zlib", "bzip2", "lzo", "blosc"])
@pytest.mark.filterwarnings("ignore:Using CPU")
def test_hdf_writer(tmpdir, pdf, gdf, complib, format):
pdf, nrows = pdf
gdf, _ = gdf
if format == "fixed":
pdf = pdf.drop("col_category", axis=1)
gdf = gdf.drop("col_category", axis=1)
pdf_df_fname = tmpdir.join("pdf_df.hdf")
gdf_df_fname = tmpdir.join("gdf_df.hdf")
pdf.to_hdf(pdf_df_fname, "hdf_tests", format=format, complib=complib)
gdf.to_hdf(gdf_df_fname, "hdf_tests", format=format, complib=complib)
assert os.path.exists(pdf_df_fname)
assert os.path.exists(gdf_df_fname)
if format == "table" and nrows == 0:
pytest.skip("Can't read 0 row table with format 'table'")
expect = pd.read_hdf(pdf_df_fname)
got = pd.read_hdf(gdf_df_fname)
assert_eq(expect, got, check_index_type=False)
for column in pdf.columns:
pdf_series_fname = tmpdir.join(column + "_" + "pdf_series.hdf")
gdf_series_fname = tmpdir.join(column + "_" + "gdf_series.hdf")
pdf[column].to_hdf(
pdf_series_fname, "hdf_tests", format=format, complib=complib
)
gdf[column].to_hdf(
gdf_series_fname, "hdf_tests", format=format, complib=complib
)
assert os.path.exists(pdf_series_fname)
assert os.path.exists(gdf_series_fname)
expect_series = pd.read_hdf(pdf_series_fname)
got_series = pd.read_hdf(gdf_series_fname)
import pickle
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from matplotlib import rc
import seaborn as sns
import os
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import LeaveOneOut
from sklearn.neighbors import KernelDensity
from scipy.stats import gaussian_kde
import pandas as pd
import glob
import scipy.stats
# Add the module to source path
dirname = os.path.dirname(os.path.realpath(__file__))
source_path = dirname + "/portfolio"
sys.path.append(source_path)
from portfolio.plotting import plot_comparison
def isin(array, substring1, substring2=None):
"""
Finds entries in array where substring1 is present, while substring2 is NOT present.
https://stackoverflow.com/a/38974252/2653663
"""
if substring2 is None:
return np.flatnonzero(np.core.defchararray.find(array,substring1)!=-1)
return np.flatnonzero((np.core.defchararray.find(array,substring1)!=-1) & (np.core.defchararray.find(array,substring2)==-1))
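# Minimal illustration (hypothetical inputs) of how isin is used on the subset names below:
# >>> names = np.array(["gga", "hybrid", "hybrid+D3"])
# >>> isin(names, "hybrid", "+")
# array([1])   # contains "hybrid" but not "+"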
def load_pickle(filename):
with open(filename, "rb") as f:
d = pickle.load(f)
return d
# set matplotlib defaults
sns.set(font_scale=1.0)
sns.set_style("whitegrid",{'grid.color':'.92','axes.edgecolor':'0.92'})
rc('text', usetex=False)
def get_best_params(params):
"""
Attempts to get the best set of parameters
from a list of dictionaries containing the
parameters that resulted in the best cv score
"""
# Preprocess a bit
d = {}
for param in params:
for key,value in param.items():
if key not in d: d[key] = []
d[key].append(value)
# Select the most likely or median value
for key,value in d.items():
        # if the values are strings, choose the most common one
if isinstance(value[0], str):
best_value = max(set(value), key=value.count)
d[key] = best_value
continue
# if numeric return median
d[key] = np.median(value)
return d
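# Example (hypothetical grid-search output) of what get_best_params consumes and returns:
# >>> get_best_params([{"kernel": "rbf", "alpha": 1e-3}, {"kernel": "rbf", "alpha": 1e-2}])
# {'kernel': 'rbf', 'alpha': 0.0055}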
def get_mean_lower_and_upper_bound(x, alpha=0.95, bootstrap=False):
"""
    Calculate a confidence interval (95% by default) for the reported MAE.
    The data is assumed to follow a Laplacian (double exponential) distribution.
See https://waset.org/publications/8809/confidence-intervals-for-double-exponential-distribution-a-simulation-approach
for derivation.
"""
# Number of datapoints
m, n = x.shape
    mae = np.mean(abs(x), axis=1)
    if bootstrap:
        lb, ub = np.zeros(m), np.zeros(m)
        for i in range(m):
            X = np.random.choice(x[i], size=(n, 10000))
            boot_mae = np.mean(abs(X), axis=0)
            lb[i] = np.percentile(boot_mae, 100 - alpha * 100 / 2)
            ub[i] = np.percentile(boot_mae, alpha * 100 / 2)
    else:
        # mae is needed here, so it is computed above (previously it was referenced before assignment)
        lb = 2 * mae * n / scipy.stats.chi2.ppf((1 + alpha) / 2, 2 * n)
        ub = 2 * mae * n / scipy.stats.chi2.ppf((1 - alpha) / 2, 2 * n)
return mae, lb, ub
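# Example (hypothetical error matrix) of the expected shapes for get_mean_lower_and_upper_bound:
# >>> errors = np.random.laplace(size=(8, 100))   # 8 methods x 100 reactions
# >>> mae, lb, ub = get_mean_lower_and_upper_bound(errors, alpha=0.68, bootstrap=True)
# >>> mae.shape, lb.shape, ub.shape                # each is (8,)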
def plot_score(reaction_index="1"):
single_data = load_pickle("pickles/%s_single_result.pkl" % reaction_index)
linear_data = load_pickle("pickles/%s_linear_result.pkl" % reaction_index)
linear_positive_data = load_pickle("pickles/%s_linear_positive_result.pkl" % reaction_index)
markowitz_data = load_pickle("pickles/%s_markowitz_result.pkl" % reaction_index)
markowitz_positive_data = load_pickle("pickles/%s_markowitz_positive_result.pkl" % reaction_index)
markowitz2_data = load_pickle("pickles/%s_markowitz2_result.pkl" % reaction_index)
markowitz2_positive_data = load_pickle("pickles/%s_markowitz2_positive_result.pkl" % reaction_index)
markowitz3_data = load_pickle("pickles/%s_markowitz3_result.pkl" % reaction_index)
markowitz3_positive_data = load_pickle("pickles/%s_markowitz3_positive_result.pkl" % reaction_index)
#print(linear_data.keys())
#print(linear_data['subset_names'])
#for i, errors in enumerate(linear_data['errors']):
# print(linear_data['subset_names'][i],np.mean(abs(errors)))
#quit()
subset_names = single_data['subset_names']
#TODO figure out how method names are generated
#print(list(single_data.keys()))
#quit()
#print(subset_names)
#for i in range(15):
# print(get_best_params(linear_data['cv_params'][i]))
#quit()
# plot gga
#idx = isin(subset_names, 'hybrid', "+")
idx = np.asarray([0,1,2,3,4,5])
single_mae, single_lb, single_ub = get_mean_lower_and_upper_bound(abs(single_data['errors'][idx,:]), bootstrap=True, alpha=0.68)
linear_mae, linear_lb, linear_ub = get_mean_lower_and_upper_bound(abs(linear_data['errors'][idx,:]), bootstrap=True, alpha=0.68)
linear_positive_mae, linear_positive_lb, linear_positive_ub = get_mean_lower_and_upper_bound(abs(linear_positive_data['errors'][idx,:]), bootstrap=True, alpha=0.68)
markowitz_mae, markowitz_lb, markowitz_ub = get_mean_lower_and_upper_bound(abs(markowitz_data['errors'][idx,:]), bootstrap=True, alpha=0.68)
markowitz_positive_mae, markowitz_positive_lb, markowitz_positive_ub = get_mean_lower_and_upper_bound(abs(markowitz_positive_data['errors'][idx,:]), bootstrap=True, alpha=0.68)
markowitz2_mae, markowitz2_lb, markowitz2_ub = get_mean_lower_and_upper_bound(abs(markowitz2_data['errors'][idx,:]), bootstrap=True, alpha=0.68)
markowitz2_positive_mae, markowitz2_positive_lb, markowitz2_positive_ub = get_mean_lower_and_upper_bound(abs(markowitz2_positive_data['errors'][idx,:]), bootstrap=True, alpha=0.68)
markowitz3_mae, markowitz3_lb, markowitz3_ub = get_mean_lower_and_upper_bound(abs(markowitz3_data['errors'][idx,:]), bootstrap=True, alpha=0.68)
markowitz3_positive_mae, markowitz3_positive_lb, markowitz3_positive_ub = get_mean_lower_and_upper_bound(abs(markowitz3_positive_data['errors'][idx,:]), bootstrap=True, alpha=0.68)
#markowitz_mae = np.exp(np.mean(np.log(abs(markowitz_data['errors'][idx,:])), axis=1))
plt.fill_between(list(range(len(single_mae))), single_lb, single_ub, alpha=0.15)
plt.plot(single_mae, "o-", label="single")
plt.fill_between(list(range(len(linear_mae))), linear_lb, linear_ub, alpha=0.15)
plt.plot(linear_mae, "o-", label="linear")
plt.fill_between(list(range(len(linear_positive_mae))), linear_positive_lb, linear_positive_ub, alpha=0.15)
plt.plot(linear_positive_mae, "o-", label="linear_positive")
#plt.fill_between(list(range(len(markowitz_mae))), markowitz_lb, markowitz_ub, alpha=0.15)
#plt.plot(markowitz_mae, "o-", label="markowitz")
#plt.fill_between(list(range(len(markowitz_positive_mae))), markowitz_positive_lb, markowitz_positive_ub, alpha=0.15)
#plt.plot(markowitz_positive_mae, "o-", label="markowitz_positive")
#plt.fill_between(list(range(len(markowitz2_mae))), markowitz2_lb, markowitz2_ub, alpha=0.15)
#plt.plot(markowitz2_mae, "o-", label="markowitz2")
#plt.fill_between(list(range(len(markowitz2_positive_mae))), markowitz2_positive_lb, markowitz2_positive_ub, alpha=0.15)
#plt.plot(markowitz2_positive_mae, "o-", label="markowitz2_positive")
#plt.fill_between(list(range(len(markowitz3_mae))), markowitz3_lb, markowitz3_ub, alpha=0.15)
#plt.plot(markowitz3_mae, "o-", label="markowitz3")
#plt.fill_between(list(range(len(markowitz3_positive_mae))), markowitz3_positive_lb, markowitz3_positive_ub, alpha=0.15)
#plt.plot(markowitz3_positive_mae, "o-", label="markowitz3_positive")
plt.legend()
plt.show()
#for idx, name in zip((idx1, idx2, idx3, idx4),
# ("Dissociation", "Atom transfer", "Dissociation barrier", "Atom transfer barrier")):
# mae1 = np.mean(abs(data1['errors'][:,idx]), axis=1)
# mae2 = np.mean(abs(data2['errors'][:,idx]), axis=1)
# std1 = np.std(abs(data1['errors'][:,idx]), axis=1, ddof=1)/np.sqrt(len(idx))
# std2 = np.std(abs(data2['errors'][:,idx]), axis=1, ddof=1)/np.sqrt(len(idx))
# # Do the plot
# plt.fill_between(list(range(len(mae1))), mae1 - std1, mae1 + std1, alpha=0.15)
# plt.plot(mae1, "o-", label=label1)
# plt.fill_between(list(range(len(mae2))), mae2 - std2, mae2 + std2, alpha=0.15)
# plt.plot(mae2, "o-", label=label2)
# plt.ylabel("MAE (kcal/mol)\n")
# # Set 6 mae as upper range
# plt.ylim([0,6])
# plt.title(name)
# plt.legend()
# # Chemical accuracy line
# ax = plt.gca()
# #xmin, xmax = ax.get_xlim()
# #plt.plot([xmin, xmax], [1, 1], "--", c="k")
# ## In case the xlimit have changed, set it again
# #plt.xlim([xmin, xmax])
# # Set xtick labels
# ax.set_xticklabels(xlabels, rotation=-45, ha='left')
# if filename_base is not None:
# plt.savefig(filename_base + "_" + name.replace(" ", "_").lower() + ".pdf", pad_inches=0.0, bbox_inches = "tight", dpi = 300)
# plt.clf()
# else:
# plt.show()
def plot_score2sns(file1, file2, label1=None, label2=None, filename_base=None):
if label1 == None and label2 == None:
label1 = file1.split("/")[-1].split(".")[0]
label2 = file2.split("/")[-1].split(".")[0]
data1 = load_pickle(file1)
data2 = load_pickle(file2)
xlabels = ["", "sto-3g", "sv(p)", "svp/6-31+G(d,p)", "avdz", "tzvp", "avtz", "qzvp", "WF"]
# rclass=1 (dissociation)
idx1 = [3, 4, 5, 6, 7, 8, 17, 19, 27, 28, 29, 65, 72, 99, 100, 101]
# rclass=2 (atom transfer)
idx2 = [0, 11, 13, 15, 30, 32, 37, 40, 43, 45, 50, 52, 54, 67, 74, 76, 78, 84, 86, 88, 90, 92, 95, 97]
# rclass=3 (dissociation barrier)
idx3 = [10, 18, 20, 36, 39, 49, 66, 73]
# rclass=4 (atom transfer barrier)
idx4 = [1, 2, 9, 12, 14, 16, 21, 22, 23, 24, 25, 26, 31, 33, 34, 35, 38, 41, 42, 44, 46, 47, 48, 51, 53, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 68, 69, 70, 71, 75, 77, 79, 80, 81, 82, 83, 85, 87, 89, 91, 93, 94, 96, 98]
for idx, name in zip((idx1, idx2, idx3, idx4),
("Dissociation", "Atom transfer", "Dissociation barrier", "Atom transfer barrier")):
mae1 = np.mean(abs(data1['errors'][:,idx]), axis=1)
mae2 = np.mean(abs(data2['errors'][:,idx]), axis=1)
# Create dataframe for the seaborn plots
basis = []
error = []
method = []
x1 = abs(data1['errors'][:,idx])
x2 = abs(data2['errors'][:,idx])
basis_list = ["sto-3g", "sv(p)", "svp/6-31+G(d,p)", "avdz", "tzvp", "avtz", "qzvp", "WF"]
for i in range(x1.shape[0]):
for j in range(x1.shape[1]):
basis.append(basis_list[i])
error.append(x1[i,j])
method.append(label1)
basis.append(basis_list[i])
error.append(x2[i,j])
method.append(label2)
df = pd.DataFrame.from_dict({'basis': basis, 'MAE (kcal/mol)': error, 'method':method})
sns.stripplot(x="basis", y="MAE (kcal/mol)", data=df, hue="method", jitter=0.1, dodge=True)
plt.plot(mae1, "-", label=label1)
plt.plot(mae2, "-", label=label2)
# Set 6 mae as upper range
plt.ylim([0,6])
plt.xticks(rotation=-45, ha='left')
plt.title(name)
if filename_base is not None:
plt.savefig(filename_base + "_" + name.replace(" ", "_").lower() + ".pdf", pad_inches=0.0, bbox_inches = "tight", dpi = 300)
plt.clf()
else:
plt.show()
def plot_score2(file1, file2, label1=None, label2=None, filename_base=None):
if label1 == None and label2 == None:
label1 = file1.split("/")[-1].split(".")[0]
label2 = file2.split("/")[-1].split(".")[0]
data1 = load_pickle(file1)
data2 = load_pickle(file2)
    subset_names1 = data1['subset_names']
    xlabels = ["", "sto-3g", "sv(p)", "svp/6-31+G(d,p)", "avdz", "tzvp", "avtz", "qzvp", "WF"]
# rclass=1 (dissociation)
idx1 = [3, 4, 5, 6, 7, 8, 17, 19, 27, 28, 29, 65, 72, 99, 100, 101]
# rclass=2 (atom transfer)
idx2 = [0, 11, 13, 15, 30, 32, 37, 40, 43, 45, 50, 52, 54, 67, 74, 76, 78, 84, 86, 88, 90, 92, 95, 97]
# rclass=3 (dissociation barrier)
idx3 = [10, 18, 20, 36, 39, 49, 66, 73]
# rclass=4 (atom transfer barrier)
idx4 = [1, 2, 9, 12, 14, 16, 21, 22, 23, 24, 25, 26, 31, 33, 34, 35, 38, 41, 42, 44, 46, 47, 48, 51, 53, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 68, 69, 70, 71, 75, 77, 79, 80, 81, 82, 83, 85, 87, 89, 91, 93, 94, 96, 98]
for idx, name in zip((idx1, idx2, idx3, idx4),
("Dissociation", "Atom transfer", "Dissociation barrier", "Atom transfer barrier")):
mae1 = np.mean(abs(data1['errors'][:,idx]), axis=1)
mae2 = np.mean(abs(data2['errors'][:,idx]), axis=1)
std1 = np.std(abs(data1['errors'][:,idx]), axis=1, ddof=1)/np.sqrt(len(idx))
std2 = np.std(abs(data2['errors'][:,idx]), axis=1, ddof=1)/np.sqrt(len(idx))
# Do the plot
plt.fill_between(list(range(len(mae1))), mae1 - std1, mae1 + std1, alpha=0.15)
plt.plot(mae1, "o-", label=label1)
plt.fill_between(list(range(len(mae2))), mae2 - std2, mae2 + std2, alpha=0.15)
plt.plot(mae2, "o-", label=label2)
plt.ylabel("MAE (kcal/mol)\n")
# Set 6 mae as upper range
plt.ylim([0,6])
plt.title(name)
plt.legend()
# Chemical accuracy line
ax = plt.gca()
#xmin, xmax = ax.get_xlim()
#plt.plot([xmin, xmax], [1, 1], "--", c="k")
## In case the xlimit have changed, set it again
#plt.xlim([xmin, xmax])
# Set xtick labels
ax.set_xticklabels(xlabels, rotation=-45, ha='left')
if filename_base is not None:
plt.savefig(filename_base + "_" + name.replace(" ", "_").lower() + ".pdf", pad_inches=0.0, bbox_inches = "tight", dpi = 300)
plt.clf()
else:
plt.show()
def plot_score3sns(file1, file2, file3, label1=None, label2=None, label3=None, filename_base=None):
if label1 == None and label2 == None and label3 == None:
label1 = file1.split("/")[-1].split(".")[0]
label2 = file2.split("/")[-1].split(".")[0]
label3 = file3.split("/")[-1].split(".")[0]
data1 = load_pickle(file1)
data2 = load_pickle(file2)
data3 = load_pickle(file3)
xlabels = ["", "sto-3g", "sv(p)", "svp/6-31+G(d,p)", "avdz", "tzvp", "avtz", "qzvp", "WF"]
# rclass=1 (dissociation)
idx1 = [3, 4, 5, 6, 7, 8, 17, 19, 27, 28, 29, 65, 72, 99, 100, 101]
# rclass=2 (atom transfer)
idx2 = [0, 11, 13, 15, 30, 32, 37, 40, 43, 45, 50, 52, 54, 67, 74, 76, 78, 84, 86, 88, 90, 92, 95, 97]
# rclass=3 (dissociation barrier)
idx3 = [10, 18, 20, 36, 39, 49, 66, 73]
# rclass=4 (atom transfer barrier)
idx4 = [1, 2, 9, 12, 14, 16, 21, 22, 23, 24, 25, 26, 31, 33, 34, 35, 38, 41, 42, 44, 46, 47, 48, 51, 53, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 68, 69, 70, 71, 75, 77, 79, 80, 81, 82, 83, 85, 87, 89, 91, 93, 94, 96, 98]
for idx, name in zip((idx1, idx2, idx3, idx4),
("Dissociation", "Atom transfer", "Dissociation barrier", "Atom transfer barrier")):
mae1 = np.mean(abs(data1['errors'][:,idx]), axis=1)
mae2 = np.mean(abs(data2['errors'][:,idx]), axis=1)
mae3 = np.mean(abs(data3['errors'][:,idx]), axis=1)
# Create dataframe for the seaborn plots
basis = []
error = []
method = []
x1 = abs(data1['errors'][:,idx])
x2 = abs(data2['errors'][:,idx])
x3 = abs(data3['errors'][:,idx])
basis_list = ["sto-3g", "sv(p)", "svp/6-31+G(d,p)", "avdz", "tzvp", "avtz", "qzvp", "WF"]
for i in range(x1.shape[0]):
for j in range(x1.shape[1]):
basis.append(basis_list[i])
error.append(x1[i,j])
method.append(label1)
basis.append(basis_list[i])
error.append(x2[i,j])
method.append(label2)
basis.append(basis_list[i])
error.append(x3[i,j])
method.append(label3)
df = pd.DataFrame.from_dict({'basis': basis, 'MAE (kcal/mol)': error, 'method':method})
sns.stripplot(x="basis", y="MAE (kcal/mol)", data=df, hue="method", jitter=0.1, dodge=True)
plt.plot(mae1, "-", label=label1)
plt.plot(mae2, "-", label=label2)
plt.plot(mae3, "-", label=label3)
# Set 6 mae as upper range
plt.ylim([0,6])
plt.xticks(rotation=-45, ha='left')
plt.title(name)
if filename_base is not None:
plt.savefig(filename_base + "_" + name.replace(" ", "_").lower() + ".pdf", pad_inches=0.0, bbox_inches = "tight", dpi = 300)
plt.clf()
else:
plt.show()
def plot_score3(file1, file2, file3, label1=None, label2=None, label3=None, filename_base=None):
if label1 == None and label2 == None and label3 == None:
label1 = file1.split("/")[-1].split(".")[0]
label2 = file2.split("/")[-1].split(".")[0]
label3 = file3.split("/")[-1].split(".")[0]
data1 = load_pickle(file1)
data2 = load_pickle(file2)
data3 = load_pickle(file3)
xlabels = ["", "sto-3g", "sv(p)", "svp/6-31+G(d,p)", "avdz", "tzvp", "avtz", "qzvp", "WF"]
# rclass=1 (dissociation)
idx1 = [3, 4, 5, 6, 7, 8, 17, 19, 27, 28, 29, 65, 72, 99, 100, 101]
# rclass=2 (atom transfer)
idx2 = [0, 11, 13, 15, 30, 32, 37, 40, 43, 45, 50, 52, 54, 67, 74, 76, 78, 84, 86, 88, 90, 92, 95, 97]
# rclass=3 (dissociation barrier)
idx3 = [10, 18, 20, 36, 39, 49, 66, 73]
# rclass=4 (atom transfer barrier)
idx4 = [1, 2, 9, 12, 14, 16, 21, 22, 23, 24, 25, 26, 31, 33, 34, 35, 38, 41, 42, 44, 46, 47, 48, 51, 53, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 68, 69, 70, 71, 75, 77, 79, 80, 81, 82, 83, 85, 87, 89, 91, 93, 94, 96, 98]
for idx, name in zip((idx1, idx2, idx3, idx4),
("Dissociation", "Atom transfer", "Dissociation barrier", "Atom transfer barrier")):
mae1 = np.mean(abs(data1['errors'][:,idx]), axis=1)
mae2 = np.mean(abs(data2['errors'][:,idx]), axis=1)
mae3 = np.mean(abs(data3['errors'][:,idx]), axis=1)
std1 = np.std(abs(data1['errors'][:,idx]), axis=1, ddof=1)/np.sqrt(len(idx))
std2 = np.std(abs(data2['errors'][:,idx]), axis=1, ddof=1)/np.sqrt(len(idx))
std3 = np.std(abs(data3['errors'][:,idx]), axis=1, ddof=1)/np.sqrt(len(idx))
# Do the plot
plt.fill_between(list(range(len(mae1))), mae1 - std1, mae1 + std1, alpha=0.15)
plt.plot(mae1, "o-", label=label1)
plt.fill_between(list(range(len(mae2))), mae2 - std2, mae2 + std2, alpha=0.15)
plt.plot(mae2, "o-", label=label2)
plt.fill_between(list(range(len(mae3))), mae3 - std3, mae3 + std3, alpha=0.15)
plt.plot(mae3, "o-", label=label3)
plt.ylabel("MAE (kcal/mol)\n")
# Set 6 mae as upper range
plt.ylim([0,6])
plt.title(name)
plt.legend()
# Chemical accuracy line
ax = plt.gca()
#xmin, xmax = ax.get_xlim()
#plt.plot([xmin, xmax], [1, 1], "--", c="k")
## In case the xlimit have changed, set it again
#plt.xlim([xmin, xmax])
# Set xtick labels
ax.set_xticklabels(xlabels, rotation=-45, ha='left')
if filename_base is not None:
plt.savefig(filename_base + "_" + name.replace(" ", "_").lower() + ".pdf", pad_inches=0.0, bbox_inches = "tight", dpi = 300)
plt.clf()
else:
plt.show()
def plot_score4sns(file1, file2, file3, file4, label1=None, label2=None, label3=None, label4=None, filename_base=None):
if label1 == None and label2 == None and label3 == None and label4 == None:
label1 = file1.split("/")[-1].split(".")[0]
label2 = file2.split("/")[-1].split(".")[0]
label3 = file3.split("/")[-1].split(".")[0]
        label4 = file4.split("/")[-1].split(".")[0]
data1 = load_pickle(file1)
data2 = load_pickle(file2)
data3 = load_pickle(file3)
data4 = load_pickle(file4)
xlabels = ["", "sto-3g", "sv(p)", "svp/6-31+G(d,p)", "avdz", "tzvp", "avtz", "qzvp", "WF"]
# rclass=1 (dissociation)
idx1 = [3, 4, 5, 6, 7, 8, 17, 19, 27, 28, 29, 65, 72, 99, 100, 101]
# rclass=2 (atom transfer)
idx2 = [0, 11, 13, 15, 30, 32, 37, 40, 43, 45, 50, 52, 54, 67, 74, 76, 78, 84, 86, 88, 90, 92, 95, 97]
# rclass=3 (dissociation barrier)
idx3 = [10, 18, 20, 36, 39, 49, 66, 73]
# rclass=4 (atom transfer barrier)
idx4 = [1, 2, 9, 12, 14, 16, 21, 22, 23, 24, 25, 26, 31, 33, 34, 35, 38, 41, 42, 44, 46, 47, 48, 51, 53, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 68, 69, 70, 71, 75, 77, 79, 80, 81, 82, 83, 85, 87, 89, 91, 93, 94, 96, 98]
for idx, name in zip((idx1, idx2, idx3, idx4),
("Dissociation", "Atom transfer", "Dissociation barrier", "Atom transfer barrier")):
mae1 = np.mean(abs(data1['errors'][:,idx]), axis=1)
mae2 = np.mean(abs(data2['errors'][:,idx]), axis=1)
mae3 = np.mean(abs(data3['errors'][:,idx]), axis=1)
mae4 = np.mean(abs(data4['errors'][:,idx]), axis=1)
# Create dataframe for the seaborn plots
basis = []
error = []
method = []
x1 = abs(data1['errors'][:,idx])
x2 = abs(data2['errors'][:,idx])
x3 = abs(data3['errors'][:,idx])
x4 = abs(data4['errors'][:,idx])
basis_list = ["sto-3g", "sv(p)", "svp/6-31+G(d,p)", "avdz", "tzvp", "avtz", "qzvp", "WF"]
for i in range(x1.shape[0]):
for j in range(x1.shape[1]):
basis.append(basis_list[i])
error.append(x1[i,j])
method.append(label1)
basis.append(basis_list[i])
error.append(x2[i,j])
method.append(label2)
basis.append(basis_list[i])
error.append(x3[i,j])
method.append(label3)
basis.append(basis_list[i])
error.append(x4[i,j])
method.append(label4)
        df = pd.DataFrame.from_dict({'basis': basis, 'MAE (kcal/mol)': error, 'method':method})
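        # Assumed continuation (the snippet is truncated here), mirroring plot_score3sns above.
        sns.stripplot(x="basis", y="MAE (kcal/mol)", data=df, hue="method", jitter=0.1, dodge=True)
        plt.plot(mae1, "-", label=label1)
        plt.plot(mae2, "-", label=label2)
        plt.plot(mae3, "-", label=label3)
        plt.plot(mae4, "-", label=label4)
        # Set 6 mae as upper range
        plt.ylim([0,6])
        plt.xticks(rotation=-45, ha='left')
        plt.title(name)
        if filename_base is not None:
            plt.savefig(filename_base + "_" + name.replace(" ", "_").lower() + ".pdf", pad_inches=0.0, bbox_inches = "tight", dpi = 300)
            plt.clf()
        else:
            plt.show()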
import os
import glob
import pathlib
import re
import base64
import pandas as pd
from datetime import datetime, timedelta
# https://www.pythonanywhere.com/forums/topic/29390/ for measuring the RAM usage on pythonanywhere
class defichainAnalyticsModelClass:
def __init__(self):
workDir = os.path.abspath(os.getcwd())
self.dataPath = workDir[:-9] + '/data/'
# data for controller/views
self.dailyData = pd.DataFrame()
self.hourlyData = pd.DataFrame()
self.minutelyData = pd.DataFrame()
self.lastRichlist = None
self.snapshotData = None
self.changelogData = None
# last update of csv-files
self.updated_nodehubIO = None
self.updated_allnodes = None
self.updated_extractedRichlist = None
self.updated_tradingData = None
self.updated_blocktime = None
self.updated_dexHourly = None
self.update_dexMinutely = None
self.updated_daa = None
self.updated_LastRichlist = None
self.updated_dexVolume = None
self.updated_tokenCryptos = None
self.updated_twitterData = None
self.updated_twitterFollower = None
self.update_snapshotData = None
self.update_changelogData = None
self.update_incomeVisits = None
self.update_portfolioDownloads = None
self.update_promoDatabase = None
self.update_analyticsVisits = None
self.updated_hourlyDEXTrades = None
self.update_MNmonitor = None
self.updated_dfx = None
self.update_DFIsignal = None
# background image for figures
with open(workDir + "/assets/analyticsLandscapeGrey2.png", "rb") as image_file:
encoded_string = base64.b64encode(image_file.read()).decode()
self.figBackgroundImage = "data:image/png;base64," + encoded_string # Add the prefix that plotly will want when using the string as source
#### DAILY DATA #####
def loadDailyData(self):
self.loadHourlyDEXdata()
self.loadDEXVolume()
self.loadDailyTradingData()
self.loadExtractedRichlistData()
self.calcOverallTVLdata()
self.loadDailyBlocktimeData()
self.loadDAAData()
self.loadTwitterData()
self.loadTwitterFollowerData()
self.loadIncomeVisitsData()
self.loadPortfolioDownloads()
self.loadPromoDatabase()
self.loadMNMonitorDatabase()
self.loadAnalyticsVisitsData()
self.loadDFIsignalDatabase()
def loadMNnodehub(self):
print('>>>> Start update nodehub.IO data ... <<<<')
filePath = self.dataPath + 'mnNodehub.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_nodehubIO:
nodehubData = pd.read_csv(filePath, index_col=0)
nodehubData.rename(columns={"amount": "nbMNNodehub"}, inplace=True)
ind2Delete = self.dailyData.columns.intersection(nodehubData.columns)
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(nodehubData['nbMNNodehub'], how='outer', left_index=True, right_index=True)
self.updated_nodehubIO = fileInfo.stat()
print('>>>> nodehub data loaded from csv-file <<<<')
def loadMNAllnodes(self):
print('>>>> Start update allnodes data ... <<<<')
filePath = self.dataPath + 'mnAllnodes.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_allnodes:
allnodesData = pd.read_csv(filePath, index_col=0)
allnodesData.set_index('date', inplace=True)
ind2Delete = self.dailyData.columns.intersection(allnodesData.columns)
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(allnodesData['nbMNAllnode'], how='outer', left_index=True, right_index=True)
self.updated_allnodes = fileInfo.stat()
print('>>>> allnodes data loaded from csv-file <<<<')
def loadExtractedRichlistData(self):
        self.loadMNnodehub()  # number of masternodes hosted by Nodehub must be loaded here to ensure correct values for other and relative representation
        self.loadMNAllnodes()  # number of masternodes hosted by Allnodes must be loaded here to ensure correct values for other and relative representation
print('>>>> Start update extracted richlist data ... <<<<')
filePath = self.dataPath + 'extractedDFIdata.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_extractedRichlist:
extractedRichlist = pd.read_csv(filePath, index_col=0)
ind2Delete = self.dailyData.columns.intersection(extractedRichlist.columns) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(extractedRichlist, how='outer', left_index=True, right_index=True) # add new columns to daily table
self.dailyData['nbMNOther'] = self.dailyData['nbMnId']-self.dailyData['nbMnCakeId']-self.dailyData['nbMydefichainId']-self.dailyData['nbMNNodehub'].fillna(0)-self.dailyData['nbMNAllnode'].fillna(0)
self.dailyData['nbMNnonCake'] = self.dailyData['nbMnId']-self.dailyData['nbMnCakeId']
self.dailyData['nbMnCakeIdRelative'] = self.dailyData['nbMnCakeId']/self.dailyData['nbMnId']*100
self.dailyData['nbMNOtherRelative'] = self.dailyData['nbMNOther'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMydefichainRelative'] = self.dailyData['nbMydefichainId'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMNNodehubRelative'] = self.dailyData['nbMNNodehub'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMNAllnodeRelative'] = self.dailyData['nbMNAllnode'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMNlocked10Relative'] = self.dailyData['nbMNlocked10'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMNlocked5Relative'] = self.dailyData['nbMNlocked5'] / self.dailyData['nbMnId'] * 100
# extracting DFI in Liquidity-Mining
lmCoins = pd.DataFrame(index=self.dailyData.index)
lmCoins['BTC_pool'] = self.hourlyData.groupby('Date')['BTC-DFI_reserveB'].first()
lmCoins['ETH_pool'] = self.hourlyData.groupby('Date')['ETH-DFI_reserveB'].first()
lmCoins['USDT_pool'] = self.hourlyData.groupby('Date')['USDT-DFI_reserveB'].first()
lmCoins['DOGE_pool'] = self.hourlyData.groupby('Date')['DOGE-DFI_reserveB'].first()
lmCoins['LTC_pool'] = self.hourlyData.groupby('Date')['LTC-DFI_reserveB'].first()
lmCoins['USDC_pool'] = self.hourlyData.groupby('Date')['USDC-DFI_reserveB'].first()
            lmCoins['overall'] = lmCoins['BTC_pool'] + lmCoins['ETH_pool'] + lmCoins['USDT_pool'] + lmCoins['DOGE_pool'].fillna(0) + lmCoins['LTC_pool'].fillna(0) + lmCoins['USDC_pool'].fillna(0)
self.dailyData['lmDFI'] = lmCoins['overall']
# sum of addresses and DFI
self.dailyData['nbOverall'] = self.dailyData['nbMnId'] + self.dailyData['nbOtherId']
self.dailyData['circDFI'] = self.dailyData['mnDFI'] + self.dailyData['otherDFI'] \
+ self.dailyData['tokenDFI'].fillna(0) + self.dailyData['lmDFI'].fillna(0) + self.dailyData['erc20DFI'].fillna(0) \
- (self.dailyData['nbMNlocked10']+self.dailyData['nbMNlocked5']).fillna(0)*20000
self.dailyData['totalDFI'] = self.dailyData['circDFI'] + self.dailyData['fundDFI'] + self.dailyData['foundationDFI'].fillna(0) \
+ self.dailyData['burnedDFI'].fillna(method="ffill") + (self.dailyData['nbMNlocked10']+self.dailyData['nbMNlocked5']).fillna(0)*20000
# calc market cap data in USD and BTC
print('>>>>>>>> Update market cap in loadExtractedRichlistData... <<<<<<<<')
self.dailyData['marketCapUSD'] = self.dailyData['circDFI']*self.dailyData['DFIPriceUSD']
self.dailyData['marketCapBTC'] = self.dailyData['marketCapUSD'] / self.dailyData['BTCPriceUSD']
# calculate daily change in addresses and DFI amount
self.dailyData['diffDate'] = pd.to_datetime(self.dailyData.index).to_series().diff().values
self.dailyData['diffDate'] = self.dailyData['diffDate'].fillna(pd.Timedelta(seconds=0)) # set nan-entry to timedelta 0
self.dailyData['diffDate'] = self.dailyData['diffDate'].apply(lambda x: float(x.days))
self.dailyData['diffNbOther'] = self.dailyData['nbOtherId'].diff() / self.dailyData['diffDate']
self.dailyData['diffNbMN'] = self.dailyData['nbMnId'].diff() / self.dailyData['diffDate']
self.dailyData['diffNbNone'] = None
self.dailyData['diffotherDFI'] = self.dailyData['otherDFI'].diff() / self.dailyData['diffDate']
self.dailyData['diffmnDFI'] = self.dailyData['mnDFI'].diff() / self.dailyData['diffDate']
self.dailyData['difffundDFI'] = self.dailyData['fundDFI'].diff() / self.dailyData['diffDate']
self.dailyData['difffoundationDFI'] = self.dailyData['foundationDFI'].diff() / self.dailyData['diffDate']
self.dailyData['diffLMDFI'] = self.dailyData['lmDFI'].diff() / self.dailyData['diffDate']
self.updated_extractedRichlist = fileInfo.stat()
print('>>>> Richlist data loaded from csv-file <<<<')
def calcOverallTVLdata(self):
self.dailyData['tvlMNDFI'] = self.dailyData['nbMnId'] * ((pd.to_datetime(self.dailyData.index)<pd.Timestamp('2021-03-02')) * 1 * 1000000 + \
(pd.to_datetime(self.dailyData.index)>=pd.Timestamp('2021-03-02')) * 1 * 20000)
dexLockedDFI = (self.hourlyData['BTC-DFI_lockedDFI']+self.hourlyData['ETH-DFI_lockedDFI']+self.hourlyData['USDT-DFI_lockedDFI'] +
self.hourlyData['DOGE-DFI_lockedDFI'].fillna(0)+self.hourlyData['LTC-DFI_lockedDFI'].fillna(0) +
self.hourlyData['BCH-DFI_lockedDFI'].fillna(0) + self.hourlyData['USDC-DFI_lockedDFI'].fillna(0))
dexLockedDFI.index = dexLockedDFI.index.floor('D').astype(str) # remove time information, only date is needed
self.dailyData['tvlDEXDFI'] = dexLockedDFI.groupby(level=0).first()
def loadDailyTradingData(self):
print('>>>> Start update trading data ... <<<<')
filePath = self.dataPath + 'dailyTradingResultsDEX.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_tradingData:
dailyTradingResults = pd.read_csv(self.dataPath+'dailyTradingResultsDEX.csv',index_col=0)
ind2Delete = self.dailyData.columns.intersection(dailyTradingResults.columns) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(dailyTradingResults, how='outer', left_index=True, right_index=True) # add new columns to daily table
# calc market cap data in USD and BTC (same as in loadExtractedRichlistData to get updated price information
if 'circDFI' in self.dailyData.columns:
print('>>>>>>>> Update market cap in loadDailyTradingData... <<<<<<<<')
self.dailyData['marketCapUSD'] = self.dailyData['circDFI']*self.dailyData['DFIPriceUSD']
self.dailyData['marketCapBTC'] = self.dailyData['marketCapUSD'] / self.dailyData['BTCPriceUSD']
self.updated_tradingData = fileInfo.stat()
print('>>>> Trading data loaded from csv-file <<<<')
def loadDailyBlocktimeData(self):
print('>>>> Start update blocktime data ... <<<<')
filePath = self.dataPath + 'BlockListStatistics.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_blocktime:
dailyBlocktimeData = pd.read_csv(filePath, index_col=0)
dailyBlocktimeData['tps'] = dailyBlocktimeData['txCount'] / (24 * 60 * 60)
ind2Delete = self.dailyData.columns.intersection(dailyBlocktimeData.columns) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(dailyBlocktimeData, how='outer', left_index=True,right_index=True) # add new columns to daily table
self.updated_blocktime = fileInfo.stat()
print('>>>> Blocktime data loaded from csv-file <<<<')
def loadDAAData(self):
print('>>>> Start update DAA data ... <<<<')
filePath = self.dataPath + 'analyzedDataDAA.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_daa:
dailyDAAData = pd.read_csv(filePath, index_col=0)
ind2Delete = self.dailyData.columns.intersection(dailyDAAData.columns) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(dailyDAAData, how='outer', left_index=True, right_on='Date') # add new columns to daily table
self.dailyData.set_index('Date', inplace=True)
self.dailyData.sort_index(inplace=True)
self.updated_daa = fileInfo.stat()
print('>>>> DAA data loaded from csv-file <<<<')
def loadTwitterData(self):
print('>>>> Start update twitter data ... <<<<')
filePath = self.dataPath + 'analyzedTwitterData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_twitterData:
twitterData = pd.read_csv(filePath, index_col=0)
columns2update = ['overall_Activity', 'defichain_Activity', 'dfi_Activity', 'overall_Likes', 'overall_UniqueUserOverall', 'overall_UniqueUserTweet', 'overall_UniqueUserReply', 'overall_UniqueUserRetweet']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(twitterData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.updated_twitterData = fileInfo.stat()
print('>>>> Twitter data loaded from csv-file <<<<')
def loadTwitterFollowerData(self):
print('>>>> Start update twitter follower data ... <<<<')
filePath = self.dataPath + 'TwitterData_follower.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_twitterFollower:
twitterFollowData = pd.read_csv(filePath, index_col=0)
twitterFollowData.set_index('Date',inplace=True)
columns2update = ['Follower', 'followedToday', 'unfollowedToday']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(twitterFollowData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.updated_twitterFollower = fileInfo.stat()
print('>>>> Twitter data loaded from csv-file <<<<')
def loadIncomeVisitsData(self):
print('>>>> Start update income visits data ... <<<<')
filePath = self.dataPath + 'dataVisitsIncome.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_incomeVisits:
incomeVisitsData = pd.read_csv(filePath, index_col=0)
incomeVisitsData.rename(columns={'0': 'incomeVisits'}, inplace=True)
incomeVisitsData.set_index(incomeVisitsData.index.str[:10], inplace=True) # just use date information without hh:mm
columns2update = ['incomeVisits']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(incomeVisitsData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_incomeVisits = fileInfo.stat()
print('>>>> Income visits data loaded from csv-file <<<<')
def loadPortfolioDownloads(self):
print('>>>> Start update portfolio downloads data ... <<<<')
filePath = self.dataPath + 'dataPortfolioDownloads.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_portfolioDownloads:
portfolioRawData = pd.read_csv(filePath)
columns2update = ['PortfolioWindows', 'PortfolioMac', 'PortfolioLinux']
dfPortfolioData = pd.DataFrame(index=portfolioRawData['DateCaptured'].unique(), columns=columns2update)
dfPortfolioData['PortfolioWindows'] = portfolioRawData.groupby(portfolioRawData.DateCaptured).Windows.sum()
dfPortfolioData['PortfolioMac'] = portfolioRawData.groupby(portfolioRawData.DateCaptured).Mac.sum()
dfPortfolioData['PortfolioLinux'] = portfolioRawData.groupby(portfolioRawData.DateCaptured).Linux.sum()
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(dfPortfolioData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_portfolioDownloads = fileInfo.stat()
print('>>>> Portfolio downloads data loaded from csv-file <<<<')
def loadPromoDatabase(self):
print('>>>> Start update DefiChain promo database ... <<<<')
filePath = self.dataPath + 'defichainPromoData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_promoDatabase:
promoRawData = pd.read_csv(filePath, index_col=0)
columns2update = ['postActive', 'mediaActive', 'incentivePointsToday', 'incentiveUsers']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(promoRawData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_promoDatabase = fileInfo.stat()
print('>>>> DefiChain promo database loaded from csv-file <<<<')
def loadMNMonitorDatabase(self):
print('>>>> Start update masternode monitor database ... <<<<')
filePath = self.dataPath + 'masternodeMonitorData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_MNmonitor:
monitorRawData = pd.read_csv(filePath, index_col=0)
columns2update = ['nbMasternodes', 'nbAccounts']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(monitorRawData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_MNmonitor = fileInfo.stat()
print('>>>> MN Monitor database loaded from csv-file <<<<')
def loadAnalyticsVisitsData(self):
print('>>>> Start update raw data analytics visits ... <<<<')
filePath = self.dataPath + 'rawDataUserVisit.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_analyticsVisits:
analyticsRawVisitsData = pd.read_csv(filePath, index_col=0)
analyticsRawVisitsData['visitDate'] = pd.to_datetime(analyticsRawVisitsData.visitTimestamp).dt.date
analyticsVisitData = analyticsRawVisitsData.groupby('visitDate').count()
analyticsVisitData.rename(columns={'visitTimestamp': 'analyticsVisits'}, inplace=True)
columns2update = ['analyticsVisits']
analyticsVisitData.index = analyticsVisitData.index.map(str) # change index from dt to str format
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(analyticsVisitData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_analyticsVisits = fileInfo.stat()
print('>>>> Analytics visits data loaded from csv-file <<<<')
def loadDFIsignalDatabase(self):
print('>>>> Start update DFI-signal database ... <<<<')
filePath = self.dataPath + 'dfiSignalData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_DFIsignal:
dfiSignalRawData = pd.read_csv(filePath, index_col=0)
columns2update = ['user_count','masternode_count','messages_sent','commands_received','minted_blocks']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(dfiSignalRawData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_DFIsignal = fileInfo.stat()
print('>>>> DFI-Signal database loaded from csv-file <<<<')
#### HOURLY DATA ####
def loadHourlyData(self):
self.loadHourlyDEXdata()
self.loadDEXVolume()
self.loadTokenCrypto()
self.loadHourlyDEXTrades()
self.loadDFXdata()
def loadHourlyDEXdata(self):
print('>>>> Start update hourly DEX data ... <<<<')
filePath = self.dataPath + 'LMPoolData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_dexHourly:
hourlyDEXData = pd.read_csv(filePath, index_col=0)
hourlyDEXData['timeRounded'] = pd.to_datetime(hourlyDEXData.Time).dt.floor('H')
hourlyDEXData.set_index(['timeRounded'], inplace=True)
hourlyDEXData['reserveA_DFI'] = hourlyDEXData['reserveA'] / hourlyDEXData['DFIPrices']
for poolSymbol in hourlyDEXData.symbol.dropna().unique():
df2Add = hourlyDEXData[hourlyDEXData.symbol == poolSymbol]
df2Add = df2Add.drop(columns=['Time', 'symbol'])
# calculate locked DFI and corresponding values
df2Add = df2Add.assign(lockedDFI=df2Add['reserveB'] + df2Add['reserveA_DFI'])
df2Add = df2Add.assign(lockedUSD=df2Add['lockedDFI']*hourlyDEXData[hourlyDEXData.symbol == 'USDT-DFI'].DFIPrices)
df2Add = df2Add.assign(lockedBTC=df2Add['lockedDFI'] * hourlyDEXData[hourlyDEXData.symbol == 'BTC-DFI'].DFIPrices)
# calculate relative price deviations
df2Add = df2Add.assign(relPriceDevCoingecko=((df2Add['DFIPrices'] - df2Add['reserveA/reserveB'])/df2Add['DFIPrices']))
df2Add = df2Add.assign(relPriceDevBittrex=((df2Add['DFIPricesBittrex'] - df2Add['reserveA/reserveB']) / df2Add['DFIPricesBittrex']))
# add prefix to column names for pool identification
colNamesOrig = df2Add.columns.astype(str)
colNamesNew = poolSymbol+'_' + colNamesOrig
df2Add = df2Add.rename(columns=dict(zip(colNamesOrig, colNamesNew)))
# delete existing information and add new one
ind2Delete = self.hourlyData.columns.intersection(colNamesNew) # check if columns exist
self.hourlyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.hourlyData = self.hourlyData.merge(df2Add, how='outer', left_index=True, right_index=True) # add new columns to daily table
self.hourlyData['Date'] = pd.to_datetime(self.hourlyData.index).strftime('%Y-%m-%d')
self.updated_dexHourly = fileInfo.stat()
print('>>>> Hourly DEX data loaded from csv-file <<<<')
def loadDEXVolume(self):
print('>>>> Start update DEX volume data ... <<<<')
filePath = self.dataPath + 'DEXVolumeData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_dexVolume:
volumeData = pd.read_csv(filePath, index_col=0)
volumeData['timeRounded'] = pd.to_datetime(volumeData.Time).dt.floor('H')
volumeData.set_index(['timeRounded'], inplace=True)
for poolSymbol in volumeData['base_name'].unique():
df2Add = volumeData[volumeData['base_name']==poolSymbol][['base_volume', 'quote_volume']]
df2Add['VolTotal'] = df2Add[['base_volume', 'quote_volume']].sum(axis=1)
# add prefix to column names for pool identification
colNamesOrig = df2Add.columns.astype(str)
colNamesNew = poolSymbol + '_' + colNamesOrig
df2Add = df2Add.rename(columns=dict(zip(colNamesOrig, colNamesNew)))
# delete existing information and add new one
ind2Delete = self.hourlyData.columns.intersection(colNamesNew) # check if columns exist
self.hourlyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.hourlyData = self.hourlyData.merge(df2Add, how='outer', left_index=True, right_index=True) # add new columns to daily table
# calculate total volume after merge of data
            self.hourlyData['VolTotal'] = self.hourlyData['BTC_VolTotal']*0  # only use rows with data; BTC was the first pool and has the most data (besides ETH, USDT)
for poolSymbol in volumeData['base_name'].unique():
self.hourlyData['VolTotal'] = self.hourlyData['VolTotal'] + self.hourlyData[poolSymbol+'_'+'VolTotal'].fillna(0)
self.hourlyData['VolTotalCoingecko'] = volumeData[volumeData['base_name']=='BTC']['coingeckoVolume']
self.updated_dexVolume = fileInfo.stat()
print('>>>> DEX volume data loaded from csv-file <<<<')
def loadHourlyDEXTrades(self):
print('>>>> Start update hourly DEX trade data ... <<<<')
filePath = self.dataPath + 'hourlyDEXTrades.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_hourlyDEXTrades:
hourlyTrades = pd.read_csv(filePath, index_col=0)
hourlyTrades.fillna(0, inplace=True)
hourlyTrades.index = pd.to_datetime(hourlyTrades.index).tz_localize(None)
columns2update = []
currName = ['BTC', 'ETH', 'USDT', 'DOGE', 'LTC', 'BCH', 'USDC', 'DFI']
for ind in range(7):
hourlyTrades['volume'+currName[ind]+'buyDFI'] = hourlyTrades[currName[ind]+'pool_base'+currName[ind]] * hourlyTrades[currName[ind]+'-USD']
hourlyTrades['volume'+currName[ind]+'sellDFI'] = hourlyTrades[currName[ind]+'pool_quote'+currName[ind]] * hourlyTrades[currName[ind]+'-USD']
columns2update.extend(['volume'+currName[ind]+'buyDFI', 'volume'+currName[ind]+'sellDFI'])
hourlyTrades['volumeOverallbuyDFI'] = hourlyTrades['volumeBTCbuyDFI']+hourlyTrades['volumeETHbuyDFI']+hourlyTrades['volumeUSDTbuyDFI'] + \
hourlyTrades['volumeDOGEbuyDFI']+hourlyTrades['volumeLTCbuyDFI']+hourlyTrades['volumeBCHbuyDFI'] + \
hourlyTrades['volumeUSDCbuyDFI']
hourlyTrades['volumeOverallsellDFI'] = hourlyTrades['volumeBTCsellDFI']+hourlyTrades['volumeETHsellDFI']+hourlyTrades['volumeUSDTsellDFI'] + \
hourlyTrades['volumeDOGEsellDFI']+hourlyTrades['volumeLTCsellDFI']+hourlyTrades['volumeBCHsellDFI'] + \
hourlyTrades['volumeUSDCsellDFI']
columns2update.extend(['volumeOverallbuyDFI', 'volumeOverallsellDFI'])
ind2Delete = self.hourlyData.columns.intersection(columns2update) # check if columns exist
self.hourlyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.hourlyData = self.hourlyData.merge(hourlyTrades[columns2update], how='outer', left_index=True, right_index=True) # delete existing columns to add new ones
self.updated_hourlyDEXTrades = fileInfo.stat()
print('>>>> DEX volume data loaded from csv-file <<<<')
def loadTokenCrypto(self):
print('>>>> Start update token data ... <<<<')
filePath = self.dataPath + 'TokenData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_tokenCryptos:
tokenData = pd.read_csv(filePath, index_col=0)
tokenData['timeRounded'] = pd.to_datetime(tokenData.Time).dt.floor('H')
tokenData.set_index(['timeRounded'], inplace=True)
for coinSymbol in tokenData['symbol'].unique():
df2Add = tokenData[tokenData['symbol']==coinSymbol][['Burned', 'minted', 'Collateral']]
df2Add['tokenDefiChain'] = df2Add['minted'] - df2Add['Burned'].fillna(0)
df2Add['diffToken'] = df2Add['Collateral']-df2Add['minted']+df2Add['Burned'].fillna(0)
# add prefix to column names for pool identification
colNamesOrig = df2Add.columns.astype(str)
colNamesNew = coinSymbol + '_' + colNamesOrig
df2Add = df2Add.rename(columns=dict(zip(colNamesOrig, colNamesNew)))
# delete existing information and add new one
ind2Delete = self.hourlyData.columns.intersection(colNamesNew) # check if columns exist
self.hourlyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.hourlyData = self.hourlyData.merge(df2Add, how='outer', left_index=True, right_index=True) # add new columns to daily table
self.updated_tokenCryptos = fileInfo.stat()
print('>>>> DAT Cryptos data loaded from csv-file <<<<')
def loadDFXdata(self):
print('>>>> Start update DFX data ... <<<<')
filePath = self.dataPath + 'dfxData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_dfx:
dfxData = pd.read_csv(filePath, index_col=0)
dfxData['timeRounded'] = pd.to_datetime(dfxData.index).floor('H')
dfxData.set_index(['timeRounded'], inplace=True)
columns2update = ['dfxBuyRoutes', 'dfxSellRoutes', 'dfxBuyVolume', 'dfxSellVolume', 'dfxBuyVolumeCHF', 'dfxSellVolumeCHF']
# delete existing information and add new one
ind2Delete = self.hourlyData.columns.intersection(columns2update) # check if columns exist
self.hourlyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.hourlyData = self.hourlyData.merge(dfxData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.updated_dfx = fileInfo.stat()
print('>>>> DFX data loaded from csv-file <<<<')
#### MINUTELY DATA ####
def loadMinutelyData(self):
self.loadMinutelyDEXdata()
def loadMinutelyDEXdata(self):
filePath = self.dataPath + 'LMPoolData_ShortTerm.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_dexMinutely:
minutelyDEXData = pd.read_csv(filePath, index_col=0)
minutelyDEXData['timeRounded'] = pd.to_datetime(minutelyDEXData.Time).dt.floor('min') #.dt.strftime('%Y-%m-%d %H:%M')
minutelyDEXData.set_index(['timeRounded'], inplace=True)
for poolSymbol in minutelyDEXData.symbol.unique():
df2Add = minutelyDEXData[minutelyDEXData.symbol == poolSymbol]
df2Add = df2Add.drop(columns=['Time', 'symbol'])
# calculate relative price deviations
df2Add = df2Add.assign(relPriceDevCoingecko=((df2Add['DFIPrices'] - df2Add['reserveA/reserveB'])/df2Add['DFIPrices']))
df2Add = df2Add.assign(relPriceDevBittrex=((df2Add['DFIPricesBittrex'] - df2Add['reserveA/reserveB']) / df2Add['DFIPricesBittrex']))
# add prefix to column names for pool identification
colNamesOrig = df2Add.columns.astype(str)
colNamesNew = poolSymbol+'_' + colNamesOrig
df2Add = df2Add.rename(columns=dict(zip(colNamesOrig, colNamesNew)))
# delete existing information and add new one
ind2Delete = self.minutelyData.columns.intersection(colNamesNew) # check if columns exist
self.minutelyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.minutelyData = self.minutelyData.merge(df2Add, how='outer', left_index=True, right_index=True) # add new columns to daily table
self.minutelyData.dropna(axis=0, how='all',inplace=True)
self.update_dexMinutely = fileInfo.stat()
print('>>>> Minutely DEX data loaded from csv-file <<<<')
#### NO TIMESERIES ####
def loadNoTimeseriesData(self):
self.loadLastRichlist()
self.loadSnapshotData()
self.loadChangelogData()
def loadLastRichlist(self):
filePath = self.dataPath + 'Richlist/'
listCSVFiles = glob.glob(filePath + "*_01-*.csv") # get all csv-files generated at night
newestDate = self.dailyData['nbMnId'].dropna().index.max() # find newest date in extracted Data
file2Load = [x for x in listCSVFiles if re.search(newestDate, x)] # find corresponding csv-file of richlist
fname = pathlib.Path(file2Load[0])
if fname.stat() != self.updated_LastRichlist:
self.lastRichlist = pd.read_csv(file2Load[0]) # load richlist
# date for information/explanation
            self.lastRichlist['date'] = pd.to_datetime(newestDate)
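            # Assumed continuation (the snippet is truncated here), following the update pattern of the other loaders above.
            self.updated_LastRichlist = fname.stat()
            print('>>>> Last richlist loaded from csv-file <<<<')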
import numpy as np
import pandas as pd
from scipy.stats import gamma # type: ignore
def _sigmoid(x: float) -> float:
"""Helper function to apply sigmoid to a float.
Args:
x: a float to apply the sigmoid function to.
Returns:
(float): x after applying the sigmoid function.
"""
return 1 / (1 + np.exp(-x))
def simulate_randomized_trial(
n: int = 1000, p: int = 5, sigma: float = 1.0, binary_outcome: bool = False, add_cost_benefit: bool = False
) -> pd.DataFrame:
"""Simulates a synthetic dataset corresponding to a randomized trial
    The version with continuous outcome and without cost/benefit columns corresponds to Setup B in
    <NAME>. and <NAME>. (2018), 'Quasi-Oracle Estimation of Heterogeneous Treatment Effects', and is
    aligned with the implementation in the CausalML package.
    Args:
        n (int, optional): number of observations to generate
        p (int, optional): number of covariates. Should be >= 5, since treatment heterogeneity is determined based on the first 5 features.
sigma (float): standard deviation of the error term
binary_outcome (bool): whether the outcome should be binary or continuous
add_cost_benefit (bool): whether to generate cost and benefit columns
Returns:
(pandas.DataFrame): a dataframe containing the following columns:
- treatment
- outcome
- propensity
- expected_outcome
- actual_cate
- benefit (only if add_cost_benefit=True)
- cost (only if add_cost_benefit=True)
"""
X = np.random.normal(loc=0.0, scale=1.0, size=n * p).reshape((n, -1))
b = np.maximum(np.repeat(0.0, n), X[:, 0] + X[:, 1] + X[:, 2]) + np.maximum(np.repeat(0.0, n), X[:, 3] + X[:, 4])
e = np.repeat(0.5, n)
tau = X[:, 0] + np.log1p(np.exp(X[:, 1]))
w = np.random.binomial(1, e, size=n)
if binary_outcome:
y1 = b + (1 - 0.5) * tau + sigma * np.random.normal(loc=0.0, scale=1.0, size=n)
y0 = b + (0 - 0.5) * tau + sigma * np.random.normal(loc=0.0, scale=1.0, size=n)
y1_binary = pd.Series(_sigmoid(y1) > 0.5).astype(np.int32) # potential outcome when w=1
y0_binary = pd.Series(_sigmoid(y0) > 0.5).astype(np.int32) # potential outcome when w=0
# observed outcome
y = y0_binary
y[w == 1] = y1_binary # type: ignore
# ensure that tau is between [-1, 1]
tau = _sigmoid(y1) - _sigmoid(y0)
else:
y = b + (w - 0.5) * tau + sigma * np.random.normal(loc=0.0, scale=1.0, size=n)
data = pd.DataFrame({"treatment": w, "outcome": y, "propensity": e, "expected_outcome": b, "actual_cate": tau})
features = pd.DataFrame(X, columns=[f"feature_{i}" for i in range(1, X.shape[1] + 1)])
    data = pd.concat([data, features], axis=1)
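    # Assumed completion (the snippet is truncated here). The docstring promises optional benefit/cost
    # columns; scipy.stats.gamma is imported above, so cost is sketched as a gamma draw. Both formulas
    # below are illustrative assumptions, not the original implementation.
    if add_cost_benefit:
        data["benefit"] = tau
        data["cost"] = gamma.rvs(a=2.0, scale=1.0, size=n)
    return data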
import pandas as pd
import re, json
import argparse
'''
preprocessing for MIMIC discharge summary notes
1. load NOTEEVENTS.csv
2. get discharge summary notes
    a) NOTEEVENTS.CATEGORY = 'Discharge Summary'
    b) NOTEEVENTS.DESCRIPTION = 'Report'
    c) eliminate short notes
3. preprocess discharge summary notes
    a) clean text
    b) split sections by headers
4. save csv file
    a) PK: NOTEEVENTS.ROW_ID
    b) TEXT: string (doubled-list)
'''
def config():
parser = argparse.ArgumentParser()
parser.add_argument('--load_file_path', type=str, default='file/NOTEEVENTS.csv')
parser.add_argument('--save_file_path', type=str, default='sections_discharge_summary.csv')
opt = parser.parse_args()
return opt
def load_noteevents(file_path):
df = pd.read_csv(file_path)
# dataframe dtype config
df.CHARTDATE = pd.to_datetime(df.CHARTDATE, format='%Y-%m-%d', errors='raise')
df.CHARTTIME = pd.to_datetime(df.CHARTTIME, format='%Y-%m-%d %H:%M:%S', errors='raise')
    df.STORETIME = pd.to_datetime(df.STORETIME)
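    # Assumed continuation (the snippet is truncated here): return the parsed frame to the caller.
    return df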
import util
import argparse
from model import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
parser = argparse.ArgumentParser()
parser.add_argument('--device',type=str,default='cuda:3',help='')
parser.add_argument('--data',type=str,default='data/METR-LA',help='data path')
parser.add_argument('--adjdata',type=str,default='data/sensor_graph/adj_mx.pkl',help='adj data path')
parser.add_argument('--adjtype',type=str,default='doubletransition',help='adj type')
parser.add_argument('--gcn_bool',action='store_true',help='whether to add graph convolution layer')
parser.add_argument('--aptonly',action='store_true',help='whether only adaptive adj')
parser.add_argument('--addaptadj',action='store_true',help='whether add adaptive adj')
parser.add_argument('--randomadj',action='store_true',help='whether random initialize adaptive adj')
parser.add_argument('--seq_length',type=int,default=12,help='')
parser.add_argument('--nhid',type=int,default=32,help='')
parser.add_argument('--in_dim',type=int,default=2,help='inputs dimension')
parser.add_argument('--num_nodes',type=int,default=207,help='number of nodes')
parser.add_argument('--batch_size',type=int,default=64,help='batch size')
parser.add_argument('--learning_rate',type=float,default=0.001,help='learning rate')
parser.add_argument('--dropout',type=float,default=0.3,help='dropout rate')
parser.add_argument('--weight_decay',type=float,default=0.0001,help='weight decay rate')
parser.add_argument('--checkpoint',type=str,help='')
parser.add_argument('--plotheatmap',type=str,default='True',help='')
parser.add_argument('--yrealy',type=int,default=82,help='sensor_id which will be used to produce the real vs. preds output')
args = parser.parse_args()
def main():
device = torch.device(args.device)
_, _, adj_mx = util.load_adj(args.adjdata,args.adjtype)
supports = [torch.tensor(i).to(device) for i in adj_mx]
if args.randomadj:
adjinit = None
else:
adjinit = supports[0]
if args.aptonly:
supports = None
model = gwnet(device, args.num_nodes, args.dropout, supports=supports, gcn_bool=args.gcn_bool, addaptadj=args.addaptadj, aptinit=adjinit)
model.to(device)
model.load_state_dict(torch.load(args.checkpoint))
model.eval()
print('model load successfully')
dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size)
scaler = dataloader['scaler']
outputs = []
realy = torch.Tensor(dataloader['y_test']).to(device)
realy = realy.transpose(1,3)[:,0,:,:]
for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
testx = torch.Tensor(x).to(device)
testx = testx.transpose(1,3)
with torch.no_grad():
preds = model(testx).transpose(1,3)
outputs.append(preds.squeeze())
yhat = torch.cat(outputs,dim=0)
yhat = yhat[:realy.size(0),...]
amae = []
amape = []
armse = []
for i in range(args.seq_length):
pred = scaler.inverse_transform(yhat[:,:,i])
real = realy[:,:,i]
metrics = util.metric(pred,real)
log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
print(log.format(i+1, metrics[0], metrics[1], metrics[2]))
amae.append(metrics[0])
amape.append(metrics[1])
armse.append(metrics[2])
log = 'On average over {:.4f} horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
print(log.format(args.seq_length,np.mean(amae),np.mean(amape),np.mean(armse)))
if args.addaptadj == True:
addaptadj_text = "Adapt"
else:
addaptadj_text = "NoAdapt"
variant = args.adjdata
variant = str(str(str(variant.split("/")[2]).split(".")[0]).split("_")[3])
if args.plotheatmap == "True":
adp = F.softmax(F.relu(torch.mm(model.nodevec1, model.nodevec2)), dim=1)
device = torch.device('cpu')
adp.to(device)
adp = adp.cpu().detach().numpy()
adp = adp*(1/np.max(adp))
df = pd.DataFrame(adp)
sns.heatmap(df, cmap="RdYlBu")
plt.savefig("./heatmap" + "_" + variant + "_" + addaptadj_text + '.pdf')
y12 = realy[:,args.yrealy,11].cpu().detach().numpy()
yhat12 = scaler.inverse_transform(yhat[:,args.yrealy,11]).cpu().detach().numpy()
y1 = realy[:,args.yrealy,0].cpu().detach().numpy()
yhat1 = scaler.inverse_transform(yhat[:,args.yrealy,0]).cpu().detach().numpy()
    df2 = pd.DataFrame({'real1': y1, 'pred1': yhat1, 'real12': y12, 'pred12': yhat12})
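    # Assumed continuation (the snippet is truncated here): persist the real-vs-predicted series for
    # later inspection; the output file name is an illustrative choice.
    df2.to_csv('./realVsPred_' + variant + '_' + addaptadj_text + '.csv', index=False)
if __name__ == "__main__":
    main()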
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import mplcursors
data = {'jenis': ['set1', 'set2', 'set3', 'set4', 'set5'],
'data1': [0.80443, 0.84176, 0.84278, 0.82316, 0.82260],
'data2': [0.71956, 0.77691, 0.77279, 0.74522, 0.74747],
'data3': [0.84256, 0.83268, 0.84152, 0.84204, 0.83775],
'data4': [0.71956, 0.77691, 0.77279, 0.74522, 0.74747],
'data5': [0.80320, 0.83787, 0.83933, 0.82087, 0.82008],
'data6': [0.71956, 0.77043, 0.76772, 0.74286, 0.74432],
'data7': [0.83641, 0.83009, 0.83847, 0.83743, 0.83333],
'data8': [0.71956, 0.77043, 0.76772, 0.74286, 0.74432]}
df = | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserWarning
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import Categorical, DataFrame, Index, MultiIndex, Series, Timestamp, concat
import pandas._testing as tm
@pytest.mark.parametrize("dtype", [str, object])
@pytest.mark.parametrize("check_orig", [True, False])
def test_dtype_all_columns(all_parsers, dtype, check_orig):
# see gh-3795, gh-6607
parser = all_parsers
df = DataFrame(
np.random.rand(5, 2).round(4),
columns=list("AB"),
index=["1A", "1B", "1C", "1D", "1E"],
)
with tm.ensure_clean("__passing_str_as_dtype__.csv") as path:
df.to_csv(path)
result = parser.read_csv(path, dtype=dtype, index_col=0)
if check_orig:
expected = df.copy()
result = result.astype(float)
else:
expected = df.astype(str)
tm.assert_frame_equal(result, expected)
def test_dtype_all_columns_empty(all_parsers):
# see gh-12048
parser = all_parsers
result = parser.read_csv(StringIO("A,B"), dtype=str)
expected = DataFrame({"A": [], "B": []}, index=[], dtype=str)
tm.assert_frame_equal(result, expected)
def test_dtype_per_column(all_parsers):
parser = all_parsers
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
expected = DataFrame(
[[1, "2.5"], [2, "3.5"], [3, "4.5"], [4, "5.5"]], columns=["one", "two"]
)
expected["one"] = expected["one"].astype(np.float64)
expected["two"] = expected["two"].astype(object)
result = parser.read_csv(StringIO(data), dtype={"one": np.float64, 1: str})
tm.assert_frame_equal(result, expected)
def test_invalid_dtype_per_column(all_parsers):
parser = all_parsers
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
with pytest.raises(TypeError, match="data type [\"']foo[\"'] not understood"):
parser.read_csv(StringIO(data), dtype={"one": "foo", 1: "int"})
@pytest.mark.parametrize(
"dtype",
[
"category",
CategoricalDtype(),
{"a": "category", "b": "category", "c": CategoricalDtype()},
],
)
def test_categorical_dtype(all_parsers, dtype):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = DataFrame(
{
"a": Categorical(["1", "1", "2"]),
"b": Categorical(["a", "a", "b"]),
"c": Categorical(["3.4", "3.4", "4.5"]),
}
)
actual = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize("dtype", [{"b": "category"}, {1: "category"}])
def test_categorical_dtype_single(all_parsers, dtype):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = DataFrame(
{"a": [1, 1, 2], "b": Categorical(["a", "a", "b"]), "c": [3.4, 3.4, 4.5]}
)
actual = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_unsorted(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,b,3.4
1,b,3.4
2,a,4.5"""
expected = DataFrame(
{
"a": Categorical(["1", "1", "2"]),
"b": Categorical(["b", "b", "a"]),
"c": Categorical(["3.4", "3.4", "4.5"]),
}
)
actual = parser.read_csv(StringIO(data), dtype="category")
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_missing(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,b,3.4
1,nan,3.4
2,a,4.5"""
expected = DataFrame(
{
"a": Categorical(["1", "1", "2"]),
"b": Categorical(["b", np.nan, "a"]),
"c": Categorical(["3.4", "3.4", "4.5"]),
}
)
actual = parser.read_csv(StringIO(data), dtype="category")
tm.assert_frame_equal(actual, expected)
@pytest.mark.slow
def test_categorical_dtype_high_cardinality_numeric(all_parsers):
# see gh-18186
parser = all_parsers
data = np.sort([str(i) for i in range(524289)])
expected = DataFrame({"a": Categorical(data, ordered=True)})
actual = parser.read_csv(StringIO("a\n" + "\n".join(data)), dtype="category")
actual["a"] = actual["a"].cat.reorder_categories(
np.sort(actual.a.cat.categories), ordered=True
)
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_latin1(all_parsers, csv_dir_path):
# see gh-10153
pth = os.path.join(csv_dir_path, "unicode_series.csv")
parser = all_parsers
encoding = "latin-1"
expected = parser.read_csv(pth, header=None, encoding=encoding)
expected[1] = Categorical(expected[1])
actual = parser.read_csv(pth, header=None, encoding=encoding, dtype={1: "category"})
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_utf16(all_parsers, csv_dir_path):
# see gh-10153
pth = os.path.join(csv_dir_path, "utf16_ex.txt")
parser = all_parsers
encoding = "utf-16"
sep = "\t"
expected = parser.read_csv(pth, sep=sep, encoding=encoding)
expected = expected.apply(Categorical)
actual = parser.read_csv(pth, sep=sep, encoding=encoding, dtype="category")
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize_infer_categories(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
expecteds = [
DataFrame({"a": [1, 1], "b": Categorical(["a", "b"])}),
DataFrame({"a": [1, 2], "b": Categorical(["b", "c"])}, index=[2, 3]),
]
actuals = parser.read_csv(StringIO(data), dtype={"b": "category"}, chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize_explicit_categories(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
cats = ["a", "b", "c"]
expecteds = [
DataFrame({"a": [1, 1], "b": Categorical(["a", "b"], categories=cats)}),
DataFrame(
{"a": [1, 2], "b": Categorical(["b", "c"], categories=cats)}, index=[2, 3]
),
]
dtype = CategoricalDtype(cats)
actuals = parser.read_csv(StringIO(data), dtype={"b": dtype}, chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize(
"categories",
[["a", "b", "c"], ["a", "c", "b"], ["a", "b", "c", "d"], ["c", "b", "a"]],
)
def test_categorical_category_dtype(all_parsers, categories, ordered):
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
expected = DataFrame(
{
"a": [1, 1, 1, 2],
"b": Categorical(
["a", "b", "b", "c"], categories=categories, ordered=ordered
),
}
)
dtype = {"b": CategoricalDtype(categories=categories, ordered=ordered)}
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_category_dtype_unsorted(all_parsers):
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
dtype = CategoricalDtype(["c", "b", "a"])
expected = DataFrame(
{
"a": [1, 1, 1, 2],
"b": Categorical(["a", "b", "b", "c"], categories=["c", "b", "a"]),
}
)
result = parser.read_csv(StringIO(data), dtype={"b": dtype})
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_numeric(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype([1, 2, 3])}
data = "b\n1\n1\n2\n3"
expected = DataFrame({"b": Categorical([1, 1, 2, 3])})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_datetime(all_parsers):
parser = all_parsers
dti = pd.DatetimeIndex(["2017-01-01", "2018-01-01", "2019-01-01"], freq=None)
dtype = {"b": CategoricalDtype(dti)}
data = "b\n2017-01-01\n2018-01-01\n2019-01-01"
expected = DataFrame({"b": Categorical(dtype["b"].categories)})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_timestamp(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype([Timestamp("2014")])}
data = "b\n2014-01-01\n2014-01-01T00:00:00"
expected = DataFrame({"b": Categorical([Timestamp("2014")] * 2)})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_timedelta(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype(pd.to_timedelta(["1H", "2H", "3H"]))}
data = "b\n1H\n2H\n3H"
expected = DataFrame({"b": Categorical(dtype["b"].categories)})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
"b\nTrue\nFalse\nNA\nFalse",
"b\ntrue\nfalse\nNA\nfalse",
"b\nTRUE\nFALSE\nNA\nFALSE",
"b\nTrue\nFalse\nNA\nFALSE",
],
)
def test_categorical_dtype_coerces_boolean(all_parsers, data):
# see gh-20498
parser = all_parsers
dtype = {"b": CategoricalDtype([False, True])}
expected = DataFrame({"b": Categorical([True, False, None, False])})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_unexpected_categories(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype(["a", "b", "d", "e"])}
data = "b\nd\na\nc\nd" # Unexpected c
expected = DataFrame({"b": Categorical(list("dacd"), dtype=dtype["b"])})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_empty_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two"
result = parser.read_csv(StringIO(data), dtype={"one": "u1"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "two": np.empty(0, dtype=object)},
index=Index([], dtype=object),
)
tm.assert_frame_equal(result, expected)
def test_empty_with_index_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two"
result = parser.read_csv(
StringIO(data), index_col=["one"], dtype={"one": "u1", 1: "f"}
)
expected = DataFrame(
{"two": np.empty(0, dtype="f")}, index=Index([], dtype="u1", name="one")
)
tm.assert_frame_equal(result, expected)
def test_empty_with_multi_index_pass_dtype(all_parsers):
parser = all_parsers
data = "one,two,three"
result = parser.read_csv(
StringIO(data), index_col=["one", "two"], dtype={"one": "u1", 1: "f8"}
)
exp_idx = MultiIndex.from_arrays(
[np.empty(0, dtype="u1"), np.empty(0, dtype=np.float64)], names=["one", "two"]
)
expected = DataFrame({"three": np.empty(0, dtype=object)}, index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_empty_with_mangled_column_pass_dtype_by_names(all_parsers):
parser = all_parsers
data = "one,one"
result = parser.read_csv(StringIO(data), dtype={"one": "u1", "one.1": "f"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},
index=Index([], dtype=object),
)
tm.assert_frame_equal(result, expected)
def test_empty_with_mangled_column_pass_dtype_by_indexes(all_parsers):
parser = all_parsers
data = "one,one"
result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"})
expected = DataFrame(
{"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},
index=Index([], dtype=object),
)
tm.assert_frame_equal(result, expected)
def test_empty_with_dup_column_pass_dtype_by_indexes(all_parsers):
# see gh-9424
parser = all_parsers
expected = concat(
[Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],
axis=1,
)
expected.index = expected.index.astype(object)
data = "one,one"
result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"})
tm.assert_frame_equal(result, expected)
def test_empty_with_dup_column_pass_dtype_by_indexes_raises(all_parsers):
# see gh-9424
parser = all_parsers
expected = concat(
[Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],
axis=1,
)
expected.index = expected.index.astype(object)
with pytest.raises(ValueError, match="Duplicate names"):
data = ""
parser.read_csv(StringIO(data), names=["one", "one"], dtype={0: "u1", 1: "f"})
def test_raise_on_passed_int_dtype_with_nas(all_parsers):
# see gh-2631
parser = all_parsers
data = """YEAR, DOY, a
2001,106380451,10
2001,,11
2001,106380451,67"""
msg = (
"Integer column has NA values"
if parser.engine == "c"
else "Unable to convert column DOY"
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), dtype={"DOY": np.int64}, skipinitialspace=True)
def test_dtype_with_converters(all_parsers):
parser = all_parsers
data = """a,b
1.1,2.2
1.2,2.3"""
    # Dtype spec is ignored if converters are specified.
with tm.assert_produces_warning(ParserWarning):
result = parser.read_csv(
StringIO(data), dtype={"a": "i8"}, converters={"a": lambda x: str(x)}
)
expected = DataFrame({"a": ["1.1", "1.2"], "b": [2.2, 2.3]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype,expected",
[
(np.float64, DataFrame(columns=["a", "b"], dtype=np.float64)),
("category", DataFrame({"a": Categorical([]), "b": Categorical([])}, index=[])),
(
dict(a="category", b="category"),
DataFrame({"a": Categorical([]), "b": | Categorical([]) | pandas.Categorical |
#===============================================================================
# Copyright 2020 BenchmarkXPRT Development Community
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
#*******************************************************************************
# Copyright 2017-2019 by Contributors
# \file bench_utils.py
# \brief utilities for benchmarking the 'hist' tree_method on both CPU and GPU architectures
# \author <NAME>
#*******************************************************************************
import os
import re
import bz2
import sys
import timeit
import tarfile
import requests
import numpy as np
import pandas as pd
from sklearn.metrics import log_loss
if sys.version_info[0] >= 3:
from urllib.request import urlretrieve # pylint: disable=import-error,no-name-in-module
else:
from urllib import urlretrieve # pylint: disable=import-error,no-name-in-module
#DATASET_DIR="./data/"
DATASET_DIR="/var/www/archive/" #MinIO
def measure(func, string, nrepeat):
t = timeit.Timer(stmt="%s()" % func.__name__, setup="from __main__ import %s" % func.__name__)
res = t.repeat(repeat=nrepeat, number=1)
def box_filter(timing, left=0.25, right=0.75): # statistically remove outliers and compute average
timing.sort()
size = len(timing)
if size == 1:
return timing[0]
Q1, Q2 = timing[int(size * left)], timing[int(size * right)]
IQ = Q2 - Q1
lower = Q1 - 1.5 * IQ
upper = Q2 + 1.5 * IQ
result = np.array([item for item in timing if lower < item < upper])
return np.mean(result)
timing = box_filter(res)
print((string + " = {:.4f} sec (").format(timing), res, ")")
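# Hedged illustrative sketch (not part of the original benchmark code): the same IQR-style
# filtering used inside measure(), applied to a standalone list of timings so the effect of
# discarding outliers before averaging is easy to see. The timing values are made up.
def _example_box_filter():
    timings = [1.02, 1.01, 1.03, 1.00, 5.70]  # one obvious outlier
    timings.sort()
    size = len(timings)
    Q1, Q2 = timings[int(size * 0.25)], timings[int(size * 0.75)]
    IQ = Q2 - Q1
    lower, upper = Q1 - 1.5 * IQ, Q2 + 1.5 * IQ
    kept = np.array([item for item in timings if lower < item < upper])
    return np.mean(kept)  # ~1.015; the 5.70 run is discarded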
def compute_logloss(y1, y2):
return log_loss(y1.ravel(), y2)
def download_file(url):
local_filename = DATASET_DIR + url.split('/')[-1]
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=2**20):
if chunk:
f.write(chunk)
return local_filename
def load_higgs(nrows_train, nrows_test, dtype):
"""
Higgs dataset from UCI machine learning repository (
https://archive.ics.uci.edu/ml/datasets/HIGGS).
TaskType:binclass
NumberOfFeatures:28
NumberOfInstances:11M
"""
if not os.path.isfile(DATASET_DIR + "HIGGS.csv.gz"):
print("Loading data set...")
download_file("https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz")
print("Reading data set...")
data = pd.read_csv(DATASET_DIR + "HIGGS.csv.gz", delimiter=",", header=None, compression="gzip", dtype=dtype, nrows=nrows_train+nrows_test)
print("Pre-processing data set...")
data = data[list(data.columns[1:])+list(data.columns[0:1])]
n_features = data.shape[1]-1
train_data = np.ascontiguousarray(data.values[:nrows_train,:n_features], dtype=dtype)
train_label = np.ascontiguousarray(data.values[:nrows_train,n_features], dtype=dtype)
test_data = np.ascontiguousarray(data.values[nrows_train:nrows_train+nrows_test,:n_features], dtype=dtype)
test_label = np.ascontiguousarray(data.values[nrows_train:nrows_train+nrows_test,n_features], dtype=dtype)
n_classes = len(np.unique(train_label))
return train_data, train_label, test_data, test_label, n_classes
def load_higgs1m(dtype):
return load_higgs(1000000, 500000, dtype)
def read_libsvm_msrank(file_obj, n_samples, n_features, dtype):
X = np.zeros((n_samples, n_features))
y = np.zeros((n_samples,))
counter = 0
regexp = re.compile(r'[A-Za-z0-9]+:(-?\d*\.?\d+)')
for line in file_obj:
line = str(line).replace("\\n'", "")
        line = regexp.sub(r'\g<1>', line)
line = line.rstrip(" \n\r").split(' ')
y[counter] = int(line[0])
X[counter] = [float(i) for i in line[1:]]
counter += 1
if counter == n_samples:
break
return np.array(X, dtype=dtype), np.array(y, dtype=dtype)
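# Hedged illustrative sketch (assumed input format, not part of the original code):
# read_libsvm_msrank() expects MSRank/libsvm-style lines such as "2 qid:10 1:0.03 2:0.12 ...",
# and the regexp strips the "index:" prefixes so only the numeric values remain
# (note that the qid value itself becomes the first feature, mirroring the parser above).
def _example_strip_libsvm_prefixes():
    line = "2 qid:10 1:0.03 2:0.12 3:-1.5"
    regexp = re.compile(r'[A-Za-z0-9]+:(-?\d*\.?\d+)')
    stripped = regexp.sub(r'\g<1>', line).rstrip(" \n\r").split(' ')
    label, features = int(stripped[0]), [float(v) for v in stripped[1:]]
    return label, features  # (2, [10.0, 0.03, 0.12, -1.5])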
def _make_gen(reader):
b = reader(1024 * 1024)
while b:
yield b
b = reader(1024 * 1024)
def _count_lines(filename):
with open(filename, 'rb') as f:
f_gen = _make_gen(f.read)
return sum(buf.count(b'\n') for buf in f_gen)
def load_msrank_10k(dtype):
"""
Dataset from szilard benchmarks: https://github.com/szilard/GBM-perf
TaskType:binclass
NumberOfFeatures:700
NumberOfInstances:10100000
"""
#url = "https://storage.mds.yandex.net/get-devtools-opensource/471749/msrank.tar.gz"
#tar = DATASET_DIR + "msrank.tar.gz"
#if not os.path.isfile(tar):
# print("Loading data set...")
# download_file(url)
#if not os.path.isfile(DATASET_DIR + "MSRank/train.txt"):
# tar = tarfile.open(tar, "r:gz")
# tar.extractall(DATASET_DIR)
# tar.close()
sets = []
labels = []
n_features = 137
print("Reading data set...")
for set_name in ['train.txt', 'vali.txt', 'test.txt']:
file_name = DATASET_DIR + os.path.join('MSRank', set_name)
n_samples = _count_lines(file_name)
with open(file_name, 'r') as file_obj:
X, y = read_libsvm_msrank(file_obj, n_samples, n_features, dtype)
sets.append(X)
labels.append(y)
sets[0] = np.vstack((sets[0], sets[1]))
labels[0] = np.hstack((labels[0], labels[1]))
sets = [ np.ascontiguousarray(sets[i]) for i in [0, 2]]
labels = [ np.ascontiguousarray(labels[i]) for i in [0, 2]]
n_classes = len(np.unique(labels[0]))
return sets[0], labels[0], sets[1], labels[1], n_classes
def load_airline_one_hot(dtype):
"""
Dataset from szilard benchmarks: https://github.com/szilard/GBM-perf
TaskType:binclass
NumberOfFeatures:700
NumberOfInstances:10100000
"""
url = 'https://s3.amazonaws.com/benchm-ml--main/'
name_train = 'train-10m.csv'
name_test = 'test.csv'
sets = []
labels = []
categorical_names = ["Month", "DayofMonth", "DayOfWeek", "UniqueCarrier", "Origin", "Dest"]
categorical_ids = [0, 1, 2, 4, 5, 6]
numeric_names = ["DepTime", "Distance"]
numeric_ids = [3, 7]
for name in [name_train, name_test]:
filename = os.path.join(DATASET_DIR, name)
if not os.path.exists(filename):
print("Loading", filename)
urlretrieve(url + name, filename)
print("Reading", filename)
df = pd.read_csv(filename, nrows=1000000) if name == 'train-10m.csv' else pd.read_csv(filename)
        X = df.drop('dep_delayed_15min', axis=1)
y = df["dep_delayed_15min"]
y_num = np.where(y == "Y", 1, 0)
sets.append(X)
labels.append(y_num)
n_samples_train = sets[0].shape[0]
X = | pd.concat(sets) | pandas.concat |
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
import pandas as pd
from scipy.optimize import minimize
import matplotlib.gridspec as gridspec
from datetime import date, timedelta
import geopandas as gpd
# get today's date
date_today = date.today()
year_t,month_t,date_t=str(date_today).split('-')
# The SIR model differential equations.
def deriv(y, t, N, beta,gamma):
S,I,R = y
dSdt = -(beta*I/N)*S
dIdt = (beta*S/N)*I - gamma*I
dRdt = gamma*I
return dSdt, dIdt, dRdt
#Integration of the differential equations
def time_evo(N,beta,gamma,I0=1,R0=0,t=np.arange(0,365)):
# Definition of the initial conditions
    # I0 and R0 denote the number of initially infected people (I0)
    # and the number of people who have recovered and are immune (R0).
    # t is the time grid.
S0=N-I0-R0 # number of people that can still contract the virus
# Initial conditions vector
y0 = S0, I0, R0
# Integrate the SIR equations over the time grid, t.
ret = odeint(deriv, y0, t, args=(N,beta,gamma))
S, I, R = np.transpose(ret)
return (t,S,I,R)
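# Hedged usage sketch (illustrative parameter values, not part of the original analysis):
# integrate the SIR model for a population of one million with R0 = beta/gamma = 2 and an
# infectious period of 14 days, and report the day and height of the infection peak.
def _example_sir_run():
    t, S, I, R = time_evo(N=10**6, beta=2 / 14, gamma=1 / 14, I0=10)
    return t[np.argmax(I)], I.max()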
countries_list=['Albania',
'Armenia',
'Austria',
'Azerbaijan',
'Belarus',
'Belgium',
'Bosnia and Herzegovina',
'Bulgaria',
'Cyprus',
'Croatia',
'Czechia',
'Denmark',
'Estonia',
'Finland',
'France',
'Georgia',
'Germany',
'Greece',
'Hungary',
'Iceland',
'Ireland',
'Israel',
'Italy',
'Kazakhstan',
'Kyrgyzstan',
'Latvia',
'Lithuania',
'Luxembourg',
'Malta',
'Moldova',
'Monaco',
'Montenegro',
'Netherlands',
'North Macedonia',
'Norway',
'Poland',
'Portugal',
'Romania',
'Serbia',
'Slovakia',
'Slovenia',
'Spain',
'Sweden',
'Switzerland',
'Turkey',
'Ukraine',
'United Kingdom']
# Import world data files
# In these files the rows are the countries and the columns are the days of the month (dates).
file_confirmed='https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
file_deaths='https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
file_recovered='https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv'
df_confirmed=pd.read_csv(file_confirmed)
df_deaths=pd.read_csv(file_deaths)
df_recovered=pd.read_csv(file_recovered)
countries_w_confirmed = df_confirmed['Country/Region']
countries_w_deaths = df_deaths['Country/Region']
countries_w_recovered = df_recovered['Country/Region']
#confirmed world
confirmed_world0 = df_confirmed.drop(['Province/State','Lat','Long'],
axis=1)
confirmed_world0.rename(index=countries_w_confirmed, inplace=True)
confirmed_world = confirmed_world0.drop(['Country/Region'],
axis=1).T.reset_index()
confirmed_world.rename(columns={'index':'Date'}, inplace=True)
#deaths world
deaths_world0 = df_deaths.drop(['Province/State','Lat','Long'],
axis=1)
deaths_world0.rename(index=countries_w_deaths, inplace=True)
deaths_world = deaths_world0.drop(['Country/Region'],
axis=1).T.reset_index()
deaths_world.rename(columns={'index':'Date'}, inplace=True)
#recovered world
recovered_world0 = df_recovered.drop(['Province/State','Lat','Long'],
axis=1)
recovered_world0.rename(index=countries_w_recovered, inplace=True)
recovered_world = recovered_world0.drop(['Country/Region'],
axis=1).T.reset_index()
recovered_world.rename(columns={'index':'Date'}, inplace=True)
confirmed_europe0 = confirmed_world[countries_list]
deaths_europe0 = deaths_world[countries_list]
recovered_europe0 = recovered_world[countries_list]
array_names=([])
for name in countries_list:
array_names.append([name,list(countries_w_confirmed).count(name)])
Totale=pd.DataFrame()
for i in range(0, len(countries_list)):
if array_names[i][1] > 1:
Totale.insert(i,
countries_list[i],
value=confirmed_europe0[countries_list[i]].T.sum())
elif array_names[i][1]==1:
Totale.insert(i,
countries_list[i],
value=confirmed_europe0[countries_list[i]].T)
Totale.insert(0, 'Date', confirmed_world['Date'])
Deceduti=pd.DataFrame()
for i in range(0, len(countries_list)):
if array_names[i][1] > 1:
Deceduti.insert(i,
countries_list[i],
value=deaths_europe0[countries_list[i]].T.sum())
elif array_names[i][1]==1:
Deceduti.insert(i,
countries_list[i],
value=deaths_europe0[countries_list[i]].T)
Deceduti.insert(0, 'Date', deaths_world['Date'])
Guariti=pd.DataFrame()
for i in range(0, len(countries_list)):
if array_names[i][1] > 1:
Guariti.insert(i,
countries_list[i],
value=recovered_europe0[countries_list[i]].T.sum())
elif array_names[i][1]==1:
Guariti.insert(i,
countries_list[i],
value=recovered_europe0[countries_list[i]].T)
Guariti.insert(0, 'Date', recovered_world['Date'])
#Active Infected
Attualmente_positivi=pd.DataFrame()
for i in range(0, len(countries_list)):
Attualmente_positivi.insert(i,
countries_list[i],
value=
Totale[countries_list[i]]-
Deceduti[countries_list[i]]-
Guariti[countries_list[i]])
Attualmente_positivi.insert(0, 'Date', confirmed_world['Date'])
Totale.to_csv('output/10_tot_casi_europe_'+date_t+month_t+'.csv', index=True)
Deceduti.to_csv('output/10_deceduti_europe_'+date_t+month_t+'.csv', index=True)
Guariti.to_csv('output/10_guariti_europe_'+date_t+month_t+'.csv', index=True)
Attualmente_positivi.to_csv('output/10_attualmente_positivi_europe_'+date_t+month_t+'.csv', index=True)
# Daily variation of active cases
Variazione_giornaliera = pd.DataFrame(Attualmente_positivi['Date'].iloc[1:])
for name in countries_list:
active_var=([])
for i in range(1,len(Attualmente_positivi)):
active_var.append(Attualmente_positivi[name][i]-Attualmente_positivi[name][i-1])
Variazione_giornaliera[name]=active_var
Variazione_giornaliera.to_csv('output/10_variazione_giornaliera_europe_'+date_t+month_t+'.csv', index=True)
def func_plot(df):
y_world=[]
n_cols=df.shape[1]
for i in range(n_cols-4):
y_world.append(df.iloc[:,i+4].sum())
x_world2=df.columns[4:]
x_world=pd.to_datetime(x_world2,infer_datetime_format=False)
return (x_world,y_world)
#Generalization to other countries
def whichcountry(name):
######## INPUT PARAMETERS ########
country=name
t0=pd.to_datetime('2020-01-22')
#################################
mask_coun=df_confirmed['Country/Region']==country # you can change the country here
mask_coun_rec=df_recovered['Country/Region']==country
df_confirmed_C=df_confirmed.loc[mask_coun,:]
df_deaths_C=df_deaths.loc[mask_coun,:]
df_recovered_C=df_recovered.loc[mask_coun_rec,:]
ytot=np.array(func_plot(df_confirmed_C)[1])
ydeaths=np.array(func_plot(df_deaths_C)[1])
yrec=np.array(func_plot(df_recovered_C)[1])
return ytot-ydeaths-yrec, ytot[-1], yrec[-1],ydeaths[-1]
xdata=pd.to_numeric(range(Attualmente_positivi.shape[0]))
today=len(xdata)
def minimizer(R0,t1=today-5,t2=today):
array_country_bis=array_country
#true data
ydata_inf_2=array_country[t1:t2]
xdata_2=np.arange(0,len(ydata_inf_2))
#model
fin_result=time_evo(60*10**6,1/14*R0,1/14,I0=ydata_inf_2[0])
i_vec=fin_result[2]
i_vec_2=i_vec[0:len(xdata_2)]
    # cumulative relative error (%) over the fitting window
error=np.sum(np.abs(ydata_inf_2-i_vec_2)/ydata_inf_2)*100
return error
minimizer_vec=np.vectorize(minimizer)
time_window=5
def minimizer_gen(t1,t2,xgrid=np.arange(0.1,5,0.01)):
ygrid=minimizer_vec(xgrid,t1=t1,t2=t2)
r0_ideal=round(xgrid[np.argmin(ygrid)],2)
return r0_ideal
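# Hedged usage sketch (not part of the original pipeline): estimate R0 for a single country
# over the last `time_window` days by scanning a grid of candidate values. Note that
# minimizer() reads the module-level `array_country`, so it has to be set first.
def _example_estimate_r0(country='Italy', grid=np.linspace(0, 3, 400)):
    global array_country
    array_country = whichcountry(country)[0]
    i = today - (time_window - 1)
    return minimizer_gen(i, i + time_window, grid)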
r0_today=[]
scangrid=np.linspace(0,3,400)
name_R0_array = []
for name in range(0, len(countries_list)):
array_country=whichcountry(countries_list[name])[0]
i = today-(time_window-1)
min_today=minimizer_gen(i,i+time_window,scangrid)
r0_today.append(min_today)
#scangrid=np.linspace(0,5,200)
name_R0_array.append([countries_list[name], min_today])
name_R0_df = pd.DataFrame(name_R0_array, columns=['Country', 'R0'])
countries_hist=['United Kingdom',
'Ukraine',
'Poland',
'Greece',
'Netherlands',
'Portugal',
'Belgium',
'France',
'Slovenia',
'Serbia',
'Spain',
'Italy',
'Sweden',
'Austria',
'Slovakia',
'Turkey']
hist_list=[]
for i in range(len(countries_hist)):
ind = name_R0_df.loc[name_R0_df['Country'] == countries_hist[i]].index[0]
hist_list.append([name_R0_df['Country'][ind], name_R0_df['R0'][ind]])
hist_df = pd.DataFrame(hist_list, columns=['Country', 'R0'])
hist_df.to_csv('output/10_R0_europe_hist_'+date_t+month_t+'.csv')
# get yesterday's date
yesterday = date.today() - timedelta(days=1)
year_y,month_y,date_y=str(yesterday).split('-')
r0_countries_imp = | pd.read_excel('input/input.xlsx') | pandas.read_excel |
#!/usr/bin/python3 `
""" The api.py module contains the classes and functions.
class tsSLD implements the Supervices Learning Data concept for modelled time series.
Auxuliary functions imports neural net models and AR models objects from predictor package/
"""
import copy
from os import getcwd,path
import sys
from pathlib import Path
import numpy as np
import datetime
from datetime import datetime, timedelta
from dateutil import parser
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import pmdarima as pm
import matplotlib.pyplot as plt
from clustgelDL.auxcfg import D_LOGS, log2All,exec_time
from predictor.utility import msg2log,shift,cSFMT
from predictor.NNmodel import MLP,LSTM,CNN
from predictor.Statmodel import tsARIMA
from stcgelDL.api import prepareLossCharts
""" NN model hyperparameters """
EPOCHS = 10 # training model
N_STEPS = 64
N_FEATURES = 1
UNITS = 32 # LSTM
FILTERS = 64 # CNN models
KERNEL_SIZE = 2
POOL_SIZE = 2
HIDDEN_NEYRONS = 16 # MLP model
DROPOUT = 0.2
""" ARIMA model hyperparameter """
SEASONALY_PERIOD = 6 # ARIMA: 6-hour season; use 144 for a daily season
PREDICT_LAG = 4
MAX_P = 3
MAX_Q = 2
MAX_D = 2
PSD_SEGMENT_SIZE = 512
if sys.platform == 'win32':
PATH_REPOSITORY = str(Path(Path(Path(getcwd()).drive) / '/' / "model_Repository"/ "offline_predictor"))
elif sys.platform == 'linux':
PATH_REPOSITORY = str(Path(Path.home() / "model_Repository"/"offline_predictor"))
""" ================================================================================================================ """
""" Classes definition """
indTS4SLD_predict=lambda n,rows,cols,i,j: n-(rows-1-i)-(cols-j)
indTS4out_predict=lambda n,rows,i: n-(rows-1-i)
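# Note on the index helpers above (added for clarity): they map an element of the
# rows x cols supervised-learning block back to a position in the raw series whose last
# position is n. The bottom-right element (i = rows - 1, j = cols) maps to n itself, and
# every step up one row or left one column moves one observation further into the past, e.g.
#   indTS4SLD_predict(n=100, rows=10, cols=32, i=9, j=32) == 100
#   indTS4SLD_predict(n=100, rows=10, cols=32, i=9, j=31) == 99
#   indTS4out_predict(n=100, rows=10, i=8) == 99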
class tsSLD(object):
def __init__(self,df:pd.DataFrame = None, data_col_name:str = None, dt_col_name:str = None, n_step:int = 32,
n_eval:int = 256, n_test:int = 64,bscaled:bool = False, discret:int = 10,title:str = None,
f:object = None):
self.df = df
df[dt_col_name] = | pd.to_datetime(df[dt_col_name], dayfirst=True) | pandas.to_datetime |
import torch
import sys
import importlib
import os
from sklearn.neighbors import NearestNeighbors
import transform as t
import ShapeNetDataLoader as dset
import numpy as np
sys.path.append("/content/treelearning/python")
import cloud
position_path = "/content/drive/MyDrive/Colab/tree_learning/data/positions_attempt2.json"
def get_device(cuda_preference=True):
print('cuda available:', torch.cuda.is_available(),
'; cudnn available:', torch.backends.cudnn.is_available(),
'; num devices:', torch.cuda.device_count())
use_cuda = False if not cuda_preference else torch.cuda.is_available()
device = torch.device('cuda:0' if use_cuda else 'cpu')
device_name = torch.cuda.get_device_name(device) if use_cuda else 'cpu'
print('Using device', device_name)
return device
def gen_split(percentages=(0.5, 0.2),
paths=("/content/Pointnet_Pointnet2_pytorch/data/trainsplit.npy",
"/content/Pointnet_Pointnet2_pytorch/data/valsplit.npy"),
sample_number=252,
shuffle=True,
seed=1):
import random
import numpy as np
if shuffle:
random.seed(seed)
indices = range(sample_number)
indices = np.array(random.sample(indices, sample_number))
else:
indices = np.arange(0, sample_number)
start, percentage = 0, 0
for i, (path) in enumerate(paths):
percentage += percentages[i]
stop = np.floor(percentage * sample_number).astype(int)
index_subset = indices[start:stop]
np.save(path, index_subset)
start = stop
def gen_spatial_split(percentages=(0.7, 0.3),
paths=("/content/Pointnet_Pointnet2_pytorch/data/trainsplit.npy",
"/content/Pointnet_Pointnet2_pytorch/data/valsplit.npy"),
position_path=position_path,
sample_number=252,
shuffle=True,
seed=1):
# constructs a rectangle that encompasses a part of the forest given by percentages
import random
import numpy as np
with open(position_path, "r") as f:
positions = json.load(f)
positions = np.array([i[1] for i in positions])
minval, maxval = np.amin(positions, axis=0)[0:2], np.amax(positions, axis=0)[0:2]
sidelength = np.sqrt(percentages[1]) * (maxval - minval)
if shuffle:
random.seed(seed)
offset = np.array([random.random(), random.random()])
offset = offset * (maxval - minval - sidelength)
isin = np.all(np.logical_and(positions[:, :2] > minval + offset, positions[:, :2] < minval + sidelength + offset), axis=1)
else:
isin = np.all(positions[:,:2] > maxval[:2] - sidelength, axis=1)
offset = 0
indices = np.arange(0, len(positions))
np.save(paths[1], indices[isin])
np.save(paths[0], indices[np.invert(isin)])
return sidelength, offset
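# Hedged usage sketch (paths are assumptions, not part of the original module): hold out a
# spatially contiguous 30% of the forest as validation so train and validation trees do not
# overlap in space; the function returns the side length and offset of the held-out square.
def _example_spatial_split():
    sidelength, offset = gen_spatial_split(
        percentages=(0.7, 0.3),
        paths=("/tmp/trainsplit.npy", "/tmp/valsplit.npy"),
        shuffle=True,
        seed=1,
    )
    return sidelength, offset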
def get_model(source_path, device):
model_name = set(os.listdir(source_path)) - set(
["pointnet2_utils.py", "logs", "checkpoints", "performance", "split", "__pycache__"])
model_name = list(model_name)[0]
model_name = model_name[0:-3]
sys.path.append(source_path)
model = importlib.import_module(model_name)
def inplace_relu(m):
classname = m.__class__.__name__
if classname.find('ReLU') != -1:
m.inplace = True
classifier = model.get_model(2, normal_channel=False).to(device)
classifier.apply(inplace_relu)
model_path = source_path + "/checkpoints/best_model.pth"
checkpoint = torch.load(model_path)
classifier.load_state_dict(checkpoint['model_state_dict'])
return classifier
def gen_pred(classifier, tree_number, treedataset, device):
# predict targets for arbitrary tree number
points, label, target, _, upoints, alltarget = treedataset[tree_number]
points, label, target = torch.tensor(points), torch.tensor(label), torch.tensor(target)
points, target = torch.unsqueeze(points, 0), torch.unsqueeze(target, 0)
points, label, target = points.float().to(device), label.long().to(device), target.long().to(device)
points = points.transpose(2, 1)
def to_categorical(y, num_classes):
""" 1-hot encodes a tensor """
new_y = torch.eye(num_classes)[y.cpu().data.numpy(),]
if y.is_cuda:
return new_y.cuda()
return new_y
with torch.no_grad():
classifier.eval()
result = classifier(points, to_categorical(label, 1))[0]
pred_probabilities = torch.exp(result[0])[:, 1].detach().cpu().numpy()
return pred_probabilities, upoints
def find_neighbours(upoints, allpoints, k):
nbrs = NearestNeighbors(n_neighbors=k, algorithm='ball_tree').fit(upoints)
neighbours_indices = nbrs.kneighbors(allpoints, k, return_distance=False)
return neighbours_indices
def extrapolate(pred_probabilities, neighbours_indices):
# produce nxk array
mapped_probabilities = pred_probabilities[neighbours_indices]
return np.mean(mapped_probabilities, axis=1)
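# Hedged illustrative sketch (synthetic points, not part of the original pipeline): predictions
# made on the voxel-subsampled cloud (upoints) are propagated to every raw point by averaging
# the predictions of its k nearest subsampled neighbours.
def _example_extrapolate():
    upoints = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    pred_probabilities = np.array([0.9, 0.1, 0.5])
    allpoints = np.array([[0.1, 0.0, 0.0], [0.9, 0.1, 0.0]])
    idx = find_neighbours(upoints, allpoints, 2)
    return extrapolate(pred_probabilities, idx)  # array([0.5, 0.5])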
def compute_certainty_score(probability, threshold):
if (probability - threshold) < 0:
certainty_score = (probability - threshold) / threshold
else:
certainty_score = (probability - threshold) / (1 - threshold)
return certainty_score
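# Worked example for compute_certainty_score (added for clarity): with threshold = 0.4,
# a probability of 0.2 gives (0.2 - 0.4) / 0.4 = -0.5, while 0.7 gives
# (0.7 - 0.4) / (1 - 0.4) = 0.5. Scores therefore lie in [-1, 1], with 0 meaning the
# prediction sits exactly at the decision threshold.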
def multi_sample_ensemble(source_path, npoints, tree_number, n_samples=5, method="mean"):
split_path = source_path + "/split/valsplit.npy"
root = "/content/Pointnet_Pointnet2_pytorch/data/"
# if best threshold is available, choose it, otherwise simply use 0.5 as threshold
try:
checkpoint = torch.load(source_path + '/checkpoints/best_model.pth')
best_threshold = checkpoint["best_threshold"]
except:
best_threshold = 0.5
use_cuda = torch.cuda.is_available()
device = torch.device('cuda:0' if use_cuda else 'cpu')
classifier = get_model(source_path, device)
dataset = dset.PartNormalDataset(root=root,
npoints=npoints,
transform=t.Compose([t.Normalize()]),
splitpath=split_path,
normal_channel=False, mode="eval")
# generate n_samples predictions
allpoints = dataset[tree_number][3]
targets = dataset[tree_number][5]
preds = np.empty((len(allpoints), n_samples))
for i in range(n_samples):
pred_probabilities, upoints = gen_pred(classifier, tree_number=tree_number, treedataset=dataset, device=device)
indices = find_neighbours(upoints, allpoints, 5)
preds[:, i] = extrapolate(pred_probabilities, indices)
if method == "mean":
preds = np.vectorize(compute_certainty_score)(preds, best_threshold)
preds = preds / 2 + 0.5
preds = np.mean(preds, axis=1)
elif method == "majority":
preds = (preds > best_threshold).astype("int")
preds = np.mean(preds, axis=1)
return preds, allpoints, targets, best_threshold
def multi_model_ensemble(source_paths, npoints, tree_number, n_samples=5, method="mean"):
best_thresholds = []
preds = []
for source_path in source_paths:
pred, allpoints, targets, best_threshold = multi_sample_ensemble(source_path, npoints, tree_number, n_samples, method)
preds.append(pred)
best_thresholds.append(best_threshold)
preds = np.array(preds).T
if method == "mean":
preds = np.mean(preds, axis=1)
elif method == "majority":
preds = (preds >= 0.5).astype("int")
preds = np.mean(preds, axis=1)
return preds, allpoints, targets, best_thresholds
position_path = "/content/drive/MyDrive/Colab/tree_learning/data/positions_attempt2.json"
import json
from tqdm import tqdm
forest_path = "/content/drive/MyDrive/Colab/tree_learning/data/forest_labeled_cleanest2.npy"
def fnv_hash_vec(arr):
"""
FNV64-1A see wikipedia
"""
assert arr.ndim == 2
# Floor first for negative points
hashed_arr = arr[:, 0] * arr[:, 1] * arr[:, 2] + 0.1 * np.sqrt(arr[:, 2])
return hashed_arr.tolist()
def multi_tree_ensemble(source_paths, npoints, tree_number, radius=10, n_samples=5, method="mean",
position_path=position_path):
"""
    This function is only a proof of concept. Because of the voxelization, points from
    different chunks cannot easily be matched to their partners in the other chunks;
    only roughly 1/3 of them are matched, and the targets are not usable.
"""
# determine tree numbers where a prediction is needed
with open(position_path, "r") as f:
positions = json.load(f)
split = np.load(source_paths[0] + "/split/valsplit.npy")
old_number = tree_number
tree_number = split[tree_number]
positions = np.array([i[1] for i in positions])
center = positions[tree_number] # todo verify that this is correct
distances = np.linalg.norm(positions[:, :2] - center[:2], ord=None, axis=1)
tree_indices = np.argwhere(distances < radius)
tree_indices = tree_indices.reshape(len(tree_indices))
print(tree_indices)
# only choose tree_indices in valsplit
ids = []
for index in tree_indices:
test = np.argwhere(index == split)
if len(test) > 0:
ids.append(test[0, 0])
print(ids)
# detect points that are common to other trees and main tree
pc = cloud.Cloud(points_path=forest_path, position_path=position_path, subsetting=1)
pc.filter(positions[tree_number], radius=6.9, remove999=True)
relevant_points = pc.filtered_points[:, 0:3]
hash = fnv_hash_vec(relevant_points)
print(len(np.unique(hash)), len(hash))
pointlist = []
for i, index in enumerate(tree_indices):
pc.filter(positions[index], radius=6.9, remove999=True)
points = pc.filtered_points[:, 0:3]
hashnew = fnv_hash_vec(points)
print(len(np.unique(hashnew)), len(hashnew))
if not (index == tree_number):
pointlist.append(np.hstack((points, np.isin(hashnew, hash)[:, np.newaxis])))
# generate predictions
all_preds = []
assert len(ids) > 0
for i, tree in enumerate(tree_indices):
pred, points, target = multi_sample_ensemble2(source_paths[0], npoints, tree_number=tree,
n_samples=n_samples)[:3]
start = positions[tree_indices[i]]
points = points + start
print(len(points))
if tree == tree_number:
relevant_points = points.copy()
prediction = pred
alltarget = target
else:
all_preds.append((points, pred))
# aggregate predictions on pointlist
import pandas as pd
stack = np.hstack(
(relevant_points, prediction[:, np.newaxis], alltarget[:, np.newaxis], np.array(hash)[:, np.newaxis]))
rel = | pd.DataFrame(stack, columns=["x", "y", "z", "pred", "target", "hash"]) | pandas.DataFrame |
"""
Main pipeline helpers
=====================
"""
import os
import sys
import json
import time
import uuid
import logging
import itertools
from dataclasses import asdict
from datetime import datetime
import multiprocessing
from collections import Counter
from pprint import pprint
import pandas as pd
import numpy as np
from tqdm import tqdm
import sklearn.model_selection
import joblib
from .config_reader import ConfigReader
from .misc import JSONEncoder, get_df_hash
from .nested_dict import flatten_dict, set_nested_value
from .config_manager import Mode
# from ..models.fasttext import FastText
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def enable_local_loggers():
for name, logger in logging.root.manager.loggerDict.items():
if 'txcl' in name:
logger.disabled = False
def get_model(model_name):
"""Dynamically import model class and return model instance"""
if model_name == 'fasttext':
from ..models.fasttext import FastText
enable_local_loggers()
return FastText()
if model_name == 'fasttext_pretrain':
from ..models.fasttext_pretrain import FastTextPretrain
enable_local_loggers()
return FastTextPretrain()
if model_name == 'bag_of_words':
from ..models.bag_of_words import BagOfWordsModel
enable_local_loggers()
return BagOfWordsModel()
if model_name == 'bertmodel':
from ..models.bertmodel import BERTModel
enable_local_loggers()
return BERTModel()
if model_name == 'openai_gpt2':
from ..models.openai_gpt2 import OpenAIGPT2
enable_local_loggers()
return OpenAIGPT2()
if model_name == 'dummy':
from ..models.dummy_models import DummyModel
enable_local_loggers()
return DummyModel()
if model_name == 'random':
from ..models.dummy_models import RandomModel
enable_local_loggers()
return RandomModel()
if model_name == 'weighted_random':
from ..models.dummy_models import WeightedRandomModel
enable_local_loggers()
return WeightedRandomModel()
else:
raise NotImplementedError("Model '{}' is unknown".format(model_name))
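# Hedged usage sketch (not part of the original module): the factory above defers the heavy
# framework imports until a model is actually requested, so cheap baselines stay cheap to load.
def _example_get_model():
    model = get_model('random')  # dummy baseline; avoids importing the fasttext/BERT machinery
    return model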
def preprocess(run_config):
try:
set_label_mapping = __import__(
'txcl.models.' + run_config.model.name,
fromlist=['set_label_mapping']).set_label_mapping
set_label_mapping(run_config.data.train,
run_config.data.test,
run_config.path.output)
except AttributeError:
logger.info("No 'set_label_mapping'")
prepare_data = __import__(
'txcl.models.' + run_config.model.name,
fromlist=['prepare_data']).prepare_data
for v in asdict(run_config.data).values():
data_path = prepare_data(
v, run_config.path.output,
asdict(run_config.preprocess))
if isinstance(data_path, list):
data_path = ', '.join(data_path)
logger.info(f"Prepared data from '{v}' to '{data_path}'")
def test(run_config, model=None, validation=False):
mode = 'validation' if validation else 'test'
logger.info(f"\n\n{mode.capitalize()} results for '{run_config.name}':\n")
output = '\n'
model = model if model else get_model(run_config.model.name)
result = model.test(run_config, validation)
if result is None:
logger.warning(f'No {mode} results were generated')
return
if run_config.write_test_output:
keys = ['text', 'label', 'prediction']
df = | pd.DataFrame({i: result[i] for i in keys}) | pandas.DataFrame |
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for mother status is invalid.',
affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
valid_values = ['0','1']
# prepare to merge
oc3.reset_index(inplace=True)
header.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end)
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left')
# Raise error if provided <MOTHER> is not a valid value.
value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values))
# If not provided
female = (merged['SEX']=='1')
eps_in_year = (merged['EPS_COUNT']>0)
none_provided = (merged['ACTIV'].isna()& merged['ACCOM'].isna()& merged['IN_TOUCH'].isna())
# If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided
mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided)))
# That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error.
error_locs_eps = merged.loc[mask, 'index_eps']
error_locs_header = merged.loc[mask, 'index_er']
error_locs_oc3 = merged.loc[mask, 'index']
return {'Header':error_locs_header.dropna().unique().tolist(),
'OC3':error_locs_oc3.dropna().unique().tolist()}
return error, _validate
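# Hedged usage sketch (calling convention inferred from the code above; attribute access on
# ErrorDefinition is an assumption): each validate_* factory returns (ErrorDefinition, _validate);
# _validate expects a dict of DataFrames keyed by table name plus a 'metadata' entry with the
# collection dates, and returns {table_name: [row indices that breach the rule]}.
def _example_run_rule_165(dfs):
    error_definition, check = validate_165()
    return error_definition.code, check(dfs)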
def validate_1014():
error = ErrorDefinition(
code='1014',
description='UASC information is not required for care leavers',
affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
uasc = dfs['UASC']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
# prepare to merge
oc3.reset_index(inplace=True)
uasc.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
date_check = (
((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end))
| ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end))
| ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna())
)
episodes['EPS'] = date_check
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
# inner merge to take only episodes of children which are also found on the uasc table
merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD',
how='left')
# adding suffixes with the secondary merge here does not go so well yet.
some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna())
mask = (merged['EPS_COUNT'] == 0) & some_provided
error_locs_uasc = merged.loc[mask, 'index_sc']
error_locs_oc3 = merged.loc[mask, 'index']
return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()}
return error, _validate
# !# not sure what this rule is actually supposed to be getting at - description is confusing
def validate_197B():
error = ErrorDefinition(
code='197B',
description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
(
(oc2['DOB'] + pd.DateOffset(years=4) == start) # ???
| (oc2['DOB'] + pd.DateOffset(years=17) == start)
)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
& oc2['SDQ_REASON'].isna()
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_157():
error = ErrorDefinition(
code='157',
description="Child is aged 4 years or over at the beginning of the year or 16 years or under at the end of the "
"year and Strengths and Difficulties Questionnaire (SDQ) 1 has been recorded as the reason for no "
"Strengths and Difficulties Questionnaire (SDQ) score.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
oc2['CONTINUOUSLY_LOOKED_AFTER']
& (oc2['DOB'] + pd.DateOffset(years=4) <= start)
& (oc2['DOB'] + pd.DateOffset(years=16) >= endo)
& oc2['SDQ_SCORE'].isna()
& (oc2['SDQ_REASON'] == 'SDQ1')
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_357():
error = ErrorDefinition(
code='357',
description='If this is the first episode ever for this child, reason for new episode must be S. '
'Check whether there is an episode immediately preceding this one, which has been left out. '
'If not the reason for new episode code must be amended to S.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
eps = dfs['Episodes']
eps['DECOM'] = pd.to_datetime(eps['DECOM'], format='%d/%m/%Y', errors='coerce')
eps = eps.loc[eps['DECOM'].notnull()]
first_eps = eps.loc[eps.groupby('CHILD')['DECOM'].idxmin()]
errs = first_eps[first_eps['RNE'] != 'S'].index.to_list()
return {'Episodes': errs}
return error, _validate
def validate_117():
error = ErrorDefinition(
code='117',
description='Date of decision that a child should/should no longer be placed for adoption is beyond the current collection year or after the child ceased to be looked after.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_PLACED', 'DEC', 'REC', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placed_adoption = dfs['PlacedAdoption']
collection_end = dfs['metadata']['collection_end']
# datetime
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# Drop nans and continuing episodes
episodes = episodes.dropna(subset=['DECOM'])
episodes = episodes[episodes['REC'] != 'X1']
episodes = episodes.loc[episodes.groupby('CHILD')['DECOM'].idxmax()]
# prepare to merge
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
p4a_cols = ['DATE_PLACED', 'DATE_PLACED_CEASED']
# latest episodes
merged = episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
mask = (
(merged['DATE_PLACED'] > collection_end)
| (merged['DATE_PLACED'] > merged['DEC'])
| (merged['DATE_PLACED_CEASED'] > collection_end)
| (merged['DATE_PLACED_CEASED'] > merged['DEC'])
)
# If provided <DATE_PLACED> and/or <DATE_PLACED_CEASED> must not be > <COLLECTION_END_DATE> or <DEC> of latest episode where <REC> not = 'X1'
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_118():
error = ErrorDefinition(
code='118',
description='Date of decision that a child should no longer be placed for adoption is before the current collection year or before the date the child started to be looked after.',
affected_fields=['DECOM', 'DECOM', 'LS']
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
code_list = ['V3', 'V4']
# datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
filter_by_ls = episodes[~(episodes['LS'].isin(code_list))]
earliest_episode_idxs = filter_by_ls.groupby('CHILD')['DECOM'].idxmin()
earliest_episodes = episodes[episodes.index.isin(earliest_episode_idxs)]
# prepare to merge
placed_adoption.reset_index(inplace=True)
earliest_episodes.reset_index(inplace=True)
# merge
merged = earliest_episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
# drop rows where DATE_PLACED_CEASED is not provided
merged = merged.dropna(subset=['DATE_PLACED_CEASED'])
# If provided <DATE_PLACED_CEASED> must not be prior to <COLLECTION_START_DATE> or <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
mask = (merged['DATE_PLACED_CEASED'] < merged['DECOM']) | (merged['DATE_PLACED_CEASED'] < collection_start)
# error locations
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_352():
error = ErrorDefinition(
code='352',
description='Child who started to be looked after was aged 18 or over.',
affected_fields=['DECOM', 'RNE'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
care_start = episodes_merged['RNE'].str.upper().astype(str).isin(['S'])
started_over_18 = episodes_merged['DOB18'] <= episodes_merged['DECOM']
error_mask = care_start & started_over_18
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_209():
error = ErrorDefinition(
code='209',
description='Child looked after is of school age and should not have an unknown Unique Pupil Number (UPN) code of UN1.',
affected_fields=['UPN', 'DOB']
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
collection_start = dfs['metadata']['collection_start']
# convert to datetime
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
yr = collection_start.year - 1
reference_date = pd.to_datetime('31/08/' + str(yr), format='%d/%m/%Y', errors='coerce')
# If <DOB> >= 4 years prior to 31/08/YYYY then <UPN> should not be 'UN1' Note: YYYY in this instance refers to the year prior to the collection start (for collection year 2019-2020, it would be looking at the 31/08/2018).
mask = (reference_date >= (header['DOB'] + pd.offsets.DateOffset(years=4))) & (header['UPN'] == 'UN1')
# error locations
error_locs_header = header.index[mask]
return {'Header': error_locs_header.tolist()}
return error, _validate
def validate_198():
error = ErrorDefinition(
code='198',
description="Child has not been looked after continuously for at least 12 months at 31 March but a reason "
"for no Strengths and Difficulties (SDQ) score has been completed. ",
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_REASON'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_185():
error = ErrorDefinition(
code='185',
description="Child has not been looked after continuously for at least 12 months at " +
"31 March but a Strengths and Difficulties (SDQ) score has been completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_186():
error = ErrorDefinition(
code='186',
description="Children aged 4 or over at the start of the year and children aged under 17 at the " +
"end of the year and who have been looked after for at least 12 months continuously " +
"should have a Strengths and Difficulties (SDQ) score completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_start_str = dfs['metadata']['collection_start']
collection_end_str = dfs['metadata']['collection_end']
collection_start = pd.to_datetime(collection_start_str, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2 = add_CLA_column(dfs, 'OC2')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
oc2['17th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=17)
error_mask = (
(oc2['4th_bday'] <= collection_start)
& (oc2['17th_bday'] > collection_end)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_187():
error = ErrorDefinition(
code='187',
description="Child cannot be looked after continuously for 12 months at " +
"31 March (OC2) and have any of adoption or care leavers returns completed.",
affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR', # OC3
'IN_TOUCH', 'ACTIV', 'ACCOM'], # AD1
)
def _validate(dfs):
if (
'OC3' not in dfs
or 'AD1' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
ad1, oc3 = add_CLA_column(dfs, ['AD1', 'OC3'])
# OC3
should_be_blank = ['IN_TOUCH', 'ACTIV', 'ACCOM']
oc3_mask = oc3['CONTINUOUSLY_LOOKED_AFTER'] & oc3[should_be_blank].notna().any(axis=1)
oc3_error_locs = oc3[oc3_mask].index.to_list()
# AD1
should_be_blank = ['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR']
ad1_mask = ad1['CONTINUOUSLY_LOOKED_AFTER'] & ad1[should_be_blank].notna().any(axis=1)
ad1_error_locs = ad1[ad1_mask].index.to_list()
return {'AD1': ad1_error_locs,
'OC3': oc3_error_locs}
return error, _validate
def validate_188():
error = ErrorDefinition(
code='188',
description="Child is aged under 4 years at the end of the year, "
"but a Strengths and Difficulties (SDQ) score or a reason "
"for no SDQ score has been completed. ",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_end_str = dfs['metadata']['collection_end']
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
error_mask = (
(oc2['4th_bday'] > collection_end)
& oc2[['SDQ_SCORE', 'SDQ_REASON']].notna().any(axis=1)
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
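# Illustrative sketch (not part of the rule definitions): a minimal, hypothetical
# check of validate_188 against a toy OC2 table. The fixture values and the helper
# name _example_validate_188 are placeholders chosen for illustration; real test
# fixtures for this rule may differ.
def _example_validate_188():
    import pandas as pd
    fake_oc2 = pd.DataFrame({
        'DOB': ['01/01/2019', '01/01/2001'],  # child 0 is under 4 at the collection end
        'SDQ_SCORE': ['10', pd.NA],
        'SDQ_REASON': [pd.NA, pd.NA],
    })
    fake_dfs = {'OC2': fake_oc2, 'metadata': {'collection_end': '31/03/2021'}}
    _, error_func = validate_188()
    # Only row 0 breaches the rule: the child is under 4 but an SDQ score is recorded.
    assert error_func(fake_dfs) == {'OC2': [0]}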
def validate_190():
error = ErrorDefinition(
code='190',
description="Child has not been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been completed.",
affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
, # AD1
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_blank = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
mask = ~oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_blank].notna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_191():
error = ErrorDefinition(
code='191',
description="Child has been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been left blank.",
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'], # OC2
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_present = ['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE']
mask = oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_present].isna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_607():
error = ErrorDefinition(
code='607',
description='Child ceased to be looked after in the year, but mother field has not been completed.',
affected_fields=['DEC', 'REC', 'MOTHER', 'LS', 'SEX']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
code_list = ['V3', 'V4']
            # convert to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# CEASED_TO_BE_LOOKED_AFTER = DEC is not null and REC is filled but not equal to X1
CEASED_TO_BE_LOOKED_AFTER = merged['DEC'].notna() & ((merged['REC'] != 'X1') & merged['REC'].notna())
# and <LS> not = ‘V3’ or ‘V4’
check_LS = ~(merged['LS'].isin(code_list))
# and <DEC> is in <CURRENT_COLLECTION_YEAR
check_DEC = (collection_start <= merged['DEC']) & (merged['DEC'] <= collection_end)
# Where <CEASED_TO_BE_LOOKED_AFTER> = ‘Y’, and <LS> not = ‘V3’ or ‘V4’ and <DEC> is in <CURRENT_COLLECTION_YEAR> and <SEX> = ‘2’ then <MOTHER> should be provided.
mask = CEASED_TO_BE_LOOKED_AFTER & check_LS & check_DEC & (merged['SEX'] == '2') & (merged['MOTHER'].isna())
header_error_locs = merged.loc[mask, 'index_er']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_210():
error = ErrorDefinition(
code='210',
description='Children looked after for more than a week at 31 March should not have an unknown Unique Pupil Number (UPN) code of UN4.',
affected_fields=['UPN', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_end = dfs['metadata']['collection_end']
# convert to datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
yr = collection_end.year
            reference_date = pd.to_datetime('24/03/' + str(yr), format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
# the logical way is to merge left on UPN but that will be a one to many merge and may not go as well as a many to one merge that we've been doing.
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
            # If <UPN> = 'UN4' then no episode <DECOM> must be >= 24/03/YYYY. Note: YYYY refers to the current collection year.
mask = (merged['UPN'] == 'UN4') & (merged['DECOM'] >= reference_date)
# error locations
error_locs_header = merged.loc[mask, 'index_er']
error_locs_eps = merged.loc[mask, 'index_eps']
return {'Episodes': error_locs_eps.tolist(), 'Header': error_locs_header.unique().tolist()}
return error, _validate
def validate_1010():
error = ErrorDefinition(
code='1010',
description='This child has no episodes loaded for current year even though there was an open episode of '
+ 'care at the end of the previous year, and care leaver data has been entered.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
oc3 = dfs['OC3']
            # convert DECOM to datetime, drop rows with missing/invalid dates, then sort by CHILD and DECOM
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last = episodes_last.dropna(subset=['DECOM']).sort_values(['CHILD', 'DECOM'], ascending=True)
# Keep only the final episode for each child (ie where the following row has a different CHILD value)
episodes_last = episodes_last[
episodes_last['CHILD'].shift(-1) != episodes_last['CHILD']
]
# Keep only the final episodes that were still open
episodes_last = episodes_last[episodes_last['DEC'].isna()]
# The remaining children ought to have episode data in the current year if they are in OC3
has_current_episodes = oc3['CHILD'].isin(episodes['CHILD'])
has_open_episode_last = oc3['CHILD'].isin(episodes_last['CHILD'])
error_mask = ~has_current_episodes & has_open_episode_last
validation_error_locations = oc3.index[error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_525():
error = ErrorDefinition(
code='525',
description='A child for whom the decision to be placed for adoption has been reversed cannot be adopted during the year.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR',
'LS_ADOPTR']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs or 'AD1' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
ad1 = dfs['AD1']
# prepare to merge
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = placed_adoption.merge(ad1, on='CHILD', how='left', suffixes=['_placed', '_ad1'])
# If <DATE_PLACED_CEASED> not Null, then <DATE_INT>; <DATE_MATCH>; <FOSTER_CARE>; <NB_ADOPTR>; <SEX_ADOPTR>; and <LS_ADOPTR> should not be provided
mask = merged['DATE_PLACED_CEASED'].notna() & (
merged['DATE_INT'].notna() | merged['DATE_MATCH'].notna() | merged['FOSTER_CARE'].notna() |
merged['NB_ADOPTR'].notna() | merged['SEX_ADOPTR'].notna() | merged['LS_ADOPTR'].notna())
# error locations
pa_error_locs = merged.loc[mask, 'index_placed']
ad_error_locs = merged.loc[mask, 'index_ad1']
# return result
return {'PlacedAdoption': pa_error_locs.tolist(), 'AD1': ad_error_locs.tolist()}
return error, _validate
def validate_335():
error = ErrorDefinition(
code='335',
description='The current foster value (0) suggests that child is not adopted by current foster carer, but last placement is A2, A3, or A5. Or the current foster value (1) suggests that child is adopted by current foster carer, but last placement is A1, A4 or A6.',
affected_fields=['PLACE', 'FOSTER_CARE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'AD1' not in dfs:
return {}
else:
episodes = dfs['Episodes']
ad1 = dfs['AD1']
# prepare to merge
episodes.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = episodes.merge(ad1, on='CHILD', how='left', suffixes=['_eps', '_ad1'])
            # Where <PL> = 'A2', 'A3' or 'A5' and <REC> = 'E1', 'E11', 'E12' <FOSTER_CARE> should not be '0'; Where <PL> = 'A1', 'A4' or 'A6' and <REC> = 'E1', 'E11', 'E12' <FOSTER_CARE> should not be '1'.
mask = (
merged['REC'].isin(['E1', 'E11', 'E12']) & (
(merged['PLACE'].isin(['A2', 'A3', 'A5']) & (merged['FOSTER_CARE'].astype(str) == '0'))
| (merged['PLACE'].isin(['A1', 'A4', 'A6']) & (merged['FOSTER_CARE'].astype(str) == '1'))
)
)
eps_error_locs = merged.loc[mask, 'index_eps']
ad1_error_locs = merged.loc[mask, 'index_ad1']
# use .unique since join is many to one
return {'Episodes': eps_error_locs.tolist(), 'AD1': ad1_error_locs.unique().tolist()}
return error, _validate
def validate_215():
error = ErrorDefinition(
code='215',
description='Child has care leaver information but one or more data items relating to children looked after for 12 months have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK',
'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
)
def _validate(dfs):
if 'OC3' not in dfs or 'OC2' not in dfs:
return {}
else:
oc3 = dfs['OC3']
oc2 = dfs['OC2']
# prepare to merge
oc3.reset_index(inplace=True)
oc2.reset_index(inplace=True)
merged = oc3.merge(oc2, on='CHILD', how='left', suffixes=['_3', '_2'])
# If any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided then <CONVICTED>; <HEALTH_CHECK>; <IMMUNISATIONS>; <TEETH_CHECK>; <HEALTH_ASSESSMENT>; <SUBSTANCE MISUSE>; <INTERVENTION_RECEIVED>; <INTERVENTION_OFFERED>; should not be provided
mask = (merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna()) & (
merged['CONVICTED'].notna() | merged['HEALTH_CHECK'].notna() | merged['IMMUNISATIONS'].notna() |
merged['TEETH_CHECK'].notna() | merged['HEALTH_ASSESSMENT'].notna() | merged[
'SUBSTANCE_MISUSE'].notna() | merged['INTERVENTION_RECEIVED'].notna() | merged[
'INTERVENTION_OFFERED'].notna())
# error locations
oc3_error_locs = merged.loc[mask, 'index_3']
oc2_error_locs = merged.loc[mask, 'index_2']
return {'OC3': oc3_error_locs.tolist(), 'OC2': oc2_error_locs.tolist()}
return error, _validate
def validate_399():
error = ErrorDefinition(
code='399',
description='Mother field, review field or participation field are completed but '
+ 'child is looked after under legal status V3 or V4.',
affected_fields=['MOTHER', 'LS', 'REVIEW', 'REVIEW_CODE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs or 'Reviews' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
reviews = dfs['Reviews']
code_list = ['V3', 'V4']
# prepare to merge
episodes['index_eps'] = episodes.index
header['index_hdr'] = header.index
reviews['index_revs'] = reviews.index
# merge
merged = (episodes.merge(header, on='CHILD', how='left')
.merge(reviews, on='CHILD', how='left'))
# If <LS> = 'V3' or 'V4' then <MOTHER>, <REVIEW> and <REVIEW_CODE> should not be provided
mask = merged['LS'].isin(code_list) & (
merged['MOTHER'].notna() | merged['REVIEW'].notna() | merged['REVIEW_CODE'].notna())
# Error locations
eps_errors = merged.loc[mask, 'index_eps']
header_errors = merged.loc[mask, 'index_hdr'].unique()
revs_errors = merged.loc[mask, 'index_revs'].unique()
return {'Episodes': eps_errors.tolist(),
'Header': header_errors.tolist(),
'Reviews': revs_errors.tolist()}
return error, _validate
def validate_189():
error = ErrorDefinition(
code='189',
description='Child is aged 17 years or over at the beginning of the year, but an Strengths and Difficulties '
+ '(SDQ) score or a reason for no Strengths and Difficulties (SDQ) score has been completed.',
affected_fields=['DOB', 'SDQ_SCORE', 'SDQ_REASON']
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
collection_start = dfs['metadata']['collection_start']
# datetime format allows appropriate comparison between dates
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# If <DOB> >17 years prior to <COLLECTION_START_DATE> then <SDQ_SCORE> and <SDQ_REASON> should not be provided
mask = ((oc2['DOB'] + pd.offsets.DateOffset(years=17)) <= collection_start) & (
oc2['SDQ_REASON'].notna() | oc2['SDQ_SCORE'].notna())
# That is, raise error if collection_start > DOB + 17years
oc_error_locs = oc2.index[mask]
return {'OC2': oc_error_locs.tolist()}
return error, _validate
def validate_226():
error = ErrorDefinition(
code='226',
description='Reason for placement change is not required.',
affected_fields=['REASON_PLACE_CHANGE', 'PLACE']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
code_list = ['T0', 'T1', 'T2', 'T3', 'T4']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# create column to see previous REASON_PLACE_CHANGE
episodes = episodes.sort_values(['CHILD', 'DECOM'])
episodes['PREVIOUS_REASON'] = episodes.groupby('CHILD')['REASON_PLACE_CHANGE'].shift(1)
# If <PL> = 'T0'; 'T1'; 'T2'; 'T3' or 'T4' then <REASON_PLACE_CHANGE> should be null in current episode and current episode - 1
mask = episodes['PLACE'].isin(code_list) & (
episodes['REASON_PLACE_CHANGE'].notna() | episodes['PREVIOUS_REASON'].notna())
# error locations
error_locs = episodes.index[mask]
return {'Episodes': error_locs.tolist()}
return error, _validate
def validate_358():
error = ErrorDefinition(
code='358',
description='Child with this legal status should not be under 10.',
affected_fields=['DECOM', 'DOB', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['J1', 'J2', 'J3']
# convert dates to datetime format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
            # Where <LS> = 'J1' or 'J2' or 'J3' then <DOB> should be at least 10 years prior to <DECOM>
            mask = merged['LS'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=10) > merged['DECOM'])
            # That is, raise error if DECOM < DOB + 10 years (the child was under 10 when the episode began)
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_407():
error = ErrorDefinition(
code='407',
description='Reason episode ceased is Special Guardianship Order, but child has reached age 18.',
affected_fields=['DEC', 'DOB', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['E45', 'E46', 'E47', 'E48']
# convert dates to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# If <REC> = ‘E45’ or ‘E46’ or ‘E47’ or ‘E48’ then <DOB> must be < 18 years prior to <DEC>
mask = merged['REC'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=18) < merged['DEC'])
            # That is, raise error if DEC > DOB + 18 years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_1007():
error = ErrorDefinition(
code='1007',
description='Care leaver information is not required for 17- or 18-year olds who are still looked after.',
affected_fields=['DEC', 'REC', 'DOB', 'IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_end = dfs['metadata']['collection_end']
# convert dates to datetime format
oc3['DOB'] = pd.to_datetime(oc3['DOB'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
oc3.reset_index(inplace=True)
merged = episodes.merge(oc3, on='CHILD', how='left', suffixes=['_eps', '_oc3'])
# If <DOB> < 19 and >= to 17 years prior to <COLLECTION_END_DATE> and current episode <DEC> and or <REC> not provided then <IN_TOUCH>, <ACTIV> and <ACCOM> should not be provided
check_age = (merged['DOB'] + pd.offsets.DateOffset(years=17) <= collection_end) & (
merged['DOB'] + pd.offsets.DateOffset(years=19) > collection_end)
# That is, check that 17<=age<19
check_dec_rec = merged['REC'].isna() | merged['DEC'].isna()
# if either DEC or REC are absent
mask = check_age & check_dec_rec & (
merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna())
# Then raise an error if either IN_TOUCH, ACTIV, or ACCOM have been provided too
# error locations
oc3_error_locs = merged.loc[mask, 'index_oc3']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'OC3': oc3_error_locs.unique().tolist()}
return error, _validate
def validate_442():
error = ErrorDefinition(
code='442',
description='Unique Pupil Number (UPN) field is not completed.',
affected_fields=['UPN', 'LS']
)
def _validate(dfs):
if ('Episodes' not in dfs) or ('Header' not in dfs):
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
code_list = ['V3', 'V4']
# merge left on episodes to get all children for which episodes have been recorded even if they do not exist on the header.
merged = episodes.merge(header, on=['CHILD'], how='left', suffixes=['_eps', '_er'])
# Where any episode present, with an <LS> not = 'V3' or 'V4' then <UPN> must be provided
mask = (~merged['LS'].isin(code_list)) & merged['UPN'].isna()
episode_error_locs = merged.loc[mask, 'index_eps']
header_error_locs = merged.loc[mask, 'index_er']
return {'Episodes': episode_error_locs.tolist(),
# Select unique values since many episodes are joined to one header
# and multiple errors will be raised for the same index.
'Header': header_error_locs.dropna().unique().tolist()}
return error, _validate
def validate_344():
error = ErrorDefinition(
code='344',
description='The record shows the young person has died or returned home to live with parent(s) or someone with parental responsibility for a continuous period of 6 months or more, but activity and/or accommodation on leaving care have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'DIED' or 'RHOM' then <ACTIV> and <ACCOM> should not be provided
mask = ((oc3['IN_TOUCH'] == 'DIED') | (oc3['IN_TOUCH'] == 'RHOM')) & (
oc3['ACTIV'].notna() | oc3['ACCOM'].notna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
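# Illustrative sketch (not part of the rule definitions): a hypothetical example of
# how validate_344 behaves on a toy OC3 table. Values such as 'F1' and 'B1' are
# placeholders for illustration only.
def _example_validate_344():
    import pandas as pd
    fake_oc3 = pd.DataFrame({
        'IN_TOUCH': ['DIED', 'YES', 'RHOM'],
        'ACTIV': ['F1', 'F1', pd.NA],
        'ACCOM': [pd.NA, 'B1', pd.NA],
    })
    _, error_func = validate_344()
    # Only row 0 breaches the rule: IN_TOUCH is 'DIED' but an activity has been provided.
    assert error_func({'OC3': fake_oc3}) == {'OC3': [0]}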
def validate_345():
error = ErrorDefinition(
code='345',
description='The data collection record shows the local authority is in touch with this young person, but activity and/or accommodation data items are zero.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'Yes' then <ACTIV> and <ACCOM> must be provided
mask = (oc3['IN_TOUCH'] == 'YES') & (oc3['ACTIV'].isna() | oc3['ACCOM'].isna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_384():
error = ErrorDefinition(
code='384',
description='A child receiving respite care cannot be in a long-term foster placement ',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# Where <LS> = 'V3' or 'V4' then <PL> must not be 'U1' or 'U4'
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
(episodes['PLACE'] == 'U1') | (episodes['PLACE'] == 'U4'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_390():
error = ErrorDefinition(
code='390',
description='Reason episode ceased is adopted but child has not been previously placed for adoption.',
affected_fields=['PLACE', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# If <REC> = 'E11' or 'E12' then <PL> must be one of 'A3', 'A4', 'A5' or 'A6'
mask = ((episodes['REC'] == 'E11') | (episodes['REC'] == 'E12')) & ~(
(episodes['PLACE'] == 'A3') | (episodes['PLACE'] == 'A4') | (episodes['PLACE'] == 'A5') | (
episodes['PLACE'] == 'A6'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_378():
error = ErrorDefinition(
code='378',
description='A child who is placed with parent(s) cannot be looked after under a single period of accommodation under Section 20 of the Children Act 1989.',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
            # & binds more tightly than ==, so the brackets around each comparison are necessary here
mask = (episodes['PLACE'] == 'P1') & (episodes['LS'] == 'V2')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_398():
error = ErrorDefinition(
code='398',
description='Distance field completed but child looked after under legal status V3 or V4.',
affected_fields=['LS', 'HOME_POST', 'PL_POST']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
episodes['HOME_POST'].notna() | episodes['PL_POST'].notna())
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_451():
error = ErrorDefinition(
code='451',
description='Child is still freed for adoption, but freeing orders could not be applied for since 30 December 2005.',
affected_fields=['DEC', 'REC', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = episodes['DEC'].isna() & episodes['REC'].isna() & (episodes['LS'] == 'D1')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_519():
error = ErrorDefinition(
code='519',
description='Data entered on the legal status of adopters shows civil partnership couple, but data entered on genders of adopters does not show it as a couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = (ad1['LS_ADOPTR'] == 'L2') & (
(ad1['SEX_ADOPTR'] != 'MM') & (ad1['SEX_ADOPTR'] != 'FF') & (ad1['SEX_ADOPTR'] != 'MF'))
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_520():
error = ErrorDefinition(
code='520',
description='Data entry on the legal status of adopters shows different gender married couple but data entry on genders of adopters shows it as a same gender couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
# check condition
mask = (ad1['LS_ADOPTR'] == 'L11') & (ad1['SEX_ADOPTR'] != 'MF')
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
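# Illustrative sketch (not part of the rule definitions): a hypothetical example of
# validate_520 on a toy AD1 table, showing that only an L11 legal status combined
# with a non-MF gender code is flagged.
def _example_validate_520():
    import pandas as pd
    fake_ad1 = pd.DataFrame({
        'LS_ADOPTR': ['L11', 'L11', 'L1'],
        'SEX_ADOPTR': ['MM', 'MF', 'MM'],
    })
    _, error_func = validate_520()
    # Row 0 is flagged (L11 with MM); row 1 (L11 with MF) and row 2 (not L11) pass.
    assert error_func({'AD1': fake_ad1}) == {'AD1': [0]}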
def validate_522():
error = ErrorDefinition(
code='522',
description='Date of decision that the child should be placed for adoption must be on or before the date that a child should no longer be placed for adoption.',
affected_fields=['DATE_PLACED', 'DATE_PLACED_CEASED']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
# Convert to datetimes
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
            # Boolean mask: raise an error where the cessation date precedes the decision to place (the decision must be on or before the cessation date)
            mask = placed_adoption['DATE_PLACED_CEASED'] < placed_adoption['DATE_PLACED']
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_563():
error = ErrorDefinition(
code='563',
description='The child should no longer be placed for adoption but the date of the decision that the child should be placed for adoption is blank',
affected_fields=['DATE_PLACED', 'REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
mask = placed_adoption['REASON_PLACED_CEASED'].notna() & placed_adoption['DATE_PLACED_CEASED'].notna() & \
placed_adoption['DATE_PLACED'].isna()
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_544():
error = ErrorDefinition(
code='544',
description="Any child who has conviction information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['CONVICTED', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
convict = oc2['CONVICTED'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = convict & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_634():
error = ErrorDefinition(
code='634',
description='There are entries for previous permanence options, but child has not started to be looked after from 1 April 2016 onwards.',
affected_fields=['LA_PERM', 'PREV_PERM', 'DATE_PERM', 'DECOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PrevPerm' not in dfs:
return {}
else:
episodes = dfs['Episodes']
prevperm = dfs['PrevPerm']
collection_start = dfs['metadata']['collection_start']
# convert date field to appropriate format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# the maximum date has the highest possibility of satisfying the condition
episodes['LAST_DECOM'] = episodes.groupby('CHILD')['DECOM'].transform('max')
# prepare to merge
episodes.reset_index(inplace=True)
prevperm.reset_index(inplace=True)
merged = prevperm.merge(episodes, on='CHILD', how='left', suffixes=['_prev', '_eps'])
# If <PREV_PERM> or <LA_PERM> or <DATE_PERM> provided, then at least 1 episode must have a <DECOM> later than 01/04/2016
mask = (merged['PREV_PERM'].notna() | merged['DATE_PERM'].notna() | merged['LA_PERM'].notna()) & (
merged['LAST_DECOM'] < collection_start)
eps_error_locs = merged.loc[mask, 'index_eps']
prevperm_error_locs = merged.loc[mask, 'index_prev']
# return {'PrevPerm':prevperm_error_locs}
return {'Episodes': eps_error_locs.unique().tolist(), 'PrevPerm': prevperm_error_locs.unique().tolist()}
return error, _validate
def validate_158():
error = ErrorDefinition(
code='158',
description='If a child has been recorded as receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be left blank.',
affected_fields=['INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
error_mask = oc2['INTERVENTION_RECEIVED'].astype(str).eq('1') & oc2['INTERVENTION_OFFERED'].notna()
error_locations = oc2.index[error_mask]
return {'OC2': error_locations.tolist()}
return error, _validate
def validate_133():
error = ErrorDefinition(
code='133',
        description="Data entry for accommodation after leaving care is invalid. If reporting on a child's accommodation after leaving care, the data entry must be valid.",
affected_fields=['ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
valid_codes = ['B1', 'B2', 'C1', 'C2', 'D1', 'D2', 'E1', 'E2', 'G1', 'G2', 'H1', 'H2', 'K1', 'K2', 'R1',
'R2', 'S2', 'T1', 'T2', 'U1', 'U2', 'V1', 'V2', 'W1', 'W2', 'X2', 'Y1', 'Y2', 'Z1', 'Z2',
'0']
error_mask = ~oc3['ACCOM'].isna() & ~oc3['ACCOM'].isin(valid_codes)
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.tolist()}
return error, _validate
def validate_565():
error = ErrorDefinition(
code='565',
description='The date that the child started to be missing or away from placement without authorisation has been completed but whether the child was missing or away from placement without authorisation has not been completed.',
affected_fields=['MISSING', 'MIS_START']
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
mask = missing['MIS_START'].notna() & missing['MISSING'].isna()
error_locations = missing.index[mask]
return {'Missing': error_locations.to_list()}
return error, _validate
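# Illustrative sketch (not part of the rule definitions): a hypothetical example of
# validate_565 on a toy Missing table. Only a row with MIS_START filled in but
# MISSING left blank should be returned.
def _example_validate_565():
    import pandas as pd
    fake_missing = pd.DataFrame({
        'MISSING': ['M', pd.NA, 'A'],
        'MIS_START': ['01/06/2020', '01/06/2020', pd.NA],
    })
    _, error_func = validate_565()
    assert error_func({'Missing': fake_missing}) == {'Missing': [1]}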
def validate_433():
error = ErrorDefinition(
code='433',
description='The reason for new episode suggests that this is a continuation episode, but the episode does not start on the same day as the last episode finished.',
affected_fields=['RNE', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['original_index'] = episodes.index
episodes.sort_values(['CHILD', 'DECOM', 'DEC'], inplace=True)
episodes[['PREVIOUS_DEC', 'PREVIOUS_CHILD']] = episodes[['DEC', 'CHILD']].shift(1)
rne_is_ongoing = episodes['RNE'].str.upper().astype(str).isin(['P', 'L', 'T', 'U', 'B'])
date_mismatch = episodes['PREVIOUS_DEC'] != episodes['DECOM']
missing_date = episodes['PREVIOUS_DEC'].isna() | episodes['DECOM'].isna()
same_child = episodes['PREVIOUS_CHILD'] == episodes['CHILD']
error_mask = rne_is_ongoing & (date_mismatch | missing_date) & same_child
error_locations = episodes['original_index'].loc[error_mask].sort_values()
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_437():
error = ErrorDefinition(
code='437',
description='Reason episode ceased is child has died or is aged 18 or over but there are further episodes.',
affected_fields=['REC'],
)
# !# potential false negatives, as this only operates on the current year's data
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes.sort_values(['CHILD', 'DECOM'], inplace=True)
episodes[['NEXT_DECOM', 'NEXT_CHILD']] = episodes[['DECOM', 'CHILD']].shift(-1)
# drop rows with missing DECOM as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
ceased_e2_e15 = episodes['REC'].str.upper().astype(str).isin(['E2', 'E15'])
has_later_episode = episodes['CHILD'] == episodes['NEXT_CHILD']
error_mask = ceased_e2_e15 & has_later_episode
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_547():
error = ErrorDefinition(
code='547',
description="Any child who has health promotion information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
healthck = oc2['HEALTH_CHECK'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = healthck & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_635():
error = ErrorDefinition(
code='635',
description='There are entries for date of order and local authority code where previous permanence option was arranged but previous permanence code is Z1',
affected_fields=['LA_PERM', 'DATE_PERM', 'PREV_PERM']
)
def _validate(dfs):
if 'PrevPerm' not in dfs:
return {}
else:
prev_perm = dfs['PrevPerm']
# raise and error if either LA_PERM or DATE_PERM are present, yet PREV_PERM is absent.
mask = ((prev_perm['LA_PERM'].notna() | prev_perm['DATE_PERM'].notna()) & prev_perm['PREV_PERM'].isna())
error_locations = prev_perm.index[mask]
return {'PrevPerm': error_locations.to_list()}
return error, _validate
def validate_550():
error = ErrorDefinition(
code='550',
description='A placement provider code of PR0 can only be associated with placement P1.',
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = (episodes['PLACE'] != 'P1') & episodes['PLACE_PROVIDER'].eq('PR0')
validation_error_locations = episodes.index[mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
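# Illustrative sketch (not part of the rule definitions): a hypothetical example of
# validate_550 on a toy Episodes table. A provider code of PR0 is only acceptable
# alongside placement P1.
def _example_validate_550():
    import pandas as pd
    fake_episodes = pd.DataFrame({
        'PLACE': ['P1', 'U1', 'U1'],
        'PLACE_PROVIDER': ['PR0', 'PR0', 'PR1'],
    })
    _, error_func = validate_550()
    # Row 1 is flagged: PR0 used with a non-P1 placement.
    assert error_func({'Episodes': fake_episodes}) == {'Episodes': [1]}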
def validate_217():
error = ErrorDefinition(
code='217',
description='Children who are placed for adoption with current foster carers (placement types A3 or A5) must have a reason for new episode of S, T or U.',
affected_fields=['PLACE', 'DECOM', 'RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
max_decom_allowed = pd.to_datetime('01/04/2015', format='%d/%m/%Y', errors='coerce')
reason_new_ep = ['S', 'T', 'U']
place_codes = ['A3', 'A5']
mask = (episodes['PLACE'].isin(place_codes) & (episodes['DECOM'] >= max_decom_allowed)) & ~episodes[
'RNE'].isin(reason_new_ep)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_518():
error = ErrorDefinition(
code='518',
description='If reporting legal status of adopters is L4 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L4') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_517():
error = ErrorDefinition(
code='517',
description='If reporting legal status of adopters is L3 then the genders of adopters should be coded as MF. MF = the adopting couple are male and female.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L3') & ~AD1['SEX_ADOPTR'].isin(['MF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_558():
error = ErrorDefinition(
code='558',
        description='If a child has been adopted, then the decision to place them for adoption has not been disrupted and the date of the decision that a child should no longer be placed for adoption should be left blank. If the REC code is either E11 or E12 then the DATE_PLACED_CEASED date should not be provided.',
affected_fields=['DATE_PLACED_CEASED', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
rec_codes = ['E11', 'E12']
placeEpisodes = episodes[episodes['REC'].isin(rec_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED_CEASED'].notna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_453():
error = ErrorDefinition(
code='453',
description='Contradiction between placement distance in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_DISTANCE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['PL_DISTANCE'] = pd.to_numeric(episodes['PL_DISTANCE'], errors='coerce')
episodes_last['PL_DISTANCE'] = pd.to_numeric(episodes_last['PL_DISTANCE'], errors='coerce')
# drop rows with missing DECOM before finding idxmin/max, as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
episodes_last = episodes_last.dropna(subset=['DECOM'])
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_dist = abs(episodes_merged['PL_DISTANCE'] - episodes_merged['PL_DISTANCE_last']) >= 0.2
error_mask = in_both_years & same_rne & last_year_open & different_pl_dist
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_516():
error = ErrorDefinition(
code='516',
        description='The episode data submitted for this child does not show that he/she was with their former foster carer(s) during the year. If the code in the reason episode ceased is E45 or E46 the child must have a placement code of U1 to U6.',
affected_fields=['REC', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
place_codes = ['U1', 'U2', 'U3', 'U4', 'U5', 'U6']
rec_codes = ['E45', 'E46']
error_mask = episodes['REC'].isin(rec_codes) & ~episodes['PLACE'].isin(place_codes)
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_511():
error = ErrorDefinition(
code='511',
description='If reporting that the number of person(s) adopting the looked after child is two adopters then the code should only be MM, FF or MF. MM = the adopting couple are both males; FF = the adopting couple are both females; MF = The adopting couple are male and female.',
affected_fields=['NB_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
mask = AD1['NB_ADOPTR'].astype(str).eq('2') & AD1['SEX_ADOPTR'].isin(['M1', 'F1'])
validation_error_mask = mask
validation_error_locations = AD1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_524():
error = ErrorDefinition(
code='524',
description='If reporting legal status of adopters is L12 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L12') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_441():
error = ErrorDefinition(
code='441',
description='Participation method indicates child was 4 years old or over at the time of the review, but the date of birth and review date indicates the child was under 4 years old.',
affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
reviews = dfs['Reviews']
reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce')
reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce')
reviews = reviews.dropna(subset=['REVIEW', 'DOB'])
mask = reviews['REVIEW_CODE'].isin(['PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7']) & (
reviews['REVIEW'] < reviews['DOB'] + pd.offsets.DateOffset(years=4))
validation_error_mask = mask
validation_error_locations = reviews.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
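# Illustrative sketch (not part of the rule definitions): a hypothetical example of
# validate_441 on a toy Reviews table, showing the DateOffset comparison against
# the fourth birthday.
def _example_validate_441():
    import pandas as pd
    fake_reviews = pd.DataFrame({
        'DOB': ['01/01/2018', '01/01/2010'],
        'REVIEW': ['01/06/2020', '01/06/2020'],
        'REVIEW_CODE': ['PN1', 'PN1'],
    })
    _, error_func = validate_441()
    # Row 0 is flagged: the review took place before the child's fourth birthday.
    assert error_func({'Reviews': fake_reviews}) == {'Reviews': [0]}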
def validate_184():
error = ErrorDefinition(
code='184',
description='Date of decision that a child should be placed for adoption is before the child was born.',
        affected_fields=['DATE_PLACED',  # PlacedAdoption
'DOB'], # Header
)
def _validate(dfs):
if 'Header' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
child_record = dfs['Header']
placed_for_adoption = dfs['PlacedAdoption']
all_data = (placed_for_adoption
.reset_index()
.merge(child_record, how='left', on='CHILD', suffixes=[None, '_P4A']))
all_data['DATE_PLACED'] = pd.to_datetime(all_data['DATE_PLACED'], format='%d/%m/%Y', errors='coerce')
all_data['DOB'] = pd.to_datetime(all_data['DOB'], format='%d/%m/%Y', errors='coerce')
mask = (all_data['DATE_PLACED'] >= all_data['DOB']) | all_data['DATE_PLACED'].isna()
validation_error = ~mask
validation_error_locations = all_data[validation_error]['index'].unique()
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_612():
error = ErrorDefinition(
code='612',
description="Date of birth field has been completed but mother field indicates child is not a mother.",
affected_fields=['SEX', 'MOTHER', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
error_mask = (
((header['MOTHER'].astype(str) == '0') | header['MOTHER'].isna())
& (header['SEX'].astype(str) == '2')
& header['MC_DOB'].notna()
)
validation_error_locations = header.index[error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
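# Illustrative sketch (not part of the rule definitions): a hypothetical example of
# validate_612 on a toy Header table. Only a female child (SEX = '2') with MOTHER
# recorded as '0' (or blank) and MC_DOB filled in should be flagged.
def _example_validate_612():
    import pandas as pd
    fake_header = pd.DataFrame({
        'SEX': ['2', '2', '1'],
        'MOTHER': ['0', '1', '0'],
        'MC_DOB': ['01/01/2021', '01/01/2021', '01/01/2021'],
    })
    _, error_func = validate_612()
    assert error_func({'Header': fake_header}) == {'Header': [0]}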
def validate_552():
"""
    This error checks that the first adoption episode starts on or after the last decision date.
    If there are multiple of either, the results may be unexpected.
"""
error = ErrorDefinition(
code="552",
description="Date of Decision to place a child for adoption should be on or prior to the date that the child was placed for adoption.",
# Field that defines date of decision to place a child for adoption is DATE_PLACED and the start of adoption is defined by DECOM with 'A' placement types.
affected_fields=['DATE_PLACED', 'DECOM'],
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
# get the required datasets
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
# keep index values so that they stay the same when needed later on for error locations
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
adoption_eps = episodes[episodes['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])].copy()
# find most recent adoption decision
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# remove rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption['DATE_PLACED'].notna()]
placed_adoption_inds = placed_adoption.groupby('CHILD')['DATE_PLACED'].idxmax(skipna=True)
last_decision = placed_adoption.loc[placed_adoption_inds]
# first time child started adoption
adoption_eps["DECOM"] = pd.to_datetime(adoption_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
adoption_eps = adoption_eps[adoption_eps['DECOM'].notna()]
adoption_eps_inds = adoption_eps.groupby('CHILD')['DECOM'].idxmin(skipna=True)
# full information of first adoption
first_adoption = adoption_eps.loc[adoption_eps_inds]
# date of decision and date of start of adoption (DECOM) have to be put in one table
merged = first_adoption.merge(last_decision, on=['CHILD'], how='left', suffixes=['_EP', '_PA'])
# check to see if date of decision to place is less than or equal to date placed.
decided_after_placed = merged["DECOM"] < merged["DATE_PLACED"]
# find the corresponding location of error values per file.
episode_error_locs = merged.loc[decided_after_placed, 'index_EP']
placedadoption_error_locs = merged.loc[decided_after_placed, 'index_PA']
return {"PlacedAdoption": placedadoption_error_locs.to_list(), "Episodes": episode_error_locs.to_list()}
return error, _validate
def validate_551():
error = ErrorDefinition(
code='551',
description='Child has been placed for adoption but there is no date of the decision that the child should be placed for adoption.',
affected_fields=['DATE_PLACED', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
place_codes = ['A3', 'A4', 'A5', 'A6']
placeEpisodes = episodes[episodes['PLACE'].isin(place_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED'].isna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_557():
error = ErrorDefinition(
code='557',
description="Child for whom the decision was made that they should be placed for adoption has left care " +
"but was not adopted and information on the decision that they should no longer be placed for " +
"adoption items has not been completed.",
affected_fields=['DATE_PLACED_CEASED', 'REASON_PLACED_CEASED', # PlacedAdoption
'PLACE', 'LS', 'REC'], # Episodes
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'PlacedAdoption' not in dfs:
return {}
else:
eps = dfs['Episodes']
placed = dfs['PlacedAdoption']
eps = eps.reset_index()
placed = placed.reset_index()
child_placed = eps['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])
order_granted = eps['LS'].isin(['D1', 'E1'])
not_adopted = ~eps['REC'].isin(['E11', 'E12']) & eps['REC'].notna()
placed['ceased_incomplete'] = (
placed['DATE_PLACED_CEASED'].isna() | placed['REASON_PLACED_CEASED'].isna()
)
eps = eps[(child_placed | order_granted) & not_adopted]
eps = eps.merge(placed, on='CHILD', how='left', suffixes=['_EP', '_PA'], indicator=True)
eps = eps[(eps['_merge'] == 'left_only') | eps['ceased_incomplete']]
EP_errors = eps['index_EP']
PA_errors = eps['index_PA'].dropna()
return {
'Episodes': EP_errors.to_list(),
'PlacedAdoption': PA_errors.to_list(),
}
return error, _validate
def validate_207():
error = ErrorDefinition(
code='207',
description='Mother status for the current year disagrees with the mother status already recorded for this child.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
mother_is_different = header_merged['MOTHER'].astype(str) != header_merged['MOTHER_last'].astype(str)
mother_was_true = header_merged['MOTHER_last'].astype(str) == '1'
error_mask = in_both_years & mother_is_different & mother_was_true
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
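# Illustrative sketch (not part of the rule definitions): a hypothetical example of
# the cross-year comparison in validate_207, using toy Header and Header_last
# tables keyed on CHILD.
def _example_validate_207():
    import pandas as pd
    fake_header = pd.DataFrame({'CHILD': ['101', '102'], 'MOTHER': ['0', '1']})
    fake_header_last = pd.DataFrame({'CHILD': ['101', '102'], 'MOTHER': ['1', '1']})
    _, error_func = validate_207()
    # Child '101' was recorded as a mother last year but not this year, so row 0 is flagged.
    assert error_func({'Header': fake_header, 'Header_last': fake_header_last}) == {'Header': [0]}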
def validate_523():
error = ErrorDefinition(
code='523',
description="Date of decision that the child should be placed for adoption should be the same date as the decision that adoption is in the best interest (date should be placed).",
affected_fields=['DATE_PLACED', 'DATE_INT'],
)
def _validate(dfs):
if ("AD1" not in dfs) or ("PlacedAdoption" not in dfs):
return {}
else:
placed_adoption = dfs["PlacedAdoption"]
ad1 = dfs["AD1"]
# keep initial index values to be reused for locating errors later on.
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
# convert to datetime to enable comparison
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format="%d/%m/%Y",
errors='coerce')
ad1["DATE_INT"] = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce')
# drop rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption["DATE_PLACED"].notna()]
ad1 = ad1[ad1["DATE_INT"].notna()]
# bring corresponding values together from both dataframes
merged_df = placed_adoption.merge(ad1, on=['CHILD'], how='inner', suffixes=["_AD", "_PA"])
# find error values
different_dates = merged_df['DATE_INT'] != merged_df['DATE_PLACED']
# map error locations to corresponding indices
pa_error_locations = merged_df.loc[different_dates, 'index_PA']
ad1_error_locations = merged_df.loc[different_dates, 'index_AD']
return {"PlacedAdoption": pa_error_locations.to_list(), "AD1": ad1_error_locations.to_list()}
return error, _validate
def validate_3001():
error = ErrorDefinition(
code='3001',
description='Where care leavers information is being returned for a young person around their 17th birthday, the accommodation cannot be with their former foster carer(s).',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
oc3 = dfs['OC3']
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
header['DOB17'] = header['DOB'] + pd.DateOffset(years=17)
oc3_merged = oc3.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
accom_foster = oc3_merged['ACCOM'].str.upper().astype(str).isin(['Z1', 'Z2'])
age_17_in_year = (oc3_merged['DOB17'] <= collection_end) & (oc3_merged['DOB17'] >= collection_start)
error_mask = accom_foster & age_17_in_year
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_389():
error = ErrorDefinition(
code='389',
description='Reason episode ceased is that child transferred to care of adult social care services, but child is aged under 16.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB16'] = header['DOB'] + pd.DateOffset(years=16)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_asc = episodes_merged['REC'].str.upper().astype(str).isin(['E7'])
ceased_over_16 = episodes_merged['DOB16'] <= episodes_merged['DEC']
error_mask = ceased_asc & ~ceased_over_16
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_387():
error = ErrorDefinition(
code='387',
description='Reason episode ceased is child moved into independent living arrangement, but the child is aged under 14.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB14'] = header['DOB'] + pd.DateOffset(years=14)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_indep = episodes_merged['REC'].str.upper().astype(str).isin(['E5', 'E6'])
ceased_over_14 = episodes_merged['DOB14'] <= episodes_merged['DEC']
dec_present = episodes_merged['DEC'].notna()
error_mask = ceased_indep & ~ceased_over_14 & dec_present
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_452():
error = ErrorDefinition(
code='452',
description='Contradiction between local authority of placement code in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_la = episodes_merged['PL_LA'].astype(str) != episodes_merged['PL_LA_last'].astype(str)
error_mask = in_both_years & same_rne & last_year_open & different_pl_la
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_386():
error = ErrorDefinition(
code='386',
description='Reason episode ceased is adopted but child has reached age 18.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = (
episodes
.reset_index()
.merge(header, how='left', on=['CHILD'], suffixes=('', '_header'), indicator=True)
.set_index('index')
.dropna(subset=['DOB18', 'DEC'])
)
ceased_adopted = episodes_merged['REC'].str.upper().astype(str).isin(['E11', 'E12'])
ceased_under_18 = episodes_merged['DOB18'] > episodes_merged['DEC']
error_mask = ceased_adopted & ~ceased_under_18
error_locations = episodes_merged.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_363():
error = ErrorDefinition(
code='363',
description='Child assessment order (CAO) lasted longer than 7 days allowed in the Children Act 1989.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
L2_eps = episodes[episodes['LS'] == 'L3'].copy()
L2_eps['original_index'] = L2_eps.index
L2_eps = L2_eps[L2_eps['DECOM'].notna()]
L2_eps.loc[L2_eps['DEC'].isna(), 'DEC'] = collection_end_str
L2_eps['DECOM'] = pd.to_datetime(L2_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
L2_eps = L2_eps.dropna(subset=['DECOM'])
L2_eps['DEC'] = pd.to_datetime(L2_eps['DEC'], format='%d/%m/%Y', errors='coerce')
        L2_eps = L2_eps.sort_values(['CHILD', 'DECOM'])
L2_eps['index'] = pd.RangeIndex(0, len(L2_eps))
L2_eps['index+1'] = L2_eps['index'] + 1
L2_eps = L2_eps.merge(L2_eps, left_on='index', right_on='index+1',
how='left', suffixes=[None, '_prev'])
L2_eps = L2_eps[['original_index', 'DECOM', 'DEC', 'DEC_prev', 'CHILD', 'CHILD_prev', 'LS']]
L2_eps['new_period'] = (
(L2_eps['DECOM'] > L2_eps['DEC_prev'])
| (L2_eps['CHILD'] != L2_eps['CHILD_prev'])
)
L2_eps['duration'] = (L2_eps['DEC'] - L2_eps['DECOM']).dt.days
L2_eps['period_id'] = L2_eps['new_period'].astype(int).cumsum()
L2_eps['period_duration'] = L2_eps.groupby('period_id')['duration'].transform(sum)
error_mask = L2_eps['period_duration'] > 7
return {'Episodes': L2_eps.loc[error_mask, 'original_index'].to_list()}
return error, _validate
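def _example_consecutive_period_grouping():
    # Illustrative sketch only - not called by any validator. It shows, on hypothetical
    # toy data, how the new_period/cumsum trick used in validate_363 and validate_364
    # groups back-to-back episodes into continuous periods before summing durations.
    import pandas as pd
    toy = pd.DataFrame({
        'CHILD': ['A', 'A', 'B'],
        'DECOM': pd.to_datetime(['01/01/2020', '04/01/2020', '01/01/2020'], format='%d/%m/%Y'),
        'DEC': pd.to_datetime(['04/01/2020', '10/01/2020', '03/01/2020'], format='%d/%m/%Y'),
    })
    toy = toy.sort_values(['CHILD', 'DECOM'])
    toy['DEC_prev'] = toy['DEC'].shift(1)
    toy['CHILD_prev'] = toy['CHILD'].shift(1)
    # a new period starts when the episode does not begin on/before the previous end,
    # or when the row belongs to a different child
    toy['new_period'] = (toy['DECOM'] > toy['DEC_prev']) | (toy['CHILD'] != toy['CHILD_prev'])
    toy['period_id'] = toy['new_period'].astype(int).cumsum()
    toy['duration'] = (toy['DEC'] - toy['DECOM']).dt.days
    toy['period_duration'] = toy.groupby('period_id')['duration'].transform('sum')
    return toy  # child A's two touching episodes share one period (9 days); child B is separate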
def validate_364():
error = ErrorDefinition(
code='364',
description='Sections 41-46 of Police and Criminal Evidence (PACE; 1984) severely limits ' +
'the time a child can be detained in custody in Local Authority (LA) accommodation.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
J2_eps = episodes[episodes['LS'] == 'J2'].copy()
J2_eps['original_index'] = J2_eps.index
J2_eps['DECOM'] = pd.to_datetime(J2_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
J2_eps = J2_eps[J2_eps['DECOM'].notna()]
J2_eps.loc[J2_eps['DEC'].isna(), 'DEC'] = collection_end_str
J2_eps['DEC'] = pd.to_datetime(J2_eps['DEC'], format='%d/%m/%Y', errors='coerce')
        J2_eps = J2_eps.sort_values(['CHILD', 'DECOM'])
J2_eps['index'] = pd.RangeIndex(0, len(J2_eps))
J2_eps['index_prev'] = J2_eps['index'] + 1
J2_eps = J2_eps.merge(J2_eps, left_on='index', right_on='index_prev',
how='left', suffixes=[None, '_prev'])
J2_eps = J2_eps[['original_index', 'DECOM', 'DEC', 'DEC_prev', 'CHILD', 'CHILD_prev', 'LS']]
J2_eps['new_period'] = (
(J2_eps['DECOM'] > J2_eps['DEC_prev'])
| (J2_eps['CHILD'] != J2_eps['CHILD_prev'])
)
J2_eps['duration'] = (J2_eps['DEC'] - J2_eps['DECOM']).dt.days
J2_eps['period_id'] = J2_eps['new_period'].astype(int).cumsum()
J2_eps['period_duration'] = J2_eps.groupby('period_id')['duration'].transform(sum)
error_mask = J2_eps['period_duration'] > 21
return {'Episodes': J2_eps.loc[error_mask, 'original_index'].to_list()}
return error, _validate
def validate_365():
error = ErrorDefinition(
code='365',
        description='Any individual short-term respite placement must not exceed 17 days.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
episodes.loc[episodes['DEC'].isna(), 'DEC'] = collection_end_str
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
over_17_days = episodes['DEC'] > episodes['DECOM'] + pd.DateOffset(days=17)
error_mask = (episodes['LS'] == 'V3') & over_17_days
return {'Episodes': episodes.index[error_mask].to_list()}
return error, _validate
def validate_367():
error = ErrorDefinition(
code='367',
description='The maximum amount of respite care allowable is 75 days in any 12-month period.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
        V3_eps = episodes[episodes['LS'] == 'V3'].copy()
V3_eps = V3_eps.dropna(subset=['DECOM']) # missing DECOM should get fixed before looking for this error
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
V3_eps['DECOM_dt'] = pd.to_datetime(V3_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
V3_eps['DEC_dt'] = pd.to_datetime(V3_eps['DEC'], format='%d/%m/%Y', errors='coerce')
# truncate episode start/end dates to collection start/end respectively
V3_eps.loc[V3_eps['DEC'].isna() | (V3_eps['DEC_dt'] > collection_end), 'DEC_dt'] = collection_end
V3_eps.loc[V3_eps['DECOM_dt'] < collection_start, 'DECOM_dt'] = collection_start
V3_eps['duration'] = (V3_eps['DEC_dt'] - V3_eps['DECOM_dt']).dt.days
V3_eps = V3_eps[V3_eps['duration'] > 0]
V3_eps['year_total_duration'] = V3_eps.groupby('CHILD')['duration'].transform(sum)
error_mask = V3_eps['year_total_duration'] > 75
return {'Episodes': V3_eps.index[error_mask].to_list()}
return error, _validate
def validate_440():
error = ErrorDefinition(
code='440',
description='Participation method indicates child was under 4 years old at the time of the review, but date of birth and review date indicates the child was 4 years old or over.',
affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
reviews = dfs['Reviews']
reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce')
reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce')
mask = reviews['REVIEW_CODE'].eq('PN0') & (
reviews['REVIEW'] > reviews['DOB'] + pd.offsets.DateOffset(years=4))
validation_error_mask = mask
validation_error_locations = reviews.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_445():
error = ErrorDefinition(
code='445',
description='D1 is not a valid code for episodes starting after December 2005.',
affected_fields=['LS', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
max_decom_allowed = pd.to_datetime('31/12/2005', format='%d/%m/%Y', errors='coerce')
mask = episodes['LS'].eq('D1') & (episodes['DECOM'] > max_decom_allowed)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_446():
error = ErrorDefinition(
code='446',
description='E1 is not a valid code for episodes starting before December 2005.',
affected_fields=['LS', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
min_decom_allowed = pd.to_datetime('01/12/2005', format='%d/%m/%Y', errors='coerce')
mask = episodes['LS'].eq('E1') & (episodes['DECOM'] < min_decom_allowed)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_208():
error = ErrorDefinition(
code='208',
description='Unique Pupil Number (UPN) for the current year disagrees with the Unique Pupil Number (UPN) already recorded for this child.',
affected_fields=['UPN'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
upn_is_different = header_merged['UPN'].str.upper().astype(str) != header_merged[
'UPN_last'].str.upper().astype(str)
upn_not_recorded = header_merged['UPN'].str.upper().astype(str).isin(['UN2', 'UN3', 'UN4', 'UN5', 'UN6']) & \
header_merged['UPN_last'].str.upper().astype(str).isin(['UN1'])
error_mask = in_both_years & upn_is_different & ~upn_not_recorded
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_204():
error = ErrorDefinition(
code='204',
description='Ethnic origin code disagrees with the ethnic origin already recorded for this child.',
affected_fields=['ETHNIC'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
ethnic_is_different = header_merged['ETHNIC'].astype(str).str.upper() != header_merged[
'ETHNIC_last'].astype(str).str.upper()
error_mask = in_both_years & ethnic_is_different
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_203():
error = ErrorDefinition(
code='203',
description='Date of birth disagrees with the date of birth already recorded for this child.',
affected_fields=['DOB'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
header_last['DOB'] = pd.to_datetime(header_last['DOB'], format='%d/%m/%Y', errors='coerce')
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
dob_is_different = header_merged['DOB'].astype(str) != header_merged['DOB_last'].astype(str)
error_mask = in_both_years & dob_is_different
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
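def _example_year_on_year_merge():
    # Illustrative sketch only - not called by any validator. Hypothetical toy data
    # showing how the indicator=True merge used by validate_202/203/204/208 flags which
    # children appear in both the current and the previous year's Header.
    import pandas as pd
    header = pd.DataFrame({'CHILD': ['1', '2'], 'DOB': ['01/01/2010', '02/02/2011']})
    header_last = pd.DataFrame({'CHILD': ['1'], 'DOB': ['01/01/2010']})
    merged = header.reset_index().merge(header_last, how='left', on=['CHILD'],
                                        suffixes=('', '_last'), indicator=True).set_index('index')
    in_both_years = merged['_merge'] == 'both'
    return header.index[in_both_years]  # only CHILD '1' is present in both years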
def validate_530():
error = ErrorDefinition(
code='530',
description="A placement provider code of PR4 cannot be associated with placement P1.",
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = episodes['PLACE'].eq('P1') & episodes['PLACE_PROVIDER'].eq('PR4')
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_571():
error = ErrorDefinition(
code='571',
description='The date that the child ceased to be missing or away from placement without authorisation is before the start or after the end of the collection year.',
affected_fields=['MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
missing['fMIS_END'] = pd.to_datetime(missing['MIS_END'], format='%d/%m/%Y', errors='coerce')
end_date_before_year = missing['fMIS_END'] < collection_start
end_date_after_year = missing['fMIS_END'] > collection_end
error_mask = end_date_before_year | end_date_after_year
error_locations = missing.index[error_mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_1005():
error = ErrorDefinition(
code='1005',
description='The end date of the missing episode or episode that the child was away from placement without authorisation is not a valid date.',
affected_fields=['MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
missing['fMIS_END'] = pd.to_datetime(missing['MIS_END'], format='%d/%m/%Y', errors='coerce')
missing_end_date = missing['MIS_END'].isna()
invalid_end_date = missing['fMIS_END'].isna()
error_mask = ~missing_end_date & invalid_end_date
error_locations = missing.index[error_mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_1004():
error = ErrorDefinition(
code='1004',
description='The start date of the missing episode or episode that the child was away from placement without authorisation is not a valid date.',
affected_fields=['MIS_START'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
missing['fMIS_START'] = pd.to_datetime(missing['MIS_START'], format='%d/%m/%Y', errors='coerce')
missing_start_date = missing['MIS_START'].isna()
invalid_start_date = missing['fMIS_START'].isna()
error_mask = missing_start_date | invalid_start_date
error_locations = missing.index[error_mask]
return {'Missing': error_locations.to_list()}
return error, _validate
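def _example_coerced_date_check():
    # Illustrative sketch only - not called by any validator. Hypothetical values showing
    # the "provided but not a valid date" pattern used in validate_1004/1005 and the other
    # date-format checks: errors='coerce' turns unparseable strings into NaT, so invalid
    # entries are non-null in the raw column but null after parsing.
    import pandas as pd
    raw = pd.Series(['01/02/2020', '31/02/2020', None])
    parsed = pd.to_datetime(raw, format='%d/%m/%Y', errors='coerce')
    return raw.notna() & parsed.isna()  # True only for the impossible 31/02/2020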
def validate_202():
error = ErrorDefinition(
code='202',
description='The gender code conflicts with the gender already recorded for this child.',
affected_fields=['SEX'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
sex_is_different = header_merged['SEX'].astype(str) != header_merged['SEX_last'].astype(str)
error_mask = in_both_years & sex_is_different
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_621():
error = ErrorDefinition(
code='621',
description="Mother’s field has been completed but date of birth shows that the mother is younger than her child.",
affected_fields=['DOB', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
header['MC_DOB'] = pd.to_datetime(header['MC_DOB'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
mask = (header['MC_DOB'] > header['DOB']) | header['MC_DOB'].isna()
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_556():
error = ErrorDefinition(
code='556',
description='Date of decision that the child should be placed for adoption should be on or prior to the date that the freeing order was granted.',
affected_fields=['DATE_PLACED', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placedAdoptions['DATE_PLACED'] = pd.to_datetime(placedAdoptions['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
episodes = episodes.reset_index()
D1Episodes = episodes[episodes['LS'] == 'D1']
merged = D1Episodes.reset_index().merge(placedAdoptions, how='left', on='CHILD', ).set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED'] > merged['DECOM']]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_393():
error = ErrorDefinition(
code='393',
description='Child is looked after but mother field is not completed.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header_female = header[header['SEX'].astype(str) == '2']
applicable_episodes = episodes[~episodes['LS'].str.upper().isin(['V3', 'V4'])]
error_mask = header_female['CHILD'].isin(applicable_episodes['CHILD']) & header_female['MOTHER'].isna()
error_locations = header_female.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_NoE():
error = ErrorDefinition(
code='NoE',
description='This child has no episodes loaded for previous year even though child started to be looked after before this current year.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last = dfs['Episodes_last']
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
episodes_before_year = episodes[episodes['DECOM'] < collection_start]
episodes_merged = episodes_before_year.reset_index().merge(episodes_last, how='left', on=['CHILD'],
indicator=True).set_index('index')
episodes_not_matched = episodes_merged[episodes_merged['_merge'] == 'left_only']
error_mask = episodes.index.isin(episodes_not_matched.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_356():
error = ErrorDefinition(
code='356',
description='The date the episode ceased is before the date the same episode started.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
error_mask = episodes['DEC'].notna() & (episodes['DEC'] < episodes['DECOM'])
return {'Episodes': episodes.index[error_mask].to_list()}
return error, _validate
def validate_611():
error = ErrorDefinition(
code='611',
description="Date of birth field is blank, but child is a mother.",
affected_fields=['MOTHER', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
validation_error_mask = header['MOTHER'].astype(str).isin(['1']) & header['MC_DOB'].isna()
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_1009():
error = ErrorDefinition(
code='1009',
description='Reason for placement change is not a valid code.',
affected_fields=['REASON_PLACE_CHANGE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'CARPL',
'CLOSE',
'ALLEG',
'STAND',
'APPRR',
'CREQB',
'CREQO',
'CHILD',
'LAREQ',
'PLACE',
'CUSTOD',
'OTHER'
]
mask = episodes['REASON_PLACE_CHANGE'].isin(code_list) | episodes['REASON_PLACE_CHANGE'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_1006():
error = ErrorDefinition(
code='1006',
description='Missing type invalid.',
affected_fields=['MISSING'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
missing_from_care = dfs['Missing']
code_list = ['M', 'A']
mask = missing_from_care['MISSING'].isin(code_list) | missing_from_care['MISSING'].isna()
validation_error_mask = ~mask
validation_error_locations = missing_from_care.index[validation_error_mask]
return {'Missing': validation_error_locations.tolist()}
return error, _validate
def validate_631():
error = ErrorDefinition(
code='631',
description='Previous permanence option not a valid value.',
affected_fields=['PREV_PERM'],
)
def _validate(dfs):
if 'PrevPerm' not in dfs:
return {}
previous_permanence = dfs['PrevPerm']
code_list = ['P1', 'P2', 'P3', 'P4', 'Z1']
mask = previous_permanence['PREV_PERM'].isin(code_list) | previous_permanence['PREV_PERM'].isna()
validation_error_mask = ~mask
validation_error_locations = previous_permanence.index[validation_error_mask]
return {'PrevPerm': validation_error_locations.tolist()}
return error, _validate
def validate_196():
error = ErrorDefinition(
code='196',
description='Strengths and Difficulties (SDQ) reason is not a valid code.',
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
code_list = ['SDQ1', 'SDQ2', 'SDQ3', 'SDQ4', 'SDQ5']
mask = oc2['SDQ_REASON'].isin(code_list) | oc2['SDQ_REASON'].isna()
validation_error_mask = ~mask
validation_error_locations = oc2.index[validation_error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_177():
error = ErrorDefinition(
code='177',
description='The legal status of adopter(s) code is not a valid code.',
affected_fields=['LS_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['L0', 'L11', 'L12', 'L2', 'L3', 'L4']
mask = adoptions['LS_ADOPTR'].isin(code_list) | adoptions['LS_ADOPTR'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_176():
error = ErrorDefinition(
code='176',
description='The gender of adopter(s) at the date of adoption code is not a valid code.',
affected_fields=['SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['M1', 'F1', 'MM', 'FF', 'MF']
mask = adoptions['SEX_ADOPTR'].isin(code_list) | adoptions['SEX_ADOPTR'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_175():
error = ErrorDefinition(
code='175',
description='The number of adopter(s) code is not a valid code.',
affected_fields=['NB_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['1', '2']
mask = adoptions['NB_ADOPTR'].astype(str).isin(code_list) | adoptions['NB_ADOPTR'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_132():
error = ErrorDefinition(
code='132',
description='Data entry for activity after leaving care is invalid.',
affected_fields=['ACTIV'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
care_leavers = dfs['OC3']
code_list = [
'F1',
'P1',
'F2',
'P2',
'F4',
'P4',
'F5',
'P5',
'G4',
'G5',
'G6',
'0'
]
mask = care_leavers['ACTIV'].astype(str).isin(code_list) | care_leavers['ACTIV'].isna()
validation_error_mask = ~mask
validation_error_locations = care_leavers.index[validation_error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_131():
error = ErrorDefinition(
code='131',
description='Data entry for being in touch after leaving care is invalid.',
affected_fields=['IN_TOUCH'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
care_leavers = dfs['OC3']
code_list = [
'YES',
'NO',
'DIED',
'REFU',
'NREQ',
'RHOM'
]
mask = care_leavers['IN_TOUCH'].isin(code_list) | care_leavers['IN_TOUCH'].isna()
validation_error_mask = ~mask
validation_error_locations = care_leavers.index[validation_error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_120():
error = ErrorDefinition(
code='120',
description='The reason for the reversal of the decision that the child should be placed for adoption code is not valid.',
affected_fields=['REASON_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
placed_adoptions = dfs['PlacedAdoption']
code_list = ['RD1', 'RD2', 'RD3', 'RD4']
mask = placed_adoptions['REASON_PLACED_CEASED'].isin(code_list) | placed_adoptions[
'REASON_PLACED_CEASED'].isna()
validation_error_mask = ~mask
validation_error_locations = placed_adoptions.index[validation_error_mask]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_114():
error = ErrorDefinition(
code='114',
description='Data entry to record the status of former carer(s) of an adopted child is invalid.',
affected_fields=['FOSTER_CARE'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['0', '1']
mask = adoptions['FOSTER_CARE'].astype(str).isin(code_list) | adoptions['FOSTER_CARE'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_178():
error = ErrorDefinition(
code='178',
description='Placement provider code is not a valid code.',
affected_fields=['PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list_placement_provider = ['PR0', 'PR1', 'PR2', 'PR3', 'PR4', 'PR5']
code_list_placement_with_no_provider = ['T0', 'T1', 'T2', 'T3', 'T4', 'Z1']
place_provider_needed_and_correct = episodes['PLACE_PROVIDER'].isin(code_list_placement_provider) & ~episodes[
'PLACE'].isin(code_list_placement_with_no_provider)
place_provider_not_provided = episodes['PLACE_PROVIDER'].isna()
place_provider_not_needed = episodes['PLACE_PROVIDER'].isna() & episodes['PLACE'].isin(
code_list_placement_with_no_provider)
mask = place_provider_needed_and_correct | place_provider_not_provided | place_provider_not_needed
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_103():
error = ErrorDefinition(
code='103',
description='The ethnicity code is either not valid or has not been entered.',
affected_fields=['ETHNIC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
header = dfs['Header']
code_list = [
'WBRI',
'WIRI',
'WOTH',
'WIRT',
'WROM',
'MWBC',
'MWBA',
'MWAS',
'MOTH',
'AIND',
'APKN',
'ABAN',
'AOTH',
'BCRB',
'BAFR',
'BOTH',
'CHNE',
'OOTH',
'REFU',
'NOBT'
]
mask = header['ETHNIC'].isin(code_list)
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_143():
error = ErrorDefinition(
code='143',
description='The reason for new episode code is not a valid code.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = ['S', 'P', 'L', 'T', 'U', 'B']
mask = episodes['RNE'].isin(code_list) | episodes['RNE'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_144():
error = ErrorDefinition(
code='144',
description='The legal status code is not a valid code.',
affected_fields=['LS'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'C1',
'C2',
'D1',
'E1',
'V2',
'V3',
'V4',
'J1',
'J2',
'J3',
'L1',
'L2',
'L3'
]
mask = episodes['LS'].isin(code_list) | episodes['LS'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_145():
error = ErrorDefinition(
code='145',
description='Category of need code is not a valid code.',
affected_fields=['CIN'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'N1',
'N2',
'N3',
'N4',
'N5',
'N6',
'N7',
'N8',
]
mask = episodes['CIN'].isin(code_list) | episodes['CIN'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_146():
error = ErrorDefinition(
code='146',
description='Placement type code is not a valid code.',
affected_fields=['PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'A3',
'A4',
'A5',
'A6',
'H5',
'K1',
'K2',
'P1',
'P2',
'P3',
'R1',
'R2',
'R3',
'R5',
'S1',
'T0',
'T1',
'T2',
'T3',
'T4',
'U1',
'U2',
'U3',
'U4',
'U5',
'U6',
'Z1'
]
mask = episodes['PLACE'].isin(code_list) | episodes['PLACE'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_149():
error = ErrorDefinition(
code='149',
description='Reason episode ceased code is not valid. ',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'E11',
'E12',
'E2',
'E3',
'E4A',
'E4B',
'E13',
'E41',
'E45',
'E46',
'E47',
'E48',
'E5',
'E6',
'E7',
'E8',
'E9',
'E14',
'E15',
'E16',
'E17',
'X1'
]
mask = episodes['REC'].isin(code_list) | episodes['REC'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_167():
error = ErrorDefinition(
code='167',
description='Data entry for participation is invalid or blank.',
affected_fields=['REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
review = dfs['Reviews']
code_list = ['PN0', 'PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7']
        mask = (review['REVIEW'].notna() & review['REVIEW_CODE'].isin(code_list)) | (
            review['REVIEW'].isna() & review['REVIEW_CODE'].isna())
validation_error_mask = ~mask
validation_error_locations = review.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_101():
error = ErrorDefinition(
code='101',
description='Gender code is not valid.',
affected_fields=['SEX'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
header = dfs['Header']
code_list = ['1', '2']
mask = header['SEX'].astype(str).isin(code_list)
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_141():
error = ErrorDefinition(
code='141',
description='Date episode began is not a valid date.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce').notna()
na_location = episodes['DECOM'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_147():
error = ErrorDefinition(
code='147',
description='Date episode ceased is not a valid date.',
affected_fields=['DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce').notna()
na_location = episodes['DEC'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_171():
error = ErrorDefinition(
code='171',
description="Date of birth of mother's child is not a valid date.",
affected_fields=['MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
mask = pd.to_datetime(header['MC_DOB'], format='%d/%m/%Y', errors='coerce').notna()
na_location = header['MC_DOB'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_102():
error = ErrorDefinition(
code='102',
description='Date of birth is not a valid date.',
affected_fields=['DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
mask = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce').notna()
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_112():
error = ErrorDefinition(
code='112',
description='Date should be placed for adoption is not a valid date.',
affected_fields=['DATE_INT'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce').notna()
na_location = ad1['DATE_INT'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = ad1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_115():
error = ErrorDefinition(
code='115',
description="Date of Local Authority's (LA) decision that a child should be placed for adoption is not a valid date.",
affected_fields=['DATE_PLACED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
adopt = dfs['PlacedAdoption']
mask = pd.to_datetime(adopt['DATE_PLACED'], format='%d/%m/%Y', errors='coerce').notna()
na_location = adopt['DATE_PLACED'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = adopt.index[validation_error_mask]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_116():
error = ErrorDefinition(
code='116',
description="Date of Local Authority's (LA) decision that a child should no longer be placed for adoption is not a valid date.",
affected_fields=['DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
adopt = dfs['PlacedAdoption']
mask = pd.to_datetime(adopt['DATE_PLACED_CEASED'], format='%d/%m/%Y', errors='coerce').notna()
na_location = adopt['DATE_PLACED_CEASED'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = adopt.index[validation_error_mask]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_392c():
error = ErrorDefinition(
code='392c',
description='Postcode(s) provided are invalid.',
affected_fields=['HOME_POST', 'PL_POST'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
home_provided = episodes['HOME_POST'].notna()
home_details = merge_postcodes(episodes, "HOME_POST")
home_valid = home_details['pcd'].notna()
pl_provided = episodes['PL_POST'].notna()
pl_details = merge_postcodes(episodes, "PL_POST")
pl_valid = pl_details['pcd'].notna()
error_mask = (home_provided & ~home_valid) | (pl_provided & ~pl_valid)
return {'Episodes': episodes.index[error_mask].tolist()}
return error, _validate
def validate_213():
error = ErrorDefinition(
code='213',
description='Placement provider information not required.',
affected_fields=['PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = df['PLACE'].isin(['T0', 'T1', 'T2', 'T3', 'T4', 'Z1']) & df['PLACE_PROVIDER'].notna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_168():
error = ErrorDefinition(
code='168',
description='Unique Pupil Number (UPN) is not valid. If unknown, default codes should be UN1, UN2, UN3, UN4 or UN5.',
affected_fields=['UPN'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
df = dfs['Header']
mask = df['UPN'].str.match(r'(^((?![IOS])[A-Z]){1}(\d{12}|\d{11}[A-Z]{1})$)|^(UN[1-5])$', na=False)
mask = ~mask
return {'Header': df.index[mask].tolist()}
return error, _validate
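def _example_upn_patterns():
    # Illustrative sketch only - not called by the validator. The UPN strings below are
    # hypothetical and are checked against the same pattern used in validate_168.
    import re
    upn_pattern = re.compile(r'(^((?![IOS])[A-Z]){1}(\d{12}|\d{11}[A-Z]{1})$)|^(UN[1-5])$')
    examples = {
        'A123456789012': True,   # letter (not I, O or S) followed by 12 digits
        'B12345678901C': True,   # letter, 11 digits, then a final letter
        'UN3': True,             # one of the accepted "unknown" default codes
        'I123456789012': False,  # first character may not be I, O or S
        'UN7': False,            # only UN1-UN5 are accepted defaults
    }
    return all(bool(upn_pattern.match(upn)) == expected for upn, expected in examples.items())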
def validate_388():
error = ErrorDefinition(
code='388',
description='Reason episode ceased is coded new episode begins, but there is no continuation episode.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
df['DECOM'] = pd.to_datetime(df['DECOM'], format='%d/%m/%Y', errors='coerce')
df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce')
            df['DECOM'] = df['DECOM'].fillna(pd.to_datetime('01/01/1901', format='%d/%m/%Y'))  # explicit datetime sentinel keeps the column dtype stable
df = df.sort_values(['CHILD', 'DECOM'])
df['DECOM_NEXT_EPISODE'] = df.groupby(['CHILD'])['DECOM'].shift(-1)
            # Rows with no next DECOM are each child's final (maximum-DECOM) episode,
            # so no separate idxmax lookup is needed.
no_next = df.DECOM_NEXT_EPISODE.isna() & df.CHILD.notna()
# Dataframe with the maximum DECOM removed
max_decom_removed = df[~no_next]
# Dataframe with the maximum DECOM only
max_decom_only = df[no_next]
# Case 1: If reason episode ceased is coded X1 there must be a subsequent episode
# starting on the same day.
case1 = max_decom_removed[(max_decom_removed['REC'] == 'X1') &
(max_decom_removed['DEC'].notna()) &
(max_decom_removed['DECOM_NEXT_EPISODE'].notna()) &
(max_decom_removed['DEC'] != max_decom_removed['DECOM_NEXT_EPISODE'])]
# Case 2: If an episode ends but the child continues to be looked after, a new
            # episode should start on the same day. The reason episode ceased code of
# the episode which ends must be X1.
case2 = max_decom_removed[(max_decom_removed['REC'] != 'X1') &
(max_decom_removed['REC'].notna()) &
(max_decom_removed['DEC'].notna()) &
(max_decom_removed['DECOM_NEXT_EPISODE'].notna()) &
(max_decom_removed['DEC'] == max_decom_removed['DECOM_NEXT_EPISODE'])]
# Case 3: If a child ceases to be looked after reason episode ceased code X1 must
# not be used.
case3 = max_decom_only[(max_decom_only['DEC'].notna()) &
(max_decom_only['REC'] == 'X1')]
mask_case1 = case1.index.tolist()
mask_case2 = case2.index.tolist()
mask_case3 = case3.index.tolist()
mask = mask_case1 + mask_case2 + mask_case3
mask.sort()
return {'Episodes': mask}
return error, _validate
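def _example_next_episode_lookup():
    # Illustrative sketch only - not called by the validator. Hypothetical toy data
    # showing how the groupby().shift(-1) used in validate_388 lines each episode up
    # with the DECOM of the child's next episode, so that an 'X1' ending with no
    # continuation episode (case 3 above) can be flagged.
    import pandas as pd
    toy = pd.DataFrame({
        'CHILD': ['A', 'A', 'B'],
        'DECOM': pd.to_datetime(['2020-01-01', '2020-02-01', '2020-01-05']),
        'DEC': pd.to_datetime(['2020-02-01', '2020-03-01', '2020-01-20']),
        'REC': ['X1', 'E3', 'X1'],
    })
    toy = toy.sort_values(['CHILD', 'DECOM'])
    toy['DECOM_NEXT_EPISODE'] = toy.groupby('CHILD')['DECOM'].shift(-1)
    flagged = (toy['REC'] == 'X1') & toy['DEC'].notna() & toy['DECOM_NEXT_EPISODE'].isna()
    return toy.index[flagged]  # only child B's final X1 episode is flagged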
def validate_113():
error = ErrorDefinition(
code='113',
description='Date matching child and adopter(s) is not a valid date.',
affected_fields=['DATE_MATCH'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = pd.to_datetime(ad1['DATE_MATCH'], format='%d/%m/%Y', errors='coerce').notna()
na_location = ad1['DATE_MATCH'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = ad1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_134():
error = ErrorDefinition(
code='134',
description='Data on adoption should not be entered for the OC3 cohort.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR',
'SEX_ADOPTR', 'LS_ADOPTR'],
)
def _validate(dfs):
if 'OC3' not in dfs or 'AD1' not in dfs:
return {}
else:
oc3 = dfs['OC3']
ad1 = dfs['AD1']
ad1['ad1_index'] = ad1.index
all_data = ad1.merge(oc3, how='left', on='CHILD')
na_oc3_data = (
all_data['IN_TOUCH'].isna() &
all_data['ACTIV'].isna() &
all_data['ACCOM'].isna()
)
na_ad1_data = (
all_data['DATE_INT'].isna() &
all_data['DATE_MATCH'].isna() &
all_data['FOSTER_CARE'].isna() &
all_data['NB_ADOPTR'].isna() &
all_data['SEX_ADOPTR'].isna() &
all_data['LS_ADOPTR'].isna()
)
validation_error = ~na_oc3_data & ~na_ad1_data
validation_error_locations = all_data.loc[validation_error, 'ad1_index'].unique()
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_119():
error = ErrorDefinition(
code='119',
description='If the decision is made that a child should no longer be placed for adoption, then the date of this decision and the reason why this decision was made must be completed.',
affected_fields=['REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
adopt = dfs['PlacedAdoption']
na_placed_ceased = adopt['DATE_PLACED_CEASED'].isna()
na_reason_ceased = adopt['REASON_PLACED_CEASED'].isna()
validation_error = (na_placed_ceased & ~na_reason_ceased) | (~na_placed_ceased & na_reason_ceased)
validation_error_locations = adopt.index[validation_error]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_159():
error = ErrorDefinition(
code='159',
description='If a child has been recorded as not receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be completed as well.',
affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
mask1 = oc2['SUBSTANCE_MISUSE'].astype(str) == '1'
mask2 = oc2['INTERVENTION_RECEIVED'].astype(str) == '0'
mask3 = oc2['INTERVENTION_OFFERED'].isna()
validation_error = mask1 & mask2 & mask3
validation_error_locations = oc2.index[validation_error]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_142():
error = ErrorDefinition(
code='142',
description='A new episode has started, but the previous episode has not ended.',
affected_fields=['DEC', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
df['DECOM'] = pd.to_datetime(df['DECOM'], format='%d/%m/%Y', errors='coerce')
df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce')
df['DECOM'] = df['DECOM'].fillna('01/01/1901') # Watch for potential future issues
df['DECOM'] = df['DECOM'].replace('01/01/1901', pd.NA)
last_episodes = df.sort_values('DECOM').reset_index().groupby(['CHILD'])['index'].last()
ended_episodes_df = df.loc[~df.index.isin(last_episodes)]
ended_episodes_df = ended_episodes_df[(ended_episodes_df['DEC'].isna() | ended_episodes_df['REC'].isna()) &
ended_episodes_df['CHILD'].notna() & ended_episodes_df[
'DECOM'].notna()]
mask = ended_episodes_df.index.tolist()
return {'Episodes': mask}
return error, _validate
def validate_148():
error = ErrorDefinition(
code='148',
description='Date episode ceased and reason episode ceased must both be coded, or both left blank.',
affected_fields=['DEC', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce')
mask = ((df['DEC'].isna()) & (df['REC'].notna())) | ((df['DEC'].notna()) & (df['REC'].isna()))
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_151():
error = ErrorDefinition(
code='151',
        description="All data items relating to a child's adoption must be coded or left blank.",
        affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
na_date_int = ad1['DATE_INT'].isna()
na_date_match = ad1['DATE_MATCH'].isna()
na_foster_care = ad1['FOSTER_CARE'].isna()
na_nb_adoptr = ad1['NB_ADOPTR'].isna()
na_sex_adoptr = ad1['SEX_ADOPTR'].isna()
na_lsadoptr = ad1['LS_ADOPTR'].isna()
ad1_not_null = (
~na_date_int & ~na_date_match & ~na_foster_care & ~na_nb_adoptr & ~na_sex_adoptr & ~na_lsadoptr)
validation_error = (
~na_date_int | ~na_date_match | ~na_foster_care | ~na_nb_adoptr | ~na_sex_adoptr | ~na_lsadoptr) & ~ad1_not_null
validation_error_locations = ad1.index[validation_error]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_182():
error = ErrorDefinition(
code='182',
description='Data entries on immunisations, teeth checks, health assessments and substance misuse problem identified should be completed or all OC2 fields should be left blank.',
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'CONVICTED',
'HEALTH_CHECK', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
mask1 = (
oc2['IMMUNISATIONS'].isna() |
oc2['TEETH_CHECK'].isna() |
oc2['HEALTH_ASSESSMENT'].isna() |
oc2['SUBSTANCE_MISUSE'].isna()
)
mask2 = (
oc2['CONVICTED'].isna() &
oc2['HEALTH_CHECK'].isna() &
oc2['INTERVENTION_RECEIVED'].isna() &
oc2['INTERVENTION_OFFERED'].isna()
)
validation_error = mask1 & ~mask2
validation_error_locations = oc2.index[validation_error]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_214():
error = ErrorDefinition(
code='214',
description='Placement location information not required.',
affected_fields=['PL_POST', 'URN'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = df['LS'].isin(['V3', 'V4']) & ((df['PL_POST'].notna()) | (df['URN'].notna()))
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_222():
error = ErrorDefinition(
code='222',
description='Ofsted Unique reference number (URN) should not be recorded for this placement type.',
affected_fields=['URN'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
place_code_list = ['H5', 'P1', 'P2', 'P3', 'R1', 'R2', 'R5', 'T0', 'T1', 'T2', 'T3', 'T4', 'Z1']
mask = (df['PLACE'].isin(place_code_list)) & (df['URN'].notna()) & (df['URN'] != 'XXXXXX')
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_366():
error = ErrorDefinition(
code='366',
description='A child cannot change placement during the course of an individual short-term respite break.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = (df['LS'] == 'V3') & (df['RNE'] != 'S')
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_628():
error = ErrorDefinition(
code='628',
description='Motherhood details are not required for care leavers who have not been looked after during the year.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs or 'OC3' not in dfs:
return {}
else:
hea = dfs['Header']
epi = dfs['Episodes']
oc3 = dfs['OC3']
hea = hea.reset_index()
oc3_no_nulls = oc3[oc3[['IN_TOUCH', 'ACTIV', 'ACCOM']].notna().any(axis=1)]
hea_merge_epi = hea.merge(epi, how='left', on='CHILD', indicator=True)
hea_not_in_epi = hea_merge_epi[hea_merge_epi['_merge'] == 'left_only']
cohort_to_check = hea_not_in_epi.merge(oc3_no_nulls, how='inner', on='CHILD')
error_cohort = cohort_to_check[cohort_to_check['MOTHER'].notna()]
error_list = list(set(error_cohort['index'].to_list()))
error_list.sort()
return {'Header': error_list}
return error, _validate
def validate_164():
error = ErrorDefinition(
code='164',
description='Distance is not valid. Please check a valid postcode has been entered.',
affected_fields=['PL_DISTANCE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
distance = pd.to_numeric(df['PL_DISTANCE'], errors='coerce')
# Use a bit of tolerance in these bounds
distance_valid = distance.gt(-0.2) & distance.lt(1001.0)
mask = ~is_short_term & ~distance_valid
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_169():
error = ErrorDefinition(
code='169',
description='Local Authority (LA) of placement is not valid or is missing. Please check a valid postcode has been entered.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
# Because PL_LA is derived, it will always be valid if present
mask = ~is_short_term & df['PL_LA'].isna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_179():
error = ErrorDefinition(
code='179',
description='Placement location code is not a valid code.',
affected_fields=['PL_LOCATION'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
# Because PL_LOCATION is derived, it will always be valid if present
mask = ~is_short_term & df['PL_LOCATION'].isna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_1015():
error = ErrorDefinition(
code='1015',
description='Placement provider is own provision but child not placed in own LA.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
local_authority = dfs['metadata']['localAuthority']
placement_fostering_or_adoption = df['PLACE'].isin([
'A3', 'A4', 'A5', 'A6', 'U1', 'U2', 'U3', 'U4', 'U5', 'U6',
])
own_provision = df['PLACE_PROVIDER'].eq('PR1')
is_short_term = df['LS'].isin(['V3', 'V4'])
is_pl_la = df['PL_LA'].eq(local_authority)
checked_episodes = ~placement_fostering_or_adoption & ~is_short_term & own_provision
checked_episodes = checked_episodes & df['LS'].notna() & df['PLACE'].notna()
mask = checked_episodes & ~is_pl_la
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_411():
error = ErrorDefinition(
code='411',
description='Placement location code disagrees with LA of placement.',
affected_fields=['PL_LOCATION'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
local_authority = dfs['metadata']['localAuthority']
mask = df['PL_LOCATION'].eq('IN') & df['PL_LA'].ne(local_authority)
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_420():
error = ErrorDefinition(
code='420',
description='LA of placement completed but child is looked after under legal status V3 or V4.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
mask = is_short_term & df['PL_LA'].notna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_355():
error = ErrorDefinition(
code='355',
description='Episode appears to have lasted for less than 24 hours',
affected_fields=['DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = df['DECOM'].astype(str) == df['DEC'].astype(str)
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_586():
error = ErrorDefinition(
code='586',
description='Dates of missing periods are before child’s date of birth.',
affected_fields=['MIS_START'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
df = dfs['Missing']
df['DOB'] = pd.to_datetime(df['DOB'], format='%d/%m/%Y', errors='coerce')
df['MIS_START'] = pd.to_datetime(df['MIS_START'], format='%d/%m/%Y', errors='coerce')
error_mask = df['MIS_START'].notna() & (df['MIS_START'] <= df['DOB'])
return {'Missing': df.index[error_mask].to_list()}
return error, _validate
def validate_630():
error = ErrorDefinition(
code='630',
description='Information on previous permanence option should be returned.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'PrevPerm' not in dfs or 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
pre = dfs['PrevPerm']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
epi = epi.reset_index()
# Form the episode dataframe which has an 'RNE' of 'S' in this financial year
epi_has_rne_of_S_in_year = epi[(epi['RNE'] == 'S') & (epi['DECOM'] >= collection_start)]
# Merge to see
# 1) which CHILD ids are missing from the PrevPerm file
# 2) which CHILD are in the prevPerm file, but don't have the LA_PERM/DATE_PERM field completed where they should be
# 3) which CHILD are in the PrevPerm file, but don't have the PREV_PERM field completed.
merged_epi_preperm = epi_has_rne_of_S_in_year.merge(pre, on='CHILD', how='left', indicator=True)
error_not_in_preperm = merged_epi_preperm['_merge'] == 'left_only'
error_wrong_values_in_preperm = (merged_epi_preperm['PREV_PERM'] != 'Z1') & (
merged_epi_preperm[['LA_PERM', 'DATE_PERM']].isna().any(axis=1))
error_null_prev_perm = (merged_epi_preperm['_merge'] == 'both') & (merged_epi_preperm['PREV_PERM'].isna())
error_mask = error_not_in_preperm | error_wrong_values_in_preperm | error_null_prev_perm
error_list = merged_epi_preperm[error_mask]['index'].to_list()
error_list = list(set(error_list))
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_501():
error = ErrorDefinition(
code='501',
description='A new episode has started before the end date of the previous episode.',
affected_fields=['DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi = epi.reset_index()
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi['DEC'] = | pd.to_datetime(epi['DEC'], format='%d/%m/%Y', errors='coerce') | pandas.to_datetime |
import IMLearn.learners.regressors.linear_regression
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
import plotly.graph_objects as go
import matplotlib.pyplot as plt
pio.templates.default = "simple_white"
def load_data(filename: str) -> pd.DataFrame:
"""
Load city daily temperature dataset and preprocess data.
Parameters
----------
filename: str
        Path to city daily temperature dataset
Returns
-------
Design matrix and response vector (Temp)
"""
df = pd.read_csv(filename, parse_dates=['Date'])
# remove missing temp samples
df.dropna(subset=['Temp'], axis=0, inplace=True)
# remove temp values lower than -20 or higher than 60
df = df.loc[(df['Temp'] >= -20) & (df['Temp'] <= 60)]
    # keep only samples whose feature values fall within a valid range
df = df.loc[(df['Year'] >= 0) & (df['Year'] <= 2022)]
df = df.loc[(df['Month'] > 0) & (df['Month'] <= 12)]
df = df.loc[(df['Day'] > 0) & (df['Day'] <= 31)]
# create day of year feature and remove Date feature as it is already
# represented in other features
df['DayOfYear'] = df['Date'].dt.dayofyear
df.drop("Date", axis=1, inplace=True)
    # one-hot encode the categorical City feature with dummy variables
dummies = pd.get_dummies(df.City)
df = | pd.concat([df, dummies], axis='columns') | pandas.concat |
#!/usr/bin/env python
"""
Parses SPINS' EA log files into BIDS tsvs
usage:
parse_ea_task.py <log_file>
arguments:
<log_file> The location of the EA file to parse
Details:
insert these later
Requires:
insert these later
"""
import pandas as pd
import numpy as np
from docopt import docopt
import re
import os
#reads in log file and subtracts the initial TRs/MRI startup time
def read_in_logfile(path):
log_file=pd.read_csv(path, sep='\t', skiprows=3)
    time_to_subtract=int(log_file.Duration[log_file.Code=='MRI_start'].iloc[0])
log_file.Time=log_file.Time-time_to_subtract #subtracts mri start times from all onset times
return log_file
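#Example usage (illustrative only - the path below is hypothetical; any Presentation
#.log export with a three-line header and an 'MRI_start' Code row should work):
#    log = read_in_logfile('sub-0000_task-EA_run-1.log')
#    log[['Time', 'Event Type', 'Code']].head()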
#Grabs the starts of blocks and returns rows for them
def get_blocks(log,vid_info):
#identifies the video trial types (as opposed to button press events etc)
mask = ["vid" in log['Code'][i] for i in range(0,log.shape[0])]
#creates the dataframe with onset times and event types
df = pd.DataFrame({'onset':log.loc[mask]['Time'],
'trial_type':log.loc[mask]['Event Type'],
'movie_name':log.loc[mask]['Code']})
#adds trial type info
df['trial_type']=df['movie_name'].apply(lambda x: "circle_block" if "cvid" in x else "EA_block")
    #add durations, converted to the log's time units (1/10,000ths of a second)
df['duration']=df['movie_name'].apply(lambda x: int(vid_info[x]['duration'])*10000 if x in vid_info else "n/a")
#adds names of stim_files, according to the vid_info spreadsheet
df['stim_file']=df['movie_name'].apply(lambda x: vid_info[x]['stim_file'] if x in vid_info else "n/a")
#adds an end column to the beginning of blocks (it's useful for processing but will remove later)
df['end']=df['onset']+df['duration']
return(df)
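# Hedged usage sketch (assumes vid_info was built by format_vid_info below):
#   blocks = get_blocks(log, vid_info)
# 'onset', 'duration' and 'end' then share the log's 1/10,000 s time base.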
#grabs stimulus metadata
def format_vid_info(vid):
vid.columns = [c.lower() for c in vid.columns]
vid = vid.rename(index={0:"stim_file", 1:"duration"}) #grabs the file name and the durations from the info file
vid = vid.to_dict()
return(vid)
#Reads in gold standard answers
def read_in_standard(timing_path):
df = | pd.read_csv(timing_path) | pandas.read_csv |
"""Preprocessing code for Sumo outputs.
Use to put data into hdf stores with A, X, Y arrays.
"""
import logging
import multiprocessing
import os
import re
import time
from collections import OrderedDict
from itertools import repeat
import networkx as nx
import pandas as pd
import six
from trafficgraphnn.load_data import pad_value_for_feature
from trafficgraphnn.preprocessing.io import (get_preprocessed_filenames,
light_switch_out_files_for_sumo_network,
light_timing_xml_to_phase_df,
write_hdf_for_sumo_network)
from trafficgraphnn.preprocessing.liumethod_new import liu_method_for_net
raw_xml_x_feature_defaults = [
    'occupancy', 'speed', 'green', 'liu_estimated_veh', 'nVehContrib',
    'nVehEntered']
raw_xml_y_feature_defaults = ['nVehSeen', 'maxJamLengthInVehicles']
_logger = logging.getLogger(__name__)
def run_preprocessing(sumo_network, output_filename=None):
if output_filename is None:
output_filename = os.path.join(
os.path.dirname(sumo_network.netfile),
'preprocessed_data',
'{:04}.h5').format(_next_file_number(sumo_network))
t0 = time.time()
hdf_filename = write_hdf_for_sumo_network(sumo_network)
t = time.time() - t0
_logger.debug('Extracting xml took {} s'.format(t))
t0 = time.time()
write_per_lane_tables(output_filename, sumo_network, hdf_filename)
    t = time.time() - t0
_logger.debug('Writing preprocessed data took {} s'.format(t))
return output_filename
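# Hedged example of the default output path (assuming _next_file_number(...) returns 7):
#   <dirname(sumo_network.netfile)>/preprocessed_data/0007.h5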
def write_per_lane_tables(output_filename,
sumo_network,
raw_xml_filename=None,
X_features=raw_xml_x_feature_defaults,
Y_features=raw_xml_y_feature_defaults,
complib='blosc:lz4', complevel=5):
"""Write an hdf file with per-lane X and Y data arrays"""
X_df, Y_df = build_X_Y_tables_for_lanes(
sumo_network, raw_xml_filename=raw_xml_filename, X_features=X_features,
Y_features=Y_features)
lanes_with_data = X_df.index.get_level_values(0).unique()
assert len(lanes_with_data.difference(
Y_df.index.get_level_values(0)).unique()) == 0
A_dfs = build_A_tables_for_lanes(sumo_network, lanes_with_data)
A_df = | pd.concat(A_dfs, axis=1) | pandas.concat |
import pandas as pd
import os
from collections import namedtuple
from strategy.strategy import Exposures, Portfolio
from strategy.rebalance import get_relative_to_expiry_instrument_weights, \
get_relative_to_expiry_rebalance_dates, get_fixed_frequency_rebalance_dates
from strategy.calendar import get_mtm_dates
def make_container(holdings, trades, pnl):
container = namedtuple("sim_result", ["holdings", "trades", "pnl"])
return container(holdings, trades, pnl)
def make_exposures(root_generics, meta_fp, market_fp):
return Exposures.from_folder(meta_fp, market_fp, root_generics)
def make_portfolio(exposures, sd, ed, capital, offset, all_monthly=False,
holidays=None):
rebal_dts = get_relative_to_expiry_rebalance_dates(
sd, ed, exposures.expiries, offset, all_monthly=all_monthly
)
exchanges = exposures.meta_data.loc["exchange", :].unique()
mtm_dates = get_mtm_dates(sd, ed, exchanges, holidays=holidays)
root_generics = exposures.future_root_and_generics
wts = get_relative_to_expiry_instrument_weights(
mtm_dates, root_generics, exposures.expiries, offset,
all_monthly=all_monthly
)
portfolio = Portfolio(
exposures, rebal_dts, mtm_dates, wts, initial_capital=capital
)
return portfolio
def make_frequency_portfolio(frequency, offset, exposures, sd, ed, capital,
holidays=None):
rebal_dts = get_fixed_frequency_rebalance_dates(
sd, ed, frequency, offset
)
wts = {}
exchanges = exposures.meta_data.loc["exchange", :].unique()
mtm_dates = get_mtm_dates(sd, ed, exchanges, holidays=holidays)
portfolio = Portfolio(
exposures, rebal_dts, mtm_dates, wts, initial_capital=capital
)
return portfolio
def make_signal(portfolio):
asts = portfolio.future_generics + portfolio.equities
dates = portfolio.rebalance_dates
signal = pd.DataFrame(1, index=dates, columns=asts)
return signal
def get_notionals(risk_target, capital, signals, prices, multipliers,
discrete):
if discrete:
def calc(sig, price, mult):
return round(sig * risk_target * capital / (price * mult)) * price * mult # NOQA
else:
def calc(sig, price, mult):
return sig * risk_target * capital * price * mult
notionals = []
for s_i, p_i, m_i in zip(signals, prices, multipliers):
notionals.append(calc(s_i, p_i, m_i))
return notionals
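# Hedged worked example for the discrete branch (illustrative numbers only):
#   get_notionals(0.1, 1e6, [1.0], [2000.0], [50.0], True)
#   -> [100000.0]   # round(0.1 * 1e6 / (2000 * 50)) = 1 contract, times 2000 * 50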
def read_futures_instr(data_path, instr):
fn = os.path.join(data_path, instr[:2], instr + ".csv")
data = pd.read_csv(fn, parse_dates=True, index_col=0)
data = data.Settle
data.sort_index(inplace=True)
return data
def splice_futures_and_pnl(data_path, instr_sd_ed):
# instr_sd_ed is a list of tuples,
# e.g. [("ESH2015", sd, ed1), ("ESM2015", ed2)], only sd is given for
# first contract, assummed consecutive afterwards
MULTS = {"ES": 50, "TY": 1000}
prices = []
pnls = []
instr, sd, ed = instr_sd_ed[0]
sd = pd.Timestamp(sd)
ed = | pd.Timestamp(ed) | pandas.Timestamp |
# *****************************************************************************
# © Copyright IBM Corp. 2018. All Rights Reserved.
#
# This program and the accompanying materials
# are made available under the terms of the Apache V2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# *****************************************************************************
import pytest
from unittest.mock import MagicMock, patch
import os
import json
import pandas as pd
import numpy as np
from iotfunctions import bif
from iotfunctions import metadata
def pytest_generate_tests(metafunc):
# called once per each test function
funcarglist = metafunc.cls.params[metafunc.function.__name__]
argnames = sorted(funcarglist[0])
metafunc.parametrize(
argnames, [[funcargs[name] for name in argnames] for funcargs in funcarglist]
)
EXPRESSION = "df['col1'] > 3"
# object for mocking
class Object():
pass
@pytest.fixture()
def empty_df():
return | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import math
import os
import sys
import time
import re
from datetime import date
import logging
from django.conf import settings
import sqlite3
import scipy.spatial
from stations import IDS_AND_DAS, STATIONS_DF
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# This BASE_DIR is for my personal system, where the DB
# is saved two levels up in the file directory
DB_PRODUCTION_DIR = os.path.join(os.path.dirname(os.path.dirname(BASE_DIR)), 'db/hydat_db/')
DB_DEV_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(BASE_DIR)))), 'hydat_db/Hydat_sqlite3_20210116/')
day_labels = {}
flag_labels = {}
for i in range(1, 32):
day_labels['FLOW' + str(i)] = i
flag_labels['FLOW_SYMBOL' + str(i)] = i
def map_day_to_var_name(s):
    match = re.search(r'\d', s)
    if match:
        return s[match.span()[0]:]
else:
print('substring not found')
def melt_(df):
id_vars = df.index.names
return df.reset_index().melt(id_vars=id_vars).set_index(id_vars)
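# Hedged sketch: melt_ keeps the existing (Multi)Index levels as id variables and
# reshapes the remaining columns to long format; e.g. a frame indexed by
# ['STATION_NUMBER', 'YEAR'] with FLOW1..FLOW31 columns (per day_labels above)
# melts into 'variable'/'value' pairs keyed by those id variables.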
def create_connection():
""" create a database connection to the SQLite database
specified by the db_file
:param db_file: database file
:return: Connection object or None
"""
try:
files_list = os.listdir(DB_PRODUCTION_DIR)
DB_DIR = DB_PRODUCTION_DIR
except FileNotFoundError as e:
print("###############")
print("Running in Development")
files_list = os.listdir(DB_DEV_DIR)
DB_DIR = DB_DEV_DIR
try:
db_file_path = DB_DIR + get_newest_db_file([f for f in files_list if '.sqlite3' in f])
conn = sqlite3.connect(db_file_path)
return conn
except sqlite3.Error as e:
        logging.warning('Sqlite3 connection Error: {}'.format(e))
print(e)
return None
def get_newest_db_file(files):
if len(files) == 0:
print('No database file found. Check the database path.')
newest_file = None
elif len(files) == 1:
newest_file = files[0]
else:
# sort the list in ascending order and return
# the last entry (latest date)
newest_file = sorted(files)[-1]
return newest_file
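# Hedged note: selection relies on plain lexicographic sorting, so the "newest" file
# is only correct when names embed a sortable date stamp, e.g. (hypothetical names)
#   get_newest_db_file(['Hydat_20200101.sqlite3', 'Hydat_20210116.sqlite3'])
#   -> 'Hydat_20210116.sqlite3'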
def get_daily_UR(station):
time0 = time.time()
# create a database connection
cols = ['STATION_NUMBER', 'YEAR', 'MONTH', 'NO_DAYS']
cols += day_labels.keys()
# columns = ['YEAR', 'MONTH', 'NO_DAYS']
conn = create_connection()
    try:
        with conn:
            return select_dly_flows_by_station_ID(conn, station)
    finally:
        conn.close()
def get_data_type(label, table_name, var_name):
conn = create_connection()
with conn:
cur = conn.cursor()
cur.execute(
"SELECT * FROM {}".format(table_name), ())
rows = cur.fetchall()
column_headers = [description[0] for description in cur.description]
# id_var_headers = column_headers[:11]
df = pd.DataFrame(rows, columns=column_headers)
conn.close()
return df
def get_annual_inst_peaks(station):
# create a database connection
conn = create_connection()
    try:
        with conn:
            return get_peak_inst_flows_by_station_ID(conn, station)
    finally:
        conn.close()
def get_peak_inst_flows_by_station_ID(conn, station):
"""
Query tasks by priority
:param conn: the Connection object
:param station: station number (ID) according to WSC convention
:return: dataframe object of annual maximum peak instantaneous flows
Notes:
Data type Q = flow, H = water level
Peak Code H = high, L = low (not sure if this is 24 hour day or not,
need to figure out how to access this info.)
"""
time0 = time.time()
query = "SELECT * FROM ANNUAL_INSTANT_PEAKS WHERE STATION_NUMBER=? AND DATA_TYPE=? AND PEAK_CODE=?"
df = | pd.read_sql_query(query, con=conn, params=(station, 'Q', 'H')) | pandas.read_sql_query |
"""
Code for scraping Reddit data using PushShift.io instead of normal praw (Python Reddit API wrapper) due to size
constraints imposed by Reddit after they moved off of cloudsearch
"""
import datetime as dt
import re
import time
import pandas as pd
import requests
from nltk.stem import WordNetLemmatizer
# Define fuction to pull subreddit submissions and pull fields from 'subfield'
def query_pushshift(subreddit, kind='submission', skip=30, times=10, subfield=None, comfields=None):
"""
Query PushShift API to get required information
:param subreddit:
:param kind:
:param skip:
:param times:
:param subfield:
:param comfields:
:return:
"""
# Create stem of PushShift API URL + kind and subreddit name
if comfields is None:
comfields = ['body', 'score', 'created_utc']
if subfield is None:
subfield = ['title', 'selftext', 'subreddit', 'created_utc', 'author', 'num_comments', 'score',
'is_self']
stem = "https://api.pushshift.io/reddit/search/{}/?subreddit={}&size=1000".format(kind, subreddit)
mylist = []
    # Loop over the query windows, from 1 up to the 'times' parameter
for x in range(1, times):
# Pull json from URL every X days
URL = "{}&after={}d".format(stem, skip * x)
print(URL)
response = requests.get(URL)
# Check API status
assert response.status_code == 200
mine = response.json()['data']
# Create dataframe from json data dictionary
df = pd.DataFrame.from_dict(mine)
mylist.append(df)
# Set sleep timer
time.sleep(1)
# Compile all posts into full list
full = | pd.concat(mylist, sort=False) | pandas.concat |
import pandas as pd
import numpy as np
import pytest
from kgextension.endpoints import DBpedia
from kgextension.schema_matching import (
relational_matching,
label_schema_matching,
value_overlap_matching,
string_similarity_matching
)
class TestRelationalMatching:
def test1_default(self):
path_input = "test/data/schema_matching/default_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test2_no_matches(self):
path_input = "test/data/schema_matching/no_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/no_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test3_uri_querier(self):
path_input = "test/data/schema_matching/default_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df, uri_data_model=True)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test4_uri_querier_no_matches(self):
path_input = "test/data/schema_matching/no_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/no_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df, uri_data_model=True)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test5_match_score(self):
score = 0.76
path_input = "test/data/schema_matching/default_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
expected_matches['value'] = np.where(
expected_matches['value']==1, score, expected_matches['value'])
output_matches = relational_matching(df, match_score=score)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test6_one_endpoint(self):
path_input = "test/data/schema_matching/default_matches_cities_input.csv"
df = pd.read_csv(path_input)
path_expected = "test/data/schema_matching/default_matches_cities_expected.csv"
expected_matches = pd.read_csv(path_expected)
output_matches = relational_matching(df, endpoints=DBpedia)
output_matches['value'] = pd.to_numeric(output_matches['value'])
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
def test7_no_http_input(self):
df = pd.DataFrame({'a': [1, 2, 3],
'b': [4, 5, 6]})
expected_matches = pd.DataFrame(columns=["uri_1", "uri_2", "value"])
output_matches = relational_matching(df)
pd.testing.assert_frame_equal(
output_matches, expected_matches, check_like=True)
class TestStringSimilarityMatching():
def test1_default(self):
path_input = "test/data/schema_matching/string_matching_input_t1t2.csv"
df = | pd.read_csv(path_input) | pandas.read_csv |
import os
import numpy as np
import matplotlib as mpl
mpl.use("pgf")
general_fontsize = 16
custon_pgf_rcparams = {
'font.family': 'serif',
'font.serif': 'cm',
'font.size': general_fontsize,
'xtick.labelsize': general_fontsize,
'ytick.labelsize': general_fontsize,
'axes.labelsize': general_fontsize,
'axes.titlesize': general_fontsize,
'axes.grid': True,
'legend.fontsize': general_fontsize - 2,
'legend.borderaxespad': 0.5,
'legend.borderpad': 0.4,
'legend.columnspacing': 2.0,
'legend.edgecolor': '0.8',
'legend.facecolor': 'inherit',
'legend.fancybox': True,
'legend.framealpha': 0.8,
'legend.frameon': True,
'legend.handleheight': 0.7,
'legend.handlelength': 2.0,
'legend.handletextpad': 0.8,
'legend.labelspacing': 0.5,
'legend.loc': 'best',
'legend.markerscale': 1.0,
'legend.numpoints': 1,
'legend.scatterpoints': 1,
'legend.shadow': False,
'text.usetex': True # use inline math for ticks
}
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from pypet.trajectory import Trajectory
from .. import analysis_collective as cba
figure_path = './figures/results/'
if not os.path.exists(figure_path):
os.makedirs(figure_path)
sns.set()
sns.set_palette('colorblind')
sns_colors = sns.color_palette()
mpl.rcParams.update(custon_pgf_rcparams)
filename = os.path.join(os.path.expanduser('/extra/swarmstartle_results'),
'looming_swarm_fitted_model_fixed_rho_null_kmd_matrix.hdf5')
traj = Trajectory(filename=filename)
# Now we want to load all stored data.
traj.f_load(index=-1, load_parameters=2, load_results=1)
traj.v_auto_load = True
starttime = -5
endtime = 505
# for i in range(3):
seed_idx = 1
noise_vals = np.linspace(0.01, 0.2, 5)
vmin = np.min(0.01)
vmax = np.max(0.2)
sm = plt.cm.ScalarMappable(cmap=plt.cm.viridis, norm=mpl.colors.Normalize(vmin=vmin, vmax=vmax))
fig, axes = plt.subplots(nrows=4, ncols=2, figsize=(16, 12), gridspec_kw={'height_ratios': [3, 3, 1, 3]})
plt.subplots_adjust(hspace=0.4)
for noise_idx, noise_val in enumerate(noise_vals[[2, 4]]):
filter_params = ['speed0', 'noisep']
def filter_func(speed0, noisep):
return speed0 == 1.125 and noisep == noise_val
idx_iterator = traj.f_find_idx(filter_params, filter_func)
pos_data = []
uw_data = []
vis_method_data = []
startle_data = []
vm_data = []
visangle_data = []
# fill result arrays
for run_idx in idx_iterator:
traj.v_idx = run_idx
pos_data.append(traj.f_get('results.outdata.crun.pos', fast_access=True, auto_load=True))
uw_data.append(traj.f_get('results.outdata.crun.uw', fast_access=True, auto_load=True))
startle_data.append(traj.f_get('results.outdata.crun.startle', fast_access=True, auto_load=True))
pol = cba.calcPolarization(uw_data[seed_idx])
ax1 = axes[0, noise_idx]
ax1.plot(np.arange(len(pol)) * traj.par.output, pol, color=sns_colors[noise_idx])
ax1.fill_between([0, 50], [0, 0], [1, 1], color='r', alpha=0.5, label='burned period')
ax1.set_xlim([starttime, endtime])
ax1.set_title('Noise = ' + str(noise_val))
ax1.legend(loc='lower right')
if noise_idx == 0:
ax1.set_ylabel('Polarization')
# ax1.set_ylim([0.7, 1])
startles = np.array(startle_data[seed_idx])
startle_rows = [np.where(startles[:, row_idx]) for row_idx in range(startles.shape[1])]
ax2 = axes[1, noise_idx]
for row_idx in range(startles.shape[1]):
ax2.eventplot(np.array(startle_rows[row_idx]) * traj.par.output, orientation='horizontal', color=sns_colors[noise_idx],
lineoffsets=row_idx)
if noise_idx == 0:
ax2.set_ylabel('Fish index')
ax2.set_xlim([starttime, endtime])
ax2.set_title('Startle events')
stlsum = np.sum(startles, axis=1)
stlseries = | pd.Series(stlsum) | pandas.Series |
"""
provider_JST_macrohistory.py
JORDÀ-SCHULARICK-TAYLOR MACROHISTORY DATABASE
Note: these data are in an Excel spreadsheet (XLSX); the user needs to download and place it in the appropriate
directory (based on config settings). The code assumes that there is only one spreadsheet in the directory.
Description from the website: http://www.macrohistory.net/data/
The Jordà-Schularick-Taylor Macrohistory Database is the result of an extensive data collection effort over several
years. In one place it brings together macroeconomic data that previously had been dispersed across a variety of
sources. On this website, we provide convenient no-cost open access under a license to the most extensive long-run
macro-financial dataset to date. Under the Terms of Use and Licence Terms below, the data is made freely available,
expressly forbidding commercial data providers from integrating, in addition to any existing data they may already
provide, all or parts of the dataset into their services, or to sell the data.
The database covers 17 advanced economies since 1870 on an annual basis. It comprises 45 real and nominal variables.
Among these, there are time series that had been hitherto unavailable to researchers, among them financial variables
such as bank credit to the non-financial private sector, mortgage lending and long-term returns on housing, equities,
bonds and bills. The database captures the near-universe of advanced-country macroeconomic and asset price dynamics,
covering on average over 90 percent of advanced-economy output and over 50 percent of world output.
Assembling the database, we relied on the input from colleagues, coauthors and doctoral students in many countries,
and consulted a broad range of historical sources and various publications of statistical offices and central banks.
For some countries we extended existing data series, for others, we relied on recent data collection efforts by others.
Yet in a non-negligible number of cases, we had to go back to archival sources including documents from governments,
central banks, and private banks. Typically, we combined information from various sources and spliced series to create
long-run datasets spanning the entire 1870–2016 period for the first time. The table below lists the available series.
---------------------------- CITATION OF DATA ------------------------------------------------------------------------
[From provider website.]
There are two citations to consider, depending on the data used. Please read this section to the end.
Under the terms of use, any information taken directly or indirectly from this source should be cited as <NAME>,
<NAME>, and <NAME>. 2017. “Macrofinancial History and the New Business Cycle Facts.” in NBER
Macroeconomics Annual 2016, volume 31, edited by <NAME> and <NAME>. Chicago: University of
Chicago Press.
However, those using any data pertaining to rates of return should cite <NAME>, <NAME>, <NAME>,
<NAME>, and <NAME>. 2019. “The Rate of Return on Everything, 1870–2015.” Quarterly Journal of
Economics. Forthcoming
We advise making explicit reference to the date when the database was consulted, as statistics are subject to revisions.
--------------- END CITATION NOTE ------------------------------------------------------------------------------------
Copyright 2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pandas
import glob
import datetime
import econ_platform_core
import econ_platform_core.entity_and_errors
import econ_platform_core.series_metadata
from econ_platform_core import log, log_warning, log_debug
import econ_platform_core.configuration
import econ_platform_core.tickers as tickers
import econ_platform_core.utils
class ProviderJSTMacrohistory(econ_platform_core.ProviderWrapper):
"""
Jordà-Schularick-Taylor Macrohistory Database XLS parser.
This spreadsheet was so different from the Australian data that it did not make sense to try to
    fit it into the XLS fetcher class.
"""
def __init__(self):
super().__init__(name='JST_Macrohistory')
self.WebPage = 'http://www.macrohistory.net/data/#DownloadData'
self.Directory = None
def fetch(self, series_meta):
if self.Directory is None:
self.Directory = econ_platform_core.utils.parse_config_path(
econ_platform_core.PlatformConfiguration['P_JST']['directory'])
flist = glob.glob(os.path.join(self.Directory, '*.xlsx'))
# Excel can lock files, throw them out...
flist = [x for x in flist if not '~' in x]
if len(flist) == 0:
raise econ_platform_core.entity_and_errors.PlatformError('No XLSX file in {0}'.format(self.Directory))
if len(flist) > 1:
raise econ_platform_core.entity_and_errors.PlatformError('More than one XLSX file in {0}: cannot tell which to use'.format(
self.Directory))
fname = flist[0]
log_debug('Reading %s', fname)
data_sheet = pandas.read_excel(fname, sheet_name='Data', header=0)
description_sheet = pandas.read_excel(fname, sheet_name='Variable description', index_col=0, header=None)
# All data is in one giant honkin' DataFrame, with one row per country per year.
# To generate time series, need to select one country at a time.
country_list = set(data_sheet['country'])
self.TableWasFetched = True
self.TableMeta = {}
self.TableSeries = {}
for country in country_list:
df = data_sheet.loc[data_sheet['country'] == country]
iso_code = df['iso'][df.index[0]]
# Now, blast through the data types.
dates = df['year']
cal_dates = [datetime.date(x, 1, 1) for x in dates]
exclusions = ('year', 'iso', 'country', 'ifs')
for c in df.columns:
if c in exclusions:
continue
ser = | pandas.Series(df[c]) | pandas.Series |
import time
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_openml
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from dataset.biased_dataset import BiasedDataset
DATA_ADULT_TRAIN = './data/raw/adult.data.csv'
DATA_ADULT_TEST = './data/raw/adult.test.csv'
DATA_CRIME_FILENAME = './data/raw/crime.csv'
DATA_GERMAN_FILENAME = './data/raw/german.csv'
# ADULT DATASET
# Listing of attributes:
# target: >50K, <=50K.
# age: continuous.
# workclass: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov,
# Without-pay, Never-worked.
# fnlwgt: continuous.
# education: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th,
# 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
# education-num: continuous.
# marital-status: Married-civ-spouse, Divorced, Never-married, Separated, Widowed,
# Married-spouse-absent, Married-AF-spouse.
# occupation: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty,
# Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving,
# Priv-house-serv, Protective-serv, Armed-Forces.
# relationship: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
# race: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
# sex: Female, Male.
# capital-gain: continuous.
# capital-loss: continuous.
# hours-per-week: continuous.
# native-country: United-States, Cambodia, England, Puerto-Rico, Canada, Germany,
# Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras,
# Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France,
# Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua,
# Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
def get_adult_data(sensitive_features, drop_columns=[], test_size=0.2, random_state=42):
"""
    sensitive_features: features to treat as sensitive in the BiasedDataset objects
    drop_columns: columns to drop before building the datasets
    returns: tuple of (train_ds, test_ds) BiasedDataset objects
"""
train_df = pd.read_csv(DATA_ADULT_TRAIN, na_values='?').dropna()
test_df = pd.read_csv(DATA_ADULT_TEST, na_values='?').dropna()
target = 'target'
X_train = train_df.drop(columns=[target])
y_train = train_df[[target]]
X_test = test_df.drop(columns=[target])
y_test = test_df[[target]]
train_ds = BiasedDataset(
X_train, y_train, sensitive_features=sensitive_features, drop_columns=drop_columns)
test_ds = BiasedDataset(
X_test, y_test, sensitive_features=sensitive_features, drop_columns=drop_columns)
return train_ds, test_ds
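# Illustrative call (hypothetical arguments; column names taken from the listing above):
#   train_ds, test_ds = get_adult_data(sensitive_features=['sex', 'race'],
#                                      drop_columns=['fnlwgt'])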
# CREDIT DATASET:
# This research employed a binary variable, default payment (Yes = 1, No = 0), as the response
# variable. This study reviewed the literature and used the following 23 variables as explanatory
# variables:
# x1: Amount of the given credit (NT dollar): it includes both the individual consumer
# credit and his/her family (supplementary) credit.
# x2: Gender (1 = male; 2 = female).
# x3: Education (1 = graduate school; 2 = university; 3 = high school; 4 = others).
# x4: Marital status (1 = married; 2 = single; 3 = others).
# x5: Age (year).
# x6 - x11: History of past payment. We tracked the past monthly payment records (from April to
# September, 2005) as follows: x6 = the repayment status in September, 2005; x7 = the
# repayment status in August, 2005; . . .;x11 = the repayment status in April, 2005. The
# measurement scale for the repayment status is: -1 = pay duly; 1 = payment delay for one
# month; 2 = payment delay for two months; . . .; 8 = payment delay for eight months;
# 9 = payment delay for nine months and above.
# x12-x17: Amount of bill statement (NT dollar). x12 = amount of bill statement in September,
# 2005; x13 = amount of bill statement in August, 2005; . . .; x17 = amount of bill
# statement in April, 2005.
# x18-x23: Amount of previous payment (NT dollar). x18 = amount paid in September, 2005;
# x19 = amount paid in August, 2005; . . .;x23 = amount paid in April, 2005.
def get_credit_data(sensitive_features, drop_columns=[], test_size=0.2, random_state=42):
"""
sensitive_features: features that should be considered sensitive when building the
BiasedDataset object
drop_columns: columns we can ignore and drop
random_state: to pass to train_test_split
return: two BiasedDataset objects, for training and test data respectively
"""
credit_data = fetch_openml(data_id=42477, as_frame=True, data_home='./data/raw')
# Force categorical data do be dtype: category
features = credit_data.data
categorical_features = ['x2', 'x3', 'x4', 'x5', 'x6', 'x7', 'x8', 'x9', 'x10', 'x11']
for cf in categorical_features:
features[cf] = features[cf].astype(str).astype('category')
# Encode output
target = (credit_data.target == "1") * 1
target = | pd.DataFrame({'target': target}) | pandas.DataFrame |
"""Hardware FonduerModel."""
import pickle
import numpy as np
from emmental.data import EmmentalDataLoader
from pandas import DataFrame
from fonduer.learning.dataset import FonduerDataset
from fonduer.packaging import FonduerModel
from fonduer.parser.models import Document
from tests.shared.hardware_lfs import TRUE
from tests.shared.hardware_utils import get_implied_parts
ATTRIBUTE = "stg_temp_max"
class HardwareFonduerModel(FonduerModel):
"""Customized FonduerModel for hardware."""
def _classify(self, doc: Document) -> DataFrame:
# Only one candidate class is used.
candidate_class = self.candidate_extractor.candidate_classes[0]
test_cands = getattr(doc, candidate_class.__tablename__ + "s")
features_list = self.featurizer.apply(doc)
# Convert features into a sparse matrix
F_test = FonduerModel.convert_features_to_matrix(
features_list[0], self.key_names
)
test_dataloader = EmmentalDataLoader(
task_to_label_dict={ATTRIBUTE: "labels"},
dataset=FonduerDataset(ATTRIBUTE, test_cands, F_test, self.word2id, 2),
split="test",
batch_size=100,
shuffle=False,
)
test_preds = self.emmental_model.predict(test_dataloader, return_preds=True)
positive = np.where(np.array(test_preds["probs"][ATTRIBUTE])[:, TRUE] > 0.7)
true_preds = [test_cands[_] for _ in positive[0]]
pickle_file = "tests/data/parts_by_doc_dict.pkl"
with open(pickle_file, "rb") as f:
parts_by_doc = pickle.load(f)
df = DataFrame()
for c in true_preds:
part = c[0].context.get_span()
doc = c[0].context.sentence.document.name.upper()
val = c[1].context.get_span()
for p in get_implied_parts(part, doc, parts_by_doc):
entity_relation = (doc, p, val)
df = df.append(
| DataFrame([entity_relation], columns=["doc", "part", "val"]) | pandas.DataFrame |
import numpy as np
import pandas as pd
import scipy.stats as sp
# file path
DATA_DIR = "./data"
ORI_DATA_PATH = DATA_DIR + "/diabetic_data.csv"
MAP_PATH = DATA_DIR + "/IDs_mapping.csv"
OUTPUT_DATA_PATH = DATA_DIR + "/preprocessed_data.csv"
# load data
dataframe_ori = pd.read_csv(ORI_DATA_PATH)
NUM_RECORDS = dataframe_ori.shape[0]
NUM_FEATURE = dataframe_ori.shape[1]
# make a copy of the dataframe for preprocessing
df = dataframe_ori.copy(deep=True)
# Drop features
df = df.drop(['weight', 'payer_code', 'medical_specialty', 'examide', 'citoglipton'], axis=1)
# drop bad data with 3 '?' in diag
drop_ID = set(df[(df['diag_1'] == '?') & (df['diag_2'] == '?') & (df['diag_3'] == '?')].index)
# drop died patient data which 'discharge_disposition_id' == 11 | 19 | 20 | 21 indicates 'Expired'
drop_ID = drop_ID.union(set(df[(df['discharge_disposition_id'] == 11) | (df['discharge_disposition_id'] == 19) | \
(df['discharge_disposition_id'] == 20) | (df['discharge_disposition_id'] == 21)].index))
# drop 3 data with 'Unknown/Invalid' gender
drop_ID = drop_ID.union(df['gender'][df['gender'] == 'Unknown/Invalid'].index)
new_ID = list(set(df.index) - set(drop_ID))
df = df.iloc[new_ID]
# process readmitted data
df['readmitted'] = df['readmitted'].replace('>30', 2)
df['readmitted'] = df['readmitted'].replace('<30', 1)
df['readmitted'] = df['readmitted'].replace('NO', 0)
# cnt0, cnt1, cnt2 = 0, 0, 0
'''
for i in df['readmitted']:
if i == 0:
cnt0 += 1
if i == 1:
cnt1 += 1
else:
cnt2 += 1
print(cnt0, cnt1, cnt2)
'''
# 53208 11357 88753
# count dose changes across the 21 remaining medication columns
# (23 originally; examide and citoglipton were dropped earlier)
# more dose changes are associated with a higher probability of readmission
# 'num_med_changed' counts how many medications were adjusted ('Up' or 'Down')
print('\n--Medicine related--')
medicine = ['metformin', 'repaglinide', 'nateglinide', 'chlorpropamide', 'glimepiride', 'glipizide', 'glyburide',
'pioglitazone', 'rosiglitazone', 'acarbose', 'miglitol', 'insulin', 'glyburide-metformin', 'tolazamide',
'metformin-pioglitazone', 'metformin-rosiglitazone', 'glimepiride-pioglitazone', 'glipizide-metformin',
'troglitazone', 'tolbutamide', 'acetohexamide']
for med in medicine:
tmp = med + 'temp'
df[tmp] = df[med].apply(lambda x: 1 if (x == 'Down' or x == 'Up') else 0)
# two new feature
df['num_med_changed'] = 0
for med in medicine:
tmp = med + 'temp'
df['num_med_changed'] += df[tmp]
del df[tmp]
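# Hedged equivalent (not used here): at this point the medication columns still hold
# their raw labels, so the loop above amounts to
#   df['num_med_changed'] = df[medicine].isin(['Up', 'Down']).sum(axis=1)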
for i in medicine:
df[i] = df[i].replace('Steady', 1)
df[i] = df[i].replace('No', 0)
df[i] = df[i].replace('Up', 1)
df[i] = df[i].replace('Down', 1)
df['num_med_taken'] = 0
for med in medicine:
print(med)
df['num_med_taken'] = df['num_med_taken'] + df[med]
# encode race
df['race'] = df['race'].replace('Asian', 0)
df['race'] = df['race'].replace('AfricanAmerican', 1)
df['race'] = df['race'].replace('Caucasian', 2)
df['race'] = df['race'].replace('Hispanic', 3)
df['race'] = df['race'].replace('Other', 4)
df['race'] = df['race'].replace('?', 4)
# map
df['A1Cresult'] = df['A1Cresult'].replace('None', -99) # -1 -> -99
df['A1Cresult'] = df['A1Cresult'].replace('>8', 1)
df['A1Cresult'] = df['A1Cresult'].replace('>7', 1)
df['A1Cresult'] = df['A1Cresult'].replace('Norm', 0)
df['max_glu_serum'] = df['max_glu_serum'].replace('>200', 1)
df['max_glu_serum'] = df['max_glu_serum'].replace('>300', 1)
df['max_glu_serum'] = df['max_glu_serum'].replace('Norm', 0)
df['max_glu_serum'] = df['max_glu_serum'].replace('None', -99) # -1 -> -99
df['change'] = df['change'].replace('No', 0)
df['change'] = df['change'].replace("Ch", 1)
df['gender'] = df['gender'].replace('Male', 1)
df['gender'] = df['gender'].replace('Female', 0)
df['diabetesMed'] = df['diabetesMed'].replace('Yes', 1)
df['diabetesMed'] = df['diabetesMed'].replace('No', 0)
print('diabetesMed end')
age_dict = {'[0-10)': 5, '[10-20)': 15, '[20-30)': 25, '[30-40)': 35, '[40-50)': 45, '[50-60)': 55, '[60-70)': 65,
'[70-80)': 75, '[80-90)': 85, '[90-100)': 95}
df['age'] = df.age.map(age_dict)
df['age'] = df['age'].astype('int64')
print('age end')
# simplify
# admission_type_id : [2, 7] -> 1, [6, 8] -> 5
a, b = [2, 7], [6, 8]
for i in a:
df['admission_type_id'] = df['admission_type_id'].replace(i, 1)
for j in b:
df['admission_type_id'] = df['admission_type_id'].replace(j, 5)
# discharge_disposition_id : [6, 8, 9, 13] -> 1, [3, 4, 5, 14, 22, 23, 24] -> 2,
# [12, 15, 16, 17] -> 10, [19, 20, 21] -> 11, [25, 26] -> 18
a, b, c, d, e = [6, 8, 9, 13], [3, 4, 5, 14, 22, 23, 24], [12, 15, 16, 17], \
[19, 20, 21], [25, 26]
for i in a:
df['discharge_disposition_id'] = df['discharge_disposition_id'].replace(i, 1)
for j in b:
df['discharge_disposition_id'] = df['discharge_disposition_id'].replace(j, 2)
for k in c:
df['discharge_disposition_id'] = df['discharge_disposition_id'].replace(k, 10)
# data of died patients have been dropped
# for p in d:
# df['discharge_disposition_id'] = df['discharge_disposition_id'].replace(p, 11)
for q in e:
df['discharge_disposition_id'] = df['discharge_disposition_id'].replace(q, 18)
# admission_source_id : [3, 2] -> 1, [5, 6, 10, 22, 25] -> 4,
# [15, 17, 20, 21] -> 9, [13, 14] -> 11
a, b, c, d = [3, 2], [5, 6, 10, 22, 25], [15, 17, 20, 21], [13, 14]
for i in a:
df['admission_source_id'] = df['admission_source_id'].replace(i, 1)
for j in b:
df['admission_source_id'] = df['admission_source_id'].replace(j, 4)
for k in c:
df['admission_source_id'] = df['admission_source_id'].replace(k, 9)
for p in d:
df['admission_source_id'] = df['admission_source_id'].replace(p, 11)
print('id end')
# Classify Diagnoses by ICD-9
df.loc[df['diag_1'].str.contains('V', na=False), ['diag_1']] = 0
df.loc[df['diag_1'].str.contains('E', na=False), ['diag_1']] = 0
df['diag_1'] = df['diag_1'].replace('?', -1)
df['diag_1'] = pd.to_numeric(df['diag_1'], errors='coerce')
for index, row in df.iterrows():
if (row['diag_1'] >= 1 and row['diag_1'] <= 139):
df.loc[index, 'diag_1'] = 1
elif (row['diag_1'] >= 140 and row['diag_1'] <= 239):
df.loc[index, 'diag_1'] = 2
elif (row['diag_1'] >= 240 and row['diag_1'] <= 279):
df.loc[index, 'diag_1'] = 3
elif (row['diag_1'] >= 280 and row['diag_1'] <= 289):
df.loc[index, 'diag_1'] = 4
elif (row['diag_1'] >= 290 and row['diag_1'] <= 319):
df.loc[index, 'diag_1'] = 5
elif (row['diag_1'] >= 320 and row['diag_1'] <= 389):
df.loc[index, 'diag_1'] = 6
elif (row['diag_1'] >= 390 and row['diag_1'] <= 459):
df.loc[index, 'diag_1'] = 7
elif (row['diag_1'] >= 460 and row['diag_1'] <= 519):
df.loc[index, 'diag_1'] = 8
elif (row['diag_1'] >= 520 and row['diag_1'] <= 579):
df.loc[index, 'diag_1'] = 9
elif (row['diag_1'] >= 580 and row['diag_1'] <= 629):
df.loc[index, 'diag_1'] = 10
elif (row['diag_1'] >= 630 and row['diag_1'] <= 679):
df.loc[index, 'diag_1'] = 11
elif (row['diag_1'] >= 680 and row['diag_1'] <= 709):
df.loc[index, 'diag_1'] = 12
elif (row['diag_1'] >= 710 and row['diag_1'] <= 739):
df.loc[index, 'diag_1'] = 13
elif (row['diag_1'] >= 740 and row['diag_1'] <= 759):
df.loc[index, 'diag_1'] = 14
elif (row['diag_1'] >= 760 and row['diag_1'] <= 779):
df.loc[index, 'diag_1'] = 15
elif (row['diag_1'] >= 780 and row['diag_1'] <= 799):
df.loc[index, 'diag_1'] = 16
elif (row['diag_1'] >= 800 and row['diag_1'] <= 999):
df.loc[index, 'diag_1'] = 17
print('diag_1 end')
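# Hedged note: the elif chain above maps ICD-9 code ranges to 17 diagnosis groups.
# A vectorized sketch with the same bin edges would look like the following, but
# sentinel values (0, -1) and fractional codes behave differently, so the explicit
# loop is kept as in the original:
#   bins = [0, 139, 239, 279, 289, 319, 389, 459, 519, 579,
#           629, 679, 709, 739, 759, 779, 799, 999]
#   df['diag_1'] = pd.cut(df['diag_1'], bins=bins, labels=range(1, 18))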
df.loc[df['diag_2'].str.contains('V', na=False), ['diag_2']] = 0
df.loc[df['diag_2'].str.contains('E', na=False), ['diag_2']] = 0
df['diag_2'] = df['diag_2'].replace('?', -1)
df['diag_2'] = pd.to_numeric(df['diag_2'], errors='coerce')
for index, row in df.iterrows():
if (row['diag_2'] >= 1 and row['diag_2'] <= 139):
df.loc[index, 'diag_2'] = 1
elif (row['diag_2'] >= 140 and row['diag_2'] <= 239):
df.loc[index, 'diag_2'] = 2
elif (row['diag_2'] >= 240 and row['diag_2'] <= 279):
df.loc[index, 'diag_2'] = 3
elif (row['diag_2'] >= 280 and row['diag_2'] <= 289):
df.loc[index, 'diag_2'] = 4
elif (row['diag_2'] >= 290 and row['diag_2'] <= 319):
df.loc[index, 'diag_2'] = 5
elif (row['diag_2'] >= 320 and row['diag_2'] <= 389):
df.loc[index, 'diag_2'] = 6
elif (row['diag_2'] >= 390 and row['diag_2'] <= 459):
df.loc[index, 'diag_2'] = 7
elif (row['diag_2'] >= 460 and row['diag_2'] <= 519):
df.loc[index, 'diag_2'] = 8
elif (row['diag_2'] >= 520 and row['diag_2'] <= 579):
df.loc[index, 'diag_2'] = 9
elif (row['diag_2'] >= 580 and row['diag_2'] <= 629):
df.loc[index, 'diag_2'] = 10
elif (row['diag_2'] >= 630 and row['diag_2'] <= 679):
df.loc[index, 'diag_2'] = 11
elif (row['diag_2'] >= 680 and row['diag_2'] <= 709):
df.loc[index, 'diag_2'] = 12
elif (row['diag_2'] >= 710 and row['diag_2'] <= 739):
df.loc[index, 'diag_2'] = 13
elif (row['diag_2'] >= 740 and row['diag_2'] <= 759):
df.loc[index, 'diag_2'] = 14
elif (row['diag_2'] >= 760 and row['diag_2'] <= 779):
df.loc[index, 'diag_2'] = 15
elif (row['diag_2'] >= 780 and row['diag_2'] <= 799):
df.loc[index, 'diag_2'] = 16
elif (row['diag_2'] >= 800 and row['diag_2'] <= 999):
df.loc[index, 'diag_2'] = 17
print('diag_2 end')
df.loc[df['diag_3'].str.contains('V', na=False), ['diag_3']] = 0
df.loc[df['diag_3'].str.contains('E', na=False), ['diag_3']] = 0
df['diag_3'] = df['diag_3'].replace('?', -1)
df['diag_3'] = | pd.to_numeric(df['diag_3'], errors='coerce') | pandas.to_numeric |
# This script preps the county level COVID data for he ultraCOVID project
# Importing required modules
import pandas as pd
import numpy as np
import datetime
# Specifying the path to the data -- update this accordingly!
username = ''
filepath = 'C:/Users/' + username + '/Documents/Data/ultraCOVID/'
# Reading in the COVID data sets
cases = pd.read_csv(filepath + 'time_series_covid19_confirmed_US.csv')
death = pd.read_csv(filepath + 'time_series_covid19_deaths_US.csv')
# Creating a list of all county level cases and deaths cumulative totals by day
case_vals = []
death_vals = []
for i in range(81,81+365):
tmp = cases[list(cases.columns)[i]].to_list()
for t in tmp:
case_vals.append(t)
for i in range(82,82+365):
    tmp = death[list(death.columns)[i]].to_list()  # read from the deaths frame, not cases
for t in tmp:
death_vals.append(t)
# Creating lists of other variables that will go into the dataframe
counties = cases['Admin2'].to_list()*365
states = cases['Province_State'].to_list()*365
fips = cases['FIPS'].to_list()*365
lat = cases['Lat'].to_list()*365
lon = cases['Long_'].to_list()*365
pop = death['Population'].to_list()*365
d0 = datetime.datetime.strptime('April 1, 2020', '%B %d, %Y')
dates_tmp = [[d0+datetime.timedelta(i)]*len(cases) for i in range(365)]
dates = [i for j in dates_tmp for i in j]
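# Hedged sanity check: dates_tmp repeats each calendar day once per county, so
# len(dates) == 365 * len(cases), matching the flattened case/death value lists above.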
# Creating the dataframe for COVID data
counties = pd.Series(counties, name = 'County')
states = pd.Series(states, name = 'State')
fips = pd.Series(fips, name = 'FIPS')
lat = pd.Series(lat, name = 'Lat')
lon = pd.Series(lon, name = 'Lon')
pop = pd.Series(pop, name = 'Population')
dates = pd.Series(dates, name = 'Date')
case_vals = pd.Series(case_vals, name = 'Cumulative_Cases')
death_vals = pd.Series(death_vals, name = 'Cumulative_Deaths')
covidata = | pd.concat([counties, states, fips, lat, lon, pop, dates, case_vals, death_vals], axis = 1) | pandas.concat |
import pandas as pd
import numpy as np
def label_to_pos_map(all_codes):
label_to_pos = dict([(code,pos) for code, pos in zip(sorted(all_codes),range(len(all_codes)))])
pos_to_label = dict([(pos,code) for code, pos in zip(sorted(all_codes),range(len(all_codes)))])
return label_to_pos, pos_to_label
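# Hedged example (toy codes, not from the real label set):
#   label_to_pos_map(['J45', 'E11', 'I10'])
#   -> ({'E11': 0, 'I10': 1, 'J45': 2}, {0: 'E11', 1: 'I10', 2: 'J45'})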
def label_to_tensor(data, label_to_pos):
tmp = np.zeros((len(data),
len(label_to_pos)))
c = 0
for idx, row in data.iterrows():
for code in row['labels']:
tmp[c, label_to_pos[code]] = 1
c += 1
return tmp
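# Hedged sketch: label_to_tensor builds a multi-hot matrix with one row per record;
# e.g. with a mapping {'E11': 0, 'I10': 1, 'J45': 2}, label lists [['E11'], ['E11', 'I10']]
# become [[1, 0, 0], [1, 1, 0]] (assumes a 'labels' column holding lists of codes).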
def stratified_sampling_multilearn(df, y, train_data_output_path):
from skmultilearn.model_selection import iterative_train_test_split
from skmultilearn.model_selection import IterativeStratification
df = df.reset_index(drop=True).sample(frac=1, random_state=42)
k_fold = IterativeStratification(n_splits=3, order=1, random_state=42)
nfold = 1
for train, test in k_fold.split(df, y):
df_train = df.iloc[train]
y_train = y[train, :]
df_test = df.iloc[test]
y_test = y[test, :]
val_tmp, y_val, df_test_tmp, y_test = iterative_train_test_split(df_test.values, y_test, test_size = 0.5,)
df_val = pd.DataFrame(val_tmp, columns=df_test.columns)
df_test = pd.DataFrame(df_test_tmp, columns=df_test.columns)
df_train.to_csv(f"{train_data_output_path}_fold_{nfold}_train.csv", index=False)
df_val.to_csv(f"{train_data_output_path}_fold_{nfold}_dev.csv", index=False)
df_test.to_csv(f"{train_data_output_path}_fold_{nfold}_test.csv", index=False)
nfold = nfold + 1
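# Hedged note on outputs (hypothetical prefix): with train_data_output_path='out/mimic',
# each of the 3 folds yields out/mimic_fold_<n>_train.csv, _dev.csv and _test.csv.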
def load_mimic_paper_split(df, train_data_output_path):
dev_patients = pd.read_csv('dataset_creation/input_files/ids_mimic_dev.csv')
test_patients = pd.read_csv('dataset_creation/input_files/ids_mimic_test.csv')
train_patients = pd.read_csv('dataset_creation/input_files/ids_mimic_train.csv')
df_train = df[df.HADM_ID.isin(train_patients.HADM_ID)]
df_test = df[df.HADM_ID.isin(test_patients.HADM_ID)]
df_val = df[df.HADM_ID.isin(dev_patients.HADM_ID)]
df_train.to_csv(f"{train_data_output_path}_train.csv", index=False)
df_val.to_csv(f"{train_data_output_path}_dev.csv", index=False)
df_test.to_csv(f"{train_data_output_path}_test.csv", index=False)
def load_codie_paper_split(df, train_data_output_path):
dev_patients = pd.read_csv('dataset_creation/input_files/ids_codie_dev.csv')
test_patients = | pd.read_csv('dataset_creation/input_files/ids_codie_test.csv') | pandas.read_csv |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
##find parent directory and import model
#parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
#sys.path.append(parentddir)
from ..agdrift_exe import Agdrift
test = {}
class TestAgdrift(unittest.TestCase):
"""
IEC unit tests.
"""
def setUp(self):
"""
setup the test as needed
e.g. pandas to open agdrift qaqc csv
Read qaqc csv and create pandas DataFrames for inputs and expected outputs
:return:
"""
pass
def tearDown(self):
"""
teardown called after each test
e.g. maybe write test results to some text file
:return:
"""
pass
def create_agdrift_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty agdrift object
agdrift_empty = Agdrift(df_empty, df_empty)
return agdrift_empty
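    # Hedged note: the empty DataFrames only satisfy Agdrift's constructor signature;
    # each test below assigns the inputs it needs on the returned object, e.g.
    #   agdrift_empty = self.create_agdrift_object()
    #   agdrift_empty.num_simulations = 3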
def test_validate_sim_scenarios(self):
"""
:description determines if user defined scenarios are valid for processing
:param application_method: type of Tier I application method employed
:param aquatic_body_def: type of endpoint of concern (e.g., pond, wetland); implies whether
: endpoint of concern parameters (e.g.,, pond width) are set (i.e., by user or EPA standard)
:param drop_size_*: qualitative description of spray droplet size for aerial & ground applications
:param boom_height: qualitative height above ground of spray boom
:param airblast_type: type of orchard being sprayed
:NOTE we perform an additional validation check related to distances later in the code just before integration
:return
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
agdrift_empty.out_sim_scenario_chk = pd.Series([], dtype='object')
expected_result = pd.Series([
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Aquatic Ground Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Aquatic Ground Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Valid Tier I Aquatic Airblast Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Valid Tier I Aquatic Airblast Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Invalid Tier I Aquatic Aerial Scenario',
'Invalid Tier I Aquatic Ground Scenario',
'Invalid Tier I Aquatic Airblast Scenario',
'Invalid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Invalid scenario ecosystem_type',
'Invalid Tier I Aquatic Assessment application method',
'Invalid Tier I Terrestrial Assessment application method'],dtype='object')
try:
#set test data
agdrift_empty.num_simulations = len(expected_result)
agdrift_empty.application_method = pd.Series(
['tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_aerial',
'tier_1_ground',
'tier_1_airblast',
'tier_1_aerial',
'tier_1_ground',
'tier_1_airblast',
'tier_1_aerial',
'Tier II Aerial',
'Tier III Aerial'], dtype='object')
agdrift_empty.ecosystem_type = pd.Series(
['aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'terrestrial_assessment',
'terrestrial_assessment',
'Field Assessment',
'aquatic_assessment',
'terrestrial_assessment'], dtype='object')
agdrift_empty.aquatic_body_type = pd.Series(
['epa_defined_pond',
'NaN',
'epa_defined_wetland',
'NaN',
'user_defined_pond',
'NaN',
'user_defined_wetland',
'NaN',
'epa_defined_wetland',
'NaN',
'user_defined_pond',
'NaN',
'user_defined_wetland',
'NaN',
'Defined Pond',
'user_defined_pond',
'epa_defined_pond',
'NaN',
'NaN',
'NaN',
'epa_defined_pond',
'user_defined_wetland',
'user_defined_pond'], dtype='object')
agdrift_empty.terrestrial_field_type = pd.Series(
['NaN',
'user_defined_terrestrial',
'NaN',
'epa_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'epa_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'NaN',
'NaN',
'user_defined_terrestrial',
'user_defined_terrestrial',
'user_defined_terrestrial',
'NaN',
'NaN',
'user_defined_terrestrial'], dtype='object')
agdrift_empty.drop_size_aerial = pd.Series(
['very_fine_to_fine',
'fine_to_medium',
'medium_to_coarse',
'coarse_to_very_coarse',
'fine_to_medium',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'medium_to_coarse',
'NaN',
'very_fine_to_medium',
'NaN',
'very_fine Indeed',
'NaN',
'very_fine_to_medium',
'medium_to_coarse',
'NaN'], dtype='object')
agdrift_empty.drop_size_ground = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'fine_to_medium-coarse',
'very_fine',
'fine_to_medium-coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'NaN',
'fine_to_medium-coarse',
'very_fine',
'NaN',
'very_fine_to_medium',
'NaN',
'very_fine'], dtype='object')
agdrift_empty.boom_height = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'high',
'low',
'high',
'low',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'high',
'NaN',
'NaN',
'NaN',
'NaN'],dtype='object')
agdrift_empty.airblast_type = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'normal',
'dense',
'sparse',
'orchard',
'vineyard',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'vineyard',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.validate_sim_scenarios()
result = agdrift_empty.out_sim_scenario_chk
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_set_sim_scenario_id(self):
"""
:description provides scenario ids per simulation that match scenario names (i.e., column_names) from SQL database
:param out_sim_scenario_id: scenario name as assigned to individual simulations
:param num_simulations: number of simulations to assign scenario names
:param out_sim_scenario_chk: from previous method where scenarios were checked for validity
:param application_method: application method of scenario
:param drop_size_*: qualitative description of spray droplet size for aerial and ground applications
:param boom_height: qualitative height above ground of spray boom
:param airblast_type: type of airblast application (e.g., vineyard, orchard)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series(['aerial_vf2f',
'aerial_f2m',
'aerial_m2c',
'aerial_c2vc',
'ground_low_vf',
'ground_low_fmc',
'ground_high_vf',
'ground_high_fmc',
'airblast_normal',
'airblast_dense',
'airblast_sparse',
'airblast_vineyard',
'airblast_orchard',
'Invalid'], dtype='object')
try:
agdrift_empty.num_simulations = len(expected_result)
agdrift_empty.out_sim_scenario_chk = pd.Series(['Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Invalid Scenario'], dtype='object')
agdrift_empty.application_method = pd.Series(['tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_aerial'], dtype='object')
agdrift_empty.drop_size_aerial = pd.Series(['very_fine_to_fine',
'fine_to_medium',
'medium_to_coarse',
'coarse_to_very_coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.drop_size_ground = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'fine_to_medium-coarse',
'very_fine',
'fine_to_medium-coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.boom_height = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'low',
'low',
'high',
'high',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.airblast_type = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'normal',
'dense',
'sparse',
'vineyard',
'orchard',
'NaN'], dtype='object')
agdrift_empty.set_sim_scenario_id()
result = agdrift_empty.out_sim_scenario_id
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_assign_column_names(self):
"""
:description assigns column names (except distaqnce column) from sql database to internal scenario names
:param column_name: short name for pesiticide application scenario for which distance vs deposition data is provided
:param scenario_name: internal variable for holding scenario names
:param scenario_number: index for scenario_name (this method assumes the distance values could occur in any column
:param distance_name: internal name for the column holding distance data
:NOTE to test both outputs of this method I simply appended them together
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
agdrift_empty.scenario_name = pd.Series([], dtype='object')
expected_result = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc',
'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse',
'airblast_vineyard', 'airblast_orchard'], dtype='object')
try:
agdrift_empty.column_names = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc',
'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse',
'airblast_vineyard', 'airblast_orchard', 'distance_ft'])
#call method to assign scenario names
agdrift_empty.assign_column_names()
result = agdrift_empty.scenario_name
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_distances(self):
"""
:description retrieves distance values for deposition scenario datasets
: all scenarios use same distances
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result = pd.Series([], dtype='float')
try:
expected_result = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016,997.3632]
agdrift_empty.distance_name = 'distance_ft'
agdrift_empty.num_db_values = len(expected_result)
result = agdrift_empty.get_distances(agdrift_empty.num_db_values)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_scenario_deposition_data(self):
"""
:description retrieves deposition data for all scenarios from sql database
: and checks that for each the first, last, and total number of values
: are correct
:param scenario: name of scenario for which data is to be retrieved
:param num_values: number of values included in scenario datasets
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
#scenario_data = pd.Series([[]], dtype='float')
result = pd.Series([], dtype='float')
#changing expected values to the 161st
expected_result = [0.50013,0.041273,161.0, #aerial_vf2f
0.49997,0.011741,161.0, #aerial_f2m
0.4999,0.0053241,161.0, #aerial_m2c
0.49988,0.0031189,161.0, #aerial_c2vc
1.019339,9.66E-04,161.0, #ground_low_vf
1.007885,6.13E-04,161.0, #ground_low_fmc
1.055205,1.41E-03,161.0, #ground_high_vf
1.012828,7.72E-04,161.0, #ground_high_fmc
8.91E-03,3.87E-05,161.0, #airblast_normal
0.1155276,4.66E-04,161.0, #airblast_dense
0.4762651,5.14E-05,161.0, #airblast_sparse
3.76E-02,3.10E-05,161.0, #airblast_vineyard
0.2223051,3.58E-04,161.0] #airblast_orchard
try:
agdrift_empty.num_db_values = 161 #set number of data values in sql db
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
#agdrift_empty.db_name = 'sqlite_agdrift_distance.db'
#this is the list of scenario names (column names) in sql db (the order here is important because
#the expected values are ordered in this manner)
agdrift_empty.scenario_name = ['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard']
#cycle through reading scenarios and building result list
for i in range(len(agdrift_empty.scenario_name)):
#get scenario data
scenario_data = agdrift_empty.get_scenario_deposition_data(agdrift_empty.scenario_name[i],
agdrift_empty.num_db_values)
print(scenario_data)
#extract 1st and last values of scenario data and build result list (including how many values are
#retrieved for each scenario
if i == 0:
#fix this
result = [scenario_data[0], scenario_data[agdrift_empty.num_db_values - 1],
float(len(scenario_data))]
else:
result.extend([scenario_data[0], scenario_data[agdrift_empty.num_db_values - 1],
float(len(scenario_data))])
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_column_names(self):
"""
:description retrieves column names from sql database (sqlite_agdrift_distance.db)
: (each column name refers to a specific deposition scenario;
: the scenario name is used later to retrieve the deposition data)
:parameter output name of sql database table from which to retrieve requested data
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
result = pd.Series([], dtype='object')
expected_result = ['distance_ft','aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard']
try:
result = agdrift_empty.get_column_names()
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_filter_arrays(self):
"""
:description eliminate blank data cells (i.e., distances for which no deposition value is provided)
(and thus reduce the number of x,y values to be used)
:parameter x_in: array of distance values associated with values for a deposition scenario (e.g., Aerial/EPA Defined Pond)
:parameter y_in: array of deposition values associated with a deposition scenario (e.g., Aerial/EPA Defined Pond)
:parameter x_out: processed array of x_in values eliminating indices of blank distance/deposition values
:parameter y_out: processed array of y_in values eliminating indices of blank distance/deposition values
:NOTE y_in array is assumed to be populated by values >= 0. except for the blanks as 'nan' entries
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([0.,1.,4.,5.,6.,7.], dtype='float')
expected_result_y = pd.Series([10.,11.,14.,15.,16.,17.], dtype='float')
try:
x_in = pd.Series([0.,1.,2.,3.,4.,5.,6.,7.], dtype='float')
y_in = pd.Series([10.,11.,'nan','nan',14.,15.,16.,17.], dtype='float')
x_out, y_out = agdrift_empty.filter_arrays(x_in, y_in)
result_x = x_out
result_y = y_out
npt.assert_allclose(result_x, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(result_y, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result_x, expected_result_x, result_y, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_list_sims_per_scenario(self):
"""
:description scan simulations and count number and indices of simulations that apply to each scenario
:parameter num_scenarios number of deposition scenarios included in SQL database
:parameter num_simulations number of simulations included in this model execution
:parameter scenario_name name of deposition scenario as recorded in SQL database
:parameter out_sim_scenario_id identification of deposition scenario specified per model run simulation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_num_sims = pd.Series([2,2,2,2,2,2,2,2,2,2,2,2,2], dtype='int')
expected_sim_indices = pd.Series([[0,13,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[1,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[2,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[3,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[4,17,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[5,18,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[6,19,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[7,20,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[8,21,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[9,22,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[10,23,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[11,24,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[12,25,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]], dtype='int')
try:
agdrift_empty.scenario_name = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard'], dtype='object')
agdrift_empty.out_sim_scenario_id = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard','aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard'], dtype='object')
agdrift_empty.num_simulations = len(agdrift_empty.out_sim_scenario_id)
agdrift_empty.num_scenarios = len(agdrift_empty.scenario_name)
result_num_sims, result_sim_indices = agdrift_empty.list_sims_per_scenario()
npt.assert_array_equal(result_num_sims, expected_num_sims, err_msg='', verbose=True)
npt.assert_array_equal(result_sim_indices, expected_sim_indices, err_msg='', verbose=True)
finally:
tab = [result_num_sims, expected_num_sims, result_sim_indices, expected_sim_indices]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_determine_area_dimensions(self):
"""
:description determine relevant area/length/depth of waterbody or terrestrial area
:param i: simulation number
:param ecosystem_type: type of assessment to be conducted
:param aquatic_body_type: source of dimensional data for area (EPA or User defined)
:param terrestrial_field_type: source of dimensional data for area (EPA or User defined)
:param *_width: default or user specified width of waterbody or terrestrial field
:param *_length: default or user specified length of waterbody or terrestrial field
:param *_depth: default or user specified depth of waterbody or terrestrial field
:NOTE all areas, i.e., ponds, wetlands, and terrestrial fields are of 1 hectare size; the user can elect
to specify a width other than the default width but it won't change the area size; thus for
user specified areas the length is calculated and not specified by the user
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_width = pd.Series([208.7, 208.7, 100., 400., 150., 0.], dtype='float')
expected_length = pd.Series([515.8, 515.8, 1076.39, 269.098, 717.593, 0.], dtype='float')
expected_depth = pd.Series([6.56, 0.4921, 7., 23., 0., 0.], dtype='float')
try:
agdrift_empty.ecosystem_type = pd.Series(['aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'terrestrial_assessment'], dtype='object')
agdrift_empty.aquatic_body_type = pd.Series(['epa_defined_pond',
'epa_defined_wetland',
'user_defined_pond',
'user_defined_wetland',
'NaN',
'NaN'], dtype='object')
agdrift_empty.terrestrial_field_type = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'user_defined_terrestrial',
'epa_defined_terrestrial'], dtype='object')
num_simulations = len(agdrift_empty.ecosystem_type)
agdrift_empty.default_width = 208.7
agdrift_empty.default_length = 515.8
agdrift_empty.default_pond_depth = 6.56
agdrift_empty.default_wetland_depth = 0.4921
agdrift_empty.user_pond_width = pd.Series(['NaN', 'NaN', 100., 'NaN', 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_pond_depth = pd.Series(['NaN', 'NaN', 7., 'NaN', 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_wetland_width = pd.Series(['NaN', 'NaN', 'NaN', 400., 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_wetland_depth = pd.Series(['NaN','NaN', 'NaN', 23., 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_terrestrial_width = pd.Series(['NaN', 'NaN', 'NaN', 'NaN', 150., 'NaN'], dtype='float')
width_result = pd.Series(num_simulations * ['NaN'], dtype='float')
length_result = pd.Series(num_simulations * ['NaN'], dtype='float')
depth_result = pd.Series(num_simulations * ['NaN'], dtype='float')
agdrift_empty.out_area_width = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.out_area_length = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.out_area_depth = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.sqft_per_hectare = 107639
for i in range(num_simulations):
width_result[i], length_result[i], depth_result[i] = agdrift_empty.determine_area_dimensions(i)
npt.assert_allclose(width_result, expected_width, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(length_result, expected_length, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(depth_result, expected_depth, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [width_result, expected_width, length_result, expected_length, depth_result, expected_depth]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_foa(self):
"""
:description calculation of average deposition over width of water body
:param integration_result result of integration of deposition curve across the distance
: beginning at the near distance and extending to the far distance of the water body
:param integration_distance effectively the width of the water body
:param avg_dep_foa average deposition rate across the width of the water body
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([0.1538462, 0.5, 240.])
try:
integration_result = pd.Series([1.,125.,3e5], dtype='float')
integration_distance = pd.Series([6.5,250.,1250.], dtype='float')
result = agdrift_empty.calc_avg_dep_foa(integration_result, integration_distance)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac(self):
"""
:description calculate the average deposition over the width of the water body in lbs per acre
:param avg_dep_foa: average deposition over width of water body as fraction of applied
:param application_rate: actual application rate
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([6.5, 3.125e4, 3.75e8])
try:
avg_dep_foa = pd.Series([1.,125.,3e5], dtype='float')
application_rate = pd.Series([6.5,250.,1250.], dtype='float')
result = agdrift_empty.calc_avg_dep_lbac(avg_dep_foa, application_rate)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_foa_from_lbac(self):
"""
:description calculate the average deposition over the width of the water body as a fraction of the applied rate
:param avg_dep_foa: average deposition over width of water body as fraction of applied
:param application_rate: actual application rate
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.553846e-01, 8.8e-06, 4.e-08])
try:
avg_dep_lbac = pd.Series([1.01, 0.0022, 0.00005], dtype='float')
application_rate = pd.Series([6.5,250.,1250.], dtype='float')
result = agdrift_empty.calc_avg_dep_foa_from_lbac(avg_dep_lbac, application_rate)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_gha(self):
"""
:description calculate the average deposition over the width of the water body in lbs per acre from deposition in grams per hectare
:param avg_dep_gha: average deposition over width of water body in units of grams/hectare
:param gms_per_lb: conversion factor to convert lbs to grams
:param acres_per_hectare: conversion factor to convert hectares to acres
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([0.01516739, 0.111524, 0.267659])
try:
avg_dep_gha = pd.Series([17., 125., 3e2], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.471
result = agdrift_empty.calc_avg_dep_lbac_from_gha(avg_dep_gha)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_waterconc_ngl(self):
"""
:description calculate the average deposition onto the pond/wetland/field
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_width: average width of water body
:param area_length: average length of water body
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param ng_per_gram conversion factor
:param sqft_per_acre conversion factor
:param liters_per_ft3 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([2.311455e-05, 2.209479e-03, 2.447423e-03])
try:
avg_waterconc_ngl = pd.Series([17., 125., 3e2], dtype='float')
area_width = pd.Series([50., 200., 500.], dtype='float')
area_length = pd.Series([6331., 538., 215.], dtype='float')
area_depth = pd.Series([0.5, 6.5, 3.], dtype='float')
agdrift_empty.liters_per_ft3 = 28.3168
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.ng_per_gram = 1.e9
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.471
result = agdrift_empty.calc_avg_dep_lbac_from_waterconc_ngl(avg_waterconc_ngl, area_width,
area_length, area_depth)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_mgcm2(self):
"""
:description calculate the average deposition of pesticide over the terrestrial field in lbs/acre
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param mg_per_gram conversion factor
:param sqft_per_acre conversion factor
:param cm2_per_ft2 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([2.676538e-02, 2.2304486, 44.608973])
try:
avg_fielddep_mgcm2 = pd.Series([3.e-4, 2.5e-2, 5.e-01])
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.cm2_per_ft2 = 929.03
agdrift_empty.mg_per_gram = 1.e3
result = agdrift_empty.calc_avg_dep_lbac_from_mgcm2(avg_fielddep_mgcm2)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_gha(self):
"""
:description average deposition over width of water body in grams per hectare
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param gms_per_lb: conversion factor to convert lbs to grams
:param acres_per_hectare: conversion factor to convert acres to hectares
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.401061, 0.3648362, 0.03362546])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.47105
result = agdrift_empty.calc_avg_dep_gha(avg_dep_lbac)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_waterconc_ngl(self):
"""
:description calculate the average concentration of pesticide in the pond/wetland
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_width: average width of water body
:param area_length: average length of water body
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param ng_per_gram conversion factor
:param sqft_per_acre conversion factor
:param liters_per_ft3 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([70.07119, 18.24654, 22.41823])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
area_width = pd.Series([6.56, 208.7, 997.], dtype='float')
area_length = pd.Series([1.640838e4, 515.7595, 107.9629], dtype='float')
area_depth = pd.Series([6.56, 6.56, 0.4921], dtype='float')
agdrift_empty.ng_per_gram = 1.e9
agdrift_empty.liters_per_ft3 = 28.3168
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.sqft_per_acre = 43560.
result = agdrift_empty.calc_avg_waterconc_ngl(avg_dep_lbac ,area_width, area_length, area_depth)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_fielddep_mgcm2(self):
"""
:description calculate the average deposition of pesticide over the terrestrial field
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
:param mg_per_gram conversion factor
:param sqft_per_acre conversion factor
:param cm2_per_ft2 conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.401063e-5, 3.648369e-6, 3.362552e-7])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.mg_per_gram = 1.e3
agdrift_empty.cm2_per_ft2 = 929.03
result = agdrift_empty.calc_avg_fielddep_mgcm2(avg_dep_lbac)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_generate_running_avg(self):
"""
:description creates a running average of deposition values; the test retrieves the distance values and the first deposition scenario from the sql database as input
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
import os
import sys
from numpy.core.numeric import zeros_like
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-poster')
# I hate this too but it allows everything to use the same helper functions.
sys.path.insert(0, 'model')
from helper_functions import read_in_NNDSS
from Reff_constants import *
from params import alpha_start_date, delta_start_date, omicron_start_date, vaccination_start_date
def read_in_posterior(date):
"""
read in samples from posterior from inference
"""
df = pd.read_hdf("results/soc_mob_posterior"+date+".h5", key='samples')
return df
def read_in_google(Aus_only=True, local=False, moving=False):
"""
Read in the Google data set
"""
if local:
if type(local) == str:
df = pd.read_csv(local, parse_dates=['date'])
elif type(local) == bool:
local = 'data/Global_Mobility_Report.csv'
df = pd.read_csv(local, parse_dates=['date'])
import numpy as np
import pandas as pd
import numba
from vtools.functions.filter import cosine_lanczos
def get_smoothed_resampled(df, cutoff_period='2H', resample_period='1T', interpolate_method='pchip'):
"""Resample the dataframe (indexed by time) to the regular period of resample_period using the interpolate method
Furthermore the cosine lanczos filter is used with a cutoff_period to smooth the signal to remove high frequency noise
Args:
df (DataFrame): A single column dataframe indexed by datetime
cutoff_period (str, optional): cutoff period for cosine lanczos filter. Defaults to '2H'.
resample_period (str, optional): Resample to regular period. Defaults to '1T'.
interpolate_method (str, optional): interpolation for resampling. Defaults to 'pchip'.
Returns:
DataFrame: smoothed and resampled dataframe indexed by datetime
"""
dfb = df.resample(resample_period).fillna(method='backfill')
df = df.resample(resample_period).interpolate(method=interpolate_method)
df[dfb.iloc[:, 0].isna()] = np.nan
return cosine_lanczos(df, cutoff_period)
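# Illustrative usage sketch (not part of the original module): smooth a noisy stage/flow
# series to a regular 1 minute grid before extracting tidal highs and lows. The variable
# name `water_level` and its CSV source are assumptions made for this example only.
# water_level = pd.read_csv('water_level.csv', index_col=0, parse_dates=True)  # single column, datetime index
# smoothed = get_smoothed_resampled(water_level, cutoff_period='2H', resample_period='1T')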
@numba.jit(nopython=True)
def lmax(arr):
'''Local maximum: Returns value only when centered on maximum
'''
idx = np.argmax(arr)
if idx == len(arr)/2:
return arr[idx]
else:
return np.NaN
@numba.jit(nopython=True)
def lmin(arr):
'''Local minimum: Returns value only when centered on minimum
'''
idx = np.argmin(arr)
if idx == len(arr)/2:
return arr[idx]
else:
return np.NaN
def periods_per_window(moving_window_size: str, period_str: str) -> int:
"""Number of period size in moving window
Args:
moving_window_size (str): moving window size as a string e.g 7H for 7 hour
period_str (str): period as str e.g. 1T for 1 min
Returns:
int: number of periods in the moving window rounded to an integer
"""
return int(pd.Timedelta(moving_window_size)/pd.to_timedelta(pd.tseries.frequencies.to_offset(period_str)))
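# Example (for illustration only): a 7 hour moving window sampled every minute spans
# 7 * 60 = 420 periods, so periods_per_window('7H', '1T') returns 420.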
def tidal_highs(df, moving_window_size='7H'):
"""Tidal highs (there can be up to two highs in a 25 hour period)
Args:
df (DataFrame): a time series with a regular frequency
moving_window_size (str, optional): moving window size to look for highs within. Defaults to '7H'.
Returns:
DataFrame: an irregular time series with highs at resolution of df.index
"""
period_str = df.index.freqstr
periods = periods_per_window(moving_window_size, period_str)
dfmax = df.rolling(moving_window_size, min_periods=periods).apply(lmax, raw=True)
dfmax = dfmax.shift(periods=-(periods//2-1))
dfmax = dfmax.dropna()
dfmax.columns = ['max']
return dfmax
def tidal_lows(df, moving_window_size='7H'):
"""Tidal lows (there can be up to two lows in a 25 hour period)
Args:
df (DataFrame): a time series with a regular frequency
moving_window_size (str, optional): moving window size to look for lows within. Defaults to '7H'.
Returns:
DataFrame: an irregular time series with lows at resolution of df.index
"""
period_str = df.index.freqstr
periods = periods_per_window(moving_window_size, period_str)
dfmin = df.rolling(moving_window_size, min_periods=periods).apply(lmin, raw=True)
dfmin = dfmin.shift(periods=-(periods//2-1))
dfmin = dfmin.dropna()
dfmin.columns = ['min']
return dfmin
def get_tidal_hl(df, cutoff_period='2H', resample_period='1T', interpolate_method='pchip', moving_window_size='7H'):
"""Get Tidal highs and lows
Args:
df (DataFrame): A single column dataframe indexed by datetime
cutoff_period (str, optional): cutoff period for cosine lanczos filter. Defaults to '2H'.
resample_period (str, optional): Resample to regular period. Defaults to '1T'.
interpolate_method (str, optional): interpolation for resampling. Defaults to 'pchip'.
moving_window_size (str, optional): moving window size to look for lows within. Defaults to '7H'.
Returns:
tuple of DataFrame: Tidal high and tidal low time series
"""
dfs = get_smoothed_resampled(df, cutoff_period, resample_period, interpolate_method)
return tidal_highs(dfs), tidal_lows(dfs)
get_tidal_hl_rolling = get_tidal_hl # for older refs. #FIXME
def get_tidal_amplitude(dfh, dfl):
"""Tidal amplitude given tidal highs and lows
Args:
dfh (DataFrame): Tidal highs time series
dfl (DataFrame): Tidal lows time series
Returns:
DataFrame: Amplitude timeseries, at the times of the low following the high being used for amplitude calculation
"""
dfamp = pd.concat([dfh, dfl], axis=1)
dfamp = dfamp[['min']].dropna().join(dfamp[['max']].ffill())
return pd.DataFrame(dfamp['max']-dfamp['min'], columns=['amplitude'])
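# Illustrative sketch (variable names are assumptions, not part of the module): derive the
# amplitude series from the highs and lows of a smoothed single-column DataFrame.
# dfh, dfl = get_tidal_hl(water_level)       # water_level: single-column DataFrame with datetime index
# amplitude = get_tidal_amplitude(dfh, dfl)  # amplitude stamped at the time of each low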
def get_value_diff(df, percent_diff=False):
'''
Get the difference of values of each element in the dataframe
The times in the dataframe may or may not coincide as this is a slice of irregularly sampled time series
On any error, the returned value is np.nan
'''
try:
arr = [df[c].dropna() for c in df.columns]
if percent_diff:
value_diff = 100.0 * (arr[0].values[0]-arr[1].values[0])/arr[1].values[0]
else:
value_diff = arr[0].values[0]-arr[1].values[0]
return value_diff
except:
return np.nan
def get_tidal_amplitude_diff(dfamp1, dfamp2, percent_diff=False):
"""Get the difference of values within +/- 4H of values in the two amplitude arrays
Args:
dfamp1 (DataFrame): Amplitude time series
dfamp2 (DataFrame): Amplitude time series
percent_diff (bool, optional): If true do percent diff. Defaults to False.
Returns:
DataFrame: Difference dfamp1-dfamp2 or % Difference (dfamp1-dfamp2)/dfamp2*100 for values within +/- 4H of each other
"""
dfamp = pd.concat([dfamp1, dfamp2], axis=1).dropna(how='all')
dfamp.columns = ['2', '1']
tdelta = '4H'
sliceamp = [slice(t-pd.to_timedelta(tdelta), t+pd.to_timedelta(tdelta)) for t in dfamp.index]
ampdiff = [get_value_diff(dfamp[sl], percent_diff) for sl in sliceamp]
return pd.DataFrame(ampdiff, index=dfamp.index)
def get_index_diff(df):
'''
Get the difference of index values of each element in the dataframe
The times in the dataframe may or may not coincide
The difference is in Timedelta and is converted to minutes
On any error, the returned value is np.nan
'''
try:
arr = [df[c].dropna() for c in df.columns]
tidal_phase_diff = (arr[0].index[0]-arr[1].index[0]).total_seconds()/60.
return tidal_phase_diff
except:
return np.nan
def get_tidal_phase_diff(dfh2, dfl2, dfh1, dfl1):
"""Calculates the phase difference between df2 and df1 tidal highs and lows
Scans +/- 4 hours around each df1 high/low to find the corresponding df2 high/low, i.e. to
get the tidal highs and lows at the times of df1
Args:
dfh2 (DataFrame): Timeseries of tidal highs
dfl2 (DataFrame): Timeseries of tidal lows
dfh1 (DataFrame): Timeseries of tidal highs
dfl1 (DataFRame): Timeseries of tidal lows
Returns:
DataFrame: Phase difference (dfh2-dfh1) and (dfl2-dfl1) in minutes
"""
tdelta = '4H'
sliceh1 = [slice(t-pd.to_timedelta(tdelta), t+pd.to_timedelta(tdelta)) for t in dfh1.index]
slicel1 = [slice(t-pd.to_timedelta(tdelta), t+pd.to_timedelta(tdelta)) for t in dfl1.index]
dfh21 = pd.concat([dfh2, dfh1], axis=1)
dfh21.columns = ['2', '1']
dfl21 = pd.concat([dfl2, dfl1], axis=1)
dfl21.columns = ['2', '1']
high_phase_diff, low_phase_diff = [get_index_diff(dfh21[sl]) for sl in sliceh1], [
get_index_diff(dfl21[sl]) for sl in slicel1]
merged_diff = pd.merge(pd.DataFrame(high_phase_diff, index=dfh1.index), pd.DataFrame(
low_phase_diff, index=dfl1.index), how='outer', left_index=True, right_index=True)
return merged_diff.iloc[:, 0].fillna(merged_diff.iloc[:, 1])
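# Illustrative sketch (df_model and df_obs are assumed single-column DataFrames with a
# regular datetime index; the names are placeholders for this example only):
# h2, l2 = get_tidal_hl(df_model)
# h1, l1 = get_tidal_hl(df_obs)
# phase_diff_minutes = get_tidal_phase_diff(h2, l2, h1, l1)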
def get_tidal_hl_zerocrossing(df, round_to='1T'):
'''
Finds the tidal high and low times using zero crossings of the first derivative.
This works for all situations but is not robust in the face of noise and perturbations in the signal
'''
zc, zi = zerocross(df)
if round_to:
zc = pd.to_datetime(zc)
#%%
import os
from pathlib import Path
import colorcet as cc
import matplotlib.colors as mplc
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from joblib import Parallel, delayed
from scipy.sparse import csr_matrix, lil_matrix
from scipy.sparse.csgraph import dijkstra
from sklearn.metrics import adjusted_rand_score, pairwise_distances
from graspy.cluster import AutoGMMCluster, GaussianCluster
from graspy.embed import AdjacencySpectralEmbed, ClassicalMDS, LaplacianSpectralEmbed
from graspy.plot import gridplot, heatmap, pairplot
from graspy.utils import get_lcc, symmetrize
from src.data import load_metagraph
from src.embed import ase, lse, preprocess_graph
from src.graph import MetaGraph, preprocess
from src.io import savecsv, savefig, saveskels
from src.visualization import remove_spines, screeplot
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=True, **kws)
def stashcsv(df, name, **kws):
savecsv(df, name, foldername=FNAME, save_on=True, **kws)
def pairwise_sparse_jaccard_distance(X, Y=None):
"""
Computes the Jaccard distance between two sparse matrices or between all pairs in
one sparse matrix.
Args:
X (scipy.sparse.csr_matrix): A sparse matrix.
Y (scipy.sparse.csr_matrix, optional): A sparse matrix.
Returns:
numpy.ndarray: A similarity matrix.
REF https://stackoverflow.com/questions/32805916/compute-jaccard-distances-on-sparse-matrix
"""
if Y is None:
Y = X
assert X.shape[1] == Y.shape[1]
X = X.astype(bool).astype(int)
Y = Y.astype(bool).astype(int)
intersect = X.dot(Y.T)
x_sum = X.sum(axis=1).A1
y_sum = Y.sum(axis=1).A1
xx, yy = np.meshgrid(x_sum, y_sum)
union = (xx + yy).T - intersect
return (1 - intersect / union).A
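# Minimal sketch of the helper on a toy matrix (illustration only):
# toy = csr_matrix(np.array([[1, 0, 1],
#                            [1, 1, 0]]))
# pairwise_sparse_jaccard_distance(toy)  # 2x2; off-diagonal entries equal 1 - 1/3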
run_name = "86.1-BDP-prob-path-cluster"
threshold = 1
weight = "weight"
graph_type = "Gad"
cutoff = 8
base = f"-c{cutoff}-t{threshold}-{graph_type}"
base_path = Path(f"./maggot_models/notebooks/outs/{run_name}/csvs")
meta = pd.read_csv(base_path / str("meta" + base + ".csv"), index_col=0)
path_mat = pd.read_csv(base_path / str("prob-path-mat" + base + ".csv"), index_col=0)
sparse_path = csr_matrix(path_mat.values)
euclid_dists = pairwise_distances(sparse_path, metric="euclidean")
mds = ClassicalMDS(dissimilarity="precomputed")
mds_embed = mds.fit_transform(euclid_dists)
embed_df = pd.DataFrame(data=mds_embed)
import pandas as pd
import numpy as np
import re
from transformers import pipeline
nlp = pipeline("zero-shot-classification")
sr = pd.read_csv('/Back End Code/sports_reference_teams.csv')
df = pd.read_csv('/Data/MTeams.csv')
df.TeamName = [re.sub(r'Univ$', "University", team.replace(' St', ' State')) for team in df.TeamName]
sr['team'] = [team.replace('&', '&') for team in sr.team]
straight_join = pd.merge(df, sr, how='left', left_on='TeamName', right_on='team')
need_team = straight_join[straight_join.team.isna()]
def nlp_flow(team_name, options, l):
"""Append the best-matching sports-reference team name for team_name to the list l."""
if len(options) == 1:
ret = {'TeamName': team_name, "team": options[0]}
l.append(ret)
elif len(options) > 1:
output = nlp(team_name, options)
ret = {'TeamName': team_name, "team": output['labels'][0]}
l.append(ret)
else:
output = nlp(team_name, sr.team.to_list())
ret = {'TeamName': team_name, "team": output['labels'][0]}
l.append(ret)
print(ret)
fixed_list = []
for team in need_team.TeamName:
team2 = team.replace('-', " ")
if len(re.findall(" ", team2)) > 0:
s = team.split()
bad_words = ['St', "St.", "Saint", "University"]
regex = max([word for word in s if word not in bad_words])
options = sr[sr.team.str.lower().str.contains(regex.lower())].team.tolist()
nlp_flow(team, options, fixed_list)
else:
options = sr[sr.team.str.lower().str.contains(team.lower())].team.tolist()
nlp_flow(team, options, fixed_list)
pd.DataFrame(fixed_list)
import numpy as np
import pandas as pd
import hydrostats.data as hd
import hydrostats.visual as hv
import HydroErr as he
import matplotlib.pyplot as plt
import os
from netCDF4 import Dataset
# *****************************************************************************************************
# ****************ERA 5**********************ERA 5**********************ERA 5**************************
# *****************************************************************************************************
# User Input Information:
# location = 'south_asia-geoglows-era_5' # Match output folder name from RAPIDpy
location = 'sam_magdalena-aggQ' # Match output folder name from RAPIDpy
comid_list = [132748, 133929, 135366, 133140, 131847, 131959, 132592, 133694, 134044, 133892, 132851, 133243, 134286, 135363, 131952, 133877, 134847, 132410, 134702] #Comid's for which csv files are desired
# comid_list = [5061131, 5070113, 5076664, 5074545, 5076937, 5077851, 5080349, 5080535, 5080177, 5080305] # Comid's for which csv files are desired
dir = '/Users/chrisedwards/Documents/era5_test/output_netcdf'
# dir = '/Users/chrisedwards/Documents/era5_test/SouthAsiaGeoglows/outputNetCDF'
csv_dir = '/Users/chrisedwards/Documents/era5_test/output_timeseries'
# csv_dir = '/Users/chrisedwards/Documents/era5_test/SouthAsiaGeoglows/timeSeries'
# qout_file = 'DailyAggregatedERA5_Qout_era5_t640_1hr_19790101to20181231.nc4'
qout_file = 'DailyAggregated_Qout_era5_t640_1hr_19790101to20181231.nc4'
# Call the NetCDF file.
file = os.path.join(dir, location, qout_file)
nc = Dataset(file)
nc.variables.keys()
nc.dimensions.keys()
# Define variables from the NetCDF file.
riv = nc.variables['rivid'][:].tolist()
# lat = nc.variables['lat'][:]
# lon = nc.variables['lon'][:]
time = nc.variables['time'][:].tolist()
# Q_error = nc.variables['Qout_error'][:]
Q = nc.variables['Qout'][:]
newTime = []
countT = 0
while countT < len(time):
newTime.append(time[countT] + 3287)
countT += 1
# Convert time from 'seconds since 1970' to the actual date.
dates = pd.to_datetime(newTime, unit='d', origin='unix')
temp_dictionary_era5 = {}
streamflow_dict_era5 = {}
list_streams_era5 = []
counter = 0
for n in riv:
comid = int(n)
name = 'era5-{}-{}'.format(location, comid)
if Q.shape[0] > Q.shape[1]:
temp_dictionary_era5['{}'.format(name)] = pd.DataFrame(data=Q[:, counter], index=dates, columns=['flowrate (cms)'])
else:
temp_dictionary_era5['{}'.format(name)] = pd.DataFrame(data=Q[counter, :], index=dates, columns=['flowrate (cms)'])
"""
Various tools to process WAIS data
Author: <NAME> <<EMAIL>>
"""
import fnmatch
import numpy as np
import icecap as icp
import inspect
import os
import rsr.run as run
import rsr.fit as fit
import rsr.utils as utils
import rsr.invert as invert
#import string
import subradar as sr
import time
import pandas as pd
import multiprocessing
def timing(func):
"""Outputs the time a function takes to execute.
"""
def func_wrapper(*args, **kwargs):
t1 = time.time()
func(*args, **kwargs)
t2 = time.time()
print("- Processed in %.1f s.\n" % (t2-t1))
return func_wrapper
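# Illustrative use of the decorator (hypothetical function, for example only):
# @timing
# def dummy_job(n):
#     _ = [i ** 2 for i in range(n)]
# dummy_job(10 ** 6)  # prints "- Processed in X.X s."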
def loop(func):
"""Decorator for processing sequentially over multiple PSTs
"""
def func_wrapper(*args, **kwargs):
pst_list = icp.get.pst(args[0])
if 'from_process' in kwargs:
process = kwargs['from_process']
elif 'process' not in kwargs:
process = None
_processes = []
for pst_i in pst_list:
product_list, pik_list = icp.get.pik(pst_i, process=process, **kwargs)
for i, pik_i in enumerate(pik_list):
if fnmatch.filter([pik_i], args[1]):
func(pst_i, pik_i, **kwargs)
# p = multiprocessing.Process(target=func, args=(pst_i, pik_i), kwargs=kwargs)
# p.start()
# _processes.append(p)
# for p in _processes:
# p.join()
return func_wrapper
def save(data, target):
"""Save a dictionary of data in a text file
"""
df = pd.DataFrame(data)
df.to_csv(target, sep='\t', index=False, float_format='%.7f', header=False, na_rep='nan')
print('CREATED: ' + target)
def rsr(pst, pik, frame, **kwargs):
"""Apply RSR from a section of a transect
"""
val = icp.get.signal(pst, pik, **kwargs)
amp = 10**(val[frame[0]:frame[1]]/20.)
return run.processor(amp)
#return fit.lmfit(amp)
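# Illustrative call (transect and pick names are placeholders; frame is the [start, stop]
# range of traces to analyse):
# f = rsr('SOME_PST', 'some_pik', frame=[0, 1000])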
@loop
@timing
def rsr_inline(pst, pik, save=True, product='MagHiResInco1', nbcores=4, **kwargs):
"""Process RSR along a profile
"""
p = icp.get.params()
val = icp.get.signal(pst, pik, product=product, air_loss=False, **kwargs)
amp = 10**(val/20)
b = run.along(amp, nbcores=nbcores, **kwargs)
data = pd.DataFrame({'1':b['xa'].values,
'2':b['xo'].values,
'3':b['xb'].values,
'4':b['pt'].values,# + diff,
'5':b['pc'].values,# + diff,
'6':b['pn'].values,# + diff,
'7':b['mu'].values,
'8':b['crl'].values,
'9':b['chisqr'].values}
)
if save is True:
folder = p['rsr_path'] + '/' + pst
if not os.path.exists(folder):
os.makedirs(folder)
data.to_csv(folder + '/' + product+'.'+pik, sep='\t', float_format='%.7f',
na_rep='nan', header=False, index=False)
print('CREATED: ' + folder + '/' + product+'.'+pik)
@timing
@loop
def topik1m(pst, pik, from_process='pik1', from_product='MagLoResInco1', to_product='MagHiResInco1'):
""" Interpolate any pik file into 1m sampling
NOTE: at this point, from_process is mandatory for @loop to work
"""
p = icp.get.params()
source = p['pik_path'].replace( p['process'], '') + from_process + '/' + pst + '/' + from_product + '.' + pik
bxds = p['cmp_path'] + '/' + pst + '/'+ to_product
sweep = '/'.join([p['sweep_path'], pst, 'sweeps'])
test = icp.read.isfile(source) * icp.read.isfile(bxds) * icp.read.isfile(sweep, verbose=False)
if test == 0: return
if not os.path.exists(p['pik_path'] + '/' + pst):
os.makedirs(p['pik_path'] + '/' + pst)
target = p['pik_path'] + '/' + pst + '/'+ to_product + '.' + pik
LU = target + '_LU'
P = target + '_P'
os.system('mkdir -p ' + p['pik_path'] + '/' + pst)
os.system('pik4Hzto1m ' + pst + ' < ' + source + ' > ' + LU)
os.system('pk3 3200 0 3200 ' + bxds + ' < ' + LU + ' > ' + P)
os.system('cat ' + LU + ' ' + P + ' > ' + target)
os.system('rm -f ' + LU + ' ' + P)
print('CREATED: ' + target)
@loop
@timing
def gather(pst, pik, fil=None, product='MagHiResInco1', **kwargs):
"""Gather data in a text file
"""
p = icp.get.params()
a = | pd.DataFrame() | pandas.DataFrame |
# Module deals with creation of ligand and receptor scores, and creation of scConnect tables etc.
import scConnect as cn
import scanpy as sc
version = cn.database.version
organism = cn.database.organism
# Scoring logic for ligands
def ligandScore(ligand, genes):
"""calculate ligand score for given ligand and gene set"""
from scipy.stats.mstats import gmean
import numpy as np
if ligand.ligand_type == "peptide" and isinstance(ligand.preprogene, str):
# check if multiple genes needs to be accounted for
if isinstance(eval(ligand.preprogene), list):
ligand_genes = list()
for gene in eval(ligand.preprogene):
try:
ligand_genes.append(genes[gene])
except KeyError:
#print(f"{gene} not found")
ligand_genes.append(0.0)
# use max, as there might be many orthologs genes for one original
# gene and not all have to be expressed
try:
ligand_score = max(ligand_genes)
except ValueError:
print(f"something is wrong with the list {ligand_genes}")
ligand_score = 0.0
return ligand_score
elif ligand.ligand_type == "molecule":
synthesis = ligand.synthesis
transport = ligand.transport
reuptake = ligand.reuptake
excluded = ligand.excluded
# get geometric mean of synthesis genes (all need to be present)
if not isinstance(synthesis, str):
# If no genes are needed, synthesis is set to nan
synthesis = np.nan
else:
synthesis_expression = list()
for gene in eval(synthesis):
try:
synthesis_expression.append(genes[gene])
except KeyError:
# If gene was not found append 0
#print(f"{gene} not found")
synthesis_expression.append(0.0)
synthesis = gmean(synthesis_expression)
# get maximum of vesicle transporters (only one is needed for molecule transport)
if not isinstance(transport, str):
# If no specific genes are needed, set transport to nan
transport = np.nan
else:
transport_expression = list()
for gene in eval(transport):
try:
transport_expression.append(genes[gene])
except KeyError:
# If gene was not found append 0
#print(f"{gene} not found")
transport_expression.append(0.0)
transport = max(transport_expression)
# Get maximum of reuptake genes (only one is needed)
if not isinstance(reuptake, str):
# If no specific genes are needed, set reuptake to nan
reuptake = np.nan
else:
reuptake_expression = list()
for gene in eval(reuptake):
try:
reuptake_expression.append(genes[gene])
except KeyError:
# If gene was not found append 0
#print(f"{gene} not found")
reuptake_expression.append(0.0)
reuptake = max(reuptake_expression)
# get maximum among the excluding genes, where any expression diverts production to other ligands
if not isinstance(excluded, str):
# If no specific genes are needed, set excluded to 0
excluded = 0
else:
excluded_expression = list()
for gene in eval(excluded):
try:
excluded_expression.append(genes[gene])
except KeyError:
# If gene was not found append 0
#print(f"{gene} not found")
excluded_expression.append(0.0)
excluded = max(excluded_expression)
# combine synthesis, transport and reuptake (geometric mean) and correct with the exclusion factor
promoting_factor = gmean(([x for x in [synthesis, transport, reuptake] if str(x) != "nan"])) # genes driving ligand production, remove nan values
if str(promoting_factor) == "nan": # capture cases where no promoting genes were present
print(f"no promoting genes detected for {ligand.ligand}")
return 0.0 # exit before running exclusion calculation
ligand_score = promoting_factor - excluded # correct ligand expression based on the exclusion factor
if ligand_score < 0: # ligand score should be 0 or positive
ligand_score = 0.0
return ligand_score
# If genes are missing from ligand gene list
else:
print("Big error! ligand type is not defined!")
return 0.0
def ligands(adata, organism=organism, select_ligands=None):
"""return a dataframe with ligand scores for each cluster.
.. note::
Needs a gene call dataframe under adata.uns.gene_call.
Use scConnect.genecall to create such dataframe
organism defaults to mouse, to use genes for other organism select this here.
use select_ligands to only assess the given ligands
(used by optimize_segregation to only check for gaba and glutamate)
Returns: Dict of ligand call for each cluster.
"""
import scConnect as cn
import pkg_resources
import pandas as pd
ligands = pd.read_csv(pkg_resources.resource_filename(
__name__, (f"data/Gene_annotation/{version}/{organism}/ligands.csv")))
if isinstance(select_ligands, list):
select = [True if ligand in select_ligands else False for ligand in ligands.ligand]
ligands = ligands[select]
ligand_df = pd.DataFrame(index=ligands.ligand)
for cluster, genes in adata.uns["gene_call"].items():
cluster_scores = list()
for ligand_data in ligands.iterrows():
ligand = ligand_data[1]
# fetch ligand score for specific ligand and gene set
ligand_score = ligandScore(ligand, genes)
cluster_scores.append(ligand_score)
ligand_df[cluster] = cluster_scores
adata.uns["ligands"] = ligand_df.to_dict()
return adata
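# Illustrative sketch (assumes `adata` already holds a gene call table in
# adata.uns["gene_call"], e.g. produced with scConnect.genecall; example only):
# adata = ligands(adata)  # uses the organism configured in scConnect.database
# pd.DataFrame(adata.uns["ligands"]).head()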
# Scoring logic for receptors
def receptorScore(receptor, genes):
"""calculate receptor score given receptor and gene set"""
from scipy.stats.mstats import gmean
gene_expression = list()
for gene in eval(receptor.gene):
try:
gene_expression.append(genes[gene])
except KeyError:
# If gene was not found append 0
#print(f"{gene} not found")
gene_expression.append(0.0)
# use max, as several genes might be found during ortholog search,
# not all might bee needed to create the receptor
gene_expression = max(gene_expression)
return gene_expression
def receptors(adata, organism=organism):
"""return a dataframe with receptor scores for each cluster.
.. note::
Needs a gene call dataframe under adata.uns.gene_call.
Use scConnect.genecall to create such dataframe.
Returns: Dict of receptor call for each cluster.
"""
import scConnect as cn
import pkg_resources
import pandas as pd
receptors = pd.read_csv(pkg_resources.resource_filename(
__name__, (f"data/Gene_annotation/{version}/{organism}/receptors.csv")))
receptor_df = pd.DataFrame(index=receptors.receptor)
for cluster, genes in adata.uns["gene_call"].items():
cluster_scores = list()
for receptor_data in receptors.iterrows():
receptor = receptor_data[1]
# fetch ligand score for specific ligand and gene set
receptor_score = receptorScore(receptor, genes)
cluster_scores.append(receptor_score)
receptor_df[cluster] = cluster_scores
adata.uns["receptors"] = receptor_df.to_dict()
return adata
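# Illustrative sketch (same assumptions as for ligands(); example only):
# adata = receptors(adata)
# pd.DataFrame(adata.uns["receptors"]).head()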
# Interaction logic
def interactions(emitter, target, self_reference=True, organism=organism, corr_pval=True):
"""return an edge list of interactions between clusters.
If all connections are of interest, use the same data source for
emitter and target.
.. note::
self_reference is only valid when emitter == target.
.. note::
edge_list is returned as a list, and not in a adata object.
This is since multiple adata objects can be passed in to the
function, and whould lead to ambiguity of which object to append the edge_list to.
Returns: List of edges between given emmitor and target clusters.
"""
import pkg_resources
import pandas as pd
from itertools import product
from scConnect.tools import printProgressBar
interactions = pd.read_csv(pkg_resources.resource_filename(
__name__, (f"data/Gene_annotation/{version}/interactions.csv")), index_col=[0, 1], sep=";")
interactions.sort_index(axis="index", inplace=True)
# Create a set of all possible index combinations.
# This is used to test if ligand receptor combination is present.
interaction_set = set(interactions.index)
# An edge list should contain u, v and d,
# where u is input node, v is output node
# and d is a dictionary with edge attributes.
edge_list = list()
# get all clusters
# NOTE: if the same cluster name is used in emitter and target datasets, they are
# assumed to be the same cluster. Give your clusters unique names between your datasets.
try:
emitter_clusters = pd.DataFrame(emitter.uns["ligands"]).columns
target_clusters = pd.DataFrame(target.uns["ligands"]).columns
import time, os, pickle
import numpy as np
import pandas as pd
from flask import Flask, request, jsonify, make_response, Response
from flask_restplus import Api, fields, Resource
from flask_cors import CORS, cross_origin
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage
app = Flask(__name__)
CORS(app)
api = Api(
app,
version='1.0',
title='Santander Customer Transaction Prediction API',
description='Customer Transaction Prediction API')
ns = api.namespace('transaction_prediction', description='Transaction Prediction')
model = pickle.load(open('./lgbm.pkl','rb'))
parser = api.parser()
#In this case, 200 raw features (var_0 .. var_199); the raw dataset has 202 columns including ID_code and target
number_features = 200
#parse features for API
for idx in range(number_features):
parser.add_argument(
'var_'+str(idx),
type=float,
required=True,
help='feature'+str(idx),
location='form'
)
parser.add_argument(
'ID_code',
type=str,
required=False,
help='id',
location='form'
)
# Setup the request parameters, feed them into the model, and determine the transaction prediction (probabilities).
resource_fields = api.model('Resource', {
'result': fields.List(fields.Float)
})
upload_parser = api.parser()
upload_parser.add_argument('file', location='files', type=FileStorage, required=True)
@ns.route('/upload/')
@api.expect(upload_parser)
class Upload(Resource):
def post(self):
args = upload_parser.parse_args()
file = args.get('file') # This is FileStorage instance
result = pd.read_csv(file)
'''
Open Power System Data
Time series Datapackage
read.py : read time series files
'''
import pytz
import yaml
import os
import sys
import numpy as np
import pandas as pd
import logging
from datetime import datetime, date, time, timedelta
import xlrd
from xml.sax import ContentHandler, parse
from .excel_parser import ExcelHandler
logger = logging.getLogger(__name__)
logger.setLevel('DEBUG')
def read_entso_e_transparency(
areas,
filepath,
dataset_name,
headers,
cols,
stacked,
unstacked,
append_headers,
**kwargs):
'''
Read a .csv file from ENTSO-E TRansparency into a DataFrame.
Parameters
----------
filepath : str
Directory path of file to be read
dataset_name : str
Name of variable, e.g. ``solar``
url : str
URL linking to the source website where this data comes from
headers : list
List of strings indicating the level names of the pandas.MultiIndex
for the columns of the dataframe
cols : dict
A mapping of of columnnames to use from input file and a new name to
rename them to. The new name is the header level whose corresponding
values are specified in that column
stacked : list
List of strings indicating the header levels that are reported
column-wise in the input files
unstacked : str
One string indicating the header level that is reported row-wise in the
input files
append_headers: dict
Map of header levels and values to append to Multiindex
kwargs: dict
placeholder for further named function arguments
Returns
----------
df: pandas.DataFrame
The content of one file from ENTSO-E Transparency
'''
df_raw = pd.read_csv(
filepath,
sep='\t',
encoding='utf-16',
header=0,
index_col='timestamp',
parse_dates={'timestamp': ['DateTime']},
date_parser=None,
dayfirst=False,
decimal='.',
thousands=None,
usecols=cols.keys(),
)
# rename columns to comply with other data
df_raw.rename(columns=cols, inplace=True)
if dataset_name == 'Actual Generation per Production Type':
# keep only renewables columns
renewables = {
'Solar': 'solar',
'Wind Onshore': 'wind_onshore',
'Wind Offshore': 'wind_offshore'
}
df_raw = df_raw[df_raw['variable'].isin(renewables.keys())]
df_raw.replace({'variable': renewables}, inplace=True)
if dataset_name == 'Day-ahead Prices':
# Omit polish price data reported in EUR (keeping PLN prices)
# (Before 2017-03-02, the data is very messy)
no_polish_euro = ~(
(df_raw['region'] == 'PSE SA BZ') &
(df_raw.index < pd.to_datetime('2017-03-02 00:00:00')))
df_raw = df_raw.loc[no_polish_euro]
if dataset_name in ['Actual Total Load', 'Day-ahead Total Load Forecast']:
# Zero load is highly unlikely. Such occurences are actually NaNs
df_raw['load'].replace(0, np.nan, inplace=True)
# keep only entries for selected geographic entities as specified in
# areas.csv
area_filter = areas['primary AreaName ENTSO-E'].dropna()
df_raw = df_raw.loc[df_raw['region'].isin(area_filter)]
# based on the AreaName column, map the area names used throughout OPSD
lookup = areas.set_index('primary AreaName ENTSO-E')['area ID'].dropna()
lookup = lookup[~lookup.index.duplicated()]
df_raw['region'] = df_raw['region'].map(lookup)
dfs = {}
for res in ['15', '30', '60']:
df = (df_raw.loc[df_raw['resolution'] == 'PT' + res + 'M', :]
.copy().sort_index(axis='columns'))
df = df.drop(columns=['resolution'])
# DST-handling
# Hours 2-3 of the DST-day in March are both labelled 3:00, with no possibility
# to distinguish them. We have to delete both
dst_transitions_spring = [d.replace(hour=3, minute=m)
for d in pytz.timezone('Europe/Paris')._utc_transition_times
if 2000 <= d.year <= datetime.today().year and d.month == 3
for m in [0, 15, 30, 45]]
df = df.loc[~df.index.isin(dst_transitions_spring)]
# juggle the index and columns
df.set_index(stacked, append=True, inplace=True)
# at this point, only the values we are interested in are left as
# columns
df.columns.rename(unstacked, inplace=True)
df = df.unstack(stacked)
# keep only columns that have at least some nonzero values
df = df.loc[:, (df > 0).any(axis=0)]
# add source, url and unit to the column names.
        # Note: pd.concat inserts new MultiIndex values in front of the old ones
df = pd.concat([df],
keys=[tuple(append_headers.values())],
names=append_headers.keys(),
axis='columns')
# reorder and sort columns
df = df.reorder_levels(headers, axis=1)
dfs[res + 'min'] = df
return dfs
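# Illustrative only: a minimal sketch of how read_entso_e_transparency might be
# called for the generation dataset. The raw column names, header levels and
# append values below are assumptions for demonstration, not the project's
# actual configuration.
def _example_entso_e_transparency_call(areas):
    return read_entso_e_transparency(
        areas=areas,
        filepath='input/AggregatedGenerationPerType_2020.csv',
        dataset_name='Actual Generation per Production Type',
        headers=['region', 'variable', 'attribute', 'source', 'web', 'unit'],
        cols={'DateTime': 'timestamp',
              'ResolutionCode': 'resolution',
              'AreaName': 'region',
              'ProductionType': 'variable',
              'ActualGenerationOutput': 'generation'},
        stacked=['region', 'variable'],
        unstacked='attribute',
        append_headers={'source': 'ENTSO-E Transparency',
                        'web': 'https://transparency.entsoe.eu',
                        'unit': 'MW'},
    )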
def read_pse(filepath):
'''
Read a .csv file from PSE into a DataFrame.
Parameters
----------
filepath : str
Directory path of file to be read
Returns
----------
df: pandas.DataFrame
The content of one file from PSE
'''
df = pd.read_csv(
filepath,
sep=';',
encoding='cp1250',
header=0,
index_col=None,
parse_dates=None,
date_parser=None,
dayfirst=False,
decimal=',',
thousands=None,
# hours are indicated by their ending time. During fall DST,
# UTC 23:00-00:00 = CEST 1:00-2:00 is indicated by '02',
# UTC 00:00-01:00 = CEST 2:00-3:00 is indicated by '02A',
# UTC 01:00-02:00 = CET 2:00-3:00 is indicated by '03'.
# regular hours require backshifting by 1 period
converters={
'Time': lambda x: '2:00' if x == '2A' else str(int(x) - 1) + ':00'
}
)
# Create a list of spring-daylight savings time (DST)-transitions
dst_transitions_spring = [
d.replace(hour=2)
for d in pytz.timezone('Europe/Warsaw')._utc_transition_times
if 2000 <= d.year <= datetime.today().year and d.month == 3]
# Account for an error where an hour is jumped in the data, incrementing
# the hour by one
#time_int = df['Time'].str[:-3].astype(int)
# if (time_int time_int.shift(1) - 1).
# if (time_int == 24).any():
# logger.info(filepath)
# df = df[time_int != 24]
if df['Date'][0] == 20130324:
df['Time'] = [str(num) + ':00' for num in range(24)]
# The hour from 01:00 - 02:00 (CET) should by PSE's logic be indexed
    # by "02:00" (the endpoint), but on the DST day in spring they use "03:00" in
# the files. Our routine requires it to be "01:00" (the start point).
df['proto_timestamp'] = pd.to_datetime(
df['Date'].astype(str) + ' ' + df['Time'])
slicer = df['proto_timestamp'].isin(dst_transitions_spring)
df.loc[slicer, 'Time'] = '1:00'
# create the actual timestamp from the corrected "Date"-column
df.index = pd.to_datetime(df['Date'].astype(str) + ' ' + df['Time'])
# DST-handling
# 'ambiguous' refers to how the October dst-transition hour is handled.
# 'infer' will attempt to infer dst-transition hours based on order.
df.index = df.index.tz_localize('Europe/Warsaw', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
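# Illustrative check of the 'Time' converter used in read_pse: regular hour labels
# are shifted back by one period, while the duplicated autumn-DST hour '2A' stays 2:00.
def _pse_time_converter_examples():
    convert = lambda x: '2:00' if x == '2A' else str(int(x) - 1) + ':00'
    assert convert('1') == '0:00'    # first hour of the day
    assert convert('14') == '13:00'  # regular hours are backshifted by one
    assert convert('2A') == '2:00'   # duplicated autumn-DST hour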
def read_ceps(filepath):
'''Read a file from CEPS into a DataFrame'''
df = pd.read_csv(
filepath,
sep=';',
header=2,
parse_dates=True,
dayfirst=True,
skiprows=None,
index_col=0,
usecols=[0, 1, 2]
)
# DST-handling
df.index = df.index.tz_localize('Europe/Prague', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
def read_elia(filepath):
'''Read a file from Elia into a DataFrame'''
df = pd.read_excel(
io=filepath,
header=3,
parse_dates={'timestamp': ['DateTime']},
dayfirst=True,
index_col='timestamp',
usecols=None
)
# DST handling
df.index = df.index.tz_localize('Europe/Brussels', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
def read_energinet_dk(filepath):
'''Read a file from energinet.dk into a DataFrame'''
df = pd.read_excel(
io=filepath,
header=2, # the column headers are taken from 3rd row.
# 2nd row also contains header info like in a multiindex,
        # i.e. whether the columns are price or generation data.
        # However, we will make our own column names below.
# Row 3 is enough to unambiguously identify the columns
skiprows=None,
index_col=None,
parse_dates=True,
dayfirst=False,
usecols=None, # None means: parse all columns
thousands=',',
# hours in 2nd column run from 1-24, we need 0-23:
# (converters seem not to work in combination with parse_dates)
converters={1: lambda x: x - 1}
)
# Create the timestamp column and set as index
df.index = df.iloc[:, 0] + pd.to_timedelta(df.iloc[:, 1], unit='h')
# DST-handling
# Create a list of spring-daylight savings time (DST)-transitions
dst_transitions_spring = [
d.replace(hour=2)
for d in pytz.timezone('Europe/Copenhagen')._utc_transition_times
if 2000 <= d.year <= datetime.today().year and d.month == 3]
# Drop 3rd hour for (spring) DST-transition from df.
df = df[~df.index.isin(dst_transitions_spring)]
# Verify that daylight savings time transitions are handled as expected
check_dst(df.index, autumn_expect=1)
# Conform index to UTC
dst_arr = np.ones(len(df.index), dtype=bool)
df.index = df.index.tz_localize('Europe/Copenhagen', ambiguous=dst_arr)
df.index = df.index.tz_convert(None)
return df
def read_entso_e_statistics(filepath,):
'''Read a file from ENTSO-E into a DataFrame'''
df = pd.read_excel(
io=filepath,
header=18,
usecols='A, B, G, K, L, N, P:AU'
)
# rename columns
# According to the specific national considerations, GB data reflects the
# whole UK including Northern Ireland since 2016
renamer = {df.columns[0]: 'date', df.columns[1]: 'time', 'GB': 'GB_UKM'}
df.rename(columns=renamer, inplace=True)
    # Zero load is highly unlikely. Such occurrences are actually NaNs
df.replace(0, np.nan, inplace=True)
# Construct the index and set timezone
# for some reason, the 'date' column has already been parsed to datetime
df['date'] = df['date'].fillna(method='ffill').dt.strftime('%Y-%m-%d')
df.index = pd.to_datetime(df.pop('date') + ' ' + df.pop('time').str[:5])
# DST-handling
df.index = df.index.tz_localize('Europe/Brussels', ambiguous='infer')
df.index = df.index.tz_convert(None)
return df
def read_entso_e_portal(filepath):
'''Read a file from the old ENTSO-E Data Portal into a DataFrame'''
df = pd.read_excel(
io=filepath,
header=3, # 0 indexed, so the column names are actually in the 4th row
skiprows=None,
# create MultiIndex from first 2 columns ['date', 'Country']
index_col=[0, 1],
parse_dates={'date': ['Year', 'Month', 'Day']},
dayfirst=False,
usecols=None, # None means: parse all columns
)
    # The "Coverage ratio"-column specifies for some countries a scaling factor
# with which we should upscale the reported values
df = df.divide(df.pop('Coverage ratio'), axis='index') * 100
# The original data has days and countries in the rows and hours in the
# columns. This rearranges the table, mapping hours on the rows and
# countries on the columns.
df.columns.names = ['hour']
df = df.stack(level='hour').unstack(level='Country').reset_index()
# Create the timestamp column and set as index
df.index = df.pop('date') + pd.to_timedelta(df.pop('hour'), unit='h')
# DST-handling
# Delete values in DK and FR that should not exist
df = df.loc[df.index != '2015-03-29 02:00', :]
# Delete values in DK that are obviously twice as high as they should be
df.loc[df.index.isin(['2014-10-26 02:00:00', '2015-10-25 02:00:00']),
'DK'] = np.nan
# Delete values in UK that are all zero except for one day
df.loc[(df.index.year == 2010) & (df.index.month == 1), 'GB'] = np.nan
# Delete values in CY that are mostly zero but not always
df.loc[(df.index.year < 2013), 'CY'] = np.nan
    # Zero load is highly unlikely. Such occurrences are actually NaNs
df.replace(0, np.nan, inplace=True)
# Verify that daylight savings time transitions are handled as expected
check_dst(df.index, autumn_expect=1)
# Conform index to UTC
dst_arr = np.ones(len(df.index), dtype=bool)
df.index = df.index.tz_localize('CET', ambiguous=dst_arr)
df.index = df.index.tz_convert(None)
# Rename regions to comply with naming conventions
renamer = {'DK_W': 'DK_1', 'UA_W': 'UA_west', 'NI': 'GB_NIR', 'GB': 'GB_GBN'}
df.rename(columns=renamer, inplace=True)
# Calculate load for whole UK from Great Britain and Northern Ireland data
df['GB_UKM'] = df['GB_GBN'].add(df['GB_NIR'])
return df
def read_hertz(filepath, dataset_name):
'''Read a file from 50Hertz into a DataFrame'''
df = pd.read_csv(
filepath,
sep=';',
header=3,
index_col='timestamp',
parse_dates={'timestamp': ['Datum', 'Von']},
date_parser=None,
dayfirst=True,
decimal=',',
thousands='.',
# truncate values in 'time' column after 5th character
converters={'Von': lambda x: x[:5]},
)
# Wind onshore
if dataset_name == 'wind generation_actual pre-offshore':
df['wind_onshore'] = df['MW']
# Until 2006, and in 2015 (except for wind_generation_pre-offshore),
    # during the fall dst-transition, only the
    # wintertime hour (marked by a B in the data) is reported; the summertime
    # hour (marked by an A) is missing in the data.
# dst_arr is a boolean array consisting only of "False" entries, telling
# python to treat the hour from 2:00 to 2:59 as wintertime.
# Verify that daylight savings time transitions are handled as expected
check_dst(df.index, autumn_expect=2)
# Conform index to UTC
if (pd.to_datetime(df.index.values[0]).year not in [2005, 2006, 2015] or
(dataset_name == 'wind generation_actual pre-offshore' and
             pd.to_datetime(df.index.values[0]).year == 2015)):
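        # NOTE: the original snippet breaks off here. The rest of the index
        # localization below is a minimal sketch based on the comments above;
        # the 'Europe/Berlin' timezone and the all-False dst_arr are assumptions.
        df.index = df.index.tz_localize('Europe/Berlin', ambiguous='infer')
    else:
        # only the wintertime hour is reported, so label the ambiguous hour
        # as wintertime for every entry
        dst_arr = np.zeros(len(df.index), dtype=bool)
        df.index = df.index.tz_localize('Europe/Berlin', ambiguous=dst_arr)
    df.index = df.index.tz_convert(None)
    return df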
import pandas as pd
import numpy as np
from pandas.api.types import is_numeric_dtype, is_categorical, infer_dtype
def dataset_profile(data: pd.DataFrame):
"""A simple function to get you a simple dataset variables overview
Args:
data (pd.DataFrame): the dataset to be profiled
Returns:
pd.DataFrame: containing the report
"""
report = {}
for col in data.columns:
col_dict = {}
col_dict['feature_name'] = col
col_dict['inferred_type'] = infer_dtype(data[col])
col_dict['current_type'] = data[col].dtype
col_dict['missing_values_sum'] = data[col].isna().sum()
col_dict['missing_values_perc'] = data[col].isna().mean()
        # NOTE: truncated in the original; the numeric-type condition below is an assumption
        if infer_dtype(data[col]) in ('integer', 'floating'):
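            # The rest of this function is missing in the original. A minimal
            # sketch of how the per-type summary and the final report could be
            # assembled; the chosen statistics are illustrative assumptions.
            col_dict['min'] = data[col].min()
            col_dict['mean'] = data[col].mean()
            col_dict['max'] = data[col].max()
        else:
            col_dict['n_unique'] = data[col].nunique()
        report[col] = col_dict
    return pd.DataFrame(report).T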
import numpy as np
import pandas as pd
df1 = pd.DataFrame(np.ones((3,4))*0,columns=['a','b','c','d'])
df2 = pd.DataFrame(np.ones((3,4))*1,columns=['a','b','c','d'])
df3 = pd.DataFrame(np.ones((3,4))*2,columns=['a','b','c','d'])
print(df1)
print(df2)
print(df3)
# ignore_index re-numbers the index of the concatenated result
res = pd.concat([df1,df2,df3],axis=0,ignore_index=True)
print(res)
res = pd.concat([df1,df2,df3],axis=1,ignore_index=True)
# External Libraries
from datetime import date
import pandas as pd
pd.options.mode.chained_assignment = None
import os
from pathlib import Path
import logging, coloredlogs
# Internal Libraries
import dicts_and_lists as dal
import Helper
# ------ Logger ------- #
logger = logging.getLogger('get_past_datasets.py')
coloredlogs.install(level='DEBUG')
folder = 'past_data/2017_2018/'
months = ['october', 'november', 'december', 'january', 'february', 'march', 'april', 'may', 'june']
for month in months:
url = 'https://www.basketball-reference.com/leagues/NBA_2018_games-'+ month + '.html'
    df_url = pd.read_html(url)
# pip install pytest
# pytest tests\test_bn.py
from pgmpy.factors.discrete import TabularCPD
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pgmpy.estimators import TreeSearch
from pgmpy.models import BayesianModel
import networkx as nx
from pgmpy.inference import VariableElimination
from pgmpy.estimators import BDeuScore, K2Score, BicScore
import bnlearn as bn
def test_import_DAG():
import bnlearn as bn
DAG = bn.import_DAG('Sprinkler')
# TEST 1: check output is unchanged
assert [*DAG.keys()]==['model', 'adjmat']
# TEST 2: Check model output is unchanged
assert DAG['adjmat'].sum().sum()==4
# TEST 3:
assert 'pgmpy.models.BayesianModel.BayesianModel' in str(type(DAG['model']))
# TEST 4:
DAG = bn.import_DAG('alarm', verbose=0)
assert [*DAG.keys()]==['model', 'adjmat']
DAG = bn.import_DAG('andes', verbose=0)
assert [*DAG.keys()]==['model', 'adjmat']
DAG = bn.import_DAG('asia', verbose=0)
assert [*DAG.keys()]==['model', 'adjmat']
def test_make_DAG():
import bnlearn as bn
edges = [('Cloudy', 'Sprinkler')]
methodtypes = ['bayes', 'naivebayes']
for methodtype in methodtypes:
DAG = bn.make_DAG(edges, methodtype=methodtype)
# TEST 1
if methodtype=='bayes':
assert 'pgmpy.models.BayesianModel.BayesianModel' in str(type(DAG['model']))
else:
assert 'pgmpy.models.NaiveBayes.NaiveBayes' in str(type(DAG['model']))
# TEST 2
cpt_cloudy = TabularCPD(variable='Cloudy', variable_card=2, values=[[0.3], [0.7]])
cpt_sprinkler = TabularCPD(variable='Sprinkler', variable_card=2, values=[[0.4, 0.9], [0.6, 0.1]], evidence=['Cloudy'], evidence_card=[2])
assert bn.make_DAG(DAG, CPD=[cpt_cloudy, cpt_sprinkler], checkmodel=True)
# TEST 3
assert np.all(np.isin([*DAG.keys()], ['adjmat', 'model', 'methodtype', 'model_edges']))
def test_import_example():
import bnlearn as bn
# TEST 1:
df = bn.import_example()
assert df.shape==(1000, 4)
def test_sampling():
import bnlearn as bn
# TEST 1:
model = bn.import_DAG('Sprinkler')
n = np.random.randint(10, 1000)
df = bn.sampling(model, n=n)
assert df.shape==(n, 4)
def test_to_undirected():
import bnlearn as bn
# TEST 1:
randdata=['sprinkler', 'alarm', 'andes', 'asia', 'sachs']
n = np.random.randint(0, len(randdata))
DAG = bn.import_DAG(randdata[n], CPD=False, verbose=0)
assert (DAG['adjmat'].sum().sum() *2)==bn.to_undirected(DAG['adjmat']).sum().sum()
def test_compare_networks():
import bnlearn as bn
DAG = bn.import_DAG('Sprinkler', verbose=0)
G = bn.compare_networks(DAG, DAG, showfig=False)
assert np.all(G[0]==[[12, 0], [0, 4]])
def test_adjmat2vec():
import bnlearn as bn
DAG = bn.import_DAG('Sprinkler', verbose=0)
out = bn.adjmat2vec(DAG['adjmat'])
assert np.all(out['source']==['Cloudy', 'Cloudy', 'Sprinkler', 'Rain'])
def test_vec2adjmat():
import bnlearn as bn
DAG = bn.import_DAG('Sprinkler', verbose=0)
out = bn.adjmat2vec(DAG['adjmat'])
# TEST: conversion
assert bn.vec2adjmat(out['source'], out['target']).shape==DAG['adjmat'].shape
def test_structure_learning():
import bnlearn as bn
df = bn.import_example()
model = bn.structure_learning.fit(df)
assert [*model.keys()]==['model', 'model_edges', 'adjmat', 'config']
model = bn.structure_learning.fit(df, methodtype='hc', scoretype='bic')
assert [*model.keys()]==['model', 'model_edges', 'adjmat', 'config']
model = bn.structure_learning.fit(df, methodtype='hc', scoretype='k2')
assert [*model.keys()]==['model', 'model_edges', 'adjmat', 'config']
model = bn.structure_learning.fit(df, methodtype='cs', scoretype='bdeu')
assert [*model.keys()]==['undirected', 'undirected_edges', 'pdag', 'pdag_edges', 'dag', 'dag_edges', 'model', 'model_edges', 'adjmat', 'config']
model = bn.structure_learning.fit(df, methodtype='cs', scoretype='k2')
assert [*model.keys()]==['undirected', 'undirected_edges', 'pdag', 'pdag_edges', 'dag', 'dag_edges', 'model', 'model_edges', 'adjmat', 'config']
model = bn.structure_learning.fit(df, methodtype='ex', scoretype='bdeu')
assert [*model.keys()]==['model', 'model_edges', 'adjmat', 'config']
model = bn.structure_learning.fit(df, methodtype='ex', scoretype='k2')
assert [*model.keys()]==['model', 'model_edges', 'adjmat', 'config']
model = bn.structure_learning.fit(df, methodtype='cl', root_node='Cloudy')
assert [*model.keys()]==['model', 'model_edges', 'adjmat', 'config']
model = bn.structure_learning.fit(df, methodtype='tan', root_node='Cloudy', class_node='Rain')
assert [*model.keys()]==['model', 'model_edges', 'adjmat', 'config']
# Test the filtering
DAG = bn.import_DAG('asia')
# Sampling
df = bn.sampling(DAG, n=1000)
# Structure learning of sampled dataset
model = bn.structure_learning.fit(df)
assert np.all(np.isin(model['adjmat'].columns.values, ['smoke', 'bronc', 'lung', 'asia', 'tub', 'either', 'dysp', 'xray']))
# hc filter on edges
model = bn.structure_learning.fit(df, methodtype='hc', white_list=['smoke', 'either'], bw_list_method='nodes')
assert np.all(model['adjmat'].columns.values==['smoke', 'either'])
model = bn.structure_learning.fit(df, methodtype='hc', white_list=[('smoke', 'either')], bw_list_method='edges')
assert np.all(np.isin(model['adjmat'].columns.values, ['smoke', 'bronc', 'lung', 'asia', 'tub', 'either', 'dysp', 'xray']))
model = bn.structure_learning.fit(df, methodtype='hc', black_list=['smoke', 'either'], bw_list_method='nodes')
assert np.all(np.isin(model['adjmat'].columns.values, ['bronc', 'lung', 'asia', 'tub', 'dysp', 'xray']))
model = bn.structure_learning.fit(df, methodtype='hc', scoretype='bic', black_list=['smoke', 'either'], bw_list_method='edges')
assert np.all(np.isin(model['adjmat'].columns.values, ['smoke', 'bronc', 'lung', 'asia', 'tub', 'either', 'dysp', 'xray']))
# hc filter on node
model = bn.structure_learning.fit(df, methodtype='ex', white_list=['smoke', 'either'], bw_list_method='nodes')
assert np.all(model['adjmat'].columns.values==['either', 'smoke'])
model = bn.structure_learning.fit(df, methodtype='ex', black_list=['asia', 'tub', 'either', 'dysp', 'xray'], bw_list_method='nodes')
assert np.all(model['adjmat'].columns.values==['bronc', 'lung', 'smoke'])
# cs filter
model = bn.structure_learning.fit(df, methodtype='cs', white_list=['smoke', 'either'], bw_list_method='nodes')
assert np.all(np.isin(model['adjmat'].columns.values, ['smoke', 'either']))
model= bn.structure_learning.fit(df, methodtype='cs', black_list=['asia', 'tub', 'either', 'dysp', 'xray'], bw_list_method='nodes')
assert np.all(np.isin(model['adjmat'].columns.values, ['smoke', 'lung', 'bronc']))
# cl filter
model = bn.structure_learning.fit(df, methodtype='cl', white_list=['smoke', 'either'], bw_list_method='nodes', root_node='smoke')
assert np.all(model['adjmat'].columns.values==['smoke', 'either'])
# tan
model = bn.structure_learning.fit(df, methodtype='tan', white_list=['smoke', 'either'], bw_list_method='nodes', root_node='smoke', class_node='either')
assert np.all(model['adjmat'].columns.values==['smoke', 'either'])
# naivebayes
model = bn.structure_learning.fit(df, methodtype='naivebayes', root_node="smoke")
assert np.all(model['adjmat'].columns.values==['smoke', 'asia', 'tub', 'lung', 'bronc', 'either', 'xray', 'dysp'])
df=bn.import_example(data='andes')
# PGMPY
est = TreeSearch(df)
dag = est.estimate(estimator_type="tan", class_node='DISPLACEM0')
bnq = BayesianModel(dag.edges())
bnq.fit(df, estimator=None) # None means maximum likelihood estimator
bn_infer = VariableElimination(bnq)
q = bn_infer.query(variables=['DISPLACEM0'], evidence={'RApp1': 1})
print(q)
# BNLEARN
model = bn.structure_learning.fit(df, methodtype='tan', class_node='DISPLACEM0', scoretype='bic')
model_bn = bn.parameter_learning.fit(model, df, methodtype='ml') # maximum likelihood estimator
query=bn.inference.fit(model_bn, variables=['DISPLACEM0'], evidence={'RApp1': 1})
# DAG COMPARISON
assert np.all(model_bn['adjmat']==model['adjmat'])
assert list(dag.edges())==list(model['model'].edges())
assert list(dag.edges())==model['model_edges']
# COMPARE THE CPDs names
qbn_cpd = []
bn_cpd = []
for cpd in bnq.get_cpds(): qbn_cpd.append(cpd.variable)
for cpd in model_bn['model'].get_cpds(): bn_cpd.append(cpd.variable)
assert len(bn_cpd)==len(qbn_cpd)
assert np.all(np.isin(bn_cpd, qbn_cpd))
# COMPARE THE CPD VALUES
nr_diff = 0
for cpd_bnlearn in model_bn['model'].get_cpds():
for cpd_pgmpy in bnq.get_cpds():
if cpd_bnlearn.variable==cpd_pgmpy.variable:
assert np.all(cpd_bnlearn.values==cpd_pgmpy.values)
# if not np.all(cpd_bnlearn.values==cpd_pgmpy.values):
# print('%s-%s'%(cpd_bnlearn.variable, cpd_pgmpy.variable))
# print(cpd_bnlearn)
# print(cpd_pgmpy)
# nr_diff=nr_diff+1
# input('press enter to see the next difference in CPD.')
def test_parameter_learning():
import bnlearn as bn
df = bn.import_example()
model = bn.import_DAG('sprinkler', CPD=False)
model_update = bn.parameter_learning.fit(model, df)
assert [*model_update.keys()]==['model', 'adjmat', 'config', 'model_edges']
def test_inference():
import bnlearn as bn
DAG = bn.import_DAG('sprinkler')
q1 = bn.inference.fit(DAG, variables=['Wet_Grass'], evidence={'Rain': 1, 'Sprinkler': 0, 'Cloudy': 1}, to_df=False, verbose=0)
assert 'pgmpy.factors.discrete.DiscreteFactor.DiscreteFactor' in str(type(q1))
assert q1.df is None
q1 = bn.inference.fit(DAG, variables=['Wet_Grass'], evidence={'Rain': 1, 'Sprinkler': 0, 'Cloudy': 1}, to_df=True, verbose=0)
assert q1.df is not None
def test_query2df():
import bnlearn as bn
DAG = bn.import_DAG('sprinkler')
query = bn.inference.fit(DAG, variables=['Wet_Grass'], evidence={'Rain': 1, 'Sprinkler': 0, 'Cloudy': 1}, to_df=False, verbose=0)
df = bn.query2df(query)
assert df.shape==(2, 2)
assert np.all(df.columns==['Wet_Grass', 'p'])
query = bn.inference.fit(DAG, variables=['Wet_Grass', 'Sprinkler'], evidence={'Rain': 1, 'Cloudy': 1}, to_df=False, verbose=0)
df = bn.query2df(query)
assert np.all(np.isin(df.columns, ['Sprinkler', 'Wet_Grass', 'p']))
assert df.shape==(4, 3)
# Load example mixed dataset
df_raw = bn.import_example(data='titanic')
# Convert to onehot
dfhot, dfnum = bn.df2onehot(df_raw)
dfnum.loc[0:50, 'Survived'] = 2
# Structure learning
# DAG = bn.structure_learning.fit(dfnum, methodtype='cl', black_list=['Embarked','Parch','Name'], root_node='Survived', bw_list_method='nodes')
DAG = bn.structure_learning.fit(dfnum, methodtype='hc', black_list=['Embarked', 'Parch', 'Name'], bw_list_method='edges')
# Parameter learning
model = bn.parameter_learning.fit(DAG, dfnum)
# Make inference
q1 = bn.inference.fit(model, variables=['Survived'], evidence={'Sex': True, 'Pclass': True}, verbose=0)
df = bn.query2df(q1)
assert np.all(df==q1.df)
assert df.shape==(3, 2)
def test_predict():
import bnlearn as bn
df = bn.import_example('asia')
edges = [('smoke', 'lung'),
('smoke', 'bronc'),
('lung', 'xray'),
('bronc', 'xray')]
# Make the actual Bayesian DAG
DAG = bn.make_DAG(edges, verbose=0)
model = bn.parameter_learning.fit(DAG, df, verbose=3)
# Generate some data based on DAG
Xtest = bn.sampling(model, n=100)
out = bn.predict(model, Xtest, variables=['bronc', 'xray'])
assert np.all(np.isin(out.columns, ['bronc', 'xray', 'p']))
assert out.shape==(100, 3)
out = bn.predict(model, Xtest, variables=['smoke', 'bronc', 'lung', 'xray'])
assert np.all(np.isin(out.columns, ['xray', 'bronc', 'lung', 'smoke', 'p']))
assert out.shape==(100, 5)
out = bn.predict(model, Xtest, variables='smoke')
assert np.all(out.columns==['smoke', 'p'])
assert out.shape==(100, 2)
def test_topological_sort():
import bnlearn as bn
DAG = bn.import_DAG('sprinkler')
# Check DAG input
assert bn.topological_sort(DAG, 'Rain')==['Rain', 'Wet_Grass']
assert bn.topological_sort(DAG)==['Cloudy', 'Sprinkler', 'Rain', 'Wet_Grass']
# Different inputs
assert bn.topological_sort(DAG['adjmat'], 'Rain')==['Rain', 'Wet_Grass']
assert bn.topological_sort(bn.adjmat2vec(DAG['adjmat']), 'Rain')
# Check model output
df = bn.import_example('sprinkler')
model = bn.structure_learning.fit(df, methodtype='chow-liu', root_node='Wet_Grass')
assert bn.topological_sort(model, 'Rain')==['Rain', 'Cloudy', 'Sprinkler']
def test_save():
import bnlearn as bn
# Load asia DAG
df = bn.import_example('asia')
model = bn.structure_learning.fit(df, methodtype='tan', class_node='lung')
bn.save(model, overwrite=True)
# Load the DAG
model_load = bn.load()
assert model.keys()==model_load.keys()
for key in model.keys():
if not key=='model':
assert np.all(model[key]==model_load[key])
edges = [('smoke', 'lung'),
('smoke', 'bronc'),
('lung', 'xray'),
('bronc', 'xray')]
# Make the actual Bayesian DAG
DAG = bn.make_DAG(edges, verbose=0)
# Save the DAG
bn.save(DAG, overwrite=True)
# Load the DAG
DAGload = bn.load()
# Compare
assert DAG.keys()==DAGload.keys()
for key in DAG.keys():
if not key=='model':
assert np.all(DAG[key]==DAGload[key])
# Learn its parameters from data and perform the inference.
model = bn.parameter_learning.fit(DAG, df, verbose=0)
# Save the DAG
bn.save(model, overwrite=True)
# Load the DAG
model_load = bn.load()
# Compare
assert model.keys()==model_load.keys()
for key in model.keys():
if not key=='model':
assert np.all(model[key]==model_load[key])
def test_independence_test():
import bnlearn as bn
df = bn.import_example(data='asia')
# Structure learning of sampled dataset
model = bn.structure_learning.fit(df)
# Compute edge weights based on chi_square test statistic
tests = ['chi_square', 'g_sq', 'log_likelihood', 'freeman_tuckey', 'modified_log_likelihood', 'neyman', 'cressie_read']
for test in tests:
model = bn.independence_test(model, df, test=test)
assert model.get('independence_test', None) is not None
assert set(model['independence_test'].columns)==set({test, 'dof', 'p_value', 'source', 'stat_test', 'target'})
assert model['independence_test'].columns[-2]==test
assert np.any(model['independence_test']['stat_test'])
assert model['independence_test'].shape[0]>1
DAG = bn.import_DAG('water', verbose=0)
# Sampling
df = bn.sampling(DAG, n=1000)
# Parameter learning
model = bn.parameter_learning.fit(DAG, df)
# Test for independence
model1 = bn.independence_test(model, df, prune=False)
# Test for independence
model2 = bn.independence_test(model, df, prune=True)
assert model['model_edges']==model1['model_edges']
assert len(model1['model_edges'])==model1['independence_test'].shape[0]
assert len(model2['model_edges'])==model2['independence_test'].shape[0]
assert len(model2['model_edges'])<len(model1['model_edges'])
assert len(model2['model_edges'])<len(model['model_edges'])
def test_edge_properties():
import bnlearn as bn
# Example 1
edges = [('A', 'B'), ('A', 'C'), ('A', 'D')]
# Create DAG and store in model
model = bn.make_DAG(edges)
edge_properties = bn.get_edge_properties(model)
# Check availability of properties
assert edge_properties[('A', 'B')].get('color')
assert edge_properties[('A', 'B')].get('weight')
assert edge_properties[('A', 'C')].get('color')
assert edge_properties[('A', 'C')].get('weight')
assert edge_properties[('A', 'D')].get('color')
assert edge_properties[('A', 'D')].get('weight')
# Make plot
assert bn.plot(model, edge_properties=edge_properties, interactive=False)
assert bn.plot(model, interactive=False)
edges = [('A', 'B'), ('A', 'C'), ('A', 'D')]
# Create DAG and store in model
methodtypes=['bayes', 'naivebayes']
for methodtype in methodtypes:
model = bn.make_DAG(edges, methodtype=methodtype)
# Remove methodtype
model['methodtype']=''
# Check if it is restored to the correct methodtype
model = bn.make_DAG(model['model'])
assert model['methodtype']==methodtype
# Load asia DAG
df = bn.import_example(data='asia')
# Structure learning of sampled dataset
model = bn.structure_learning.fit(df)
edge_properties1 = bn.get_edge_properties(model)
assert np.all(pd.DataFrame(edge_properties1).iloc[1, :]==1)
# Compute edge weights based on chi_square test statistic
model = bn.independence_test(model, df, test='chi_square')
# Get the edge properties
edge_properties2 = bn.get_edge_properties(model)
    assert np.sum(pd.DataFrame(edge_properties2).iloc[1, :]) > 0  # original comparison truncated; assumes positive chi-square-based weights
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 13 18:53:16 2021
@author: <NAME>
https://www.kaggle.com/ash316/eda-to-prediction-dietanic
"""
"""
Part1: Exploratory Data Analysis(EDA):
1)Analysis of the features.
2)Finding any relations or trends considering multiple features.
Part2: Feature Engineering and Data Cleaning:
1)Adding a few features.
2)Removing redundant features.
3)Converting features into suitable form for modeling.
Part3: Predictive Modeling
1)Running Basic Algorithms.
2)Cross Validation.
3)Ensembling.
4)Important Features Extraction.
"""
# Part1: Exploratory Data Analysis(EDA)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('fivethirtyeight')
import warnings
warnings.filterwarnings('ignore')
data = pd.read_csv('D:\\AI\\Kaggle\\EDA To Prediction(DieTanic)\\train.csv')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 17:32:32 2020
@author: hugokleikamp
"""
#%% clear variables and console
try:
from IPython import get_ipython
get_ipython().magic('clear')
get_ipython().magic('reset -f')
except:
pass
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
"Parameters & setup"
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
#%% change directory to script directory (should work on windows and mac)
import os
from pathlib import Path
from inspect import getsourcefile
os.chdir(str(Path(os.path.abspath(getsourcefile(lambda:0))).parents[0]))
script_dir=os.getcwd()
print(os.getcwd())
basedir=os.getcwd()
#%% parameters Part 1: Unipept submission
starting_vars=dir()
#filter parameters
ALC_cutoff=40 # miniumum required ALC score (Peaks score)
Score_cutoff=-0.1 # mininum required score cutoff (DeepNovo score)
ppm_cutoff=20 # maximum allowed ppm
length_cutoff=7 # mininum required peptide length
Area_cutoff=0 # mininum required peak area
Intensity_cutoff=0 # mininum required intensity
No_candidates=20 # maximum number of candidates submitted
#get variables for writing to output
current_vars=set(dir()).difference(starting_vars)
parameters=[i for i in locals().items() if i[0] in current_vars]
#%% parameters Part 2: Composition
starting_vars=dir()
#filter parameters
comp_ALC_cutoff=70 # miniumum required ALC score (Peaks score)
comp_Score_cutoff=-0.1 # mininum required score cutoff (DeepNovo score)
comp_ppm_cutoff=15 # maximum allowed ppm
comp_length_cutoff=7 # mininum required peptide length
comp_Area_cutoff=0 # mininum required peak area
comp_Intensity_cutoff=0 # mininum required intensity
cutbranch=3 # minimum amount of unique peptides per taxonomic branch in denoising
#Which ranks to annotate
comp_ranks=["superkingdom","phylum","class","order","family","genus","species"]
#quantification parameters
tax_count_targets=["Spectral_counts"]#,"Area","Intensity"]
tax_count_methods=["average","total","topx"]
tax_topx=5
tax_normalize=False # normalize quantification to total for that rank
#get variables for writing to output
current_vars=set(dir()).difference(starting_vars)
comp_parameters=[i for i in locals().items() if i[0] in current_vars]
#%% Parameters Part 3: Function
starting_vars=dir()
#which pathways to include
Pathways=['09100 Metabolism',
'09120 Genetic Information Processing'
'09130 Environmental Information Processing'
'09140 Cellular Processes']
#which levels to include
cats=["cat1","cat2","cat3","cat4"]
#quantification parameters
fun_count_targets=["Spectral_counts"]#,"Area","Intensity"]
fun_count_methods=["average","total","topx"]
fun_topx=5
fun_normalize=False # normalize quantification to total for that rank
current_vars=set(dir()).difference(starting_vars)
fun_parameters=comp_parameters+[i for i in locals().items() if i[0] in current_vars]
#%% Modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random, re, requests
import threading, time, string
from itertools import chain
from collections import Counter
from openpyxl import load_workbook
#%% change directory to script directory (should work on windows and mac)
import os
from pathlib import Path
from inspect import getsourcefile
os.chdir(str(Path(os.path.abspath(getsourcefile(lambda:0))).parents[0]))
print(os.getcwd())
#%% Functions
# make stacked bars
def stacked_bar(ranks,df,ylabel,pathout,filename):
labels=[i.split("_name")[0] for i in ranks];
countcols=[i for i in df.columns if "count" in i]
absdat=df[countcols]
absdat.columns=labels
normdat= absdat/np.nansum(absdat.to_numpy(),axis=0)
figure, axes = plt.subplots(1, 2)
ax1=absdat.T.plot(ax=axes[0],kind='bar', stacked=True, figsize=(10, 6), legend=False)
ax1.set_ylabel(ylabel)
ax1.set_xlabel('taxonomic ranks')
ax1.set_xticklabels(labels, rotation=30)
ax1.set_title("Absolute")
ax2=normdat.T.plot(ax=axes[1],kind='bar', stacked=True, figsize=(10, 6), legend=False)
ax2.set_ylabel(ylabel)
ax2.set_xlabel('taxonomic ranks')
ax2.set_xticklabels(labels, rotation=30)
ax2.set_title("Normalized")
plt.gcf().suptitle(Path(basename).stem)
figname=str(Path(pathout,(filename.replace(os.path.splitext(filename)[1], '.png'))))
plt.savefig(figname)
return figname
# Threading unipept
def unipept_scrape(r,url):
while True:
try:
r.extend(requests.get(url,stream=True).json())
break
except:
"sleeping"
time.sleep(2)
# chunker
def chunks(lst,n):
for i in range(0,len(lst),n):
yield lst[i:i+n]
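# e.g. list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]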
#peptide mass calculator
std_aa_mass = {'G': 57.02146, 'A': 71.03711, 'S': 87.03203, 'P': 97.05276, 'V': 99.06841,
'T': 101.04768,'C': 103.00919,'L': 113.08406,'I': 113.08406,'J': 113.08406,
'N': 114.04293,'D': 115.02694,'Q': 128.05858,'K': 128.09496,'E': 129.04259,
'M': 131.04049,'H': 137.05891,'F': 147.06841,'U': 150.95364,'R': 156.10111,
'Y': 163.06333,'W': 186.07931,'O': 237.14773}
def pep_mass_calc(x,std_aa_mass=std_aa_mass):
return sum(std_aa_mass.get(aa) for aa in x if aa in std_aa_mass.keys())+18.01056
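# e.g. pep_mass_calc("PEPTIDE") ~= 799.36 (sum of monoisotopic residue masses + water)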
#convert deepnovo to peaks format
def convert_deepnovo(xlsdf):
xlsdf=xlsdf[xlsdf['predicted_sequence'].notnull()]
mass=np.zeros((1,len(xlsdf)))
mass+=xlsdf['predicted_sequence'].str.count("(Carbamidomethylation)")*57.021463
mass+=xlsdf['predicted_sequence'].str.count("(Oxidation)")*15.994915
xlsdf['Peptide']=xlsdf['predicted_sequence'].apply(lambda x: re.sub("[\(\[].*?[\)\]]", "", x).replace(",","")) #remove ptms in peptides
xlsdf['Peptide']=xlsdf['Peptide'].apply(lambda x: re.sub("[\(\[].*?[\)\]]", "", x).replace(",","")) #remove ptms in peptides
#recalculate peptide mass foor DeepNovo
xlsdf['calculated_mass']=mass[0]+xlsdf['Peptide'].apply(lambda x: pep_mass_calc(x)).values
xlsdf['precursor_mass']=xlsdf['precursor_mz']*xlsdf['precursor_charge']-xlsdf['precursor_charge']*1.007277
xlsdf["ppm"]=(1000000/xlsdf['calculated_mass'])*(xlsdf['calculated_mass']-xlsdf['precursor_mass'])
#rename columns
if "feature_id" in xlsdf.columns: xlsdf=xlsdf.rename(columns={"feature_id":"Scan"})
if "feature_area" in xlsdf.columns: xlsdf=xlsdf.rename(columns={"feature_area":"Area"})
if "feature_intensity" in xlsdf.columns: xlsdf=xlsdf.rename(columns={"feature_intensity":"Intensity"})
return xlsdf
#non essential
#only use this when submitting a fasta file
def unipept_digest(records):
for i in records:
peps=re.split('[RK](?!P)',str(i.seq)) #cleave rule: trypsin, no Proline
lenfilt=[i for i in peps if len(i) >5 and len(i) <50] #length filter >5, <50
for filtpeps in lenfilt: #unreadable amino acids
if "*" not in filtpeps and "Z" not in filtpeps and "B" not in filtpeps and "X" not in filtpeps:
yield (i.id ,filtpeps)
import Bio
from Bio import SeqIO
def fasta_digest(filename):
record_dict = SeqIO.parse(filename, "fasta")
filtpeps=unipept_digest(record_dict)
xlsdf=pd.DataFrame(list(filtpeps),columns=["Acession","Peptide"])
return xlsdf
#%%
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
"Part 1: Annotation with unipept"
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
pathin="input_peaks" #input folder
s=time.time()
#for filename in ["/Volumes/Seagate_SSD/NovoBridge-main 2/input_peaks/de_novo_peptides.csv"]:
for filename in ["/Volumes/Seagate_SSD/NovoBridge-main 2/input_peaks/all_de_novo_candidates.csv"]:
#for filename in os.listdir(pathin):
filepath=str(Path(pathin,filename))
if Path(filename).stem[0].isalnum(): # check if not a hidden file, filename should start with alphanumeric
for Randomize in [False]: #, 'scramble']:
xlsdf=[]
if filename.endswith('.txt'):
with open(filename,"r") as f:
xlsdf=pd.DataFrame(f.readlines())
#fasta format input data
if filename.endswith(('.fasta','.fa','.faa')): xlsdf=fasta_digest(filepath)
#tabular format
#added dynamic delimiter detection
if filename.endswith('.csv'):
xlsdf=pd.read_csv(str(Path(pathin,filename)),sep=",")
if "Peptide" not in xlsdf.columns:
delims=[i[0] for i in Counter([i for i in str(xlsdf.iloc[0]) if not i.isalnum()]).most_common()]
for delim in delims:
if delim==" ": sep="\s"
try:
xlsdf=pd.read_csv(str(Path(pathin,filename)),sep=delim)
if "Peptide" in xlsdf.columns:
break
except:
pass
if filename.endswith('.tsv'): xlsdf=pd.read_csv(filepath,sep="\t")
#excel input
if filename.endswith('.xlsx') or filename.endswith('.xls'): xlsdf=pd.read_excel(filepath,engine='openpyxl')
if len(xlsdf):
# if DeepNovo output, convert to PEAKS output
if 'predicted_sequence' in xlsdf.columns: xlsdf=convert_deepnovo(xlsdf)
xlsdf["Tag Length"]=xlsdf['Peptide'].apply(len)
xlsdf=xlsdf.fillna("0") #replace nans with zero
#set datatypes as float
for i in ['Tag Length','ALC (%)','predicted_score','ppm','Area','Intensity']:
if i in xlsdf.columns:
xlsdf[i]=xlsdf[i].astype(float)
#add Scan if not present, add Scan
if 'Scan' not in xlsdf.columns:
xlsdf['Scan']=list(range(0,len(xlsdf)))
# recalibrate ppm
if 'ppm' in xlsdf.columns:
hist, bin_edges = np.histogram(xlsdf['ppm'].tolist(),bins=100)
xlsdf['ppm']=abs(xlsdf['ppm'] -(bin_edges[np.where(hist==np.max(hist))[0]]
+bin_edges[np.where(hist==np.max(hist))[0]+1])/2)
#remove ptms in peptides
peplist=xlsdf['Peptide'].tolist()
xlsdf['Peptide']=list(map(lambda x: re.sub("[\(\[].*?[\)\]]", "", x),peplist))
# filter by ALC scores, ppm and peptide lengths
filt="on"
if filt=="on":
if 'Tag Length' in xlsdf.columns:
xlsdf=xlsdf[xlsdf['Tag Length']>=length_cutoff]
if 'ALC (%)' in xlsdf.columns:
xlsdf=xlsdf[xlsdf['ALC (%)']>=ALC_cutoff] #scoring Peaks
if 'predicted_score' in xlsdf.columns:
xlsdf=xlsdf[xlsdf['predicted_score']<=Score_cutoff] #scoring DeepNovo
if 'ppm' in xlsdf.columns:
xlsdf=xlsdf[xlsdf['ppm']<=ppm_cutoff]
if 'Area' in xlsdf.columns:
xlsdf=xlsdf[xlsdf['Area']>=Area_cutoff]
if 'Intensity' in xlsdf.columns:
xlsdf=xlsdf[xlsdf['Intensity']>=Intensity_cutoff]
#%% randomization (optional)
if Randomize=='scramble': #scramble all in front of cleavage site
xlsdf['Peptide']=[''.join(random.sample(i[:len(i)-1], len(i)-1)+[i[-1]]) for i in xlsdf['Peptide']]
#%% cleave misscleaved peptides
xlsdf=xlsdf[xlsdf["Peptide"].notnull()]
xlsdf["Peptide"]=xlsdf["Peptide"].apply(lambda x: [i for i in re.split( r'(?<=[RK])(?=[^P])',x) if len(i)>=length_cutoff])
xlsdf=xlsdf.explode("Peptide")
xlsdf=xlsdf[xlsdf["Peptide"].notnull()]
#if multiple candidates, sort by scan, add candidate list
if 'ALC (%)' in xlsdf.columns: xlsdf=xlsdf.sort_values(['Scan', 'ALC (%)','Tag Length'], ascending=[1, 0, 0])
elif 'predicted_score' in xlsdf.columns: xlsdf=xlsdf.sort_values(['Scan', 'predicted_score','Tag Length'], ascending=[1, 0, 0])
else: xlsdf=xlsdf.sort_values(['Scan','Tag Length'], ascending=[1, 0, 0])
xlsdf['Candidate'] = xlsdf.groupby('Scan').cumcount() + 1
#%% submit peptides to unipept
#urls
twl='http://api.unipept.ugent.be/api/v1/pept2lca.json?input[]=';
twr='&equate_il=true&extra=true&names=true';
fwl='http://api.unipept.ugent.be/api/v1/pept2funct.json?input[]=';
fwr='&equate_il=true';
ranks=[rank+"_name" for rank in comp_ranks]
fields=["peptide"]+ranks
base_thread=threading.active_count()
unipeps=np.unique(xlsdf['Peptide'])
batchsize=100
steps=list(range(0,len(unipeps),batchsize))
taxalist=list()
funlist=list()
threads=[]
counter=0
for chunk in chunks(unipeps,batchsize):
time.sleep(0.1)
counter+=1
print(counter)
query="&input[]=".join(chunk)
#taxonomy
turl=twl+query+twr
t=threading.Thread(target=unipept_scrape, args=[taxalist,turl])
t.start()
threads.append(t)
#function
furl=fwl+query+fwr
t=threading.Thread(target=unipept_scrape, args=[funlist,furl])
t.start()
threads.append(t)
#unwind in case of thread overload, unipept does not like too many requests
cur_thread=threading.active_count()
if (cur_thread-base_thread)>100:
print("unwinding, query at: "+str(counter/len(unipeps)))
for thread in threads:
thread.join()
threads=[] #this seems to act different on windows?
for thread in threads:
thread.join()
#%% post processing
#parse taxonomy dataframe
fields=["peptide","taxon_name","superkingdom_name","phylum_name","class_name","order_name","family_name","genus_name","species_name"];
taxa=pd.DataFrame(taxalist)
[taxa.pop(x) for x in taxa.columns if x not in fields]
taxa=taxa.rename(columns={"peptide":"Peptide"})
#parse function dataframe
funs=pd.DataFrame(funlist)
funs["ec"]= funs["ec"].apply( lambda x: " ".join(pd.json_normalize(x)["ec_number"]) if x else [])
funs["go"]= funs["go"].apply( lambda x: " ".join(pd.json_normalize(x)["go_term"]) if x else [])
funs["ipr"]=funs["ipr"].apply(lambda x: " ".join(pd.json_normalize(x)["code"]) if x else [])
funs=funs.mask(funs.applymap(str).eq('[]')).fillna("") #remove empty lists
funs=funs.rename(columns={"peptide":"Peptide"})
xlsdf=xlsdf.merge(taxa,on="Peptide",how="left")
#select best candidates:
has_hit=xlsdf[xlsdf["taxon_name"].notnull()]
best_hit=has_hit.sort_values(["Scan","Candidate"]).groupby("Scan",sort=False).apply(lambda x: x.iloc[0])
no_hit=xlsdf[~xlsdf["Scan"].isin(has_hit["Scan"].drop_duplicates())]
no_hit=no_hit[no_hit["Candidate"]==1]
xlsdf=pd.concat([best_hit,no_hit])
xlsdf=xlsdf.merge(funs,on="Peptide",how="left") #add funs
#%% write result
pathout="output_unipept"
if not os.path.exists(pathout): os.makedirs(pathout)
basename="unipept_nb+_"+Path(filename).stem+ '.tsv'
if Randomize=="scramble": basename="Rand_"+basename
outfilename=str(Path(pathout,basename))
xlsdf.to_csv(outfilename,sep="\t")
#%%
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
"Part 2: Compositional analysis"
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
#%% Pre_processing filtering & denoising
filt="on"
if filt=="on":
if 'Tag Length' in xlsdf.columns:
xlsdf=xlsdf[xlsdf['Tag Length']>=comp_length_cutoff]
if 'ALC (%)' in xlsdf.columns:
xlsdf=xlsdf[xlsdf['ALC (%)']>=comp_ALC_cutoff]
if 'predicted_score' in xlsdf.columns:
xlsdf=xlsdf[xlsdf['predicted_score']<=comp_Score_cutoff] #scoring DeepNovo
if 'ppm' in xlsdf.columns:
xlsdf=xlsdf[xlsdf['ppm']<=comp_ppm_cutoff]
if 'Area' in xlsdf.columns:
xlsdf=xlsdf[xlsdf['Area']>=comp_Area_cutoff]
if 'Intensity' in xlsdf.columns:
xlsdf=xlsdf[xlsdf['Intensity']>=comp_Intensity_cutoff]
denoise="on"
if denoise=="on":
unirows=xlsdf[["Peptide"]+ranks].drop_duplicates()[ranks].astype(str).values.tolist()
jbranch=["#*".join(i) for i in unirows]
fbranch=[branch for branch, counts in Counter(jbranch).items() if counts >= cutbranch]
allowed_taxids=set(chain(*[i.split("#*") for i in fbranch]))
for i in ranks:
xlsdf.loc[~xlsdf[i].isin(allowed_taxids),i]=""
#%% krona plot (only coded for spectral counting)
pathout="output_composition"
if not os.path.exists(pathout): os.makedirs(pathout)
if "krona_template.xlsm" in os.listdir():
grouped=xlsdf.groupby(ranks)
vals=grouped.size()
vals=vals.reset_index(name="Count")
vals=vals[~(vals[ranks]=="").all(axis=1)] #remove empty rows
#results
branches=vals[ranks].values.tolist()
counts= vals["Count"].tolist()
#fill gaps
for i in range(len(branches)):
j=[j for j in range(len(branches[i])) if branches[i][j]!=""]
for l in range(0,max(j)):
if branches[i][l]=="": branches[i][l]="annotation_gap"
vals[ranks]=branches
branchdf=vals
kronafilename=str(Path(pathout,"Spectral_counts_Krona_"+Path(filename).stem+ '.xlsm'))
letters=list(string.ascii_uppercase)
wb = load_workbook("krona_template.xlsm",read_only=False, keep_vba=True)
ws = wb.active
for i in range(0,len(branches)):
ws['A{0}'.format(4+i)].value=filename
ws['B{0}'.format(4+i)].value=counts[i]
for j in range(0,len(ranks)):
ws['{0}{1}'.format(letters[j+3],4+i)].value=branches[i][j]
wb.save(kronafilename)
else:
print("No Krona template found, proceding without generating krona plots")
#%% quantification and visual outputs, write output to xlsx
pathout="output_composition"
if not os.path.exists(pathout): os.makedirs(pathout)
if type(tax_count_targets)==str: tax_count_targets=list(tax_count_targets)
for target in tax_count_targets:
if target!="Spectral_counts" and target not in xlsdf.columns:
print("Target column: '"+str(target)+"' not found in data, please change parameter: 'count_targets'")
continue
if type(tax_count_methods)==str: tax_count_methods=list(tax_count_methods)
for method in tax_count_methods:
quantdf=pd.DataFrame()
for rank in ranks:
if target=="Spectral_counts":
values=Counter(xlsdf[rank].astype(str))
if "" in values.keys(): values.pop("") #ignore unassigned peptides
else:
xlsdf[target]=xlsdf[target].astype(float)
grouped=xlsdf.groupby(rank)[target]
if method=="average": values=grouped.mean()
elif method=="total":values=grouped.sum()
elif method=="topx": values=grouped.nlargest(tax_topx).sum(level=0)
values=pd.Series(values).sort_values(axis=0, ascending=False, inplace=False).reset_index()
values.columns=[rank,rank+"_count"]
values=values[values[rank]!=""]
if tax_normalize==True:
values[rank+"_count"]=values[rank+"_count"]/values[rank+"_count"].sum()*100 #normalize to 100%
quantdf = pd.concat([quantdf, values], axis=1)
#writing output
pathout="output_composition"
if not os.path.exists(pathout): os.makedirs(pathout)
quantdfs=quantdf.fillna(0)
namecols=[i for i in quantdf.columns if "count" not in i]
countcols=[i for i in quantdf.columns if "count" in i]
basename="nb+"+Path(filename).stem+ '.xlsx'
if method=="topx": method=method.replace("x",str(fun_topx))
if target=='Spectral_counts': basename="composition_"+target+"_"+basename
else: basename="composition_"+method+"_"+target+"_"+basename
if Randomize=="scramble": basename="Rand_"+basename
xlsfilename=str(Path(pathout,basename))
writer = pd.ExcelWriter(xlsfilename, engine='xlsxwriter')
pardf=pd.DataFrame(comp_parameters,columns=["Name","Value"])
pardf.loc[pardf["Name"]=="tax_count_targets","Value"]=target
pardf.loc[pardf["Name"]=="tax_count_methods","Value"]=method
if target=='Spectral_counts': pardf=pardf[pardf["Name"]!="tax_count_methods"]
pd.DataFrame(pardf.to_excel(writer, sheet_name='Parameters'))
quantdf[namecols].to_excel(writer, sheet_name='TAX_LINEAGES')
quantdf[countcols].to_excel(writer, sheet_name='TAX_COUNTS')
if target=='Spectral_counts': stacked_bar(ranks,quantdf,target,pathout, basename)
else: stacked_bar(ranks,quantdf,method+"_"+target,pathout, basename)
writer.save()
#%%
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
"Part 3: Functional analysis"
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
#%% pre processing
#preselect xlsdf to only have rows that have ec numbers
xlsdf=xlsdf[xlsdf["ec"].notnull()]
#calculte total area/intensity/spectral counts for normalization
#separate ec annotations for exact matching
xlsdf["ec"]=xlsdf["ec"].apply(lambda x: str(x).split(" "))
#%% quantification
#check if kegg database is present
if not any("keg.tsv" in i for i in os.listdir()):
print("no local kegg database found, please run download_utilities.py before functional annotation can be done")
else: #Load local kegg database
for tsv in os.listdir():
if tsv.endswith('keg.tsv') and tsv[0].isalnum():
keggdf = pd.read_csv(tsv,sep="\t")
#only select pathways that are in the parameters
keggdf=keggdf[keggdf.isin(Pathways).any(axis=1)]
#only select cats that are in the parameters
if "ec" not in cats: keggdf=keggdf.loc[keggdf.isin(Pathways).any(axis=1),cats+["ec"]]
else: keggdf=keggdf.loc[keggdf.isin(Pathways).any(axis=1),cats]
if type(fun_count_targets)==str: tax_count_methods=list(fun_count_targets)
for target in fun_count_targets:
if target!="Spectral_counts" and target not in xlsdf.columns:
print("Target column: '"+str(target)+"' not found in data, please change parameter: 'count_targets'")
continue
if type(fun_count_methods)==str: fun_count_methods=list(fun_count_methods)
for method in fun_count_methods:
#match pathways to annotations and explode
if target=="Spectral_counts": fundf=xlsdf[["Scan","ec"]]
else: fundf=xlsdf[["Scan","ec",target]]
fundf=fundf.explode("ec")
fundf=fundf[fundf["ec"].notnull()]
fundf=fundf[fundf["ec"]!=""]
fundf=fundf.merge(keggdf,how="left",on="ec")
quantdf=fundf[cats].drop_duplicates()
for cat in cats:
if target=="Spectral_counts": catdf=fundf[["Scan",cat]].drop_duplicates()
else: catdf=fundf[["Scan",cat,target]].drop_duplicates()
catdf=catdf.explode(cat)
if target=="Spectral_counts":
values=pd.Series(Counter(catdf[cat].astype(str))).rename_axis(cat)
else:
catdf[target]=catdf[target].astype(float)
grouped=catdf.groupby(cat)[target]
if method=="average": values=grouped.mean()
elif method=="total": values=grouped.sum()
elif method=="topx": values=grouped.nlargest(fun_topx).sum(level=0)
values=values.reset_index(drop=False)
values.columns=[cat,cat+"_"+target]
if fun_normalize==True: values[cat,"cat_"+target]=values[cat,cat+"_"+target]/sum(values)
quantdf=quantdf.merge(values,how="left",on=cat)
quantdf=quantdf.dropna()
#writing output
pathout="output_function"
if not os.path.exists(pathout): os.makedirs(pathout)
basename=Path(filename).stem+ '.xlsx'
if method=="topx": method=method.replace("x",str(fun_topx))
if target=='Spectral_counts': basename="function_"+target+"_"+basename
else: basename="function_"+method+"_"+target+"_"+basename
if Randomize=="scramble": basename="Rand_"+basename
xlsfilename=str(Path(pathout,basename))
writer = pd.ExcelWriter(xlsfilename, engine='xlsxwriter')
pardf= | pd.DataFrame(fun_parameters,columns=["Name","Value"]) | pandas.DataFrame |
# Python 3 Required. Tested in rh-python36 #
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import traceback
import time
import datetime
import multiprocessing
from matplotlib.dates import DateFormatter
from tcp_latency import measure_latency
# IP List for Reference
# 172.16.58.3 --- China Telecom Guangzhou (www.gz.gov.cn)
# 192.168.127.12 --- China Mobile Shanghai (www.shanghai.gov.cn)
# 172.16.58.3 --- China Unicom Dalian (www.dl.gov.cn)
# 192.168.127.12 --- China Mobile Guangdong (www.gd.gov.cn)
# 192.168.3.11 --- China Unicom Guangdong (www.gd.gov.cn)
# Specify the test IP/Domains || 测试IP/域名列表
# Specify the data storage file's name || 数据存储文件名列表
# Specify the test port || 测试端口列表
test_host_list = ['172.16.58.3','192.168.127.12', '172.16.58.3']
file_name_list = ['Ch-Telecom Guangzhou.txt', 'Ch-Mobile Shanghai.txt', \
'Ch-Unicom Dalian.txt']
port_list = [80,80,80]
# Specify the period of the plot (Unit: second) || 指定绘制时间范围(秒)
# Specify the interval between generating the plot (Unit: second) || 指定生成图像间隔(秒)
plot_timespan_list = [6*3600,3*86400,7*86400,15*86400,30*86400]
generating_interval = 60*5
# Data point number (Can be any large #) || 数据采集次数
# Wait time between each TCP connection (Unit: second) || 发起新TCP连接间隔(秒)
number_of_data_points = 365*8640
wait_time_for_each_connection = 10
# Modify the figures (Latency upper limit in plot & DPI) || 调整图像(延迟上限及DPI)
latency_plot_limit = 400
img_dpi = 300
# To avoid pandas warning message || 避免pandas警告信息
pd.plotting.register_matplotlib_converters()
def LatencyDataCollection(t_host_list, t_port_list, t_timeout, t_wait, t_file_name_list):
# Create a loop for data collection.
for i in range(number_of_data_points):
for ii in range(len(t_host_list)):
latency_ms = measure_latency(host=t_host_list[ii], port=t_port_list[ii], \
timeout=t_timeout, runs=1) # latency_ms is a list with one element.
if latency_ms[0] is None:
latency_ms = [0] # If timeout, use 0 to replace the None value.
time_at_measurement = datetime.datetime.now()
latency_value = latency_ms[0]
with open(t_file_name_list[ii], 'a') as data_file:
data_file.write('%s' % time_at_measurement + ',' + \
'%s' % latency_value + '\n')
time.sleep(t_wait) # Wait for next TCP connection.
return
def PlotGraph(import_file_list, p_frequency, p_timespan_list, p_wait):
jj = round(2*number_of_data_points*p_wait/p_frequency)
# Create a loop to update the plot.
for j in range(jj):
time.sleep(p_frequency) # Wait for generating next plot.
for p in range(len(p_timespan_list)):
fig, ax = plt.subplots()
plt.xlabel('Server Time in mm-dd-HH-MM')
plt.ylabel('Latency in ms')
plt.title('TCP Latency Stat Last %s Seconds' % p_timespan_list[p])
plt.ylim([0,latency_plot_limit])
myFmt = DateFormatter('%m-%d, %H:%M') # Set date-time format for x-axis.
# Plot from each data file.
for import_file in import_file_list:
x_raw, y_raw = np.loadtxt(import_file, \
dtype='str', delimiter=',', unpack=True)
# It's not the precise # of data points corresponds to that time duration,
# since the time consumed in TCP connection is ignored.
# It does not have significant impact on the plot.
number_of_lines_selected = round(p_timespan_list[p]/p_wait)
x_raw_selected = x_raw[-number_of_lines_selected:]
y_raw_selected = y_raw[-number_of_lines_selected:]
                x_time = pd.to_datetime(x_raw_selected)
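                # NOTE: the original snippet breaks off here. A minimal sketch of
                # the remaining plotting steps; the line style, legend label and
                # output filename are assumptions.
                y_latency = y_raw_selected.astype(float)
                ax.plot(x_time, y_latency, linewidth=0.8,
                        label=import_file.replace('.txt', ''))
            ax.xaxis.set_major_formatter(myFmt)
            ax.legend(loc='upper right', fontsize='small')
            fig.savefig('latency_last_%s_seconds.png' % p_timespan_list[p], dpi=img_dpi)
            plt.close(fig)
    return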
import pandas
import pytest
import modin.pandas as pd
import numpy as np
from .utils import test_data_values, test_data_keys, df_equals
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isna(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.isna(pandas_df)
modin_result = pd.isna(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.isna(pd.Series([1, np.nan, 2]))
pandas_result = pandas.isna(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isnull(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.isnull(pandas_df)
modin_result = pd.isnull(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.isnull(pd.Series([1, np.nan, 2]))
pandas_result = pandas.isnull(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.notna(pandas_df)
modin_result = pd.notna(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.notna(pd.Series([1, np.nan, 2]))
    pandas_result = pandas.notna(pandas.Series([1, np.nan, 2]))
    df_equals(modin_result, pandas_result)
import pandas as pd
import numpy as np
import os
import random
import json
import argparse
from random import shuffle
random.seed(42)
from configs.config import Config
def main():
parser = argparse.ArgumentParser()
parser.add_argument('config_path')
args = parser.parse_args()
# get config
with open(args.config_path, 'r') as f:
cfg = json.load(f)
config = Config(**cfg)
###################
# set paths
paths_images_train = config.train_data_refined_dir_ims.split(',')
print("00_gen_folds.py: path_images_train:", paths_images_train)
train_files = []
for p in paths_images_train:
train_files.extend(os.listdir(p))
print("train_files[:10]:", train_files[:10])
weight_save_path = os.path.join(config.path_results_root, 'weights', config.save_weights_dir)
os.makedirs(weight_save_path, exist_ok=True)
folds_save_path = os.path.join(weight_save_path, config.folds_file_name)
if os.path.exists(folds_save_path):
print("folds csv already exists:", folds_save_path)
return
else:
print ("folds_save_path:", folds_save_path)
shuffle(train_files)
s = {k.split('_')[0] for k in train_files}
d = {k: [v for v in train_files] for k in s}
folds = {}
if config.num_folds == 1:
nfolds = int(np.rint(1. / config.default_val_perc))
else:
nfolds = config.num_folds
idx = 0
for v in d.values():
for val in v:
folds[val] = idx % nfolds
idx+=1
    df = pd.Series(folds, name='fold')
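    # NOTE: the original script breaks off here. A minimal sketch of how the fold
    # assignment might be saved and the script invoked; the exact output format
    # and the main guard are assumptions.
    df.to_frame().to_csv(folds_save_path)
    print("folds written to:", folds_save_path)
if __name__ == "__main__":
    main()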
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: <NAME>
date: 2020/1/9 22:52
contact: <EMAIL>
desc: Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates
https://datacenter.jin10.com/economic
Federal Reserve interest rate decision report
European Central Bank decision report
Reserve Bank of New Zealand decision report
People's Bank of China decision report
Swiss National Bank decision report
Bank of England decision report
Reserve Bank of Australia decision report
Bank of Japan decision report
Bank of Russia decision report
Reserve Bank of India decision report
Central Bank of Brazil decision report
"""
import json
import time
import pandas as pd
import requests
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Fed interest rate decision report
def macro_bank_usa_interest_rate():
"""
    Federal Reserve interest rate decision report, data available from 1982-09-27 onwards
    https://datacenter.jin10.com/reportType/dc_usa_interest_rate_decision
    https://cdn.jin10.com/dc/reports/dc_usa_interest_rate_decision_all.js?v=1578581921
    :return: Fed interest rate decision report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国利率决议"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
temp_df.name = "usa_interest_rate"
return temp_df
# Jin10 Data Center - Economic Indicators - Central Bank Rates - ECB interest rate decision report
def macro_bank_euro_interest_rate():
"""
    European Central Bank interest rate decision report, data available from 1999-01-01 onwards
    https://datacenter.jin10.com/reportType/dc_interest_rate_decision
    https://cdn.jin10.com/dc/reports/dc_interest_rate_decision_all.js?v=1578581663
    :return: ECB interest rate decision report - latest value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["欧元区利率决议"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
temp_df.name = "euro_interest_rate"
return temp_df
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - Reserve Bank of New Zealand decision report
def macro_bank_newzealand_interest_rate():
"""
Reserve Bank of New Zealand (RBNZ) interest rate decision report; data available from 1999-04-01 to the present.
https://datacenter.jin10.com/reportType/dc_newzealand_interest_rate_decision
https://cdn.jin10.com/dc/reports/dc_newzealand_interest_rate_decision_all.js?v=1578582075
:return: latest value (%) of the RBNZ interest rate decision (the "今值(%)" column)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_newzealand_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["新西兰利率决议报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
temp_df.name = "newzealand_interest_rate"
return temp_df
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - People's Bank of China decision report
def macro_bank_china_interest_rate():
"""
People's Bank of China (PBoC) interest rate report; data available from 1991-05-01 to the present.
https://datacenter.jin10.com/reportType/dc_china_interest_rate_decision
https://cdn.jin10.com/dc/reports/dc_china_interest_rate_decision_all.js?v=1578582163
:return: latest value (%) of the PBoC interest rate report (the "今值(%)" column)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_china_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["中国人民银行利率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
temp_df.name = "china_interest_rate"
return temp_df
# Jin10 Data Center - Economic Indicators - Central Bank Rates - Major Central Bank Rates - Swiss National Bank decision report
def macro_bank_switzerland_interest_rate():
"""
Swiss National Bank (SNB) interest rate decision report; data available from 2008-03-13 to the present.
https://datacenter.jin10.com/reportType/dc_switzerland_interest_rate_decision
https://cdn.jin10.com/dc/reports/dc_switzerland_interest_rate_decision_all.js?v=1578582240
:return: latest value (%) of the SNB interest rate decision (the "今值(%)" column)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_switzerland_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["瑞士央行利率决议报告"] for item in json_data["list"]]
value_df = | pd.DataFrame(value_list) | pandas.DataFrame |
# coding: utf-8
# In[42]:
import pandas as pd
import numpy as np
store = pd.read_csv("C:/Users/Administrator/Desktop/Jupiter notebooks/Store.csv", header = 0, encoding="latin")
store.head(n=5)
# In[16]:
#1.How many unique cities are the orders being delivered to
cities = store.City.unique()
print(len(cities))
# In[14]:
#• What is the total quantity sold in the East Region?
store_east = store[store.Region =='East']
total_quantity = np.sum(store_east.Quantity)
print(total_quantity)
# In[17]:
#• Find the sum of the quantity sold in the East Region
store_east = store[store.Region =='East']
total_quantity = np.sum(store_east.Quantity)
print(total_quantity)
# In[19]:
#• In the south region sort the sales in decreasing order
store[store.Region =='South'].sort_values('Sales', ascending=False)
# In[22]:
#• Find the mean of quantity for every region
store.groupby('Region')['Quantity'].agg(np.mean)
# In[21]:
#• Find the mean of sales for every category
store.groupby('Category')['Sales'].agg(np.mean)
# In[24]:
#• Find the max, min, sum of sales and profit for every category
store.groupby('Category')[['Sales','Profit']].agg({'max', 'min','sum'})
# In[25]:
#• Find sum of sales and max profit for every segment
store.groupby('Segment').agg({'Sales':np.sum,'Profit':np.max})
# In[26]:
#• For every segment find the mean of the discount
store.groupby('Segment')['Discount'].agg(np.mean)
#• For every segment find the most profitable customers
def get_cid(profit):
profit = profit.max()
return store[store.Profit == profit]['Customer Name']
group_seg = store.groupby('Segment').agg({'Profit':[get_cid,'max']})
print(group_seg)
# In[62]:
a = [store.groupby('Segment').max()['Profit']]
b= [store.Profit]
store[['Customer Name','Segment']][np.in1d(b,a)]
# In[31]:
#• What are the top 5 categories that give maximum profit?
top5_categories = store.groupby('Category')['Profit'].sum().sort_values(ascending=False).head(5)
print(top5_categories)
# In[38]:
#• What is the Total Sales, Quantity, Discount, Profit across Total US.
store.groupby('Region').sum()[['Sales','Quantity','Discount','Profit']]
# In[60]:
#• How many times has it taken more than 5 days from placing an order to shipping
store['Order Date'] =pd.to_datetime(store['Order Date'])
store['Ship Date'] = pd.to_datetime(store['Ship Date'])
TimeTaken = store['Ship Date'] - store['Order Date']
sum(TimeTaken >'5 days')
# In[71]:
#• Find the total number of orders in every category which has been shipped with a duration > 5 days
store['Order Date'] =pd.to_datetime(store['Order Date'])
store['Ship Date'] = pd.to_datetime(store['Ship Date'])
store['TimeTaken'] = store['Ship Date']- store['Order Date']
s1 = store[store.TimeTaken > '5 days']
s1.groupby('Category').count()['TimeTaken']
# In[75]:
#• What’s the percentage of items which has been shipped within 5 days
store['Order Date'] = | pd.to_datetime(store['Order Date']) | pandas.to_datetime |
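# Possible continuation (an assumption -- the row above is truncated at the pd.to_datetime call):
# store['Ship Date'] = pd.to_datetime(store['Ship Date'])
# within_5 = (store['Ship Date'] - store['Order Date']) <= pd.Timedelta('5 days')
# print(100 * within_5.mean())  # percentage of orders shipped within 5 days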
"""
Analysis for Thermal Field Double state VQE experiment
"""
import os
import matplotlib.pylab as pl
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import numpy as np
import pycqed.analysis_v2.base_analysis as ba
from pycqed.analysis.analysis_toolbox import get_datafilepath_from_timestamp
from pycqed.analysis.tools.plotting import set_xlabel, set_ylabel, \
cmap_to_alpha, cmap_first_to_alpha
import pycqed.measurement.hdf5_data as h5d
from pycqed.analysis import analysis_toolbox as a_tools
import pandas as pd
from scipy import linalg
import cmath as cm
class TFD_Analysis_Pauli_Strings(ba.BaseDataAnalysis):
def __init__(self, t_start: str = None, t_stop: str = None,
label: str = '',
g: float = 1, T=1,
options_dict: dict = None, extract_only: bool = False,
auto=True):
"""
Analysis for the Thermal Field Double state QAOA experiment.
Args:
g (float):
coupling strength (in theorist units)
T (float):
temperature (in theorist units)
"""
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
options_dict=options_dict,
extract_only=extract_only)
self.g = g
self.T = T
if auto:
self.run_analysis()
def extract_data(self):
"""
This is a new style (sept 2019) data extraction.
This could at some point move to a higher level class.
"""
self.get_timestamps()
self.timestamp = self.timestamps[0]
data_fp = get_datafilepath_from_timestamp(self.timestamp)
param_spec = {
'data': ('Experimental Data/Data', 'dset'),
'combinations': ('Experimental Data/Experimental Metadata/combinations', 'dset'),
'value_names': ('Experimental Data', 'attr:value_names')}
self.raw_data_dict = h5d.extract_pars_from_datafile(
data_fp, param_spec)
# For some reason the list is stored a list of length 1 arrays...
self.raw_data_dict['combinations'] = [
c[0] for c in self.raw_data_dict['combinations']]
# Parts added to be compatible with base analysis data requirements
self.raw_data_dict['timestamps'] = self.timestamps
self.raw_data_dict['folder'] = os.path.split(data_fp)[0]
def process_data(self):
self.proc_data_dict = {}
# combinations = ['X', 'Z', '0000', '1111']
combinations = self.raw_data_dict['combinations']
raw_shots = self.raw_data_dict['data'][:, 1:]
value_names = self.raw_data_dict['value_names']
binned_data = {}
for i, ch_name in enumerate(value_names):
ch_data = raw_shots[:, i] # select shots per channel
binned_data[ch_name] = {}
for j, comb in enumerate(combinations):
binned_data[ch_name][comb] = np.mean(
ch_data[j::len(combinations)]) #get average for shots per combination
#data per combination is stored with index steps of len(combinations) starting from j.
# Calculate mean voltages to determine threshold
mn_voltages = {}
for i, ch_name in enumerate(value_names):
ch_data = binned_data[ch_name] # select per channel
mn_voltages[ch_name] = {'0': [], '1': []}
for c in combinations:
if c == '0000':
mn_voltages[ch_name]['0'].append(ch_data[c])
elif c == '1111':
mn_voltages[ch_name]['1'].append(ch_data[c])
mn_voltages[ch_name]['0'] = np.mean(mn_voltages[ch_name]['0'])
mn_voltages[ch_name]['1'] = np.mean(mn_voltages[ch_name]['1'])
mn_voltages[ch_name]['threshold'] = np.mean(
[mn_voltages[ch_name]['0'], mn_voltages[ch_name]['1']])
self.proc_data_dict['mn_voltages'] = mn_voltages
# Digitize data
digitized_data = np.zeros(raw_shots.shape)
for i, vn in enumerate(value_names):
digitized_data[:, i] = np.array(
raw_shots[:, i] > mn_voltages[vn]['threshold'], dtype=int)
# Calculating correlations when values are expressed as
# eigenvalues (+- 1) is easier
digitized_data_pm = digitized_data
digitized_data_pm[digitized_data_pm < .5] = -1
# Bin the Pauli Terms
pauli_terms = {'ZZII': 0, 'XIII': 0, 'IXII': 0,
'IIZZ': 0, 'IIXI': 0, 'IIIX': 0,
'ZIZI': 0, 'IZIZ': 0, 'XIXI': 0, 'IXIX': 0}
x_cnt = 0
z_cnt = 0
for i, row in enumerate(digitized_data_pm):
comb = combinations[i % len(combinations)]
if comb == 'X' or comb == 'X-IIII':
x_cnt += 1
pauli_terms['XIII'] += row[0]
pauli_terms['IXII'] += row[1]
pauli_terms['IIXI'] += row[2]
pauli_terms['IIIX'] += row[3]
pauli_terms['XIXI'] += row[0]*row[2]
pauli_terms['IXIX'] += row[1]*row[3]
elif comb == 'Z' or comb == 'Z-IIII':
z_cnt += 1
pauli_terms['ZZII'] += row[0]*row[1]
pauli_terms['IIZZ'] += row[2]*row[3]
pauli_terms['ZIZI'] += row[0]*row[2]
pauli_terms['IZIZ'] += row[1]*row[3]
# Normalize the pauli terms
for key, val in pauli_terms.items():
pauli_terms[key] = val/x_cnt
self.proc_data_dict['pauli_terms'] = pauli_terms
self.proc_data_dict['energy_terms'] = calc_tfd_hamiltonian(
pauli_terms, g=self.g, T=self.T)
self.proc_data_dict['quantities_of_interest'] = {
'g': self.g, 'T': self.T,
**self.proc_data_dict['pauli_terms'],
**self.proc_data_dict['energy_terms']}
def prepare_plots(self):
self.plot_dicts['pauli_operators_Strings'] = {
'plotfn': plot_pauli_ops,
'pauli_terms': self.proc_data_dict['pauli_terms'],
'energy_terms': self.proc_data_dict['energy_terms']
}
def calc_tfd_hamiltonian(pauli_terms: dict, g: float = 1, T=1):
"""
Calculate the thermal field double Hamiltonian expectation value.
Args:
pauli_terms (dict):
dictionary containing the expectation values.
Keys are of the form "XIII", "ZIZI"
Hamiltonian is given by
H = H_A + H_B - T H_AB
Individual terms H_A:
H_A = (Z_1^A * Z_2^A) + g (X_1^A* I_2^A) + g(I_1^A * X_2^A)
<H_A> = ZZII + g XIII + g IXII
Individual terms H_B:
H_B = (Z_1^B * Z_2^B) + g (X_1^B* I_2^B) + g(I_1^B * X_2^B)
<H_B> = IIZZ + g IIXI + g IIIX
Individual terms H_AB:
H_AB = (Z_1^A * Z_1^B)+(Z_2^A * Z_2^B) + (X_1^A* X_1^B)+(X_2^A * X_2^B)
<H_AB> = ZIZI + IZIZ + XIXI + IXIX
"""
factor=1.0
H_A = factor*pauli_terms['ZZII'] + g*pauli_terms['XIII'] + g*pauli_terms['IXII']
H_B = factor*pauli_terms['IIZZ'] + g*pauli_terms['IIXI'] + g*pauli_terms['IIIX']
H_AB = pauli_terms['ZIZI'] + pauli_terms['IZIZ'] + \
pauli_terms['XIXI'] + pauli_terms['IXIX']
if np.isinf(T):
H = -1*H_AB
else:
H = H_A + H_B - (T**factor)*H_AB
return {'H': H, 'H_A': H_A, 'H_B': H_B, 'H_AB': H_AB}
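# A small worked example of calc_tfd_hamiltonian (illustrative values, not measured data):
# with every correlator in pauli_terms set to +1, H_A = H_B = 1 + 2g and H_AB = 4,
# so for finite T the returned energy is H = 2*(1 + 2*g) - 4*T.
def _example_tfd_energy(g: float = 1.0, T: float = 1.0):
    example_terms = {'ZZII': 1.0, 'XIII': 1.0, 'IXII': 1.0,
                     'IIZZ': 1.0, 'IIXI': 1.0, 'IIIX': 1.0,
                     'ZIZI': 1.0, 'IZIZ': 1.0, 'XIXI': 1.0, 'IXIX': 1.0}
    return calc_tfd_hamiltonian(example_terms, g=g, T=T)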
def plot_pauli_ops(pauli_terms, energy_terms, ax=None, **kw):
if ax is None:
f, ax = plt.subplots()
labels = pauli_terms.keys()
for i, label in enumerate(labels):
if i < 3:
c = 'r'
elif i < 6:
c = 'b'
else:
c = 'purple'
ax.bar(i, pauli_terms[label], color=c, align='center')
ax.set_xticks(np.arange(len(labels)))
ax.set_xticklabels(labels)
ax.text(1, .5, '$H_A=${:.2f}'.format(energy_terms['H_A']))
ax.text(4, .6, '$H_B=${:.2f}'.format(energy_terms['H_B']))
ax.text(7, .7, '$H_{AB}=$'+'{:.2f}'.format(energy_terms['H_AB']))
ax.set_ylabel('Expectation value')
ax.set_ylim(-1.05, 1.05)
ax.set_title('Digitized pauli expectation values')
def plot_all_pauli_ops(full_dict, ax=None, **kw):
if ax is None:
f, ax = plt.subplots()
labels = full_dict.keys()
for i, label in enumerate(labels):
if 'ZZII' in label or 'IIZZ' in label or 'XXII' in label or 'IIXX' in label:
c = 'r'
elif 'ZIZI' in label or 'IZIZ' in label or 'XIXI' in label or 'IXIX' in label:
c = 'b'
else:
c = 'purple'
ax.bar(i, full_dict[label], color=c, align='center')
ax.set_xticks(np.arange(len(labels)))
ax.set_xticklabels(labels, rotation=60)
ax.text(1, -.5, '$Inter=${:.2f}'.format(np.abs(full_dict['ZIZI'])+np.abs(full_dict['IZIZ'])+
np.abs(full_dict['XIXI'])+np.abs(full_dict['IXIX'])))
ax.text(15, -.5, '$Intra=${:.2f}'.format(np.abs(full_dict['ZZII'])+np.abs(full_dict['IIZZ'])+
np.abs(full_dict['XXII'])+np.abs(full_dict['IIXX'])))
ax.set_ylabel('Expectation value')
ax.set_ylim(-1.05, 1.05)
ax.set_title('All pauli expectation values')
############################################
# Addition from 18-02-2020
############################################
def plot_expectation_values_TFD(full_dict, qubit_order=['D1', 'Z1', 'X1', 'D3'],
system_A_qubits=['X1', 'D3'],
system_B_qubits=['D1', 'Z1'], bases = ['Z', 'X'],
ax=None, T:float = None,
exact_dict: dict = None, **kw):
if ax is None:
f, ax = plt.subplots(figsize=(12,5))
else:
f = ax.get_figure()
f.set_figwidth(12)
f.set_figheight(10)
operators = full_dict.keys()
color_dict = dict()
labels = ['IIII']
color_dict['IIII'] = 'purple'
for i, operator in enumerate(operators):
for j, basis in enumerate(bases):
if basis in operator:
correlators = ','.join([qubit_order[i] for i, j in enumerate(operator) if j != 'I'])
label = r'{}-${}$'.format(basis, correlators)
labels.append(label)
if len(label) < 10:
if (system_A_qubits[0] in label and system_A_qubits[1] in label):
color_dict[label] = 'r'
elif (system_B_qubits[0] in label and system_B_qubits[1] in label):
color_dict[label] = 'r'
elif (system_A_qubits[0] in label and system_B_qubits[0] in label):
color_dict[label] = 'b'
elif (system_A_qubits[1] in label and system_B_qubits[1] in label):
color_dict[label] = 'b'
else:
color_dict[label] = 'purple'
else:
color_dict[label] = 'purple'
for i, operator in enumerate(operators):
ax.bar(i, full_dict[operator], color=color_dict[labels[i]], align='center', zorder = 1)
if exact_dict is not None:
T_idx = exact_dict['T'].index(T)
ax.bar(list(full_dict).index(operator), exact_dict[operator][T_idx], fill=False, linestyle='--', edgecolor='black', align='center', zorder = 2)
ax.set_xticks(np.arange(len(labels)))
ax.set_xticklabels(labels, rotation=75)
ax.text(1, -.5, '$Inter=${:.2f}'.format(np.abs(full_dict['ZIZI'])+np.abs(full_dict['IZIZ'])+
np.abs(full_dict['XIXI'])+np.abs(full_dict['IXIX'])))
ax.text(15, -.5, '$Intra=${:.2f}'.format(np.abs(full_dict['ZZII'])+np.abs(full_dict['IIZZ'])+
np.abs(full_dict['XXII'])+np.abs(full_dict['IIXX'])))
ax.set_ylabel('Expectation value')
ax.set_ylim(-1.05, 1.05)
ax.set_title('Expectation values for pauli operators')
return f, ax
class TFD_versus_temperature_analysis(ba.BaseDataAnalysis):
def __init__(self, t_start: str = None, t_stop: str = None,
label: str = '',
options_dict: dict = None, extract_only: bool = False,
auto=True, operators=None, exact_dict: dict = None):
"""
Analysis for the Thermal Field Double QAOA experiment. Plots expectation values versus temperatures.
Args:
g (float):
coupling strength (in theorist units)
T (float):
temperature (in theorist units)
"""
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
options_dict=options_dict,
extract_only=extract_only)
if operators is not None:
self.operators = operators
else:
self.operators = None
if exact_dict is not None:
self.exact_dict = exact_dict
else:
self.exact_dict = None
if auto:
self.run_analysis()
def extract_data(self):
"""
Extract pauli terms from multiple hd5 files.
"""
self.raw_data_dict = {}
self.timestamps = a_tools.get_timestamps_in_range(
self.t_start, self.t_stop,
label=self.labels)
for ts in self.timestamps:
data_fp = get_datafilepath_from_timestamp(ts)
param_spec = {'TFD_dict': ('Analysis/quantities_of_interest', 'attr:all_attr'),
'tomo_dict': ('Analysis/quantities_of_interest/full_tomo_dict', 'attr:all_attr')}
self.raw_data_dict[ts] = h5d.extract_pars_from_datafile(data_fp, param_spec)
# Parts added to be compatible with base analysis data requirements
self.raw_data_dict['timestamps'] = self.timestamps
self.raw_data_dict['folder'] = os.path.split(data_fp)[0]
def process_data(self):
self.proc_data_dict = {}
self.proc_data_dict['timestamps'] = self.raw_data_dict['timestamps']
self.proc_data_dict['T'] = [self.raw_data_dict[ts]['TFD_dict']['T'] for ts in self.proc_data_dict['timestamps']]
for i, operator in enumerate(self.operators):
if '+' in operator:
seperate_operators = operator.split('+')
self.proc_data_dict[operator] = np.zeros(len(self.proc_data_dict['timestamps']))
for sep in seperate_operators:
self.proc_data_dict[operator] += np.array([self.raw_data_dict[ts]['tomo_dict'][sep] for ts in self.proc_data_dict['timestamps']])
self.proc_data_dict[operator] = list(self.proc_data_dict[operator])
else:
self.proc_data_dict[operator] = [self.raw_data_dict[ts]['tomo_dict'][operator] for ts in self.proc_data_dict['timestamps']]
def prepare_plots(self):
self.plot_dicts['pauli_vs_temperature'] = {
'plotfn': plot_TFD_versus_T,
'tomo_dict': self.proc_data_dict,
'operators': self.operators,
'exact_dict': self.exact_dict,
'numplotsy': len(self.operators),
'presentation_mode': True
}
def plot_TFD_versus_T(tomo_dict, operators=None, beta=False, ax=None, ax_dict=None, figsize=(10, 10), exact_dict=None, **kw):
if ax is None:
fig, ax = plt.subplots(len(operators), figsize=figsize)
else:
fig = ax[0].get_figure()
fig.set_figwidth(10)
fig.set_figheight(15)
if beta == True:
x_label = 'Beta'
x = [1/T for T in tomo_dict['T']]
if exact_dict is not None:
x_exact = [1/T for T in exact_dict['T']]
else:
x_label = 'Temperature'
x = tomo_dict['T']
if exact_dict is not None:
x_exact = exact_dict['T']
for i, operator in enumerate(operators):
ax[i].plot(x, tomo_dict[operator], color='red', label='experiment')
ax[i].scatter(x, tomo_dict[operator], facecolor='red')
if exact_dict is not None:
ax[i].plot(x_exact, exact_dict[operator], color = 'black', label='exact')
ax[i].scatter(x_exact, exact_dict[operator], facecolor = 'black')
ax[i].set_xlabel(x_label)
ax[i].set_ylabel(operator)
ax[i].legend()
if '+' in operator:
ax[i].set_ylim(-2, 2)
else:
ax[i].set_ylim(-1, 1)
return fig, ax
"""
Fidelity analysis functions added 02-25-2020
"""
def operator(operator_string):
"""
Takes a string made of the characters I, X, Y, Z (and H) and returns the
Kronecker (tensor) product of the corresponding single-qubit operators,
each normalized by a factor of 1/2.
"""
# Single-qubit operators, normalized by 1/2 (the convention assumed by tomo2dm below)
I=np.array([[1.,0],[0,1.]])/2
X=np.array([[0,1.],[1.,0]])/2
Y=np.array([[0,0+-1.j],[0+1.j,0]])/2
Z=np.array([[1,0],[0,-1]])/2
full_operator=1
for operator in operator_string:
if operator == 'I':
operator = I
elif operator == 'X':
operator = X
elif operator == 'Y':
operator = Y
elif operator == 'Z':
operator = Z
elif operator == 'H':
operator = 1/np.sqrt(2)*(X+Z)
else:
raise ValueError('operator_string should contain only I, X, Y or Z')
full_operator=np.kron(full_operator,operator)
return full_operator
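# Usage sketch for operator() (illustrative): because every single-qubit factor carries a 1/2,
# operator('ZZ') is diag(1, -1, -1, 1)/4 and operator('XIXI') is the 16x16 Kronecker product
# of X, I, X, I divided by 16, so Tr[operator(P) @ rho] gives the Pauli expectation value of
# rho scaled by 1/2**n.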
def vec2dm(vec):
vec = vec.reshape(len(vec), 1)
vec_transpose = vec.reshape(1, len(vec))
rho = np.dot(vec, vec_transpose)
return rho
def vecs2mat(vec1,vec2):
if len(vec1) != len(vec2):
raise ValueError('Vectors must be same length')
vec1 = vec1.reshape(len(vec1), 1)
vec2 = vec2.reshape(1, len(vec2))
rho = np.dot(vec1,vec2)
return rho
def fidelity(rho_1, rho_2, trace_conserved = False):
if trace_conserved:
if np.round(np.trace(rho_1), 3) !=1:
raise ValueError('rho_1 unphysical, trace =/= 1, but ', np.trace(rho_1))
if np.round(np.trace(rho_2), 3) !=1:
raise ValueError('rho_2 unphysical, trace =/= 1, but ', np.trace(rho_2))
sqrt_rho_1 = linalg.sqrtm(rho_1)
eig_vals = linalg.eig(np.dot(np.dot(sqrt_rho_1,rho_2),sqrt_rho_1))[0]
pos_eig = [vals for vals in eig_vals if vals > 0]
return float(np.sum(np.real(np.sqrt(pos_eig))))**2
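# Illustrative sanity checks for fidelity() on 2x2 density matrices (not part of the
# original analysis; values chosen to keep the matrices non-singular):
def _fidelity_sanity_check():
    rho_a = np.array([[0.9, 0.0], [0.0, 0.1]])
    rho_mixed = np.eye(2) / 2
    assert abs(fidelity(rho_a, rho_a) - 1.0) < 1e-6      # F(rho, rho) = 1
    assert abs(fidelity(rho_a, rho_mixed) - 0.8) < 1e-6  # (sqrt(0.45) + sqrt(0.05))**2 = 0.8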
def trace_distance(rho_1, rho_2):
"""
To be constructed
"""
return
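# A minimal sketch of how trace_distance could be filled in (an assumption, not the author's
# implementation, which is left as a stub above):
# D(rho_1, rho_2) = 0.5 * Tr|rho_1 - rho_2|, i.e. half the sum of the absolute eigenvalues
# of the Hermitian difference.
def _trace_distance_sketch(rho_1, rho_2):
    eig_vals = np.linalg.eigvalsh(rho_1 - rho_2)
    return 0.5 * float(np.sum(np.abs(eig_vals)))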
def tomo2dm(tomo_dict):
num_qubits = len(list(tomo_dict.keys())[0])
dim = 2**num_qubits
dm = np.zeros((dim,dim), dtype=np.complex128)
for op, value in tomo_dict.items():
dm += value*operator(op)
return dm
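# Reconstruction convention used by tomo2dm (illustrative check): with the 1/2-normalized
# operators above, rho = sum_P <P> * operator(P) over all Pauli labels. For example,
# tomo2dm({'II': 1.0, 'ZZ': 1.0, 'XX': 1.0, 'YY': -1.0}) = (II + ZZ + XX - YY)/4,
# which is the Bell-state density matrix |Phi+><Phi+|.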
class Hamiltonian:
def __init__(self, hamiltonian=operator('ZZ')+operator('XI')+operator('IX')):
self.hamiltonian = hamiltonian
self.dim = len(hamiltonian)
def eigen_values(self):
eigen_values = np.linalg.eig(self.hamiltonian)[0]
return eigen_values
def eigen_vectors(self):
eigen_vectors = np.linalg.eig(self.hamiltonian)[1]
return eigen_vectors
def eigen_dict(self):
eigen_dict=dict()
eigen_values, eigen_vectors = np.linalg.eig(self.hamiltonian)
for n in range(len(eigen_values)):
eigen_dict[n]=[]
eigen_dict[n].append(eigen_values[n])
eigen_dict[n].append(eigen_vectors[:,n])
return eigen_dict
def thermal_gibbs_rho(self, T):
if np.round(T, 6) == 0:
raise ValueError('Temperature can not be zero')
rho = np.zeros((self.dim, self.dim))
for n in range(self.dim):
vec = self.eigen_vectors()[:,n].reshape(self.dim,1)
rho += np.exp(-self.eigen_values()[n]/T)*np.dot(vec,np.transpose(vec))
return np.round(rho/np.trace(rho),6)
def TFD_state(self, T):
if np.round(T, 6) == 0:
raise ValueError('Temperature can not be zero')
psi = np.zeros((1,self.dim**2))
for n in range(self.dim):
vec = self.eigen_vectors()[:,n]
psi += np.exp(-self.eigen_values()[n]/(2*T))*np.kron(vec,vec)
psi_norm=np.linalg.norm(psi)
return np.transpose(psi)/psi_norm
def TFD_rho(self, T):
vec=self.TFD_state(T).reshape(self.dim**2,1)
vec_transpose=self.TFD_state(T).reshape(1,self.dim**2)
rho = np.dot(vec,vec_transpose)
return rho
def plot_non_zero_pauli_terms(self, pauli_dict,T):
new_dict = pauli_dict.copy()
pauli_set = ['I', 'X', 'Y', 'Z']
if len(pauli_dict) == 16:
PiPj = []
for Pi in pauli_set:
for Pj in pauli_set:
PiPj.append(Pi+Pj)
for i, term in enumerate(PiPj):
if np.round(np.sum(np.abs(new_dict[term])),6) == 0:
del new_dict[term]
elif len(pauli_dict) == 256:
PiPjPkPl = []
for Pi in pauli_set:
for Pj in pauli_set:
for Pk in pauli_set:
for Pl in pauli_set:
PiPjPkPl.append(Pi+Pj+Pk+Pl)
for i, term in enumerate(PiPjPkPl):
if np.round(np.sum(np.abs(new_dict[term])),6) == 0:
del new_dict[term]
else:
raise ValueError('Not all pauli terms in dictionary')
fig, axs = plt.subplots(1,figsize=(10,10))
for i, term in enumerate(new_dict.keys()):
axs.plot(1/T, new_dict[term], label=term)
axs.legend()
axs.set_ylabel('Pauli terms')
axs.set_xlabel('1/T')
def expectation_value(self, operator, rho):
if len(operator) != len(rho):
raise ValueError('Operator and density matrix must be have same dimensions')
return np.round(np.real(np.trace(np.dot(operator, rho))),6)
def pauli_vector_gibbs(self, T, plot=False):
if self.dim != 4:
raise ValueError('Only for 4x4 Hamiltonian')
T=np.array(T)
pauli_dict=dict()
pauli_set=['I', 'X', 'Y', 'Z']
for Pi in pauli_set:
for Pj in pauli_set:
PiPj = operator(Pi+Pj)
pauli_dict[Pi+Pj] = []
if np.sqrt(np.size(T)) > 1:
for Ti in T:
pauli_dict[Pi+Pj].append(self.expectation_value(PiPj, self.thermal_gibbs_rho(Ti)))
else:
pauli_dict[Pi+Pj].append(self.expectation_value(PiPj, self.thermal_gibbs_rho(T)))
if plot:
self.plot_non_zero_pauli_terms(pauli_dict, T)
return pauli_dict
def pauli_vector_TFD(self, T, plot=False):
if self.dim**2 != 16:
raise ValueError('Only for 16x16 Hamiltonian')
T=np.array(T)
pauli_dict=dict()
pauli_set=['I', 'X', 'Y', 'Z']
for Pi in pauli_set:
for Pj in pauli_set:
for Pk in pauli_set:
for Pl in pauli_set:
PiPjPkPl = operator(Pi+Pj+Pk+Pl)
pauli_dict[Pi+Pj+Pk+Pl] = []
if np.sqrt(np.size(T)) > 1:
for Ti in T:
pauli_dict[Pi+Pj+Pk+Pl].append(self.expectation_value(PiPjPkPl, np.dot(self.TFD_state(Ti),np.transpose(self.TFD_state(Ti)))))
else:
pauli_dict[Pi+Pj+Pk+Pl].append(self.expectation_value(PiPjPkPl, np.dot(self.TFD_state(T), np.transpose(self.TFD_state(T)))))
if plot:
self.plot_non_zero_pauli_terms(pauli_dict, T)
return pauli_dict
def plot_fidelities_versus_T(fid_dict, data_label=None, data_marker=None, data_color='black', beta=False, ax=None, ax_dict=None, figsize=(10, 10), **kw):
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
else:
fig = ax.get_figure()
fig.set_figwidth(10)
fig.set_figheight(5)
print(fid_dict)
if beta == True:
x_label = 'Beta'
x = [1/T for T in list(fid_dict.keys())]
else:
x_label = 'Temperature'
x = list(fid_dict.keys())
ax.plot(x, list(fid_dict.values()), color=data_color)
ax.scatter(x, list(fid_dict.values()), marker=data_marker, facecolor=data_color, label=data_label)
ax.set_xlabel(x_label)
ax.set_ylabel('Fidelity')
ax.set_xscale('symlog')
ax.legend()
ax.set_ylim(0.7, 1.01)
return fig, ax
class Gibbs_fidelity_analysis(ba.BaseDataAnalysis):
def __init__(self, t_start: str = None, t_stop: str = None, ts_list=None,
label: str = '',
g: float = 1, T=1,
options_dict: dict = None, extract_only: bool = False,
auto=True):
"""
Analysis for the Thermal Field Double state QAOA experiment.
Args:
g (float):
coupling strength (in theorist units)
T (float):
temperature (in theorist units)
"""
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
options_dict=options_dict,
extract_only=extract_only)
self.g = g
self.T = T
self.ts_list = ts_list
if options_dict == None:
self.options_dict = {'beta': False,
'data_label': 'Data',
'data_color': 'black',
'save_figs': True}
if auto:
self.run_analysis()
def extract_data(self):
"""
Extract two qubit tomography terms.
"""
self.raw_data_dict = {}
if self.ts_list is None:
self.timestamps = a_tools.get_timestamps_in_range(self.t_start,
self.t_stop,
label=self.labels)
else:
self.timestamps = self.ts_list
for ts in self.timestamps:
data_fp = get_datafilepath_from_timestamp(ts)
param_spec = {'TFD_dict': ('Analysis/quantities_of_interest', 'attr:all_attr'),
'tomo_dict': ('Analysis/quantities_of_interest/full_tomo_dict', 'attr:all_attr')}
self.raw_data_dict[ts] = h5d.extract_pars_from_datafile(data_fp, param_spec)
# Parts added to be compatible with base analysis data requirements
self.raw_data_dict['timestamps'] = self.timestamps
self.raw_data_dict['folder'] = os.path.split(data_fp)[0]
def process_data(self):
"""
Create density matrix, find exact density matrix for given T and calculate fidelity.
"""
self.proc_data_dict = {}
self.hamiltonian = Hamiltonian()
self.proc_data_dict['timestamps'] = self.raw_data_dict['timestamps']
self.proc_data_dict['operators'] = self.raw_data_dict[self.raw_data_dict['timestamps'][0]]['tomo_dict'].keys()
self.proc_data_dict['T'] = [self.raw_data_dict[ts]['TFD_dict']['T'] for ts in self.proc_data_dict['timestamps']]
for i, operator in enumerate(self.proc_data_dict['operators']):
self.proc_data_dict[operator] = [self.raw_data_dict[ts]['tomo_dict'][operator] for ts in self.proc_data_dict['timestamps']]
for i, Ti in enumerate(self.proc_data_dict['T']):
self.proc_data_dict[Ti] = {}
self.proc_data_dict[Ti]['density_matrices'] = {}
self.proc_data_dict[Ti]['density_matrices']['experiment'] = tomo2dm(self.raw_data_dict[self.proc_data_dict['timestamps'][i]]['tomo_dict'])
self.proc_data_dict[Ti]['density_matrices']['theory'] = self.hamiltonian.thermal_gibbs_rho(T=Ti)
self.proc_data_dict[Ti]['fidelity'] = fidelity(self.proc_data_dict[Ti]['density_matrices']['experiment'],self.proc_data_dict[Ti]['density_matrices']['theory'])
fid_df = | pd.DataFrame.from_dict({T:self.proc_data_dict[T]['fidelity'] for T in self.proc_data_dict['T']}, orient='index',columns=['F']) | pandas.DataFrame.from_dict |
from datetime import datetime, timedelta, timezone
import random
from tabnanny import check
import unittest
import pandas as pd
import pytz
if __name__ == "__main__":
from pathlib import Path
import sys
sys.path.insert(0, str(Path(__file__).resolve().parents[2]))
from datatube.dtype import check_dtypes
class TestObj:
pass
unittest.TestCase.maxDiff = None
SIZE = 3
TEST_DATA = {
int: {
"integers":
[-1 * SIZE // 2 + i + 1 for i in range(SIZE)],
"whole floats":
[-1 * SIZE // 2 + i + 1.0 for i in range(SIZE)],
"real whole complex":
[complex(-1 * SIZE // 2 + i + 1, 0) for i in range(SIZE)],
},
float: {
"decimal floats":
[-1 * SIZE // 2 + i + 1 + random.random() for i in range(SIZE)],
"real decimal complex":
[complex(-1 * SIZE // 2 + i + 1 + random.random(), 0)
for i in range(SIZE)],
},
complex: {
"imaginary complex":
[complex(-1 * SIZE // 2 + i + 1 + random.random(),
-1 * SIZE // 2 + i + 1 + random.random())
for i in range(SIZE)],
},
str: {
"integer strings":
[str(-1 * SIZE // 2 + i + 1) for i in range(SIZE)],
"whole float strings":
[str(-1 * SIZE // 2 + i + 1.0) for i in range(SIZE)],
"decimal float strings":
[str(-1 * SIZE // 2 + i + 1 + random.random())
for i in range(SIZE)],
"real whole complex strings":
[str(complex(-1 * SIZE // 2 + i + 1, 0)) for i in range(SIZE)],
"real decimal complex strings":
[str(complex(-1 * SIZE // 2 + i + 1 + random.random(), 0))
for i in range(SIZE)],
"imaginary complex strings":
[str(complex(-1 * SIZE // 2 + i + 1 + random.random(),
-1 * SIZE // 2 + i + 1 + random.random()))
for i in range(SIZE)],
"character strings":
[chr(i % 26 + ord("a")) for i in range(SIZE)],
"boolean strings":
[str(bool((i + 1) % 2)) for i in range(SIZE)],
"aware datetime strings":
[str(datetime.fromtimestamp(i, tz=timezone.utc))
for i in range(SIZE)],
"aware ISO 8601 strings":
[datetime.fromtimestamp(i, tz=timezone.utc).isoformat()
for i in range(SIZE)],
"naive datetime strings":
[str(datetime.fromtimestamp(i)) for i in range(SIZE)],
"naive ISO 8601 strings":
[datetime.fromtimestamp(i).isoformat() for i in range(SIZE)],
"aware/naive datetime strings":
[str(datetime.fromtimestamp(i, tz=timezone.utc)) if i % 2
else str(datetime.fromtimestamp(i)) for i in range(SIZE)],
"aware/naive ISO 8601 strings":
[datetime.fromtimestamp(i, tz=timezone.utc).isoformat() if i % 2
else datetime.fromtimestamp(i).isoformat()
for i in range(SIZE)],
"mixed timezone datetime strings":
[str(
datetime.fromtimestamp(
i,
tz=pytz.timezone(
pytz.all_timezones[i % len(pytz.all_timezones)]
)
)
) for i in range(SIZE)],
"mixed timezone ISO 8601 strings":
[datetime.fromtimestamp(
i,
tz=pytz.timezone(
pytz.all_timezones[i % len(pytz.all_timezones)]
)
).isoformat() for i in range(SIZE)],
"timedelta strings":
[str(timedelta(seconds=i + 1)) for i in range(SIZE)],
"pd.Timedelta strings":
[str(pd.Timedelta(timedelta(seconds=i + 1))) for i in range(SIZE)]
},
bool: {
"booleans":
[bool((i + 1) % 2) for i in range(SIZE)]
},
datetime: {
"aware datetimes":
[datetime.fromtimestamp(i, tz=timezone.utc) for i in range(SIZE)],
"naive datetimes":
[datetime.fromtimestamp(i) for i in range(SIZE)],
"aware/naive datetimes":
[datetime.fromtimestamp(i, tz=timezone.utc) if i % 2
else datetime.fromtimestamp(i) for i in range(SIZE)],
"mixed timezone datetimes":
[datetime.fromtimestamp(
i,
tz = pytz.timezone(
pytz.all_timezones[i % len(pytz.all_timezones)]
)
) for i in range(SIZE)]
},
timedelta: {
"timedeltas":
[timedelta(seconds=i + 1) for i in range(SIZE)]
},
object: {
"Nones":
[None for _ in range(SIZE)],
"custom objects":
[TestObj() for _ in range(SIZE)]
}
}
ALL_DATA = {col_name: data for v in TEST_DATA.values()
for col_name, data in v.items()}
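# For reference, check_dtypes is exercised in two forms throughout the tests below
# (signatures as used here; see datatube.dtype for the actual implementation):
#   check_dtypes(pd.Series([1, 2, 3]), int)              -> bool
#   check_dtypes(pd.DataFrame({"a": [1]}), {"a": int})   -> bool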
class CheckDtypeTests(unittest.TestCase):
def test_check_integers_series_no_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data)
result = check_dtypes(series, int)
expected = col_name in TEST_DATA[int]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., int) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_integers_series_with_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data + [None])
result = check_dtypes(series, int)
expected = col_name in TEST_DATA[int]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., int) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_integers_df_no_na(self):
df = pd.DataFrame(ALL_DATA)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: int})
expected = col_name in TEST_DATA[int]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: int}}) != "
f"{expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_integers_df_with_na(self):
with_na = {k: v + [None] for k, v in ALL_DATA.items()}
df = pd.DataFrame(with_na)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: int})
expected = col_name in TEST_DATA[int]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: int}}) != "
f"{expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_floats_series_no_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data)
result = check_dtypes(series, float)
expected = col_name in TEST_DATA[float]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., float) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_floats_series_with_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data + [None])
result = check_dtypes(series, float)
expected = col_name in TEST_DATA[float]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., float) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_floats_df_no_na(self):
df = pd.DataFrame(ALL_DATA)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: float})
expected = col_name in TEST_DATA[float]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: float}}) != "
f"{expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_floats_df_with_na(self):
with_na = {k: v + [None] for k, v in ALL_DATA.items()}
df = pd.DataFrame(with_na)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: float})
expected = col_name in TEST_DATA[float]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: float}}) != "
f"{expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_complex_series_no_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data)
result = check_dtypes(series, complex)
expected = col_name in TEST_DATA[complex]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., complex) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_complex_series_with_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data + [None])
result = check_dtypes(series, complex)
expected = col_name in TEST_DATA[complex]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., complex) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_complex_df_no_na(self):
df = pd.DataFrame(ALL_DATA)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: complex})
expected = col_name in TEST_DATA[complex]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: complex}}) "
f"!= {expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_complex_df_with_na(self):
with_na = {k: v + [None] for k, v in ALL_DATA.items()}
df = pd.DataFrame(with_na)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: complex})
expected = col_name in TEST_DATA[complex]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: complex}}) "
f"!= {expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_strings_series_no_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data)
result = check_dtypes(series, str)
expected = col_name in TEST_DATA[str]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., str) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_strings_series_with_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data + [None])
result = check_dtypes(series, str)
expected = col_name in TEST_DATA[str]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., str) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_strings_df_no_na(self):
df = pd.DataFrame(ALL_DATA)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: str})
expected = col_name in TEST_DATA[str]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: str}}) != "
f"{expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_strings_df_with_na(self):
with_na = {k: v + [None] for k, v in ALL_DATA.items()}
df = pd.DataFrame(with_na)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: str})
expected = col_name in TEST_DATA[str]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: str}}) != "
f"{expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_booleans_series_no_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data)
result = check_dtypes(series, bool)
expected = col_name in TEST_DATA[bool]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., bool) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_booleans_series_with_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data + [None])
result = check_dtypes(series, bool)
expected = col_name in TEST_DATA[bool]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., bool) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_booleans_df_no_na(self):
df = pd.DataFrame(ALL_DATA)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: bool})
expected = col_name in TEST_DATA[bool]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: bool}}) != "
f"{expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_booleans_df_with_na(self):
with_na = {k: v + [None] for k, v in ALL_DATA.items()}
df = pd.DataFrame(with_na)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: bool})
expected = col_name in TEST_DATA[bool]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: bool}}) != "
f"{expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_datetimes_series_no_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data)
result = check_dtypes(series, datetime)
expected = col_name in TEST_DATA[datetime]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., datetime) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_datetimes_series_with_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data + [None])
result = check_dtypes(series, datetime)
expected = col_name in TEST_DATA[datetime]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., datetime) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_datetimes_df_no_na(self):
df = pd.DataFrame(ALL_DATA)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: datetime})
expected = col_name in TEST_DATA[datetime]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: datetime}}) "
f"!= {expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_datetimes_df_with_na(self):
with_na = {k: v + [None] for k, v in ALL_DATA.items()}
df = pd.DataFrame(with_na)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: datetime})
expected = col_name in TEST_DATA[datetime]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: datetime}}) "
f"!= {expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_timedeltas_series_no_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data)
result = check_dtypes(series, timedelta)
expected = col_name in TEST_DATA[timedelta]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., timedelta) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_timedeltas_series_with_na(self):
failed = []
for col_name, data in ALL_DATA.items():
series = pd.Series(data + [None])
result = check_dtypes(series, timedelta)
expected = col_name in TEST_DATA[timedelta]
try:
self.assertEqual(result, expected)
except AssertionError:
context = f"check_dtypes({data[:3]}..., timedelta) != {expected}"
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_timedeltas_df_no_na(self):
df = pd.DataFrame(ALL_DATA)
failed = []
for col_name in df.columns:
result = check_dtypes(df, {col_name: timedelta})
expected = col_name in TEST_DATA[timedelta]
try:
self.assertEqual(result, expected)
except AssertionError:
context = (f"check_dtypes(df, {{{repr(col_name)}: timedelta}}) "
f"!= {expected}")
failed.append(context)
if len(failed) > 0:
joined = "\n\t".join(failed)
raise AssertionError(f"{len(failed)} failed checks:\n\t{joined}")
def test_check_timedeltas_df_with_na(self):
with_na = {k: v + [None] for k, v in ALL_DATA.items()}
df = | pd.DataFrame(with_na) | pandas.DataFrame |