prompt | completion | api
---|---|---
stringlengths 19 to 1.03M | stringlengths 4 to 2.12k | stringlengths 8 to 90
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (c) 2016 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from __future__ import print_function

import os
import gzip
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
def filter(obj, filt=None):
    '''
    Filter the rows of a DataFrame according to user-defined conditions.

    Parameters:
        obj: a DataFrame object. Rows are observations, columns are variables.
        filt: a self-defined filter of the form {variable: (op, value)}, where
            variable is one of the columns of obj, value is a possible value of
            that variable, and op is the comparison between them. Currently
            '==', '!=', 'in' and 'not in' are supported.
    Returns:
        the filtered DataFrame object.
    '''
    if filt is None:
        return obj
    for key, (op, value) in filt.items():
        if op == '==':
            obj = obj[obj[key] == value]
        elif op == '!=':
            obj = obj[obj[key] != value]
        elif op == 'in':
            obj = obj[obj[key].isin(value)]
        elif op == 'not in':
            obj = obj[~obj[key].isin(value)]
    return obj
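# Hedged usage sketch (hypothetical column names, not part of the original
# module): keep liver samples from batches 1 or 3 using the
# {variable: (op, value)} format described in filter() above.
def _filter_usage_example():
    meta = DataFrame({'tissue': ['liver', 'brain', 'liver'],
                      'batch': [1, 2, 3]})
    kept = filter(meta, {'tissue': ('==', 'liver')})  # keep liver rows
    kept = filter(kept, {'batch': ('in', [1, 3])})    # then restrict batches
    return kept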
def integration(nameList, DIR='./'):
    '''
    Integrate the selected samples together.

    Parameters:
        nameList: the list of filenames; each file corresponds to one sample.
        DIR: the directory containing the files.
    Returns:
        allSample: a DataFrame object with the integrated data.
    '''
    allSample = None
    for name in nameList:
        aDict = {}
        with gzip.open(''.join([DIR, name]), 'rb') as handle:
            for line in handle:
                # decode so the split works under both Python 2 and 3
                fields = line.decode('utf-8').split('\t')
                aDict[fields[0]] = fields[1]
        oneSample = Series(aDict, dtype=float)
        if allSample is None:
            oneSample.name = name.split('.')[0]
            oneSample.index.name = 'Gene'
allSample = | DataFrame(oneSample) | pandas.DataFrame |
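# Hedged sketch of the integration() pattern above (hypothetical gene names):
# one Series per sample, joined column-wise into a single DataFrame, which is
# what the pandas.DataFrame call at the end of integration() starts from.
def _integration_pattern_example():
    s1 = Series({'TP53': 10.0, 'EGFR': 3.5}, name='sample1')
    s2 = Series({'TP53': 8.0, 'EGFR': 4.1}, name='sample2')
    merged = DataFrame(s1).join(DataFrame(s2))  # genes as rows, samples as columns
    merged.index.name = 'Gene'
    return merged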
import sys
from Bio import SeqIO
import tempfile
import os
import glob
import shutil
import pandas as pd
from collections import defaultdict
import fnmatch
from . import rampart
# extract with constraints:
# -- only one group ever
# -- only one flowcell ID ever
# -- always unique read ID
# fast fastq code by <NAME>
def readfq(fp): # this is a generator function
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last: break
name, seqs, last = last[1:], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last: break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs); # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
def write_fastq(fh, name, rec, qual):
fh.write("@%s\n%s\n+\n%s\n" % (name, rec, qual))
def run(parser, args):
if not args.directory:
if not os.path.exists(args.prompt_directory):
print ("Please specify a directory with --directory, artic gather --help for details.")
raise SystemExit
directories = os.listdir(args.prompt_directory)
directories = [args.prompt_directory+'/'+d for d in directories if os.path.isdir(args.prompt_directory+'/'+d)]
args.directory = [rampart.chooser(directories)]
if not args.fast5_directory and not args.no_fast5s:
print("Must supply a directory to fast5 files with --fast5-directory")
print("If you do not want use fast5s with nanopolish use --no-fast5s instead")
raise SystemExit(1)
if isinstance(args.directory, list) and len(args.directory) > 1 and not args.prefix:
print("Must supply a prefix if gathering multiple directories!", file=sys.stderr)
raise SystemExit(1)
if args.prefix:
prefix = args.prefix
else:
prefix = os.path.split(args.directory[0])[-1]
all_fastq_outfn = "%s_all.fastq" % (prefix)
all_fastq_outfh = open(all_fastq_outfn, "w")
summary_files = []
fastq = defaultdict(list)
for directory in args.directory:
d = directory
for root, dirs, files in os.walk(d):
paths = os.path.split(root)
barcode_directory = paths[-1]
fastq[barcode_directory].extend([root+'/'+f for f in files if f.endswith('.fastq')])
summary_files.extend([root+'/'+f for f in files if fnmatch.fnmatch(f, '*cing_summary*txt')])
for barcode_directory, fastq in list(fastq.items()):
if len(fastq):
fastq_outfn = "%s_%s.fastq" % (prefix, barcode_directory)
outfh = open(fastq_outfn, "w")
print("Processing %s files in %s" % (len(fastq), barcode_directory), file=sys.stderr)
dups = set()
uniq = 0
total = 0
limit_reached = False
for f in fastq:
for name, rec, qual in readfq(open(f)):
seq_length = len(rec)
if args.max_length and seq_length > args.max_length:
continue
if args.min_length and seq_length < args.min_length:
continue
total += 1
if name not in dups:
write_fastq(outfh, name, rec, qual)
write_fastq(all_fastq_outfh, name, rec, qual)
dups.add(name)
uniq += 1
if args.limit and uniq >= args.limit:
limit_reached = True
break
if limit_reached:
break
outfh.close()
print("%s\t%s\t%s" % (fastq_outfn, total, uniq))
all_fastq_outfh.close()
print("Found the following summary files:\n", file=sys.stderr)
for summaryfn in summary_files:
print (" " + summaryfn, file=sys.stderr)
dfs = []
for summaryfn in summary_files:
df = pd.read_csv(summaryfn, sep="\t")
# support for local basecalling
if 'filename_fast5' in df.columns:
df['filename'] = df['filename_fast5']
dfs.append(df)
summary_outfn = ""
if dfs:
summary_outfn = "%s_sequencing_summary.txt" % (prefix)
summaryfh = open(summary_outfn, "w")
| pd.concat(dfs, sort=False) | pandas.concat |
# Copyright 2016 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal
import pandas as pd
import pandas.util.testing as pdt
from .utils import FrozenSimilarityMatrixInMemory, SimilarityMatrixInMemory
@pytest.fixture
def similarity_matrix():
pair_matrix_inmem = SimilarityMatrixInMemory()
pair_matrix = pair_matrix_inmem.matrix
labels = {'a': 0, 'b': 1, 'c': 2, 'd': 3}
similarities = [
('a', 'b', 0.9),
('a', 'c', 0.5),
('b', 'c', 0.6),
('d', 'c', 0.7)
]
pair_matrix.update(similarities, labels)
yield pair_matrix
pair_matrix_inmem.close()
@pytest.fixture
def frozen_similarity_matrix():
matrix_inmem = FrozenSimilarityMatrixInMemory()
matrix = matrix_inmem.matrix
yield matrix
matrix_inmem.close()
def fillit(frozen_similarity_matrix):
labels = ['a', 'b', 'c', 'd']
data = [
[0.0, 0.9, 0.5, 0.0],
[0.9, 0.0, 0.6, 0.0],
[0.5, 0.6, 0.0, 0.7],
[0.0, 0.0, 0.7, 0.0],
]
frozen_similarity_matrix.from_array(np.array(data), labels)
class TestFrozenSimilarityMatrix(object):
def test_from_pairs_defaults(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 10)
result = frozen_similarity_matrix.to_pandas()
labels = ['a', 'b', 'c', 'd']
expected = pd.DataFrame([
[0.0, 0.9, 0.5, 0.0],
[0.9, 0.0, 0.6, 0.0],
[0.5, 0.6, 0.0, 0.7],
[0.0, 0.0, 0.7, 0.0]
], index=labels, columns=labels)
pdt.assert_almost_equal(result, expected)
def test_from_pairs_multiframe(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 1, None, False)
result = frozen_similarity_matrix.to_pandas()
labels = ['a', 'b', 'c', 'd']
expected = pd.DataFrame([
[0.0, 0.9, 0.5, 0.0],
[0.9, 0.0, 0.6, 0.0],
[0.5, 0.6, 0.0, 0.7],
[0.0, 0.0, 0.7, 0.0]
], index=labels, columns=labels)
pdt.assert_almost_equal(result, expected)
def test_from_pairs_limited(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 1, 1, False)
result = frozen_similarity_matrix.to_pandas()
labels = ['a', 'b', 'c', 'd']
expected = pd.DataFrame([
[0.0, 0.9, 0.0, 0.0],
[0.9, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0]
], index=labels, columns=labels)
pdt.assert_almost_equal(result, expected)
def test_from_pairs_singlesided(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 10, None, True)
result = frozen_similarity_matrix.to_pandas()
print(result)
labels = ['a', 'b', 'c', 'd']
expected = pd.DataFrame([
[0.0, 0.9, 0.5, 0.0],
[0.0, 0.0, 0.6, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.7, 0.0]
], index=labels, columns=labels)
pdt.assert_almost_equal(result, expected)
def test_find_defaults(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 10)
hits = frozen_similarity_matrix.find('c', 0.55)
expected = [('d', 0.7), ('b', 0.6)]
assert hits == expected
def test_find_limit(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 10)
        hits = frozen_similarity_matrix.find('c', 0.55, 1)
expected = [('d', 0.7)]
assert hits == expected
def test_find_cutoffhigh_nohits(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 10)
hits = frozen_similarity_matrix.find('c', 0.9)
expected = []
assert hits == expected
def test_find_badkey_keyerror(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 10)
with pytest.raises(KeyError):
frozen_similarity_matrix.find('f', 0.45)
def test_find_singlesided(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 10, None, True)
print(frozen_similarity_matrix.scores.read())
hits = frozen_similarity_matrix.find('c', 0.0)
expected = []
assert hits == expected
def test_from_array(self, similarity_matrix, frozen_similarity_matrix):
labels = ['a', 'b', 'c', 'd']
data = [
[0.0, 0.9, 0.5, 0.0],
[0.9, 0.0, 0.6, 0.0],
[0.5, 0.6, 0.0, 0.7],
[0.0, 0.0, 0.7, 0.0],
]
frozen_similarity_matrix.from_array(np.array(data), labels)
result = frozen_similarity_matrix.to_pandas()
expected = pd.DataFrame(data, index=labels, columns=labels)
| pdt.assert_almost_equal(result, expected) | pandas.util.testing.assert_almost_equal |
# coding: utf8
import abc
from os import path
import numpy as np
import pandas as pd
import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset
from clinicadl.utils.inputs import FILENAME_TYPE, MASK_PATTERN
#################################
# Datasets loaders
#################################
class CapsDataset(Dataset):
"""Abstract class for all derived CapsDatasets."""
def __init__(
self,
caps_directory,
data_df,
preprocessing,
transformations,
label_presence,
label=None,
label_code=None,
augmentation_transformations=None,
multi_cohort=False,
):
self.caps_directory = caps_directory
self.caps_dict = self.create_caps_dict(caps_directory, multi_cohort)
self.transformations = transformations
self.augmentation_transformations = augmentation_transformations
self.eval_mode = False
self.label_presence = label_presence
self.label = label
self.label_code = label_code
self.preprocessing = preprocessing
if not hasattr(self, "elem_index"):
raise ValueError(
"Child class of CapsDataset must set elem_index attribute."
)
if not hasattr(self, "mode"):
raise ValueError("Child class of CapsDataset must set mode attribute.")
# Check the format of the tsv file here
self.df = data_df
mandatory_col = {"participant_id", "session_id", "cohort"}
if self.label_presence and self.label is not None:
mandatory_col.add(self.label)
if not mandatory_col.issubset(set(self.df.columns.values)):
raise Exception(
"the data file is not in the correct format."
"Columns should include %s" % mandatory_col
)
self.elem_per_image = self.num_elem_per_image()
self.size = self[0]["image"].size()
@property
@abc.abstractmethod
def elem_index(self):
pass
def label_fn(self, target):
"""
Returns the label value usable in criterion.
Args:
target (str or float or int): value of the target.
Returns:
label (int or float): value of the label usable in criterion.
"""
# Reconstruction case (no label)
if self.label is None:
return None
# Regression case (no label code)
elif self.label_code is None:
return np.float32([target])
# Classification case (label + label_code dict)
else:
return self.label_code[target]
def __len__(self):
return len(self.df) * self.elem_per_image
@staticmethod
def create_caps_dict(caps_directory, multi_cohort):
from clinica.utils.inputs import check_caps_folder
if multi_cohort:
if not caps_directory.endswith(".tsv"):
raise ValueError(
"If multi_cohort is given, the caps_dir argument should be a path to a TSV file."
)
else:
caps_df = pd.read_csv(caps_directory, sep="\t")
check_multi_cohort_tsv(caps_df, "CAPS")
caps_dict = dict()
for idx in range(len(caps_df)):
cohort = caps_df.loc[idx, "cohort"]
caps_path = caps_df.loc[idx, "path"]
check_caps_folder(caps_path)
caps_dict[cohort] = caps_path
else:
check_caps_folder(caps_directory)
caps_dict = {"single": caps_directory}
return caps_dict
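    # Illustrative multi-cohort TSV layout (hypothetical cohort names and
    # paths, not from the original code):
    #
    #     cohort    path
    #     ADNI      /data/ADNI_CAPS
    #     AIBL      /data/AIBL_CAPS
    #
    # create_caps_dict turns this into {"ADNI": "/data/ADNI_CAPS",
    # "AIBL": "/data/AIBL_CAPS"}; without multi_cohort the dict is simply
    # {"single": caps_directory}.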
def _get_path(self, participant, session, cohort, mode="image"):
"""
Gets the path to the tensor image (*.pt)
Args:
participant (str): ID of the participant.
session (str): ID of the session.
cohort (str): Name of the cohort.
mode (str): Type of mode used (image, patch, slice or roi).
Returns:
image_path (str): path to the image
"""
if cohort not in self.caps_dict.keys():
raise ValueError(
"Cohort names in labels and CAPS definitions do not match."
)
if self.preprocessing == "t1-linear":
image_path = path.join(
self.caps_dict[cohort],
"subjects",
participant,
session,
"deeplearning_prepare_data",
"%s_based" % mode,
"t1_linear",
participant + "_" + session + FILENAME_TYPE["cropped"] + ".pt",
)
elif self.preprocessing == "t1-linear-downsampled":
image_path = path.join(
self.caps_dict[cohort],
"subjects",
participant,
session,
"deeplearning_prepare_data",
"%s_based" % mode,
"t1_linear",
participant + "_" + session + FILENAME_TYPE["downsampled"] + ".pt",
)
elif self.preprocessing == "t1-extensive":
image_path = path.join(
self.caps_dict[cohort],
"subjects",
participant,
session,
"deeplearning_prepare_data",
"%s_based" % mode,
"t1_extensive",
participant + "_" + session + FILENAME_TYPE["skull_stripped"] + ".pt",
)
elif self.preprocessing == "t1-volume":
image_path = path.join(
self.caps_dict[cohort],
"subjects",
participant,
session,
"deeplearning_prepare_data",
"%s_based" % mode,
"custom",
participant + "_" + session + FILENAME_TYPE["gm_maps"] + ".pt",
)
elif self.preprocessing == "shepplogan":
image_path = path.join(
self.caps_dict[cohort],
"subjects",
"%s_%s%s.pt" % (participant, session, FILENAME_TYPE["shepplogan"]),
)
else:
raise NotImplementedError(
"The path to preprocessing %s is not implemented" % self.preprocessing
)
return image_path
def _get_meta_data(self, idx):
"""
Gets all meta data necessary to compute the path with _get_path
Args:
idx (int): row number of the meta-data contained in self.df
Returns:
participant (str): ID of the participant.
session (str): ID of the session.
cohort (str): Name of the cohort.
elem_index (int): Index of the part of the image.
label (str or float or int): value of the label to be used in criterion.
"""
image_idx = idx // self.elem_per_image
participant = self.df.loc[image_idx, "participant_id"]
session = self.df.loc[image_idx, "session_id"]
cohort = self.df.loc[image_idx, "cohort"]
if self.elem_index is None:
elem_idx = idx % self.elem_per_image
else:
elem_idx = self.elem_index
if self.label_presence and self.label is not None:
target = self.df.loc[image_idx, self.label]
label = self.label_fn(target)
else:
label = -1
return participant, session, cohort, elem_idx, label
def _get_full_image(self):
"""
        Returns an example image of the mode corresponding to the dataset.
Useful to compute the number of elements if mode != image.
Returns:
image (torch.Tensor) tensor of the full image.
"""
import nibabel as nib
from clinicadl.generate.generate_utils import find_image_path as get_nii_path
participant_id = self.df.loc[0, "participant_id"]
session_id = self.df.loc[0, "session_id"]
cohort = self.df.loc[0, "cohort"]
try:
image_path = self._get_path(
participant_id, session_id, cohort, mode="image"
)
image = torch.load(image_path)
except FileNotFoundError:
image_path = get_nii_path(
self.caps_dict,
participant_id,
session_id,
cohort=cohort,
preprocessing=self.preprocessing,
)
image_nii = nib.load(image_path)
image_np = image_nii.get_fdata()
image = ToTensor()(image_np)
return image
@abc.abstractmethod
def __getitem__(self, idx):
"""
Gets the sample containing all the information needed for training and testing tasks.
Args:
idx (int): row number of the meta-data contained in self.df
Returns:
Dict[str, Any]: dictionary with following items:
- "image" (torch.Tensor): the input given to the model,
- "label" (int or float): the label used in criterion,
- "participant_id" (str): ID of the participant,
- "session_id" (str): ID of the session,
- f"{self.mode}_id" (int): number of the element,
- "image_path": path to the image loaded in CAPS.
"""
pass
@abc.abstractmethod
def num_elem_per_image(self):
"""Computes the number of elements per image based on the full image."""
pass
def eval(self):
"""Put the dataset on evaluation mode (data augmentation is not performed)."""
self.eval_mode = True
return self
def train(self):
"""Put the dataset on training mode (data augmentation is performed)."""
self.eval_mode = False
return self
class CapsDatasetImage(CapsDataset):
"""Dataset of MRI organized in a CAPS folder."""
def __init__(
self,
caps_directory,
data_file,
preprocessing="t1-linear",
train_transformations=None,
label_presence=True,
label=None,
label_code=None,
all_transformations=None,
multi_cohort=False,
):
"""
Args:
caps_directory (string): Directory of all the images.
data_file (string or DataFrame): Path to the tsv file or DataFrame containing the subject/session list.
preprocessing (string): Defines the path to the data in CAPS.
train_transformations (callable, optional): Optional transform to be applied only on training mode.
label_presence (bool): If True the diagnosis will be extracted from the given DataFrame.
label (str): Name of the column in data_df containing the label.
label_code (Dict[str, int]): label code that links the output node number to label value.
all_transformations (callable, options): Optional transform to be applied during training and evaluation.
multi_cohort (bool): If True caps_directory is the path to a TSV file linking cohort names and paths.
"""
self.mode = "image"
super().__init__(
caps_directory,
data_file,
preprocessing,
augmentation_transformations=train_transformations,
label_presence=label_presence,
label=label,
label_code=label_code,
transformations=all_transformations,
multi_cohort=multi_cohort,
)
@property
def elem_index(self):
return None
def __getitem__(self, idx):
participant, session, cohort, _, label = self._get_meta_data(idx)
image_path = self._get_path(participant, session, cohort, "image")
image = torch.load(image_path)
if self.transformations:
image = self.transformations(image)
if self.augmentation_transformations and not self.eval_mode:
image = self.augmentation_transformations(image)
sample = {
"image": image,
"label": label,
"participant_id": participant,
"session_id": session,
"image_id": 0,
"image_path": image_path,
}
return sample
def num_elem_per_image(self):
return 1
class CapsDatasetPatch(CapsDataset):
def __init__(
self,
caps_directory,
data_file,
patch_size,
stride_size,
train_transformations=None,
prepare_dl=False,
patch_index=None,
preprocessing="t1-linear",
label_presence=True,
label=None,
label_code=None,
all_transformations=None,
multi_cohort=False,
):
"""
Args:
caps_directory (string): Directory of all the images.
data_file (string or DataFrame): Path to the tsv file or DataFrame containing the subject/session list.
preprocessing (string): Defines the path to the data in CAPS.
train_transformations (callable, optional): Optional transform to be applied only on training mode.
prepare_dl (bool): If true pre-extracted patches will be loaded.
patch_index (int, optional): If a value is given the same patch location will be extracted for each image.
else the dataset will load all the patches possible for one image.
patch_size (int): size of the regular cubic patch.
stride_size (int): length between the centers of two patches.
label_presence (bool): If True the diagnosis will be extracted from the given DataFrame.
label (str): Name of the column in data_df containing the label.
label_code (Dict[str, int]): label code that links the output node number to label value.
all_transformations (callable, options): Optional transform to be applied during training and evaluation.
multi_cohort (bool): If True caps_directory is the path to a TSV file linking cohort names and paths.
"""
if preprocessing == "shepplogan":
raise ValueError(
"Patch mode is not available for preprocessing %s" % preprocessing
)
self.patch_size = patch_size
self.stride_size = stride_size
self.patch_index = patch_index
self.mode = "patch"
self.prepare_dl = prepare_dl
super().__init__(
caps_directory,
data_file,
preprocessing,
augmentation_transformations=train_transformations,
label_presence=label_presence,
label=label,
label_code=label_code,
transformations=all_transformations,
multi_cohort=multi_cohort,
)
@property
def elem_index(self):
return self.patch_index
def __getitem__(self, idx):
participant, session, cohort, patch_idx, label = self._get_meta_data(idx)
if self.prepare_dl:
patch_path = path.join(
self._get_path(participant, session, cohort, "patch")[0:-7]
+ "_patchsize-"
+ str(self.patch_size)
+ "_stride-"
+ str(self.stride_size)
+ "_patch-"
+ str(patch_idx)
+ "_T1w.pt"
)
image = torch.load(patch_path)
else:
image_path = self._get_path(participant, session, cohort, "image")
full_image = torch.load(image_path)
image = self.extract_patch_from_mri(full_image, patch_idx)
if self.transformations:
image = self.transformations(image)
if self.augmentation_transformations and not self.eval_mode:
image = self.augmentation_transformations(image)
sample = {
"image": image,
"label": label,
"participant_id": participant,
"session_id": session,
"patch_id": patch_idx,
}
return sample
def num_elem_per_image(self):
if self.elem_index is not None:
return 1
image = self._get_full_image()
patches_tensor = (
image.unfold(1, self.patch_size, self.stride_size)
.unfold(2, self.patch_size, self.stride_size)
.unfold(3, self.patch_size, self.stride_size)
.contiguous()
)
patches_tensor = patches_tensor.view(
-1, self.patch_size, self.patch_size, self.patch_size
)
num_patches = patches_tensor.shape[0]
return num_patches
def extract_patch_from_mri(self, image_tensor, patch_idx):
"""
Extracts the patch corresponding to patch_idx
Args:
image_tensor (torch.Tensor): tensor of the full image.
patch_idx (int): Index of the patch wanted.
Returns:
extracted_patch (torch.Tensor): the tensor of the patch.
"""
patches_tensor = (
image_tensor.unfold(1, self.patch_size, self.stride_size)
.unfold(2, self.patch_size, self.stride_size)
.unfold(3, self.patch_size, self.stride_size)
.contiguous()
)
patches_tensor = patches_tensor.view(
-1, self.patch_size, self.patch_size, self.patch_size
)
extracted_patch = patches_tensor[patch_idx, ...].unsqueeze_(0).clone()
return extracted_patch
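# Hedged illustration (dummy tensor, hypothetical sizes, not part of the
# original module): the unfold-based patch grid used by CapsDatasetPatch on a
# (1, D, H, W) image tensor.
def _patch_grid_demo(patch_size=50, stride_size=50):
    image = torch.rand(1, 100, 100, 100)
    patches = (
        image.unfold(1, patch_size, stride_size)
        .unfold(2, patch_size, stride_size)
        .unfold(3, patch_size, stride_size)
        .contiguous()
    ).view(-1, patch_size, patch_size, patch_size)
    return patches.shape  # torch.Size([8, 50, 50, 50]) for a 100^3 volume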
class CapsDatasetRoi(CapsDataset):
def __init__(
self,
caps_directory,
data_file,
roi_list=None,
cropped_roi=True,
roi_index=None,
preprocessing="t1-linear",
train_transformations=None,
prepare_dl=False,
label_presence=True,
label=None,
label_code=None,
all_transformations=None,
multi_cohort=False,
):
"""
Args:
caps_directory (string): Directory of all the images.
data_file (string or DataFrame): Path to the tsv file or DataFrame containing the subject/session list.
roi_list (list): Defines the regions used in the classification.
cropped_roi (bool): If True the image is cropped according to the smallest bounding box possible.
roi_index (int, optional): If a value is given the same region will be extracted for each image.
else the dataset will load all the regions possible for one image.
preprocessing (string): Defines the path to the data in CAPS.
train_transformations (callable, optional): Optional transform to be applied only on training mode.
prepare_dl (bool): If true pre-extracted patches will be loaded.
label_presence (bool): If True the diagnosis will be extracted from the given DataFrame.
label (str): Name of the column in data_df containing the label.
label_code (Dict[str, int]): label code that links the output node number to label value.
all_transformations (callable, options): Optional transform to be applied during training and evaluation.
multi_cohort (bool): If True caps_directory is the path to a TSV file linking cohort names and paths.
"""
if preprocessing == "shepplogan":
raise ValueError(
"ROI mode is not available for preprocessing %s" % preprocessing
)
self.roi_index = roi_index
self.mode = "roi"
self.roi_list = roi_list
self.cropped_roi = cropped_roi
self.prepare_dl = prepare_dl
self.mask_list = self.find_masks(caps_directory, preprocessing)
super().__init__(
caps_directory,
data_file,
preprocessing,
augmentation_transformations=train_transformations,
label_presence=label_presence,
label=label,
label_code=label_code,
transformations=all_transformations,
multi_cohort=multi_cohort,
)
@property
def elem_index(self):
return self.roi_index
def __getitem__(self, idx):
participant, session, cohort, roi_idx, label = self._get_meta_data(idx)
if self.prepare_dl:
if self.roi_list is None:
raise NotImplementedError(
"The extraction of ROIs prior to training is not implemented for default ROIs."
"Please disable --use_extracted_rois or precise the regions in --roi_names."
)
# read the regions directly
roi_path = self._get_path(participant, session, cohort, "roi")
roi_path = self.compute_roi_filename(roi_path, roi_idx)
patch = torch.load(roi_path)
else:
image_path = self._get_path(participant, session, cohort, "image")
image = torch.load(image_path)
patch = self.extract_roi_from_mri(image, roi_idx)
if self.transformations:
patch = self.transformations(patch)
if self.augmentation_transformations and not self.eval_mode:
patch = self.augmentation_transformations(patch)
sample = {
"image": patch,
"label": label,
"participant_id": participant,
"session_id": session,
"roi_id": roi_idx,
}
return sample
def num_elem_per_image(self):
if self.elem_index is not None:
return 1
if self.roi_list is None:
return 2
else:
return len(self.roi_list)
def extract_roi_from_mri(self, image_tensor, roi_idx):
"""
Extracts the region of interest corresponding to the roi_idx-th mask given to the dataset
Args:
image_tensor (torch.Tensor): tensor of the full image.
roi_idx (int): Index of the region wanted.
Returns:
extracted_roi (torch.Tensor): the tensor of the region.
"""
if self.roi_list is None:
if self.preprocessing == "t1-linear":
if roi_idx == 1:
# the center of the left hippocampus
crop_center = (61, 96, 68)
else:
# the center of the right hippocampus
crop_center = (109, 96, 68)
else:
raise NotImplementedError(
"The extraction of hippocampi was not implemented for "
"preprocessing %s" % self.preprocessing
)
crop_size = (50, 50, 50) # the output cropped hippocampus size
if self.cropped_roi:
extracted_roi = image_tensor[
:,
crop_center[0]
- crop_size[0] // 2 : crop_center[0]
+ crop_size[0] // 2 :,
crop_center[1]
- crop_size[1] // 2 : crop_center[1]
+ crop_size[1] // 2 :,
crop_center[2]
- crop_size[2] // 2 : crop_center[2]
+ crop_size[2] // 2 :,
].clone()
else:
raise NotImplementedError(
"The uncropped option for the default ROI was not implemented."
)
else:
roi_mask = self.mask_list[roi_idx]
if len(roi_mask.shape) == 3:
roi_mask = np.expand_dims(roi_mask, axis=0)
elif len(roi_mask.shape) == 4:
assert roi_mask.shape[0] == 1
else:
raise ValueError(
"ROI masks must be 3D or 4D tensors. "
f"The dimension of your ROI mask is {len(roi_mask.shape)}."
)
extracted_roi = image_tensor * roi_mask
if self.cropped_roi:
extracted_roi = extracted_roi[
np.ix_(
roi_mask.any((1, 2, 3)),
roi_mask.any((0, 2, 3)),
roi_mask.any((0, 1, 3)),
roi_mask.any((0, 1, 2)),
)
]
return extracted_roi.float()
def find_masks(self, caps_directory, preprocessing):
"""Loads the masks necessary to regions extraction"""
import nibabel as nib
# TODO should be mutualized with deeplearning-prepare-data
templates_dict = {
"t1-linear": "MNI152NLin2009cSym",
"t1-volume": "Ixi549Space",
"t1-extensive": "Ixi549Space",
}
if self.prepare_dl or self.roi_list is None:
return None
else:
mask_list = []
for roi in self.roi_list:
template = templates_dict[preprocessing]
if preprocessing == "t1-linear":
mask_pattern = MASK_PATTERN["cropped"]
elif preprocessing == "t1-volume":
mask_pattern = MASK_PATTERN["gm_maps"]
elif preprocessing == "t1-extensive":
mask_pattern = MASK_PATTERN["skull_stripped"]
else:
raise NotImplementedError(
"Roi extraction for %s preprocessing was not implemented."
% preprocessing
)
mask_path = path.join(
caps_directory,
"masks",
"tpl-%s" % template,
"tpl-%s%s_roi-%s_mask.nii.gz" % (template, mask_pattern, roi),
)
mask_nii = nib.load(mask_path)
mask_list.append(mask_nii.get_fdata())
return mask_list
def compute_roi_filename(self, image_path, roi_index):
# TODO should be mutualized with deeplearning-prepare-data
from os import path
image_dir = path.dirname(image_path)
image_filename = path.basename(image_path)
image_descriptors = image_filename.split("_")
if "desc-Crop" not in image_descriptors and self.cropped_roi:
image_descriptors = self.insert_descriptor(
image_descriptors, "desc-CropRoi", "space"
)
elif "desc-Crop" in image_descriptors:
image_descriptors = [
descriptor
for descriptor in image_descriptors
if descriptor != "desc-Crop"
]
if self.cropped_roi:
image_descriptors = self.insert_descriptor(
image_descriptors, "desc-CropRoi", "space"
)
else:
image_descriptors = self.insert_descriptor(
image_descriptors, "desc-CropImage", "space"
)
return (
path.join(image_dir, "_".join(image_descriptors))[0:-7]
+ f"_roi-{self.roi_list[roi_index]}_T1w.pt"
)
@staticmethod
def insert_descriptor(image_descriptors, descriptor_to_add, key_to_follow):
# TODO should be mutualized with deeplearning-prepare-data
for i, desc in enumerate(image_descriptors):
if key_to_follow in desc:
image_descriptors.insert(i + 1, descriptor_to_add)
return image_descriptors
class CapsDatasetSlice(CapsDataset):
def __init__(
self,
caps_directory,
data_file,
slice_index=None,
preprocessing="t1-linear",
train_transformations=None,
mri_plane=0,
prepare_dl=False,
discarded_slices=20,
label_presence=True,
label=None,
label_code=None,
all_transformations=None,
multi_cohort=False,
):
"""
Args:
caps_directory (string): Directory of all the images.
data_file (string or DataFrame): Path to the tsv file or DataFrame containing the subject/session list.
preprocessing (string): Defines the path to the data in CAPS.
slice_index (int, optional): If a value is given the same slice will be extracted for each image.
else the dataset will load all the slices possible for one image.
train_transformations (callable, optional): Optional transform to be applied only on training mode.
prepare_dl (bool): If true pre-extracted patches will be loaded.
mri_plane (int): Defines which mri plane is used for slice extraction.
discarded_slices (int or list): number of slices discarded at the beginning and the end of the image.
If one single value is given, the same amount is discarded at the beginning and at the end.
label_presence (bool): If True the diagnosis will be extracted from the given DataFrame.
label (str): Name of the column in data_df containing the label.
label_code (Dict[str, int]): label code that links the output node number to label value.
all_transformations (callable, options): Optional transform to be applied during training and evaluation.
multi_cohort (bool): If True caps_directory is the path to a TSV file linking cohort names and paths.
"""
# Rename MRI plane
if preprocessing == "shepplogan":
raise ValueError(
"Slice mode is not available for preprocessing %s" % preprocessing
)
self.slice_index = slice_index
self.mri_plane = mri_plane
self.direction_list = ["sag", "cor", "axi"]
if self.mri_plane >= len(self.direction_list):
raise ValueError(
"mri_plane value %i > %i" % (self.mri_plane, len(self.direction_list))
)
# Manage discarded_slices
if isinstance(discarded_slices, int):
discarded_slices = [discarded_slices, discarded_slices]
if isinstance(discarded_slices, list) and len(discarded_slices) == 1:
discarded_slices = discarded_slices * 2
self.discarded_slices = discarded_slices
self.mode = "slice"
self.prepare_dl = prepare_dl
super().__init__(
caps_directory,
data_file,
preprocessing,
augmentation_transformations=train_transformations,
label_presence=label_presence,
label=label,
label_code=label_code,
transformations=all_transformations,
multi_cohort=multi_cohort,
)
@property
def elem_index(self):
return self.slice_index
def __getitem__(self, idx):
participant, session, cohort, slice_idx, label = self._get_meta_data(idx)
slice_idx = slice_idx + self.discarded_slices[0]
if self.prepare_dl:
# read the slices directly
slice_path = path.join(
self._get_path(participant, session, cohort, "slice")[0:-7]
+ "_axis-%s" % self.direction_list[self.mri_plane]
+ "_channel-rgb_slice-%i_T1w.pt" % slice_idx
)
image = torch.load(slice_path)
else:
image_path = self._get_path(participant, session, cohort, "image")
full_image = torch.load(image_path)
image = self.extract_slice_from_mri(full_image, slice_idx)
if self.transformations:
image = self.transformations(image)
if self.augmentation_transformations and not self.eval_mode:
image = self.augmentation_transformations(image)
sample = {
"image": image,
"label": label,
"participant_id": participant,
"session_id": session,
"slice_id": slice_idx,
}
return sample
def num_elem_per_image(self):
if self.elem_index is not None:
return 1
image = self._get_full_image()
return (
image.size(self.mri_plane + 1)
- self.discarded_slices[0]
- self.discarded_slices[1]
)
def extract_slice_from_mri(self, image, index_slice):
"""
        This function extracts one slice along one axis and creates an RGB image
        (the slice is duplicated in each channel).
To note:
Axial_view = "[:, :, slice_i]"
Coronal_view = "[:, slice_i, :]"
Sagittal_view= "[slice_i, :, :]"
Args:
image (torch.Tensor): tensor of the full image.
index_slice (int): index of the wanted slice.
Returns:
triple_slice (torch.Tensor): tensor of the slice with 3 channels.
"""
image = image.squeeze(0)
simple_slice = image[(slice(None),) * self.mri_plane + (index_slice,)]
triple_slice = torch.stack((simple_slice, simple_slice, simple_slice))
return triple_slice
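# Hedged illustration (dummy tensor, not part of the original module): the
# slice extraction done in extract_slice_from_mri, duplicated into 3 channels.
def _slice_extraction_demo(mri_plane=0, index_slice=80):
    image = torch.rand(1, 169, 208, 179).squeeze(0)
    simple_slice = image[(slice(None),) * mri_plane + (index_slice,)]
    triple_slice = torch.stack((simple_slice, simple_slice, simple_slice))
    return triple_slice.shape  # torch.Size([3, 208, 179]) for the sagittal plane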
def return_dataset(
mode,
input_dir,
data_df,
preprocessing,
all_transformations,
params,
label=None,
label_code=None,
train_transformations=None,
cnn_index=None,
label_presence=True,
multi_cohort=False,
prepare_dl=False,
):
"""
Return appropriate Dataset according to given options.
Args:
mode (str): input used by the network. Chosen from ['image', 'patch', 'roi', 'slice'].
input_dir (str): path to a directory containing a CAPS structure.
data_df (pd.DataFrame): List subjects, sessions and diagnoses.
preprocessing (str): type of preprocessing wanted ('t1-linear' or 't1-extensive')
train_transformations (callable, optional): Optional transform to be applied during training only.
all_transformations (callable, optional): Optional transform to be applied during training and evaluation.
params (clinicadl.MapsManager): options used by specific modes.
label (str): Name of the column in data_df containing the label.
label_code (Dict[str, int]): label code that links the output node number to label value.
cnn_index (int): Index of the CNN in a multi-CNN paradigm (optional).
label_presence (bool): If True the diagnosis will be extracted from the given DataFrame.
multi_cohort (bool): If True caps_directory is the path to a TSV file linking cohort names and paths.
prepare_dl (bool): If true pre-extracted slices / patches / regions will be loaded.
Returns:
(Dataset) the corresponding dataset.
"""
if cnn_index is not None and mode in ["image"]:
raise ValueError("Multi-CNN is not implemented for %s mode." % mode)
if mode == "image":
return CapsDatasetImage(
input_dir,
data_df,
preprocessing,
train_transformations=train_transformations,
all_transformations=all_transformations,
label_presence=label_presence,
label=label,
label_code=label_code,
multi_cohort=multi_cohort,
)
elif mode == "patch":
return CapsDatasetPatch(
input_dir,
data_df,
params.patch_size,
params.stride_size,
preprocessing=preprocessing,
train_transformations=train_transformations,
all_transformations=all_transformations,
prepare_dl=prepare_dl,
patch_index=cnn_index,
label_presence=label_presence,
label=label,
label_code=label_code,
multi_cohort=multi_cohort,
)
elif mode == "roi":
return CapsDatasetRoi(
input_dir,
data_df,
roi_list=params.roi_list,
cropped_roi=not params.uncropped_roi,
preprocessing=preprocessing,
train_transformations=train_transformations,
all_transformations=all_transformations,
prepare_dl=prepare_dl,
roi_index=cnn_index,
label_presence=label_presence,
label=label,
label_code=label_code,
multi_cohort=multi_cohort,
)
elif mode == "slice":
return CapsDatasetSlice(
input_dir,
data_df,
preprocessing=preprocessing,
train_transformations=train_transformations,
all_transformations=all_transformations,
mri_plane=params.slice_direction,
prepare_dl=prepare_dl,
discarded_slices=params.discarded_slices,
slice_index=cnn_index,
label_presence=label_presence,
label=label,
label_code=label_code,
multi_cohort=multi_cohort,
)
else:
raise ValueError("Mode %s is not implemented." % mode)
##################################
# Transformations
##################################
class RandomNoising(object):
"""Applies a random zoom to a tensor"""
def __init__(self, sigma=0.1):
self.sigma = sigma
def __call__(self, image):
import random
sigma = random.uniform(0, self.sigma)
dist = torch.distributions.normal.Normal(0, sigma)
return image + dist.sample(image.shape)
class RandomSmoothing(object):
"""Applies a random zoom to a tensor"""
def __init__(self, sigma=1):
self.sigma = sigma
def __call__(self, image):
import random
from scipy.ndimage import gaussian_filter
sigma = random.uniform(0, self.sigma)
image = gaussian_filter(image, sigma) # smoothing of data
image = torch.from_numpy(image).float()
return image
class RandomCropPad(object):
def __init__(self, length):
self.length = length
def __call__(self, image):
dimensions = len(image.shape) - 1
crop = np.random.randint(-self.length, self.length, dimensions)
if dimensions == 2:
output = torch.nn.functional.pad(
image, (-crop[0], crop[0], -crop[1], crop[1])
)
elif dimensions == 3:
output = torch.nn.functional.pad(
image, (-crop[0], crop[0], -crop[1], crop[1], -crop[2], crop[2])
)
else:
raise ValueError("RandomCropPad is only available for 2D or 3D data.")
return output
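# Hedged illustration (dummy tensor): RandomCropPad shifts the content by up to
# `length` voxels per axis via negative/positive padding while keeping the
# spatial size unchanged.
def _random_crop_pad_demo():
    image = torch.rand(1, 40, 40, 40)
    shifted = RandomCropPad(5)(image)
    return shifted.shape  # torch.Size([1, 40, 40, 40])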
class GaussianSmoothing(object):
def __init__(self, sigma):
self.sigma = sigma
def __call__(self, sample):
from scipy.ndimage.filters import gaussian_filter
image = sample["image"]
np.nan_to_num(image, copy=False)
smoothed_image = gaussian_filter(image, sigma=self.sigma)
sample["image"] = smoothed_image
return sample
class ToTensor(object):
"""Convert image type to Tensor and diagnosis to diagnosis code"""
def __call__(self, image):
np.nan_to_num(image, copy=False)
image = image.astype(float)
return torch.from_numpy(image[np.newaxis, :]).float()
class MinMaxNormalization(object):
"""Normalizes a tensor between 0 and 1"""
def __call__(self, image):
return (image - image.min()) / (image.max() - image.min())
def get_transforms(mode, minmaxnormalization=True, data_augmentation=None):
"""
Outputs the transformations that will be applied to the dataset
Args:
mode (str): input used by the network. Chosen from ['image', 'patch', 'roi', 'slice'].
minmaxnormalization (bool): if True will perform MinMaxNormalization.
data_augmentation (List[str]): list of data augmentation performed on the training set.
Returns:
- container transforms.Compose including transforms to apply in train and evaluation mode.
- container transforms.Compose including transforms to apply in evaluation mode only.
"""
augmentation_dict = {
"Noise": RandomNoising(sigma=0.1),
"Erasing": transforms.RandomErasing(),
"CropPad": RandomCropPad(10),
"Smoothing": RandomSmoothing(),
"None": None,
}
if data_augmentation:
augmentation_list = [
augmentation_dict[augmentation] for augmentation in data_augmentation
]
else:
augmentation_list = []
if minmaxnormalization:
transformations_list = [MinMaxNormalization()]
else:
transformations_list = []
if mode == "slice":
trg_size = (224, 224)
transformations_list += [
transforms.ToPILImage(),
transforms.Resize(trg_size),
transforms.ToTensor(),
]
all_transformations = transforms.Compose(transformations_list)
train_transformations = transforms.Compose(augmentation_list)
return train_transformations, all_transformations
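# Hedged usage sketch (dummy tensor): composing the evaluation and training
# transform pipelines returned by get_transforms for image mode.
def _get_transforms_demo():
    train_tf, all_tf = get_transforms(
        "image", minmaxnormalization=True, data_augmentation=["Noise"]
    )
    image = torch.rand(1, 32, 32, 32)
    return train_tf(all_tf(image)).shape  # unchanged: torch.Size([1, 32, 32, 32])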
################################
# TSV files loaders
################################
def load_data_test(test_path, diagnoses_list, baseline=True, multi_cohort=False):
"""
Load data not managed by split_manager.
Args:
test_path (str): path to the test TSV files / split directory / TSV file for multi-cohort
diagnoses_list (List[str]): list of the diagnoses wanted in case of split_dir or multi-cohort
baseline (bool): If True baseline sessions only used (split_dir handling only).
multi_cohort (bool): If True considers multi-cohort setting.
"""
    # TODO: compute baseline sessions on-the-fly to manage the TSV file case
if multi_cohort:
if not test_path.endswith(".tsv"):
raise ValueError(
"If multi_cohort is given, the tsv_path argument should be a path to a TSV file."
)
else:
tsv_df = pd.read_csv(test_path, sep="\t")
check_multi_cohort_tsv(tsv_df, "labels")
test_df = pd.DataFrame()
found_diagnoses = set()
for idx in range(len(tsv_df)):
cohort_name = tsv_df.loc[idx, "cohort"]
cohort_path = tsv_df.loc[idx, "path"]
cohort_diagnoses = (
tsv_df.loc[idx, "diagnoses"].replace(" ", "").split(",")
)
if bool(set(cohort_diagnoses) & set(diagnoses_list)):
target_diagnoses = list(set(cohort_diagnoses) & set(diagnoses_list))
cohort_test_df = load_data_test_single(
cohort_path, target_diagnoses, baseline=baseline
)
cohort_test_df["cohort"] = cohort_name
test_df = pd.concat([test_df, cohort_test_df])
found_diagnoses = found_diagnoses | (
set(cohort_diagnoses) & set(diagnoses_list)
)
if found_diagnoses != set(diagnoses_list):
raise ValueError(
f"The diagnoses found in the multi cohort dataset {found_diagnoses} "
f"do not correspond to the diagnoses wanted {set(diagnoses_list)}."
)
test_df.reset_index(inplace=True, drop=True)
else:
if test_path.endswith(".tsv"):
tsv_df = pd.read_csv(test_path, sep="\t")
multi_col = {"cohort", "path"}
if multi_col.issubset(tsv_df.columns.values):
raise ValueError(
"To use multi-cohort framework, please add --multi_cohort flag."
)
test_df = load_data_test_single(test_path, diagnoses_list, baseline=baseline)
test_df["cohort"] = "single"
return test_df
def load_data_test_single(test_path, diagnoses_list, baseline=True):
if test_path.endswith(".tsv"):
test_df = pd.read_csv(test_path, sep="\t")
if "diagnosis" not in test_df.columns.values:
raise ValueError(
f"'diagnosis' column must be present in TSV file {test_path}."
)
test_df = test_df[test_df.diagnosis.isin(diagnoses_list)]
if len(test_df) == 0:
raise ValueError(
f"Diagnoses wanted {diagnoses_list} were not found in TSV file {test_path}."
)
return test_df
test_df = | pd.DataFrame() | pandas.DataFrame |
import sys
import os
import cobra.io
import libsbml
from tqdm import tqdm
import pandas as pd
import re
import memote
from bioservices.kegg import KEGG
import helper_functions as hf
'''
Usage: annotate_reactions.py <path_input_sbml-file> <path_output_sbml-file>
<path_outfile-tsv_missing_bigg> <path_memote-report>
Adds annotations to reactions.
'''
def main(args):
# console access
if len(args) != 5:
print(main.__doc__)
sys.exit(1)
infile = args[1]
outfile = args[2]
outfile_missing_bigg = args[3]
memote_report = args[4]
if not os.path.exists(infile):
print("[Error] %s : No such file." % infile)
sys.exit(1)
# create Readers and Writers
reader = libsbml.SBMLReader()
writer = libsbml.SBMLWriter()
# Read SBML File
doc = reader.readSBML(infile)
model = doc.getModel()
# Knowledge base preparation
bigg_db = pd.read_csv("Databases/BiGG/bigg_models_reactions.tsv", sep='\t').fillna("")
seed_db = | pd.read_csv("Databases/SEED/reactions.tsv", header=0, sep="\t") | pandas.read_csv |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
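# Hedged illustration (not one of the original tests): NaT behaves like NaN in
# comparisons, so both equality and ordering against NaT are elementwise False.
def _nat_comparison_sketch():
    ser = Series([Timestamp("2021-01-01"), NaT])
    assert (ser == NaT).tolist() == [False, False]
    assert (ser < NaT).tolist() == [False, False]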
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fall back to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons to be valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
# datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
# timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
# these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
# ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"), Timestamp("19900315")]),
Series([NaT, Timestamp("19900315")]),
Series([NaT, NaT], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
# multiplication
msg = "cannot perform .* with this index type"
with pytest.raises(TypeError, match=msg):
dt64_series * one
with pytest.raises(TypeError, match=msg):
one * dt64_series
# division
with pytest.raises(TypeError, match=msg):
dt64_series / one
with pytest.raises(TypeError, match=msg):
one / dt64_series
# TODO: parametrize over box
def test_dt64_series_add_intlike(self, tz_naive_fixture):
# GH#19123
tz = tz_naive_fixture
dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
assert_invalid_addsub_type(ser, 1, msg)
assert_invalid_addsub_type(ser, other, msg)
assert_invalid_addsub_type(ser, np.array(other), msg)
assert_invalid_addsub_type(ser, pd.Index(other), msg)
# -------------------------------------------------------------
# Timezone-Centric Tests
def test_operators_datetimelike_with_timezones(self):
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
assert td2._values.freq is None
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
td1[0] - dt1
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
with pytest.raises(TypeError, match=msg):
td2[0] - dt2
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "cannot (add|subtract)"
with pytest.raises(TypeError, match=msg):
td1 - dt1
with pytest.raises(TypeError, match=msg):
td2 - dt2
class TestDatetimeIndexArithmetic:
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_addsub_int(self, tz_naive_fixture, one):
# Variants of `one` for #19012
tz = tz_naive_fixture
rng = date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
msg = "Addition/subtraction of integers"
with pytest.raises(TypeError, match=msg):
rng + one
with pytest.raises(TypeError, match=msg):
rng += one
with pytest.raises(TypeError, match=msg):
rng - one
with pytest.raises(TypeError, match=msg):
rng -= one
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("freq", ["H", "D"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_non_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_no_freq(self, int_holder):
# GH#19959
dti = DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
other = int_holder([9, 4, -1])
msg = "|".join(
["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
)
assert_invalid_addsub_type(dti, other, msg)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
# add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
# iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = "cannot subtract .*TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
# DTA.__isub__ GH#43904
dta = dti._data.copy()
dta -= tdi
tm.assert_datetime_array_equal(dta, expected._data)
out = dti._data.copy()
np.subtract(out, tdi, out=out)
tm.assert_datetime_array_equal(out, expected._data)
msg = "cannot subtract .* from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract DatetimeArray from ndarray"
with pytest.raises(TypeError, match=msg):
tdi.values -= dti
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi._values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
@pytest.mark.parametrize(
"addend",
[
datetime(2011, 1, 1),
DatetimeIndex(["2011-01-01", "2011-01-02"]),
DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize("US/Eastern"),
np.datetime64("2011-01-01"),
Timestamp("2011-01-01"),
],
ids=lambda x: type(x).__name__,
)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):
# GH#9631
dti = DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize(tz)
dtarr = tm.box_expected(dti, box_with_array)
msg = "cannot add DatetimeArray and"
assert_cannot_add(dtarr, addend, msg)
# -------------------------------------------------------------
def test_dta_add_sub_index(self, tz_naive_fixture):
# Check that DatetimeArray defers to Index classes
dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
dta = dti.array
result = dta - dti
expected = dti - dti
tm.assert_index_equal(result, expected)
tdi = result
result = dta + tdi
expected = dti + tdi
tm.assert_index_equal(result, expected)
result = dta - tdi
expected = dti - tdi
tm.assert_index_equal(result, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimedeltaIndex (GH ...)
dti = date_range("20130101", periods=3)
dti_tz = date_range("20130101", periods=3).tz_localize("US/Eastern")
dti_tz2 = date_range("20130101", periods=3).tz_localize("UTC")
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
msg = "DatetimeArray subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dti_tz - dti
with pytest.raises(TypeError, match=msg):
dti - dti_tz
with pytest.raises(TypeError, match=msg):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range("20130101", periods=3)
dti2 = date_range("20130101", periods=4)
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"])
dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan])
expected = TimedeltaIndex(["1 days", np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------------
# TODO: Most of this block is moved from series or frame tests, needs
# cleanup, box-parametrization, and de-duplication
@pytest.mark.parametrize("op", [operator.add, operator.sub])
def test_timedelta64_equal_timedelta_supported_ops(self, op, box_with_array):
ser = Series(
[
Timestamp("20130301"),
Timestamp("20130228 23:00:00"),
Timestamp("20130228 22:00:00"),
Timestamp("20130228 21:00:00"),
]
)
obj = box_with_array(ser)
intervals = ["D", "h", "m", "s", "us"]
def timedelta64(*args):
# see casting notes in NumPy gh-12927
return np.sum(list(starmap(np.timedelta64, zip(args, intervals))))
for d, h, m, s, us in product(*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us)
lhs = op(obj, nptd)
rhs = op(obj, pytd)
tm.assert_equal(lhs, rhs)
def test_ops_nat_mixed_datetime64_timedelta64(self):
# GH#11349
timedelta_series = Series([NaT, Timedelta("1s")])
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")
# subtraction
tm.assert_series_equal(
datetime_series - single_nat_dtype_datetime, nat_series_dtype_timedelta
)
tm.assert_series_equal(
datetime_series - single_nat_dtype_timedelta, nat_series_dtype_timestamp
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + datetime_series, nat_series_dtype_timestamp
)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_datetime,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
msg = "cannot subtract a datelike"
with pytest.raises(TypeError, match=msg):
timedelta_series - single_nat_dtype_datetime
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_datetime,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_datetime + nat_series_dtype_timedelta,
nat_series_dtype_timestamp,
)
def test_ufunc_coercions(self):
idx = date_range("2011-01-01", periods=3, freq="2D", name="x")
delta = np.timedelta64(1, "D")
exp = date_range("2011-01-02", periods=3, freq="2D", name="x")
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
exp = date_range("2010-12-31", periods=3, freq="2D", name="x")
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
# When adding/subtracting an ndarray (which has no .freq), the result
# does not infer freq
idx = idx._with_freq(None)
delta = np.array(
[np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")]
)
exp = DatetimeIndex(["2011-01-02", "2011-01-05", "2011-01-08"], name="x")
for result in [idx + delta, np.add(idx, delta)]:
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
exp = DatetimeIndex(["2010-12-31", "2011-01-01", "2011-01-02"], name="x")
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
def test_dti_add_series(self, tz_naive_fixture, names):
# GH#13905
tz = tz_naive_fixture
index = DatetimeIndex(
["2016-06-28 05:30", "2016-06-28 05:31"], tz=tz, name=names[0]
)
ser = Series([Timedelta(seconds=5)] * 2, index=index, name=names[1])
expected = Series(index + Timedelta(seconds=5), index=index, name=names[2])
# passing name arg isn't enough when names[2] is None
expected.name = names[2]
assert expected.dtype == index.dtype
result = ser + index
tm.assert_series_equal(result, expected)
result2 = index + ser
tm.assert_series_equal(result2, expected)
expected = index + Timedelta(seconds=5)
result3 = ser.values + index
tm.assert_index_equal(result3, expected)
result4 = index + ser.values
tm.assert_index_equal(result4, expected)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
def test_dti_addsub_offset_arraylike(
self, tz_naive_fixture, names, op, index_or_series
):
# GH#18849, GH#19744
other_box = index_or_series
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz, name=names[0])
other = other_box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1])
xbox = get_upcast_box(dti, other)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dti, other)
expected = DatetimeIndex(
[op(dti[n], other[n]) for n in range(len(dti))], name=names[2], freq="infer"
)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
@pytest.mark.parametrize("other_box", [pd.Index, np.array])
def test_dti_addsub_object_arraylike(
self, tz_naive_fixture, box_with_array, other_box
):
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = other_box([pd.offsets.MonthEnd(), Timedelta(days=4)])
xbox = get_upcast_box(dtarr, other)
expected = DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture)
expected = tm.box_expected(expected, xbox)
with tm.assert_produces_warning(PerformanceWarning):
result = dtarr + other
tm.assert_equal(result, expected)
expected = DatetimeIndex(["2016-12-31", "2016-12-29"], tz=tz_naive_fixture)
expected = tm.box_expected(expected, xbox)
with tm.assert_produces_warning(PerformanceWarning):
result = dtarr - other
tm.assert_equal(result, expected)
@pytest.mark.parametrize("years", [-1, 0, 1])
@pytest.mark.parametrize("months", [-2, 0, 2])
def test_shift_months(years, months):
dti = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
| Timestamp("2000-02-29") | pandas.Timestamp |
from collections import Counter
from functools import partial
from math import sqrt
from pathlib import Path
import lightgbm as lgb
import numpy as np
import pandas as pd
import scipy as sp
from sklearn.decomposition import TruncatedSVD, NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from kaggle_petfinder.utils import is_script_running
ON_KAGGLE: bool = is_script_running()
DATA_ROOT = Path(
"../input/petfinder-adoption-prediction"
if ON_KAGGLE
else "../resources/petfinder-adoption-prediction"
)
EXTRA_DATA_ROOT = Path(
"../input/extract-image-features-from-pretrained-nn"
if ON_KAGGLE
else "../resources/extract-image-features-from-pretrained-nn"
)
# basic datasets
train = pd.read_csv(DATA_ROOT / "train/train.csv")
test = pd.read_csv(DATA_ROOT / "test/test.csv")
sample_submission = pd.read_csv(DATA_ROOT / "test/sample_submission.csv")
labels_breed = pd.read_csv(DATA_ROOT / "breed_labels.csv")
labels_state = | pd.read_csv(DATA_ROOT / "color_labels.csv") | pandas.read_csv |
from sklearn.cluster import KMeans
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, QuantileTransformer, PolynomialFeatures
from sklearn.metrics import mean_squared_error
from pandas import DataFrame, concat
class clasterisator():
def __init__(self, K, model=KMeans):
self.kmeans = model(n_clusters=K, random_state=42)
def fit(self, df):
self.kmeans.fit(df)
return self.kmeans
def predict(self, df):
return self.kmeans.predict(df)
def fit_predict(self, df):
return self.fit(df).predict(df)
class MultiColumnLabelEncoder():
def __init__(self, columns = None):
self.columns = columns
def fit(self, X):
encoders_dict = dict()
if self.columns:
for col in self.columns:
encoders_dict[col] = LabelEncoder().fit(X[col])
else:
for colname, values in X.iteritems():
encoders_dict[colname] = LabelEncoder().fit(values)
self.encoders_dict = encoders_dict
return self
def transform(self, X):
output = X.copy()
if self.columns:
for col in self.columns:
output[col] = self.encoders_dict[col].transform(output[col])
else:
for colname, values in output.iteritems():
output[colname] = self.encoders_dict[colname].transform(values)
return output
def fit_transform(self, X):
return self.fit(X).transform(X)
def inverse_transform(self, X):
output = X.copy()
if self.columns:
for col in self.columns:
output[col] = self.encoders_dict[col].inverse_transform(output[col])
else:
for colname, values in output.iteritems():
output[colname] = self.encoders_dict[colname].inverse_transform(values)
return output
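# --- Illustrative usage sketch (added for clarity; not part of the original script).
# Shows how MultiColumnLabelEncoder is intended to round-trip several categorical
# columns at once. The toy column names and values are assumptions for demonstration.
def _demo_multi_column_label_encoder():
    toy = DataFrame({'color': ['red', 'blue', 'red'], 'size': ['S', 'M', 'S']})
    mcle = MultiColumnLabelEncoder(columns=['color', 'size'])
    encoded = mcle.fit_transform(toy)           # each column replaced by integer codes
    restored = mcle.inverse_transform(encoded)  # recovers the original string labels
    return encoded, restored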
class SimpleDataTransformer():
def __init__(self, numerical_cols=None, categorical_cols=None, numerical_transformer=None, degree=1):
self.transformers_dict = dict()
self.degree = degree
if numerical_cols:
self.numerical_cols = numerical_cols
else:
self.numerical_cols = None
if categorical_cols:
self.categorical_cols = categorical_cols
self.mcle = MultiColumnLabelEncoder(categorical_cols)
else:
self.categorical_cols = None
if numerical_transformer is None:
self.numerical_transformer = QuantileTransformer(1000, 'normal')
elif isinstance(numerical_transformer, type):
self.numerical_transformer = numerical_transformer()
else:
self.numerical_transformer = numerical_transformer
def fit(self, X):
if self.numerical_cols:
for col in self.numerical_cols:
self.transformers_dict[col] = self.numerical_transformer.fit(X[[col]])
self.polynomial = PolynomialFeatures(self.degree).fit(X[self.numerical_cols])
if self.categorical_cols:
mcle_out = X[self.categorical_cols]#self.mcle.fit_transform(X[self.categorical_cols])
self.ohe = OneHotEncoder(sparse=False, categories='auto', handle_unknown='ignore').fit(mcle_out)
#for col in self.categorical_cols:
# self.transformers_dict[col] = OneHotEncoder.fit(mcle_out[[col]])
return self
def transform(self, X, drop_cat=True):
output = X.copy()
if self.numerical_cols:
for col in self.numerical_cols:
output[col] = self.transformers_dict[col].transform(output[[col]])
polynom_out = self.polynomial.fit_transform(output[self.numerical_cols])
polynom_cols = ['num%d'%i for i in range(polynom_out.shape[1])]
polynom_out_df = DataFrame(polynom_out, columns=polynom_cols)
output = concat((output, polynom_out_df), axis=1)
output.drop(columns=self.numerical_cols, inplace=True)
if self.categorical_cols:
mcle_out = X[self.categorical_cols]#self.mcle.transform(X[self.categorical_cols])
ohe_out = self.ohe.transform(mcle_out)
if drop_cat:
output.drop(columns=self.categorical_cols, inplace=True)
catcol_names = ['ohe%d'%i for i in range(ohe_out.shape[1])]
ohe_out_df = DataFrame(ohe_out, columns=catcol_names)
output = | concat((output, ohe_out_df), axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
"""RandomForest.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/19ZaS9axtwR_5R4KIlm1xD5FAqwYXzwU5
"""
import os
import time
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.model_selection import ShuffleSplit, KFold
# train = pd.read_csv("C:/Users/swl3guest/CE4032/datasets/train_modified_v2.csv")
# test = pd.read_csv("C:/Users/swl3guest/CE4032/datasets/test_modified_v2.csv")
train_df = | pd.read_csv("../datasets/modified_train.csv") | pandas.read_csv |
import pandas as pd
filtered_path = "filtered/ambiguous/filtered_"
def convert(data_path):
data=[]
with open(data_path, 'r',encoding='utf-8-sig') as f_input:
for line in f_input:
data.append(list(line.strip().split('\t')))
df=pd.DataFrame(data[1:],columns=data[0])
return df.loc[:,['question', 'sentence', 'label']]
def get_ambi():
dp = filtered_path + "1/cartography_variability_0.25/QNLI/train.tsv"
df_ans = convert(dp)
for i in range(2,11):
dp2 = filtered_path + str(i) + "/cartography_variability_0.25/QNLI/train.tsv"
df2 = convert(dp2)
print(df2)
df_ans = | pd.merge(df_ans, df2, how='inner',on=['question','sentence','label']) | pandas.merge |
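        # Successive inner merges keep only the rows shared by every filtered set,
        # i.e. df_ans ends up as the intersection of the ambiguous subsets.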
import pandas as pd
#데이터프레임 만들기
df1 = pd.DataFrame({'a': ['a0','a1','a2','a3'],
'b':['b0','b1','b2','b3'],
'c':['c0','c1','c2','c3'] },
index= [0,1,2,3])
df2 = pd.DataFrame( {'a':['a2','a3','a4','a5'],
'b':['b2','b3','b4','b5'],
'c':['c2','c3','c4','c5']},
index=[2,3,4,5])
print(df1.head())
# print('\n')
# print(df2.head())
result1 = pd.concat([df1,df2])
# print(result1,'\n')
result2 = pd.concat([df1,df2], ignore_index= True)
# print(result2)
result3 = pd.concat([df1,df2],axis=1)
# print(result3,'\n')
result3_in = pd.concat([df1,df2],axis=1,join='inner')
# print(result3_in)
sr1 = pd.Series(['e0','e1','e2','e3'], name='e')
sr2 = pd.Series(['f0','f1','f2'],name='f',index=[3,4,5])
sr3 = | pd.Series(['g0','g1','g2','g3'],name='g') | pandas.Series |
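# (Illustrative continuation, not part of the original tutorial.) The Series defined
# above can be combined with the DataFrames through pd.concat as well:
result4 = pd.concat([df1, sr1], axis=1)   # sr1 becomes a new column 'e' of df1
# print(result4)
result5 = pd.concat([sr1, sr3], axis=1)   # two Series side by side -> DataFrame
# print(result5)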
#!/usr/bin/env python3
"""
Aim of this script is to add
event data from a csv file
to the database.
"""
import argparse
import os
import sqlite3
import pandas as pd
class MapToAttribute:
"""
Returns an attribute of the old series.
"""
def __init__(self, attribute):
self._attribute = attribute
def __call__(self, old_series):
return old_series[self._attribute]
class MapToOneValue:
"""
Returns just one value.
"""
def __init__(self, value):
self._value = value
def __call__(self, old_series):
return self._value
class MapToOneValueIfAttributeIsNone:
"""
Returns the value from the attribute or
(if none) a default value.
"""
def __init__(self, attribute, value):
self._attribute = attribute
self._value = value
def __call__(self, old_series):
possible_value = old_series[self._attribute]
if possible_value is not None:
return possible_value
return self._value
class MapToNone:
"""
Returns None.
"""
def __call__(self, old_series):
return None
class MapToAttributeWithPrefix:
"""
Returns the attribute of a series with
a prefix.
The prefix is the same for all the values.
"""
def __init__(self, prefix, attribute):
self._prefix = prefix
self._attribute = attribute
def __call__(self, old_series):
return self._prefix + str(old_series[self._attribute])
class MapToAttributeAndMapValues:
"""
Returns the attribute of the series.
If this has a specific value, it will
    be mapped to another value depending
    on the content of the lookup table.
"""
def __init__(self, attribute, lookup_table):
self._attribute = attribute
self._lookup_table = lookup_table
def __call__(self, old_series):
old_value = old_series[self._attribute]
if old_value in self._lookup_table.keys():
return self._lookup_table[old_value]
return old_value
MAPPINGS = {
"eventID": MapToAttributeWithPrefix(prefix="peru_", attribute="rupid"),
"Agency": MapToAttribute("Agency"),
"Identifier": MapToAttributeWithPrefix(prefix="peru_", attribute="rupid"),
"year": MapToOneValueIfAttributeIsNone(
attribute="Unnamed: 12", value=3000
),
"month": MapToAttribute("Unnamed: 13"),
"day": MapToAttribute("Unnamed: 14"),
"minute": MapToNone(),
"second": MapToNone(),
"timeUncertainty": MapToNone(),
"longitude": MapToAttribute("centroid_lon"),
"longitudeUncertainty": MapToNone(),
"latitude": MapToAttribute("centroid_lat"),
"latitudeUncertainty": MapToNone(),
"horizontalUncertainty": MapToNone(),
"minHorizontalUncertainty": MapToNone(),
"maxHorizontalUncertainty": MapToNone(),
"azimuthMaxHorizontalUncertainty": MapToNone(),
"depth": MapToAttribute("centroid_depth"),
"depthUncertainty": MapToNone(),
"magnitude": MapToAttribute("mag"),
"magnitudeUncertainty": MapToNone(),
"rake": MapToAttribute("rake"),
"rakeUncertainty": MapToNone(),
"dip": MapToAttribute("dip"),
"dipUncertainty": MapToNone(),
"strike": MapToAttribute("strike"),
"strikeUncertainty": MapToNone(),
# we have historic in the new series,
# but just observed in the old dataset
# we can map them afterwards with sql
# but I'm still a bit unsure about
# the values here
"type": MapToAttributeAndMapValues("type", {"historic": "observed"}),
"probability": MapToOneValue(0.1),
}
def map_to_existing_names(series):
"""
Maps the names of the old series to those
expected by the database.
"""
new_series = | pd.Series() | pandas.Series |
"""get_lineups.py
Usage:
get_lineups.py <f_data_config>
Arguments:
<f_data_config> example ''lineups.yaml''
Example:
get_lineups.py lineups.yaml
"""
from __future__ import print_function
import pandas as pd
from docopt import docopt
import yaml
from tqdm import tqdm
import lineup.config as CONFIG
from lineup.data.utils import parse_nba_play, roster
class MatchupException(Exception):
pass
def _cols(data_config):
"""
Get column names
"""
away_ids = ["away_%s" % id for id in list(range(5))]
home_ids = ["home_%s" % id for id in list(range(5))]
if data_config['time_seperator'] == 'min':
cols = ['game', 'season', 'home_team', 'away_team', 'starting_min', 'end_min']
else:
cols = ['game', 'season', 'home_team', 'away_team', 'starting_sec', 'end_sec']
cols.extend(home_ids)
cols.extend(away_ids)
return cols
def _performance_vector(team_matchup_pbp, team):
"""
Get performance vector
"""
fga = len(team_matchup_pbp.loc[team_matchup_pbp.is_fga == True, :])
fta = len(team_matchup_pbp.loc[team_matchup_pbp.is_fta == True, :])
fgm = len(team_matchup_pbp.loc[team_matchup_pbp.is_fgm == True, :])
fga_2 = len(team_matchup_pbp.loc[(team_matchup_pbp.is_fga == True) & (team_matchup_pbp.is_three == False), :])
fgm_2 = len(team_matchup_pbp.loc[(team_matchup_pbp.is_fgm == True) & (team_matchup_pbp.is_three == False), :])
fga_3 = len(team_matchup_pbp.loc[(team_matchup_pbp.is_fga == True) & (team_matchup_pbp.is_three == True), :])
fgm_3 = len(team_matchup_pbp.loc[(team_matchup_pbp.is_fgm == True) & (team_matchup_pbp.is_three == True), :])
ast = len(team_matchup_pbp.loc[team_matchup_pbp.is_assist == True, :])
blk = len(team_matchup_pbp.loc[team_matchup_pbp.is_block == True, :])
pf = len(team_matchup_pbp.loc[team_matchup_pbp.is_pf == True, :])
reb = len(team_matchup_pbp.loc[team_matchup_pbp.is_reb == True, :])
dreb = len(team_matchup_pbp.loc[team_matchup_pbp.is_dreb == True, :])
oreb = len(team_matchup_pbp.loc[team_matchup_pbp.is_oreb == True, :])
to = len(team_matchup_pbp.loc[team_matchup_pbp.is_to == True, :])
pts = fgm_2 * 2 + fgm_3 * 3
if fga > 0:
pct = (1.0 * fgm)/fga
else:
pct = 0.0
if fga_2 > 0:
pct_2 = (1.0 * fgm_2) / fga_2
else:
pct_2 = 0.0
if fga_3 > 0:
pct_3 = (1.0 * fgm_3) / fga_3
else:
pct_3 = 0.0
cols = ['fga', 'fta', 'fgm', 'fga_2', 'fgm_2', 'fga_3', 'fgm_3', 'ast', 'blk', 'pf', 'reb', 'dreb', 'oreb', 'to', 'pts', 'pct', 'pct_2', 'pct_3']
cols = ['%s_%s' % (col, team) for col in cols]
data = [fga, fta, fgm, fga_2, fgm_2, fga_3, fgm_3, ast, blk, pf, reb, dreb, oreb, to, pts, pct, pct_2, pct_3]
performance = pd.DataFrame(data=[data], columns=cols)
return performance
def _performance(matchup, pbp):
"""
Get performance for single matchup
"""
starting_min = matchup['starting_min']
end_min = matchup['end_min']
matchup_pbp = pbp.loc[(pbp.minute >= starting_min) & (pbp.minute <= end_min), :]
# get totals for home
team_matchup_pbp = matchup_pbp.loc[matchup_pbp.home == True, :]
performance_home = _performance_vector(team_matchup_pbp, 'home')
# get totals for visitor
team_matchup_pbp = matchup_pbp.loc[matchup_pbp.home == False, :]
performance_away = _performance_vector(team_matchup_pbp, 'visitor')
performance = pd.concat([performance_home, performance_away], axis=1)
return performance
def _matchup_performances(matchups, pbp):
"""
Create performance vectors for each of the matchups
Parameters
----------
matchups: pandas.DataFrame
time in/out of lineup matchups
pbp: pandas.DataFrame
events in game with timestamps
Returns
-------
matchups_performance: pandas.DataFrame
performance vectors
"""
performances = pd.DataFrame()
for ind, matchup in matchups.iterrows():
performance = _performance(matchup, pbp)
if not performance.empty:
if (int(performance['pts_home']) - int(performance['pts_visitor'])) > 0:
performance['outcome'] = 1
elif (int(performance['pts_home']) - int(performance['pts_visitor'])) <= 0:
performance['outcome'] = -1
performances = performances.append(performance)
performances = pd.concat([matchups, performances], axis=1)
return performances
def _matchup(lineups, game, season, cols, time_start, time_end):
"""
Get lineup at time t
"""
    lineup_ids = [str(i) for i in range(5)]  # use a list, not a one-shot map iterator (it is indexed twice below)
if lineups.empty:
raise MatchupException('no lineups at time')
if not len(lineups) == 2:
raise MatchupException('too many lineups at time')
start_time = lineups.loc[:, time_start].max()
end_time = lineups.loc[:, time_end].min()
for ind, lineup in lineups.iterrows():
home_id = lineup['game'][-3:]
if lineup['team'] == home_id:
home_team = lineup['team']
home_lineup = lineup
home_players = home_lineup[lineup_ids].values
else:
away_team = lineup['team']
away_lineup = lineup
away_players = away_lineup[lineup_ids].values
data = [game, season, home_team, away_team, start_time, end_time]
data.extend(home_players)
data.extend(away_players)
matchup = pd.DataFrame(data=[data], columns=cols)
return matchup
def _pbp(game):
"""
Scrape basketball reference game play-by-play by ID
Args:
game_ID (str): bball reference gameID
Returns: None
pickles pbp DataFrame to data directory
"""
url = ('http://www.basketball-reference.com/boxscores/pbp/{ID}.html').format(ID=game)
pbp = | pd.read_html(url) | pandas.read_html |
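    # Note added for clarity (not from the original source): pd.read_html returns a
    # *list* of DataFrames, one per HTML table found on the page, so the actual
    # play-by-play table still has to be selected from that list. The index used
    # below is an assumption for illustration only:
    #   pbp_df = pbp[0]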
import os
from typing import Tuple
from numpy.core.defchararray import array
import pandas as pd
import numpy as np
from pandas.core.frame import DataFrame
from scipy.sparse import csr_matrix, save_npz
import hashlib
import random
import hmac
from pathlib import Path
from .config import secrets, parameters
import logging
# class ReadConfig(object):
# """
# Config Class
# """
# def __init__(self, conf_dict: dict = None):
# """
# Initialize config class.
# Args:
# conf_dict (dict, optional): Config dictionary containig parameters. Defaults to None.
# """
# if conf_dict is None:
# conf_dict = {}
# self._conf_dict = conf_dict
# self._param = self._load_conf_dict()
# def get_conf_dict(self, conf_dict: dict =None):
# """
# Get config dictionary.
# Args:
# conf_dict (dict, optional): config dictionary. Defaults to None.
# """
# if conf_dict is None:
# conf_dict = {}
# self._conf_dict = self._conf_dict if conf_dict is None else conf_dict
# return self._load_conf_dict()
# def _load_conf_dict(self):
# """
# Load config dictionary.
# """
# tmp_dict = self._conf_dict
# return tmp_dict
def load_config(args: dict):
"""
Load config from file.
Args:
args (dict): argparser arguments .
"""
config_file = Path(args["config_file"])
if config_file.is_file() is False:
print("Config file does not exist.")
return quit()
if parameters.get_config_file() != config_file:
parameters.__init__(config_path=config_file)
return print("Read new config file.")
def load_key(args: dict):
"""
Load key from file.
Args:
args (dict): argparser arguments .
"""
key_file = Path(args["key_file"])
if key_file.is_file() is False:
print("Key file does not exist.")
return quit()
if secrets.get_key_file() != key_file:
secrets.__init__(key_path=key_file)
return print("Read new key file.")
def read_input_file(path: str) -> DataFrame:
"""
Read csv file and return dataframe.
Args:
path (str): [input path to csv file
Returns:
DataFrame: pandas dataframe from csv file
"""
data_file = Path(path)
if data_file.is_file() is False:
print(f"{data_file} does not exist.")
return quit()
df = read_csv(data_file)
input_file_len = len(df)
if input_file_len == 0:
print("Structure input is empty. Please provide a suitable structure file.")
return quit()
return df
def save_df_as_csv(
output_dir: Path, df: DataFrame, name: str = None, col_to_keep: list = None
):
"""
Save dataframe to csv file.
Args:
df (DataFrame): provided dataframe
output_dir (Path): path to output folder
name (str, optional): filename. Defaults to None.
col_to_keep (list, optional): columns to write into the output file. Defaults to None.
"""
if col_to_keep is None:
col_to_keep = df.columns
path = output_dir / f"{name}.csv"
df.to_csv(path, sep=",", columns=col_to_keep, index=False)
def sanity_check_assay_type(T0: DataFrame):
std_types = ["ADME", "PANEL", "OTHER", "AUX_HTS"]
assay_types_counts = T0["assay_type"].value_counts()
found_types = T0["assay_type"].unique().tolist()
# look for non standard assay type names
for at in found_types:
if at not in std_types:
print(f"WARNING: found non standard assay type name: {at}")
# look for missing assay types
if len(assay_types_counts.index) < 4:
found_types = T0["assay_type"].unique().tolist()
diff_types = list(set(std_types) - set(found_types))
print(f"INFO: missing assay type: {diff_types}")
if len(assay_types_counts.index) > 4:
exit(f"ERROR : too many assay type, found : {assay_types_counts.index}")
# look for empty assay type values
if T0.loc[T0["assay_type"].isna()].shape[0]:
exit("ERROR: assay types has missing values")
def sanity_check_assay_sizes(T0, T1):
t0_assays = T0.input_assay_id.unique()
t1_assays = T1.input_assay_id.unique()
num_t0_assay = t0_assays.shape[0]
num_t1_assay = t1_assays.shape[0]
if num_t1_assay < num_t0_assay:
print("WARNING : T1 does not have all input_assay_id present in T0")
assays_not_in_t1 = T0.loc[~T0["input_assay_id"].isin(t1_assays)]
print("Assay not in T1:")
print(assays_not_in_t1)
if num_t0_assay < num_t1_assay:
print("ERROR: some input_assay_id present in T1 are not present in T0")
assays_not_in_t0 = T1.loc[~T1["input_assay_id"].isin(t0_assays)]
print("Assay not in T0:")
print(assays_not_in_t0)
if num_t0_assay != num_t1_assay:
print(f"ERROR : number input_assay_id differs")
print(f"WARNING : T0 input_assay_id count: {num_t0_assay}")
print(f"WARNING : T1 input_assay_id count: {num_t1_assay}")
exit(
"Processing will be stopped. Please check for consistency in input_assay_id in T0 and T1."
)
def sanity_check_compound_sizes(T1, T2):
t1_compounds = T1.input_compound_id.unique()
t2_compounds = T2.input_compound_id.unique()
num_t2_compounds = t2_compounds.shape[0]
num_t1_compounds = t1_compounds.shape[0]
if num_t1_compounds < num_t2_compounds:
print("WARNING : T1 does not have all input_compound_id present in T2")
compounds_not_in_t1 = T2.loc[~T2["input_compound_id"].isin(t1_compounds)]
print("Compounds not in T1:")
print(compounds_not_in_t1)
if num_t2_compounds < num_t1_compounds:
print("**** ERROR: some input_compound_id present in T1 are not present in T2")
compounds_not_in_t2 = T1.loc[~T1["input_compound_id"].isin(t2_compounds)]
print("Compounds not in T2:")
print(compounds_not_in_t2)
if num_t1_compounds != num_t2_compounds:
print(f"WARNING : T2 input_compound_id count: {num_t2_compounds}")
print(f"WARNING : T1 input_compound_id count: {num_t1_compounds}")
print(f"ERROR : number input_compound_id differs!")
print(
"Processing will be stopped. Please check for consistency in input_compound_id in T1 and T2."
)
exit()
def sanity_check_uniqueness(df, colname, filename):
    # verify input_compound_id duplicates
duplic = df[colname].duplicated(keep=False)
if duplic.sum() > 0:
print(
f"Found {duplic.sum()} records with *{colname}* present multiple times in {filename}."
)
print(df[duplic])
df_nrows = df.shape[0]
df_id = df[colname].nunique()
if df_nrows != df_id:
exit(
f"Processing will be stopped. {colname} is not unique. Please check for duplicates in {filename}."
)
def save_mtx_as_npy(matrix: csr_matrix, output_dir: Path, name: str = None):
"""
Save csr matrix as npy matrix.
Args:
matrix (csr_matrix): input csr matrix
output_dir (Path): output path
name (str, optional): filename. Defaults to None.
"""
if "T11_fold_vector" in name:
path = output_dir / f"{name}.npy"
np.save(path, matrix)
else:
# path_npy = output_dir / f'{name}.npy'
# np.save(path_npy, matrix)
path = output_dir / f"{name}.npz"
save_npz(path, matrix)
def concat_desc_folds(df_desc: DataFrame, df_folds: DataFrame) -> DataFrame:
"""
Concatenate descriptor and fold dataframes.
Args:
df_desc (DataFrame): descriptor dataframe
df_folds (DataFrame): fold dataframe
Returns:
DataFrame: concatenated dataframe
"""
df_out = | pd.concat([df_desc, df_folds], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
from collections import defaultdict
import pandas as pd
from ahocorasick import Automaton
from ..parsers import parse_fasta, parse_fastq
from ..utils import revcomp, expand_degenerate_bases
def init_automaton(scheme_fasta):
"""Initialize Aho-Corasick Automaton with kmers from SNV scheme fasta
Args:
scheme_fasta: SNV scheme fasta file path
Returns:
Aho-Corasick Automaton with kmers loaded
"""
A = Automaton()
for header, sequence in parse_fasta(scheme_fasta):
kmer_list = expand_degenerate_bases(sequence)
for seq in kmer_list:
A.add_word(seq, (header, seq, False))
A.add_word(revcomp(seq), (header, seq, True))
A.make_automaton()
return A
def find_in_fasta(automaton: Automaton, fasta: str) -> pd.DataFrame:
"""Find scheme kmers in input fasta file
Args:
automaton: Aho-Corasick Automaton with scheme SNV target kmers loaded
fasta: Input fasta path
Returns:
Dataframe with any matches found in input fasta file
"""
res = []
for contig_header, sequence in parse_fasta(fasta):
for idx, (kmername, kmer_seq, is_revcomp) in automaton.iter(sequence):
res.append((kmername, kmer_seq, is_revcomp, contig_header, idx))
columns = ['kmername', 'seq', 'is_revcomp', 'contig_id', 'match_index']
return pd.DataFrame(res, columns=columns)
def find_in_fastqs(automaton: Automaton, *fastqs):
"""Find scheme kmers in input fastq files
Args:
automaton: Aho-Corasick Automaton with scheme SNV target kmers loaded
fastqs: Input fastq file paths
Returns:
Dataframe with any matches found in input fastq files
"""
kmer_seq_counts = defaultdict(int)
for fastq in fastqs:
for _, sequence in parse_fastq(fastq):
for idx, (_, kmer_seq, _) in automaton.iter(sequence):
kmer_seq_counts[kmer_seq] += 1
res = []
for kmer_seq, freq in kmer_seq_counts.items():
kmername, sequence, _ = automaton.get(kmer_seq)
res.append((kmername, kmer_seq, freq))
return | pd.DataFrame(res, columns=['kmername', 'seq', 'freq']) | pandas.DataFrame |
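# --- Hedged usage sketch (added for illustration; not part of the original module).
# Ties the helpers together: build the automaton once from the SNV scheme fasta,
# then scan an assembly and paired reads with it. The file paths are assumptions.
def _demo_kmer_search():
    automaton = init_automaton("scheme.fasta")
    contig_hits = find_in_fasta(automaton, "assembly.fasta")  # per-contig kmer matches
    read_hits = find_in_fastqs(automaton, "reads_R1.fastq", "reads_R2.fastq")  # kmer frequencies
    return contig_hits, read_hits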
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Exports subset of an IBEIS database to a new IBEIS database
"""
from __future__ import absolute_import, division, print_function
import utool as ut
from ibeis.other import ibsfuncs
from ibeis import constants as const
(print, rrr, profile) = ut.inject2(__name__)
def check_merge(ibs_src, ibs_dst):
aid_list1 = ibs_src.get_valid_aids()
gid_list1 = ibs_src.get_annot_gids(aid_list1)
gname_list1 = ibs_src.get_image_uris(gid_list1)
image_uuid_list1 = ibs_src.get_image_uuids(gid_list1)
gid_list2 = ibs_dst.get_image_gids_from_uuid(image_uuid_list1)
gname_list2 = ibs_dst.get_image_uris(gid_list2)
# Asserts
ut.assert_all_not_None(gid_list1, 'gid_list1')
ut.assert_all_not_None(gid_list2, 'gid_list2')
ut.assert_lists_eq(gname_list1, gname_list2, 'faild gname')
# Image UUIDS should be consistent between databases
image_uuid_list2 = ibs_dst.get_image_uuids(gid_list2)
ut.assert_lists_eq(image_uuid_list1, image_uuid_list2, 'failed uuid')
aids_list1 = ibs_src.get_image_aids(gid_list1)
aids_list2 = ibs_dst.get_image_aids(gid_list2)
avuuids_list1 = ibs_src.unflat_map(
ibs_src.get_annot_visual_uuids, aids_list1)
avuuids_list2 = ibs_dst.unflat_map(
ibs_dst.get_annot_visual_uuids, aids_list2)
issubset_list = [set(avuuids1).issubset(set(avuuids2))
for avuuids1, avuuids2 in zip(avuuids_list1, avuuids_list2)]
assert all(issubset_list), 'ibs_src must be a subset of ibs_dst: issubset_list=%r' % (
issubset_list,)
#aids_depth1 = ut.depth_profile(aids_list1)
#aids_depth2 = ut.depth_profile(aids_list2)
# depth might not be true if ibs_dst is not empty
#ut.assert_lists_eq(aids_depth1, aids_depth2, 'failed depth')
print('Merge seems ok...')
def merge_databases(ibs_src, ibs_dst, rowid_subsets=None, localize_images=True):
"""
New way of merging using the non-hacky sql table merge.
    However, it only works due to major hacks.
FIXME: annotmatch table
CommandLine:
python -m ibeis --test-merge_databases
python -m ibeis merge_databases:0 --db1 LF_OPTIMIZADAS_NI_V_E --db2 LF_ALL
python -m ibeis merge_databases:0 --db1 LF_WEST_POINT_OPTIMIZADAS --db2 LF_ALL
python -m ibeis merge_databases:0 --db1 PZ_Master0 --db2 PZ_Master1
python -m ibeis merge_databases:0 --db1 NNP_Master3 --db2 PZ_Master1
python -m ibeis merge_databases:0 --db1 GZ_ALL --db2 GZ_Master1
python -m ibeis merge_databases:0 --db1 lewa_grevys --db2 GZ_Master1
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.dbio.export_subset import * # NOQA
>>> import ibeis
>>> db1 = ut.get_argval('--db1', str, default=None)
>>> db2 = ut.get_argval('--db2', str, default=None)
>>> dbdir1 = ut.get_argval('--dbdir1', str, default=None)
>>> dbdir2 = ut.get_argval('--dbdir2', str, default=None)
>>> delete_ibsdir = False
>>> # Check for test mode instead of script mode
>>> if db1 is None and db2 is None and dbdir1 is None and dbdir2 is None:
... db1 = 'testdb1'
... dbdir2 = 'testdb_dst'
... delete_ibsdir = True
>>> # Open the source and destination database
>>> assert db1 is not None or dbdir1 is not None
>>> assert db2 is not None or dbdir2 is not None
>>> ibs_src = ibeis.opendb(db=db1, dbdir=dbdir1)
>>> ibs_dst = ibeis.opendb(db=db2, dbdir=dbdir2, allow_newdir=True,
>>> delete_ibsdir=delete_ibsdir)
>>> merge_databases(ibs_src, ibs_dst)
>>> check_merge(ibs_src, ibs_dst)
>>> ibs_dst.print_dbinfo()
"""
# TODO: ensure images are localized
# otherwise this wont work
print('BEGIN MERGE OF %r into %r' %
(ibs_src.get_dbname(), ibs_dst.get_dbname()))
# ibs_src.run_integrity_checks()
# ibs_dst.run_integrity_checks()
ibs_dst.update_annot_visual_uuids(ibs_dst.get_valid_aids())
ibs_src.update_annot_visual_uuids(ibs_src.get_valid_aids())
ibs_src.ensure_contributor_rowids()
ibs_dst.ensure_contributor_rowids()
ibs_src.fix_invalid_annotmatches()
ibs_dst.fix_invalid_annotmatches()
# Hack move of the external data
if rowid_subsets is not None and const.IMAGE_TABLE in rowid_subsets:
gid_list = rowid_subsets[const.IMAGE_TABLE]
else:
gid_list = ibs_src.get_valid_gids()
imgpath_list = ibs_src.get_image_paths(gid_list)
dst_imgdir = ibs_dst.get_imgdir()
if localize_images:
ut.copy_files_to(imgpath_list, dst_imgdir, overwrite=False, verbose=True)
ignore_tables = [
'lblannot', 'lblimage', 'image_lblimage_relationship',
'annotation_lblannot_relationship', 'keys'
]
# ignore_tables += [
# 'contributors', 'party', 'configs'
# ]
# TODO: Fix database merge to allow merging tables with more than one superkey
# and no primary superkey.
error_tables = [
'imageset_image_relationship',
'annotgroup_annotation_relationship',
'annotmatch',
]
ignore_tables += error_tables
ibs_dst.db.merge_databases_new(
ibs_src.db, ignore_tables=ignore_tables, rowid_subsets=rowid_subsets)
print('FINISHED MERGE %r into %r' %
(ibs_src.get_dbname(), ibs_dst.get_dbname()))
def make_new_dbpath(ibs, id_label, id_list):
"""
Creates a new database path unique to the exported subset of ids.
"""
import ibeis
tag_hash = ut.hashstr_arr(id_list, hashlen=8, alphabet=ut.ALPHABET_27)
base_fmtstr = ibs.get_dbname() + '_' + id_label + 's=' + \
tag_hash.replace('(', '_').replace(')', '_') + '_%d'
dpath = ibeis.get_workdir()
new_dbpath = ut.non_existing_path(base_fmtstr, dpath)
return new_dbpath
def export_names(ibs, nid_list, new_dbpath=None):
r"""
exports a subset of names and other required info
Args:
ibs (IBEISController): ibeis controller object
nid_list (list):
CommandLine:
python -m ibeis.dbio.export_subset --test-export_names
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.dbio.export_subset import * # NOQA
>>> import ibeis
>>> # build test data
>>> ibs = ibeis.opendb('testdb2')
>>> ibs.delete_empty_nids()
>>> nid_list = ibs._get_all_known_nids()[0:2]
>>> # execute function
>>> result = export_names(ibs, nid_list)
>>> # verify results
>>> print(result)
"""
print('Exporting name nid_list=%r' % (nid_list,))
if new_dbpath is None:
new_dbpath = make_new_dbpath(ibs, 'nid', nid_list)
aid_list = ut.flatten(ibs.get_name_aids(nid_list))
gid_list = ut.unique_unordered(ibs.get_annot_gids(aid_list))
return export_data(ibs, gid_list, aid_list, nid_list, new_dbpath=new_dbpath)
def find_gid_list(ibs, min_count=500, ensure_annots=False):
import random
gid_list = ibs.get_valid_gids()
reviewed_list = ibs.get_image_reviewed(gid_list)
if ensure_annots:
aids_list = ibs.get_image_aids(gid_list)
reviewed_list = [
0 if len(aids) == 0 else reviewed
for aids, reviewed in zip(aids_list, reviewed_list)
]
# Filter by reviewed
gid_list = [
gid
for gid, reviewed in zip(gid_list, reviewed_list)
if reviewed == 1
]
if len(gid_list) < min_count:
return None
while len(gid_list) > min_count:
index = random.randint(0, len(gid_list) - 1)
del gid_list[index]
return gid_list
def __export_reviewed_subset(ibs, min_count=500, ensure_annots=False):
from os.path import join
gid_list = find_gid_list(
ibs, min_count=min_count, ensure_annots=ensure_annots)
if gid_list is None:
return None
new_dbpath = '/' + join('Datasets', 'BACKGROUND', ibs.dbname)
print('Exporting to %r with %r images' % (new_dbpath, len(gid_list), ))
return export_images(ibs, gid_list, new_dbpath=new_dbpath)
def export_images(ibs, gid_list, new_dbpath=None):
"""
exports a subset of images and other required info
TODO:
PZ_Master1 needs to backproject information back on to NNP_Master3 and PZ_Master0
Args:
ibs (IBEISController): ibeis controller object
gid_list (list): list of annotation rowids
new_dbpath (None): (default = None)
Returns:
str: new_dbpath
"""
print('Exporting image gid_list=%r' % (gid_list,))
if new_dbpath is None:
new_dbpath = make_new_dbpath(ibs, 'gid', gid_list)
aid_list = ut.unique_unordered(ut.flatten(ibs.get_image_aids(gid_list)))
nid_list = ut.unique_unordered(ibs.get_annot_nids(aid_list))
return export_data(ibs, gid_list, aid_list, nid_list, new_dbpath=new_dbpath)
def export_annots(ibs, aid_list, new_dbpath=None):
r"""
exports a subset of annotations and other required info
TODO:
PZ_Master1 needs to backproject information back on to NNP_Master3 and
PZ_Master0
Args:
ibs (IBEISController): ibeis controller object
aid_list (list): list of annotation rowids
new_dbpath (None): (default = None)
Returns:
str: new_dbpath
CommandLine:
python -m ibeis.dbio.export_subset export_annots
python -m ibeis.dbio.export_subset export_annots --db NNP_Master3 \
-a viewpoint_compare --nocache-aid --verbtd --new_dbpath=PZ_ViewPoints
python -m ibeis.expt.experiment_helpers get_annotcfg_list:0 \
--db NNP_Master3 \
-a viewpoint_compare --nocache-aid --verbtd
python -m ibeis.expt.experiment_helpers get_annotcfg_list:0 --db NNP_Master3 \
-a viewpoint_compare --nocache-aid --verbtd
python -m ibeis.expt.experiment_helpers get_annotcfg_list:0 --db NNP_Master3 \
-a default:aids=all,is_known=True,view_pername=#primary>0&#primary1>0,per_name=4,size=200
python -m ibeis.expt.experiment_helpers get_annotcfg_list:0 --db NNP_Master3 \
-a default:aids=all,is_known=True,view_pername='#primary>0&#primary1>0',per_name=4,size=200 --acfginfo
python -m ibeis.expt.experiment_helpers get_annotcfg_list:0 --db PZ_Master1 \
-a default:has_any=photobomb --acfginfo
Example:
>>> # SCRIPT
>>> from ibeis.dbio.export_subset import * # NOQA
>>> import ibeis
>>> from ibeis.expt import experiment_helpers
>>> ibs = ibeis.opendb(defaultdb='NNP_Master3')
>>> acfg_name_list = ut.get_argval(('--aidcfg', '--acfg', '-a'), type_=list, default=[''])
>>> acfg_list, expanded_aids_list = experiment_helpers.get_annotcfg_list(ibs, acfg_name_list)
>>> aid_list = expanded_aids_list[0][0]
>>> ibs.print_annot_stats(aid_list, viewcode_isect=True, per_image=True)
>>> # Expand to get all annots in each chosen image
>>> gid_list = ut.unique_ordered(ibs.get_annot_gids(aid_list))
>>> aid_list = ut.flatten(ibs.get_image_aids(gid_list))
>>> ibs.print_annot_stats(aid_list, viewcode_isect=True, per_image=True)
>>> new_dbpath = ut.get_argval('--new-dbpath', default='PZ_ViewPoints')
>>> new_dbpath = export_annots(ibs, aid_list, new_dbpath)
>>> result = ('new_dbpath = %s' % (str(new_dbpath),))
>>> print(result)
"""
print('Exporting annotations aid_list=%r' % (aid_list,))
if new_dbpath is None:
new_dbpath = make_new_dbpath(ibs, 'aid', aid_list)
gid_list = ut.unique(ibs.get_annot_gids(aid_list))
nid_list = ut.unique(ibs.get_annot_nids(aid_list))
return export_data(ibs, gid_list, aid_list, nid_list, new_dbpath=new_dbpath)
def export_data(ibs, gid_list, aid_list, nid_list, new_dbpath=None):
"""
exports a subset of data and other required info
Args:
ibs (IBEISController): ibeis controller object
gid_list (list): list of image rowids
aid_list (list): list of annotation rowids
nid_list (list): list of name rowids
imgsetid_list (list): list of imageset rowids
gsgrid_list (list): list of imageset-image pairs rowids
new_dbpath (None): (default = None)
Returns:
str: new_dbpath
"""
import ibeis
imgsetid_list = ut.unique_unordered(ut.flatten(ibs.get_image_imgsetids(gid_list)))
gsgrid_list = ut.unique_unordered(
ut.flatten(ibs.get_image_gsgrids(gid_list)))
# TODO: write SQL query to do this
am_rowids = ibs._get_all_annotmatch_rowids()
flags1_list = [
aid in set(aid_list) for aid in ibs.get_annotmatch_aid1(am_rowids)]
flags2_list = [
aid in set(aid_list) for aid in ibs.get_annotmatch_aid2(am_rowids)]
flag_list = ut.and_lists(flags1_list, flags2_list)
am_rowids = ut.compress(am_rowids, flag_list)
#am_rowids = ibs.get_valid_aids(ibs.get_valid_aids())
rowid_subsets = {
const.ANNOTATION_TABLE: aid_list,
const.NAME_TABLE: nid_list,
const.IMAGE_TABLE: gid_list,
const.ANNOTMATCH_TABLE: am_rowids,
const.GSG_RELATION_TABLE: gsgrid_list,
const.IMAGESET_TABLE: imgsetid_list,
}
ibs_dst = ibeis.opendb(dbdir=new_dbpath, allow_newdir=True)
# Main merge driver
merge_databases(ibs, ibs_dst, rowid_subsets=rowid_subsets)
print('Exported to %r' % (new_dbpath,))
return new_dbpath
def slow_merge_test():
r"""
CommandLine:
python -m ibeis.dbio.export_subset --test-slow_merge_test
Example:
>>> # SLOW_DOCTEST
>>> from ibeis.dbio.export_subset import * # NOQA
>>> result = slow_merge_test()
>>> print(result)
"""
from ibeis.dbio import export_subset
import ibeis
ibs1 = ibeis.opendb('testdb2')
ibs1.fix_invalid_annotmatches()
ibs_dst = ibeis.opendb(
db='testdb_dst2', allow_newdir=True, delete_ibsdir=True)
export_subset.merge_databases(ibs1, ibs_dst)
#ibs_src = ibs1
check_merge(ibs1, ibs_dst)
ibs2 = ibeis.opendb('testdb1')
ibs1.print_dbinfo()
ibs2.print_dbinfo()
ibs_dst.print_dbinfo()
ibs_dst.print_dbinfo()
export_subset.merge_databases(ibs2, ibs_dst)
#ibs_src = ibs2
check_merge(ibs2, ibs_dst)
ibs3 = ibeis.opendb('PZ_MTEST')
export_subset.merge_databases(ibs3, ibs_dst)
#ibs_src = ibs2
check_merge(ibs3, ibs_dst)
ibs_dst.print_dbinfo()
return ibs_dst
#ibs_src.print_annotation_table(exclude_columns=['annot_verts',
#'annot_semantic_uuid', 'annot_note', 'annot_parent_rowid',
#'annot_exemplar_flag,'])
# ibs_dst.print_annotation_table()
def fix_bidirectional_annotmatch(ibs):
import ibeis
infr = ibeis.AnnotInference(ibs=ibs, aids='all', verbose=5)
infr.initialize_graph()
annots = ibs.annots()
aid_to_nid = ut.dzip(annots.aids, annots.nids)
# Delete bidirectional annotmatches
annotmatch = ibs.db.get_table_as_pandas('annotmatch')
df = annotmatch.set_index(['annot_rowid1', 'annot_rowid2'])
# Find entires that have both directions
pairs1 = annotmatch[['annot_rowid1', 'annot_rowid2']].values
f_edges = {tuple(p) for p in pairs1}
b_edges = {tuple(p[::-1]) for p in pairs1}
isect_edges = {tuple(sorted(p)) for p in b_edges.intersection(f_edges)}
print('Found %d bidirectional edges' % len(isect_edges))
isect_edges1 = list(isect_edges)
isect_edges2 = [p[::-1] for p in isect_edges]
import pandas as pd
extra_ = {}
fixme_edges = []
d1 = df.loc[isect_edges1].reset_index(drop=False)
d2 = df.loc[isect_edges2].reset_index(drop=False)
flags = d1['annotmatch_evidence_decision'] != d2['annotmatch_evidence_decision']
from ibeis.tag_funcs import _parse_tags
for f, r1, r2 in zip(flags, d1.iterrows(), d2.iterrows()):
v1, v2 = r1[1], r2[1]
aid1 = v1['annot_rowid1']
aid2 = v1['annot_rowid2']
truth_real = (ibs.const.EVIDENCE_DECISION.POSITIVE
if aid_to_nid[aid1] == aid_to_nid[aid2] else
ibs.const.EVIDENCE_DECISION.NEGATIVE)
truth1 = v1['annotmatch_evidence_decision']
truth2 = v2['annotmatch_evidence_decision']
t1 = _parse_tags(v1['annotmatch_tag_text'])
t2 = _parse_tags(v2['annotmatch_tag_text'])
newtag = ut.union_ordered(t1, t2)
fixme_flag = False
if not pd.isnull(truth1):
if truth_real != truth1:
fixme_flag = True
if not pd.isnull(truth2):
if truth_real != truth2:
fixme_flag = True
if fixme_flag:
print('--')
print('t1, t2 = %r, %r' % (t1, t2))
print('newtag = %r' % (newtag,))
print('truth_real, truth1, truth2 = %r, %r, %r' % (
truth_real, truth1, truth2,))
print('aid1, aid2 = %r, %r' % (aid1, aid2))
fixme_edges.append(tuple(sorted((aid1, aid2))))
else:
extra_[(aid1, aid2)] = (truth_real, newtag)
if len(fixme_edges) > 0:
# need to manually fix these edges
fix_infr = ibeis.AnnotInference.from_pairs(fixme_edges, ibs=ibs, verbose=5)
feedback = fix_infr.read_ibeis_annotmatch_feedback(only_existing_edges=True)
infr = fix_infr
fix_infr.external_feedback = feedback
fix_infr.apply_feedback_edges()
fix_infr.start_qt_interface(loop=False)
# DELETE OLD EDGES TWICE
ams = ibs.get_annotmatch_rowid_from_edges(fixme_edges)
ibs.delete_annotmatch(ams)
ams = ibs.get_annotmatch_rowid_from_edges(fixme_edges)
ibs.delete_annotmatch(ams)
# MANUALLY CALL THIS ONCE FINISHED
# TO ONLY CHANGE ANNOTMATCH EDGES
infr.write_ibeis_staging_feedback()
infr.write_ibeis_annotmatch_feedback()
# extra_.update(custom_)
new_pairs = extra_.keys()
new_truths = ut.take_column(ut.dict_take(extra_, new_pairs), 0)
new_tags = ut.take_column(ut.dict_take(extra_, new_pairs), 1)
new_tag_texts = [';'.join(t) for t in new_tags]
aids1, aids2 = ut.listT(new_pairs)
# Delete the old
ibs.delete_annotmatch((d1['annotmatch_rowid'].values.tolist() +
d2['annotmatch_rowid'].values.tolist()))
# Add the new
ams = ibs.add_annotmatch_undirected(aids1, aids2)
ibs.set_annotmatch_evidence_decision(ams, new_truths)
ibs.set_annotmatch_tag_text(ams, new_tag_texts)
if False:
import guitool as gt
gt.ensure_qapp()
ut.qtensure()
from ibeis.gui import inspect_gui
inspect_gui.show_vsone_tuner(ibs, aid1, aid2)
def fix_annotmatch_pzmaster1():
"""
PZ_Master1 had annotmatch rowids that did not agree with the current name
labeling. Looking at the inconsistencies in the graph interface was too
cumbersome, because over 3000 annots were incorrectly grouped together.
This function deletes any annotmatch rowid that is not consistent with the
current labeling so we can go forward with using the new AnnotInference
object
"""
import ibeis
ibs = ibeis.opendb('PZ_Master1')
infr = ibeis.AnnotInference(ibs=ibs, aids=ibs.get_valid_aids(), verbose=5)
infr.initialize_graph()
annots = ibs.annots()
aid_to_nid = ut.dzip(annots.aids, annots.nids)
if False:
infr.reset_feedback()
infr.ensure_mst()
infr.apply_feedback_edges()
infr.relabel_using_reviews()
infr.start_qt_interface()
# Get annotmatch rowids that agree with current labeling
if False:
annotmatch = ibs.db.get_table_as_pandas('annotmatch')
import pandas as pd
flags1 = pd.isnull(annotmatch['annotmatch_evidence_decision'])
flags2 = annotmatch['annotmatch_tag_text'] == ''
bad_part = annotmatch[flags1 & flags2]
rowids = bad_part.index.tolist()
ibs.delete_annotmatch(rowids)
if False:
# Delete bidirectional annotmatches
annotmatch = ibs.db.get_table_as_pandas('annotmatch')
df = annotmatch.set_index(['annot_rowid1', 'annot_rowid2'])
        # Find entries that have both directions
pairs1 = annotmatch[['annot_rowid1', 'annot_rowid2']].values
f_edges = {tuple(p) for p in pairs1}
b_edges = {tuple(p[::-1]) for p in pairs1}
isect_edges = {tuple(sorted(p)) for p in b_edges.intersection(f_edges)}
isect_edges1 = list(isect_edges)
isect_edges2 = [p[::-1] for p in isect_edges]
# cols = ['annotmatch_evidence_decision', 'annotmatch_tag_text']
import pandas as pd
custom_ = {
(559, 4909): (False, ['photobomb']),
(7918, 8041): (False, ['photobomb']),
(6634, 6754): (False, ['photobomb']),
(3707, 3727): (False, ['photobomb']),
(86, 103): (False, ['photobomb']),
}
extra_ = {
}
fixme_edges = []
d1 = df.loc[isect_edges1].reset_index(drop=False)
d2 = df.loc[isect_edges2].reset_index(drop=False)
flags = d1['annotmatch_evidence_decision'] != d2['annotmatch_evidence_decision']
from ibeis.tag_funcs import _parse_tags
for f, r1, r2 in zip(flags, d1.iterrows(), d2.iterrows()):
v1, v2 = r1[1], r2[1]
aid1 = v1['annot_rowid1']
aid2 = v1['annot_rowid2']
truth_real = (ibs.const.EVIDENCE_DECISION.POSITIVE
if aid_to_nid[aid1] == aid_to_nid[aid2] else
ibs.const.EVIDENCE_DECISION.NEGATIVE)
truth1 = v1['annotmatch_evidence_decision']
truth2 = v2['annotmatch_evidence_decision']
t1 = _parse_tags(v1['annotmatch_tag_text'])
t2 = _parse_tags(v2['annotmatch_tag_text'])
newtag = ut.union_ordered(t1, t2)
if (aid1, aid2) in custom_:
continue
fixme_flag = False
if not pd.isnull(truth1):
if truth_real != truth1:
fixme_flag = True
if not | pd.isnull(truth2) | pandas.isnull |
import numpy as np
import pandas as pd
import sys,os
#from random import choices
import random
from datetime import datetime as dt
import json
from ast import literal_eval
import time
from scipy import stats
#from joblib import Parallel, delayed
from libs.lib_job_thread import *
import logging
class SimX:
def __init__(self,*args):
self.platform=args[0]
self.domain=args[1]
self.scenario=args[2]
self.model_identifier=args[3]
self.pool=ThreadPool(32)
self.sim_outputs=[]
self.data_level_degree_list=None
self.data_delay_level_degree_root_list=None
self.data_user_list=None
self.data_user_followers=None
self.data_acts_list=None
self.data_acts_list_indexed=None
self.data_level_content_list=None
# self.logger = logging.getLogger(__name__)
# logPath='./logs/run_simulation_prob_HYDRA_%s_%s_S20001.log'%(self.platform,self.domain)
# handler = logging.FileHandler(logPath,mode='w+')
# handler.setLevel(logging.INFO)
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# handler.setFormatter(formatter)
# self.logger.addHandler(handler)
def set_metadata(self):#,degree_list,delay_list):
print("[Degree by level] loading..")
self.data_level_degree_list=pd.read_pickle("./metadata/probs/%s-%s/degree_cond_level.pkl.gz"%(self.platform,self.domain))
print("[Delay sequences by size] loading..")
self.data_delay_level_degree_root_list= | pd.read_pickle("./metadata/probs/%s-%s/delay_cond_size.pkl.gz"%(self.platform,self.domain)) | pandas.read_pickle |
import os
import gc
import re
import json
import pandas as pd
import datetime
import xlrd
import numpy as np
from werkzeug.utils import secure_filename
from src.helper import reader, unicode
from . import entity
HASH_TYPE_REGEX = {
re.compile(r"^[a-f0-9]{32}(:.+)?$", re.IGNORECASE): ["MD5", "MD4", "MD2", "Double MD5",
"LM", "RIPEMD-128", "Haval-128",
"Tiger-128", "Skein-256(128)", "Skein-512(128",
"Lotus Notes/Domino 5", "Skype", "ZipMonster",
"PrestaShop"],
re.compile(r"^[a-f0-9]{64}(:.+)?$", re.IGNORECASE): ["SHA-256", "RIPEMD-256", "SHA3-256", "Haval-256",
"GOST R 34.11-94", "GOST CryptoPro S-Box",
"Skein-256", "Skein-512(256)", "Ventrilo"],
re.compile(r"^[a-f0-9]{128}(:.+)?$", re.IGNORECASE): ["SHA-512", "Whirlpool", "Salsa10",
"Salsa20", "SHA3-512", "Skein-512",
"Skein-1024(512)"],
re.compile(r"^[a-f0-9]{56}$", re.IGNORECASE): ["SHA-224", "Haval-224", "SHA3-224",
"Skein-256(224)", "Skein-512(224)"],
re.compile(r"^[a-f0-9]{40}(:.+)?$", re.IGNORECASE): ["SHA-1", "Double SHA-1", "RIPEMD-160",
"Haval-160", "Tiger-160", "HAS-160",
"LinkedIn", "Skein-256(160)", "Skein-512(160)",
"MangoWeb Enhanced CMS"],
re.compile(r"^[a-f0-9]{96}$", re.IGNORECASE): ["SHA-384", "SHA3-384", "Skein-512(384)",
"Skein-1024(384)"],
re.compile(r"^[a-f0-9]{16}$", re.IGNORECASE): ["MySQL323", "DES(Oracle)", "Half MD5",
"Oracle 7-10g", "FNV-164", "CRC-64"],
re.compile(r"^\*[a-f0-9]{40}$", re.IGNORECASE): ["MySQL5.x", "MySQL4.1"],
re.compile(r"^[a-f0-9]{48}$", re.IGNORECASE): ["Haval-192", "Tiger-192", "SHA-1(Oracle)",
"XSHA (v10.4 - v10.6)"]
}
class Cleaning:
UPLOAD_FOLDER = 'temp/'
JSON_FORMAT = "json"
def __init__(self, file_byte, fileid: str, file_name: str):
if file_byte is None:
raise Exception("payload must required")
secure_name = secure_filename(file_name)
self.file_path = f"{self.UPLOAD_FOLDER}{fileid}-{secure_name}"
self.obtain_hash_type(fileid)
with open(self.file_path, "wb") as f:
f.write(file_byte)
f.close()
# self.byte = file_byte
self.file_id = fileid
self.file_name = file_name
def obtain_hash_type(self, check_hash):
found = False
for algorithm in HASH_TYPE_REGEX:
if algorithm.match(check_hash):
found = True
self.enumerate_hash_types(HASH_TYPE_REGEX[algorithm])
if found is False:
raise Exception("Unable to verify hash type")
@staticmethod
def enumerate_hash_types(items):
print("{} possible hash types found..".format(len(items)))
count = 0
for item in items:
count += 1
if count <= 3:
pass
# print("\033[92m[*] Most likely possible hash type: {}\033[0m".format(item))
else:
print("\033[33m[*] Least likely possible hash type: {}\033[0m".format(item))
def Excel(self) -> entity.Response:
results, sheets = [], []
try:
xl = | pd.ExcelFile(self.file_path) | pandas.ExcelFile |
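            # Illustrative continuation (not from the original source): an ExcelFile
            # is typically iterated over its sheet_names and parsed sheet by sheet,
            # filling the `results`/`sheets` lists declared above, e.g.:
            #   for sheet in xl.sheet_names:
            #       sheets.append(sheet)
            #       results.append(xl.parse(sheet))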
"""
data_prep.py - Extract data from date range and create models
Usage:
data_prep.py [options]
data_prep.py -h | --help
Options:
-h --help Show this message.
--output_folder=OUT Output folder for the data and reports to be saved.
"""
from __future__ import print_function
import pandas as pd
import numpy as np
import re
import os
import docopt
import sys
import pickle
import os.path
from datetime import datetime, date, time
from dateutil.parser import parse
from time import strftime
import pyarrow
import json
import git
from tqdm import tqdm
from covidify.config import REPO, TMP_FOLDER, TMP_GIT, DATA
args = docopt.docopt(__doc__)
out = args['--output_folder']
def clean_sheet_names(new_ranges):
indices = []
# Remove all sheets that dont have a numeric header
numeric_sheets = [x for x in new_ranges if re.search(r'\d', x)]
return numeric_sheets
def clone_repo(TMP_FOLDER, REPO):
print('Cloning Data Repo...')
git.Git(TMP_FOLDER).clone(REPO)
# Create Tmp Folder
if not os.path.isdir(TMP_FOLDER):
print('Creating folder...')
print('...', TMP_FOLDER)
os.mkdir(TMP_FOLDER)
#Check if repo exists
#git pull if it does
if not os.path.isdir(TMP_GIT):
clone_repo(TMP_FOLDER, REPO)
else:
try:
print('git pull from', REPO)
rep = git.Repo(TMP_GIT)
rep.remotes.origin.pull()
except:
print('Could not pull from', REPO)
sys.exit()
sheets = os.listdir(DATA)
# Clean the result to the sheet tabs we want
print('Getting sheets...')
cleaned_sheets = clean_sheet_names(sheets)
def clean_last_updated(last_update):
'''
convert date and time in YYYYMMDD HMS format
'''
date = parse(str(last_update).split(' ')[0]).strftime("%Y-%m-%d")
time = parse(str(last_update).split(' ')[1]).strftime('%H:%M:%S')
parsed_date = str(date) + ' ' + str(time)
return parsed_date
def get_date(last_update):
return parse(str(last_update).split(' ')[0]).strftime("%Y-%m-%d")
def get_csv_date(file):
return get_date(file.split('.')[0] + ' ')
def drop_duplicates(df_raw):
'''
Take the max date value for each province for a given date
'''
days_list = []
for datetime in df_raw.date.unique():
tmp_df = df_raw[df_raw.date == datetime]
tmp_df = tmp_df.sort_values(['Last Update']).drop_duplicates('Province/State', keep='last')
days_list.append(tmp_df)
return days_list
keep_cols = ['Confirmed', 'Country/Region', 'Deaths', 'Last Update', 'Province/State', 'Recovered']
numeric_cols = ['Confirmed', 'Deaths', 'Recovered']
def get_data(cleaned_sheets):
all_csv = []
# Import all CSV's
for file in tqdm(sorted(sheets), desc='... importing data: '):
if 'csv' in file:
# print('...', file)
tmp_df = pd.read_csv(os.path.join(DATA, file), index_col=None, header=0, parse_dates=['Last Update'])
tmp_df = tmp_df[keep_cols]
tmp_df[numeric_cols] = tmp_df[numeric_cols].fillna(0)
tmp_df[numeric_cols] = tmp_df[numeric_cols].astype(int)
tmp_df['Province/State'].fillna(tmp_df['Country/Region'], inplace=True) #If no region given, fill it with country
tmp_df['Last Update'] = tmp_df['Last Update'].apply(clean_last_updated)
tmp_df['date'] = tmp_df['Last Update'].apply(get_date)
tmp_df['file_date'] = get_csv_date(file)
all_csv.append(tmp_df)
# concatenate all csv's into one df
df_raw = pd.concat(all_csv, axis=0, ignore_index=True, sort=True)
df_raw = df_raw.sort_values(by=['Last Update'])
frames = drop_duplicates(df_raw)
tmp = pd.concat(frames, axis=0, ignore_index=True, sort=True)
return tmp
df = get_data(cleaned_sheets)
# Now that we have all the data we now need to clean it
# - Fill null values
# - remore suspected values
# - change column names
def clean_data(tmp_df):
if 'Demised' in tmp_df.columns:
tmp_df.rename(columns={'Demised':'Deaths'}, inplace=True)
if 'Country/Region' in tmp_df.columns:
tmp_df.rename(columns={'Country/Region':'country'}, inplace=True)
if 'Province/State' in tmp_df.columns:
tmp_df.rename(columns={'Province/State':'province'}, inplace=True)
if 'Last Update' in tmp_df.columns:
tmp_df.rename(columns={'Last Update':'datetime'}, inplace=True)
if 'Suspected' in tmp_df.columns:
tmp_df = tmp_df.drop(columns='Suspected')
for col in tmp_df.columns:
tmp_df[col] = tmp_df[col].fillna(0)
#Lower case all col names
tmp_df.columns = map(str.lower, tmp_df.columns)
return tmp_df
df = clean_data(df)
# sheets need to be sorted by date value
print('Sorting by datetime...')
current_date = str(datetime.date(datetime.now()))
if df.date.max() == current_date:
df = df[df.date != df.date.max()]
else:
df = df[df.date != current_date]
df = df.sort_values('datetime')
'''
Get the difference of the sum totals for each
date and plot them on a trendline graph
'''
def get_new_cases(tmp, col):
diff_list = []
tmp_df_list = []
df = tmp.copy()
for i, day in enumerate(df.sort_values('date').date.unique()):
tmp_df = df[df.date == day]
tmp_df_list.append(tmp_df[col].sum())
if i == 0:
diff_list.append(tmp_df[col].sum())
else:
diff_list.append(tmp_df[col].sum() - tmp_df_list[i-1])
return diff_list
def get_moving_average(tmp, col):
df = tmp.copy()
return df[col].rolling(window=2).mean()
def get_exp_moving_average(tmp, col):
df = tmp.copy()
return df[col].ewm(span=2, adjust=True).mean()
print('Calculating dataframe for new cases...')
daily_cases_df = | pd.DataFrame([]) | pandas.DataFrame |
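# Illustrative sketch (not part of the original script): the helpers above are meant
# to populate daily_cases_df with per-day new-case counts and smoothed trendlines.
# The column names used here mirror the cleaned dataframe but are assumptions:
#   daily_cases_df['new_confirmed_cases'] = get_new_cases(df, 'confirmed')
#   daily_cases_df['confirmed_MA'] = get_moving_average(daily_cases_df, 'new_confirmed_cases')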
# This script assumes taht the freesurfer csv for the BANC data has already been generated
import os
import pandas as pd
import numpy as np
import pdb
import seaborn as sns
sns.set()
import matplotlib.pyplot as plt
from BayOptPy.helperfunctions import get_paths, get_data, drop_missing_features
def visualise_missing_features_banc(freesurfer_df_banc, save_path):
'''
Visualise features that are only present in the BANC dataset
'''
# just as a proof of concept check that the mean thickness between rh and lh are
# different
if sum(freesurfer_df_banc['lh_MeanThickness_thickness'] ==
freesurfer_df_banc['rh_MeanThickness_thickness']) == len(freesurfer_df_banc):
print('LH and RH MeanThickness are identical')
else:
print('LH and RH MeanThickness are NOT identical')
# Plot the missing data eTIV1 and eTIV and BrainSegVolNoVent
# Check if both columns are identical
data = [freesurfer_df_banc['eTIV'], freesurfer_df_banc['eTIV.1']]
# If this equality is true it means that all the data is equal
if sum(freesurfer_df_banc['eTIV']==freesurfer_df_banc['eTIV.1']) == len(freesurfer_df_banc):
print('eTIV is identical to eTIV.1')
# plt.figure()
# plt.boxplot(data)
# plt.title('eTIV')
    # plt.savefig(os.path.join(save_path, 'diagnostics', 'boxplot_eTIV.png'))
# plt.close()
# Check BrainSegVolNoVent is identical to BrainSegVolNoVent.1
if sum(freesurfer_df_banc['BrainSegVolNotVent']==freesurfer_df_banc['BrainSegVolNotVent.1'])== len(freesurfer_df_banc):
print('BrainSegVolNotVent is identical to BrainSegVolNotVent.1')
if sum(freesurfer_df_banc['BrainSegVolNotVent']==freesurfer_df_banc['BrainSegVolNotVent.2'])== len(freesurfer_df_banc):
print('BrainSegVolNotVent is identical to BrainSegVolNotVent.2')
def dict_rename_columns(freesurfer_banc_columns):
# Create a dictionary matching the columns from the ukbiobank to the BANC
# dataset and rename ukbiobank columns.
lh_thickness = [
# left hemisphere
'lh_thk_bankssts', 'lh_thk_caudalanteriorcingulate',
'lh_thk_caudalmiddlefrontal', 'lh_thk_cuneus',
'lh_thk_entorhinal', 'lh_thk_fusiform',
'lh_thk_inferiorparietal', 'lh_thk_inferiortemporal',
'lh_thk_isthmus', 'lh_thk_lateraloccipital',
'lh_thk_lateralorbitofrontal', 'lh_thk_lingual',
'lh_thk_medialorbitofrontal',
'lh_thk_middletemporal', 'lh_thk_parahippocampal',
'lh_thk_paracentral', 'lh_thk_parsopercularis',
'lh_thk_parsorbitalis', 'lh_thk_parstriangularis',
'lh_thk_pericalcarine', 'lh_thk_postcentral',
'lh_thk_posteriorcingulate', 'lh_thk_precentral',
'lh_thk_precuneus', 'lh_thk_rostralanteriorcingulate',
'lh_thk_rostralmiddlefrontal', 'lh_thk_superiorfrontal',
'lh_thk_superiorparietal', 'lh_thk_superiortemporal',
'lh_thk_supramarginal', 'lh_thk_frontalpole',
'lh_thk_temporalpole', 'lh_thk_transversetemporal',
'lh_thk_insula'
]
rh_thickness = [
# right hemisphere
'rh_thk_bankssts', 'rh_thk_caudalanteriorcingulate',
'rh_thk_caudalmiddlefrontal', 'rh_thk_cuneus',
'rh_thk_entorhinal', 'rh_thk_fusiform',
'rh_thk_inferiorparietal', 'rh_thk_inferiortemporal',
'rh_thk_isthmus', 'rh_thk_lateraloccipital',
'rh_thk_lateralorbitofrontal', 'rh_thk_lingual',
'rh_thk_medialorbitofrontal', 'rh_thk_middletemporal',
'rh_thk_parahippocampal', 'rh_thk_paracentral',
'rh_thk_parsopercularis', 'rh_thk_parsorbitalis',
'rh_thk_parstriangularis', 'rh_thk_pericalcarine',
'rh_thk_postcentral', 'rh_thk_posteriorcingulate',
'rh_thk_precentral', 'rh_thk_precuneus',
'rh_thk_rostralanteriorcingulate',
'rh_thk_rostralmiddlefrontal', 'rh_thk_superiorfrontal',
'rh_thk_superiorparietal', 'rh_thk_superiortemporal',
'rh_thk_supramarginal', 'rh_thk_frontalpole',
'rh_thk_temporalpole', 'rh_thk_transversetemporal',
'rh_thk_insula'
]
biobank_columns = lh_thickness + rh_thickness + [
        # Additional features
'Left.Cerebellum.White.Matter', 'Left.Cerebellum.Cortex',
'Left.Thalamus.Proper', 'Left.Caudate', 'Left.Putamen',
'Left.Pallidum', 'X3rd.Ventricle', 'X4th.Ventricle',
'Brain.Stem', 'Left.Hippocampus', 'Left.Amygdala', 'CSF',
'Left.Accumbens.area', 'Left.VentralDC', 'Left.vessel',
'Right.Cerebellum.White.Matter',
'Right.Cerebellum.Cortex', 'Right.Thalamus.Proper',
'Right.Caudate', 'Right.Putamen', 'Right.Pallidum',
'Right.Hippocampus', 'Right.Amygdala', 'Right.Accumbens.area',
'Right.VentralDC', 'Right.vessel',
# the same
'CC_Posterior',
'CC_Mid.Posterior', 'CC_Central', 'CC_Mid_Anterior',
'CC_Anterior',
'lhCortexVol', 'rhCortexVol',
'CortexVol',
        # Missing in the biobank; we have
'lhCorticalWhiteMatterVol', 'rhCorticalWhiteMatterVol',
'CorticalWhiteMatterVol',
        # 'lhCerebralWhiteMatterVol',
# 'rhCerebralWhiteMatterVol', 'CerebralWhiteMatterVol',
'SubCortGrayVol', 'TotalGrayVol', 'SupraTentorialVol',
        'SupraTentorialVolNotVent', 'SupraTentorialVolNotVentVox',
'MaskVol', 'BrainSegVol.to.eTIV', 'MaskVol.to.eTIV',
        'EstimatedTotalIntraCranialVol'
]
    # Check if you have all the features from the BANC dataset
assert(len(biobank_columns) == len(freesurfer_banc_columns))
renameCols = dict(zip(biobank_columns, freesurfer_banc_columns))
return lh_thickness, rh_thickness, renameCols
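# The mapping runs UKB-name -> BANC-name (keys from biobank_columns, values
# from freesurfer_banc_columns), so it can be handed straight to
# DataFrame.rename(columns=renameCols), as done further down.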
# Load both datasets
debug = False
resamplefactor = 1
save_path = os.path.join('/code/BayOptPy', 'freesurfer_preprocess')
project_ukbio_wd, project_data_ukbio, _ = get_paths(debug, 'UKBIO_freesurf')
project_banc_wd, project_data_banc, _ = get_paths(debug, 'BANC_freesurf')
_, _, freesurfer_df_banc = get_data(project_data_banc, 'BANC_freesurf', debug,
project_banc_wd, resamplefactor,
raw=True, analysis=None)
_, _, freesurfer_df_ukbio = get_data(project_data_ukbio, 'UKBIO_freesurf', debug,
project_ukbio_wd, resamplefactor,
raw=True, analysis=None)
# check the columns between both datasets
# First print the size of each dataset
print('shape of the banc dataset; shape of the ukbio dataset')
print(freesurfer_df_banc.shape, freesurfer_df_ukbio.shape)
#-----------------------------------------------------------------------------
# BANC
#-----------------------------------------------------------------------------
# Check and visualise features that are only present in the BANC dataset
visualise_missing_features_banc(freesurfer_df_banc, save_path)
# remove the columns from the banc dataset that are not present in the BIOBANK
freesurfer_df_banc_clean = drop_missing_features(freesurfer_df_banc)
# Save the lh_MeanThickness and rh_MeanThickness to compare them to the
# calculated values afterwards
lh_MeanThickness_banc = freesurfer_df_banc['lh_MeanThickness_thickness']
rh_MeanThickness_banc = freesurfer_df_banc['rh_MeanThickness_thickness']
# Also drop the rh_MeanThickness_thickness and lh_MeanThickness_thickness, for
# now
freesurfer_df_banc_clean = freesurfer_df_banc_clean.drop(columns=['rh_MeanThickness_thickness',
'lh_MeanThickness_thickness'])
# Save the columns as variables
freesurfer_banc_columns = list(freesurfer_df_banc_clean)
freesurfer_ukbio_columns = list(freesurfer_df_ukbio)
#-----------------------------------------------------------------------------
# UKBIO
#-----------------------------------------------------------------------------
# Create mapping between the BANC and UKBIO to rename it
lh_thickness, rh_thickness, renameCols = dict_rename_columns(freesurfer_banc_columns)
freesurfer_df_ukbio.rename(columns=renameCols, inplace=True)
# Keep only the columns that both datasets have in common
df_ukbio = freesurfer_df_ukbio[freesurfer_banc_columns]
df_ukbio.index.name = 'id'
#-----------------------------------------------------------------------------
# UKBIO vs BANC
#-----------------------------------------------------------------------------
variables_of_interest = ['lhCerebralWhiteMatterVol', 'rhCerebralWhiteMatterVol',
'CerebralWhiteMatterVol']
# Generate a few plots to make sure you are comparing the same things among both
# datasets
df_ukbio['dataset'] = 'ukbio'
freesurfer_df_banc_clean['dataset'] = 'banc'
df_combined = | pd.concat((df_ukbio, freesurfer_df_banc_clean)) | pandas.concat |
import hw1.speech as s
import numpy as np
import pandas as pd
def top_tfidf_feats(row, features, top_n=25):
''' Get top n tfidf values in a document and return them with their corresponding feature names.'''
topn_ids = np.argsort(row)[::-1][:top_n]
top_feats = [(features[i], row[i]) for i in topn_ids]
df = pd.DataFrame(top_feats)
df.columns = ['feature', 'tfidf']
return df
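# Illustrative call (variable names assumed): for one document's dense TF-IDF row,
#   top_tfidf_feats(Xtr[0].toarray().ravel(), features, top_n=10)
# returns a 10-row frame with columns ['feature', 'tfidf'], highest weight first.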
def top_mean_feats(Xtr, features, grp_ids, min_tfidf=0.1, top_n=25):
''' Return the top n features that on average are most important amongst documents in rows
indentified by indices in grp_ids. '''
# if grp_ids:
# D = Xtr[grp_ids].toarray()
# else:
# D = Xtr.toarray()
D = Xtr[grp_ids].toarray()
D[D < min_tfidf] = 0
tfidf_means = np.mean(D, axis=0)
return top_tfidf_feats(tfidf_means, features, top_n)
if __name__ == "__main__":
#get top tfidf scores averaged
rand_indices = np.random.randint(0, (speech.trainX.shape[0]) - 1, 50)
df = top_mean_feats(speech.trainX, speech.count_vect.get_feature_names(), rand_indices, top_n=10)
print(df[-10:])
#get highest weighted features
weights = abs(cls.coef_).mean(0)
sorted_weights = [(x[0], x[1]) for x in sorted(zip(weights, speech.count_vect.get_feature_names()))]
df = | pd.DataFrame(sorted_weights) | pandas.DataFrame |
import math
import json
import random
import time
import calendar
import pickle
import os
import requests
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import namedtuple
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
SEED = 2701
torch.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)
#################
# Replay Memory #
#################
Transition = namedtuple('Transition',
('state', 'action', 'next_state', 'reward'))
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
def push(self, *args):
"""Saves a transition."""
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
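# Minimal usage sketch (tensors assumed to be built elsewhere):
#   memory = ReplayMemory(MEMORY_CAPACITY)
#   memory.push(state, action, next_state, reward)  # overwrites oldest when full
#   if len(memory) >= BATCH_SIZE:
#       transitions = memory.sample(BATCH_SIZE)     # uniform random minibatch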
#############
# Q-Network #
#############
class DQN(nn.Module):
def __init__(self, num_in_features, num_out_features):
super(DQN, self).__init__()
self.linear1 = nn.Linear(num_in_features, 32)
self.ln1 = nn.LayerNorm(32)
self.linear2 = nn.Linear(32, 64)
self.ln2 = nn.LayerNorm(64)
self.linear3 = nn.Linear(64, 64)
self.ln3 = nn.LayerNorm(64)
self.linear4 = nn.Linear(64, 32)
self.ln4 = nn.LayerNorm(32)
self.out_layer = nn.Linear(32, num_out_features)
# Called with either one element to determine next action, or a batch
# during optimization. Returns tensor([[add_class,remove_class, maintain_class]]).
def forward(self, x):
x = F.leaky_relu(self.ln1(self.linear1(x)))
x = F.leaky_relu(self.ln2(self.linear2(x)))
x = F.leaky_relu(self.ln3(self.linear3(x)))
x = F.leaky_relu(self.ln4(self.linear4(x)))
return self.out_layer(x)
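# Shape sketch: with the defaults below (N_FEATURES=5, N_ACTIONS=3) the network
# maps a (batch, 5) float tensor of state features to a (batch, 3) tensor of
# Q-values, one column per action (add / remove / maintain a bridge).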
###############################
# Hyperparameters & Utilities #
###############################
# if gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
df = pd.read_csv('../dbV3.csv')
timeseries = pd.read_csv('../timeseries.csv')
MIN_JVB_NUM = 1
MAX_JVB_NUM = 60
W1 = 30
ACTION_COOLDOWN = 30
LOOKBACK = 10
MEMORY_CAPACITY = 2000
BATCH_SIZE = 64
GAMMA = 0.5
TARGET_UPDATE = 200
N_EPISODES = 200
EPS_START = 1.0
EPS_END = 0.05
EXPLORATION_DUR = (80000 / ACTION_COOLDOWN) * (N_EPISODES / 2)
EPS_DECAY = (EPS_START - EPS_END) / EXPLORATION_DUR
EPS_THRESHOLD = EPS_START
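# EPS_DECAY is sized for a linear schedule: subtracting it once per action
# selection takes EPS_THRESHOLD from EPS_START down to EPS_END over roughly
# EXPLORATION_DUR steps (the decay update itself happens outside this excerpt).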
# Q-Network parameters
N_FEATURES = 5
N_ACTIONS = 3
# Initialize
policy_net = DQN(N_FEATURES, N_ACTIONS).to(device)
target_net = DQN(N_FEATURES, N_ACTIONS).to(device)
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()
optimizer = optim.Adam(policy_net.parameters())
memory = ReplayMemory(MEMORY_CAPACITY)
# define reward function
def calc_reward(state, action):
loss_delta = state[0][3].item()
curr_loss = state[0][4].item()
if action == 0:
jvb_num_delta = 1
elif action == 1:
jvb_num_delta = -1
else:
jvb_num_delta = 0
reward = loss_delta * jvb_num_delta
if loss_delta == 0:
reward = W1 * curr_loss * jvb_num_delta
if curr_loss == 0:
reward = -jvb_num_delta
return reward
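# Worked example: a positive loss delta (loss got worse) combined with the
# "add a bridge" action (+1) gives a positive reward, while removing a bridge
# under rising loss scores negative. With a zero delta the current loss level
# takes over (weighted by W1), and with zero loss the -jvb_num_delta term
# penalises adding bridges nobody needs.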
# Loss approximation
def loss_from_nearest_points(c, p, tj, ij):
PARTITIONS = 3
losses = []
#conf_partitions = [0, 1, 2, 3]
part_partitions = [1, 5, 9, 13]
tj_partitions = [1, 3, 5, 7]
ij_partitions = [0, 2, 4, 7]
for i in range(PARTITIONS):
#curr_c = conf_partitions[i]
#d = df[df['conferences'] == curr_c]
flag = True
for curr_p in range(part_partitions[i], part_partitions[i+1]):
if not flag:
break
d1 = df[df['participants'] == curr_p]
for curr_tj in range(tj_partitions[i], tj_partitions[i+1]):
if not flag:
break
d2 = d1[d1['jvb_num'] == curr_tj]
for curr_ij in range(ij_partitions[i], ij_partitions[i+1]):
d3 = d2[d2['zero_conf'] == curr_ij]
if len(d3) > 0:
loss = d3['loss'].mean()
participants_scale = p / curr_p
curr_active_jvb_count = curr_tj - curr_ij
if (tj - ij) == 0 or curr_active_jvb_count == 0:
continue
active_jvbs_scale = (tj - ij) / curr_active_jvb_count
loss_scale = participants_scale / active_jvbs_scale
estimated_loss = loss * loss_scale
losses.append(estimated_loss)
flag = False
break
return np.mean(losses)
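# In effect: when the exact (participants, total bridges, idle bridges)
# combination was never measured, nearby measured rows are rescaled by the
# ratio of participants per active bridge -- a rough linear-load assumption
# rather than an exact model -- and the rescaled losses are averaged.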
#################
# Training Func #
#################
def optimize_model():
if len(memory) < BATCH_SIZE:
return
policy_net.train()
transitions = memory.sample(BATCH_SIZE)
# Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for
# detailed explanation). This converts batch-array of Transitions
# to Transition of batch-arrays.
batch = Transition(*zip(*transitions))
# Compute a mask of non-final states and concatenate the batch elements
# (a final state would've been the one after which simulation ended)
non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
batch.next_state)), device=device, dtype=torch.bool)
non_final_next_states = torch.cat([s for s in batch.next_state
if s is not None])
state_batch = torch.cat(batch.state)
action_batch = torch.cat(batch.action)
reward_batch = torch.cat(batch.reward)
# Compute Q(s_t, a) - the model computes Q(s_t), then we select the
# columns of actions taken. These are the actions which would've been taken
# for each batch state according to policy_net
state_action_values = policy_net(state_batch).gather(1, action_batch)
# Compute V(s_{t+1}) for all next states.
# Expected values of actions for non_final_next_states are computed based
# on the "older" target_net; selecting their best reward with max(1)[0].
# This is merged based on the mask, such that we'll have either the expected
# state value or 0 in case the state was final.
next_state_values = torch.zeros(BATCH_SIZE, device=device)
next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()
# Compute the expected Q values
expected_state_action_values = (next_state_values * GAMMA) + reward_batch
# Compute Huber loss
loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))
# Optimize the model
optimizer.zero_grad()
loss.backward()
for param in policy_net.parameters():
param.grad.data.clamp_(-1, 1)
optimizer.step()
return loss
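# Net effect of one call: Q(s, a) from policy_net is regressed toward
# r + GAMMA * max_a' Q_target(s', a') (zero bootstrap for terminal states)
# with a Huber loss, gradients clipped to [-1, 1], then one Adam step.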
##############
# Simulation #
##############
print("Starting simulation...")
curr_time = time.time()
cummulative_rewards_history = []
epsilon_history = []
losses_dict = {}
counter = 0
for i_episode in range(N_EPISODES):
# list of [jvb id, conference count] pair of currently running JVBs
# selected with round-robin, removed with graceful shutdown
curr_jvbs = [[0, 0], ]
is_shutting_down = []
prev_state = np.array([0, 1, 1, 0])
prev_action = -1
prev_delta_state = None
latest_losses = []
jvb_num_history = []
rewards_history = []
losses_history = []
miss_count = 0
conf_count_over_time = timeseries['conference_count']
part_count_over_time = timeseries['participant_count']
with open('../logs/conference_count.txt', 'w') as f:
pass
with open('../logs/participant_count.txt', 'w') as f:
pass
with open('../logs/jvb_count.txt', 'w') as f:
pass
with open('../logs/rewards.txt', 'w') as f:
pass
with open('../logs/losses.txt', 'w') as f:
pass
episode_start_time = time.time()
for i in range(len(conf_count_over_time)):
c1 = int(conf_count_over_time[i])
p1 = int(part_count_over_time[i])
# update conferences
try:
new_c = c1 - int(conf_count_over_time[i-1])
except:
new_c = c1
if new_c > 0:
# assign conferences
for c in range(new_c):
jvb_conferences = [x[1] if x[0] not in is_shutting_down else 1e10 for x in curr_jvbs]
least_loaded_idx = np.argmin(jvb_conferences)
curr_jvbs[least_loaded_idx][1] += 1
elif new_c < 0:
# remove conferences
for c in range(abs(new_c)):
for j in curr_jvbs:
if j[1] > 0:
j[1] -= 1
break
# update jvbs (check shutting down jvbs)
for idx in range(len(is_shutting_down) - 1, -1, -1):
for j in curr_jvbs:
if j[0] == is_shutting_down[idx] and j[1] == 0:
curr_jvbs.remove(j)
is_shutting_down.pop(idx)
break
j1 = len(curr_jvbs)
jvb_num_history.append(j1)
z1 = len(list(filter(lambda x: x[1] == 0, curr_jvbs)))
avg_loss = losses_dict.get(c1, {}).get(p1, {}).get(j1, {}).get(z1, -1)
if avg_loss == -1:
miss_count += 1
avg_loss = df[
(df['conferences'] == c1)
& (df['participants'] == p1)
& (df['jvb_num'] == j1)
& (df['zero_conf'] == z1)
]['loss'].mean()
if pd.isna(avg_loss):
if c1 == 0 or p1 == 0:
avg_loss = 0
else:
avg_loss = df[
(df['conferences'] >= c1-1) & (df['conferences'] <= c1+1)
& (df['participants'] >= p1-1) & (df['participants'] <= p1+1)
& (df['jvb_num'] >= j1-1) & (df['jvb_num'] <= j1+1)
& (df['zero_conf'] >= z1-1) & (df['zero_conf'] <= z1+1)
]['loss'].mean()
if | pd.isna(avg_loss) | pandas.isna |
# -*- coding: utf-8 -*-
from datetime import timedelta, time
import numpy as np
from pandas import (DatetimeIndex, Float64Index, Index, Int64Index,
NaT, Period, PeriodIndex, Series, Timedelta,
TimedeltaIndex, date_range, period_range,
timedelta_range, notnull)
import pandas.util.testing as tm
import pandas as pd
from pandas.lib import Timestamp
from .common import Base
class DatetimeLike(Base):
def test_shift_identity(self):
idx = self.create_index()
self.assert_index_equal(idx, idx.shift(0))
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
self.assertFalse("length=%s" % len(idx) in str(idx))
self.assertTrue("'foo'" in str(idx))
self.assertTrue(idx.__class__.__name__ in str(idx))
if hasattr(idx, 'tz'):
if idx.tz is not None:
self.assertTrue(idx.tz in str(idx))
if hasattr(idx, 'freq'):
self.assertTrue("freq='%s'" % idx.freqstr in str(idx))
def test_view(self):
super(DatetimeLike, self).test_view()
i = self.create_index()
i_view = i.view('i8')
result = self._holder(i)
tm.assert_index_equal(result, i)
i_view = i.view(self._holder)
result = self._holder(i)
tm.assert_index_equal(result, i_view)
class TestDatetimeIndex(DatetimeLike, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makeDateIndex(10))
self.setup_indices()
def create_index(self):
return date_range('20130101', periods=5)
def test_shift(self):
# test shift for datetimeIndex and non datetimeIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05',
'2013-01-06'], freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(-1)
expected = DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
'2013-01-03', '2013-01-04'],
freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D')
expected = DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
'2013-01-10',
'2013-01-11'], freq='D')
self.assert_index_equal(result, expected)
def test_construction_with_alt(self):
i = pd.date_range('20130101', periods=5, freq='H', tz='US/Eastern')
i2 = DatetimeIndex(i, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
self.assert_index_equal(i2, expected)
# incompat tz/dtype
self.assertRaises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_pickle_compat_construction(self):
pass
def test_construction_index_with_mixed_timezones(self):
# GH 11488
# no tz results in DatetimeIndex
result = Index(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
# passing tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 19:00'),
Timestamp('2011-01-03 00:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
def test_construction_index_with_mixed_timezones_with_NaT(self):
# GH 11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
# passing tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 19:00'),
pd.NaT, Timestamp('2011-01-03 00:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
        # different tz coerces tz-naive to tz-aware
result = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 05:00'),
Timestamp('2011-01-02 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# tz mismatch affecting to tz-aware raises TypeError/ValueError
with tm.assertRaises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
with tm.assertRaises(TypeError):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with tm.assertRaises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
def test_astype(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timestamp('2016-05-16')] + [NaT] * 3, dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([1463356800000000000] +
[-9223372036854775808] * 3, dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_index_equal(result, Index(rng.asi8))
self.assert_numpy_array_equal(result.values, rng.asi8)
def test_astype_with_tz(self):
# with tz
rng = date_range('1/1/2000', periods=10, tz='US/Eastern')
result = rng.astype('datetime64[ns]')
expected = (date_range('1/1/2000', periods=10,
tz='US/Eastern')
.tz_convert('UTC').tz_localize(None))
tm.assert_index_equal(result, expected)
# BUG#10442 : testing astype(str) is correct for Series/DatetimeIndex
result = pd.Series(pd.date_range('2012-01-01', periods=3)).astype(str)
expected = pd.Series(
['2012-01-01', '2012-01-02', '2012-01-03'], dtype=object)
tm.assert_series_equal(result, expected)
result = Series(pd.date_range('2012-01-01', periods=3,
tz='US/Eastern')).astype(str)
expected = Series(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
dtype=object)
tm.assert_series_equal(result, expected)
def test_astype_str_compat(self):
# GH 13149, GH 13209
        # verify that we are returning NaT as a string (and not unicode)
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(str)
expected = Index(['2016-05-16', 'NaT', 'NaT', 'NaT'], dtype=object)
tm.assert_index_equal(result, expected)
def test_astype_str(self):
# test astype string - #10442
result = date_range('2012-01-01', periods=4,
name='test_name').astype(str)
expected = Index(['2012-01-01', '2012-01-02', '2012-01-03',
'2012-01-04'], name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with tz and name
result = date_range('2012-01-01', periods=3, name='test_name',
tz='US/Eastern').astype(str)
expected = Index(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and name
result = date_range('1/1/2011', periods=3, freq='H',
name='test_name').astype(str)
expected = Index(['2011-01-01 00:00:00', '2011-01-01 01:00:00',
'2011-01-01 02:00:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and timezone
result = date_range('3/6/2012 00:00', periods=2, freq='H',
tz='Europe/London', name='test_name').astype(str)
expected = Index(['2012-03-06 00:00:00+00:00',
'2012-03-06 01:00:00+00:00'],
dtype=object, name='test_name')
tm.assert_index_equal(result, expected)
def test_astype_datetime64(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype('datetime64[ns]')
tm.assert_index_equal(result, idx)
self.assertFalse(result is idx)
result = idx.astype('datetime64[ns]', copy=False)
tm.assert_index_equal(result, idx)
self.assertTrue(result is idx)
idx_tz = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN], tz='EST')
result = idx_tz.astype('datetime64[ns]')
expected = DatetimeIndex(['2016-05-16 05:00:00', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]')
tm.assert_index_equal(result, expected)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
self.assertRaises(ValueError, idx.astype, float)
self.assertRaises(ValueError, idx.astype, 'timedelta64')
self.assertRaises(ValueError, idx.astype, 'timedelta64[ns]')
self.assertRaises(ValueError, idx.astype, 'datetime64')
self.assertRaises(ValueError, idx.astype, 'datetime64[D]')
def test_where_other(self):
# other is ndarray or Index
i = pd.date_range('20130101', periods=3, tz='US/Eastern')
for arr in [np.nan, pd.NaT]:
result = i.where(notnull(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_where_tz(self):
i = pd.date_range('20130101', periods=3, tz='US/Eastern')
result = i.where(notnull(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2))
expected = i2
tm.assert_index_equal(result, expected)
def test_get_loc(self):
idx = pd.date_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
self.assertEqual(idx.get_loc(idx[1], method), 1)
self.assertEqual(idx.get_loc(idx[1].to_pydatetime(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
if method is not None:
self.assertEqual(idx.get_loc(idx[1], method,
tolerance=pd.Timedelta('0 days')),
1)
self.assertEqual(idx.get_loc('2000-01-01', method='nearest'), 0)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest'), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance='1 day'), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=pd.Timedelta('1D')), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=np.timedelta64(1, 'D')), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=timedelta(1)), 1)
with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')
with tm.assertRaises(KeyError):
idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours')
self.assertEqual(idx.get_loc('2000', method='nearest'), slice(0, 3))
self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 3))
self.assertEqual(idx.get_loc('1999', method='nearest'), 0)
self.assertEqual(idx.get_loc('2001', method='nearest'), 2)
with tm.assertRaises(KeyError):
idx.get_loc('1999', method='pad')
with tm.assertRaises(KeyError):
idx.get_loc('2001', method='backfill')
with tm.assertRaises(KeyError):
idx.get_loc('foobar')
with tm.assertRaises(TypeError):
idx.get_loc(slice(2))
idx = pd.to_datetime(['2000-01-01', '2000-01-04'])
self.assertEqual(idx.get_loc('2000-01-02', method='nearest'), 0)
self.assertEqual(idx.get_loc('2000-01-03', method='nearest'), 1)
self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 2))
# time indexing
idx = pd.date_range('2000-01-01', periods=24, freq='H')
tm.assert_numpy_array_equal(idx.get_loc(time(12)),
np.array([12], dtype=np.int64))
tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)),
np.array([], dtype=np.int64))
with tm.assertRaises(NotImplementedError):
idx.get_loc(time(12, 30), method='pad')
def test_get_indexer(self):
idx = pd.date_range('2000-01-01', periods=3)
tm.assert_numpy_array_equal(idx.get_indexer(idx), np.array([0, 1, 2]))
target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours',
'1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1]))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2]))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1]))
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour')),
np.array([0, -1, 1]))
with tm.assertRaises(ValueError):
idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
unpickled = self.round_trip_pickle(index)
self.assert_index_equal(index, unpickled)
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
def test_time_loc(self): # GH8667
from datetime import time
from pandas.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
key = time(15, 11, 30)
start = key.hour * 3600 + key.minute * 60 + key.second
step = 24 * 3600
for n in ns:
idx = pd.date_range('2014-11-26', periods=n, freq='S')
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
tm.assert_numpy_array_equal(ts.index.get_loc(key), i)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
left[key] *= -10
right.iloc[i] *= -10
tm.assert_series_equal(left, right)
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
# found in tseries/index.py, `periods` gets multiplied by `strides`
# (which has value 1e9) and since the max value for np.int32 is ~2e9,
# and since those machines won't promote np.int32 to np.int64, we get
# overflow.
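        # e.g. 1000 periods * 1e9 ns stride = 1e12, well past the np.int32
        # maximum of 2_147_483_647.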
periods = np.int_(1000)
idx1 = pd.date_range(start='2000', periods=periods, freq='S')
self.assertEqual(len(idx1), periods)
idx2 = pd.date_range(end='2000', periods=periods, freq='S')
self.assertEqual(len(idx2), periods)
def test_intersection(self):
first = self.index
second = self.index[5:]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
self.assertTrue(tm.equalContents(result, second))
third = Index(['a', 'b', 'c'])
result = first.intersection(third)
expected = pd.Index([], dtype=object)
self.assert_index_equal(result, expected)
def test_union(self):
first = self.index[:5]
second = self.index[5:]
everything = self.index
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.union(case)
self.assertTrue(tm.equalContents(result, everything))
def test_nat(self):
self.assertIs(DatetimeIndex([np.nan])[0], pd.NaT)
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
for result in [idx - delta, np.subtract(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '3D')
for result in [idx - delta, np.subtract(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'D')
def test_fillna_datetime64(self):
# GH 11343
for tz in ['US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'])
exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'])
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp)
# tz mismatch
exp = pd.Index([pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 10:00', tz=tz),
pd.Timestamp('2011-01-01 11:00')], dtype=object)
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp)
# object
exp = pd.Index([pd.Timestamp('2011-01-01 09:00'), 'x',
pd.Timestamp('2011-01-01 11:00')], dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
idx = pd.DatetimeIndex(
['2011-01-01 09:00', pd.NaT, '2011-01-01 11:00'], tz=tz)
exp = pd.DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], tz=tz)
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp)
exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz),
pd.Timestamp('2011-01-01 10:00'),
pd.Timestamp('2011-01-01 11:00', tz=tz)],
dtype=object)
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp)
# object
exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz),
'x',
pd.Timestamp('2011-01-01 11:00', tz=tz)],
dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
class TestPeriodIndex(DatetimeLike, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index= | tm.makePeriodIndex(10) | pandas.util.testing.makePeriodIndex |
from datetime import datetime, timedelta
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
Timestamp,
cut,
date_range,
to_datetime,
)
import pandas.util.testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.index = idx[::2]
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.set_index(idx[::2])
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2]}, index=[2010, 2011, 2012]
)
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
result.set_index(keys, drop=drop, inplace=True)
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays(
[df.index] + [df[x] for x in keys], names=[None] + keys
)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols, drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(["D"], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(["D"] + keys, drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH1590
df = DataFrame({"val": [0, 1, 2], "key": ["<KEY>"]})
expected = DataFrame({"val": [1, 2]}, Index(["b", "c"], name="key"))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index("key")
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize(
"box",
[
Series,
Index,
np.array,
list,
lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x]),
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "B"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_single_array(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
key = box(df["B"])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with pytest.raises(KeyError, match=msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/list-of-list "forget" the name of B
name_mi = getattr(key, "names", None)
name = [getattr(key, "name", None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(["B"], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize(
"box", [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])]
)
@pytest.mark.parametrize(
"append, index_name",
[(True, None), (True, "A"), (True, "B"), (True, "test"), (False, None)],
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
keys = ["A", box(df["B"])]
# np.array/list "forget" the name of B
names = ["A", None if box in [np.array, list, tuple, iter] else "B"]
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(["A", "B"], drop=False, append=append)
expected = expected.drop("A", axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize(
"box2",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"box1",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "A"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays_duplicate(
self, frame_of_index_cols, drop, append, index_name, box1, box2
):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df["A"]), box2(df["A"])]
result = df.set_index(keys, drop=drop, append=append)
# if either box is iter, it has been consumed; re-read
keys = [box1(df["A"]), box2(df["A"])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# use "is" because == would give ambiguous Boolean error for containers
first_drop = (
False if (keys[0] is "A" and keys[1] is "A") else drop # noqa: F632
)
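        # ("is" sidesteps e.g. Series(["a", "b"]) == "A", which yields an
        # element-wise boolean Series whose truth value is ambiguous in `and`.)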
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be interpreted as keys
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df["A"], df["B"]], names=["A", "B"])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(["A", "B"], drop=False, append=append)
tm.assert_frame_equal(result, expected)
def test_set_index_verify_integrity(self, frame_of_index_cols):
df = frame_of_index_cols
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index("A", verify_integrity=True)
# with MultiIndex
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index([df["A"], df["A"]], verify_integrity=True)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_keys(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"):
# column names are A-E, as well as one tuple
df.set_index(["foo", "bar", "baz"], drop=drop, append=append)
# non-existent key in list with arrays
with pytest.raises(KeyError, match="X"):
df.set_index([df["A"], df["B"], "X"], drop=drop, append=append)
msg = "[('foo', 'foo', 'foo', 'bar', 'bar')]"
# tuples always raise KeyError
with pytest.raises(KeyError, match=msg):
df.set_index(tuple(df["A"]), drop=drop, append=append)
# also within a list
with pytest.raises(KeyError, match=msg):
df.set_index(["A", df["A"], tuple(df["A"])], drop=drop, append=append)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("box", [set], ids=["set"])
def test_set_index_raise_on_type(self, frame_of_index_cols, box, drop, append):
df = frame_of_index_cols
msg = 'The parameter "keys" may be a column key, .*'
# forbidden type, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(box(df["A"]), drop=drop, append=append)
# forbidden type in list, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(["A", df["A"], box(df["A"])], drop=drop, append=append)
# MultiIndex constructor does not work directly on Series -> lambda
@pytest.mark.parametrize(
"box",
[Series, Index, np.array, iter, lambda x: MultiIndex.from_arrays([x])],
ids=["Series", "Index", "np.array", "iter", "MultiIndex"],
)
@pytest.mark.parametrize("length", [4, 6], ids=["too_short", "too_long"])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_on_len(
self, frame_of_index_cols, box, length, drop, append
):
# GH 24984
df = frame_of_index_cols # has length 5
values = np.random.randint(0, 10, (length,))
msg = "Length mismatch: Expected 5 rows, received array of length.*"
# wrong length directly
with pytest.raises(ValueError, match=msg):
df.set_index(box(values), drop=drop, append=append)
# wrong length in list
with pytest.raises(ValueError, match=msg):
df.set_index(["A", df.A, box(values)], drop=drop, append=append)
def test_set_index_custom_label_type(self):
# GH 24969
class Thing:
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
# necessary for pretty KeyError
__repr__ = __str__
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing("Three", "pink")
msg = "<Thing 'Three'>"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_hashable_iterable(self):
# GH 24969
# actual example discussed in GH 24984 was e.g. for shapely.geometry
# objects (e.g. a collection of Points) that can be both hashable and
# iterable; using frozenset as a stand-in for testing here
class Thing(frozenset):
# need to stabilize repr for KeyError (due to random order in sets)
def __repr__(self):
tmp = sorted(list(self))
# double curly brace prints one brace in format string
return "frozenset({{{}}})".format(", ".join(map(repr, tmp)))
thing1 = Thing(["One", "red"])
thing2 = Thing(["Two", "blue"])
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing(["Three", "pink"])
msg = r"frozenset\(\{'Three', 'pink'\}\)"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_type_raises(self):
# GH 24969
# purposefully inherit from something unhashable
class Thing(set):
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame([[0, 2], [1, 3]], columns=[thing1, thing2])
msg = 'The parameter "keys" may be a column key, .*'
with pytest.raises(TypeError, match=msg):
# use custom label directly
df.set_index(thing2)
with pytest.raises(TypeError, match=msg):
# custom label wrapped in list
df.set_index([thing2])
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
ci.name = "B"
# with Categorical
df = DataFrame({"A": np.random.randn(10), "B": ci.values})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# from a CategoricalIndex
df = DataFrame({"A": np.random.randn(10), "B": ci})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# round-trip
idf = idf.reset_index().set_index("B")
tm.assert_index_equal(idf.index, ci)
def test_set_index_cast_datetimeindex(self):
df = DataFrame(
{
"A": [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)],
"B": np.random.randn(1000),
}
)
idf = df.set_index("A")
assert isinstance(idf.index, DatetimeIndex)
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx = DatetimeIndex(
to_datetime(["2013-1-1 13:00", "2013-1-2 14:00"]), name="B"
).tz_localize("US/Pacific")
df = DataFrame(np.random.randn(2, 1), columns=["A"])
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
# convert to series while keeping the timezone
result = idx.to_series(keep_tz=True, index=[0, 1])
tm.assert_series_equal(result, expected)
# convert to utc
with tm.assert_produces_warning(FutureWarning):
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
result = df["B"]
comp = Series(DatetimeIndex(expected.values).tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = (
"The default of the 'keep_tz' keyword in "
"DatetimeIndex.to_series will change to True in a future "
"release."
)
assert msg in str(m[0].message)
with tm.assert_produces_warning(FutureWarning):
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
# list of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=pytz.utc), "foo": 1}])
expected = df.set_index("ts")
df.index = df["ts"]
df.pop("ts")
tm.assert_frame_equal(df, expected)
def test_reset_index_tz(self, tz_aware_fixture):
# GH 3950
# reset_index with single level
tz = tz_aware_fixture
idx = date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx")
df = DataFrame({"a": range(5), "b": ["A", "B", "C", "D", "E"]}, index=idx)
expected = DataFrame(
{
"idx": [
datetime(2011, 1, 1),
datetime(2011, 1, 2),
datetime(2011, 1, 3),
datetime(2011, 1, 4),
datetime(2011, 1, 5),
],
"a": range(5),
"b": ["A", "B", "C", "D", "E"],
},
columns=["idx", "a", "b"],
)
expected["idx"] = expected["idx"].apply(lambda d: Timestamp(d, tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
def test_set_index_timezone(self):
# GH 12358
# tz-aware Series should retain the tz
idx = to_datetime(["2014-01-01 10:10:10"], utc=True).tz_convert("Europe/Rome")
df = DataFrame({"A": idx})
assert df.set_index(idx).index[0].hour == 11
assert DatetimeIndex(Series(df.A))[0].hour == 11
assert df.set_index(df.A).index[0].hour == 11
def test_set_index_dst(self):
di = date_range("2006-10-29 00:00:00", periods=3, freq="H", tz="US/Pacific")
df = DataFrame(data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=di).reset_index()
# single level
res = df.set_index("index")
exp = DataFrame(
data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=Index(di, name="index")
)
tm.assert_frame_equal(res, exp)
# GH 12920
res = df.set_index(["index", "a"])
exp_index = MultiIndex.from_arrays([di, [0, 1, 2]], names=["index", "a"])
exp = DataFrame({"b": [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_reset_index_with_intervals(self):
idx = IntervalIndex.from_breaks(np.arange(11), name="x")
original = DataFrame({"x": idx, "y": np.arange(10)})[["x", "y"]]
result = original.set_index("x")
expected = DataFrame({"y": np.arange(10)}, index=idx)
tm.assert_frame_equal(result, expected)
result2 = result.reset_index()
tm.assert_frame_equal(result2, original)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([("foo", 1), ("foo", 2), ("bar", 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
result = df.set_index(df.columns[0])
expected = df.iloc[:, 1:]
expected.index = df.iloc[:, 0].values
expected.index.names = [df.columns[0]]
tm.assert_frame_equal(result, expected)
def test_set_index_empty_column(self):
# GH 1971
df = DataFrame(
[
{"a": 1, "p": 0},
{"a": 2, "m": 10},
{"a": 3, "m": 11, "p": 20},
{"a": 4, "m": 12, "p": 21},
],
columns=("a", "m", "p", "x"),
)
result = df.set_index(["a", "x"])
expected = df[["m", "p"]]
expected.index = MultiIndex.from_arrays([df["a"], df["x"]], names=["a", "x"])
tm.assert_frame_equal(result, expected)
def test_set_columns(self, float_string_frame):
cols = Index(np.arange(len(float_string_frame.columns)))
float_string_frame.columns = cols
with pytest.raises(ValueError, match="Length mismatch"):
float_string_frame.columns = cols[::2]
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range("2011/01/01", periods=6, freq="M", tz="US/Eastern")
idx2 = date_range("2013", periods=6, freq="A", tz="Asia/Tokyo")
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.reindex(idx2)
tm.assert_index_equal(df.index, idx2)
# GH 11314
# with tz
index = date_range(
datetime(2015, 10, 1), datetime(2015, 10, 1, 23), freq="H", tz="US/Eastern"
)
df = DataFrame(np.random.randn(24, 1), columns=["a"], index=index)
new_index = date_range(
datetime(2015, 10, 2), datetime(2015, 10, 2, 23), freq="H", tz="US/Eastern"
)
result = df.set_index(new_index)
assert result.index.freq == index.freq
# Renaming
def test_rename(self, float_frame):
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
renamed = float_frame.rename(columns=mapping)
renamed2 = float_frame.rename(columns=str.lower)
tm.assert_frame_equal(renamed, renamed2)
tm.assert_frame_equal(
renamed2.rename(columns=str.upper), float_frame, check_names=False
)
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetical
df = DataFrame(data)
renamed = df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["foo", "bar"]))
renamed = df.rename(index=str.upper)
tm.assert_index_equal(renamed.index, Index(["BAR", "FOO"]))
# have to pass something
with pytest.raises(TypeError, match="must pass an index to rename"):
float_frame.rename()
# partial columns
renamed = float_frame.rename(columns={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.columns, Index(["A", "B", "foo", "bar"]))
# other axis
renamed = float_frame.T.rename(index={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.index, Index(["A", "B", "foo", "bar"]))
# index with name
index = Index(["foo", "bar"], name="name")
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["bar", "foo"], name="name"))
assert renamed.index.name == renamer.index.name
def test_rename_axis_inplace(self, float_frame):
# GH 15704
expected = float_frame.rename_axis("foo")
result = float_frame.copy()
no_return = result.rename_axis("foo", inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
expected = float_frame.rename_axis("bar", axis=1)
result = float_frame.copy()
no_return = result.rename_axis("bar", axis=1, inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
def test_rename_axis_raises(self):
# https://github.com/pandas-dev/pandas/issues/17833
df = DataFrame({"A": [1, 2], "B": [1, 2]})
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis(id, axis=0)
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis({0: 10, 1: 20}, axis=0)
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis(id, axis=1)
with pytest.raises(ValueError, match="Use `.rename`"):
df["A"].rename_axis(id)
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
df = DataFrame(
{"x": [i for i in range(len(mi))], "y": [i * 10 for i in range(len(mi))]},
index=mi,
)
# Test for rename of the Index object of columns
result = df.rename_axis("cols", axis=1)
tm.assert_index_equal(result.columns, Index(["x", "y"], name="cols"))
# Test for rename of the Index object of columns using dict
result = result.rename_axis(columns={"cols": "new"}, axis=1)
tm.assert_index_equal(result.columns, Index(["x", "y"], name="new"))
# Test for renaming index using dict
result = df.rename_axis(index={"ll": "foo"})
assert result.index.names == ["foo", "nn"]
# Test for renaming index using a function
result = df.rename_axis(index=str.upper, axis=0)
assert result.index.names == ["LL", "NN"]
# Test for renaming index providing complete list
result = df.rename_axis(index=["foo", "goo"])
assert result.index.names == ["foo", "goo"]
# Test for changing index and columns at same time
sdf = df.reset_index().set_index("nn").drop(columns=["ll", "y"])
result = sdf.rename_axis(index="foo", columns="meh")
assert result.index.name == "foo"
assert result.columns.name == "meh"
# Test different error cases
with pytest.raises(TypeError, match="Must pass"):
df.rename_axis(index="wrong")
with pytest.raises(ValueError, match="Length of names"):
df.rename_axis(index=["wrong"])
with pytest.raises(TypeError, match="bogus"):
df.rename_axis(bogus=None)
@pytest.mark.parametrize(
"kwargs, rename_index, rename_columns",
[
({"mapper": None, "axis": 0}, True, False),
({"mapper": None, "axis": 1}, False, True),
({"index": None}, True, False),
({"columns": None}, False, True),
({"index": None, "columns": None}, True, True),
({}, False, False),
],
)
def test_rename_axis_none(self, kwargs, rename_index, rename_columns):
# GH 25034
index = Index(list("abc"), name="foo")
columns = Index(["col1", "col2"], name="bar")
data = np.arange(6).reshape(3, 2)
df = DataFrame(data, index, columns)
result = df.rename_axis(**kwargs)
expected_index = index.rename(None) if rename_index else index
expected_columns = columns.rename(None) if rename_columns else columns
expected = DataFrame(data, expected_index, expected_columns)
tm.assert_frame_equal(result, expected)
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
new_index = MultiIndex.from_tuples(
[("foo3", "bar1"), ("foo2", "bar3")], names=["foo", "bar"]
)
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
tm.assert_index_equal(renamed.index, new_index)
tm.assert_index_equal(renamed.columns, new_columns)
assert renamed.index.names == df.index.names
assert renamed.columns.names == df.columns.names
#
# with specifying a level (GH13766)
# dict
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples(
[("fizz1", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
tm.assert_index_equal(renamed.columns, new_columns)
# function
func = str.upper
new_columns = MultiIndex.from_tuples(
[("FIZZ1", "buzz1"), ("FIZZ2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples(
[("fizz1", "BUZZ1"), ("fizz2", "BUZZ2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level="buzz")
tm.assert_index_equal(renamed.columns, new_columns)
# index
new_index = MultiIndex.from_tuples(
[("foo3", "bar1"), ("foo2", "bar2")], names=["foo", "bar"]
)
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
tm.assert_index_equal(renamed.index, new_index)
def test_rename_nocopy(self, float_frame):
renamed = float_frame.rename(columns={"C": "foo"}, copy=False)
renamed["foo"] = 1.0
assert (float_frame["C"] == 1.0).all()
def test_rename_inplace(self, float_frame):
float_frame.rename(columns={"C": "foo"})
assert "C" in float_frame
assert "foo" not in float_frame
c_id = id(float_frame["C"])
float_frame = float_frame.copy()
float_frame.rename(columns={"C": "foo"}, inplace=True)
assert "C" not in float_frame
assert "foo" in float_frame
assert id(float_frame["foo"]) != c_id
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = DataFrame({0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]})
df = df.rename(columns={0: "a"})
df = df.rename(columns={1: "b"})
df = df.set_index(["a", "b"])
df.columns = ["2001-01-01"]
expected = DataFrame(
[[1], [2]],
index=MultiIndex.from_tuples(
[("foo", "bah"), ("bar", "bas")], names=["a", "b"]
),
columns=["2001-01-01"],
)
tm.assert_frame_equal(df, expected)
def test_rename_bug2(self):
# GH 19497
# rename was changing Index to MultiIndex if Index contained tuples
df = DataFrame(data=np.arange(3), index=[(0, 0), (1, 1), (2, 2)], columns=["a"])
df = df.rename({(1, 1): (5, 4)}, axis="index")
expected = DataFrame(
data=np.arange(3), index=[(0, 0), (5, 4), (2, 2)], columns=["a"]
)
tm.assert_frame_equal(df, expected)
def test_rename_errors_raises(self):
df = DataFrame(columns=["A", "B", "C", "D"])
with pytest.raises(KeyError, match="'E'] not found in axis"):
df.rename(columns={"A": "a", "E": "e"}, errors="raise")
@pytest.mark.parametrize(
"mapper, errors, expected_columns",
[
({"A": "a", "E": "e"}, "ignore", ["a", "B", "C", "D"]),
({"A": "a"}, "raise", ["a", "B", "C", "D"]),
(str.lower, "raise", ["a", "b", "c", "d"]),
],
)
def test_rename_errors(self, mapper, errors, expected_columns):
# GH 13473
# rename now works with errors parameter
df = DataFrame(columns=["A", "B", "C", "D"])
result = df.rename(columns=mapper, errors=errors)
expected = DataFrame(columns=expected_columns)
tm.assert_frame_equal(result, expected)
def test_reorder_levels(self):
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
names=["L0", "L1", "L2"],
)
df = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=index)
# no change, position
result = df.reorder_levels([0, 1, 2])
tm.assert_frame_equal(df, result)
# no change, labels
result = df.reorder_levels(["L0", "L1", "L2"])
tm.assert_frame_equal(df, result)
# rotate, position
result = df.reorder_levels([1, 2, 0])
e_idx = MultiIndex(
levels=[["one", "two", "three"], [0, 1], ["bar"]],
codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
names=["L1", "L2", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
tm.assert_frame_equal(result, expected)
result = df.reorder_levels([0, 0, 0])
e_idx = MultiIndex(
levels=[["bar"], ["bar"], ["bar"]],
codes=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],
names=["L0", "L0", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
tm.assert_frame_equal(result, expected)
result = df.reorder_levels(["L0", "L0", "L0"])
tm.assert_frame_equal(result, expected)
def test_reset_index(self, float_frame):
stacked = float_frame.stack()[::2]
stacked = DataFrame({"foo": stacked, "bar": stacked})
names = ["first", "second"]
stacked.index.names = names
deleveled = stacked.reset_index()
for i, (lev, level_codes) in enumerate(
zip(stacked.index.levels, stacked.index.codes)
):
values = lev.take(level_codes)
name = names[i]
tm.assert_index_equal(values, Index(deleveled[name]))
stacked.index.names = [None, None]
deleveled2 = stacked.reset_index()
tm.assert_series_equal(
deleveled["first"], deleveled2["level_0"], check_names=False
)
tm.assert_series_equal(
deleveled["second"], deleveled2["level_1"], check_names=False
)
# default name assigned
rdf = float_frame.reset_index()
exp = Series(float_frame.index.values, name="index")
tm.assert_series_equal(rdf["index"], exp)
# default name assigned, corner case
df = float_frame.copy()
df["index"] = "foo"
rdf = df.reset_index()
exp = Series(float_frame.index.values, name="level_0")
tm.assert_series_equal(rdf["level_0"], exp)
# but this is ok
float_frame.index.name = "index"
deleveled = float_frame.reset_index()
tm.assert_series_equal(deleveled["index"], Series(float_frame.index))
tm.assert_index_equal(deleveled.index, Index(np.arange(len(deleveled))))
# preserve column names
float_frame.columns.name = "columns"
resetted = float_frame.reset_index()
assert resetted.columns.name == "columns"
# only remove certain columns
df = float_frame.reset_index().set_index(["index", "A", "B"])
rs = df.reset_index(["A", "B"])
# TODO should reset_index check_names ?
tm.assert_frame_equal(rs, float_frame, check_names=False)
rs = df.reset_index(["index", "A", "B"])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index(["index", "A", "B"])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index("A")
xp = float_frame.reset_index().set_index(["index", "B"])
tm.assert_frame_equal(rs, xp, check_names=False)
# test resetting in place
df = float_frame.copy()
resetted = float_frame.reset_index()
df.reset_index(inplace=True)
tm.assert_frame_equal(df, resetted, check_names=False)
df = float_frame.reset_index().set_index(["index", "A", "B"])
rs = df.reset_index("A", drop=True)
xp = float_frame.copy()
del xp["A"]
xp = xp.set_index(["B"], append=True)
tm.assert_frame_equal(rs, xp, check_names=False)
def test_reset_index_name(self):
df = DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8]],
columns=["A", "B", "C", "D"],
index=Index(range(2), name="x"),
)
assert df.reset_index().index.name is None
assert df.reset_index(drop=True).index.name is None
df.reset_index(inplace=True)
assert df.index.name is None
def test_reset_index_level(self):
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "C", "D"])
for levels in ["A", "B"], [0, 1]:
# With MultiIndex
result = df.set_index(["A", "B"]).reset_index(level=levels[0])
tm.assert_frame_equal(result, df.set_index("B"))
result = df.set_index(["A", "B"]).reset_index(level=levels[:1])
tm.assert_frame_equal(result, df.set_index("B"))
result = df.set_index(["A", "B"]).reset_index(level=levels)
tm.assert_frame_equal(result, df)
result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True)
tm.assert_frame_equal(result, df[["C", "D"]])
# With single-level Index (GH 16263)
result = df.set_index("A").reset_index(level=levels[0])
tm.assert_frame_equal(result, df)
result = df.set_index("A").reset_index(level=levels[:1])
tm.assert_frame_equal(result, df)
result = df.set_index(["A"]).reset_index(level=levels[0], drop=True)
tm.assert_frame_equal(result, df[["B", "C", "D"]])
# Missing levels - for both MultiIndex and single-level Index:
for idx_lev in ["A", "B"], ["A"]:
with pytest.raises(KeyError, match="Level E "):
df.set_index(idx_lev).reset_index(level=["A", "E"])
with pytest.raises(IndexError, match="Too many levels"):
df.set_index(idx_lev).reset_index(level=[0, 1, 2])
def test_reset_index_right_dtype(self):
time = np.arange(0.0, 10, np.sqrt(2) / 2)
s1 = Series(
(9.81 * time ** 2) / 2, index=Index(time, name="time"), name="speed"
)
df = DataFrame(s1)
resetted = s1.reset_index()
assert resetted["time"].dtype == np.float64
resetted = df.reset_index()
assert resetted["time"].dtype == np.float64
def test_reset_index_multiindex_col(self):
vals = np.random.randn(3, 3).astype(object)
idx = ["x", "y", "z"]
full = np.hstack(([[x] for x in idx], vals))
df = DataFrame(
vals,
Index(idx, name="a"),
columns=[["b", "b", "c"], ["mean", "median", "mean"]],
)
rs = df.reset_index()
xp = DataFrame(
full, columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]]
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index(col_fill=None)
xp = DataFrame(
full, columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]]
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index(col_level=1, col_fill="blah")
xp = DataFrame(
full, columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]]
)
tm.assert_frame_equal(rs, xp)
df = DataFrame(
vals,
MultiIndex.from_arrays([[0, 1, 2], ["x", "y", "z"]], names=["d", "a"]),
columns=[["b", "b", "c"], ["mean", "median", "mean"]],
)
rs = df.reset_index("a")
xp = DataFrame(
full,
Index([0, 1, 2], name="d"),
columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]],
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index("a", col_fill=None)
xp = DataFrame(
full,
Index(range(3), name="d"),
columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]],
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index("a", col_fill="blah", col_level=1)
xp = DataFrame(
full,
Index(range(3), name="d"),
columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]],
)
tm.assert_frame_equal(rs, xp)
def test_reset_index_multiindex_nan(self):
# GH6322, testing reset_index on MultiIndexes
# when we have a nan or all nan
df = DataFrame(
{"A": ["a", "b", "c"], "B": [0, 1, np.nan], "C": np.random.rand(3)}
)
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame(
{"A": [np.nan, "b", "c"], "B": [0, 1, 2], "C": np.random.rand(3)}
)
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame({"A": ["a", "b", "c"], "B": [0, 1, 2], "C": [np.nan, 1.1, 2.2]})
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame(
{
"A": ["a", "b", "c"],
"B": [np.nan, np.nan, np.nan],
"C": np.random.rand(3),
}
)
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
def test_reset_index_with_datetimeindex_cols(self):
# GH5818
#
df = DataFrame(
[[1, 2], [3, 4]],
columns=date_range("1/1/2013", "1/2/2013"),
index=["A", "B"],
)
result = df.reset_index()
expected = DataFrame(
[["A", 1, 2], ["B", 3, 4]],
columns=["index", datetime(2013, 1, 1), datetime(2013, 1, 2)],
)
tm.assert_frame_equal(result, expected)
def test_reset_index_range(self):
# GH 12071
df = DataFrame([[0, 0], [1, 1]], columns=["A", "B"], index=RangeIndex(stop=2))
result = df.reset_index()
assert isinstance(result.index, RangeIndex)
expected = DataFrame(
[[0, 0, 0], [1, 1, 1]],
columns=["index", "A", "B"],
index=RangeIndex(stop=2),
)
tm.assert_frame_equal(result, expected)
def test_set_index_names(self):
df = tm.makeDataFrame()
df.index.name = "name"
assert df.set_index(df.index).index.names == ["name"]
mi = MultiIndex.from_arrays(df[["A", "B"]].T.values, names=["A", "B"])
mi2 = MultiIndex.from_arrays(
df[["A", "B", "A", "B"]].T.values, names=["A", "B", "C", "D"]
)
df = df.set_index(["A", "B"])
assert df.set_index(df.index).index.names == ["A", "B"]
# Check that set_index isn't converting a MultiIndex into an Index
assert isinstance(df.set_index(df.index).index, MultiIndex)
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
idx2 = df.index.rename(["C", "D"])
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
assert isinstance(df.set_index([df.index, idx2]).index, MultiIndex)
# Check equality
tm.assert_index_equal(df.set_index([df.index, idx2]).index, mi2)
def test_rename_objects(self, float_string_frame):
renamed = float_string_frame.rename(columns=str.upper)
assert "FOO" in renamed
assert "foo" not in renamed
def test_rename_axis_style(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["X", "Y"])
expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"])
result = df.rename(str.lower, axis=1)
tm.assert_frame_equal(result, expected)
result = df.rename(str.lower, axis="columns")
tm.assert_frame_equal(result, expected)
result = df.rename({"A": "a", "B": "b"}, axis=1)
tm.assert_frame_equal(result, expected)
result = df.rename({"A": "a", "B": "b"}, axis="columns")
tm.assert_frame_equal(result, expected)
# Index
expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"])
result = df.rename(str.lower, axis=0)
tm.assert_frame_equal(result, expected)
result = df.rename(str.lower, axis="index")
tm.assert_frame_equal(result, expected)
result = df.rename({"X": "x", "Y": "y"}, axis=0)
tm.assert_frame_equal(result, expected)
result = df.rename({"X": "x", "Y": "y"}, axis="index")
tm.assert_frame_equal(result, expected)
result = df.rename(mapper=str.lower, axis="index")
tm.assert_frame_equal(result, expected)
def test_rename_mapper_multi(self):
df = DataFrame({"A": ["a", "b"], "B": ["c", "d"], "C": [1, 2]}).set_index(
["A", "B"]
)
result = df.rename(str.upper)
expected = df.rename(index=str.upper)
tm.assert_frame_equal(result, expected)
def test_rename_positional_named(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"])
result = df.rename(str.lower, columns=str.upper)
expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"])
tm.assert_frame_equal(result, expected)
def test_rename_axis_style_raises(self):
# see gh-12392
df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["0", "1"])
# Named target and axis
over_spec_msg = "Cannot specify both 'axis' and any of 'index' or 'columns'"
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis=1)
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis="columns")
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(columns=str.lower, axis="columns")
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis=0)
# Multiple targets and axis
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(str.lower, str.lower, axis="columns")
# Too many targets
over_spec_msg = "Cannot specify all of 'mapper', 'index', 'columns'."
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(str.lower, str.lower, str.lower)
# Duplicates
with pytest.raises(TypeError, match="multiple values"):
df.rename(id, mapper=id)
def test_reindex_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's
df = DataFrame(
[[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=["a", "b", "c"],
columns=["d", "e", "f"],
)
res1 = df.reindex(["b", "a"])
res2 = df.reindex(index=["b", "a"])
res3 = df.reindex(labels=["b", "a"])
res4 = df.reindex(labels=["b", "a"], axis=0)
res5 = df.reindex(["b", "a"], axis=0)
for res in [res2, res3, res4, res5]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(columns=["e", "d"])
res2 = df.reindex(["e", "d"], axis=1)
res3 = df.reindex(labels=["e", "d"], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(index=["b", "a"], columns=["e", "d"])
res2 = df.reindex(columns=["e", "d"], index=["b", "a"])
res3 = df.reindex(labels=["b", "a"], axis=0).reindex(labels=["e", "d"], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
def test_rename_positional(self):
df = DataFrame(columns=["A", "B"])
with tm.assert_produces_warning(FutureWarning) as rec:
result = df.rename(None, str.lower)
expected = | DataFrame(columns=["a", "b"]) | pandas.DataFrame |
#from POPS_lib.fileIO import read_Calibration_fromFile,read_Calibration_fromString,save_Calibration
#import fileIO
from scipy import interpolate, optimize
import numpy as np
import pylab as plt
from io import StringIO as io
import pandas as pd
import warnings
from atmPy.aerosols.instruments.POPS import mie
#read_fromFile = fileIO.read_Calibration_fromFile
#read_fromString = fileIO.read_Calibration_fromString
def _msg(txt, save, out_file, verbose):
if verbose:
print(txt)
if save:
out_file.write(str(txt) + '\n')
def generate_calibration(single_pnt_cali_d=508,
single_pnt_cali_ior=1.6,
single_pnt_cali_int=1000,
noise_level = 12,
ior=1.5,
dr=[100, 5000],
no_pts=5000,
no_cal_pts=50,
plot=True,
raise_error=True,
test=False
):
"""
    This function generates a calibration function for the POPS instrument based on its theoretical response.
Args:
single_pnt_cali_d: float [508]
Diameter of single point calibration in nm.
single_pnt_cali_ior: float [1.6]
Refractive index of material used in single point calibration.
single_pnt_cali_int: float [1000]
Raw intensity (digitizer bins) measured in single point calibration
noise_level: int
            The way POPS detects peak height is affected by noise, which results in a positive bias for sizing
            close to the lower detection limit ... this corrects for it. If you don't want this, set noise_level to None.
ior: float [1.5]
Refractive index of the anticipated aerosol material.
dr: array-like [[110, 3400]]
Diameter range of the calibration. The calibration range will actually be a bit smaller than this, so make
this range a little bit larger than you want it.
no_pts: int [600]
Number of points used in the Mie calculations... quite unimportant value.
no_cal_pts: int
            Number of points in the generated calibration. Usually a Mie curve is not bijective; this number defines how
            much the response curve is smoothed. This is merely a starting number: if bijectivity is not achieved, the number
            is reduced until bijectivity is achieved.
plot: bool [False]
If the plotting of the result is desired.
raise_error: bool [True]
If an error is raised in case the resulting calibration function is not bijective.
test: bool [False]
If True the calibration diameters are returned, so one can check if they are in the desired range.
Returns:
Calibration instance
if plot: (Calibration instance, Axes instance)
if test: Series instance
"""
drum = np.array(dr)/1e3
d, amp = mie.makeMie_diameter(diameterRangeInMikroMeter=drum,
noOfdiameters=no_pts,
IOR=ior)
df = pd.DataFrame({'d': d, 'amp': amp})
    # a calibration function is created with no_cal_pts calibration points. no_cal_pts is steadily decreased until
# the calibration function is bijective
valid = False
while not valid:
no_cal_pts -= 1
binedgs = np.logspace(np.log10(df.amp.min()), np.log10(df.amp.max()), no_cal_pts)
binedgs[0] -= (binedgs[1] - binedgs[
            0]) * 0.01  # this is to ensure the first point is not on the edge ... for the cut function used later
mie_cal = df.groupby(pd.cut(df.amp, binedgs)).median()
dfstd = df.groupby(pd.cut(df.amp, binedgs)).mad()
mie_cal.index = mie_cal.d
dfstd.index = mie_cal.d
mie_cal['sigma_d'] = dfstd.d
mie_cal.index.name = None
mie_cal.sort_values('amp', axis=0, inplace=True)
# check for bijectivity
if not ((mie_cal.d.values[1:] - mie_cal.d.values[:-1]) < 0).sum():
valid = True
# final conditioning: um2nm, indexname, drop d
# mie_cal.drop('d', axis=1, inplace=True)
mie_cal.index.name = 'd_nm'
mie_cal.index *= 1e3
mie_cal.d *= 1e3
mie_cal.sigma_d *= 1.e3
cali_inst_pre = Calibration(mie_cal)
# single point calibration
    ## solve calibration function to get amp at calibration diameter
### first guess
dt = mie_cal.index[abs(mie_cal.index - single_pnt_cali_d).argmin()]
at = mie_cal.loc[dt, 'amp']
# cali_inst_at_single_pnt_calid_d = cali_inst_pre.calibrationFunction(single_pnt_cali_d)
### solve
cali_inst_at_single_pnt_calid_d = optimize.fsolve(lambda x: cali_inst_pre.calibrationFunction(x) - single_pnt_cali_d, at)
## scale for ior mismatch
if ior == single_pnt_cali_ior:
scale_ioradj = 1
single_pnt_cali_int_pre = cali_inst_at_single_pnt_calid_d
else:
# single_pnt_cali_d = 500
single_pnt_cali_d *= 1e-3
dt, mt = mie.makeMie_diameter(diameterRangeInMikroMeter=[single_pnt_cali_d, single_pnt_cali_d + 1e-3],
IOR=single_pnt_cali_ior,
noOfdiameters=2)
single_pnt_cali_int_pre = mt[0]
scale_ioradj = cali_inst_at_single_pnt_calid_d / single_pnt_cali_int_pre
## scale for instrument calibration
scale_instrument = single_pnt_cali_int / single_pnt_cali_int_pre
## total scale
scale = scale_ioradj * scale_instrument
mie_cal.loc[:,'amp'] *= scale
if not isinstance(noise_level, type(None)):
mie_cal.loc[:, 'amp'] += noise_level
cali_inst = Calibration(mie_cal)
if plot:
f, a = plt.subplots()
a.plot(df.d * 1e3, df.amp * scale, label='POPS resp.')
cali_inst.plot(ax = a)
# a.plot(ampm.index * 1e3, ampm.values * scale, label='POPS resp. smooth')
# g, = a.plot(cali.index, cali.values, label='cali')
# g.set_linestyle('')
# g.set_marker('x')
# g.set_markersize(10)
# g.set_markeredgewidth(2)
g, = a.plot(single_pnt_cali_d * 1e3, single_pnt_cali_int, label='single ptn cal')
g.set_linestyle('')
g.set_marker('x')
g.set_markersize(10)
g.set_markeredgewidth(2)
g.set_label('single pt. cali.')
# # st.plot(ax = a)
# a.loglog()
a.legend()
# return dft
# return cali_inst, a
##########
    # start of old Mie calculations
old = False
if old:
dr = np.array(dr)/1000
single_pnt_cali_d *= 1e-3
# rr = dr / 2 / 1000
d, amp = mie.makeMie_diameter(noOfdiameters=no_pts,
diameterRangeInMikroMeter = dr,
# radiusRangeInMikroMeter=rr,
IOR=ior)
ds = pd.Series(amp, d)
cal_d = pd.Series(index=np.logspace(np.log10(dr[0]), np.log10(dr[1]), no_cal_pts + 2)[1:-1])
if test:
return cal_d
# single point calibration
if ior == single_pnt_cali_ior:
ds_spc = ds
else:
d, amp = mie.makeMie_diameter(noOfdiameters=no_pts,
diameterRangeInMikroMeter = dr,
# radiusRangeInMikroMeter=rr,
IOR=single_pnt_cali_ior)
ds_spc = pd.Series(amp, d)
        # rolling, the issue here is that I am rolling over windows of diameter rather than windows of amp, which would be better
ampm = ds.rolling(int(no_pts / no_cal_pts), center=True).mean()
#
cali = ampm.append(cal_d).sort_index().interpolate().reindex(cal_d.index)
spc_point = ds_spc.append(pd.Series(index=[single_pnt_cali_d])).sort_index().interpolate().reindex(
[single_pnt_cali_d]) # .values[0]
scale = single_pnt_cali_int / spc_point.values[0]
cali *= scale
cali.index *= 1e3
cali_inst_pre = pd.DataFrame(cali, columns=['amp'])
cali_inst_pre['d'] = cali_inst_pre.index
cali_inst_pre = Calibration(cali_inst_pre)
if raise_error:
ct = cali.values
if (ct[1:] - ct[:-1]).min() < 0:
raise ValueError(
                    'Calibration function is not bijective. Usually decreasing the number of calibration points will help!')
cal_fkt_test = cali_inst_pre.calibrationFunction(cali_inst_pre.data.amp.values)
if not np.all(~np.isnan(cal_fkt_test)):
raise ValueError(
                    'Calibration function is not bijective. Usually decreasing the number of calibration points will help!')
if plot:
f, a = plt.subplots()
a.plot(ds.index * 1e3, ds.values * scale, label='POPS resp.')
a.plot(ampm.index * 1e3, ampm.values * scale, label='POPS resp. smooth')
g, = a.plot(cali.index, cali.values, label='cali')
g.set_linestyle('')
g.set_marker('x')
g.set_markersize(10)
g.set_markeredgewidth(2)
g, = a.plot(single_pnt_cali_d * 1e3, single_pnt_cali_int, label='single ptn cal')
g.set_linestyle('')
g.set_marker('o')
g.set_markersize(10)
g.set_markeredgewidth(2)
# st.plot(ax = a)
a.loglog()
a.legend()
return cali_inst_pre, a
###### end of old 2
return cali_inst
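# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hypothetical call of generate_calibration() using the single-point
# calibration values documented in its docstring above; plotting is switched off
# so the call simply returns the Calibration instance.
def _example_generate_calibration():
    cal = generate_calibration(single_pnt_cali_d=508,
                               single_pnt_cali_ior=1.6,
                               single_pnt_cali_int=1000,
                               ior=1.5,
                               dr=[100, 5000],
                               no_cal_pts=50,
                               plot=False)
    return cal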
def get_interface_bins(fname, n_bins, imin=1.4, imax=4.8, save=False, verbose = True):
"""Prints the bins assosiated with what is seen on the POPS user interface and the serial output, respectively.
Parameters
----------
fname: string or calibration instance
name of file containing a calibration or a calibration instance it self
n_bins: int
number of bins
imin: float [1.4], optional
log10 of the minimum value considered (digitizer bins)
imax: float [4.8], optional
log10 of the maximum value considered (digitizer bins)
save: bool or string.
        If a string, the result is saved into the file given by that string.
Returns
-------
matplotlib axes instance
pandas DataFrame instance
"""
if isinstance(fname, str):
cal = read_csv(fname)
else:
cal = fname
bin_ed = np.linspace(imin, imax, n_bins + 1)
bin_center_log = 10 ** ((bin_ed[:-1] + bin_ed[1:]) / 2.)
bin_center_lin = ((10 ** bin_ed[:-1] + 10 ** bin_ed[1:]) / 2.)
bin_ed = 10 ** bin_ed
bin_ed_cal = cal.calibrationFunction(bin_ed)
bin_center_lin_cal = cal.calibrationFunction(bin_center_lin)
bin_center_log_cal = cal.calibrationFunction(bin_center_log)
if save:
save_file = open(save, 'w')
else:
save_file = False
txt = '''
bin edges (digitizer bins)
--------------------------'''
_msg(txt, save, save_file, verbose)
for e, i in enumerate(bin_ed):
_msg(i, save, save_file, verbose)
# bin_center_cal = cal.calibrationFunction(bin_center)
txt = '''
bin centers (digitizer bins)
----------------------------'''
_msg(txt, save, save_file, verbose)
for e, i in enumerate(bin_center_lin):
_msg(i, save, save_file, verbose)
txt = '''
bin centers of logarithms (digitizer bins)
----------------------------'''
_msg(txt, save, save_file, verbose)
for e, i in enumerate(bin_center_log):
_msg(i, save, save_file, verbose)
txt = '''
bin edges (nm)
--------------'''
_msg(txt, save, save_file, verbose)
for e, i in enumerate(bin_ed_cal):
_msg(i, save, save_file, verbose)
# bin_center_cal = cal.calibrationFunction(bin_center)
txt = '''
bin centers (nm)
----------------'''
_msg(txt, save, save_file, verbose)
for e, i in enumerate(bin_center_lin_cal):
_msg(i, save, save_file, verbose)
txt = '''
bin centers of logarithms (nm)
----------------'''
_msg(txt, save, save_file, verbose)
for e, i in enumerate(bin_center_log_cal):
_msg(i, save, save_file, verbose)
out = {}
df_bin_c = pd.DataFrame(bin_center_lin_cal, index=bin_center_log, columns=['Bin_centers'])
df_bin_e = pd.DataFrame(bin_ed_cal, index = bin_ed, columns = ['Bin_edges'])
# a = df.Bin_centers.plot()
if verbose:
f, a = plt.subplots()
d = df_bin_c.Bin_centers.values[1:-1]
g, = a.plot(np.arange(len(d)) + 2, d)
g.set_linestyle('')
g.set_marker('o')
# g.set_label('')
a.set_yscale('log')
a.set_xlim((1, 16))
a.set_ylim((100, 3000))
a.set_ylabel('Bin center (nm)')
a.grid(which='both')
a.set_xlabel('POPS bin')
out['axes'] = a
else:
out['axes'] = None
# a.set_title('Bin')
out['bincenters_v_int'] = df_bin_c
out['binedges_v_int'] = df_bin_e
return out
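# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Hypothetical example: 'pops_calibration.csv' is an assumed file name and 16 an
# assumed bin count; the returned dict keys mirror those built in get_interface_bins.
def _example_interface_bins():
    out = get_interface_bins('pops_calibration.csv', 16, save=False, verbose=False)
    return out['bincenters_v_int'], out['binedges_v_int']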
def _string2Dataframe(data, log=True):
sb = io(data)
dataFrame = pd.read_csv(sb, sep = ' ', names = ('d','amp')).sort_values('d')
if log:
dataFrame.amp = 10 ** dataFrame.amp
return dataFrame
def read_str(data, log=True):
'''Read a calibration table from string.
Arguments
---------
data: string.
        Multiline string with diameter-intensity pairs separated by a space. Diameter in nm, intensity in digitizer bins
or log_10(digitizer bins).
log: bool, optional.
Set True if the intensity values are given in log_10(digitizer bins).
Example
-------
data = """140 88
150 102
173 175
200 295
233 480
270 740
315 880
365 1130
420 1350
490 1930
570 3050
660 4200
770 5100
890 6300
1040 8000
1200 8300
1400 10000
1600 11500
1880 16000
2180 21000
    2500 28000
3000 37000"""
read_str(data, log = False)
'''
dataFrame = _string2Dataframe(data, log=log)
calibrationInstance = Calibration(dataFrame)
return calibrationInstance
def read_csv(fname):
""" most likely found here"""
calDataFrame = | pd.read_csv(fname) | pandas.read_csv |
#################################################################### MODULE COMMENTS ####################################################################
#The TrainingAlgorithm python object bins and shuffles the column data and adds noise to the dataframe columns. This object is also in charge of        #
#calculating the N, Q and F scores and matrices. Each of the function calls is stand-alone and can be calculated independently of the others            #
#The main data structures used by this algorithm are the pandas DataFrame, to move data sets between classes and functions, and dictionaries            #
#The purpose of the dictionaries is to count the number of occurrences of classes in a given dataset and other data points that are needed to derive the#
#Statistics that were mentioned above. #
#################################################################### MODULE COMMENTS ####################################################################
import pandas as pd
import numpy as np
import random
import sys
import copy
import pprint
class TrainingAlgorithm:
#Parameters: Dataframe
    #Returns: Dataframe
    #Function: This function takes in a dataframe and shuffles the values within a random ~10% of its columns, returning the noised copy. The goal of this is to add noise to the data used for testing and training
    def ShuffleData(self, df: pd.DataFrame) -> pd.DataFrame:
#Get a deep copy of the dataframe
df1 = copy.deepcopy(df)
# print(df.head, '\n', df1.head)
#Calculate the number of records to be sampled for testing
TestSize = int((len(df.columns)-1) * .1)
#if the test size is 0
if TestSize == 0:
#Set it to 1
TestSize = 1
#intialize an empty list to store all of the data frames
Shuffled = list()
#Loop through the number of columns that need to have data shuffled
for i in range(TestSize):
#Just continue until we break
while(True):
#Set a variable to a random number for the column to be shuffled around
Column_Shuffle = random.randint(0,len(df.columns)-1)
#If the column number is in the list above then it has been shuffled, try again
if Column_Shuffle in Shuffled :
#Go to the top of the loop
continue
else:
#We found a new column that needs to be shuffled, break out of the loop
break
#Append the column number to the list to save what columns have been shuffled
Shuffled.append(Column_Shuffle)
#Create a temp list
temp = list()
#Loop through the number of rows in the data frame
for j in range(len(df)):
#Append the value in a given cell for a given column to a list
temp.append(df.iloc[j][Column_Shuffle])
#Loop through weach row in the data frame again
for j in range(len(df)):
#Pull a value out from the size of the list
value = random.randint(0,len(temp)-1)
#Set the dataframe value at this position to a random value from the list
df1.at[j,df.columns[Column_Shuffle]] = temp[value]
#Remove the value that was radomly assigned
temp.remove(temp[value])
print(Shuffled)
#Return the Data Frame
return df1
#Parameters: Dataframe
#Returns: List of dataframes
#Function: Take in a given dataframe and break the dataframe down into 2 dataframes, a test and training dataframe. Append both of those to a list and return the list
    def CrossValidation(self, df: pd.DataFrame) -> list:
#Create an empty list
columnss = list()
#For each of the columns in the dataframe
for i in df.columns:
#Append the column name to the list we created above
columnss.append(i)
#Create a dataframe that has the same columns in the same format as the list we created
df1 = pd.DataFrame(columns = columnss)
#Calculate the number of records to be sampled for testing
TestSize = len(df) * .1
#Count until we hit the number of records we want to sample
for i in range(int(TestSize)):
#Set a value to be a random number from the dataset
TestValue = random.randint(0,len(df)-1)
#Append this row to a new dataframe
df1.loc[i] = df.index[TestValue]
#Drop the row from the dataframe
df = df.drop(df.index[TestValue])
#df1.loc[i] = df.drop(df.loc[TestValue].index,inplace =True)
Temporary = list()
#Return the training and test set data
Temporary.append(df)
Temporary.append(df1)
#Return the List of dataframes
return Temporary
#Parameters: DataFrame
#Returns: List of dataframes
    #Function: Take in a dataframe and break it into 10 similarly sized sets and append each of these to a list to be returned
    def BinTestData(self, df: pd.DataFrame) -> list:
#Set the bin size to 10
Binsize = 10
#Create a List of column names that are in the dataframe
columnHeaders = list(df.columns.values)
#Create an empty list
bins = []
#Loop through the size of the bins
for i in range(Binsize):
#Append the dataframe columns to the list created above
bins.append( | pd.DataFrame(columns=columnHeaders) | pandas.DataFrame |
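# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Hypothetical driver for the TrainingAlgorithm helper above: add column noise,
# make a 90/10 train/test split, and bin the data for cross validation.
def _example_training_algorithm(df):
    ta = TrainingAlgorithm()
    noisy = ta.ShuffleData(df)                      # shuffle ~10% of the columns
    train_df, test_df = ta.CrossValidation(noisy)   # returns [train, test]
    folds = ta.BinTestData(noisy)                   # ten similarly sized bins
    return train_df, test_df, folds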
from nose.tools import *
from os.path import abspath, dirname, join
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_series_equal
import numpy as np
import wntr
testdir = dirname(abspath(str(__file__)))
datadir = join(testdir,'networks_for_testing')
net3dir = join(testdir,'..','..','examples','networks')
def test_population_net3():
inp_file = join(net3dir,'Net3.inp')
wn = wntr.network.WaterNetworkModel(inp_file)
pop = wntr.metrics.population(wn)
expected = 79000
error = abs((pop.sum() - expected)/expected)
assert_less(error, 0.01) # 1% error
def test_population_net6():
inp_file = join(net3dir,'Net6.inp')
wn = wntr.network.WaterNetworkModel(inp_file)
pop = wntr.metrics.population(wn)
expected = 152000
error = abs((pop.sum() - expected)/expected)
assert_less(error, 0.01) # 1% error
def test_population_impacted(): # tests query and population impacted
pop = pd.Series([100,200,300,400,500], index=['J1', 'J2', 'J3', 'J4', 'J5'])
# arg1 as a Series, arg2 as a scalar
wsa = pd.Series([0.6,0.7,0.8,0.9,1], index=['J1', 'J2', 'J3', 'J4', 'J5'])
pop_impacted = wntr.metrics.population_impacted(pop, wsa, np.less, 0.8)
expected = pd.Series([100,200,0,0,0], index=['J1', 'J2', 'J3', 'J4', 'J5'])
assert_series_equal(pop_impacted, expected, check_dtype=False)
# arg1 as a Series, arg2 as a Series
wsa = | pd.Series([0.6,0.7,0.8,0.9,1], index=['J1', 'J2', 'J3', 'J4', 'J5']) | pandas.Series |
# -*- coding: utf-8 -*-
import pandas as pd
from fiba_inbounder.communicator import FibaCommunicator
from fiba_inbounder.formulas import game_time, base60_from, base60_to, \
update_secs_v7, update_xy_v7, update_xy_v5, \
update_pbp_stats_v7, update_pbp_stats_v5_to_v7, \
update_team_stats_v5_to_v7, update_player_stats_v5_to_v7
class FibaGameParser:
@staticmethod
def get_game_data_dataframe_v5(match_id):
game_json = FibaCommunicator.get_game_data_v5(match_id)
team_stats_json = game_json['tm']
for t in team_stats_json.values():
#Team Stats
t['Name'] = t['nameInternational']
t['TeamCode'] = t['codeInternational']
t['Periods'] = [{'Id': period.replace('_score', '').replace('p', 'q').upper(), 'Score': score}
for period, score in t.iteritems() if period.startswith('p') and period.endswith('_score')]
if 'ot_score' in t:
t['Periods'].append({'Id': 'OT', 'Score':t['ot_score']})
t['PeriodIdList'] = [p['Id'] for p in t['Periods']]
if base60_from(t['tot_sMinutes']) == 0:
team_secs = sum([base60_from(p['sMinutes']) for p in t['pl'].values()])
t['tot_sMinutes'] = base60_to(team_secs)
#Player Stats
for p in t['pl'].values():
p['TeamCode'] = t['codeInternational']
p['JerseyNumber'] = p['shirtNumber']
p['Name'] = p['name'].replace(' ', '').upper()
p['NumName'] = '{num} {name}'.format(num=p['JerseyNumber'].zfill(2), name=p['Name'])
team_a_stats_json = team_stats_json['1']
team_b_stats_json = team_stats_json['2']
team_a_player_stats_list = [p for p in team_stats_json['1']['pl'].values()]
team_b_player_stats_list = [p for p in team_stats_json['2']['pl'].values()]
        #Opponent DREB for calculating OREB%
team_a_stats_json['OPP_DR'] = team_b_stats_json['tot_sReboundsDefensive']
team_b_stats_json['OPP_DR'] = team_a_stats_json['tot_sReboundsDefensive']
team_a_stats_json['OppTeamCode'] = team_b_stats_json['TeamCode']
team_b_stats_json['OppTeamCode'] = team_a_stats_json['TeamCode']
team_stats_df = pd.DataFrame([team_a_stats_json, team_b_stats_json])
player_stats_df = pd.DataFrame(team_a_player_stats_list + team_b_player_stats_list)
update_team_stats_v5_to_v7(team_stats_df)
update_player_stats_v5_to_v7(player_stats_df)
starter_dict = {t['TeamCode']: {p['NumName'] for p in t['pl'].values() if p['starter'] == 1}
for t in team_stats_json.values()}
pbp_df = pd.DataFrame(reversed(game_json['pbp']))
update_pbp_stats_v5_to_v7(pbp_df, team_a_stats_json['TeamCode'], team_b_stats_json['TeamCode'])
shot_df = pd.DataFrame(sum([t['shot'] for t in team_stats_json.values()], []))
update_pbp_stats_v5_to_v7(shot_df, team_a_stats_json['TeamCode'], team_b_stats_json['TeamCode'])
update_xy_v5(shot_df)
return team_stats_df, player_stats_df, starter_dict, pbp_df, shot_df
@staticmethod
def get_game_stats_dataframe_v7(event_id, game_unit):
game_json = FibaCommunicator.get_game_team_stats_v7(event_id, game_unit)
team_stats_json = game_json['content']['full']['Competitors']
for t in team_stats_json:
#Team Stats
t['Stats']['Name'] = t['Name']
t['Stats']['TeamCode'] = t['TeamCode']
t['Stats']['Periods'] = t['Periods']
t['Stats']['PeriodIdList'] = [p['Id'] for p in t['Periods']]
t['Stats']['SECS'] = 60 * 5 * game_time(len(t['Periods']))
t['Stats']['TP'] = base60_to(t['Stats']['SECS'])
#Player Stats
for p in t['Children']:
p['Stats']['TeamCode'] = t['TeamCode']
p['Stats']['TeamId'] = t['Id']
p['Stats']['JerseyNumber'] = p['JerseyNumber']
p['Stats']['Name'] = p['Name']
team_a_stats_json = team_stats_json[0]['Stats']
team_b_stats_json = team_stats_json[1]['Stats']
team_a_player_stats_list = [p['Stats'] for p in team_stats_json[0]['Children']]
team_b_player_stats_list = [p['Stats'] for p in team_stats_json[1]['Children']]
        #Opponent DREB for calculating OREB%
team_a_stats_json['OPP_DR'] = team_b_stats_json['DR']
team_b_stats_json['OPP_DR'] = team_a_stats_json['DR']
team_a_stats_json['OppTeamCode'] = team_b_stats_json['TeamCode']
team_b_stats_json['OppTeamCode'] = team_a_stats_json['TeamCode']
team_stats_df = | pd.DataFrame([team_a_stats_json, team_b_stats_json]) | pandas.DataFrame |
import pytest
from pandas import Series
import pandas._testing as tm
class TestSeriesUnaryOps:
# __neg__, __pos__, __inv__
def test_neg(self):
ser = tm.makeStringSeries()
ser.name = "series"
tm.assert_series_equal(-ser, -1 * ser)
def test_invert(self):
ser = tm.makeStringSeries()
ser.name = "series"
tm.assert_series_equal(-(ser < 0), ~(ser < 0))
@pytest.mark.parametrize(
"source, neg_target, abs_target",
[
([1, 2, 3], [-1, -2, -3], [1, 2, 3]),
([1, 2, None], [-1, -2, None], [1, 2, None]),
],
)
def test_all_numeric_unary_operators(
self, any_numeric_ea_dtype, source, neg_target, abs_target
):
# GH38794
dtype = any_numeric_ea_dtype
ser = Series(source, dtype=dtype)
neg_result, pos_result, abs_result = -ser, +ser, abs(ser)
if dtype.startswith("U"):
neg_target = -Series(source, dtype=dtype)
else:
neg_target = | Series(neg_target, dtype=dtype) | pandas.Series |
from io import StringIO
import operator
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, date_range
import pandas._testing as tm
from pandas.core.computation.check import _NUMEXPR_INSTALLED
PARSERS = "python", "pandas"
ENGINES = "python", pytest.param("numexpr", marks=td.skip_if_no_ne)
@pytest.fixture(params=PARSERS, ids=lambda x: x)
def parser(request):
return request.param
@pytest.fixture(params=ENGINES, ids=lambda x: x)
def engine(request):
return request.param
def skip_if_no_pandas_parser(parser):
if parser != "pandas":
pytest.skip(f"cannot evaluate with parser {repr(parser)}")
class TestCompat:
def setup_method(self, method):
self.df = DataFrame({"A": [1, 2, 3]})
self.expected1 = self.df[self.df.A > 0]
self.expected2 = self.df.A + 1
def test_query_default(self):
# GH 12749
# this should always work, whether _NUMEXPR_INSTALLED or not
df = self.df
result = df.query("A>0")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1")
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_None(self):
df = self.df
result = df.query("A>0", engine=None)
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine=None)
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_python(self):
df = self.df
result = df.query("A>0", engine="python")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine="python")
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_numexpr(self):
df = self.df
if _NUMEXPR_INSTALLED:
result = df.query("A>0", engine="numexpr")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine="numexpr")
tm.assert_series_equal(result, self.expected2, check_names=False)
else:
with pytest.raises(ImportError):
df.query("A>0", engine="numexpr")
with pytest.raises(ImportError):
df.eval("A+1", engine="numexpr")
class TestDataFrameEval:
# smaller hits python, larger hits numexpr
@pytest.mark.parametrize("n", [4, 4000])
@pytest.mark.parametrize(
"op_str,op,rop",
[
("+", "__add__", "__radd__"),
("-", "__sub__", "__rsub__"),
("*", "__mul__", "__rmul__"),
("/", "__truediv__", "__rtruediv__"),
],
)
def test_ops(self, op_str, op, rop, n):
# tst ops and reversed ops in evaluation
# GH7198
df = DataFrame(1, index=range(n), columns=list("abcd"))
df.iloc[0] = 2
m = df.mean()
base = DataFrame( # noqa
np.tile(m.values, n).reshape(n, -1), columns=list("abcd")
)
expected = eval(f"base {op_str} df")
# ops as strings
result = eval(f"m {op_str} df")
tm.assert_frame_equal(result, expected)
# these are commutative
if op in ["+", "*"]:
result = getattr(df, op)(m)
tm.assert_frame_equal(result, expected)
# these are not
elif op in ["-", "/"]:
result = getattr(df, rop)(m)
tm.assert_frame_equal(result, expected)
def test_dataframe_sub_numexpr_path(self):
# GH7192: Note we need a large number of rows to ensure this
# goes through the numexpr path
df = DataFrame(dict(A=np.random.randn(25000)))
df.iloc[0:5] = np.nan
expected = 1 - np.isnan(df.iloc[0:25])
result = (1 - np.isnan(df)).iloc[0:25]
tm.assert_frame_equal(result, expected)
def test_query_non_str(self):
# GH 11485
df = pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "b"]})
msg = "expr must be a string to be evaluated"
with pytest.raises(ValueError, match=msg):
df.query(lambda x: x.B == "b")
with pytest.raises(ValueError, match=msg):
df.query(111)
def test_query_empty_string(self):
# GH 13139
df = pd.DataFrame({"A": [1, 2, 3]})
msg = "expr cannot be an empty string"
with pytest.raises(ValueError, match=msg):
df.query("")
def test_eval_resolvers_as_list(self):
# GH 14095
df = DataFrame(np.random.randn(10, 2), columns=list("ab"))
dict1 = {"a": 1}
dict2 = {"b": 2}
assert df.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]
assert pd.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]
class TestDataFrameQueryWithMultiIndex:
def test_query_with_named_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(["red", "green"], size=10)
b = np.random.choice(["eggs", "ham"], size=10)
index = MultiIndex.from_arrays([a, b], names=["color", "food"])
df = DataFrame(np.random.randn(10, 2), index=index)
ind = Series(
df.index.get_level_values("color").values, index=index, name="color"
)
# equality
res1 = df.query('color == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == color', parser=parser, engine=engine)
exp = df[ind == "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# inequality
res1 = df.query('color != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != color', parser=parser, engine=engine)
exp = df[ind != "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('color == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == color', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('color != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != color', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in color', parser=parser, engine=engine)
res2 = df.query('"red" in color', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in color', parser=parser, engine=engine)
res2 = df.query('"red" not in color', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
def test_query_with_unnamed_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(["red", "green"], size=10)
b = np.random.choice(["eggs", "ham"], size=10)
index = MultiIndex.from_arrays([a, b])
df = DataFrame(np.random.randn(10, 2), index=index)
ind = Series(df.index.get_level_values(0).values, index=index)
res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine)
exp = df[ind == "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine)
exp = df[ind != "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
| tm.assert_frame_equal(res1, exp) | pandas._testing.assert_frame_equal |
""" Helper function for parallel computing """
from collections import defaultdict
import numpy as np
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.preprocessing import StandardScaler, MinMaxScaler, Imputer
from sklearn.metrics import roc_auc_score, accuracy_score
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel, laplacian_kernel
from sklearn.utils import shuffle
import pandas as pd
import dro_model
def parallel_classification_table1(x_train, y_train, x_test, y_test, param,
kernel_functions, is_missing=False):
""" This a function for using joblib to enhance parallel processing
for the classification example in table2 """
minmax_scaler = MinMaxScaler()
x_train, x_test_sp, y_train, y_test_sp = train_test_split(
x_train, y_train, train_size=500)
x_test = np.vstack([x_test, x_test_sp]) if x_test.size else x_test_sp
y_test = np.hstack([y_test, y_test_sp]) if y_test.size else y_test_sp
if is_missing:
for i in range(250):
pix = np.random.permutation(784)
pix = pix[0:588]
x_train[i, pix] = np.nan
impute = Imputer()
x_train = impute.fit_transform(x_train)
x_train = minmax_scaler.fit_transform(x_train)
x_test = minmax_scaler.transform(x_test)
# Initialize output
dro_results = {}
reg_results = {}
for kernel_fun in kernel_functions:
if kernel_fun.lower() == 'polynomial':
best_params = []
dro_score = []
reg_score = []
gamma = 1 / 100
for deg in param['deg']:
kernel_train = polynomial_kernel(x_train, degree=deg, gamma=gamma)
total_score = validation_process(kernel_train, y_train, param)
# Select the best model
tot_score = pd.DataFrame(total_score)
ave_score = tot_score.mean()
best_kappa, best_epsilon = ave_score.idxmax()
best_reg = ave_score[float('inf')].idxmax()
tmp = {
'kappa': best_kappa,
'epsilon': best_epsilon,
'reg': best_reg
}
best_params.append(tmp)
dro_score.append(ave_score[(best_kappa, best_epsilon)])
reg_score.append(ave_score[(float('inf'), best_reg)])
sel_idx = dro_score.index(max(dro_score))
sel_deg = param['deg'][sel_idx]
sel_kernel_train = polynomial_kernel(x_train, degree=sel_deg, gamma=gamma)
sel_kernel_test = polynomial_kernel(x_test, x_train, degree=sel_deg, gamma=gamma)
sel_param = {
'epsilon': [best_params[sel_idx]['epsilon']],
'kappa': [best_params[sel_idx]['kappa']]
}
dro_results[kernel_fun] = test_performance(
sel_kernel_train, y_train, sel_kernel_test, y_test, sel_param)
sel_idx = reg_score.index(max(reg_score))
sel_deg = param['deg'][sel_idx]
sel_kernel_train = polynomial_kernel(x_train, degree=sel_deg, gamma=gamma)
sel_kernel_test = polynomial_kernel(x_test, x_train, degree=sel_deg, gamma=gamma)
sel_param = {
'epsilon': [best_params[sel_idx]['reg']],
'kappa': [float('inf')]
}
reg_results[kernel_fun] = test_performance(
sel_kernel_train, y_train, sel_kernel_test, y_test, sel_param)
elif (kernel_fun.lower() == 'rbf') or (kernel_fun.lower() == 'gaussian'):
best_params = []
dro_score = []
reg_score = []
for gamma in param['gamma_rbf']:
kernel_train = rbf_kernel(x_train, gamma=gamma)
total_score = validation_process(kernel_train, y_train, param)
# Select the best model
tot_score = pd.DataFrame(total_score)
ave_score = tot_score.mean()
best_kappa, best_epsilon = ave_score.idxmax()
best_reg = ave_score[float('inf')].idxmax()
tmp = {
'kappa': best_kappa,
'epsilon': best_epsilon,
'reg': best_reg
}
best_params.append(tmp)
dro_score.append(ave_score[(best_kappa, best_epsilon)])
reg_score.append(ave_score[(float('inf'), best_reg)])
sel_idx = dro_score.index(max(dro_score))
sel_gamma = param['gamma_rbf'][sel_idx]
sel_kernel_train = rbf_kernel(x_train, gamma=sel_gamma)
sel_kernel_test = rbf_kernel(x_test, x_train, gamma=sel_gamma)
sel_param = {
'epsilon': [best_params[sel_idx]['epsilon']],
'kappa': [best_params[sel_idx]['kappa']]
}
dro_results[kernel_fun] = test_performance(
sel_kernel_train, y_train, sel_kernel_test, y_test, sel_param)
sel_idx = reg_score.index(max(reg_score))
sel_gamma = param['gamma_rbf'][sel_idx]
sel_kernel_train = rbf_kernel(x_train, gamma=sel_gamma)
sel_kernel_test = rbf_kernel(x_test, x_train, gamma=sel_gamma)
sel_param = {
'epsilon': [best_params[sel_idx]['reg']],
'kappa': [float('inf')]
}
reg_results[kernel_fun] = test_performance(
sel_kernel_train, y_train, sel_kernel_test, y_test, sel_param)
elif kernel_fun.lower() == 'laplacian':
best_params = []
dro_score = []
reg_score = []
for gamma in param['gamma_lap']:
kernel_train = laplacian_kernel(x_train, gamma=gamma)
total_score = validation_process(kernel_train, y_train, param)
# Select the best model
tot_score = pd.DataFrame(total_score)
ave_score = tot_score.mean()
best_kappa, best_epsilon = ave_score.idxmax()
best_reg = ave_score[float('inf')].idxmax()
tmp = {
'kappa': best_kappa,
'epsilon': best_epsilon,
'reg': best_reg
}
best_params.append(tmp)
dro_score.append(ave_score[(best_kappa, best_epsilon)])
reg_score.append(ave_score[(float('inf'), best_reg)])
sel_idx = dro_score.index(max(dro_score))
sel_gamma = param['gamma_lap'][sel_idx]
sel_kernel_train = laplacian_kernel(x_train, gamma=sel_gamma)
sel_kernel_test = laplacian_kernel(x_test, x_train, gamma=sel_gamma)
sel_param = {
'epsilon': [best_params[sel_idx]['epsilon']],
'kappa': [best_params[sel_idx]['kappa']]
}
dro_results[kernel_fun] = test_performance(
sel_kernel_train, y_train, sel_kernel_test, y_test, sel_param)
sel_idx = reg_score.index(max(reg_score))
sel_gamma = param['gamma_lap'][sel_idx]
sel_kernel_train = laplacian_kernel(x_train, gamma=sel_gamma)
sel_kernel_test = laplacian_kernel(x_test, x_train, gamma=sel_gamma)
sel_param = {
'epsilon': [best_params[sel_idx]['reg']],
'kappa': [float('inf')]
}
reg_results[kernel_fun] = test_performance(
sel_kernel_train, y_train, sel_kernel_test, y_test, sel_param)
else:
            raise ValueError('Undefined kernel function')
return (dro_results, reg_results)
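# --- Hedged example (illustration only, not called by the pipeline) ---
# A minimal sketch of the model-selection step used above: fold scores are
# stored in a dict keyed by (kappa, epsilon) tuples, averaged with pandas, and
# the best pair is recovered with idxmax(); kappa = inf plays the role of the
# plain regularized model. The score values below are made up for illustration.
def _example_kappa_epsilon_selection():
    total_score = {
        (0.1, 1e-3): [0.91, 0.90],
        (0.5, 1e-3): [0.93, 0.94],
        (float('inf'), 1e-3): [0.89, 0.88],
    }
    ave_score = pd.DataFrame(total_score).mean()
    best_kappa, best_epsilon = ave_score.idxmax()
    best_reg = ave_score[float('inf')].idxmax()
    return best_kappa, best_epsilon, best_reg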
def parallel_classification_table2(*args):
""" This a function for using joblib to enhance parallel processing
for the classification example in table1 """
# Setting parameters
all_param = {
        'epsilon': [1e-4, 5e-4, 1e-3, 5e-3, 1e-2, 5e-2, 1e-1],
'kappa': [0.1, 0.2, 0.3, 0.4, 0.5, 1, float('inf')],
'd': [],
'C': []
}
pnorms = [1, 2, float('Inf')]
# Initialize output
DRSVM_AUC = {}
RSVM_AUC = {}
# Load input data
nargin = len(args)
if nargin == 2:
x_data = args[0]
y_data = args[1]
x_train, x_test, y_train, y_test = train_test_split(
x_data, y_data, test_size=0.25)
elif nargin == 4:
x_train = args[0]
y_train = args[1]
x_train, y_train = shuffle(x_train, y_train)
x_test = args[2]
y_test = args[3]
# Fit classical svm model, hinge loss minimization
stand_scaler = StandardScaler()
x_train_nrm = stand_scaler.fit_transform(x_train)
x_test_nrm = stand_scaler.transform(x_test)
training_data = {'x': x_train_nrm, 'y': y_train}
optimal = dro_model.hinge_loss_minimization(training_data)
w_opt = optimal['w']
y_scores = 1 / (1 + np.exp(-x_test_nrm.dot(w_opt)))
SVM_AUC = roc_auc_score(y_test, y_scores)
# Parameter selection and then test the model performance
skf = StratifiedKFold(n_splits=5)
for pnorm in pnorms:
all_param['pnorm'] = pnorm
total_score = defaultdict(list)
# K-fold cross validation
for train_index, val_index in skf.split(x_train, y_train):
x_train_k, x_val_k = x_train[train_index], x_train[val_index]
y_train_k, y_val_k = y_train[train_index], y_train[val_index]
x_train_k = stand_scaler.fit_transform(x_train_k)
x_val_k = stand_scaler.transform(x_val_k)
data_k = {'x': x_train_k, 'y': y_train_k}
optimal = dro_model.svm(all_param, data_k)
for key, value in optimal.items():
w_opt = np.array(value['w'])
y_scores = 1 / (1 + np.exp(-x_val_k.dot(w_opt)))
total_score[key].append(roc_auc_score(y_val_k, y_scores))
# Select the best model
tot_score = | pd.DataFrame(total_score) | pandas.DataFrame |
"""Map flows on provincial networks
Purpose
-------
Mapping the commune access OD node level matrix values to road network paths in Provinces
For all roads in the Provinces: ['<NAME>', '<NAME>', '<NAME>']
The code estimates 2 values - A MIN and a MAX value of flows between each selected OD node pair
- Based on MIN-MAX generalised costs estimates
Input data requirements
-----------------------
1. Correct paths to all files and correct input parameters
2. Excel file with mode sheets containing network graph structure and attributes
- edge_id - String Edge ID
- from_node - String node ID that should be present in node_id column
- to_node - String node ID that should be present in node_id column
- length - Float length of edge in km
- min_time - Float minimum time of travel in hours on edge
- max_time - Float maximum time of travel in hours on edge
- min_time_cost - Float minimum cost of time in USD on edge
- max_time_cost - Float maximum cost of time in USD on edge
- min_tariff_cost - Float minimum tariff cost in USD on edge
- max_tariff_cost - Float maximum tariff cost in USD on edge
3. Edge shapefiles for all national-scale networks with attributes:
- edge_id - String Edge ID
- geometry - Shapely LineString geometry of edges
4. Excel file with mode sheets containing node-level OD values with attributes:
- origin - String node ID of Origin
- destination - String node ID of Destination
- min_netrev - Float values of minimum daily OD Net Revenue in USD
- max_netrev - Float values of maximum daily OD Net Revenue in USD
- min_tons - Float values of minimum daily OD in tons
- max_tons - Float values of maximum daily OD in tons
Results
-------
1. Excel sheets with results of flow mapping based on MIN-MAX generalised costs estimates:
- origin - String node ID of Origin
- destination - String node ID of Destination
- min_edge_path - List of string of edge ID's for paths with minimum generalised cost flows
- max_edge_path - List of string of edge ID's for paths with maximum generalised cost flows
- min_netrev - Float values of estimated daily Net Revenue for paths with minimum generalised cost flows
- max_netrev - Float values of estimated daily Net Revenue for paths with maximum generalised cost flows
- min_croptons - Float values of estimated daily crop tonnage for paths with minimum generalised cost flows
- max_croptons - Float values of estimated daily crop tonnage for paths with maximum generalised cost flows
- min_distance - Float values of estimated distance for paths with minimum generalised cost flows
- max_distance - Float values of estimated distance for paths with maximum generalised cost flows
- min_time - Float values of estimated time for paths with minimum generalised cost flows
- max_time - Float values of estimated time for paths with maximum generalised cost flows
- min_gcost - Float values of estimated generalised cost for paths with minimum generalised cost flows
- max_gcost - Float values of estimated generalised cost for paths with maximum generalised cost flows
- min_vehicle_nums - Float values of estimated vehicle numbers for paths with minimum generalised cost flows
- max_vehicle_nums - Float values of estimated vehicle numbers for paths with maximum generalised cost flows
2. Shapefiles with all flows on edges mapping based on MIN-MAX generalised costs estimates:
- edge_id - String/Integer/Float Edge ID
- geometry - Shapely LineString geometry of edges
- min_netrev - Float values of estimated daily Net Revenue in USD on edges
- max_netrev - Float values of estimated daily Net Revenue in USD on edges
- min_tons - Float values of estimated daily crops in tons on edges
- max_tons - Float values of estimated daily crops in tons on edges
References
----------
1. <NAME>., <NAME>., <NAME>., <NAME>. & <NAME>. (2018).
Analysis and development of model for addressing climate change/disaster risks in multi-modal transport networks in Vietnam.
Final Report, Oxford Infrastructure Analytics Ltd., Oxford, UK.
2. All input data folders and files referred to in the code below.
"""
import ast
import itertools
import math
import operator
import os
import subprocess
import sys
import copy
import geopandas as gpd
import igraph as ig
import numpy as np
import pandas as pd
from shapely import wkt
from shapely.geometry import Point
from vtra.transport_flow_and_failure_functions import *
from vtra.utils import *
def network_od_paths_assembly_provincial(points_dataframe, graph, vehicle_wt, region_name,
excel_writer=''):
"""Assemble estimates of OD paths, distances, times, costs and tonnages on networks
Parameters
----------
points_dataframe : pandas.DataFrame
OD nodes and their tonnages
graph
igraph network structure
vehicle_wt : float
unit weight of vehicle
region_name : str
name of Province
excel_writer
Name of the excel writer to save Pandas dataframe to Excel file
Returns
-------
save_paths_df : pandas.DataFrame
- origin - String node ID of Origin
- destination - String node ID of Destination
- min_edge_path - List of string of edge ID's for paths with minimum generalised cost flows
- max_edge_path - List of string of edge ID's for paths with maximum generalised cost flows
- min_netrev - Float values of estimated netrevenue for paths with minimum generalised cost flows
- max_netrev - Float values of estimated netrevenue for paths with maximum generalised cost flows
- min_croptons - Float values of estimated crop tons for paths with minimum generalised cost flows
- max_croptons - Float values of estimated crop tons for paths with maximum generalised cost flows
- min_distance - Float values of estimated distance for paths with minimum generalised cost flows
- max_distance - Float values of estimated distance for paths with maximum generalised cost flows
- min_time - Float values of estimated time for paths with minimum generalised cost flows
- max_time - Float values of estimated time for paths with maximum generalised cost flows
- min_gcost - Float values of estimated generalised cost for paths with minimum generalised cost flows
- max_gcost - Float values of estimated generalised cost for paths with maximum generalised cost flows
- min_vehicle_nums - Float values of estimated vehicle numbers for paths with minimum generalised cost flows
- max_vehicle_nums - Float values of estimated vehicle numbers for paths with maximum generalised cost flows
"""
save_paths = []
points_dataframe = points_dataframe.set_index('origin')
origins = list(set(points_dataframe.index.values.tolist()))
for origin in origins:
try:
destinations = points_dataframe.loc[[origin], 'destination'].values.tolist()
min_croptons = points_dataframe.loc[[origin], 'min_croptons'].values.tolist()
max_croptons = points_dataframe.loc[[origin], 'max_croptons'].values.tolist()
min_rev = points_dataframe.loc[[origin], 'min_netrev'].values.tolist()
max_rev = points_dataframe.loc[[origin], 'max_netrev'].values.tolist()
min_veh_nums = points_dataframe.loc[[origin], 'min_vehicle_nums'].values.tolist()
max_veh_nums = points_dataframe.loc[[origin], 'max_vehicle_nums'].values.tolist()
get_min_path, get_min_dist, get_min_time, get_min_gcost = network_od_path_estimations(
graph, origin, destinations,vehicle_wt,vehicle_wt, 'min_gcost', 'min_time')
get_max_path, get_max_dist, get_max_time, get_max_gcost = network_od_path_estimations(
graph, origin, destinations,vehicle_wt,vehicle_wt, 'max_gcost', 'max_time')
save_paths += list(zip([origin]*len(destinations), destinations, get_min_path, get_max_path, min_rev, max_rev, min_croptons, max_croptons,
get_min_dist, get_max_dist, get_min_time, get_max_time, get_min_gcost, get_max_gcost, min_veh_nums, max_veh_nums))
print("done with {0} in province {1}".format(origin, region_name))
except:
print('* no path between {}-{}'.format(origin,destinations))
cols = [
'origin', 'destination', 'min_edge_path', 'max_edge_path', 'min_netrev', 'max_netrev',
'min_croptons', 'max_croptons', 'min_distance', 'max_distance', 'min_time', 'max_time',
'min_gcost', 'max_gcost', 'min_vehicle_nums', 'max_vehicle_nums'
]
save_paths_df = pd.DataFrame(save_paths, columns=cols)
save_paths_df.to_excel(excel_writer, region_name +
'_{}_tons'.format(int(vehicle_wt)), index=False)
excel_writer.save()
del save_paths
return save_paths_df
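# --- Hedged usage sketch (not part of the original pipeline) ---
# Illustrates how the assembly function above might be called; the igraph
# construction, output file name and column subset below are assumptions based
# on the edge attributes listed in the module docstring.
def _example_provincial_assembly(od_df, edges_df):
    attrs = ['edge_id', 'min_gcost', 'max_gcost', 'min_time', 'max_time']
    G = ig.Graph.TupleList(
        edges_df[['from_node', 'to_node'] + attrs].itertuples(index=False),
        edge_attrs=attrs)
    writer = pd.ExcelWriter('example_flow_paths.xlsx')
    return network_od_paths_assembly_provincial(
        od_df, G, vehicle_wt=5.0, region_name='Lao Cai', excel_writer=writer)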
def main():
"""Map flows to networks
1. Specify the paths from where you want to read and write:
- Input data
- Intermediate calculations data
- Output results
2. Supply input data and parameters
- Names of the three Provinces: List of string types
- Assumed terrains of the provinces: List of string types
- Assumed unit weights of trucks: List of float types
3. Give the paths to the input data files:
- Network edges Excel file
- OD flows Excel file
- Road properties Excel file
4. Specify the output files and paths to be created
"""
data_path, calc_path, output_path = load_config()['paths']['data'], load_config()[
'paths']['calc'], load_config()['paths']['output']
# Supply input data and parameters
province_list = ['Lao Cai', 'Binh Dinh', 'Thanh Hoa']
province_terrian = ['mountain', 'flat', 'flat']
truck_unit_wt = [5.0]
percentage = [100.0]
# Give the paths to the input data files
network_data_path = os.path.join(data_path,'post_processed_networks')
network_data_excel = os.path.join(data_path,'post_processed_networks','province_roads_edges.xlsx')
od_output_excel = os.path.join(
output_path, 'flow_ods','province_roads_commune_center_flow_ods.xlsx')
rd_prop_file = os.path.join(data_path, 'mode_properties', 'road_properties.xlsx')
# Specify the output files and paths to be created
flow_shp_dir = os.path.join(output_path, 'flow_mapping_shapefiles')
if os.path.exists(flow_shp_dir) == False:
os.mkdir(flow_shp_dir)
flow_csv_dir = os.path.join(output_path, 'flow_mapping_combined')
if os.path.exists(flow_csv_dir) == False:
os.mkdir(flow_csv_dir)
flow_paths_dir = os.path.join(output_path, 'flow_mapping_paths')
if os.path.exists(flow_paths_dir) == False:
os.mkdir(flow_paths_dir)
for perct in percentage:
flow_output_excel = os.path.join(
flow_paths_dir, 'province_roads_commune_center_access_flow_paths_{}_percent.xlsx'.format(int(perct)))
excl_wrtr = pd.ExcelWriter(flow_output_excel)
# Start the OD flow mapping process
for prn in range(len(province_list)):
province = province_list[prn]
province_name = province.replace(' ', '').lower()
# Load igraph network and GeoDataFrame
print ('* Loading {} igraph network and GeoDataFrame'.format(province))
edges_in = | pd.read_excel(network_data_excel,sheet_name = province_name,encoding='utf-8') | pandas.read_excel |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Checking the mixing of trajectories - the limits on the number of trajectories,
the limits on running the network for a long time, and the contribution
of feeding the trajectories to the network.
For each number of trajectories (5, 20, 50, 100, 200, 1000, inf)
For each RNN (with or without trajectories)
Run for 200 epochs
Run a control where the trajectories are not fed to the network
Save test_accuracy for each run
Save a dataset with the final accuracy from the trajectories
Print a figure with/without trajectories
sys.argv gets:
    [1] = how many trajectories
    [2] = number of epochs per run
    [3] = number of epochs
    [4] = resolution factor
    [5] = number of samples to draw
traject = 5 - out.418713 5 200 3 8 20
traject = 1 - out.418730 1 200 3 8 20
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import sys
sys.path.insert(1, '/home/labs/ahissarlab/orra/imagewalker')
sys.path.insert(1, '/home/orram/Documents/GitHub/imagewalker')
import gc
#import torch
#from torch.optim import Adam, SGD
#import torch.nn as nn
# import tensorflow as tf
# import tensorflow.keras as keras
from mnist import MNIST
from keras_utils import create_cifar_dataset, split_dataset_xy
from cifar_nets import cnn_gru, parallel_gru
print('Starting Run')
if len(sys.argv) > 1:
parameters = {
        # Define the number of trajectories to use
'num_trajectories' : int(sys.argv[1]),
'num_learning_epochs' : int(sys.argv[2]),
'num_trials' : int(sys.argv[3]),
'res' : int(sys.argv[4]),
        'sample' : int(sys.argv[5]), #Number of samples to draw
}
else:
parameters = {
        # Define the number of trajectories to use
'num_trajectories' : 1,
'num_learning_epochs' : 1,
'num_trials' : 1,
'res' : 8,
        'sample' : 5, #Number of samples to draw
}
print(parameters)
# Unpack the parameters dict into module-level variables.
num_trajectories = parameters['num_trajectories']
num_learning_epochs = parameters['num_learning_epochs']
num_trials = parameters['num_trials']
res = parameters['res']
sample = parameters['sample']
train_dataframe = pd.DataFrame()
test_dataframe = pd.DataFrame()
train_prll_dataframe = pd.DataFrame()
test_prll_dataframe = pd.DataFrame()
test_dataframe_no_coordinates = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
COLUMN_RENAMES = {
"Age (110)": "Age",
"Total - Sex": "Total",
"Age(110)": "Age",
"Age (122)": "Age",
"Age (123)": "Age",
"Age (131)": "Age",
"Age (in single years) and average age (127)": "Age",
" Female": "Female",
" Male": "Male",
}
AGE_RENAMES = {
"Under 1 year": "0",
"Under 1": "0",
"under 1": "0",
"90\+": "90",
"90 and over": "90",
"100\+": "100",
}
AGE_GROUP_BINS = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 105]
AGE_GROUP_LABELS = [
"0-4",
"5-9",
"10-14",
"15-19",
"20-24",
"25-29",
"30-34",
"35-39",
"40-44",
"45-49",
"50-54",
"55-59",
"60-64",
"65-69",
"70-74",
"75-79",
"80+",
]
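# --- Hedged example (illustration only, not used by the loaders below) ---
# Shows how single-year ages can be grouped with the bins/labels above;
# right=False keeps each bin closed on the left, e.g. [0, 5) -> "0-4".
def _example_age_grouping():
    ages = pd.Series([0, 4, 5, 37, 81, 102])
    return pd.cut(ages, bins=AGE_GROUP_BINS, labels=AGE_GROUP_LABELS, right=False)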
def _get_census_1991_pictou_county():
path = "https://www12.statcan.gc.ca/English/census91/data/tables/File.cfm?S=0&LANG=E&A=R&PID=71935&GID=3503&D1=0&D2=0&D3=0&D4=0&D5=0&D6=0&OFT=CSV"
df = pd.read_csv(path, skiprows=2)
df.drop(columns=[" "], inplace=True)
df.rename(columns=COLUMN_RENAMES, inplace=True)
df.drop(df.index[0], inplace=True)
df = df[~df.Age.str.contains("years")]
df.drop(df.tail(2).index, inplace=True)
df["Year"] = 1991
df.reset_index(inplace=True, drop=True)
return df
def _get_census_1996_pictou_county():
path = "https://www12.statcan.gc.ca/English/census96/data/tables/File.cfm?S=0&LANG=E&A=R&PID=1030&GID=199728&D1=0&D2=0&D3=0&D4=0&D5=0&D6=0&OFT=CSV"
df = pd.read_csv(path, skiprows=2)
df.drop(columns=[" "], inplace=True)
df.rename(columns=COLUMN_RENAMES, inplace=True)
df.drop(df.index[0], inplace=True)
df = df[~df.Age.str.contains("-")]
df.drop(df.tail(1).index, inplace=True)
df["Year"] = 1996
df.reset_index(inplace=True, drop=True)
return df
def _get_census_2001_pictou_county():
path = "https://www12.statcan.gc.ca/English/census01/products/standard/themes/File.cfm?S=0&LANG=E&A=R&PID=55439&GID=426199&D1=0&D2=0&D3=0&D4=0&D5=0&D6=0&OFT=CSV"
df = pd.read_csv(path, skiprows=2)
df.drop(columns=[" "], inplace=True)
df.rename(columns=COLUMN_RENAMES, inplace=True)
df.drop(df.index[0], inplace=True)
df = df[~df.Age.str.contains("-")]
df.drop(df.tail(2).index, inplace=True)
df["Year"] = 2001
df.reset_index(inplace=True, drop=True)
return df
def _get_census_2006_pictou_county():
path = "https://www12.statcan.gc.ca/census-recensement/2006/dp-pd/tbt/File.cfm?S=0&LANG=E&A=R&PID=88989&GID=771825&D1=0&D2=0&D3=0&D4=0&D5=0&D6=0&OFT=CSV"
df = pd.read_csv(path, skiprows=2)
df.drop(columns=[" "], inplace=True)
df.rename(columns=COLUMN_RENAMES, inplace=True)
df.drop(df.index[0], inplace=True)
df = df[~df.Age.str.contains("years")]
df.drop(df.tail(7).index, inplace=True)
df["Year"] = 2006
df.reset_index(inplace=True, drop=True)
return df
def _get_census_2011_pictou_county():
path = "https://www12.statcan.gc.ca/census-recensement/2011/dp-pd/tbt-tt/File.cfm?S=0&LANG=E&A=R&PID=102010&GID=906638&D1=0&D2=0&D3=0&D4=0&D5=0&D6=0&OFT=CSV"
df = pd.read_csv(path, skiprows=2)
df.drop(columns=[" "], inplace=True)
df.rename(columns=COLUMN_RENAMES, inplace=True)
df.drop(df.index[0], inplace=True)
df = df[~df.Age.str.contains("years")]
df.drop(df.tail(4).index, inplace=True)
df["Year"] = 2011
df.reset_index(inplace=True, drop=True)
return df
def _get_census_2016_pictou_county():
path = "https://www12.statcan.gc.ca/census-recensement/2016/dp-pd/dt-td/File.cfm?S=0&LANG=E&A=R&PID=109526&GID=1160159&D1=0&D2=0&D3=0&D4=0&D5=0&D6=0&OFT=CSV"
df = | pd.read_csv(path, skiprows=3) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET
import copy
import glob
import pandas as pd
import os
"""
This code tries to improve the output files, making it easier for the user to view and work with files that contain extended
nodes in Cytoscape. Fix_y changes the coordinates of all nodes below a node that is extended, reducing the number of overlapping
nodes.
"""
def fix_coords(root):
nodes = []
#Extract the genes and compounds
for child in root:
for underchild in child:
if (child.attrib["type"] == "gene") or (child.attrib["type"] == "compound"):
namestring = underchild.attrib["name"]
if "," in namestring:
length = 1
else:
split_namestring = namestring.split(" ")
length = len(split_namestring)
nodes.append((int(child.attrib["id"]), int(length), int(underchild.attrib["y"])))
# Change to Pandas Dataframe to sort the whole thing
nodes_df = | pd.DataFrame(nodes) | pandas.DataFrame |
import unittest
from main.main_app import compute_accuracy, get_set, normalize_set, fill_empty_with_average, fill_empty_with_random
from decimal import Decimal
import pandas as pd
import numpy as np
class MainTest(unittest.TestCase):
def test_compute_accuracy(self):
"""Tests the 'compute_accuracy' method in Main with different values."""
# Testing full accuracy on 5 labels
y_star = [1, 0, 0, 1, 0]
y = [1, 0, 0, 1, 0]
self.assertEqual(compute_accuracy(y_star, y), 1,
"Accuracy should be 1 for [1,0,0,1,0] [1,0,0,1,0]")
# Testing full failure on 1 label
y_star = [1]
y = [0]
self.assertEqual(compute_accuracy(y_star, y), 0,
"Accuracy should be 0 for [1] [0]")
# Testing decimal accuracy on 5 labels
y_star = [0, 0, 0, 0, 0]
y = [0, 0, 0, 1, 1]
self.assertEqual(compute_accuracy(y_star, y), Decimal('0.6'),
"Accuracy should be 0.6 for [0,0,0,0,0] [0,0,0,1,1]")
# Testing decimal accuracy on 5 labels with different values
y_star = [5.2, -10, -15.3, "str", 0]
y = [5.2, -10, -15.3, "str", 1000]
self.assertEqual(compute_accuracy(y_star, y), Decimal('0.8'),
"Accuracy should be 0.8 for [5.2, -10, -15.3, 'str', 0] [5.2, -10, -15.3, 'str', 1000]")
def test_get_set(self):
"""Tests that data from CSV file is loaded without null values"""
# Retrieving sets
training_set = get_set('../dataset/1500_pairs_train.csv')
testing_set = get_set('../dataset/400_pairs_test.csv')
sets = [training_set, testing_set]
# Checking that there is no null value in any cell of the matrix of any csv file
for current in range(len(sets)):
for row in range(len(sets[current])):
self.assertFalse( | pd.isnull(sets[current][row, 0]) | pandas.isnull |
import pandas as pd
import numpy as np
df= pd.read_csv('../Datos/Premios2020.csv',encoding='ISO-8859-1')
# print(df.isnull().sum())
# moda = df.release.mode()
# valores = {'release': moda[0]}
# df.fillna(value=valores, inplace=True)
moda = df['release'].mode()
df['release'] = df['release'].replace([np.nan], moda)
print( | pd.value_counts(df['release']) | pandas.value_counts |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import datetime
import pandas as pd
import numpy as _np
import os
# import pylab as plt
# from atmPy.tools import conversion_tools as ct
from atmPy.general import timeseries
from atmPy.atmosphere import standards as atm_std
import pathlib
def read_file(path,
version = 'BBB_01',
pattern = 'HK',
skip_histogram = False,
ignore_colums = [], #['Flow_Rate_ccps', 'LED_P_MON', 'AI_4', 'AI_5', 'AI_7', 'AI_8', 'AI_9', 'AI_10', 'AI_11', 'LED_P_Mon_Therm', 'AO_Flow', 'AO_LaserPower', 'No_Pts', 'ValidParts', 'writeTime', 'currMax'],
verbose = False):
"""
Parameters
----------
path: string or list of strings.
This can either be a file name, a list of filenames or a folder.
version: string ['BBB_01']
BBB_01: Beagle bone
sbRio: sbRio
pattern: str
        if a folder is given, then this is the pattern by which housekeeping files will be identified
verbose: bool
Returns
-------
TimeSeries instance
"""
# test_data_folder = os.listdir()
# test_data_folder = '20150419_000_POPS_HK.csv'
def read_sbRio(fname, skip_histogram = False, verbose=False):
"""Reads housekeeping file (test_data_folder; csv-format) returns a pandas data frame instance.
"""
if verbose:
print('reading %s' % fname)
try:
df = pd.read_csv(fname, error_bad_lines=False)
except ValueError:
return False
# data = df.values
# dateString = test_data_folder.split('_')[0]
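        # The sbRio time stamps appear to count seconds since 1904-01-01 (the
        # LabVIEW epoch); dt below shifts them onto the Unix epoch (1970-01-01)
        # so pandas can interpret them with unit='s'.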
dt = datetime.datetime.strptime('19700101', "%Y%m%d") - datetime.datetime.strptime('19040101', "%Y%m%d")
dts = dt.total_seconds()
# todo: (low) what is that delta t for, looks fishi (Hagen)
dtsPlus = datetime.timedelta(hours=0).total_seconds()
# Time_s = data[:,0]
# data = data[:,1:]
df.index = pd.Series(pd.to_datetime(df.Time_s - dts - dtsPlus, unit='s'), name='Time_UTC')
# if 'P_Baro' in df.keys():
# df['barometric_pressure'] = df.P_Baro
# df.drop('P_Baro', 1, inplace=True)
# df['altitude'] = ct.p2h(df.barometric_pressure)
return POPSHouseKeeping(df)
def read_BBB(fname, skip_histogram = False, verbose = False):
if verbose:
print(f'read pops house keeping bbb file: {fname}')
col_names = pd.read_csv(fname, sep=',', nrows=1, header=None,
# index_col=1,
# usecols=np.arange()
).values[0][:-1].astype(str)
col_names = _np.char.strip(col_names)
if skip_histogram:
usecols = list(range(27))
else:
usecols = None
data = pd.read_csv(fname, sep=',', skiprows=1, header=None, usecols = usecols
# index_col=1,
# usecols=np.arange()
)
data_hk = data.iloc[:, :27]
data_hk.columns = col_names
data_hk.index = pd.to_datetime(data_hk['DateTime'], unit='s')
data_hk.drop('DateTime', axis=1, inplace=True)
# hk = atmPy.general.timeseries.TimeSeries(data_hk, sampling_period = 1)
hk = POPSHouseKeeping(data_hk, sampling_period=1)
hk.data['Barometric_pressure'] = hk.data['P']
return hk
def read_BBB_02(fname, skip_histogram = False, verbose = False):
if verbose:
print(f'read pops house keeping bbb file: {fname}')
# col_names = pd.read_csv(fname, sep=',', nrows=1, header=None,
# # index_col=1,
# # usecols=np.arange()
# ).values[0][:-1].astype(str)
# col_names = _np.char.strip(col_names)
if skip_histogram:
usecols = list(range(27))
else:
usecols = None
data = pd.read_csv(fname, sep=',', skiprows=1, header=None, usecols = usecols
# index_col=1,
# usecols=np.arange()
)
# data.columns = _np.char.strip(data.columns)
return data
        # NOTE: the early return above makes the remaining lines unreachable.
        data_hk = data#.iloc[:, :27]
# data_hk.columns = col_names
data_hk.index = | pd.to_datetime(data_hk['DateTime'], unit='s') | pandas.to_datetime |
from itertools import groupby, zip_longest
from fractions import Fraction
from random import sample
import json
import pandas as pd
import numpy as np
import music21 as m21
from music21.meter import TimeSignatureException
m21.humdrum.spineParser.flavors['JRP'] = True
from collections import defaultdict
#song has no meter
class UnknownPGramType(Exception):
def __init__(self, arg):
self.arg = arg
def __str__(self):
return f"Unknown pgram type: {self.arg}."
#compute features:
def compute_completesmeasure_phrase(seq, ix, start_ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][start_ix]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % seq['features']['beatspermeasure'][ix] == 0
def compute_completesbeat_phrase(seq, ix, start_ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][start_ix]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % 1 == 0
def compute_completesmeasure_song(seq, ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][0]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % seq['features']['beatspermeasure'][ix] == 0
def compute_completesbeat_song(seq, ix):
endpos = Fraction(seq['features']['beatinphrase'][ix]) - \
Fraction(seq['features']['beatinphrase'][0]) + \
Fraction(seq['features']['IOI_beatfraction'][ix])
return endpos % 1 == 0
#extract IOI in units of beat
#IOI_beatfraction[i] is IOI from start of ith note till start of (i+1)th note
#for last note: beatfraction is taken
#Also to be interpreted as duration of note + duration of following rests (except for rests at end of melody)
#
#extract beats per measure
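#Example (illustration): beatinsong ['0', '1', '3/2'] with beatfraction ['1', '1/2', '1/2']
#yields IOI_beatfraction ['1', '1/2', '1/2'] - the last note falls back to its own beatfraction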
def extractFeatures(seq_iter, vocalfeatures=True):
count = 0
for seq in seq_iter:
count += 1
if count % 100 == 0:
print(count, end=' ')
pairs = zip(seq['features']['beatinsong'],seq['features']['beatinsong'][1:]) #this possibly includes rests
IOI_beatfraction = [Fraction(o[1])-Fraction(o[0]) for o in pairs]
IOI_beatfraction = [str(bf) for bf in IOI_beatfraction] + [seq['features']['beatfraction'][-1]]
seq['features']['IOI_beatfraction'] = IOI_beatfraction
beatspermeasure = [m21.meter.TimeSignature(ts).beatCount for ts in seq['features']['timesignature']]
seq['features']['beatspermeasure'] = beatspermeasure
phrasepos = seq['features']['phrasepos']
phrasestart_ix=[0]*len(phrasepos)
for ix in range(1,len(phrasestart_ix)):
if phrasepos[ix] < phrasepos[ix-1]:
phrasestart_ix[ix] = ix
else:
phrasestart_ix[ix] = phrasestart_ix[ix-1]
seq['features']['phrasestart_ix'] = phrasestart_ix
endOfPhrase = [x[1]<x[0] for x in zip(phrasepos, phrasepos[1:])] + [True]
seq['features']['endOfPhrase'] = endOfPhrase
cm_p = [compute_completesmeasure_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(len(phrasepos))]
cb_p = [compute_completesbeat_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(len(phrasepos))]
cm_s = [compute_completesmeasure_song(seq, ix) for ix in range(len(phrasepos))]
cb_s = [compute_completesbeat_song(seq, ix) for ix in range(len(phrasepos))]
seq['features']['completesmeasure_phrase'] = cm_p
seq['features']['completesbeat_phrase'] = cb_p
seq['features']['completesmeasure_song'] = cm_s
seq['features']['completesbeat_song'] = cb_s
if vocalfeatures:
#move lyric features to end of melisma:
#rhymes, rhymescontentwords, wordstress, noncontentword, wordend
#and compute rhyme_noteoffset and rhyme_beatoffset
if 'melismastate' in seq['features'].keys(): #vocal?
lyrics = seq['features']['lyrics']
phoneme = seq['features']['phoneme']
melismastate = seq['features']['melismastate']
rhymes = seq['features']['rhymes']
rhymescontentwords = seq['features']['rhymescontentwords']
wordend = seq['features']['wordend']
noncontentword = seq['features']['noncontentword']
wordstress = seq['features']['wordstress']
rhymes_endmelisma, rhymescontentwords_endmelisma = [], []
wordend_endmelisma, noncontentword_endmelisma, wordstress_endmelisma = [], [], []
lyrics_endmelisma, phoneme_endmelisma = [], []
from_ix = 0
inmelisma = False
for ix in range(len(phrasepos)):
if melismastate[ix] == 'start':
from_ix = ix
inmelisma = True
if melismastate[ix] == 'end':
if not inmelisma:
from_ix = ix
inmelisma = False
rhymes_endmelisma.append(rhymes[from_ix])
rhymescontentwords_endmelisma.append(rhymescontentwords[from_ix])
wordend_endmelisma.append(wordend[from_ix])
noncontentword_endmelisma.append(noncontentword[from_ix])
wordstress_endmelisma.append(wordstress[from_ix])
lyrics_endmelisma.append(lyrics[from_ix])
phoneme_endmelisma.append(phoneme[from_ix])
else:
rhymes_endmelisma.append(False)
rhymescontentwords_endmelisma.append(False)
wordend_endmelisma.append(False)
noncontentword_endmelisma.append(False)
wordstress_endmelisma.append(False)
lyrics_endmelisma.append(None)
phoneme_endmelisma.append(None)
seq['features']['rhymes_endmelisma'] = rhymes_endmelisma
seq['features']['rhymescontentwords_endmelisma'] = rhymescontentwords_endmelisma
seq['features']['wordend_endmelisma'] = wordend_endmelisma
seq['features']['noncontentword_endmelisma'] = noncontentword_endmelisma
seq['features']['wordstress_endmelisma'] = wordstress_endmelisma
seq['features']['lyrics_endmelisma'] = lyrics_endmelisma
seq['features']['phoneme_endmelisma'] = phoneme_endmelisma
#compute rhyme_noteoffset and rhyme_beatoffset
rhyme_noteoffset = [0]
rhyme_beatoffset = [0.0]
previous = 0
previousbeat = float(Fraction(seq['features']['beatinsong'][0]))
for ix in range(1,len(rhymescontentwords_endmelisma)):
if rhymescontentwords_endmelisma[ix-1]: #previous rhymes
previous = ix
previousbeat = float(Fraction(seq['features']['beatinsong'][ix]))
rhyme_noteoffset.append(ix - previous)
rhyme_beatoffset.append(float(Fraction(seq['features']['beatinsong'][ix])) - previousbeat)
seq['features']['rhymescontentwords_noteoffset'] = rhyme_noteoffset
seq['features']['rhymescontentwords_beatoffset'] = rhyme_beatoffset
else:
#vocal features requested, but not present.
#skip melody
continue
#Or do this?
if False:
length = len(phrasepos)
seq['features']['rhymes_endmelisma'] = [None] * length
seq['features']['rhymescontentwords_endmelisma'] = [None] * length
seq['features']['wordend_endmelisma'] = [None] * length
seq['features']['noncontentword_endmelisma'] = [None] * length
seq['features']['wordstress_endmelisma'] = [None] * length
seq['features']['lyrics_endmelisma'] = [None] * length
seq['features']['phoneme_endmelisma'] = [None] * length
yield seq
class NoFeaturesError(Exception):
def __init__(self, arg):
self.args = arg
class NoTrigramsError(Exception):
    def __init__(self, arg):
        self.value = arg
    def __str__(self):
        return repr(self.value)
#endix is index of last note + 1
def computeSumFractions(fractions, startix, endix):
res = 0.0
for fr in fractions[startix:endix]:
res = res + float(Fraction(fr))
return res
#make groups of indices with the same successive pitch, but (optionally) not crossing phrase boundaries <- 20200331 crossing phrase boundaries should be allowed (contourfourth)
#returns tuples (ix of first note in group, ix of last note in group + 1)
#crossPhraseBreak=False splits on phrase break. N.B. Is Using GroundTruth!
def breakpitchlist(midipitch, phrase_ix, crossPhraseBreak=False):
res = []
if crossPhraseBreak:
for _, g in groupby( enumerate(midipitch), key=lambda x:x[1]):
glist = list(g)
res.append( (glist[0][0], glist[-1][0]+1) )
else: #N.B. This uses the ground truth
for _, g in groupby( enumerate(zip(midipitch,phrase_ix)), key=lambda x:(x[1][0],x[1][1])):
glist = list(g)
res.append( (glist[0][0], glist[-1][0]+1) )
return res
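#Hedged example (illustration only): consecutive equal pitches merge into (start, end+1) spans,
#e.g. pitches [60, 60, 62] within a single phrase give [(0, 2), (2, 3)]
def _example_breakpitchlist():
    return breakpitchlist([60, 60, 62], [0, 0, 0], crossPhraseBreak=False)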
#True if no phrase end at first or second item (span) in the trigram
#trigram looks like ((8, 10), (10, 11), (11, 12))
def noPhraseBreak(tr, endOfPhrase):
return not ( ( True in endOfPhrase[tr[0][0]:tr[0][1]] ) or \
( True in endOfPhrase[tr[1][0]:tr[1][1]] ) )
#pgram_type : "pitch", "note"
def extractPgramsFromCorpus(corpus, pgram_type="pitch", startat=0, endat=None):
pgrams = {}
arfftype = {}
for ix, seq in enumerate(corpus):
if endat is not None:
if ix >= endat:
continue
if ix < startat:
continue
if not ix%100:
print(ix, end=' ')
songid = seq['id']
try:
pgrams[songid], arfftype_new = extractPgramsFromMelody(seq, pgram_type=pgram_type)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervalsize', typeconv=lambda x: abs(int(x)))
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervaldir', typeconv=np.sign)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'diatonicpitch', typeconv=int)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'VosHarmony', typeconv=int)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'beatstrength', typeconv=float)
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'IOIbeatfraction', typeconv=float)
if 'melismastate' in seq['features'].keys():
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'wordstress', typeconv=int)
if 'informationcontent' in seq['features'].keys():
_,_ = addCrossRelations(pgrams[songid], arfftype_new, 'informationcontent', typeconv=float)
except NoFeaturesError:
print(songid, ": No features extracted.")
except NoTrigramsError:
print(songid, ": No trigrams extracted")
#if ix > startat:
# if arfftype.keys() != arfftype_new.keys():
# print("Warning: Melodies have different feature sets.")
# print(list(zip_longest(arfftype.keys(), arfftype_new.keys())))
#Keep largest set of features possible. N.B. no guarantee that all features in arfftype are in each sequence.
arfftype.update(arfftype_new)
#concat melodies
pgrams = pd.concat([v for v in pgrams.values()])
return pgrams, arfftype
def extractPgramsFromMelody(seq, pgram_type, skipPhraseCrossing=False):
# some aliases
scaledegree = seq['features']['scaledegree']
endOfPhrase = seq['features']['endOfPhrase']
midipitch = seq['features']['midipitch']
phrase_ix = seq['features']['phrase_ix']
if pgram_type == "pitch":
event_spans = breakpitchlist(midipitch, phrase_ix) #allow pitches to cross phrase break
elif pgram_type == "note":
event_spans = list(zip(range(len(scaledegree)),range(1,len(scaledegree)+1)))
else:
raise UnknownPGramType(pgram_type)
# make trigram of spans
event_spans = event_spans + [(None, None), (None, None)]
pgram_span_ixs = list(zip(event_spans,event_spans[1:],event_spans[2:],event_spans[3:],event_spans[4:]))
# If skipPhraseCrossing prune trigrams crossing phrase boundaries. WHY?
#Why actually? e.g. kindr154 prhases of 2 pitches
if skipPhraseCrossing:
pgram_span_ixs = [ixs for ixs in pgram_span_ixs if noPhraseBreak(ixs,endOfPhrase)]
if len(pgram_span_ixs) == 0:
raise NoTrigramsError(seq['id'])
# create dataframe with pgram names as index
pgram_ids = [seq["id"]+'_'+str(ixs[0][0]).zfill(3) for ixs in pgram_span_ixs]
pgrams = pd.DataFrame(index=pgram_ids)
pgrams['ix0_0'] = pd.array([ix[0][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix0_1'] = pd.array([ix[0][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix1_0'] = pd.array([ix[1][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix1_1'] = pd.array([ix[1][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix2_0'] = pd.array([ix[2][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix2_1'] = pd.array([ix[2][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix3_0'] = pd.array([ix[3][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix3_1'] = pd.array([ix[3][1] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix4_0'] = pd.array([ix[4][0] for ix in pgram_span_ixs], dtype="Int16")
pgrams['ix4_1'] = pd.array([ix[4][1] for ix in pgram_span_ixs], dtype="Int16")
#add tune family ids and songids
pgrams['tunefamily'] = seq['tunefamily']
pgrams['songid'] = seq['id']
pgrams, arfftype = extractPgramFeatures(pgrams, seq)
return pgrams, arfftype
def getBeatDuration(timesig):
try:
dur = float(m21.meter.TimeSignature(timesig).beatDuration.quarterLength)
except TimeSignatureException:
dur = float(Fraction(timesig) / Fraction('1/4'))
return dur
def oneCrossRelation(el1, el2, typeconv):
if pd.isna(el1) or pd.isna(el2):
return np.nan
return '-' if typeconv(el2) < typeconv(el1) else '=' if typeconv(el1) == typeconv(el2) else '+'
def addCrossRelations(pgrams, arfftype, featurename, newname=None, typeconv=int):
postfixes = {
1 : 'first',
2 : 'second',
3 : 'third',
4 : 'fourth',
5 : 'fifth'
}
if newname is None:
newname = featurename
for ix1 in range(1,6):
for ix2 in range(ix1+1,6):
featname = newname + postfixes[ix1] + postfixes[ix2]
source = zip(pgrams[featurename + postfixes[ix1]], pgrams[featurename + postfixes[ix2]])
pgrams[featname] = [oneCrossRelation(el1, el2, typeconv) for (el1, el2) in source]
arfftype[featname] = '{-,=,+}'
return pgrams, arfftype
def extractPgramFeatures(pgrams, seq):
# vocal?
vocal = False
if 'melismastate' in seq['features'].keys():
vocal = True
arfftype = {}
# some aliases
scaledegree = seq['features']['scaledegree']
beatstrength = seq['features']['beatstrength']
diatonicpitch = seq['features']['diatonicpitch']
midipitch = seq['features']['midipitch']
chromaticinterval = seq['features']['chromaticinterval']
timesig = seq['features']['timesignature']
metriccontour = seq['features']['metriccontour']
beatinsong = seq['features']['beatinsong']
beatinphrase = seq['features']['beatinphrase']
endOfPhrase = seq['features']['endOfPhrase']
phrasestart_ix = seq['features']['phrasestart_ix']
phrase_ix = seq['features']['phrase_ix']
completesmeasure_song = seq['features']['completesmeasure_song']
completesbeat_song = seq['features']['completesbeat_song']
completesmeasure_phrase = seq['features']['completesmeasure_phrase']
completesbeat_phrase = seq['features']['completesbeat_phrase']
IOIbeatfraction = seq['features']['IOI_beatfraction']
nextisrest = seq['features']['nextisrest']
gpr2a = seq['features']['gpr2a_Frankland']
gpr2b = seq['features']['gpr2b_Frankland']
gpr3a = seq['features']['gpr3a_Frankland']
gpr3d = seq['features']['gpr3d_Frankland']
gprsum = seq['features']['gpr_Frankland_sum']
pprox = seq['features']['pitchproximity']
prev = seq['features']['pitchreversal']
lbdmpitch = seq['features']['lbdm_spitch']
lbdmioi = seq['features']['lbdm_sioi']
lbdmrest = seq['features']['lbdm_srest']
lbdm = seq['features']['lbdm_boundarystrength']
if vocal:
wordstress = seq['features']['wordstress_endmelisma']
noncontentword = seq['features']['noncontentword_endmelisma']
wordend = seq['features']['wordend_endmelisma']
rhymescontentwords = seq['features']['rhymescontentwords_endmelisma']
rhymescontentwords_noteoffset = seq['features']['rhymescontentwords_noteoffset']
rhymescontentwords_beatoffset = seq['features']['rhymescontentwords_beatoffset']
melismastate = seq['features']['melismastate']
phrase_count = max(phrase_ix) + 1
pgrams['scaledegreefirst'] = pd.array([scaledegree[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['scaledegreesecond'] = pd.array([scaledegree[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['scaledegreethird'] = pd.array([scaledegree[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['scaledegreefourth'] = pd.array([scaledegree[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['scaledegreefifth'] = pd.array([scaledegree[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['scaledegreefirst'] = 'numeric'
arfftype['scaledegreesecond'] = 'numeric'
arfftype['scaledegreethird'] = 'numeric'
arfftype['scaledegreefourth'] = 'numeric'
arfftype['scaledegreefifth'] = 'numeric'
pgrams['diatonicpitchfirst'] = pd.array([diatonicpitch[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['diatonicpitchsecond'] = pd.array([diatonicpitch[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['diatonicpitchthird'] = pd.array([diatonicpitch[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['diatonicpitchfourth'] = pd.array([diatonicpitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['diatonicpitchfifth'] = pd.array([diatonicpitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['diatonicpitchfirst'] = 'numeric'
arfftype['diatonicpitchsecond'] = 'numeric'
arfftype['diatonicpitchthird'] = 'numeric'
arfftype['diatonicpitchfourth'] = 'numeric'
arfftype['diatonicpitchfifth'] = 'numeric'
pgrams['midipitchfirst'] = pd.array([midipitch[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['midipitchsecond'] = pd.array([midipitch[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['midipitchthird'] = pd.array([midipitch[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['midipitchfourth'] = pd.array([midipitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['midipitchfifth'] = pd.array([midipitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['midipitchfirst'] = 'numeric'
arfftype['midipitchsecond'] = 'numeric'
arfftype['midipitchthird'] = 'numeric'
arfftype['midipitchfourth'] = 'numeric'
arfftype['midipitchfifth'] = 'numeric'
pgrams['intervalfirst'] = pd.array([chromaticinterval[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['intervalsecond'] = pd.array([chromaticinterval[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['intervalthird'] = pd.array([chromaticinterval[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['intervalfourth'] = pd.array([chromaticinterval[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['intervalfifth'] = pd.array([chromaticinterval[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['intervalfirst'] = 'numeric'
arfftype['intervalsecond'] = 'numeric'
arfftype['intervalthird'] = 'numeric'
arfftype['intervalfourth'] = 'numeric'
arfftype['intervalfifth'] = 'numeric'
parsons = {-1:'-', 0:'=', 1:'+'}
#intervalcontour is not a good feature. Pitchcontour would be better. This will be in the cross-relations
#pgrams['intervalcontoursecond'] = [parsons[np.sign(int2 - int1)] if not pd.isna(int1) else np.nan for int1, int2 in \
# zip(pgrams['intervalfirst'],pgrams['intervalsecond'])]
#pgrams['intervalcontourthird'] = [parsons[np.sign(int2 - int1)] for int1, int2 in \
# zip(pgrams['intervalsecond'],pgrams['intervalthird'])]
#pgrams['intervalcontourfourth'] = [parsons[np.sign(int2 - int1)] if not pd.isna(int2) else np.nan for int1, int2 in \
# zip(pgrams['intervalthird'],pgrams['intervalfourth'])]
#pgrams['intervalcontourfifth'] = [parsons[np.sign(int2 - int1)] if not pd.isna(int2) else np.nan for int1, int2 in \
# zip(pgrams['intervalfourth'],pgrams['intervalfifth'])]
#arfftype['intervalcontoursecond'] = '{-,=,+}'
#arfftype['intervalcontourthird'] = '{-,=,+}'
#arfftype['intervalcontourfourth'] = '{-,=,+}'
#arfftype['intervalcontourfifth'] = '{-,=,+}'
#intervals of which second tone has center of gravity according to Vos 2002 + octave equivalents
VosCenterGravityASC = np.array([1, 5, 8])
VosCenterGravityDESC = np.array([-2, -4, -6, -7, -11])
VosCenterGravity = list(VosCenterGravityDESC-24) + \
list(VosCenterGravityDESC-12) + \
list(VosCenterGravityDESC) + \
list(VosCenterGravityASC) + \
list(VosCenterGravityASC+12) + \
list(VosCenterGravityASC+24)
pgrams['VosCenterGravityfirst'] = [interval in VosCenterGravity if not pd.isna(interval) else np.nan for interval in pgrams['intervalfirst']]
pgrams['VosCenterGravitysecond'] = [interval in VosCenterGravity for interval in pgrams['intervalsecond']]
pgrams['VosCenterGravitythird'] = [interval in VosCenterGravity for interval in pgrams['intervalthird']]
pgrams['VosCenterGravityfourth'] = [interval in VosCenterGravity if not pd.isna(interval) else np.nan for interval in pgrams['intervalfourth']]
pgrams['VosCenterGravityfifth'] = [interval in VosCenterGravity if not pd.isna(interval) else np.nan for interval in pgrams['intervalfifth']]
arfftype['VosCenterGravityfirst'] = '{True, False}'
arfftype['VosCenterGravitysecond'] = '{True, False}'
arfftype['VosCenterGravitythird'] = '{True, False}'
arfftype['VosCenterGravityfourth'] = '{True, False}'
arfftype['VosCenterGravityfifth'] = '{True, False}'
VosHarmony = {
0: 0,
1: 2,
2: 3,
3: 4,
4: 5,
5: 6,
6: 1,
7: 6,
8: 5,
9: 4,
10: 3,
11: 2,
12: 7
}
#interval modulo one octave, but 0 only for absolute unison (Vos 2002, p.633)
def vosint(intervals):
return [((np.sign(i)*i-1)%12+1 if i!=0 else 0) if not pd.isna(i) else np.nan for i in intervals]
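    #Example (illustration): vosint([0, 7, 13, -14]) -> [0, 7, 1, 2]; compound
    #intervals fold into the range 1..12 while an absolute unison stays 0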
pgrams['VosHarmonyfirst'] = pd.array([VosHarmony[interval] if not pd.isna(interval) else np.nan for interval in vosint(pgrams['intervalfirst'])], dtype="Int16")
pgrams['VosHarmonysecond'] = pd.array([VosHarmony[interval] for interval in vosint(pgrams['intervalsecond'])], dtype="Int16")
pgrams['VosHarmonythird'] = pd.array([VosHarmony[interval] for interval in vosint(pgrams['intervalthird'])], dtype="Int16")
pgrams['VosHarmonyfourth'] = pd.array([VosHarmony[interval] if not pd.isna(interval) else np.nan for interval in vosint(pgrams['intervalfourth'])], dtype="Int16")
pgrams['VosHarmonyfifth'] = pd.array([VosHarmony[interval] if not pd.isna(interval) else np.nan for interval in vosint(pgrams['intervalfifth'])], dtype="Int16")
arfftype['VosHarmonyfirst'] = 'numeric'
arfftype['VosHarmonysecond'] = 'numeric'
arfftype['VosHarmonythird'] = 'numeric'
arfftype['VosHarmonyfourth'] = 'numeric'
arfftype['VosHarmonyfifth'] = 'numeric'
if 'informationcontent' in seq['features'].keys():
informationcontent = seq['features']['informationcontent']
pgrams['informationcontentfirst'] = [informationcontent[int(ix)] for ix in pgrams['ix0_0']]
pgrams['informationcontentsecond'] = [informationcontent[int(ix)] for ix in pgrams['ix1_0']]
pgrams['informationcontentthird'] = [informationcontent[int(ix)] for ix in pgrams['ix2_0']]
pgrams['informationcontentfourth'] = [informationcontent[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['informationcontentfifth'] = [informationcontent[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['informationcontentfirst'] = 'numeric'
arfftype['informationcontentsecond'] = 'numeric'
arfftype['informationcontentthird'] = 'numeric'
arfftype['informationcontentfourth'] = 'numeric'
arfftype['informationcontentfifth'] = 'numeric'
pgrams['contourfirst'] = [parsons[np.sign(i)] if not pd.isna(i) else np.nan for i in pgrams['intervalfirst']]
pgrams['contoursecond'] = [parsons[np.sign(i)] for i in pgrams['intervalsecond']]
pgrams['contourthird'] = [parsons[np.sign(i)] for i in pgrams['intervalthird']]
pgrams['contourfourth'] = [parsons[np.sign(i)] if not pd.isna(i) else np.nan for i in pgrams['intervalfourth']]
pgrams['contourfifth'] = [parsons[np.sign(i)] if not pd.isna(i) else np.nan for i in pgrams['intervalfifth']]
arfftype['contourfirst'] = '{-,=,+}'
arfftype['contoursecond'] = '{-,=,+}'
arfftype['contourthird'] = '{-,=,+}'
arfftype['contourfourth'] = '{-,=,+}'
arfftype['contourfifth'] = '{-,=,+}'
    ###########################################
#derived features from Interval and Contour
pgrams['registraldirectionchange'] = [cont_sec != cont_third for cont_sec, cont_third in \
zip(pgrams['contoursecond'], pgrams['contourthird'])]
arfftype['registraldirectionchange'] = '{True, False}'
pgrams['largetosmall'] = [int_first >= 6 and int_second <=4 for int_first, int_second in \
zip(pgrams['intervalsecond'], pgrams['intervalthird'])]
arfftype['largetosmall'] = '{True, False}'
pgrams['contourreversal'] = [(i[0] == '-' and i[1] == '+') or (i[0]=='+' and i[1]=='-') \
for i in zip(pgrams['contoursecond'], pgrams['contourthird'])]
arfftype['contourreversal'] = '{True, False}'
pgrams['isascending'] = \
(pgrams['diatonicpitchfirst'] < pgrams['diatonicpitchsecond']) & \
(pgrams['diatonicpitchsecond'] < pgrams['diatonicpitchthird'])
arfftype['isascending'] = '{True, False}'
pgrams['isdescending'] = \
(pgrams['diatonicpitchfirst'] > pgrams['diatonicpitchsecond']) & \
(pgrams['diatonicpitchsecond'] > pgrams['diatonicpitchthird'])
arfftype['isdescending'] = '{True, False}'
diat = pgrams[['diatonicpitchfirst','diatonicpitchsecond','diatonicpitchthird']].values
pgrams['ambitus'] = diat.max(1) - diat.min(1)
arfftype['ambitus'] = 'numeric'
pgrams['containsleap'] = \
(abs(pgrams['diatonicpitchsecond'] - pgrams['diatonicpitchfirst']) > 1) | \
(abs(pgrams['diatonicpitchthird'] - pgrams['diatonicpitchsecond']) > 1)
arfftype['containsleap'] = '{True, False}'
    ###########################################
pgrams['numberofnotesfirst'] = pd.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix0_0'],pgrams['ix0_1'])], dtype="Int16")
pgrams['numberofnotessecond'] = pd.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix1_0'],pgrams['ix1_1'])], dtype="Int16")
pgrams['numberofnotesthird'] = pd.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix2_0'],pgrams['ix2_1'])], dtype="Int16")
pgrams['numberofnotesfourth'] = pd.array([ix2 - ix1 if not pd.isna(ix1) else np.nan for ix1, ix2 in zip(pgrams['ix3_0'],pgrams['ix3_1'])], dtype="Int16")
pgrams['numberofnotesfifth'] = pd.array([ix2 - ix1 if not pd.isna(ix1) else np.nan for ix1, ix2 in zip(pgrams['ix4_0'],pgrams['ix4_1'])], dtype="Int16")
arfftype['numberofnotesfirst'] = 'numeric'
arfftype['numberofnotessecond'] = 'numeric'
arfftype['numberofnotesthird'] = 'numeric'
arfftype['numberofnotesfourth'] = 'numeric'
arfftype['numberofnotesfifth'] = 'numeric'
if seq['freemeter']:
pgrams['meternumerator'] = pd.array([np.nan for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['meterdenominator'] = pd.array([np.nan for ix in pgrams['ix0_0']], dtype="Int16")
else:
pgrams['meternumerator'] = pd.array([int(timesig[ix].split('/')[0]) for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['meterdenominator'] = pd.array([int(timesig[ix].split('/')[1]) for ix in pgrams['ix0_0']], dtype="Int16")
arfftype['meternumerator'] = 'numeric'
arfftype['meterdenominator'] = 'numeric'
pgrams['nextisrestfirst'] = [nextisrest[ix-1] for ix in pgrams['ix0_1']]
pgrams['nextisrestsecond'] = [nextisrest[ix-1] for ix in pgrams['ix1_1']]
pgrams['nextisrestthird'] = [nextisrest[ix-1] for ix in pgrams['ix2_1']]
pgrams['nextisrestfourth'] = [nextisrest[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['nextisrestfifth'] = [nextisrest[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['nextisrestfirst'] = '{True, False}'
arfftype['nextisrestsecond'] = '{True, False}'
arfftype['nextisrestthird'] = '{True, False}'
arfftype['nextisrestfourth'] = '{True, False}'
arfftype['nextisrestfifth'] = '{True, False}'
pgrams['beatstrengthfirst'] = [beatstrength[int(ix)] for ix in pgrams['ix0_0']]
pgrams['beatstrengthsecond'] = [beatstrength[int(ix)] for ix in pgrams['ix1_0']]
pgrams['beatstrengththird'] = [beatstrength[int(ix)] for ix in pgrams['ix2_0']]
pgrams['beatstrengthfourth'] = [beatstrength[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['beatstrengthfifth'] = [beatstrength[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['beatstrengthfirst'] = 'numeric'
arfftype['beatstrengthsecond'] = 'numeric'
arfftype['beatstrengththird'] = 'numeric'
arfftype['beatstrengthfourth'] = 'numeric'
arfftype['beatstrengthfifth'] = 'numeric'
#these will be in crossrelations: beatstrengthfirstsecond, etc.
#pgrams['metriccontourfirst'] = [metriccontour[int(ix)] for ix in pgrams['ix0_0']]
#pgrams['metriccontoursecond'] = [metriccontour[int(ix)] for ix in pgrams['ix1_0']]
#pgrams['metriccontourthird'] = [metriccontour[int(ix)] for ix in pgrams['ix2_0']]
#pgrams['metriccontourfourth'] = [metriccontour[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
#pgrams['metriccontourfifth'] = [metriccontour[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
#arfftype['metriccontourfirst'] = '{-,=,+}'
#arfftype['metriccontoursecond'] = '{-,=,+}'
#arfftype['metriccontourthird'] = '{-,=,+}'
#arfftype['metriccontourfourth'] = '{-,=,+}'
#arfftype['metriccontourfifth'] = '{-,=,+}'
pgrams['IOIbeatfractionfirst'] = [computeSumFractions(IOIbeatfraction, startix, endix) for \
startix, endix in zip(pgrams['ix0_0'],pgrams['ix0_1'])]
pgrams['IOIbeatfractionsecond'] = [computeSumFractions(IOIbeatfraction, startix, endix) for \
startix, endix in zip(pgrams['ix1_0'],pgrams['ix1_1'])]
pgrams['IOIbeatfractionthird'] = [computeSumFractions(IOIbeatfraction, startix, endix) for \
startix, endix in zip(pgrams['ix2_0'],pgrams['ix2_1'])]
pgrams['IOIbeatfractionfourth'] = [computeSumFractions(IOIbeatfraction, startix, endix) if not pd.isna(startix) else np.nan for \
startix, endix in zip(pgrams['ix3_0'],pgrams['ix3_1'])]
pgrams['IOIbeatfractionfifth'] = [computeSumFractions(IOIbeatfraction, startix, endix) if not pd.isna(startix) else np.nan for \
startix, endix in zip(pgrams['ix4_0'],pgrams['ix4_1'])]
arfftype['IOIbeatfractionfirst'] = 'numeric'
arfftype['IOIbeatfractionsecond'] = 'numeric'
arfftype['IOIbeatfractionthird'] = 'numeric'
arfftype['IOIbeatfractionfourth'] = 'numeric'
arfftype['IOIbeatfractionfifth'] = 'numeric'
pgrams['durationcummulation'] = [((d2 > d1) and (d3 > d2)) for d1, d2, d3 in \
zip(pgrams['IOIbeatfractionfirst'],pgrams['IOIbeatfractionsecond'],pgrams['IOIbeatfractionthird'])]
arfftype['durationcummulation'] = '{True, False}'
#these will be in crossrelation: IOIbeatfractionfirstsecond, etc.
#pgrams['durationcontoursecond'] = [parsons[np.sign(dur2 - dur1)] for dur1, dur2 in \
# zip(pgrams['IOIbeatfractionfirst'],pgrams['IOIbeatfractionsecond'])]
#pgrams['durationcontourthird'] = [parsons[np.sign(dur2 - dur1)] for dur1, dur2 in \
# zip(pgrams['IOIbeatfractionsecond'],pgrams['IOIbeatfractionthird'])]
#pgrams['durationcontourfourth'] = [parsons[np.sign(dur2 - dur1)] if not pd.isna(dur2) else np.nan for dur1, dur2 in \
# zip(pgrams['IOIbeatfractionthird'],pgrams['IOIbeatfractionfourth'])]
#pgrams['durationcontourfifth'] = [parsons[np.sign(dur2 - dur1)] if not pd.isna(dur2) else np.nan for dur1, dur2 in \
# zip(pgrams['IOIbeatfractionfourth'],pgrams['IOIbeatfractionfifth'])]
#arfftype['durationcontoursecond'] = '{-,=,+}'
#arfftype['durationcontourthird'] = '{-,=,+}'
#arfftype['durationcontourfourth'] = '{-,=,+}'
#arfftype['durationcontourfifth'] = '{-,=,+}'
pgrams['onthebeatfirst'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 for ix in pgrams['ix0_0']]
pgrams['onthebeatsecond'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 for ix in pgrams['ix1_0']]
pgrams['onthebeatthird'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 for ix in pgrams['ix2_0']]
pgrams['onthebeatfourth'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['onthebeatfifth'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['onthebeatfirst'] = '{True, False}'
arfftype['onthebeatsecond'] = '{True, False}'
arfftype['onthebeatthird'] = '{True, False}'
arfftype['onthebeatfourth'] = '{True, False}'
arfftype['onthebeatfifth'] = '{True, False}'
pgrams['completesmeasurephrase'] = [completesmeasure_phrase[ix-1] for ix in pgrams['ix2_1']]
pgrams['completesmeasuresong'] = [completesmeasure_song[ix-1] for ix in pgrams['ix2_1']]
pgrams['completesbeatphrase'] = [completesbeat_phrase[ix-1] for ix in pgrams['ix2_1']]
pgrams['completesbeatsong'] = [completesbeat_song[ix-1] for ix in pgrams['ix2_1']]
arfftype['completesmeasurephrase'] = '{True, False}'
arfftype['completesmeasuresong'] = '{True, False}'
arfftype['completesbeatphrase'] = '{True, False}'
arfftype['completesbeatsong'] = '{True, False}'
if 'grouper' in seq['features'].keys():
grouper = seq['features']['grouper']
pgrams['grouperfirst'] = [grouper[int(ix)] for ix in pgrams['ix0_0']]
pgrams['groupersecond'] = [grouper[int(ix)] for ix in pgrams['ix1_0']]
pgrams['grouperthird'] = [grouper[int(ix)] for ix in pgrams['ix2_0']]
pgrams['grouperfourth'] = [grouper[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['grouperfifth'] = [grouper[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['grouperfirst'] = '{True, False}'
arfftype['groupersecond'] = '{True, False}'
arfftype['grouperthird'] = '{True, False}'
arfftype['grouperfourth'] = '{True, False}'
arfftype['grouperfifth'] = '{True, False}'
#values for final note of third group
pgrams['noteoffset'] = pd.array([(ix-1) - phrasestart_ix[(ix-1)] for ix in pgrams['ix2_1']], dtype="Int16")
pgrams['beatoffset'] = [float(Fraction(beatinphrase[ix-1])) - \
float(Fraction(beatinphrase[phrasestart_ix[(ix-1)]])) \
for ix in pgrams['ix2_1']]
arfftype['noteoffset'] = 'numeric'
arfftype['beatoffset'] = 'numeric'
pgrams['beatduration'] = [getBeatDuration(timesig[int(ix)]) for ix in pgrams['ix0_0']]
pgrams['beatcount'] = pd.array([m21.meter.TimeSignature(timesig[int(ix)]).beatCount for ix in pgrams['ix0_0']], dtype="Int16")
arfftype['beatduration'] = 'numeric'
arfftype['beatcount'] = 'numeric'
#get values for the last note!
pgrams['gpr2afirst'] = [gpr2a[ix-1] for ix in pgrams['ix0_1']]
pgrams['gpr2asecond'] = [gpr2a[ix-1] for ix in pgrams['ix1_1']]
pgrams['gpr2athird'] = [gpr2a[ix-1] for ix in pgrams['ix2_1']]
pgrams['gpr2afourth'] = [gpr2a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['gpr2afifth'] = [gpr2a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['gpr2afirst'] = 'numeric'
arfftype['gpr2asecond'] = 'numeric'
arfftype['gpr2athird'] = 'numeric'
arfftype['gpr2afourth'] = 'numeric'
arfftype['gpr2afifth'] = 'numeric'
pgrams['gpr2bfirst'] = [gpr2b[ix-1] for ix in pgrams['ix0_1']]
pgrams['gpr2bsecond'] = [gpr2b[ix-1] for ix in pgrams['ix1_1']]
pgrams['gpr2bthird'] = [gpr2b[ix-1] for ix in pgrams['ix2_1']]
pgrams['gpr2bfourth'] = [gpr2b[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['gpr2bfifth'] = [gpr2b[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['gpr2bfirst'] = 'numeric'
arfftype['gpr2bsecond'] = 'numeric'
arfftype['gpr2bthird'] = 'numeric'
arfftype['gpr2bfourth'] = 'numeric'
arfftype['gpr2bfifth'] = 'numeric'
pgrams['gpr3afirst'] = [gpr3a[ix-1] for ix in pgrams['ix0_1']]
pgrams['gpr3asecond'] = [gpr3a[ix-1] for ix in pgrams['ix1_1']]
pgrams['gpr3athird'] = [gpr3a[ix-1] for ix in pgrams['ix2_1']]
pgrams['gpr3afourth'] = [gpr3a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['gpr3afifth'] = [gpr3a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['gpr3afirst'] = 'numeric'
arfftype['gpr3asecond'] = 'numeric'
arfftype['gpr3athird'] = 'numeric'
arfftype['gpr3afourth'] = 'numeric'
arfftype['gpr3afifth'] = 'numeric'
pgrams['gpr3dfirst'] = [gpr3d[ix-1] for ix in pgrams['ix0_1']]
pgrams['gpr3dsecond'] = [gpr3d[ix-1] for ix in pgrams['ix1_1']]
pgrams['gpr3dthird'] = [gpr3d[ix-1] for ix in pgrams['ix2_1']]
pgrams['gpr3dfourth'] = [gpr3d[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['gpr3dfifth'] = [gpr3d[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['gpr3dfirst'] = 'numeric'
arfftype['gpr3dsecond'] = 'numeric'
arfftype['gpr3dthird'] = 'numeric'
arfftype['gpr3dfourth'] = 'numeric'
arfftype['gpr3dfifth'] = 'numeric'
pgrams['gprsumfirst'] = [gprsum[ix-1] for ix in pgrams['ix0_1']]
pgrams['gprsumsecond'] = [gprsum[ix-1] for ix in pgrams['ix1_1']]
pgrams['gprsumthird'] = [gprsum[ix-1] for ix in pgrams['ix2_1']]
pgrams['gprsumfourth'] = [gprsum[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['gprsumfifth'] = [gprsum[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['gprsumfirst'] = 'numeric'
arfftype['gprsumsecond'] = 'numeric'
arfftype['gprsumthird'] = 'numeric'
arfftype['gprsumfourth'] = 'numeric'
arfftype['gprsumfifth'] = 'numeric'
pgrams['pitchproximityfirst'] = pd.array([pprox[ix] for ix in pgrams['ix0_0']], dtype="Int16")
pgrams['pitchproximitysecond'] = pd.array([pprox[ix] for ix in pgrams['ix1_0']], dtype="Int16")
pgrams['pitchproximitythird'] = pd.array([pprox[ix] for ix in pgrams['ix2_0']], dtype="Int16")
pgrams['pitchproximityfourth'] = pd.array([pprox[ix] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16")
pgrams['pitchproximityfifth'] = pd.array([pprox[ix] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16")
arfftype['pitchproximityfirst'] = 'numeric'
arfftype['pitchproximitysecond'] = 'numeric'
arfftype['pitchproximitythird'] = 'numeric'
arfftype['pitchproximityfourth'] = 'numeric'
arfftype['pitchproximityfifth'] = 'numeric'
pgrams['pitchreversalfirst'] = [prev[ix] for ix in pgrams['ix0_0']]
pgrams['pitchreversalsecond'] = [prev[ix] for ix in pgrams['ix1_0']]
pgrams['pitchreversalthird'] = [prev[ix] for ix in pgrams['ix2_0']]
pgrams['pitchreversalfourth'] = [prev[ix] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']]
pgrams['pitchreversalfifth'] = [prev[ix] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']]
arfftype['pitchreversalfirst'] = 'numeric'
arfftype['pitchreversalsecond'] = 'numeric'
arfftype['pitchreversalthird'] = 'numeric'
arfftype['pitchreversalfourth'] = 'numeric'
arfftype['pitchreversalfifth'] = 'numeric'
#get values for last note in pitchgroup
pgrams['lbdmpitchfirst'] = [lbdmpitch[ix-1] for ix in pgrams['ix0_1']]
pgrams['lbdmpitchsecond'] = [lbdmpitch[ix-1] for ix in pgrams['ix1_1']]
pgrams['lbdmpitchthird'] = [lbdmpitch[ix-1] for ix in pgrams['ix2_1']]
pgrams['lbdmpitchfourth'] = [lbdmpitch[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['lbdmpitchfifth'] = [lbdmpitch[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['lbdmpitchfirst'] = 'numeric'
arfftype['lbdmpitchsecond'] = 'numeric'
arfftype['lbdmpitchthird'] = 'numeric'
arfftype['lbdmpitchfourth'] = 'numeric'
arfftype['lbdmpitchfifth'] = 'numeric'
pgrams['lbdmioifirst'] = [lbdmioi[ix-1] for ix in pgrams['ix0_1']]
pgrams['lbdmioisecond'] = [lbdmioi[ix-1] for ix in pgrams['ix1_1']]
pgrams['lbdmioithird'] = [lbdmioi[ix-1] for ix in pgrams['ix2_1']]
pgrams['lbdmioifourth'] = [lbdmioi[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
pgrams['lbdmioififth'] = [lbdmioi[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']]
arfftype['lbdmioifirst'] = 'numeric'
arfftype['lbdmioisecond'] = 'numeric'
arfftype['lbdmioithird'] = 'numeric'
arfftype['lbdmioifourth'] = 'numeric'
arfftype['lbdmioififth'] = 'numeric'
pgrams['lbdmrestfirst'] = [lbdmrest[ix-1] for ix in pgrams['ix0_1']]
pgrams['lbdmrestsecond'] = [lbdmrest[ix-1] for ix in pgrams['ix1_1']]
pgrams['lbdmrestthird'] = [lbdmrest[ix-1] for ix in pgrams['ix2_1']]
pgrams['lbdmrestfourth'] = [lbdmrest[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']]
import natsort
import numpy as np
import pandas as pd
import plotly.io as pio
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
import re
import traceback
from io import BytesIO
from sklearn.decomposition import PCA
from sklearn.metrics import pairwise as pw
import json
import statistics
import matplotlib.pyplot as plt
import matplotlib_venn as venn
from matplotlib_venn import venn2, venn3, venn3_circles
from PIL import Image
from upsetplot import from_memberships
from upsetplot import plot as upplot
import pkg_resources
def natsort_index_keys(x):
order = natsort.natsorted(np.unique(x.values))
return pd.Index([order.index(el) for el in x], name=x.name)
def natsort_list_keys(x):
order = natsort.natsorted(np.unique(x))
return [order.index(el) for el in x]
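# Minimal sketch of how the two natsort helpers above are meant to be used as
# pandas sort keys. The function name and values below are illustrative only
# and not part of the package API:
def _natsort_key_demo():
    # fraction labels sort naturally (3K < 12K < 80K); plain lexicographic
    # sorting would put "12K" before "3K"
    idx = pd.Index(["80K", "3K", "12K"], name="Fraction")
    index_ranks = natsort_index_keys(idx)                  # ranks: [2, 0, 1]
    list_ranks = natsort_list_keys(["80K", "3K", "12K"])   # ranks: [2, 0, 1]
    return index_ranks, list_ranks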
class SpatialDataSet:
regex = {
"imported_columns": "^[Rr]atio H/L (?!normalized|type|is.*|variability|count)[^ ]+|^Ratio H/L variability.... .+|^Ratio H/L count .+|id$|[Mm][Ss].*[cC]ount.+$|[Ll][Ff][Qq].*|.*[nN]ames.*|.*[Pp][rR]otein.[Ii][Dd]s.*|[Pp]otential.[cC]ontaminant|[Oo]nly.[iI]dentified.[bB]y.[sS]ite|[Rr]everse|[Ss]core|[Qq]-[Vv]alue|R.Condition|PG.Genes|PG.ProteinGroups|PG.Cscore|PG.Qvalue|PG.RunEvidenceCount|PG.Quantity|^Proteins$|^Sequence$"
}
acquisition_set_dict = {
"LFQ6 - Spectronaut" : ["LFQ intensity", "MS/MS count"],
"LFQ5 - Spectronaut" : ["LFQ intensity", "MS/MS count"],
"LFQ5 - MQ" : ["[Ll][Ff][Qq].[Ii]ntensity", "[Mm][Ss]/[Mm][Ss].[cC]ount", "[Ii]ntensity"],
"LFQ6 - MQ" : ["[Ll][Ff][Qq].[Ii]ntensity", "[Mm][Ss]/[Mm][Ss].[cC]ount", "[Ii]ntensity"],
"SILAC - MQ" : [ "[Rr]atio.[Hh]/[Ll](?!.[Vv]aria|.[Cc]ount)","[Rr]atio.[Hh]/[Ll].[Vv]ariability.\[%\]", "[Rr]atio.[Hh]/[Ll].[cC]ount"],
"Custom": ["(?!Protein IDs|Gene names)"]
}
Spectronaut_columnRenaming = {
"R.Condition": "Map", "PG.Genes" : "Gene names", "PG.Qvalue": "Q-value", "PG.Cscore":"C-Score",
"PG.ProteinGroups" : "Protein IDs", "PG.RunEvidenceCount" : "MS/MS count", "PG.Quantity" : "LFQ intensity"
}
css_color = ["#b2df8a", "#6a3d9a", "#e31a1c", "#b15928", "#fdbf6f", "#ff7f00", "#cab2d6", "#fb9a99", "#1f78b4", "#ffff99", "#a6cee3",
"#33a02c", "blue", "orange", "goldenrod", "lightcoral", "magenta", "brown", "lightpink", "red", "turquoise",
"khaki", "darkgoldenrod","darkturquoise", "darkviolet", "greenyellow", "darksalmon", "hotpink", "indianred", "indigo","darkolivegreen",
"coral", "aqua", "beige", "bisque", "black", "blanchedalmond", "blueviolet", "burlywood", "cadetblue", "yellowgreen", "chartreuse",
"chocolate", "cornflowerblue", "cornsilk", "darkblue", "darkcyan", "darkgray", "darkgrey", "darkgreen", "darkkhaki", "darkmagenta",
"darkorange", "darkorchid", "darkred", "darkseagreen", "darkslateblue", "snow", "springgreen", "darkslategrey", "mediumpurple", "oldlace",
"olive", "lightseagreen", "deeppink", "deepskyblue", "dimgray", "dimgrey", "dodgerblue", "firebrick", "floralwhite", "forestgreen",
"fuchsia", "gainsboro", "ghostwhite", "gold", "gray", "ivory", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcyan",
"fuchsia", "gainsboro", "ghostwhite", "gold", "gray", "ivory", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcyan",
"lightgoldenrodyellow", "lightgray", "lightgrey", "lightgreen", "lightsalmon", "lightskyblue", "lightslategray", "lightslategrey",
"lightsteelblue", "lightyellow", "lime", "limegreen", "linen", "maroon", "mediumaquamarine", "mediumblue", "mediumseagreen",
"mediumslateblue", "mediumspringgreen", "mediumturquoise", "mediumvioletred", "midnightblue", "mintcream", "mistyrose", "moccasin",
"olivedrab", "orangered", "orchid", "palegoldenrod", "palegreen", "paleturquoise", "palevioletred", "papayawhip", "peachpuff", "peru",
"pink", "plum", "powderblue", "rosybrown", "royalblue", "saddlebrown", "salmon", "sandybrown", "seagreen", "seashell", "sienna", "silver",
"skyblue", "slateblue", "steelblue", "teal", "thistle", "tomato", "violet", "wheat", "white", "whitesmoke", "slategray", "slategrey",
"aquamarine", "azure","crimson", "cyan", "darkslategray", "grey","mediumorchid","navajowhite", "navy"]
analysed_datasets_dict = {}
df_organellarMarkerSet = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/organellemarkers/{}.csv'.format("Homo sapiens - Uniprot")),
usecols=lambda x: bool(re.match("Gene name|Compartment", x)))
df_organellarMarkerSet = df_organellarMarkerSet.rename(columns={"Gene name":"Gene names"})
df_organellarMarkerSet = df_organellarMarkerSet.astype({"Gene names": "str"})
def __init__(self, filename, expname, acquisition, comment, name_pattern="e.g.:.* (?P<cond>.*)_(?P<rep>.*)_(?P<frac>.*)", reannotate_genes=False, **kwargs):
self.filename = filename
self.expname = expname
self.acquisition = acquisition
self.name_pattern = name_pattern
self.comment = comment
self.imported_columns = self.regex["imported_columns"]
self.fractions, self.map_names = [], []
self.df_01_stacked, self.df_log_stacked = pd.DataFrame(), pd.DataFrame()
if acquisition == "SILAC - MQ":
if "RatioHLcount" not in kwargs.keys():
self.RatioHLcount = 2
else:
self.RatioHLcount = kwargs["RatioHLcount"]
del kwargs["RatioHLcount"]
if "RatioVariability" not in kwargs.keys():
self.RatioVariability = 30
else:
self.RatioVariability = kwargs["RatioVariability"]
del kwargs["RatioVariability"]
elif acquisition == "Custom":
self.custom_columns = kwargs["custom_columns"]
self.custom_normalized = kwargs["custom_normalized"]
self.imported_columns = "^"+"$|^".join(["$|^".join(el) if type(el) == list else el for el in self.custom_columns.values() if el not in [[], None, ""]])+"$"
#elif acquisition == "LFQ5 - MQ" or acquisition == "LFQ6 - MQ" or acquisition == "LFQ6 - Spectronaut" or acquisition == "LFQ5 - Spectronaut":
else:
if "summed_MSMS_counts" not in kwargs.keys():
self.summed_MSMS_counts = 2
else:
self.summed_MSMS_counts = kwargs["summed_MSMS_counts"]
del kwargs["summed_MSMS_counts"]
if "consecutiveLFQi" not in kwargs.keys():
self.consecutiveLFQi = 4
else:
self.consecutiveLFQi = kwargs["consecutiveLFQi"]
del kwargs["consecutiveLFQi"]
#self.markerset_or_cluster = False if "markerset_or_cluster" not in kwargs.keys() else kwargs["markerset_or_cluster"]
if "organism" not in kwargs.keys():
marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format("Homo sapiens - Uniprot")))
self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])}
else:
assert kwargs["organism"]+".csv" in pkg_resources.resource_listdir(__name__, "annotations/complexes")
marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format(kwargs["organism"])))
self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])}
self.organism = kwargs["organism"]
del kwargs["organism"]
self.analysed_datasets_dict = {}
self.analysis_summary_dict = {}
def data_reading(self, filename=None, content=None):
"""
Data import. Can read the df_original from a file or buffer.
df_original contains all information of the raw file; tab separated file is imported,
Args:
self:
filename: string
imported_columns : regular expression; columns matching it will be imported
filename: default None, to use the class attribute. Otherwise overwrites the class attribute upon success.
content: default None, to use the filename. Any valid input to pd.read_csv can be provided, e.g. a StringIO buffer.
Returns:
self.df_orginal: raw, unprocessed dataframe, single level column index
"""
# use instance attribute if no filename is provided
if filename is None:
filename = self.filename
# if no buffer is provided for the content read straight from the file
if content is None:
content = filename
if filename.endswith("xls") or filename.endswith("txt"):
self.df_original = pd.read_csv(content, sep="\t", comment="#", usecols=lambda x: bool(re.match(self.imported_columns, x)), low_memory = True)
else: #assuming csv file
self.df_original = pd.read_csv(content, sep=",", comment="#", usecols=lambda x: bool(re.match(self.imported_columns, x)), low_memory = True)
assert self.df_original.shape[0]>10 and self.df_original.shape[1]>5
self.filename = filename
return self.df_original
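# Typical usage (a sketch; the constructor arguments, file name, and name_pattern
# below are illustrative placeholders, not values shipped with the package):
#   ds = SpatialDataSet(filename="proteinGroups.txt", expname="Experiment1",
#                       acquisition="LFQ6 - MQ", comment="",
#                       name_pattern=".* (?P<rep>.*)_(?P<frac>.*)")
#   df_raw = ds.data_reading()   # raw, column-filtered dataframe
#   ds.processingdf()            # filtering, 0-1 normalization, log transform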
def processingdf(self, name_pattern=None, summed_MSMS_counts=None, consecutiveLFQi=None, RatioHLcount=None, RatioVariability=None, custom_columns=None, custom_normalized=None):
"""
Analysis of the SILAC/LFQ-MQ/LFQ-Spectronaut data will be performed. The dataframe will be filtered, normalized, and converted into a dataframe,
characterized by a flat column index. These tasks are performed by the following functions:
indexingdf(df_original, acquisition_set_dict, acquisition, fraction_dict, name_pattern)
spectronaut_LFQ_indexingdf(df_original, Spectronaut_columnRenaming, acquisition_set_dict, acquisition, fraction_dict, name_pattern)
stringency_silac(df_index)
normalization_01_silac(df_stringency_mapfracstacked):
logarithmization_silac(df_stringency_mapfracstacked):
stringency_lfq(df_index):
normalization_01_lfq(df_stringency_mapfracstacked):
logarithmization_lfq(df_stringency_mapfracstacked):
Args:
self.acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
additional arguments can be used to override the value set by the class init function
Returns:
self:
map_names: list of Map names
df_01_stacked: df; 0-1 normalized data with "normalized profile" as column name
df_log_stacked: df; log transformed data
analysis_summary_dict["0/1 normalized data - mean"] : 0/1 normalized data across all maps by calculating the mean
["changes in shape after filtering"]
["Unique Proteins"] : unique proteins, derived from the first entry of Protein IDs, seperated by a ";"
["Analysis parameters"] : {"acquisition" : ...,
"filename" : ...,
#SILAC#
"Ratio H/L count 1 (>=X)" : ...,
"Ratio H/L count 2 (>=Y, var<Z)" : ...,
"Ratio variability (<Z, count>=Y)" : ...
#LFQ#
"consecutive data points" : ...,
"summed MS/MS counts" : ...
}
"""
if name_pattern is None:
name_pattern = self.name_pattern
if self.acquisition == "SILAC - MQ":
if RatioHLcount is None:
RatioHLcount = self.RatioHLcount
if RatioVariability is None:
RatioVariability = self.RatioVariability
elif self.acquisition == "Custom":
if custom_columns is None:
custom_columns = self.custom_columns
if custom_normalized is None:
custom_normalized = self.custom_normalized
else:
if summed_MSMS_counts is None:
summed_MSMS_counts = self.summed_MSMS_counts
if consecutiveLFQi is None:
consecutiveLFQi = self.consecutiveLFQi
shape_dict = {}
def indexingdf():
"""
For data output from MaxQuant, all columns - except for "MS/MS count" and "LFQ intensity" (LFQ) | "Ratio H/L count", "Ratio H/L
(SILAC) - will be set as index. A multiindex will be generated, containing "Set" ("MS/MS count", "LFQ intensity"| "Ratio H/L count", "Ratio H/L
variability [%]"), "Fraction" (= defined via "name_pattern") and "Map" (= defined via "name_pattern") as level names, allowing the stacking and
unstacking of the dataframe. The dataframe will be filtered by removing matches to the reverse database, matches only identified by site, and
potential contaminants.
Args:
self:
df_original: dataframe, columns defined through self.imported_columns
acquisition_set_dict: dictionary, all columns will be set as index, except for those that are listed in acquisition_set_dict
acquisition: string, one of "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
fraction_dict: "Fraction" is part of the multiindex; fraction_dict allows the renaming of the fractions e.g. 3K -> 03K
name_pattern: regular expression, to identify Map-Fraction-(Replicate)
Returns:
self:
df_index: multiindex dataframe, which contains 3 level labels: Map, Fraction, Type
shape_dict["Original size"] of df_original
shape_dict["Shape after categorical filtering"] of df_index
fractions: list of fractions e.g. ["01K", "03K", ...]
"""
df_original = self.df_original.copy()
df_original.rename({"Proteins": "Protein IDs"}, axis=1, inplace=True)
df_original = df_original.set_index([col for col in df_original.columns
if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]]) == False])
# multiindex will be generated by extracting the information about the Map, Fraction and Type from each individual column name
multiindex = pd.MultiIndex.from_arrays(
arrays=[
[[re.findall(s, col)[0] for s in self.acquisition_set_dict[self.acquisition] if re.match(s,col)][0]
for col in df_original.columns],
[re.match(self.name_pattern, col).group("rep") for col in df_original.columns] if not "<cond>" in self.name_pattern
else ["_".join(re.match(self.name_pattern, col).group("cond", "rep")) for col in df_original.columns],
[re.match(self.name_pattern, col).group("frac") for col in df_original.columns],
],
names=["Set", "Map", "Fraction"]
)
df_original.columns = multiindex
df_original.sort_index(1, inplace=True)
shape_dict["Original size"] = df_original.shape
try:
df_index = df_original.xs(
np.nan, 0, "Reverse")
except:
pass
try:
df_index = df_index.xs(
np.nan, 0, "Potential contaminant")
except:
pass
try:
df_index = df_index.xs(
np.nan, 0, "Only identified by site")
except:
pass
df_index.replace(0, np.nan, inplace=True)
shape_dict["Shape after categorical filtering"] = df_index.shape
df_index.rename(columns={"MS/MS Count":"MS/MS count"}, inplace=True)
fraction_wCyt = list(df_index.columns.get_level_values("Fraction").unique())
##############Cyt should get only be removed if it is not an NMC split
if "Cyt" in fraction_wCyt and len(fraction_wCyt) >= 4:
df_index.drop("Cyt", axis=1, level="Fraction", inplace=True)
try:
if self.acquisition == "LFQ5 - MQ":
df_index.drop("01K", axis=1, level="Fraction", inplace=True)
except:
pass
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
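# Example of the column-name parsing done above (the column name and pattern are
# hypothetical; adjust name_pattern to the actual report layout):
#   name_pattern = ".* (?P<rep>.*)_(?P<frac>.*)"
#   col = "LFQ intensity Map1_03K"
#   -> Set = "LFQ intensity", Map = "Map1", Fraction = "03K"
# which becomes one entry of the ("Set", "Map", "Fraction") column MultiIndex.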
def custom_indexing_and_normalization():
df_original = self.df_original.copy()
df_original.rename({custom_columns["ids"]: "Protein IDs", custom_columns["genes"]: "Gene names"}, axis=1, inplace=True)
df_original = df_original.set_index([col for col in df_original.columns
if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]]) == False])
# multiindex will be generated by extracting the information about the Map, Fraction and Type from each individual column name
multiindex = pd.MultiIndex.from_arrays(
arrays=[
["normalized profile" for col in df_original.columns],
[re.match(self.name_pattern, col).group("rep") for col in df_original.columns] if not "<cond>" in self.name_pattern
else ["_".join(re.match(self.name_pattern, col).group("cond", "rep")) for col in df_original.columns],
[re.match(self.name_pattern, col).group("frac") for col in df_original.columns],
],
names=["Set", "Map", "Fraction"]
)
df_original.columns = multiindex
df_original.sort_index(1, inplace=True)
shape_dict["Original size"] = df_original.shape
# for custom upload assume full normalization for now. this should be extended to valid value filtering and 0-1 normalization later
df_index = df_original.copy()
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
def spectronaut_LFQ_indexingdf():
"""
For data generated from the Spectronaut software, columns will be renamed so that they fit the scheme of MaxQuant output data. Subsequently, all
columns - except for "MS/MS count" and "LFQ intensity" - will be set as index. A multiindex will be generated, containing "Set" ("MS/MS count" and
"LFQ intensity"), "Fraction" and "Map" (= defined via "name_pattern"; both based on the column name R.Condition - equivalent to the column name "Map"
in df_renamed["Map"]) as level labels.
!!!
!!!It is very important to define R.Fraction, R.condition already during the setup of Spectronaut!!!
!!!
Args:
self:
df_original: dataframe, columns defined through self.imported_columns
Spectronaut_columnRenaming
acquisition_set_dict: dictionary, all columns will be set as index, except for those that are listed in acquisition_set_dict
acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut"
fraction_dict: "Fraction" is part of the multiindex; fraction_dict allows the renaming of the fractions e.g. 3K -> 03K
name_pattern: regular expression, to identify Map-Fraction-(Replicate)
Returns:
self:
df_index: multiindex dataframe, which contains 3 level labels: Map, Fraction, Type
shape_dict["Original size"] of df_index
fractions: list of fractions e.g. ["01K", "03K", ...]
"""
df_original = self.df_original.copy()
df_renamed = df_original.rename(columns=self.Spectronaut_columnRenaming)
df_renamed["Fraction"] = [re.match(self.name_pattern, i).group("frac") for i in df_renamed["Map"]]
df_renamed["Map"] = [re.match(self.name_pattern, i).group("rep") for i in df_renamed["Map"]] if not "<cond>" in self.name_pattern else ["_".join(
re.match(self.name_pattern, i).group("cond", "rep")) for i in df_renamed["Map"]]
df_index = df_renamed.set_index([col for col in df_renamed.columns if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]])==False])
df_index.columns.names = ["Set"]
# In case fractionated data was used this needs to be caught and aggregated
try:
df_index = df_index.unstack(["Map", "Fraction"])
except ValueError:
df_index = df_index.groupby(by=df_index.index.names).agg(np.nansum, axis=0)
df_index = df_index.unstack(["Map", "Fraction"])
df_index.replace(0, np.nan, inplace=True)
shape_dict["Original size"]=df_index.shape
fraction_wCyt = list(df_index.columns.get_level_values("Fraction").unique())
#Cyt is removed only if it is not an NMC split
if "Cyt" in fraction_wCyt and len(fraction_wCyt) >= 4:
df_index.drop("Cyt", axis=1, level="Fraction", inplace=True)
try:
if self.acquisition == "LFQ5 - Spectronaut":
df_index.drop("01K", axis=1, level="Fraction", inplace=True)
except:
pass
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
def stringency_silac(df_index):
"""
The multiindex dataframe is subjected to stringency filtering. Only proteins with complete profiles are considered (e.g. a set of 5 SILAC ratios
in case you have 5 fractions; any protein with missing values is rejected). Proteins are retained with 3 or more quantifications in each
subfraction (=count). Furthermore, proteins with only 2 quantification events in one or more subfractions are retained if their ratio variability for
ratios obtained with 2 quantification events is below 30% (=var). SILAC ratios are linearly normalized by division through the fraction median.
Subsequently, normalization to SILAC loading is performed. Data is annotated based on the specified marker set, e.g. eLife.
Args:
df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Type
RatioHLcount: int, 2
RatioVariability: int, 30
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
fractions: list of fractions e.g. ["01K", "03K", ...]
Returns:
df_stringency_mapfracstacked: dataframe, in which "MAP" and "Fraction" are stacked;
columns "Ratio H/L count", "Ratio H/L variability [%]", and "Ratio H/L" stored as single level indices
shape_dict["Shape after Ratio H/L count (>=3)/var (count>=2, var<30) filtering"] of df_countvarfiltered_stacked
shape_dict["Shape after filtering for complete profiles"] of df_stringency_mapfracstacked
"""
# Fraction and Map will be stacked
df_stack = df_index.stack(["Fraction", "Map"])
# filtering for a sufficient number of quantifications (count in "Ratio H/L count"), taking variability (var in "Ratio H/L variability [%]") into account
# zip: allows direct comparison of count and var
# only if the filtering parameters are fulfilled the data will be introduced into df_countvarfiltered_stacked
#default setting: RatioHLcount = 2 ; RatioVariability = 30
df_countvarfiltered_stacked = df_stack.loc[[count>RatioHLcount or (count==RatioHLcount and var<RatioVariability)
for var, count in zip(df_stack["Ratio H/L variability [%]"], df_stack["Ratio H/L count"])]]
shape_dict["Shape after Ratio H/L count (>=3)/var (count==2, var<30) filtering"] = df_countvarfiltered_stacked.unstack(["Fraction", "Map"]).shape
# "Ratio H/L":normalization to SILAC loading, each individual experiment (FractionXMap) will be divided by its median
# np.median([...]): only entries, that are not NANs are considered
df_normsilac_stacked = df_countvarfiltered_stacked["Ratio H/L"]\
.unstack(["Fraction", "Map"])\
.apply(lambda x: x/np.nanmedian(x), axis=0)\
.stack(["Map", "Fraction"])
df_stringency_mapfracstacked = df_countvarfiltered_stacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(
pd.DataFrame(df_normsilac_stacked, columns=["Ratio H/L"]))
# dataframe is grouped (Map, id), that allows the filtering for complete profiles
df_stringency_mapfracstacked = df_stringency_mapfracstacked.groupby(["Map", "id"]).filter(lambda x: len(x)>=len(self.fractions))
shape_dict["Shape after filtering for complete profiles"]=df_stringency_mapfracstacked.unstack(["Fraction", "Map"]).shape
# Ratio H/L is converted into Ratio L/H
df_stringency_mapfracstacked["Ratio H/L"] = df_stringency_mapfracstacked["Ratio H/L"].transform(lambda x: 1/x)
#Annotation with marker genes
df_organellarMarkerSet = self.df_organellarMarkerSet
df_stringency_mapfracstacked.reset_index(inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.merge(df_organellarMarkerSet, how="left", on="Gene names")
df_stringency_mapfracstacked.set_index([c for c in df_stringency_mapfracstacked.columns
if c not in ["Ratio H/L count","Ratio H/L variability [%]","Ratio H/L"]], inplace=True)
df_stringency_mapfracstacked.rename(index={np.nan:"undefined"}, level="Compartment", inplace=True)
return df_stringency_mapfracstacked
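# Illustration of the count/variability filter above with the default settings
# (RatioHLcount=2, RatioVariability=30); the numbers are made up:
#   count=3, var=45  -> kept    (count > 2)
#   count=2, var=12  -> kept    (count == 2 and var < 30)
#   count=2, var=45  -> dropped
#   count=1, var=NaN -> dropped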
def normalization_01_silac(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is 0-1 normalized ("Ratio H/L").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" are stacked;
columns "Ratio H/L count", "Ratio H/L variability [%]", and "Ratio H/L" stored as single level indices
self:
fractions: list of fractions e.g. ["01K", "03K", ...]
data_completeness: series, for each individual map, as well as combined maps: 1 - (percentage of NANs)
Returns:
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "Ratio H/L" is 0-1 normalized and renamed to "normalized
profile"; the columns "Ratio H/L count", "Ratio H/L variability [%]", and "normalized profile" stored as single level indices;
plotting is possible now
self:
analysis_summary_dict["Data/Profile Completeness"] : df, with information about Data/Profile Completeness
column: "Experiment", "Map", "Data completeness", "Profile completeness"
no row index
"""
df_01norm_unstacked = df_stringency_mapfracstacked["Ratio H/L"].unstack("Fraction")
# 0:1 normalization of Ratio L/H
df_01norm_unstacked = df_01norm_unstacked.div(df_01norm_unstacked.sum(axis=1), axis=0)
df_01_stacked = df_stringency_mapfracstacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(pd.DataFrame
(df_01norm_unstacked.stack("Fraction"),columns=["Ratio H/L"]))
# "Ratio H/L" will be renamed to "normalized profile"
df_01_stacked.columns = [col if col!="Ratio H/L" else "normalized profile" for col in df_01_stacked.columns]
return df_01_stacked
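# Numeric sketch of the 0-1 normalization above (hypothetical ratios for one
# protein in one map): each profile is divided by its own sum across fractions,
#   ratio per fraction: [2.0, 1.0, 1.0]  ->  normalized profile: [0.5, 0.25, 0.25]
# so every complete profile sums to 1 and profiles become comparable across proteins.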
def logarithmization_silac(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is logarithmized ("Ratio H/L").
Args:
df_stringency_mapfracstacked: dataframe, in which "MAP" and "Fraction" are stacked; the columns "Ratio H/L count", "Ratio H/L variability [%]",
and "Ratio H/L" stored as single level indices
Returns:
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized "Ratio H/L"
data; the columns "Ratio H/L count", "Ratio H/L variability [%]" and "log profile" are stored as single level indices;
PCA is possible now
"""
# logarithmizing, basis of 2
df_lognorm_ratio_stacked = df_stringency_mapfracstacked["Ratio H/L"].transform(np.log2)
df_log_stacked = df_stringency_mapfracstacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(
pd.DataFrame(df_lognorm_ratio_stacked, columns=["Ratio H/L"]))
# "Ratio H/L" will be renamed to "log profile"
df_log_stacked.columns = [col if col !="Ratio H/L" else "log profile" for col in df_log_stacked.columns]
return df_log_stacked
def stringency_lfq(df_index):
"""
The multiindex dataframe is subjected to stringency filtering. Only proteins that were identified with
at least [4] consecutive data points for "LFQ intensity" and whose summed MS/MS counts are >= n(fractions)*[2]
(LFQ5: min 10 and LFQ6: min 12, respectively; coverage filtering) are included.
Data is annotated based on the specified marker set, e.g. eLife.
Args:
df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Type
self:
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
fractions: list of fractions e.g. ["01K", "03K", ...]
summed_MSMS_counts: int, 2
consecutiveLFQi: int, 4
Returns:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked; "LFQ intensity" and "MS/MS count" define a
single-level column index
self:
shape_dict["Shape after MS/MS value filtering"] of df_mscount_mapstacked
shape_dict["Shape after consecutive value filtering"] of df_stringency_mapfracstacked
"""
df_index = df_index.stack("Map")
# sorting the level 0, in order to have LFQ intensity - MS/MS count instead of continuous alternation
df_index.sort_index(axis=1, level=0, inplace=True)
# "MS/MS count"-column: take the sum over the fractions; if the sum is larger than n[fraction]*2, it will be stored in the new dataframe
minms = (len(self.fractions) * self.summed_MSMS_counts)
if minms > 0:
df_mscount_mapstacked = df_index.loc[df_index[("MS/MS count")].apply(np.sum, axis=1) >= minms]
shape_dict["Shape after MS/MS value filtering"]=df_mscount_mapstacked.unstack("Map").shape
df_stringency_mapfracstacked = df_mscount_mapstacked.copy()
else:
df_stringency_mapfracstacked = df_index.copy()
# if there are at least e.g. 4 consecutive non-NaN values, the row is retained (the apply below yields a boolean Series, not a dataframe)
df_stringency_mapfracstacked.sort_index(level="Fraction", axis=1, key=natsort_index_keys, inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.loc[
df_stringency_mapfracstacked[("LFQ intensity")]\
.apply(lambda x: np.isfinite(x), axis=0)\
.apply(lambda x: sum(x) >= self.consecutiveLFQi and any(x.rolling(window=self.consecutiveLFQi).sum() >= self.consecutiveLFQi), axis=1)]
shape_dict["Shape after consecutive value filtering"]=df_stringency_mapfracstacked.unstack("Map").shape
df_stringency_mapfracstacked = df_stringency_mapfracstacked.copy().stack("Fraction")
#Annotation with marker genes
df_organellarMarkerSet = self.df_organellarMarkerSet
df_stringency_mapfracstacked.reset_index(inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.merge(df_organellarMarkerSet, how="left", on="Gene names")
df_stringency_mapfracstacked.set_index([c for c in df_stringency_mapfracstacked.columns
if c!="MS/MS count" and c!="LFQ intensity"], inplace=True)
df_stringency_mapfracstacked.rename(index={np.nan : "undefined"}, level="Compartment", inplace=True)
return df_stringency_mapfracstacked
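# Sketch of the consecutive-value filter above with consecutiveLFQi=4 (made-up finiteness mask):
#   finite mask over naturally sorted fractions: [1, 1, 0, 1, 1, 1, 1]
#   rolling(window=4).sum()                    : [nan, nan, nan, 3, 3, 3, 4]
#   any(rolling sum >= 4) -> True, so the protein is retained;
#   a mask like [1, 0, 1, 0, 1, 1, 1] never reaches 4 and is dropped.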
def normalization_01_lfq(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is 0-1 normalized ("LFQ intensity").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked, "LFQ intensity" and "MS/MS count" define a
single-level column index
self:
fractions: list of fractions e.g. ["01K", "03K", ...]
Returns:
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "LFQ intensity" is 0-1 normalized and renamed to
"normalized profile"; the columns "normalized profile" and "MS/MS count" are stored as single level indices; plotting is possible now
"""
df_01norm_mapstacked = df_stringency_mapfracstacked["LFQ intensity"].unstack("Fraction")
# 0-1 normalization of LFQ intensity
df_01norm_unstacked = df_01norm_mapstacked.div(df_01norm_mapstacked.sum(axis=1), axis=0)
df_rest = df_stringency_mapfracstacked.drop("LFQ intensity", axis=1)
df_01_stacked = df_rest.join(pd.DataFrame(df_01norm_unstacked.stack(
"Fraction"),columns=["LFQ intensity"]))
# rename columns: "LFQ intensity" into "normalized profile"
df_01_stacked.columns = [col if col!="LFQ intensity" else "normalized profile" for col in
df_01_stacked.columns]
#imputation
df_01_stacked = df_01_stacked.unstack("Fraction").replace(np.NaN, 0).stack("Fraction")
df_01_stacked = df_01_stacked.sort_index()
return df_01_stacked
def logarithmization_lfq(df_stringency_mapfracstacked):
"""The multiindex dataframe, that was subjected to stringency filtering, is logarithmized ("LFQ intensity").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked; "LFQ intensity" and "MS/MS count" define a
single-level column index
Returns:
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized
"LFQ intensity"; the columns "log profile" and "MS/MS count" are stored as single level indices; PCA is possible now
"""
df_lognorm_ratio_stacked = df_stringency_mapfracstacked["LFQ intensity"].transform(np.log2)
df_rest = df_stringency_mapfracstacked.drop("LFQ intensity", axis=1)
df_log_stacked = df_rest.join(pd.DataFrame(df_lognorm_ratio_stacked, columns=["LFQ intensity"]))
# "LFQ intensity" will be renamed to "log profile"
df_log_stacked.columns = [col if col!="LFQ intensity" else "log profile" for col in df_log_stacked.columns]
return df_log_stacked
def split_ids_uniprot(el):
"""
This finds the primary canonical protein ID in the protein group. If no canonical ID is present it selects the first isoform ID.
"""
p1 = el.split(";")[0]
if "-" not in p1:
return p1
else:
p = p1.split("-")[0]
if p in el.split(";"):
return p
else:
return p1
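# Behaviour of split_ids_uniprot on a few example protein groups (accessions are arbitrary):
#   "Q9Y6K9-2;Q9Y6K9;A0A024R4E5" -> "Q9Y6K9"    (canonical ID present in the group)
#   "P12345-2;P67890"            -> "P12345-2"  (no matching canonical ID, keep first isoform)
#   "P12345;Q99999"              -> "P12345"    (first entry already canonical)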
if self.acquisition == "SILAC - MQ":
# Index data
df_index = indexingdf()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
# Run stringency filtering and normalization
df_stringency_mapfracstacked = stringency_silac(df_index)
self.df_stringencyFiltered = df_stringency_mapfracstacked
self.df_01_stacked = normalization_01_silac(df_stringency_mapfracstacked)
self.df_log_stacked = logarithmization_silac(df_stringency_mapfracstacked)
# format and reduce 0-1 normalized data for comparison with other experiments
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop(["Ratio H/L count", "Ratio H/L variability [%]"], inplace=True, axis=1)
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
# populate analysis summary dictionary with (meta)data
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"Ratio H/L count" : self.RatioHLcount,
"Ratio variability" : self.RatioVariability,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
# TODO this line needs to be removed.
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
elif self.acquisition == "LFQ5 - MQ" or self.acquisition == "LFQ6 - MQ" or self.acquisition == "LFQ5 - Spectronaut" or self.acquisition == "LFQ6 - Spectronaut":
#if not summed_MS_counts:
# summed_MS_counts = self.summed_MS_counts
#if not consecutiveLFQi:
# consecutiveLFQi = self.consecutiveLFQi
if self.acquisition == "LFQ5 - MQ" or self.acquisition == "LFQ6 - MQ":
df_index = indexingdf()
elif self.acquisition == "LFQ5 - Spectronaut" or self.acquisition == "LFQ6 - Spectronaut":
df_index = spectronaut_LFQ_indexingdf()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
df_stringency_mapfracstacked = stringency_lfq(df_index)
self.df_stringencyFiltered = df_stringency_mapfracstacked
self.df_log_stacked = logarithmization_lfq(df_stringency_mapfracstacked)
self.df_01_stacked = normalization_01_lfq(df_stringency_mapfracstacked)
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop("MS/MS count", inplace=True, axis=1, errors="ignore")
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()#double_precision=4) #.reset_index()
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"consecutive data points" : self.consecutiveLFQi,
"summed MS/MS counts" : self.summed_MSMS_counts,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
#return self.df_01_stacked
elif self.acquisition == "Custom":
df_index = custom_indexing_and_normalization()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
df_01_stacked = df_index.stack(["Map", "Fraction"])
df_01_stacked = df_01_stacked.reset_index().merge(self.df_organellarMarkerSet, how="left", on="Gene names")
df_01_stacked.set_index([c for c in df_01_stacked.columns if c not in ["normalized profile"]], inplace=True)
df_01_stacked.rename(index={np.nan:"undefined"}, level="Compartment", inplace=True)
self.df_01_stacked = df_01_stacked
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop("MS/MS count", inplace=True, axis=1, errors="ignore")
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()#double_precision=4) #.reset_index()
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
else:
return "I do not know this"
def plot_log_data(self):
"""
Args:
self.df_log_stacked
Returns:
log_histogram: Histogram of log transformed data
"""
log_histogram = px.histogram(self.df_log_stacked.reset_index().sort_values(["Map", "Fraction"], key=natsort_list_keys),
x="log profile",
facet_col="Fraction",
facet_row="Map",
template="simple_white",
labels={"log profile": "log tranformed data ({})".format("LFQ intenisty" if self.acquisition != "SILAC - MQ" else "Ratio H/L")}
)
log_histogram.for_each_xaxis(lambda axis: axis.update(title={"text":""}))
log_histogram.for_each_yaxis(lambda axis: axis.update(title={"text":""}))
log_histogram.add_annotation(x=0.5, y=0, yshift=-50, xref="paper",showarrow=False, yref="paper",
text="log2(LFQ intensity)")
log_histogram.add_annotation(x=0, y=0.5, textangle=270, xref="paper",showarrow=False, yref="paper", xshift=-50,
text="count")
log_histogram.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
return log_histogram
def quantity_profiles_proteinGroups(self):
"""
Number of profiles, protein groups per experiment, and the data completeness of profiles (total quantity, intersection) are calculated.
Args:
self:
acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Type
df_01_stacked: df; 0-1 normalized data with "normalized profile" as column name
Returns:
self:
df_quantity_pr_pg: df; no index, columns: "filtering", "type", "npg", "npr", "npr_dc"; containing the following information:
npg_t: protein groups per experiment total quantity
npgf_t = groups with valid profiles per experiment total quantity
npr_t: profiles with any valid values
nprf_t = total number of valid profiles
npg_i: protein groups per experiment intersection
npgf_i = groups with valid profiles per experiment intersection
npr_i: profiles with any valid values in the intersection
nprf_i = total number of valid profiles in the intersection
npr_t_dc: profiles, % values != nan
nprf_t_dc = profiles, total, filtered, % values != nan
npr_i_dc: profiles, intersection, % values != nan
nprf_i_dc = profiles, intersection, filtered, % values != nan
df_npg | df_npgf: index: maps e.g. "Map1", "Map2",..., columns: fractions e.g. "03K", "06K", ...
npg_f = protein groups, per fraction
or npgf_f = protein groups, filtered, per fraction
df_npg_dc | df_npgf_dc: index: maps e.g. "Map1", "Map2",..., columns: fractions e.g. "03K", "06K", ...
npg_f_dc = protein groups, per fraction, % values != nan
or npgf_f_dc = protein groups, filtered, per fraction, % values != nan
"""
if self.acquisition == "SILAC - MQ":
df_index = self.df_index["Ratio H/L"]
df_01_stacked = self.df_01_stacked["normalized profile"]
elif self.acquisition.startswith("LFQ"):
df_index = self.df_index["LFQ intensity"]
df_01_stacked = self.df_01_stacked["normalized profile"].replace(0, np.nan)
elif self.acquisition == "Custom":
df_index = self.df_index["normalized profile"]
df_01_stacked = self.df_01_stacked["normalized profile"].replace(0, np.nan)
#unfiltered
npg_t = df_index.shape[0]
df_index_MapStacked = df_index.stack("Map")
npr_t = df_index_MapStacked.shape[0]/len(self.map_names)
npr_t_dc = 1-df_index_MapStacked.isna().sum().sum()/np.prod(df_index_MapStacked.shape)
#filtered
npgf_t = df_01_stacked.unstack(["Map", "Fraction"]).shape[0]
df_01_MapStacked = df_01_stacked.unstack("Fraction")
nprf_t = df_01_MapStacked.shape[0]/len(self.map_names)
nprf_t_dc = 1-df_01_MapStacked.isna().sum().sum()/np.prod(df_01_MapStacked.shape)
#unfiltered intersection
try:
df_index_intersection = df_index_MapStacked.groupby(level="Sequence").filter(lambda x : len(x)==len(self.map_names))
except:
df_index_intersection = df_index_MapStacked.groupby(level="Protein IDs").filter(lambda x : len(x)==len(self.map_names))
npr_i = df_index_intersection.shape[0]/len(self.map_names)
npr_i_dc = 1-df_index_intersection.isna().sum().sum()/np.prod(df_index_intersection.shape)
npg_i = df_index_intersection.unstack("Map").shape[0]
#filtered intersection
try:
df_01_intersection = df_01_MapStacked.groupby(level = "Sequence").filter(lambda x : len(x)==len(self.map_names))
except:
df_01_intersection = df_01_MapStacked.groupby(level = "Protein IDs").filter(lambda x : len(x)==len(self.map_names))
nprf_i = df_01_intersection.shape[0]/len(self.map_names)
nprf_i_dc = 1-df_01_intersection.isna().sum().sum()/np.prod(df_01_intersection.shape)
npgf_i = df_01_intersection.unstack("Map").shape[0]
# summarize in dataframe and save to attribute
df_quantity_pr_pg = pd.DataFrame(
{
"filtering": pd.Series(["before filtering", "before filtering", "after filtering", "after filtering"], dtype=np.dtype("O")),
"type": pd.Series(["total", "intersection", "total", "intersection"], dtype=np.dtype("O")),
"number of protein groups": pd.Series([npg_t, npg_i, npgf_t, npgf_i], dtype=np.dtype("float")),
"number of profiles": pd.Series([npr_t, npr_i, nprf_t, nprf_i], dtype=np.dtype("float")),
"data completeness of profiles": pd.Series([npr_t_dc, npr_i_dc, nprf_t_dc, nprf_i_dc], dtype=np.dtype("float"))})
self.df_quantity_pr_pg = df_quantity_pr_pg.reset_index()
self.analysis_summary_dict["quantity: profiles/protein groups"] = self.df_quantity_pr_pg.to_json()
#additional depth assessment per fraction
dict_npgf = {}
dict_npg = {}
list_npg_dc = []
list_npgf_dc = []
for df_intersection in [df_index_intersection, df_01_intersection]:
for fraction in self.fractions:
df_intersection_frac = df_intersection[fraction]
npgF_f_dc = 1-df_intersection_frac.isna().sum()/len(df_intersection_frac)
npgF_f = df_intersection_frac.unstack("Map").isnull().sum(axis=1).value_counts()
if fraction not in dict_npg.keys():
dict_npg[fraction] = npgF_f
list_npg_dc.append(npgF_f_dc)
else:
dict_npgf[fraction] = npgF_f
list_npgf_dc.append(npgF_f_dc)
df_npg = pd.DataFrame(dict_npg)
df_npg.index.name = "Protein Groups present in:"
df_npg.rename_axis("Fraction", axis=1, inplace=True)
df_npg = df_npg.stack("Fraction").reset_index()
df_npg = df_npg.rename({0: "Protein Groups"}, axis=1)
df_npg.sort_values(["Fraction", "Protein Groups present in:"], inplace=True, key=natsort_list_keys)
df_npgf = pd.DataFrame(dict_npgf)
df_npgf.index.name = "Protein Groups present in:"
df_npgf.rename_axis("Fraction", axis=1, inplace=True)
df_npgf = df_npgf.stack("Fraction").reset_index()
df_npgf = df_npgf.rename({0: "Protein Groups"}, axis=1)
df_npgf.sort_values(["Fraction", "Protein Groups present in:"], inplace=True, key=natsort_list_keys)
max_df_npg = df_npg["Protein Groups present in:"].max()
min_df_npg = df_npg["Protein Groups present in:"].min()
rename_numOFnans = {}
for x, y in zip(range(max_df_npg,min_df_npg-1, -1), range(max_df_npg+1)):
if y == 1:
rename_numOFnans[x] = "{} Map".format(y)
elif y == 0:
rename_numOFnans[x] = "PG not identified".format(y)
else:
rename_numOFnans[x] = "{} Maps".format(y)
for keys in rename_numOFnans.keys():
df_npg.loc[df_npg["Protein Groups present in:"] ==keys, "Protein Groups present in:"] = rename_numOFnans[keys]
df_npgf.loc[df_npgf["Protein Groups present in:"] ==keys, "Protein Groups present in:"] = rename_numOFnans[keys]
# summarize in dataframe and save to attributes
self.df_npg_dc = pd.DataFrame(
{
"Fraction" : pd.Series(self.fractions),
"Data completeness before filtering": pd.Series(list_npg_dc),
"Data completeness after filtering": pd.Series(list_npgf_dc),
})
self.df_npg = df_npg
self.df_npgf = df_npgf
def plot_quantity_profiles_proteinGroups(self):
"""
Args:
self:
df_quantity_pr_pg: df; no index, columns: "filtering", "type", "npg", "npr", "npr_dc"; further information: see above
Returns:
"""
df_quantity_pr_pg = self.df_quantity_pr_pg
layout = go.Layout(barmode="overlay",
xaxis_tickangle=90,
autosize=False,
width=300,
height=500,
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
#title="Map",
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
mirror=True),
template="simple_white")
fig_npg = go.Figure()
for t in df_quantity_pr_pg["type"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["type"] == t]
fig_npg.add_trace(go.Bar(
x=plot_df["filtering"],
y=plot_df["number of protein groups"],
name=t))
fig_npg.update_layout(layout, title="Number of Protein Groups", yaxis=go.layout.YAxis(title="Protein Groups"))
fig_npr = go.Figure()
for t in df_quantity_pr_pg["type"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["type"] == t]
fig_npr.add_trace(go.Bar(
x=plot_df["filtering"],
y=plot_df["number of profiles"],
name=t))
fig_npr.update_layout(layout, title="Number of Profiles")
df_quantity_pr_pg = df_quantity_pr_pg.sort_values("filtering")
fig_npr_dc = go.Figure()
for t in df_quantity_pr_pg["filtering"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["filtering"] == t]
fig_npr_dc.add_trace(go.Bar(
x=plot_df["type"],
y=plot_df["data completeness of profiles"],
name=t))
fig_npr_dc.update_layout(layout, title="Coverage", yaxis=go.layout.YAxis(title="Data completeness"))
#fig_npr_dc.update_xaxes(tickangle=30)
fig_npg_F = px.bar(self.df_npg,
x="Fraction",
y="Protein Groups",
color="Protein Groups present in:",
template="simple_white",
title = "Protein groups per fraction - before filtering",
width=500)
fig_npgf_F = px.bar(self.df_npgf,
x="Fraction",
y="Protein Groups",
color="Protein Groups present in:",
template="simple_white",
title = "Protein groups per fraction - after filtering",
width=500)
fig_npg_F_dc = go.Figure()
for data_type in ["Data completeness after filtering", "Data completeness before filtering"]:
fig_npg_F_dc.add_trace(go.Bar(
x=self.df_npg_dc["Fraction"],
y=self.df_npg_dc[data_type],
name=data_type))
fig_npg_F_dc.update_layout(layout, barmode="overlay", title="Data completeness per fraction", yaxis=go.layout.YAxis(title=""), height=450, width=600)
return fig_npg, fig_npr, fig_npr_dc, fig_npg_F, fig_npgf_F, fig_npg_F_dc
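# Usage sketch (assumes a SpatialDataSet instance "ds" on which processingdf()
# has already been run; variable names are illustrative):
#   ds.quantity_profiles_proteinGroups()
#   fig_npg, fig_npr, fig_npr_dc, fig_npg_F, fig_npgf_F, fig_npg_F_dc = \
#       ds.plot_quantity_profiles_proteinGroups()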
def perform_pca(self):
"""
PCA will be performed, using logarithmized data.
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], "V-type proton ATPase" : [...], ...})
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized "LFQ intensity"
and "Ratio H/L", respectively; additionally the columns "MS/MS count" and "Ratio H/L count|Ratio H/L variability [%]" are stored
as single level indices
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "LFQ intensity" is 0-1 normalized and renamed to "normalized
profile"; the columns "normalized profile"" and "MS/MS count" are stored as single level indices; plotting is possible now
Returns:
self:
df_pca: df, PCA was performed, while keeping the information of the Maps
columns: "PC1", "PC2", "PC3"
index: "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "Q-value", "Score", "id", "Map" "Compartment"
df_pca_combined: df, PCA was performed across the Maps
columns: "PC1", "PC2", "PC3"
index: "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "Q-value", "Score", "id", "Compartment"
df_pca_all_marker_cluster_maps: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3", filtered for marker genes, that are consistent
throughout all maps / coverage filtering.
"""
markerproteins = self.markerproteins
if self.acquisition == "SILAC - MQ":
df_01orlog_fracunstacked = self.df_log_stacked["log profile"].unstack("Fraction").dropna()
df_01orlog_MapFracUnstacked = self.df_log_stacked["log profile"].unstack(["Fraction", "Map"]).dropna()
elif self.acquisition.startswith("LFQ") or self.acquisition == "Custom":
df_01orlog_fracunstacked = self.df_01_stacked["normalized profile"].unstack("Fraction").dropna()
df_01orlog_MapFracUnstacked = self.df_01_stacked["normalized profile"].unstack(["Fraction", "Map"]).dropna()
pca = PCA(n_components=3)
# df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3"
df_pca = pd.DataFrame(pca.fit_transform(df_01orlog_fracunstacked))
df_pca.columns = ["PC1", "PC2", "PC3"]
df_pca.index = df_01orlog_fracunstacked.index
self.df_pca = df_pca.sort_index(level=["Gene names", "Compartment"])
# df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3"
df_pca_combined = pd.DataFrame(pca.fit_transform(df_01orlog_MapFracUnstacked))
df_pca_combined.columns = ["PC1", "PC2", "PC3"]
df_pca_combined.index = df_01orlog_MapFracUnstacked.index
self.df_pca_combined = df_pca_combined.sort_index(level=["Gene names", "Compartment"])
map_names = self.map_names
df_pca_all_marker_cluster_maps = pd.DataFrame()
df_pca_filtered = df_pca.unstack("Map").dropna()
for clusters in markerproteins:
for marker in markerproteins[clusters]:
try:
plot_try_pca = df_pca_filtered.xs(marker, level="Gene names", drop_level=False)
except KeyError:
continue
df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.append(
plot_try_pca)
if len(df_pca_all_marker_cluster_maps) == 0:
df_pca_all_marker_cluster_maps = df_pca_filtered.stack("Map")
else:
df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.stack("Map")
self.df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.sort_index(level=["Gene names", "Compartment"])
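# Illustrative sketch (not part of the original class; `df_wide` is a placeholder name): the core of
# perform_pca is a 3-component PCA on the wide-format profile matrix (rows: proteins, columns: fractions),
# roughly equivalent to:
#
#   from sklearn.decomposition import PCA
#   pca = PCA(n_components=3)
#   df_wide = df_wide.dropna()                       # complete profiles only
#   df_pca = pd.DataFrame(pca.fit_transform(df_wide),
#                         columns=["PC1", "PC2", "PC3"],
#                         index=df_wide.index)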
def plot_global_pca(self, map_of_interest="Map1", cluster_of_interest="Proteasome", x_PCA="PC1", y_PCA="PC3", collapse_maps=False):
"""
PCA plot will be generated
Args:
self:
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3",
index: "Gene names", "Protein IDs", "C-Score", "Q-value", "Map", "Compartment",
Returns:
pca_figure: global PCA plot
"""
if collapse_maps == False:
df_global_pca = self.df_pca.unstack("Map").swaplevel(0,1, axis=1)[map_of_interest].reset_index()
else:
df_global_pca = self.df_pca_combined.reset_index()
for i in self.markerproteins[cluster_of_interest]:
df_global_pca.loc[df_global_pca["Gene names"] == i, "Compartment"] = "Selection"
compartments = self.df_organellarMarkerSet["Compartment"].unique()
compartment_color = dict(zip(compartments, self.css_color))
compartment_color["Selection"] = "black"
compartment_color["undefined"] = "lightgrey"
fig_global_pca = px.scatter(data_frame=df_global_pca,
x=x_PCA,
y=y_PCA,
color="Compartment",
color_discrete_map=compartment_color,
title= "Protein subcellular localization by PCA for {}".format(map_of_interest)
if collapse_maps == False else "Protein subcellular localization by PCA of combined maps",
hover_data=["Protein IDs", "Gene names", "Compartment"],
template="simple_white",
opacity=0.9
)
return fig_global_pca
def plot_cluster_pca(self, cluster_of_interest="Proteasome"):
"""
PCA plot will be generated
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
df_pca_all_marker_cluster_maps: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3", filtered for marker genes, that are
consistent throughout all maps / coverage filtering.
Returns:
pca_figure: PCA plot, for one protein cluster all maps are plotted
"""
df_pca_all_marker_cluster_maps = self.df_pca_all_marker_cluster_maps
map_names = self.map_names
markerproteins = self.markerproteins
try:
for maps in map_names:
df_setofproteins_PCA = pd.DataFrame()
for marker in markerproteins[cluster_of_interest]:
try:
plot_try_pca = df_pca_all_marker_cluster_maps.xs((marker, maps), level=["Gene names", "Map"],
drop_level=False)
except KeyError:
continue
df_setofproteins_PCA = df_setofproteins_PCA.append(plot_try_pca)
df_setofproteins_PCA.reset_index(inplace=True)
if maps == map_names[0]:
pca_figure = go.Figure(
data=[go.Scatter3d(x=df_setofproteins_PCA.PC1,
y=df_setofproteins_PCA.PC2,
z=df_setofproteins_PCA.PC3,
hovertext=df_setofproteins_PCA["Gene names"],
mode="markers",
name=maps
)])
else:
pca_figure.add_trace(go.Scatter3d(x=df_setofproteins_PCA.PC1,
y=df_setofproteins_PCA.PC2,
z=df_setofproteins_PCA.PC3,
hovertext=df_setofproteins_PCA["Gene names"],
mode="markers",
name=maps
))
pca_figure.update_layout(autosize=False, width=500, height=500,
title="PCA plot for <br>the protein cluster: {}".format(cluster_of_interest),
template="simple_white")
return pca_figure
except:
return "This protein cluster was not quantified"
def calc_biological_precision(self):
"""
This function calculates the biological precision of all quantified protein clusters. It provides access to the data slice for all marker proteins, the distance profiles and the aggregated distances. It repeatedly applies the methods get_marker_proteins_unfiltered and calc_cluster_distances.
TODO: integrate optional arguments for calc_cluster_distances: complex_profile, distance_measure.
TODO: replace compatibility attributes with function return values and adjust attribute usage in downstream plotting functions.
Args:
self attributes:
markerproteins: dict, contains marker protein assignments
df_01_stacked: df, contains 0-1 normalized data, required for execution of get_marker_proteins_unfiltered
Returns:
df_alldistances_individual_mapfracunstacked: df, distance profiles, fully unstacked
df_alldistances_aggregated_mapunstacked: df, profile distances (manhattan distance by default), fully unstacked
df_allclusters_01_unfiltered_mapfracunstacked: df, collected marker protein data
self attributes:
df_distance_noindex: compatibility version of df_alldistances_aggregated_mapunstacked
df_allclusters_01_unfiltered_mapfracunstacked
df_allclusters_clusterdist_fracunstacked_unfiltered: compatibility version of df_allclusters_01_unfiltered_mapfracunstacked (only used by quantification_overview)
df_allclusters_clusterdist_fracunstacked: compatibility version of df_alldistances_individual_mapfracunstacked
genenames_sortedout_list = list of gene names with incomplete coverage
analysis_summary_dict entries:
"Manhattan distances" = df_distance_noindex
"Distances to the median profile": df_allclusters_clusterdist_fracunstacked, sorted and melted
"""
df_alldistances_individual_mapfracunstacked = pd.DataFrame()
df_alldistances_aggregated_mapunstacked = pd.DataFrame()
df_allclusters_01_unfiltered_mapfracunstacked = pd.DataFrame()
for cluster in self.markerproteins.keys():
# collect data irrespective of coverage
df_cluster_unfiltered = self.get_marker_proteins_unfiltered(cluster)
df_allclusters_01_unfiltered_mapfracunstacked = df_allclusters_01_unfiltered_mapfracunstacked.append(df_cluster_unfiltered)
# filter for coverage and calculate distances
df_cluster = df_cluster_unfiltered.dropna()
if len(df_cluster) == 0:
continue
df_distances_aggregated, df_distances_individual = self.calc_cluster_distances(df_cluster)
df_alldistances_individual_mapfracunstacked = df_alldistances_individual_mapfracunstacked.append(df_distances_individual)
df_alldistances_aggregated_mapunstacked = df_alldistances_aggregated_mapunstacked.append(df_distances_aggregated)
if len(df_alldistances_individual_mapfracunstacked) == 0:
self.df_distance_noindex = pd.DataFrame(columns = ["Gene names", "Map", "Cluster", "distance"])
self.df_allclusters_01_unfiltered_mapfracunstacked = pd.DataFrame(columns = ["Gene names", "Map", "Cluster", "distance"])
self.df_allclusters_clusterdist_fracunstacked_unfiltered = pd.DataFrame(columns = ["Fraction"])
self.df_allclusters_clusterdist_fracunstacked = pd.DataFrame(columns = ["Fraction"])
self.genenames_sortedout_list = "No clusters found"
return pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
else:
df_alldistances_aggregated_mapunstacked.columns.name = "Map"
## Get compatibility with plotting functions, by mimicking assignment of old functions:
# old output of distance_calculation
self.df_distance_noindex = df_alldistances_aggregated_mapunstacked.stack("Map").reset_index().rename({0: "distance"}, axis=1)
self.analysis_summary_dict["Manhattan distances"] = self.df_distance_noindex.to_json()
# old output of multiple_iterations
# self.df_allclusters_clusterdist_fracunstacked_unfiltered --> this won't exist anymore, replaced by:
self.df_allclusters_01_unfiltered_mapfracunstacked = df_allclusters_01_unfiltered_mapfracunstacked
# kept for testing of quantification table:
self.df_allclusters_clusterdist_fracunstacked_unfiltered = df_allclusters_01_unfiltered_mapfracunstacked.stack("Map")
# same as before, but now already abs
self.df_allclusters_clusterdist_fracunstacked = df_alldistances_individual_mapfracunstacked.stack("Map")
df_dist_to_median = self.df_allclusters_clusterdist_fracunstacked.stack("Fraction")
df_dist_to_median.name = "distance"
df_dist_to_median = df_dist_to_median.reindex(index=natsort.natsorted(df_dist_to_median.index))
self.analysis_summary_dict["Distances to the median profile"] = df_dist_to_median.reset_index().to_json()
self.genenames_sortedout_list = [el for el in df_allclusters_01_unfiltered_mapfracunstacked.index.get_level_values("Gene names")
if el not in df_alldistances_individual_mapfracunstacked.index.get_level_values("Gene names")]
return df_alldistances_individual_mapfracunstacked, df_alldistances_aggregated_mapunstacked, df_allclusters_01_unfiltered_mapfracunstacked
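# Hedged usage sketch (assumes `ds` is an already configured instance of this class with markerproteins and
# df_01_stacked set; the variable names are placeholders):
#
#   df_individual, df_aggregated, df_unfiltered = ds.calc_biological_precision()
#   # df_individual: per-fraction absolute deviations from the cluster reference profile (maps/fractions unstacked)
#   # df_aggregated: one aggregated (Manhattan) distance per protein and map
#   # df_unfiltered: 0-1 normalized marker protein profiles, not filtered for coverage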
def get_marker_proteins_unfiltered(self, cluster):
"""
This function retrieves the 0-1 normalized data for any given protein cluster, unfiltered for coverage.
Args:
cluster: str, cluster name, should be one of self.markerproteins.keys()
self attributes:
df_01_stacked: df, contains the fully stacked 0-1 normalized data
markerproteins: dict, contains marker protein assignments
Returns:
df_cluster_unfiltered: df, unfiltered data for the selected cluster, maps and fractions are unstacked.
self attributes:
None
"""
df_in = self.df_01_stacked["normalized profile"].unstack("Fraction")
markers = self.markerproteins[cluster]
# retrieve marker proteins
df_cluster_unfiltered = pd.DataFrame()
for marker in markers:
try:
df_p = df_in.xs(marker, level="Gene names", axis=0, drop_level=False)
except:
continue
df_cluster_unfiltered = df_cluster_unfiltered.append(df_p)
if len(df_cluster_unfiltered) == 0:
return df_cluster_unfiltered
# Unstack maps and add Cluster to index
df_cluster_unfiltered = df_cluster_unfiltered.unstack("Map")
df_cluster_unfiltered.set_index(pd.Index(np.repeat(cluster, len(df_cluster_unfiltered)), name="Cluster"), append=True, inplace=True)
return df_cluster_unfiltered
def calc_cluster_distances(self, df_cluster, complex_profile=np.median, distance_measure="manhattan"):
"""
Calculates the absolute differences in each fraction and the profile distances relative to the center of a cluster.
Per default this is the manhattan distance to the median profile.
Args:
df_cluster: df, 0-1 normalized profiles of cluster members, should already be filtered for full coverage and be in full wide format.
complex_profile: fun, function provided to apply for calculating the reference profile, default: np.median.
distance_measure: str, selected distance measure to calculate. Currently only 'manhattan' is supported, everything else raises a ValueError.
self attributes:
None
Returns:
df_distances_aggregated: df, proteins x maps; if stacked, the distance column is currently named 0 but contains Manhattan distances.
df_distances_individual: df, same shape as df_cluster, but now with absolute differences to the reference.
self attributes:
None
"""
df_distances_aggregated = pd.DataFrame()
ref_profile = pd.DataFrame(df_cluster.apply(complex_profile, axis=0, result_type="expand")).T
df_distances_individual = df_cluster.apply(lambda x: np.abs(x-ref_profile.iloc[0,:]), axis=1)
# loop over maps
maps = set(df_cluster.columns.get_level_values("Map"))
for m in maps:
if distance_measure == "manhattan":
d_m = pw.manhattan_distances(df_cluster.xs(m, level="Map", axis=1), ref_profile.xs(m, level="Map", axis=1))
else:
raise ValueError(distance_measure)
d_m = pd.DataFrame(d_m, columns=[m], index=df_cluster.index)
df_distances_aggregated = pd.concat([df_distances_aggregated, d_m], axis=1)
df_distances_aggregated.columns.set_names(names="Map", inplace=True)
return df_distances_aggregated, df_distances_individual
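# Worked example (illustrative only, made-up numbers): for a reference (median) profile m and a protein
# profile p over the fractions, the per-fraction deviations are |p_f - m_f| and the aggregated Manhattan
# distance is their sum, d(p, m) = sum_f |p_f - m_f|.
#
#   p = np.array([0.1, 0.4, 0.5])    # hypothetical 0-1 normalized profile
#   m = np.array([0.2, 0.3, 0.5])    # hypothetical median profile of the cluster
#   individual = np.abs(p - m)       # -> [0.1, 0.1, 0.0]
#   aggregated = individual.sum()    # -> 0.2, the Manhattan distance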
def profiles_plot(self, map_of_interest="Map1", cluster_of_interest="Proteasome"):
"""
The function allows the plotting of filtered and normalized spatial proteomic data using plotly.express.
The median profile is also calculated based on the overlapping proteins. Profiles of proteins that are not quantified in all maps are dashed.
Args:
map_of_interest: str, must be in self.map_names
cluster_of_interest: str, must be in self.markerproteins.keys()
self attributes:
df_allclusters_01_unfiltered_mapfracunstacked: df, contains 0-1 normalized profiles for all markerproteins detected in any map
Returns:
abundance_profiles_and_median_figure: plotly line plot, displaying the relative abundance profiles.
"""
try:
df_setofproteins = self.df_allclusters_01_unfiltered_mapfracunstacked.xs(cluster_of_interest, level="Cluster", axis=0)
df_setofproteins_median = df_setofproteins.dropna().xs(map_of_interest, level="Map", axis=1).median(axis=0)
# fractions get sorted
df_setofproteins = df_setofproteins.xs(map_of_interest, level="Map", axis=1).stack("Fraction")
df_setofproteins = df_setofproteins.reindex(index=natsort.natsorted(df_setofproteins.index))
df_setofproteins.name = "normalized profile"
# make it available for plotting
df_setofproteins = df_setofproteins.reindex(index=natsort.natsorted(df_setofproteins.index))
df_setofproteins = df_setofproteins.reset_index()
abundance_profiles_figure = px.line(df_setofproteins,
x="Fraction",
y="normalized profile",
color="Gene names",
line_group="Sequence" if "Sequence" in df_setofproteins.columns else "Gene names",
template="simple_white",
title="Relative abundance profile for {} of <br>the protein cluster: {}".format(map_of_interest, cluster_of_interest)
)
df_setofproteins_median.name = "normalized profile"
#fractions get sorted
df_setofproteins_median = df_setofproteins_median.reindex(index=natsort.natsorted(df_setofproteins_median.index))
# make it available for plotting
df_setofproteins_median = df_setofproteins_median.reset_index()
df_setofproteins_median.insert(0, "Gene names", np.repeat("Median profile", len(df_setofproteins_median)))
abundance_profiles_and_median_figure = abundance_profiles_figure.add_scatter(x=df_setofproteins_median["Fraction"],
y=df_setofproteins_median["normalized profile"],
name="Median profile"
)
# dash lines for proteins that have insufficient coverage across maps
abundance_profiles_and_median_figure.for_each_trace(lambda x: x.update(line={"dash":"dash"}),
selector=lambda x: x.name in self.genenames_sortedout_list)
return abundance_profiles_and_median_figure
except:
return "This protein cluster was not quantified"
def quantification_overview(self, cluster_of_interest="Proteasome"):
"""
Args:
self.df_allclusters_clusterdist_fracunstacked_unfiltered
columns: 01K, 03K, 06K, 12K, 24K, 80K
index: Gene names, Protein IDs, C-Score, Q-value, Map, Compartment, Cluster
Returns:
df
"""
df_quantification_overview = self.df_allclusters_clusterdist_fracunstacked_unfiltered.xs(cluster_of_interest, level="Cluster", axis=0)\
[self.fractions[0]].unstack("Map")
if "Sequence" in df_quantification_overview.index.names:
df_quantification_overview = df_quantification_overview.droplevel([i for i in df_quantification_overview.index.names if not i in ["Sequence","Gene names"]])
else:
df_quantification_overview = df_quantification_overview.droplevel([i for i in df_quantification_overview.index.names if not i=="Gene names"])
df_quantification_overview = df_quantification_overview.notnull().replace({True: "x", False: "-"})
return df_quantification_overview
def distance_boxplot(self, cluster_of_interest="Proteasome"):
"""
A box plot for one desired cluster, across all maps, is generated, displaying the distribution of e.g. the
Manhattan distance.
Args:
self:
df_distance_noindex: stored as attribute (self.df_distance_noindex), index is reset.
It contains the column name "distance", in which the e.g. Manhattan distances for each individual protein
of the specified clusters (see self.markerproteins) are stored
map_names: individual map names are stored as an index
Returns:
distance_boxplot_figure: boxplot. Along the x-axis the maps, along the y-axis the distances are shown
"""
map_names = self.map_names
df_distance_noindex = self.df_distance_noindex
# "Gene names", "Map", "Cluster" and transferred into the index
df_distance_map_cluster_gene_in_index = df_distance_noindex.set_index(["Gene names", "Map", "Cluster"])
if "Sequence" in df_distance_map_cluster_gene_in_index.columns:
df_distance_map_cluster_gene_in_index.set_index("Sequence", append=True, inplace=True)
df_cluster_xmaps_distance_with_index = pd.DataFrame()
try:
# for each individual map and a defined cluster data will be extracted from the dataframe
# "df_distance_map_cluster_gene_in_index" and appended to the new dataframe df_cluster_xmaps_distance_with_index
for maps in map_names:
plot_try = df_distance_map_cluster_gene_in_index.xs((cluster_of_interest, maps),
level=["Cluster", "Map"], drop_level=False)
df_cluster_xmaps_distance_with_index = df_cluster_xmaps_distance_with_index.append(plot_try)
df_cluster_xmaps_distance_with_index["Combined Maps"] = "Combined Maps"
#number of proteins within one cluster
self.proteins_quantified_across_all_maps = df_cluster_xmaps_distance_with_index.unstack("Map").shape[0]
# index will be reset, required by px.box
df_cluster_xmaps_distance = df_cluster_xmaps_distance_with_index.reset_index()
distance_boxplot_figure = go.Figure()
distance_boxplot_figure.add_trace(go.Box(
x=df_cluster_xmaps_distance["Map"],
y=df_cluster_xmaps_distance["distance"],
boxpoints="all",
whiskerwidth=0.2,
marker_size=2,
hovertext=df_cluster_xmaps_distance["Gene names"]
))
distance_boxplot_figure.add_trace(go.Box(
x=df_cluster_xmaps_distance["Combined Maps"],
y=df_cluster_xmaps_distance["distance"],
boxpoints="all",
whiskerwidth=0.2,
marker_size=2,
hovertext=df_cluster_xmaps_distance["Gene names"]
))
distance_boxplot_figure.update_layout(
title="Manhattan distance distribution for <br>the protein cluster: {}".format(cluster_of_interest),
autosize=False,
showlegend=False,
width=500,
height=500,
# black box around the graph
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
title="Map",
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
title="distance",
mirror=True),
template="simple_white"
)
return distance_boxplot_figure
except:
self.cache_cluster_quantified = False
def distance_to_median_boxplot(self, cluster_of_interest="Proteasome"):
"""
A box plot for one desired cluster, across all maps and fractions, is generated, displaying the
distribution of the distance to the median. For each fraction, one box plot will be displayed.
Args:
self:
df_allclusters_clusterdist_fracunstacked, dataframe with single level column, stored as attribute
(self.df_allclusters_clusterdist_fracunstacked), in which "Fraction" is unstacked. It contains only the
normalized data of individual protein clusters minus the median of the respective protein cluster
for each fraction.
map_names: individual map names are stored as an index
Returns:
distance_to_median_boxplot_figure: Box plot. Along the x-axis, the maps are shown; along the y-axis,
the distances are plotted
"""
df_boxplot_manymaps = pd.DataFrame()
try:
# for each individual map and a defined cluster data will be extracted from the dataframe
# "df_allclusters_clusterdist_fracunstacked" and appended to the new dataframe df_boxplot_manymaps
for maps in self.map_names:
plot_try = self.df_allclusters_clusterdist_fracunstacked.xs((cluster_of_interest, maps), level=["Cluster", "Map"], drop_level=False)
df_boxplot_manymaps = df_boxplot_manymaps.append(plot_try)
self.df_boxplot_manymaps = df_boxplot_manymaps
# index will be reset, required by px.violin
df_boxplot_manymaps = abs(df_boxplot_manymaps.stack("Fraction"))
df_boxplot_manymaps.name = "distance"
df_boxplot_manymaps = df_boxplot_manymaps.reindex(index=natsort.natsorted(df_boxplot_manymaps.index))
df_boxplot_manymaps = df_boxplot_manymaps.reset_index()
# box plot will be generated, every fraction will be displayed in a single plot
distance_to_median_boxplot_figure = px.box(df_boxplot_manymaps,
x="Map",
y="distance",
facet_col="Fraction",
facet_col_wrap=2,
boxmode="overlay", height=900, width=700, points="all",
hover_name="Gene names",
template="simple_white",
title="Distribution of the distance to the median for <br>the protein cluster: {}".format(cluster_of_interest))
return distance_to_median_boxplot_figure
except:
return "This protein cluster was not quantified"
def dynamic_range(self):
"""
Dynamic range of each individual protein cluster (of the median profile) across all maps is calculated.
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
df_01_stacked: "MAP" and "Fraction" are stacked; the data in the column "normalized profile" is used for plotting. Additionally the columns
"MS/MS count" and "Ratio H/L count | Ratio H/L variability [%] | Ratio H/L" are found in LFQ and SILAC data respectively
Returns:
None. The dynamic range table (no index, columns: "Max", "Min", "Dynamic Range", "Cluster") is stored as json
in self.analysis_summary_dict["Dynamic Range"]
"""
df_setofproteins_allMaps = pd.DataFrame()
df_dynamicRange = pd.DataFrame()
df_01_stacked = self.df_01_stacked
for clusters in self.markerproteins:
try:
df_setofproteins_allMaps = pd.DataFrame()
for marker in self.markerproteins[clusters]:
try:
df_marker_allMaps = df_01_stacked.xs(marker, level="Gene names", drop_level=False)
except KeyError:
continue
df_setofproteins_allMaps = df_setofproteins_allMaps.append(df_marker_allMaps)
df_setofproteins_allMaps_median = df_setofproteins_allMaps["normalized profile"].unstack("Fraction").median()
df_dynamicRange = df_dynamicRange.append(pd.DataFrame(np.array([[max(df_setofproteins_allMaps_median),
min(df_setofproteins_allMaps_median),
max(df_setofproteins_allMaps_median)-min(df_setofproteins_allMaps_median),
clusters]]),
columns=["Max", "Min", "Dynamic Range", "Cluster"]),
ignore_index=True)
except:
continue
self.analysis_summary_dict["Dynamic Range"] = df_dynamicRange.to_json()
def plot_dynamic_range(self):
"""
Dynamic range of each individual protein cluster (of the median profile) across all maps is displayed.
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
df_01_stacked: "MAP" and "Fraction" are stacked; the data in the column "normalized profile" is used for plotting. Additionally the columns
"MS/MS count" and "Ratio H/L count | Ratio H/L variability [%] | Ratio H/L" are found in LFQ and SILAC data respectively
Returns:
fig_dynamicRange: Bar plot, displaying the dynamic range for each protein cluster
uses the dynamic range table stored in self.analysis_summary_dict["Dynamic Range"] (no index, columns: "Max", "Min", "Dynamic Range", "Cluster")
"""
fig_dynamicRange = px.bar(pd.read_json(self.analysis_summary_dict["Dynamic Range"]),
x="Cluster",
y="Dynamic Range",
base="Min",
template="simple_white",
width=1000,
height=500).update_xaxes(categoryorder="total ascending")
return fig_dynamicRange
def results_overview_table(self):
"""
A dataframe will be created that provides information about "range", "median" and "standardeviation",
given as the column names, based on the data given in df_distance_noindex
Args:
self:
df_distance_noindex: stored as attribute (self.df_distance_noindex), index is reset. It contains the column name "distance",
in which the e.g. Manhattan distances for each individual protein of the specified clusters (see self.markerproteins)
are stored
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
"""
df_distance_noindex = self.df_distance_noindex
df_distance_map_cluster_gene_in_index = df_distance_noindex.set_index(["Gene names", "Map", "Cluster"])
map_names = self.map_names
df_overview = pd.DataFrame()
for clusters in self.markerproteins:
#if a certain cluster is not available in the dataset at all
try:
for maps in map_names:
df_dist_map_cluster = df_distance_map_cluster_gene_in_index.xs((clusters, maps), level=["Cluster", "Map"], drop_level=False)
statistic_table = {"range": (df_dist_map_cluster["distance"].max(axis=0)) - (df_dist_map_cluster["distance"].min(axis=0)),
"median": df_dist_map_cluster["distance"].median(axis=0),
"standardeviation": df_dist_map_cluster["distance"].std(axis=0),
"Cluster": clusters,
"Map": maps
}
statistic_series = pd.Series(data=statistic_table)
# Author: <NAME>
# Created: 6/29/20, 3:41 PM
import logging
import os
from textwrap import wrap
import seaborn
import argparse
import numpy as np
import pandas as pd
from typing import *
from functools import reduce
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
# noinspection All
import pathmagic
# noinspection PyUnresolvedReferences
import mg_log # runs init in mg_log and configures logger
# Custom imports
from mg_io.general import save_obj, load_obj
from mg_viz.general import square_subplots, set_size
from mg_general import Environment, add_env_args_to_parser
from mg_stats.shelf import update_dataframe_with_stats, tidy_genome_level, \
_helper_df_joint_reference
from mg_general.general import all_elements_equal, fix_names, next_name, os_join, get_value
from mg_viz.colormap import ColorMap as CM
# ------------------------------ #
# Parse CMD #
# ------------------------------ #
from mg_viz.shelf import number_formatter, update_tool_names_to_full
from mg_viz.stats_large import case_insensitive_match
parser = argparse.ArgumentParser("Visualize statistics collected per gene.")
parser.add_argument('--pf-data', required=True)
parser.add_argument('--ref-5p', required=False, nargs="+", help="Reference(s) on which to compare 5' predictions")
parser.add_argument('--ref-3p', required=False, nargs="+", help="Reference(s) on which to compare 3' predictions")
parser.add_argument('--pf-checkpoint-5p')
parser.add_argument('--pf-checkpoint-3p')
parser.add_argument('--tools', nargs="+", help="If set, only compare these tools. Otherwise all tools are chosen")
parser.add_argument('--parse-names', action='store_true', help="If set, try to shorten genome names. Useful only "
"if genome IDs in the data are actually names")
add_env_args_to_parser(parser)
parsed_args = parser.parse_args()
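# Hypothetical invocation (script name, paths and tool labels are placeholders, not taken from the original
# source):
#
#   python viz_stats_per_gene.py --pf-data per_gene_stats.csv \
#       --ref-5p verified --ref-3p verified --tools mgm2 mga mprodigal fgs --parse-names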
# ------------------------------ #
# Main Code #
# ------------------------------ #
# Load environment variables
my_env = Environment.init_from_argparse(parsed_args)
# Setup logger
logging.basicConfig(level=parsed_args.loglevel)
logger = logging.getLogger("logger") # type: logging.Logger
def get_stats_at_gcfid_level_with_reference(df, tools, reference):
# type: (pd.DataFrame, List[str], str) -> pd.DataFrame
list_entries = list()
for gcfid, df_group in df.groupby("Genome", as_index=False):
result = dict()
for t in tools:
tag = ",".join([t, reference])
tag_eq = "=".join([t, reference])
if df_group[f"3p:Match({tag_eq})"].sum() == 0:
result[f"Match({tag})"] = np.nan
result[f"Number of Error({tag})"] = np.nan
result[f"Error Rate({tag})"] = np.nan
result[f"Number of Error({tag})"] = np.nan
result[f"Number of Match({tag})"] = np.nan
# result[f"Number of Predictions({t},{t})"] = np.nan
else:
result[f"Match({tag})"] = 100 * df_group[f"5p:Match({tag_eq})"].sum() / float(
df_group[f"3p:Match({tag_eq})"].sum())
result[f"Error Rate({tag})"] = 100 - result[f"Match({tag})"]
result[f"Number of Error({tag})"] = df_group[f"3p:Match({tag_eq})"].sum() - df_group[
f"5p:Match({tag_eq})"].sum()
result[f"Number of Found({tag})"] = df_group[f"3p:Match({tag_eq})"].sum()
result[f"Number of Missed({tag})"] = df_group[f"5p-{reference}"].count() - df_group[
f"3p:Match({tag_eq})"].sum()
result[f"Number of Match({tag})"] = df_group[f"5p:Match({tag_eq})"].sum()
# result[f"Number of Predictions({t},{t})"] = df_group[f"5p-{t}"].count()
result[f"Number of IC5p Match({tag})"] = (
df_group[f"5p:Match({tag_eq})"] & df_group[f"Partial5p-{reference}"]).sum()
result[f"Number of IC5p Found({tag})"] = (
df_group[f"3p:Match({tag_eq})"] & df_group[f"Partial5p-{reference}"]).sum()
result[f"IC5p Match({tag})"] = 100 * result[f"Number of IC5p Match({tag})"] / result[
f"Number of IC5p Found({tag})"]
result[f"Number of IC3p Match({tag})"] = (
df_group[f"5p:Match({tag_eq})"] & df_group[f"Partial3p-{reference}"]).sum()
result[f"Number of IC3p Found({tag})"] = (
df_group[f"3p:Match({tag_eq})"] & df_group[f"Partial3p-{reference}"]).sum()
result[f"IC3p Match({tag})"] = 100 * result[f"Number of IC3p Match({tag})"] / result[
f"Number of IC3p Found({tag})"]
result[f"Number of Comp Match({tag})"] = (
df_group[f"5p:Match({tag_eq})"] & ~(
df_group[f"Partial5p-{reference}"] | df_group[f"Partial3p-{reference}"])).sum()
result[f"Number of Comp Found({tag})"] = (
df_group[f"3p:Match({tag_eq})"] & ~(
df_group[f"Partial5p-{reference}"] | df_group[f"Partial3p-{reference}"])).sum()
result[f"Comp Match({tag})"] = 100 * result[f"Number of Comp Match({tag})"] / result[
f"Number of Comp Found({tag})"]
for t in tools + [reference]:
result[f"Number of Predictions({t},{t})"] = df_group[f"5p-{t}"].count()
result[f"Runtime({t},{t})"] = df_group[f"Runtime"].mean()
if t != reference:
result[f"Precision({t},{reference})"] = result[f"Number of Found({t},{reference})"] / result[
f"Number of Predictions({t},{t})"]
result[f"Recall({t},{reference})"] = result[f"Number of Found({t},{reference})"] / df_group[
f"5p-{reference}"].count()
result[f"WR({t},{reference})"] = (result[f"Number of Predictions({t},{t})"] - result[
f"Number of Found({t},{reference})"]) / result[f"Number of Predictions({t},{t})"]
result[f"Sensitivity({t},{reference})"] = result[f"Number of Found({t},{reference})"] / df_group[
f"5p-{reference}"].count()
result[f"Specificity({t},{reference})"] = result[f"Number of Found({t},{reference})"] / result[
f"Number of Predictions({t},{t})"]
# result[f"Runtime({t, t})"] = df_group[f"Runtime"].mean()
result["Genome"] = gcfid
result["Genome GC"] = df_group.at[df_group.index[0], "Genome GC"]
result["Chunk Size"] = df_group.at[df_group.index[0], "Chunk Size"]
result["Number in Reference"] = result[f"Number of Predictions({reference},{reference})"]
list_entries.append(result)
return pd.DataFrame(list_entries)
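# Hedged summary of the per-genome metrics computed above (the definitions follow directly from the code;
# the numbers are made up for illustration). For one tool t compared against reference r on one genome:
#
#   found     = t predictions whose 3' end matches an r gene                      # "Number of Found"
#   match     = those of them that also agree on the 5' end                       # "Number of Match"
#   precision = found / number_of_t_predictions
#   recall    = sensitivity = found / number_of_r_genes
#   WR        = (number_of_t_predictions - found) / number_of_t_predictions
#
#   e.g. 950 found out of 1000 predictions against 1100 reference genes:
#   precision = 0.95, recall ≈ 0.864, WR = 0.05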
# def get_stats_at_gcfid_level(df, tools, reference):
# # type: (pd.DataFrame, List[str], str) -> pd.DataFrame
#
# list_entries = list()
#
# ps = powerset(tools, min_len=2)
#
#
#
# for gcfid, df_group in df.groupby("Genome", as_index=False):
# result = dict()
#
# for comb in ps:
# tag = ",".join(comb)
# tag_eq = "=".join(comb)
#
# result[f"Match({tag})"] = 100 * df_group[f"5p:{tag_eq}"].sum()/ float(df_group[f"3p:{tag_eq}"].sum())
#
# result["Genome"] = gcfid
# result["Chunk Size"] = df_group.at[df_group.index[0], "Chunk Size"]
# list_entries.append(result)
#
# return pd.DataFrame(list_entries)
def viz_stats_at_gcfid_level(df, tools):
pass
def ridgeplot(df):
# Create the data
names = sorted(set(df["Genome"]))
x = df["GC Diff"].values
g = df.apply(lambda r: f"{r['Genome']} ({r['Genome GC']:.2f})", axis=1)
df = pd.DataFrame(dict(x=x, g=g))
hue_order = sorted(set(g), key=lambda x: float(x.split("(")[1].split(")")[0]))
# Initialize the FacetGrid object
pal = seaborn.cubehelix_palette(10, rot=-.25, light=.7)
g = seaborn.FacetGrid(df, row="g", hue="g",
hue_order=hue_order,
row_order=hue_order,
aspect=15, height=.5, palette=pal)
# Draw the densities in a few steps
g.map(seaborn.kdeplot, "x", clip_on=False, shade=True, alpha=1, lw=1.5, bw=.2)
g.map(seaborn.kdeplot, "x", clip_on=False, color="w", lw=2, bw=.2)
g.map(plt.axhline, y=0, lw=2, clip_on=False)
# Define and use a simple function to label the plot in axes coordinates
def label(x, color, label):
ax = plt.gca()
ax.text(0, .2, label, fontweight="bold", color=color,
ha="left", va="center", transform=ax.transAxes)
g.map(label, "x")
# Set the subplots to overlap
g.fig.subplots_adjust(hspace=-.25)
# Remove axes details that don't play well with overlap
g.set_titles("")
g.set(yticks=[])
g.despine(bottom=True, left=True)
plt.show()
def number_and_match(env, df_total, hue_order, col_number, col_perc, sup_title):
g = seaborn.FacetGrid(df_total, col="Genome", col_wrap=4, hue="Tool", sharey=False)
g.map(plt.plot, "Chunk Size", col_number, linestyle="dashed")
# g.map(plt.plot, "x", "y_fit")
g.set_xlabels("Chunk Size")
g.set_titles("{col_name}")
g.set(ylim=(0, None))
g.set(xlim=(0, 5100))
g.set_ylabels("Number of predictions")
g.add_legend()
for ax, (_, subdata) in zip(g.axes, df_total.groupby('Genome')):
ax2 = ax.twinx()
subdata = subdata.sort_values("Chunk Size")
for hue in hue_order:
subdata_hue = subdata[subdata["Tool"] == hue]
ax2.plot(subdata_hue["Chunk Size"], subdata_hue[col_perc], label=hue)
ax2.set_ylim(40, 100)
# subdata.plot(x='data_sondage', y='impossible', ax=ax2, legend=False, color='r')
plt.tight_layout()
plt.savefig(next_name(env["pd-work"]))
plt.suptitle(sup_title)
plt.show()
def viz_number_of_predictions_for_short(env, df):
# type: (Environment, pd.DataFrame) -> None
df = df[df["Tool"] != "VERIFIED"]
hue_order = sorted(df["Tool"].unique())
df["Found%"] = 100 * df["Number of Found"] / df["Number of Predictions"]
g = seaborn.FacetGrid(df, col="Genome", col_wrap=4, hue="Tool", sharey=True)
xlim = (0, 5100)
g.map(plt.plot, "Chunk Size", "Number of Found", linestyle="dashed")
g.map(plt.plot, "Chunk Size", "Number of Predictions")
for ax in g.axes:
ax.yaxis.set_major_formatter(FuncFormatter(number_formatter))
# g.map(plt.plot, "x", "y_fit")
g.set_xlabels("Fragment Size (nt)")
g.set_titles("{col_name}", style="italic")
g.set(ylim=(0, None))
g.set(xlim=xlim)
g.set_ylabels("Number of predictions")
# for ax, (_, subdata) in zip(g.axes, df.groupby('Genome')):
# # ax2 = ax.twinx()
# ax2 = inset_axes(ax, width="50%", height="50%", loc=1, borderpad=1)
#
# subdata = subdata.sort_values("Chunk Size")
# for hue in hue_order:
# subdata_hue = subdata[subdata["Tool"] == hue]
# ax2.plot(subdata_hue["Chunk Size"], subdata_hue["Found%"], label=hue)
# ax2.set_ylim(40,100)
# ax2.set_ylabel("TPR")
# ax2.set_xlim(*xlim)
# ax2.set_xticks([])
# ax2.set_yticks([])
#
# # subdata.plot(x='data_sondage', y='impossible', ax=ax2, legend=False, color='r')
# plt.tight_layout()
g.add_legend()
plt.savefig(next_name(env["pd-work"]))
plt.show()
g = seaborn.FacetGrid(df, col="Genome", col_wrap=4, hue="Tool", sharey=False)
g.map(plt.plot, "Chunk Size", "Found%")
# g.map(plt.plot, "Chunk Size", "Number of Found", linestyle="dashed")
# g.map(plt.plot, "x", "y_fit")
g.set_xlabels("Fragment Size (nt)")
g.set_titles("{col_name}", style="italic")
g.set(ylim=(0, None))
g.set(xlim=(0, 5100))
g.set_ylabels("Number of predictions")
g.add_legend()
plt.savefig(next_name(env["pd-work"]))
plt.show()
df = df[df["Tool"].isin({"MGM2", "MPRODIGAL", "FGS", "MGA"})]
g = seaborn.FacetGrid(df, col="Genome", col_wrap=4, hue="Tool", sharey=True)
g.map(plt.plot, "Recall", "Precision")
# g.map(plt.plot, "Chunk Size", "Number of Found", linestyle="dashed")
# g.map(plt.plot, "x", "y_fit")
# g.set_xlabels("Fragment Size (nt)")
g.set_titles("{col_name}", style="italic")
g.set(ylim=(0, 1))
g.set(xlim=(0, 1))
# g.set_ylabels("Number of predictions")
g.add_legend()
plt.savefig(next_name(env["pd-work"]))
plt.show()
g = seaborn.FacetGrid(df, col="Genome", col_wrap=4, hue="Tool", sharey=True, palette=CM.get_map("tools"))
g.map(plt.plot, "Chunk Size", "WR", linestyle="dashed")
g.map(plt.plot, "Chunk Size", "Precision")
# g.map(plt.plot, "Chunk Size", "Number of Found", linestyle="dashed")
# g.map(plt.plot, "x", "y_fit")
g.set_titles("{col_name}", style="italic")
g.set(ylim=(0, 1))
g.set(xlim=(0, 5100))
g.set_xlabels("Fragment Size (nt)")
g.set_ylabels("Score")
g.add_legend()
# inset_ylim=(0, max(df["Number of Predictions"])+100)
# for ax, (_, subdata) in zip(g.axes, df.groupby('Genome')):
# # ax2 = ax.twinx()
# ax2 = inset_axes(ax, width="40%", height="40%", loc=7, borderpad=1)
#
# subdata = subdata.sort_values("Chunk Size")
# for hue in hue_order:
# subdata_hue = subdata[subdata["Tool"] == hue]
# ax2.plot(subdata_hue["Chunk Size"], subdata_hue["Number of Predictions"], label=hue,
# color=CM.get_map("tools")[hue])
# # ax2.set_ylim(40,100)
# ax2.set_ylabel("Total Predictions")
# ax2.set_xlim(*xlim)
# ax2.set_ylim(*inset_ylim)
#
# ax2.yaxis.set_major_formatter(FuncFormatter(number_formatter))
# ax2.set_xticks([])
# # ax2.set_yticks([])
#
# # subdata.plot(x='data_sondage', y='impossible', ax=ax2, legend=False, color='r')
plt.savefig(next_name(env["pd-work"]))
plt.show()
def viz_plot_per_genome_y_error_x_chunk(env, df):
genomes = sorted(df["Genome"].unique())
nrows, ncols = square_subplots(len(genomes))
values_to_melt = ["Match", "Number of Error", "Number of Found", "Number of Match", "Number of Predictions",
"Number of IC5p Match", "Number of IC5p Found", "Number of IC3p Match", "Number of IC3p Found",
"Number of Comp Match", "Number of Comp Found", "Precision", "Recall", "WR", "Number of Missed",
"IC3p Match", "IC5p Match", "Comp Match"]
df_total = list()
for v in values_to_melt:
if v == "Precision":
print('hi')
df_curr = pd.melt(df, id_vars=["Genome", "Chunk Size", "Genome GC"],
value_vars=[x for x in df.columns if v == x.split("(")[0].strip()],
var_name="Combination", value_name=v)
df_curr["Tool"] = df_curr["Combination"].apply(lambda x: x.split("(")[1].split(",")[0].upper())
df_total.append(df_curr)
df_total = reduce(lambda df1, df2: pd.merge(df1, df2, on=["Genome", "Chunk Size", "Genome GC", "Tool"],
how="outer"), df_total)
viz_number_of_predictions_for_short(env, df_total)
# return
# df_total = pd.melt(
# df_total,
# id_vars=["Genome", "Chunk Size", "Genome GC", "Combination"],
# value_vars=values_to_melt,
# var_name="Metric", value_name="Score")
# dfs = [df_tmp.set_index(["Genome", "Chunk Size", "Genome GC"]) for df_tmp in df_total]
# dfs = pd.concat(dfs, ignore_index=True, sort=False, axis=1)
hue_order = sorted(df_total["Tool"].unique())
g = seaborn.FacetGrid(df_total, col="Genome", col_wrap=4, hue="Tool", sharey=True, hue_order=hue_order)
# g.map(plt.plot, "Chunk Size", "Match", marker="o")
g.map(plt.plot, "Chunk Size", "Number of Found", linestyle="--")
# g.map(plt.plot, "x", "y_fit")
g.set_xlabels("Chunk Size")
g.set_ylabels("Metric")
g.set(ylim=(0, None))
g.set(xlim=(None, None))
g.add_legend()
for ax, (_, subdata) in zip(g.axes, df_total.groupby('Genome')):
ax2 = ax.twinx()
subdata = subdata.sort_values("Chunk Size")
for hue in hue_order:
subdata_hue = subdata[subdata["Tool"] == hue]
ax2.plot(subdata_hue["Chunk Size"], subdata_hue["Match"], label=hue)
ax2.set_ylim(40, 100)
# subdata.plot(x='data_sondage', y='impossible', ax=ax2, legend=False, color='r')
plt.tight_layout()
plt.savefig(next_name(env["pd-work"]))
plt.show()
g = seaborn.FacetGrid(df_total, col="Genome", col_wrap=4, hue="Tool", sharey=False)
g.map(plt.plot, "Chunk Size", "Number of Predictions")
# g.map(plt.plot, "x", "y_fit")
g.set_xlabels("Chunk Size")
g.set_titles("{col_name}")
g.set(ylim=(0, None))
# g.set(xlim=(None,5100))
g.set_ylabels("Number of predictions")
g.add_legend()
plt.savefig(next_name(env["pd-work"]))
plt.show()
# Incomplete
g = seaborn.FacetGrid(df_total, col="Genome", col_wrap=4, hue="Tool", sharey=False)
g.map(plt.plot, "Chunk Size", "Number of IC5p Found", linestyle="dashed")
# g.map(plt.plot, "x", "y_fit")
g.set_xlabels("Chunk Size")
g.set_titles("{col_name}")
g.set(ylim=(0, None))
# g.set(xlim=(0, 5100))
g.set_ylabels("Number of predictions")
g.add_legend()
for ax, (_, subdata) in zip(g.axes, df_total.groupby('Genome')):
ax2 = ax.twinx()
subdata = subdata.sort_values("Chunk Size")
for hue in hue_order:
subdata_hue = subdata[subdata["Tool"] == hue]
ax2.plot(subdata_hue["Chunk Size"], subdata_hue["IC5p Match"], label=hue)
ax2.set_ylim(40, 100)
# subdata.plot(x='data_sondage', y='impossible', ax=ax2, legend=False, color='r')
plt.tight_layout()
plt.savefig(next_name(env["pd-work"]))
plt.suptitle("IC5p")
plt.show()
number_and_match(env, df_total, hue_order, "Number of IC5p Match", "IC5p Match", "IC5p")
number_and_match(env, df_total, hue_order, "Number of IC3p Match", "IC3p Match", "IC3p")
number_and_match(env, df_total, hue_order, "Number of Comp Match", "Comp Match", "Comp")
df_comprehensive = df_total.groupby(["Chunk Size", "Tool"], as_index=False).sum()
df_comprehensive = pd.melt(df_comprehensive, id_vars=["Chunk Size", "Tool"],
value_vars=[f"Number of {x} Match" for x in ["IC3p", "IC5p", "Comp"]] + [
"Number of Match"],
var_name="Partial", value_name="Value")
df_comprehensive_2 = pd.melt(df_total.groupby(["Chunk Size", "Tool"], as_index=False).sum(),
id_vars=["Chunk Size", "Tool"],
value_vars=[f"Number of {x} Found" for x in ["IC3p", "IC5p", "Comp"]] + [
"Number of Found"],
var_name="Partial", value_name="Value")
df_comprehensive["Match"] = 100 * df_comprehensive["Value"] / df_comprehensive_2["Value"]
g = seaborn.lmplot("Chunk Size", "Match", data=df_comprehensive, hue="Tool", col="Partial", lowess=True)
g.set(xlim=(0, 5010), ylim=(0, 100))
plt.show()
print(df_comprehensive.to_csv())
return
fig, axes = plt.subplots(2, 4, sharey="all", sharex="all")
axes = axes.ravel()
for i, g in enumerate(genomes):
ax = axes[i] # type: plt.Axes
df_curr = df[df["Genome"] == g]
df_curr = pd.melt(df_curr, id_vars=["Genome", "Chunk Size"],
value_vars=[x for x in df_curr.columns if "Number of Error(" in x],
var_name="Combination", value_name="Number of Error")
seaborn.lineplot("Chunk Size", "Number of Error", data=df_curr, hue="Combination", ax=ax, legend=False)
plt.show()
fig, axes = plt.subplots(2, 4, sharey="all", sharex="all")
axes = axes.ravel()
for i, g in enumerate(genomes):
ax = axes[i] # type: plt.Axes
df_curr = df[df["Genome"] == g]
df_curr = pd.melt(df_curr, id_vars=["Genome", "Chunk Size"],
value_vars=[x for x in df_curr.columns if "Number of Found(" in x],
var_name="Combination", value_name="Number of Found")
seaborn.lineplot("Chunk Size", "Number of Found", data=df_curr, hue="Combination", ax=ax, legend=False)
plt.show()
def viz_plot_per_genome_5p(env, df_gcfid):
# type: (Environment, pd.DataFrame) -> None
pass
def viz_stats_genome_level(env, df_gcfid, tools, reference, **kwargs):
# type: (Environment, pd.DataFrame, List[str], str, Dict[str, Any]) -> None
# 3' analysis
viz_plot_per_genome_y_error_x_chunk(env, df_gcfid)
# 5' analysis
viz_plot_per_genome_5p(env, df_gcfid)
def viz_stats_3p_number_of_predictions_number_of_found(env, df_tidy, reference):
# type: (Environment, pd.DataFrame, str) -> None
g = seaborn.FacetGrid(df_tidy, col="Genome", col_wrap=4, hue="Tool", sharey=True, palette=CM.get_map("tools"))
g.map(plt.plot, "Chunk Size", "Number of Predictions", linestyle="dashed")
g.map(plt.plot, "Chunk Size", "Number of Found")
g.set_titles("{col_name}", style="italic")
# g.set(ylim=(0, 1))
g.set(xlim=(0, 5100))
g.set_xlabels("Fragment Size (nt)")
g.set_ylabels("Score")
g.add_legend()
plt.savefig(next_name(env["pd-work"]))
plt.show()
def viz_stats_3p_sensitivity_specificity(env, df_tidy, reference):
# type: (Environment, pd.DataFrame, str) -> None
g = seaborn.FacetGrid(df_tidy, col="Genome", col_wrap=4, hue="Tool", sharey=True, palette=CM.get_map("tools"))
g.map(plt.plot, "Chunk Size", "Sensitivity")
g.set_titles("{col_name}", style="italic")
# g.set(ylim=(0, 1))
g.set(xlim=(0, 5100))
g.set_xlabels("Fragment Size (nt)")
g.set_ylabels("Sensitivity")
g.add_legend()
plt.savefig(next_name(env["pd-work"]))
plt.show()
g = seaborn.FacetGrid(df_tidy, col="Genome", col_wrap=4, hue="Tool", sharey=True, palette=CM.get_map("tools"))
g.map(plt.plot, "Chunk Size", "Specificity")
g.set_titles("{col_name}", style="italic")
# g.set(ylim=(0, 1))
g.set(xlim=(0, 5100))
g.set_xlabels("Fragment Size (nt)")
g.set_ylabels("Specificity")
g.add_legend()
plt.savefig(next_name(env["pd-work"]))
plt.show()
g = seaborn.FacetGrid(df_tidy, col="Genome", col_wrap=4, hue="Tool", sharey=True, palette=CM.get_map("tools"))
g.map(plt.plot, "Specificity", "Sensitivity")
g.set_titles("{col_name}", style="italic")
# g.set(ylim=(0, 1))
# g.set(xlim=(0, 5100))
g.set_ylabels("Sensitivity")
g.set_xlabels("Specificity")
g.add_legend()
plt.savefig(next_name(env["pd-work"]))
plt.show()
def viz_stats_3p_number_of_predictions_precision(env, df_tidy, reference):
# type: (Environment, pd.DataFrame, str) -> None
df_tidy = df_tidy[df_tidy["Tool"].apply(lambda x: x.lower()) != reference.lower()]
df_tidy = df_tidy[df_tidy["Tool"].apply(lambda x: x.lower()) != "mgm"]
hue_order = sorted(df_tidy["Tool"].unique())
cw = 4
g = seaborn.FacetGrid(df_tidy, col="Genome", col_wrap=cw, hue="Tool", hue_order=hue_order,
sharey=True, palette=CM.get_map("tools"))
g.map(plt.plot, "Chunk Size", "Number of Predictions", linestyle="dashed")
g.map(plt.plot, "Chunk Size", "Precision")
g.set_titles("{col_name}", style="italic")
# g.set(ylim=(0, 1))
g.set(xlim=(0, 5100))
g.set_xlabels("Fragment Size (nt)")
g.set_ylabels("Number of Predictions")
g.add_legend()
counter = 0
for ax, (_, subdata) in zip(g.axes, df_tidy.groupby('Genome')):
ax.yaxis.set_major_formatter(FuncFormatter(number_formatter))
if counter == 0:
yticklabels = ax.get_yticklabels()
ax2 = ax.twinx()
subdata = subdata.sort_values("Chunk Size")
for hue in hue_order:
subdata_hue = subdata[subdata["Tool"] == hue]
ax2.plot(subdata_hue["Chunk Size"], subdata_hue["Precision"], label=hue,
color=CM.get_map("tools")[hue.lower()])
ax2.set_ylim(0, 1)
counter += 1
if counter % cw == 0:
ax2.set_ylabel("Precision")
else:
ax2.set_yticks([])
# if counter % cw != 1:
# ax.set_yticklabels([])
# else:
# ax.set_yticklabels(yticklabels)
plt.tight_layout()
plt.savefig(next_name(env["pd-work"]))
plt.show()
# tabulate
# df_piv = df_tidy.pivot(index="Genome", columns="Tool", values=["Precision"])
# print(df_piv.to_csv())
def f_mi(x):
d = []
d.append(x['a'].sum())
d.append(x['a'].max())
d.append(x['b'].mean())
d.append((x['c'] * x['d']).sum())
return pd.Series(d, index=[['a', 'a', 'b', 'c_d'],
['sum', 'max', 'mean', 'prodsum']])
df1 = df_tidy.groupby(["Chunk Size", "Tool"], as_index=False).sum()
# df_tidy.groupby(["Chunk Size", "Tool"], as_index=False).agg(
# {**{x: ['sum'] for x in df_tidy.columns if x not in {"Chunk Size", "Tool", "Number in Reference"}},
# 'Number in Reference': ['sum']})
df1["Precision"] = df1["Number of Found"] / df1["Number of Predictions"]
df1["WR"] = (df1["Number of Predictions"] - df1["Number of Found"]) / df1["Number of Predictions"]
df1["Sensitivity"] = df1["Number of Found"] / df1["Number in Reference"]
df1["Specificity"] = df1["Number of Found"] / df1["Number of Predictions"]
print(df1.pivot(index="Chunk Size", columns="Tool", values=["Precision", "Number of Found"]))
print(df1.pivot(index="Chunk Size", columns="Tool", values=["Precision"]))
print(df1.pivot(index="Chunk Size", columns="Tool",
values=["Precision", "Number of Missed", "Number of Predictions"]))
print(df1.pivot(index="Chunk Size", columns="Tool",
values=["Sensitivity", "Specificity", "Number of Found", "Number in Reference",
"Number of Predictions"]).to_csv())
print("hi")
df1 = df_tidy.groupby(["Chunk Size", "Tool"], as_index=False).mean()
print(df1.pivot(index="Chunk Size", columns="Tool",
values=["Sensitivity", "Specificity"]).to_csv())
def viz_stats_5p_number_of_errors_number_of_found(env, df_tidy, reference):
# type: (Environment, pd.DataFrame, str) -> None
g = seaborn.FacetGrid(df_tidy, col="Genome", col_wrap=4, hue="Tool", sharey=True, palette=CM.get_map("tools"))
g.map(plt.plot, "Chunk Size", "Number of Found", linestyle="dashed")
g.map(plt.plot, "Chunk Size", "Number of Error")
g.set_titles("{col_name}", style="italic")
# g.set(ylim=(0, 1))
g.set(xlim=(0, 5100))
g.set_xlabels("Fragment Size (nt)")
g.set_ylabels("Sensitivity")
g.add_legend()
plt.savefig(next_name(env["pd-work"]))
plt.show()
def viz_stats_5p_error_rate(env, df_tidy, reference):
# type: (Environment, pd.DataFrame, str) -> None
g = seaborn.FacetGrid(df_tidy, col="Genome", col_wrap=4, hue="Tool", sharey=True, palette=CM.get_map("tools"))
g.map(plt.plot, "Chunk Size", "Error Rate")
g.set_titles("{col_name}", style="italic")
# g.set(ylim=(0, 1))
g.set(xlim=(0, 5100))
g.set_xlabels("Fragment Size (nt)")
g.set_ylabels("Error Rate")
g.add_legend()
plt.savefig(next_name(env["pd-work"]))
plt.show()
def viz_stats_5p_error_rate_partial(env, df_tidy, reference):
# type: (Environment, pd.DataFrame, str) -> None
df_tidy = df_tidy[df_tidy["Tool"].apply(lambda x: x.lower()) != "verified"].copy()
for cond in ["IC5p", "IC3p", "Comp"]:
df_tidy[f"Error Rate {cond}"] = (df_tidy[f"Number of {cond} Found"] - df_tidy[f"Number of {cond} Match"]) / \
df_tidy[f"Number of {cond} Found"]
g = seaborn.FacetGrid(df_tidy, col="Genome", col_wrap=4, hue="Tool", sharey=True, palette=CM.get_map("tools"))
g.map(plt.plot, "Chunk Size", f"Error Rate {cond}")
g.set_titles("{col_name}", style="italic")
# g.set(ylim=(0, 1))
g.set(xlim=(0, 5100))
g.set_xlabels("Fragment Size (nt)")
g.set_ylabels("Error Rate")
g.add_legend()
plt.suptitle({
"IC5p": "Incomplete at 5' end",
"IC3p": "Incomplete at 3' end",
"Comp": "Complete genes"
}[cond])
plt.savefig(next_name(env["pd-work"]))
plt.show()
# show 5p error by condition (combine all tools)
df2 = df_tidy.groupby(["Chunk Size", "Tool"], as_index=False).sum()
df2_tidy = pd.melt(
df2, id_vars=["Chunk Size", "Tool"],
value_vars=[f"Number of {cond} Found" for cond in ["IC5p", "IC3p", "Comp"]],
var_name="Condition", value_name="Found"
)
df2_tidy["Condition"] = df2_tidy["Condition"].apply(lambda x: x.split()[2])
df_tmp = pd.melt(
df2, id_vars=["Chunk Size", "Tool"],
value_vars=[f"Number of {cond} Match" for cond in ["IC5p", "IC3p", "Comp"]],
var_name="Condition", value_name="Match"
)
df_tmp["Condition"] = df_tmp["Condition"].apply(lambda x: x.split()[2])
df2_tidy = reduce(lambda df1, df2: pd.merge(df1, df2, on=["Chunk Size", "Condition", "Tool"],
how="outer"), [df2_tidy, df_tmp])
df2_tidy[f"Error Rate"] = (df2_tidy[f"Found"] - df2_tidy[f"Match"]) / df2_tidy[f"Found"]
df2_tidy["Condition"].replace({
"IC5p": "Incomplete at Gene Start",
"IC3p": "Incomplete at Gene End",
"Comp": "Complete genes"
}, inplace=True)
hue_order = sorted(df_tidy["Tool"].unique())
g = seaborn.FacetGrid(df2_tidy, col="Condition", hue="Tool", sharey=True, palette=CM.get_map("tools"),
hue_order=hue_order)
g.map(plt.plot, "Chunk Size", f"Error Rate")
g.set_titles("{col_name}", style="italic")
# g.set(ylim=(0, 1))
g.set(xlim=(0, 5100))
g.set_xlabels("Fragment Size (nt)")
g.set_ylabels("Gene-Start Error Rate")
g.add_legend()
for ax, (_, subdata) in zip(g.axes[0], df2_tidy.groupby('Condition')):
ax2 = ax.twinx()
subdata = subdata.sort_values("Chunk Size")
for hue in hue_order:
subdata_hue = subdata[subdata["Tool"] == hue]
ax2.plot(subdata_hue["Chunk Size"], subdata_hue["Found"], label=hue, linestyle="dashed")
# ax2.set_ylim(40, 100)
# subdata.plot(x='data_sondage', y='impossible', ax=ax2, legend=False, color='r')
plt.savefig(next_name(env["pd-work"]))
plt.show()
###################### 2-level facetgrid ######################
# show 5p error by condition (combine all tools)
df2 = df_tidy.groupby(["Chunk Size", "Tool"], as_index=False).sum()
df2["Number of Comp Found"] += df2["Number of IC3p Found"]
df2["Number of Comp Match"] += df2["Number of IC3p Match"]
df2_tidy = pd.melt(
df2, id_vars=["Chunk Size", "Tool"],
value_vars=[f"Number of {cond} Found" for cond in ["IC5p", "Comp"]],
var_name="Condition", value_name="Score"
)
df2_tidy["Condition"] = df2_tidy["Condition"].apply(lambda x: x.split()[2])
df2_tidy["Metric"] = "Found"
df_tmp = pd.melt(
df2, id_vars=["Chunk Size", "Tool"],
value_vars=[f"Number of {cond} Match" for cond in ["IC5p", "Comp"]],
var_name="Condition", value_name="Match"
)
df_tmp["Condition"] = df_tmp["Condition"].apply(lambda x: x.split()[2])
df_tmp = reduce(lambda df1, df2: pd.merge(df1, df2, on=["Chunk Size", "Condition", "Tool"],
how="outer"), [df2_tidy, df_tmp])
df_tmp[f"Score"] = (df_tmp[f"Score"] - df_tmp[f"Match"]) / df_tmp[f"Score"]
df_tmp["Metric"] = "Error Rate"
df2_tidy = pd.concat([df2_tidy, df_tmp])
df2_tidy["Condition"].replace({
"IC5p": "Incomplete at Gene Start",
# "IC3p": "Incomplete at Gene End",
"Comp": "Complete at Gene Start"
}, inplace=True)
df2_tidy = df2_tidy[df2_tidy["Chunk Size"] <= 5000]
hue_order = sorted(df2_tidy["Tool"].unique())
g = seaborn.FacetGrid(
df2_tidy, col="Condition", hue="Tool", sharey="row", palette=CM.get_map("tools"),
row="Metric", hue_order=hue_order
)
g.map(plt.plot, "Chunk Size", f"Score")
g.set_titles("{col_name}", style="italic")
# g.set(ylim=(0, 1))
# g.set(xlim=(0, 5100))
g.set_xlabels("Fragment Size (nt)")
# g.set_ylabels("Gene-Start Error Rate")
for i, axes_row in enumerate(g.axes):
for j, axes_col in enumerate(axes_row):
if j == 0:
if i == 0:
axes_col.set_ylabel("Number of Genes Found")
else:
axes_col.set_ylabel("Gene-Start Error Rate")
g.add_legend()
plt.tight_layout(rect=[0, 0, 0.8, 1])
plt.savefig(next_name(env["pd-work"]))
plt.show()
# paper
df2_tidy.loc[df2_tidy["Tool"] == "MPRODIGAL", "Tool"] = "MProdigal"
hue_order = sorted(df2_tidy["Tool"].unique())
figsize = set_size("thesis", subplots=(2,2), legend=True, titles=True)
fig, axes = plt.subplots(2, 2, sharex="all", sharey="row", figsize=figsize)
for h in hue_order:
ids = (df2_tidy["Metric"] == "Found") & (df2_tidy["Condition"] == "Incomplete at Gene Start") & (df2_tidy["Tool"] == h)
axes[0][0].plot(
df2_tidy.loc[ids, "Chunk Size"], df2_tidy.loc[ids, "Score"],
label=h, color=CM.get_map("tools")[h.upper()]
)
for h in hue_order:
ids = (df2_tidy["Metric"] == "Found") & (df2_tidy["Condition"] == "Complete at Gene Start") & (df2_tidy["Tool"] == h)
axes[0][1].plot(
df2_tidy.loc[ids, "Chunk Size"], df2_tidy.loc[ids, "Score"],
label=h, color=CM.get_map("tools")[h.upper()]
)
for h in hue_order:
ids = (df2_tidy["Metric"] == "Error Rate") & (df2_tidy["Condition"] == "Incomplete at Gene Start") & (df2_tidy["Tool"] == h)
axes[1][0].plot(
df2_tidy.loc[ids, "Chunk Size"], df2_tidy.loc[ids, "Score"],
label=h, color=CM.get_map("tools")[h.upper()]
)
for h in hue_order:
ids = (df2_tidy["Metric"] == "Error Rate") & (df2_tidy["Condition"] == "Complete at Gene Start") & (df2_tidy["Tool"] == h)
axes[1][1].plot(
df2_tidy.loc[ids, "Chunk Size"], df2_tidy.loc[ids, "Score"],
label=h, color=CM.get_map("tools")[h.upper()]
)
axes[0][0].set_title("Incomplete at Gene Start", style="italic")
axes[0][1].set_title("Complete at Gene Start", style="italic")
axes[0][0].yaxis.set_major_formatter(FuncFormatter(number_formatter))
axes[0][1].yaxis.set_major_formatter(FuncFormatter(number_formatter))
axes[1][0].set_xlabel("Fragment Size (nt)")
axes[1][1].set_xlabel("Fragment Size (nt)")
axes[0][0].set_ylabel("Number of Genes Found")
axes[1][0].set_ylabel("Gene 5' Error Rate")
handles, labels = axes[0][0].get_legend_handles_labels()
labels = update_tool_names_to_full(labels)
# leg = fig.legend(handles, labels, bbox_to_anchor=(0.5, 0.1), loc='upper center', ncol=5,
# bbox_transform=fig.transFigure, frameon=False)
fig.align_ylabels(axes[:,0])
# plt.tight_layout(rect=[0, 0.1, 1, 1])
leg = fig.legend(handles, labels, bbox_to_anchor=(1.05, 0.5), loc='center left', frameon=False)
# fig.subplots_adjust(right=0.85)
fig.tight_layout()
fig.savefig(next_name(env["pd-work"]), bbox_extra_artists=(leg,), bbox_inches='tight')
plt.show()
# thesis
figsize = set_size("thesis", subplots=(2, 2), legend=True, titles=True)
fig, axes = plt.subplots(2, 2, sharex="all", sharey="row", figsize=figsize)
for h in hue_order:
ids = (df2_tidy["Metric"] == "Found") & (df2_tidy["Condition"] == "Incomplete at Gene Start") & (
df2_tidy["Tool"] == h)
axes[0][0].plot(
df2_tidy.loc[ids, "Chunk Size"], df2_tidy.loc[ids, "Score"],
label=h, color=CM.get_map("tools")[h.upper()]
)
for h in hue_order:
ids = (df2_tidy["Metric"] == "Found") & (df2_tidy["Condition"] == "Complete at Gene Start") & (
df2_tidy["Tool"] == h)
axes[0][1].plot(
df2_tidy.loc[ids, "Chunk Size"], df2_tidy.loc[ids, "Score"],
label=h, color=CM.get_map("tools")[h.upper()]
)
for h in hue_order:
ids = (df2_tidy["Metric"] == "Error Rate") & (df2_tidy["Condition"] == "Incomplete at Gene Start") & (
df2_tidy["Tool"] == h)
axes[1][0].plot(
df2_tidy.loc[ids, "Chunk Size"], df2_tidy.loc[ids, "Score"],
label=h, color=CM.get_map("tools")[h.upper()]
)
for h in hue_order:
ids = (df2_tidy["Metric"] == "Error Rate") & (df2_tidy["Condition"] == "Complete at Gene Start") & (
df2_tidy["Tool"] == h)
axes[1][1].plot(
df2_tidy.loc[ids, "Chunk Size"], df2_tidy.loc[ids, "Score"],
label=h, color=CM.get_map("tools")[h.upper()]
)
axes[0][0].set_title("Incomplete at Gene Start", style="italic")
axes[0][1].set_title("Complete at Gene Start", style="italic")
axes[0][0].yaxis.set_major_formatter(FuncFormatter(number_formatter))
axes[0][1].yaxis.set_major_formatter(FuncFormatter(number_formatter))
axes[1][0].set_xlabel("Fragment Size (nt)")
axes[1][1].set_xlabel("Fragment Size (nt)")
axes[0][0].set_ylabel("Number of Genes Found")
axes[1][0].set_ylabel("Gene Start Error Rate")
handles, labels = axes[0][0].get_legend_handles_labels()
labels = update_tool_names_to_full(labels)
# leg = fig.legend(handles, labels, bbox_to_anchor=(0.5, 0.1), loc='upper center', ncol=5,
# bbox_transform=fig.transFigure, frameon=False)
fig.align_ylabels(axes[:, 0])
# plt.tight_layout(rect=[0, 0.1, 1, 1])
leg = fig.legend(handles, labels, bbox_to_anchor=(1.05, 0.5), loc='center left', frameon=False)
# fig.subplots_adjust(right=0.85)
fig.tight_layout()
fig.savefig(next_name(env["pd-work"]), bbox_extra_artists=(leg,), bbox_inches='tight')
plt.show()
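# Hedged note (illustrative numbers): the gene-start error rate used above is (found - match) / found, i.e.
# the fraction of genes whose 3' end was found but whose 5' end disagrees with the reference.
# E.g. 900 matches out of 950 found genes gives an error rate of (950 - 900) / 950 ≈ 0.053.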
def viz_stats_5p_partial(env, df_tidy, tool_order, reference):
# show 5p error by condition (combine all tools)
df2 = df_tidy.groupby(["Chunk Size", "Tool"], as_index=False).sum()
df2["Number of Comp Found"] += df2["Number of IC3p Found"]
df2["Number of Comp Match"] += df2["Number of IC3p Match"]
df2[f"Error Rate Comp"] = (df2[f"Number of Comp Found"] - df2[f"Number of Comp Match"]) / df2[f"Number of Comp Found"]
df2[f"Error Rate IC5p"] = (df2[f"Number of IC5p Found"] - df2[f"Number of IC5p Match"]) / df2[
f"Number of IC5p Found"]
figsize = set_size("thesis", subplots=(2,2), legend="bottom")
fig, axes = plt.subplots(2, 2, figsize=figsize, sharey="row")
reg_kws = {"lowess": True, "scatter_kws": {"s": 0.1, "alpha": 0.3},
"line_kws": {"linewidth": 1}}
from collections import abc
axes_unr = axes
if not isinstance(axes, abc.Iterable):
axes = [axes]
else:
axes = axes.ravel()
ax = None
i = 0
fontsize = "xx-small"
for ax, col in zip(axes[0:2], ["Number of IC5p Found", "Number of Comp Found"]):
for t in tool_order:
if t.lower() == reference.lower():
continue
df_curr = df2[case_insensitive_match(df2, "Tool", t)]
seaborn.regplot(
df_curr["Chunk Size"], df_curr[col], label=t, color=CM.get_map("tools")[t.lower()],
**reg_kws, ax=ax
)
if max(df2[col]) > 2000:
ax.yaxis.set_major_formatter(FuncFormatter(number_formatter))
col_text = "\n".join(wrap(col, 20, break_long_words=False))
ax.set_ylabel(col_text, wrap=True, fontsize=fontsize)
ax.tick_params(labelsize=fontsize, length=2)
if i == 0:
ax.set_ylabel("Number of Genes Found", fontsize=fontsize)
else:
ax.set_ylabel("")
ax.set_xlabel("")
i += 1
for ax, col in zip(axes[2:], ["Error Rate IC5p", "Error Rate Comp"]):
for t in tool_order:
if t.lower() == reference.lower():
continue
            df_curr = df2[case_insensitive_match(df2, "Tool", t)]
seaborn.regplot(
df_curr["Chunk Size"], df_curr[col], label=t, color=CM.get_map("tools")[t.lower()],
**reg_kws, ax=ax
)
if len(df_curr[col]) > 0 and max(df_curr[col]) > 2000:
ax.yaxis.set_major_formatter(FuncFormatter(number_formatter))
col_text = "\n".join(wrap(col, 20, break_long_words=False))
ax.set_ylabel(col_text, wrap=True, fontsize=fontsize)
ax.tick_params(labelsize=fontsize, length=2)
        if i % 2 == 0:  # label only the left column (y-axis is shared per row)
ax.set_ylabel("Gene-Start Error Rate", fontsize=fontsize)
else:
ax.set_ylabel("")
ax.set_xlabel("Fragment Size (nt)")
i += 1
if ax is not None:
fig.subplots_adjust(bottom=0.2)
handles, labels = ax.get_legend_handles_labels()
# labels = [{
# "mgm": "MGM",
# "mgm2": "MGM2",
# "mgm2_auto": "MGM2",
# "mga": "MGA",
# "mprodigal": "MProdigal",
# "fgs": "FGS",
# "gms2": "GMS2",
# "prodigal": "Prodigal"
# }[l.lower()] for l in labels]
labels = update_tool_names_to_full(labels)
leg = fig.legend(handles, labels, bbox_to_anchor=(0.5, 0.1), loc='upper center', ncol=len(tool_order),
bbox_transform=fig.transFigure, frameon=False,
fontsize=fontsize)
for lh in leg.legendHandles:
lh.set_alpha(1)
lh.set_sizes([18] * (len(tool_order)))
for i in range(2):
fig.align_ylabels(axes_unr[:, i])
fig.tight_layout(rect=[0, 0.1, 1, 1])
fig.savefig(next_name(env["pd-work"]), bbox_extra_artists=(leg,)) # bbox_inches='tight'
plt.show()
def _helper_join_reference_and_tidy_data(env, df_per_gene, tools, list_ref):
# type: (Environment, pd.DataFrame, List[str], List[str]) -> [str, pd.DataFrame]
reference = _helper_df_joint_reference(df_per_gene, list_ref)
df_per_gene = update_dataframe_with_stats(df_per_gene, tools, reference).copy()
#### Genome Level
# compute stats per genome
df_stats_gcfid = list()
for _, df_group in df_per_gene.groupby("Chunk Size", as_index=False):
df_stats_gcfid.append(get_stats_at_gcfid_level_with_reference(df_group, tools, reference))
    df_per_genome = pd.concat(df_stats_gcfid, ignore_index=True, sort=False)
import glob
import json
import os
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt
from vxbt_calc import vxbt_calc
#from datetime import datetime
capi_data_path = '/path/to/coinapi_csvs'
start_c = pd.to_datetime('2019-05-01 00:00:00')
import datetime
import logging
import unittest
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
import pytz
from sqlalchemy import Column, Integer, MetaData, Table
from src.pipeline.processing import (
array_equals_row_on_window,
back_propagate_ones,
coalesce,
concatenate_columns,
concatenate_values,
df_to_dict_series,
df_values_to_json,
df_values_to_psql_arrays,
drop_duplicates_by_decreasing_priority,
drop_rows_already_in_table,
get_unused_col_name,
is_a_value,
join_on_multiple_keys,
left_isin_right_by_decreasing_priority,
prepare_df_for_loading,
rows_belong_to_sequence,
to_json,
to_pgarr,
zeros_ones_to_bools,
)
class TestProcessingMethods(unittest.TestCase):
def test_get_unused_col_name(self):
self.assertEqual(
            get_unused_col_name("id", pd.DataFrame({"id": [1, 2, 3]}))
import numpy as np
import pandas as pd
import datetime
def get_US_baby_names():
'''
loads the raw US baby name data stored in the data/raw/ directory
Returns
-------
df : pd.DataFrame
dataframe containing all US baby name data from 1880 - 2017
'''
df_dict = {year: pd.read_csv('./data/raw/yob{}.txt'.format(year),
names=['Name', 'Sex', 'Count'])
for year in range(1880, 2017)}
for year in df_dict:
df_dict[year]['Year'] = year
    return pd.concat([df_dict[i] for i in df_dict], axis=0)
import functools
from tqdm.contrib.concurrent import process_map
import copy
from Utils.Data.Dictionary.MappingDictionary import *
from Utils.Data.Features.Generated.GeneratedFeature import GeneratedFeaturePickle
import pandas as pd
import numpy as np
def add(dictionary, key):
dictionary[key] = dictionary.get(key, 0) + 1
def compute_chunk(chunk):
timestamp = chunk.index.to_numpy().mean()
dictionary = {}
chunk['hashtags'].map(lambda x: [add(dictionary, e) for e in x] if x is not None else [0])
return timestamp, dictionary
def get_popularity(chunk, result, s):
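    # "result" holds (window timestamp, hashtag -> count) pairs and "s" the sorted window
    # timestamps; for each tweet, binary-search the latest window preceding its timestamp
    # and collect the popularity counts of its hashtags (0 when a hashtag is unseen).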
out = []
result = copy.deepcopy(result)
s = copy.deepcopy(s)
for hashtag, timestamp in zip(chunk['hashtags'], chunk['time']):
if hashtag is not None:
index = np.searchsorted(s, timestamp, 'left') - 1
x = [result[index][1].get(h, 0)
for h in hashtag]
else:
x = [0]
out.append(x)
return pd.Series(out)
class HashtagPopularity(GeneratedFeaturePickle):
def __init__(self, feature_name: str, dataset_id: str, window_size, window_overlap):
super().__init__(feature_name, dataset_id)
self.window_size = window_size
self.window_overlap = window_overlap
self.pck_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/hashtag_popularity/{self.feature_name}.pck.gz")
self.csv_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/hashtag_popularity/{self.feature_name}.csv.gz")
self.popularity_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/hashtag_popularity/{self.window_size}_{self.window_overlap}_popularity.npy")
def get_popularity(self):
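        # Split the time-sorted, de-duplicated tweets into overlapping windows, count
        # hashtag occurrences per window in parallel, then assign every tweet the counts
        # of its hashtags from the preceding window; the result is cached on disk.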
import Utils.Data.Data as data
if self.popularity_path.is_file():
return np.load(self.popularity_path, allow_pickle=True)
else:
x = data.get_dataset(
[
"mapped_feature_tweet_id",
"mapped_feature_tweet_hashtags",
"raw_feature_tweet_timestamp"
], self.dataset_id
)
x.columns = ["tweet", "hashtags", "time"]
x = x.drop_duplicates("tweet")
x = x.set_index('time', drop=True)
x = x.sort_index()
# Group size
n = self.window_size
# Overlapping size
m = self.window_overlap
chunks = [x[i:i + n] for i in range(0, len(x), n - m)]
result = process_map(compute_chunk, chunks)
s = [r[0] for r in result]
y = data.get_dataset(
[
"mapped_feature_tweet_id",
"mapped_feature_tweet_hashtags",
"raw_feature_tweet_timestamp"
], self.dataset_id
)
y.columns = ["tweet", "hashtags", "time"]
get_popularity_partial = functools.partial(get_popularity, result=result, s=s)
popularity = pd.concat(process_map(get_popularity_partial, np.array_split(y, 100)))
self.popularity_path.parent.mkdir(parents=True, exist_ok=True)
np.save(self.popularity_path, popularity, allow_pickle=True)
return popularity
class MaxHashtagPopularity(HashtagPopularity):
def __init__(self, dataset_id: str, window_size, window_overlap):
super().__init__(f"max_hashtag_popularity_{window_size}_{window_overlap}", dataset_id, window_size, window_overlap)
def create_feature(self):
popularity = self.get_popularity()
max_popularity = np.array([max(p) for p in popularity])
        result = pd.DataFrame(max_popularity)
import gc
import numpy as np
import pandas as pd
import os
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import RepeatedKFold
from sklearn.preprocessing import LabelEncoder
from datetime import datetime
from tqdm import tqdm
import lightgbm as lgb
# Load Data
dtype = {
'id': str,
'teacher_id': str,
'teacher_prefix': str,
'school_state': str,
'project_submitted_datetime': str,
'project_grade_category': str,
'project_subject_categories': str,
'project_subject_subcategories': str,
'project_title': str,
'project_essay_1': str,
'project_essay_2': str,
'project_essay_3': str,
'project_essay_4': str,
'project_resource_summary': str,
'teacher_number_of_previously_posted_projects': int,
'project_is_approved': np.uint8,
}
# Write code that limits the rows until I've sorted out the kinks
data_dir = "F:/Nerdy Stuff/Kaggle/DonorsChoose"
sub_path = "F:/Nerdy Stuff/Kaggle submissions/DonorChoose"
train = pd.read_csv(os.path.join(data_dir, 'data/train_stem.csv'),
low_memory=True)
test = pd.read_csv(os.path.join(data_dir, 'data/test_stem.csv'),
low_memory=True)
id_test = test['id'].values
# Extract features
def extract_features(df):
df['project_title_len'] = df['project_title'].apply(lambda x: len(str(x)))
df['project_essay_1_len'] = df['project_essay_1'].apply(lambda x: len(str(x)))
df['project_essay_2_len'] = df['project_essay_2'].apply(lambda x: len(str(x)))
df['project_essay_3_len'] = df['project_essay_3'].apply(lambda x: len(str(x)))
df['project_essay_4_len'] = df['project_essay_4'].apply(lambda x: len(str(x)))
df['project_resource_summary_len'] = df['project_resource_summary'].apply(lambda x: len(str(x)))
df['project_title_wc'] = df['project_title'].apply(lambda x: len(str(x).split(' ')))
df['project_essay_1_wc'] = df['project_essay_1'].apply(lambda x: len(str(x).split(' ')))
df['project_essay_2_wc'] = df['project_essay_2'].apply(lambda x: len(str(x).split(' ')))
df['project_essay_3_wc'] = df['project_essay_3'].apply(lambda x: len(str(x).split(' ')))
df['project_essay_4_wc'] = df['project_essay_4'].apply(lambda x: len(str(x).split(' ')))
df['project_resource_summary_wc'] = df['project_resource_summary'].apply(lambda x: len(str(x).split(' ')))
extract_features(train)
extract_features(test)
train.drop([
'project_essay_1',
'project_essay_2',
'project_essay_3',
'project_essay_4'], axis=1, inplace=True)
test.drop([
'project_essay_1',
'project_essay_2',
'project_essay_3',
'project_essay_4'], axis=1, inplace=True)
# Recoding as when stopwords are removed some titles have no values
print("Recoding missing values once NLP preprocessing done. Might want to check that")
train.loc[train['project_title'].isnull() == True, 'project_title'] = 'No values once NLP preprocessing is done'
test.loc[test['project_title'].isnull() == True, 'project_title'] = 'No values once NLP preprocessing is done'
train.loc[train['project_essay'].isnull() == True, 'project_essay'] = 'No values once NLP preprocessing is done'
test.loc[test['project_essay'].isnull() == True, 'project_essay'] = 'No values once NLP preprocessing is done'
train.loc[train['project_resource_summary'].isnull() == True, 'project_resource_summary'] = 'No values once NLP preprocessing is done'
test.loc[test['project_resource_summary'].isnull() == True, 'project_resource_summary'] = 'No values once NLP preprocessing is done'
train.loc[train['description_ttl'].isnull() == True, 'description_ttl'] = 'No values once NLP preprocessing is done'
test.loc[test['description_ttl'].isnull() == True, 'description_ttl'] = 'No values once NLP preprocessing is done'
gc.collect()
# Preprocess columns with label encoder
print('Label Encoder...')
cols = [
'teacher_id',
'teacher_prefix',
'school_state',
'project_grade_category',
'project_subject_categories',
'project_subject_subcategories'
]
df_all = pd.concat([train, test], axis=0)
for c in tqdm(cols):
le = LabelEncoder()
le.fit(df_all[c].astype(str))
train[c] = le.transform(train[c].astype(str))
test[c] = le.transform(test[c].astype(str))
del le
gc.collect()
print('Done.')
# Preprocess timestamp
print('Preprocessing timestamp...')
def process_timestamp(df):
df['project_submitted_datetime'] = pd.to_datetime(df['project_submitted_datetime'])
df['year'] = df['project_submitted_datetime'].apply(lambda x: x.year)
df['month'] = df['project_submitted_datetime'].apply(lambda x: x.month)
df['day'] = df['project_submitted_datetime'].apply(lambda x: x.day)
df['day_of_week'] = df['project_submitted_datetime'].apply(lambda x: x.dayofweek)
df['hour'] = df['project_submitted_datetime'].apply(lambda x: x.hour)
df['minute'] = df['project_submitted_datetime'].apply(lambda x: x.minute)
df['project_submitted_datetime'] = df['project_submitted_datetime'].values.astype(np.int64)
process_timestamp(train)
process_timestamp(test)
print('Done.')
# Preprocess text
print('Preprocessing text...')
cols = [
'project_title',
'project_essay',
'project_resource_summary',
'description_ttl'
]
n_features = [
400,
4040,
400,
400
]
for c_i, c in tqdm(enumerate(cols)):
print("TFIDF for %s" % (c))
tfidf = TfidfVectorizer(
max_features=n_features[c_i],
norm='l2',
)
tfidf.fit(df_all[c])
tfidf_train = np.array(tfidf.transform(train[c]).toarray(), dtype=np.float16)
tfidf_test = np.array(tfidf.transform(test[c]).toarray(), dtype=np.float16)
for i in range(n_features[c_i]):
train[c + '_tfidf_' + str(i)] = tfidf_train[:, i]
test[c + '_tfidf_' + str(i)] = tfidf_test[:, i]
del tfidf, tfidf_train, tfidf_test
gc.collect()
print('Done.')
gc.collect()
# Prepare data
cols_to_drop = [
'Unnamed: 0'
, 'id'
, 'teacher_id'
, 'project_title'
, 'project_essay'
, 'project_resource_summary'
, 'project_is_approved'
, 'description_ttl'
]
X = train.drop(cols_to_drop, axis=1, errors='ignore')
y = train['project_is_approved']
X_test = test.drop(cols_to_drop, axis=1, errors='ignore')
id_test = test['id'].values
feature_names = list(X.columns)
print(X.shape, X_test.shape)
# del train, test
gc.collect()
# Build the model
cnt = 0
p_buf = []
n_splits = 5
n_repeats = 1
kf = RepeatedKFold(
n_splits=n_splits,
n_repeats=n_repeats,
random_state=0)
auc_buf = []
num_rows = 60000
X_train_test = X.iloc[0:num_rows, :]
y_train_test = y.iloc[0:num_rows]
prob_ests = []
y_test = []
prb = np.array(prob_ests[0])
y_tst = np.asarray(y_test[0], np.int32)
prb.dtype
y_tst.dtype
prb.shape
y_tst.shape
prb_ser = pd.Series(prb)
import mailbox, re, os
import pandas as pd
from datetime import datetime
targetdir = '/Users/carlos/Dropbox'
mbox_file = '/Volumes/Backup/EmailVeducaFinal/VeducaBackup.mbox/mbox'
# '/Volumes/Backup/EmailVeduca/VeducaBackup.partial.mbox/mbox'
email_lines = []
emails = []
df1 = pd.DataFrame(columns=['Name', 'Email', 'String'])
df2 = pd.DataFrame(columns=['Email'])
#!/usr/bin/env python
# coding: utf-8
# # Use deep learning to recognise LCD readings
# ## Train the text recognition model using <u>deep-text-recognition</u> ([github link](https://github.com/clovaai/deep-text-recognition-benchmark))
# ### Different settings and models were used to achieve best acuracy. The arguments are listed as follow:<br>
# ---
# **Basic settings:**
#
# |Command|help|Input|
# |:---:|:---:|:---:|
# |--exp_name|Where to store logs and models|Directory to store trained model|
# |--train_data|required=True, path to training dataset|Directory of training dataset|
# |--valid_data|required=True, path to validation dataset|Directory of training dataset|
# |--manualSeed|type=int, default=1111|for random seed setting|
# |--workers|type=int, number of data loading workers, default=4|int|
# |--batch_size|type=int, default=192|input batch size|
# |--num_iter|type=int, default=300000|number of iterations to train for|
# |--valInterval|type=int, default=2000, Interval between each validation|int|
# |--saved_model|default='', path of model to continue training|Directory|
# |--FT|action='store_true', whether to do fine-tuning|No input, activates by include this argument|
# |--adam|action='store_true', Whether to use adam (default is Adadelta)|No input|
# |--lr|type=float, default=1, learning rate, default=1.0 for Adadelta|float|
# |--beta1|type=float, default=0.9, beta1 for adam. default=0.9|float|
# |--rho|type=float, default=0.95, decay rate rho for Adadelta. default=0.95|float|
# |--eps|type=float, default=1e-8, eps for Adadelta. default=1e-8|float|
# |--grad_clip| type=float, default=5, gradient clipping value. default=5|float|
# |--baiduCTC| action='store_true', for data_filtering_off mode|No input|
#
# ---
# **Data processing:**
#
# |Command|help|Input|
# |:---:|:---:|:---:|
# |--select_data| type=str, default='MJ-ST', select training data (default is MJ-ST, which means MJ and ST used as training data|For use sample data|
# |--batch_ratio| type=str, default='0.5-0.5', assign ratio for each selected data in the batch|Use with MJ-ST|
# |--total_data_usage_ratio| type=str, default='1.0', total data usage ratio, this ratio is multiplied to total number of data.|For use part of data|
# |--batch_max_length| type=int, default=25, maximum-label-length| |
# |--imgH| type=int, default=32, the height of the input image|image size|
# |--imgW| type=int, default=100, the width of the input image|image size|
# |--rgb| action='store_true', use rgb input'|No input|
# |--character| type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', character label|To add or fileter symbols, characters|
# |--sensitive| action='store_true', for sensitive character mode|Use this to recognise Upper case|
# |--PAD| action='store_true', whether to keep ratio then pad for image resize| |
# |--data_filtering_off| action='store_true', for data_filtering_off mode|No input|
#
# ---
# **Model Architecture:**
#
# |Command|help|Input|
# |:---:|:---:|:---:|
# |--Transformation| type=str, required=True, Transformation stage. |None or TPS|
# |--FeatureExtraction| type=str, required=True, FeatureExtraction stage. |VGG, RCNN or ResNet|
# |--SequenceModeling| type=str, required=True, SequenceModeling stage. |None or BiLSTM|
# |--Prediction| type=str, required=True, Prediction stage. |CTC or Attn|
# |--num_fiducial| type=int, default=20, number of fiducial points of TPS-STN|int|
# |--input_channel| type=int, default=1, the number of input channel of Feature extractor|int|
# |--output_channel| type=int, default=512, the number of output channel of Feature extractor|int|
# |--hidden_size| type=int, default=256, the size of the LSTM hidden state|int|
# ### Train the models
# The variables used will be:
#
# |Model|Experiment Name|Command used|
# |:---:|:---:|:---:|
# |VGG | vgg-notran-nolstm-ctc | CUDA_VISIBLE_DEVICES=0 python3 train.py --exp_name vgg-notran-nolstm-ctc \ --train_data result/train --valid_data result/test --batch_size 200 \ --Transformation None --FeatureExtraction VGG --SequenceModeling None --Prediction CTC \ --num_iter 10000 --valInterval 1000 |
# |VGG | vgg-tps-nolstm-ctc| CUDA_VISIBLE_DEVICES=0 python3 train.py --exp_name vgg-tps-nolstm-ctc \ --train_data result/train --valid_data result/test --batch_size 200 \ --Transformation TPS --FeatureExtraction VGG --SequenceModeling None --Prediction CTC \ --num_iter 10000 --valInterval 1000 |
# |VGG |vgg-notran-nolstm-attn|CUDA_VISIBLE_DEVICES=0 python3 train.py --exp_name vgg-notran-nolstm-attn \ --train_data result/train --valid_data result/test --batch_size 200 \ --Transformation None --FeatureExtraction VGG --SequenceModeling None --Prediction Attn \ --num_iter 10000 --valInterval 1000|
# |RCNN | rcnn-notran-nolstm-ctc | CUDA_VISIBLE_DEVICES=0 python3 train.py --exp_name rcnn-notran-nolstm-ctc \ --train_data result/train --valid_data result/test --batch_size 200 \ --Transformation None --FeatureExtraction RCNN --SequenceModeling None --Prediction CTC \ --num_iter 10000 --valInterval 1000 |
# |RCNN | rcnn-notran-nolstm-atnn | CUDA_VISIBLE_DEVICES=0 python3 train.py --exp_name rcnn-notran-nolstm-atnn \ --train_data result/train --valid_data result/test --batch_size 200 \ --Transformation None --FeatureExtraction RCNN --SequenceModeling None --Prediction Attn \ --num_iter 10000 --valInterval 1000 |
# |ResNet | resnet-notran-nolstm-ctc | CUDA_VISIBLE_DEVICES=0 python3 train.py --exp_name resnet-notran-nolstm-ctc \ --train_data result/train --valid_data result/test --batch_size 200 \ --Transformation None --FeatureExtraction ResNet --SequenceModeling None --Prediction CTC \ --num_iter 10000 --valInterval 1000 |
# |ResNet | resnet-notran-nolstm-atnn | CUDA_VISIBLE_DEVICES=0 python3 train.py --exp_name resnet-notran-nolstm-atnn \ --train_data result/train --valid_data result/test --batch_size 200 \ --Transformation None --FeatureExtraction ResNet --SequenceModeling None --Prediction Attn \ --num_iter 10000 --valInterval 1000 |
# ### Experiment checklist
# In[1]:
from IPython.display import display
from ipywidgets import Checkbox
box1 = Checkbox(False, description='vgg-notran-nolstm-ctc')
box2 = Checkbox(False, description='vgg-notran-nolstm-attn')
box3 = Checkbox(False, description='rcnn-notran-nolstm-ctc')
box4 = Checkbox(False, description='rcnn-notran-nolstm-atnn')
box5 = Checkbox(False, description='resnet-notran-nolstm-ctc')
box6 = Checkbox(False, description='resnet-notran-nolstm-atnn')
display(box1,box2,box3,box4,box5,box6)
def changed(b):
print(b)
box1.observe(changed)
box2.observe(changed)
box3.observe(changed)
box4.observe(changed)
box5.observe(changed)
box6.observe(changed)
# ### Experiment summary
# By using ResNet (no Transformation, no BiLTSM) with ctc prediction, an prediction accuracy of over 98 % was achieved.
#
# |Model|Exp Name|Accuracy|
# |:---:|:---:|:---:|
# |VGG | vgg-notran-nolstm-ctc |90.837|
# |VGG | vgg-tps-nolstm-ctc|64.542|
# |VGG |vgg-notran-nolstm-attn|86.853|
# |RCNN | rcnn-notran-nolstm-ctc |80.080|
# |RCNN | rcnn-notran-nolstm-atnn | - |
# |ResNet | resnet-notran-nolstm-ctc |<mark>98.805</mark>|
# |ResNet | resnet-notran-nolstm-atnn |94.422|
# Command to train ResNet with a batch size of 50:
#
# ```
# !CUDA_VISIBLE_DEVICES=0 python3 train.py --exp_name resnet-notran-nolstm-ctc-bs50 \
# --train_data result/train --valid_data result/test --batch_size 50 \
# --Transformation None --FeatureExtraction ResNet --SequenceModeling None --Prediction CTC \
# --num_iter 10000 --valInterval 1000 \
# --saved_model saved_models/resnet-notran-nolstm-ctc/best_accuracy.pth
# ```
# ### Predict readings from trained model
# In[2]:
get_ipython().run_line_magic('cd', '/mnt/c/Users/stcik/scire/papers/muon/deep-text-recognition-benchmark')
# In[3]:
# Predict 90C data
output = get_ipython().getoutput('python3 predict.py --Transformation None --FeatureExtraction ResNet --SequenceModeling None --Prediction CTC --image_folder 90C/ --batch_size 400 --saved_model resnet-notran-nolstm-ctc-50bs.pth')
# In[6]:
output
# In[3]:
from IPython.core.display import display, HTML
from PIL import Image
import base64
import io
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# In[4]:
from cycler import cycler
plt.rcParams.update({
"text.usetex": True,
"font.family": "DejaVu Sans",
"font.serif": ["Computer Modern Roman"],
"font.size": 10,
"xtick.labelsize": 10,
"ytick.labelsize": 10,
"figure.subplot.left": 0.21,
"figure.subplot.right": 0.96,
"figure.subplot.bottom": 0.18,
"figure.subplot.top": 0.93,
"legend.frameon": False,
})
params= {'text.latex.preamble' : [r'\usepackage{amsmath, amssymb, unicode-math}',
r'\usepackage[dvips]{graphicx}',
r'\usepackage{xfrac}', r'\usepackage{amsbsy}']}
# In[7]:
data = pd.DataFrame()
import os
import numpy as np
import struct
import pandas as pd
import sys
import glob
import pickle as pkl
import random
import matplotlib.pyplot as plt
from lib_dolphin.eval import *
from lib_dolphin.discrete import *
from subprocess import check_output
FLOOR = 1.0
PERIOD = 1
SAMPLE_SIZE = 4
USER = 9
ENDIEN = 'big'
def write_htk(sequence, to, norm = True):
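    # Write an HTK parameter file: a 12-byte header (nSamples, sampPeriod, sampSize in
    # bytes, parmKind = USER) followed by big-endian 32-bit floats, with optional
    # per-frame z-normalisation.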
n = len(sequence)
dim = len(sequence[0])
with open(to, "wb") as f:
sze = dim * SAMPLE_SIZE
f.write(n.to_bytes(4, byteorder=ENDIEN))
f.write(PERIOD.to_bytes(4, byteorder=ENDIEN))
f.write(sze.to_bytes(2, byteorder=ENDIEN))
f.write(USER.to_bytes(2, byteorder=ENDIEN))
for i in range(0, n):
if norm:
mu = np.mean(sequence[i])
std = np.std(sequence[i]) + 1e-8
for j in range(0, dim):
x = sequence[i, j]
if np.isnan(x):
print(to)
if norm:
ba = bytearray(struct.pack(">f", (x - mu) / std))
else:
ba = bytearray(struct.pack(">f", x))
f.write(ba)
return n
def vec(v):
line = " ".join([str(x) for x in v])
return "\t{}".format(line)
def mat(x):
return "\n".join([vec(x[i]) for i in range(len(x))])
def silence_proto(dims, means, name = "sil"):
k = len(means)
transitions = np.zeros((3,3))
transitions[0, 1] = 1.00
transitions[1, 1] = 0.99
transitions[1, 2] = 0.01
variances = np.ones(dims)
components = []
p = 1.0 / k
for i in range(0, k):
component = """
<MIXTURE> {} {}
<Mean> {}
{}
<Variance> {}
{}
""".format(i + 1, p, dims, vec(means[i]), dims, vec(variances))
components.append(component)
return """~o <VecSize> {} <USER>
~h "{}"
<BeginHMM>
<NumStates> 3
<STATE> 2 <NumMixes> {}
{}
<TransP> 3
{}
<EndHMM>
""".format(dims, name, k, "".join(components), mat(transitions))
def left_right_hmm(max_states, dims, name="proto"):
transitions = np.zeros((max_states, max_states))
means = np.zeros(dims)
variances = np.ones(dims)
transitions[0, 1] = 1.0
states = []
for i in range(1, max_states - 1):
state = """
<State> {}
<Mean> {}
{}
<Variance> {}
{}
""".format(i + 1, dims, vec(means), dims, vec(variances))
states.append(state)
transitions[i,i] = 1.0 - 1.0 / max_states
if i + 1 < max_states:
transitions[i, i + 1] = 1.0 - transitions[i, i]
return """
~o <VecSize> {} <USER>
~h "{}"
<BeginHMM>
<NumStates> {}
{}
<TransP> {}
{}
<EndHMM>
""".format(dims, name, max_states, "".join(states), max_states, mat(transitions))
def simple_grammar(label_file, continuous=False):
df = pd.read_csv(label_file, sep=" ", header=None, names=["start", "stop", "lab"], skiprows=2)
df = df.dropna()
labels = list(set(df["lab"]))
if continuous:
labels.append("sil")
patterns = " | ".join(labels)
patterns = "$patterns = {};".format(patterns)
if continuous:
grammars = "( <$patterns> )"
else:
grammars = "( $patterns )"
return "\n".join([patterns, grammars])
def wordlist(label_file, continuous=False):
df = pd.read_csv(label_file, sep=" ", header=None, names=["start", "stop", "lab"], skiprows=2)
df = df.dropna()
labels = list(set(df["lab"]))
if continuous:
labels.append("sil")
labels = sorted(labels)
labels = ["{} {}".format(i,j) for i, j in zip(labels, labels)]
return "\n".join(labels)
def parse_model(model):
lines = [line for line in open(model)]
hmm = []
start = False
for line in lines:
if line.startswith("<BEGINHMM>"):
start = True
if start:
hmm.append(line)
if line.endswith("<ENDHMM>"):
break
return hmm
def mmf(label_file, proto_file, dim, hmm_out="hmm0", hmm_list_out="monophones"):
df = pd.read_csv(label_file, sep=" ", header=None, names=["start", "stop", "lab"], skiprows=2)
df = df.dropna()
labels = set(df["lab"])
print(labels)
hmm = parse_model(proto_file)
monophones = []
mmf = ["""~o <VECSIZE> {} <USER><DIAGC>""".format(dim)]
for i in labels:
header = "~h \"{}\"\n".format(i)
monophones.append("{}".format(i))
mmf.append(header)
mmf.extend(hmm)
mmf.append("\n")
mmf = "".join(mmf)
monophones = "\n".join(monophones)
with open(hmm_out, "w") as f:
f.write(mmf)
with open(hmm_list_out, "w") as f:
f.write(monophones)
def htk_name(num):
return "".join([str(chr(int(i) + 97)) for i in str(num)])
def get_ll(out):
out = out.split(b"\n")
for line in out:
if b"average log prob per frame" in line:
ll = line.strip().split(b" ")[-1]
ll = float(ll)
return ll
def take_step(folder, i):
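    # Run one HERest (embedded Baum-Welch) re-estimation pass over the training data,
    # writing the updated models to hmm{i}, and return HTK's average per-frame log-likelihood.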
files = glob.glob("{}/data/train/*.htk".format(folder))
out = check_output(["rm", "-rf", "{}/hmm{}".format(folder, i)])
out = check_output(["mkdir", "{}/hmm{}".format(folder, i)])
out = check_output("HERest -A -T 1 -v {} -I {}/clusters_TRAIN.mlf -M {}/hmm{} -H {}/hmm{}/hmm_mmf {}/list".format(FLOOR, folder, folder, i, folder, i - 1, folder).split(" ") + files)
return get_ll(out)
def label_cluster(predictions, ids, reverse):
x = np.sum([np.mean(predictions[i], axis=0) for i in ids], axis=0)
x = dict([(reverse[i], x[i]) for i in range(0, len(x))])
y = {}
y['WSTL'] = x['WSTL_UP'] + x['WSTL_DOWN']
y['BURST'] = x['BURST']
y['ECHO'] = x['ECHO']
y = list(y.items())
y.sort(key = lambda x: -x[1])
return y[0][0]
def htk_eval(folder, last_hmm):
files = glob.glob("{}/data/test/*.htk".format(folder))
out = check_output("HVite -T 1 -H {}/hmm{}/hmm_mmf -i {}/predictions.mlf -w {}/wdnet {}/dict {}/list".format(
folder, last_hmm, folder, folder, folder, folder
).split(" ") + files)
out = check_output("HResults -I {}/clusters_TEST.mlf {}/list {}/predictions.mlf".format(folder, folder, folder).split(" "))
return out.decode("utf-8")
def states(instance, per_state=3):
n = len(instance)
return n // per_state
def htk_export(folder, out_htk, out_lab, htk, k, min_c = 5):
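    # Export clustered instances to HTK format: estimate an average HMM state count per
    # cluster (saved to states.pkl), then for every non-ECHO cluster with at least min_c
    # members write a 90/10 train/test split of HTK feature files plus matching MLF label files.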
instances_file = "{}/instances.pkl".format(folder)
predictions_file = "{}/predictions.pkl".format(folder)
clusters_file = "{}/clusters.pkl".format(folder)
label_file = "{}/labels.pkl".format(folder)
n_states_file = "{}/states.pkl".format(htk)
instances = pkl.load(open(instances_file, "rb"))
clusters = pkl.load(open(clusters_file, "rb"))[k, :]
predictions = pkl.load(open(predictions_file, "rb"))
label_dict = pkl.load(open(label_file, "rb"))
reverse = dict([(v,k) for k, v in label_dict.items()])
ids_cluster = {}
for i, cluster in enumerate(clusters):
if len(instances[i]) > 0:
if cluster not in ids_cluster:
ids_cluster[cluster] = []
ids_cluster[cluster].append(i)
states_dict = {}
for i, c in enumerate(clusters):
l = htk_name(c)
n = states(instances[i])
if l not in states_dict:
states_dict[l] = []
states_dict[l].append(n)
states_dict = dict([(k, int(np.mean(v))) for k, v in states_dict.items()])
pkl.dump(states_dict, open(n_states_file, "wb"))
plt.hist([x for _, x in states_dict.items()], bins=40)
plt.title('State Distribution')
plt.savefig('{}/state_hist.png'.format(htk))
plt.close()
cur = 0
label_dict = {}
train = "{}_TRAIN.mlf".format(out_lab.replace(".mlf", ""))
test = "{}_TEST.mlf".format(out_lab.replace(".mlf", ""))
os.system("mkdir {}/train".format(out_htk))
os.system("mkdir {}/test".format(out_htk))
n_exp = 0
with open(train, 'w') as fp_train, open(test, 'w') as fp_test:
fp_train.write("#!MLF!#\n")
fp_test.write("#!MLF!#\n")
for c, ids in ids_cluster.items():
label = label_cluster(predictions, ids, reverse)
if label != "ECHO" and len(ids) >= min_c:
if c not in label_dict:
label_dict[c] = htk_name(c)
n_exp += 1
random.shuffle(ids)
n_train = int(0.9 * len(ids))
train_ids = ids[0:n_train]
test_ids = ids[n_train:len(ids)]
for i in train_ids:
n = len(instances[i])
write_htk(instances[i], "{}/train/{}_{}.htk".format(out_htk, label_dict[c], i))
fp_train.write("\"*/{}_{}.lab\"\n".format(label_dict[c], i))
fp_train.write("{} {} {}\n".format(0, n, label_dict[c]))
fp_train.write(".\n")
for i in test_ids:
n = len(instances[i])
write_htk(instances[i], "{}/test/{}_{}.htk".format(out_htk, label_dict[c], i))
fp_test.write("\"*/{}_{}.lab\"\n".format(label_dict[c], i))
fp_test.write("{} {} {}\n".format(0, n, label_dict[c]))
fp_test.write(".\n")
for c, name in label_dict.items():
print(" ... {} {}".format(c, name))
print("#clusters: {}".format(n_exp))
def is_header(line):
return line.startswith("#!") or line.startswith('.')
def label(line):
if line.strip().endswith(".rec\""):
return line.strip().split('/')[-1].replace(".rec\"", "").split("_")[0]
else:
return line.strip().split(" ")[2]
def ids(line):
return line.strip().split('/')[-1].replace(".rec\"", "").split("_")[1]
def parse_htk(file):
corr = []
pred = []
ids_c = []
i = 0
for line in open(file):
if not is_header(line):
l = label(line)
if i % 2 == 0:
corr.append(l)
ids_c.append(ids(line))
else:
pred.append(l)
i += 1
return corr, pred, ids_c
def number(x):
strg = ""
for i in x:
strg += str(ord(i) - 97)
return int(strg)
def htk_confusion(file):
corr, pred, _ = parse_htk(file)
ldict = {}
confusions = []
cur = 0
for c, p in zip(corr, pred):
cl = number(c)
pl = number(p)
if cl not in ldict:
ldict[cl] = cur
cur += 1
if pl not in ldict:
ldict[pl] = cur
cur += 1
confusions.append([ldict[cl], ldict[pl]])
conf = np.zeros((len(ldict), len(ldict)))
for i, j in confusions:
conf[i, j] += 1
names = [(k, v) for k, v in ldict.items()]
names.sort(key = lambda x: x[1])
names = [k for k, _ in names]
return conf, names
def htk_init(label_file, proto_file, dim, train_folder, htk, latent, min_states, hmm_out="hmm0", hmm_list_out="monophones"):
files = glob.glob(train_folder)
    df = pd.read_csv(label_file, sep=" ", header=None, names=["start", "stop", "lab"], skiprows=2)
import unittest
import numpy as np
import pandas as pd
from schemaflow.pipeline import Pipeline
from schemaflow.pipe import Pipe
from schemaflow import types, ops
class Pipe1(Pipe):
transform_requires = {
'x': types.PandasDataFrame(schema={'a': np.float64, 'b': np.float64}),
}
transform_modifies = {
'x': ops.ModifyDataFrame({'a * b': ops.Set(np.float64)})
}
def transform(self, data: dict):
data['x']['a * b'] = data['x']['a'] * data['x']['b']
return data
class Pipe2(Pipe):
transform_requires = {
'x': types.PandasDataFrame(schema={'a': np.float64, 'b': np.float64}),
}
transform_modifies = {
'x': ops.ModifyDataFrame({'a': ops.Drop()})
}
def transform(self, data: dict):
data['x'] = data['x'].drop('a', axis=1)
return data
class TestPipeline(unittest.TestCase):
def test_set(self):
p = Pipeline([Pipe1()])
result = p.transform({'x': pd.DataFrame({'a': [2.0], 'b': [2.0]})})
self.assertEqual(result['x'].loc[:, 'a * b'].values, [4.0])
self.assertEqual(p.transform_modifies, Pipe1.transform_modifies)
def test_drop(self):
p = Pipeline([Pipe2()])
result = p.transform({'x': pd.DataFrame({'a': [2.0], 'b': [2.0]})})
self.assertEqual(len(result['x'].columns), 1)
self.assertEqual(p.transform_modifies, Pipe2.transform_modifies)
def test_combine(self):
p = Pipeline([Pipe1(), Pipe2()])
        result = p.transform({'x': pd.DataFrame({'a': [2.0], 'b': [2.0]})})
# %% imports
import logging
import os
import numpy as np
import pandas as pd
import config as cfg
from logging_config import setup_logging
from src.utils.data_processing import medea_path, download_energy_balance, resample_index, heat_yr2day, heat_day2hr
# ----------------------------------------------------------------------------
# %% settings
# ----------------------------------------------------------------------------
YEARS = range(2012, 2019)
setup_logging()
# ----------------------------------------------------------------------------
# %% download data from sources
# ----------------------------------------------------------------------------
# Austrian energy balance as provided by Statistik Austria
download_energy_balance('AT')
enbal_at = medea_path('data', 'raw', 'enbal_AT.xlsx')
ht_enduse_at = pd.read_excel(enbal_at, sheet_name='Fernwärme', header=[438], index_col=[0], nrows=24,
na_values=['-']).astype('float')
# German energy balance as provided by AGEB
download_energy_balance('DE')
ht_enduse_de = pd.DataFrame()
for yr in [x - 2000 for x in YEARS]:
# mix of xlsx and xls files...
enebal_de = medea_path('data', 'raw', f'enbal_DE_20{yr}.xlsx')
if not os.path.exists(enebal_de):
# check if xls file exists...
enebal_de = medea_path('data', 'raw', f'enbal_DE_20{yr}.xls')
if not os.path.exists(enebal_de):
raise FileNotFoundError(f'File {enebal_de} does not exist!')
df = pd.read_excel(enebal_de, sheet_name='tj', index_col=[0], usecols=[0, 31], skiprows=list(range(0, 50)),
nrows=24, na_values=['-'])
df.columns = [2000 + yr]
    ht_enduse_de = pd.concat([ht_enduse_de, df], axis=1)
import pandas as pd
import gc
def data_prep(data):
"""
It will take about 15 seconds for 30,000 tweet objects
when read from a .json file in the full format
"""
c = int(len(data))
test = [[data['user'][i]['statuses_count'],
data['user'][i]['followers_count'],
data['user'][i]['friends_count'],
data['user'][i]['listed_count'],
data['user'][i]['favourites_count'],
data['user'][i]['screen_name'],
data['user'][i]['created_at'],
data['user'][i]['default_profile'],
data['user'][i]['default_profile_image'],
data['user'][i]['location'],
data['user'][i]['time_zone'],
data['user'][i]['name'],
data['user'][i]['lang'],
data['user'][i]['description'],
data['entities'][i]] for i in range(c)]
df1 = pd.DataFrame(test)
del test
data = data.drop('entities', axis=1)
df1.columns = ['user.statuses_count',
'user.followers_count',
'user.friends_count',
'user.listed_count',
'user.favourites_count',
'user.screen_name',
'user.created_at',
'user.default_profile',
'user.default_profile_image',
'user.location',
'user.time_zone',
'user.name',
'user.lang',
'user.description',
'entities']
    out = pd.concat([df1, data], axis=1)
import pandas as pd
from datacode.panel.expandselect import expand_entity_date_selections
from datacode.summarize.subset.outliers.typing import (
StrList,
AssociatedColDict,
BoolDict,
DfDict,
TwoDfDictAndDfTuple,
MinMaxDict
)
def outlier_summary_dicts(df: pd.DataFrame, associated_col_dict: AssociatedColDict,
min_max_dict: MinMaxDict,
ascending_sort_dict: BoolDict = None,
always_associated_cols: StrList = None, bad_column_name: str = '_bad_column',
num_firms: int = 3, firm_id_col: str = 'TICKER',
date_col: str = 'Date',
begin_datevar: str = 'Begin Date',
end_datevar: str = 'End Date',
expand_months: int = 3
) -> TwoDfDictAndDfTuple:
bad_df = _bad_df_from_df(
df,
min_max_dict,
bad_column_name=bad_column_name
)
bad_df_dict = _column_bad_df_dict(
bad_df,
associated_col_dict,
ascending_sort_dict=ascending_sort_dict,
always_associated_cols=always_associated_cols,
bad_column_name=bad_column_name
)
selected_orig_df_dict = _col_bad_df_dict_to_selected_orig_df_dict(
df,
bad_df_dict,
associated_col_dict,
always_associated_cols=always_associated_cols,
num_firms=num_firms,
firm_id_col=firm_id_col,
date_col=date_col,
begin_datevar=begin_datevar,
end_datevar=end_datevar,
expand_months=expand_months
)
return bad_df_dict, selected_orig_df_dict, bad_df
def drop_outliers_by_cutoffs(df: pd.DataFrame, min_max_dict: MinMaxDict):
valid_df = df.copy()
for col, min_max in min_max_dict.items():
col_min = min_max[0]
col_max = min_max[1]
if col in df.columns:
valid_df = valid_df.loc[
(valid_df[col] >= col_min) & (valid_df[col] <= col_max)
]
return valid_df
def _bad_df_from_df(df: pd.DataFrame, min_max_dict: MinMaxDict,
bad_column_name: str = '_bad_column'):
    bad_df = pd.DataFrame()
import yfinance as yf
import numpy as np, pandas as pd, matplotlib.pyplot as plt
import math
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.statespace.sarimax import SARIMAX
from sklearn.metrics import mean_squared_error,mean_absolute_error
import os
from pandas.tseries.offsets import DateOffset
from datetime import datetime, timedelta
clear = lambda: os.system('clear')
clear()
df = yf.download("BTC-USD")
plt.plot(df.index, df['Adj Close'])
#plt.show()
to_row = int(len(df)*0.9)
training_data = list(df[:to_row]['Adj Close'])
test_data = list(df[to_row:]['Adj Close'])
#print(test_data[len(test_data)-1])
#input("PAUSE")
model_predictions = []
n_test_obser = len(test_data)
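# Walk-forward validation: refit the ARIMA model on all data observed so far, forecast
# one step ahead, then append the true observation before the next refit.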
for i in range(0,n_test_obser):
    model = ARIMA(training_data, order = (4,1,0)) # (4,1,1) --> best results
model_fit = model.fit()
output = model_fit.forecast()
yhat = output[0]
#print(yhat)
model_predictions.append(yhat)
#print(model_predictions)
actual_test_value = test_data[i]
training_data.append(actual_test_value)
# PREDICTION TEST
""" ------------------------------------ PRUEBA 1 -------------------------------------
# Generamos un dataframe con fechas futuras
# Obtenemos la ultima fecha del dataset para generar un nuevo dataset con las fechas futuras a predecir, en este caso, 60 dias en el futuro
inicio = df.index[-1].date() + timedelta(1)
fin = inicio + timedelta(60)
model = ARIMA(df[:]['Adj Close'], order = (4,1,0))
model_fit = model.fit()
future_predictions = model_fit.predict(start=inicio, end=fin)
future = pd.DataFrame()
future['Date'] = [inicio + timedelta(days=d) for d in range((fin - inicio).days + 1)]
future['Adj Close'] = list(future_predictions)
future.set_index('Date',inplace = True)
df.drop(['Open'], axis=1)
df.drop(['High'], axis=1)
df.drop(['Low'], axis=1)
df.drop(['Close'], axis=1)
df.drop(['Volume'], axis=1)
print(pd.concat([df, future], axis=0)) # Concatenate the dataframes along the row axis
print(df.head)
print(future.head)
# END OF PREDICTION TEST
"""
# ------------------------------------ TEST 2 -------------------------------------
# Predict future data one day ahead at a time, treating each prediction as an observed value ("fact") and re-training the model at every iteration
future_predictions = []
train = list(df[:]['Adj Close'])
for i in range(0,60):
model = ARIMA(train, order = (4,1,0))
model_fit = model.fit()
output = model_fit.forecast()
yhat = output[0]
future_predictions.append(yhat)
train.append(yhat)
# Generate a dataframe with future dates
# Take the last date of the dataset to build a new dataset with the future dates to predict, in this case, 60 days into the future
inicio = df.index[-1].date() + timedelta(1)
fin = inicio + timedelta(60)
future = pd.DataFrame()
future['Date'] = [inicio + timedelta(days=d) for d in range((fin - inicio).days)]
future['Adj Close'] = list(future_predictions)
future.set_index('Date',inplace = True)
# Keep only the adjusted closing price (the original per-column drop calls discarded their results)
df = df.drop(['Open', 'High', 'Low', 'Close', 'Volume'], axis=1)
# Concatenate the dataframes along the row axis
pd.concat([df, future], axis=0)
# END OF PREDICTION TEST
print(model_fit.summary())
plt.figure(figsize=(15,9))
plt.grid(True)
date_range = df[to_row:].index
plt.plot(date_range, model_predictions, color="blue", marker="o", linestyle="dashed", label="Predicted BTC price")
plt.plot(date_range, test_data, color="red", label="Actual BTC price")
plt.title("Bitcoin price prediction")
plt.xlabel("Date")
plt.ylabel("Price")
plt.legend()
plt.figure(figsize=(10,6))
plt.grid(True)
plt.xlabel("Dates")
plt.ylabel("Closing Prices")
plt.plot(df[:to_row]['Adj Close'],"green",label="Train Data")
plt.plot(df[to_row:]['Adj Close'],"blue",label="Test Data")
plt.plot(future[:]['Adj Close'],"red",label="Future Prediction")
plt.legend()
plt.figure(figsize=(10,6))
plt.grid(True)
plt.xlabel("Dates")
plt.ylabel("Closing Prices")
plt.plot(future[0:]['Adj Close'],"red",label="Prediction")
plt.legend()
plt.show()
# Correlation
predictions_df = pd.DataFrame(model_predictions, columns=['Adj Close'])
test_df = pd.DataFrame(test_data, columns=['Adj Close'])
from bimt.query.cfg import config
import xml.etree.ElementTree as ET
import pandas as pd
import numpy as np
class ProcessQuery:
def __init__(self, xml_file):
self.xml_file = xml_file
def transform(self, raw_query):
        query = raw_query.strip(";,?!(){}\\/'")
query = query.upper()
return query
def from_tag_to_consultas_json(self, tag):
return {
"QueryNumber": tag.find("QueryNumber").text,
"QueryText": tag.find("QueryText").text,
"ProcessedQuery": self.transform(tag.find("QueryText").text)
}
def write_consultas_file(self):
consulta_file_path = config["DEFAULT"]["CONSULTAS"]
queries_tags = self.xml_file.findall('QUERY')
consulta_data = [ self.from_tag_to_consultas_json(tag) for tag in queries_tags ]
consulta_df = pd.DataFrame(consulta_data)
consulta_df.to_csv(consulta_file_path, sep=";", index=False)
def parse_doc_score_field(self, score_as_text):
score_values = [int(n) for n in score_as_text]
return np.mean(score_values)
def from_tag_to_esperados_json(self, tag):
query_number = tag.find("QueryNumber").text
records_list = tag.findall("Records/Item")
return [{
"QueryNumber": query_number,
"DocNumber": t.text,
"DocVotes": self.parse_doc_score_field(t.get("score")) }
for t in records_list]
def write_esperados_file(self):
esperados_file_path = config["DEFAULT"]["ESPERADOS"]
queries_tags = self.xml_file.findall('QUERY')
esperados_data = [ self.from_tag_to_esperados_json(tag) for tag in queries_tags ]
esperados_data = np.ravel(esperados_data)
unpacked_data = []
for i in esperados_data:
unpacked_data.extend(i)
        esperados_df = pd.DataFrame(unpacked_data)
import numpy as np
#import matplotlib.pyplot as plt
import pandas as pd
import numpy.matlib # use repmat function
from scipy.spatial import distance_matrix
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import roc_curve, auc
from sklearn import preprocessing
import random
import time
import copy
import concurrent.futures
#import jason
random.seed(1)
# Compute the adjacency matrix for the Gabriel Graph
def get_adjacency(X):
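    # Gabriel graph criterion: connect samples i and j iff no other sample lies inside
    # the ball whose diameter is the segment ij, i.e. no point is closer than
    # d(i, j) / 2 to their midpoint.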
dist_matrix = distance_matrix(X,X)
Adj_matrix = np.zeros(shape = dist_matrix.shape)
nrow = dist_matrix.shape[0]
for i in range(nrow):
for j in range(nrow):
if (i != j):
d1 = (dist_matrix[i,j])/2
dist = pd.DataFrame((X.iloc[i,:]+X.iloc[j,:])/2).T
d = distance_matrix(dist, X)
d[0,i] = float("inf")
d[0,j] = float("inf")
compara = (d<d1)
if not compara.any():
Adj_matrix[i,j] = 1
Adj_matrix[j,i] = 1
return Adj_matrix
# Removing overlapping samples:
def remove_noise(X, y):
Adj_matrix = get_adjacency(X)
c1 = np.asarray(np.where(y==1)).ravel()
c2 = np.asarray(np.where(y==-1)).ravel()
A1 = Adj_matrix[:,c1].sum(axis = 0) # sum over columns
A2 = Adj_matrix[:,c2].sum(axis = 0)
M = pd.DataFrame(Adj_matrix)
adj_1 = np.asarray(M.iloc[c1,c1])
adj_2 = np.asarray(M.iloc[c2,c2])
A1h = adj_1.sum(axis = 0)
A2h = adj_2.sum(axis = 0)
#Computing the quality coefficient Q for each class
Q_class1 = A1h / A1
Q_class2 = A2h / A2
# Computing the threshold value t for each class
t_class1 = sum(Q_class1) / Q_class1.shape[0]
t_class2 = sum(Q_class2) / Q_class2.shape[0]
    # Map the flagged positions (relative to each class subset) back to row positions
    # in the original data before dropping them
    noise_c1 = c1[np.where(Q_class1 < t_class1)[0]]
    noise_c2 = c2[np.where(Q_class2 < t_class2)[0]]
    noise = np.concatenate([noise_c1, noise_c2])
    # Filtering the data
    X_new = X.drop(X.index[noise])
    y_new = y.drop(y.index[noise])
return X_new, y_new
# Split the data for concurrent computing
def split(X_train, y_train, split_size):
data_train = np.c_[X_train, y_train]
np.random.shuffle(data_train)
data_split = np.array_split(data_train, split_size)
return data_split
# Finding the separation border:
def get_borda(y, Adj_matrix):
y_t = pd.DataFrame(y).T
ncol = y_t.shape[1]
mask = np.matlib.repmat(y_t, ncol, 1)
mask2 = pd.DataFrame(mask*Adj_matrix)
borda = pd.DataFrame(np.zeros(ncol)).T
for idx in range(ncol):
a1 = sum(-y_t.iloc[0, idx] == mask2.iloc[idx,:]) # check if the labels are different
if a1 > 0:
borda[idx] = 1
return borda
# Finding the support edges:
def get_arestas_suporte(X, y, borda, Adj_matrix):
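    # Support edges are Gabriel-graph edges joining border vertices of opposite classes;
    # return the coordinates and labels of the vertices touched by those edges.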
X = np.asarray(X)
y_t = pd.DataFrame(y).T
ncol = y_t.shape[1]
mask = np.matlib.repmat(y_t, ncol, 1)
nrow = Adj_matrix.shape[0]
maskBorda = np.matlib.repmat(borda == 1, nrow, 1)
maskBorda = np.asarray(maskBorda)
# Removing the lines that not belong to the margin
aux = maskBorda * np.transpose(maskBorda)
# Removing edges that do not belong to the graph
aux = Adj_matrix * aux
# Removing edges from same labels vertices
aux1 = aux + (mask * aux)
aux2 = aux - (mask * aux)
aux1 = np.asarray(aux1)
aux2 = np.asarray(aux2)
aux = aux1 * np.transpose(aux2)
# converting matrix to binary
aux = (aux != 0)
arestas = np.where(aux == 1)
arestas = np.transpose(np.asarray(arestas))
nrow_arestas = arestas.shape[0]
ncol_arestas = arestas.shape[1]
arestas_suporte = []
y_suporte = []
y_arr = np.asarray(y)
for i in range(nrow_arestas):
for j in range(ncol_arestas):
idx = arestas[i,j]
arestas_suporte.append(X[idx,:])
y_suporte.append(y_arr[idx])
X_suporte = np.asarray(arestas_suporte)
y_suporte = np.asarray(y_suporte)
return X_suporte, y_suporte
# Another support edges function that contains the other functions
def support_edges(data):
if not isinstance(data, pd.DataFrame):
data = pd.DataFrame(data)
X_train = data.iloc[:,:-1]
y_train = data.iloc[:, -1]
Adj_matrix = get_adjacency(X_train)
borda = get_borda(y_train, Adj_matrix)
X_suporte, y_suporte = get_arestas_suporte(X_train, y_train, borda, Adj_matrix)
arestas_suporte = np.c_[X_suporte, y_suporte]
if arestas_suporte.shape[0] > 0:
arestas_suporte = np.unique(arestas_suporte, axis = 0)
return arestas_suporte
# Classification
def classify_data(X_test, y_test, arestas_suporte):
X_suporte = arestas_suporte[:,:-1]
#y_suporte = arestas_suporte[:,-1]
nrow = X_test.shape[0]
dist_test = distance_matrix(X_test, X_suporte) # compute the distance from the sample to the support egdes
y_hat = np.zeros(nrow)
for idx in range(nrow):
dist = dist_test[idx,:]
min_idx = np.argmin(dist)
y_hat[idx] = arestas_suporte[min_idx, -1]
return y_hat
# Performance measure using AUC
def compute_AUC(y_test, y_hat):
fpr, tpr, _ = roc_curve(y_test, y_hat)
if fpr.shape[0] < 2 or tpr.shape[0] < 2:
roc_auc = float('nan')
else:
roc_auc = auc(fpr, tpr)
return roc_auc
# Parallel graph method:
def parallel_graph(X_train, y_train, split_size):
data_train = np.c_[X_train, y_train]
np.random.shuffle(data_train)
data_split = np.array_split(data_train, split_size)
arestas_suporte_final = []
for i in range(split_size):
data = pd.DataFrame(data_split[i])
X_train = data.iloc[:,:-1]
y_train = data.iloc[:, -1]
# Finding the support edges from this slot of data:
arestas_suporte = support_edges(data)
arestas_suporte_final.append(arestas_suporte)
arr = arestas_suporte_final[0]
for i in range(split_size-1):
i = i+1
arr = np.concatenate((arr, arestas_suporte_final[i]), axis = 0)
data_train_new = pd.DataFrame(arr)
X_train_new = data_train_new.iloc[:,:-1]
y_train_new = data_train_new.iloc[:,-1]
return X_train_new, y_train_new
def compute_pseudo_support_edges(data, scale_factor = 10):
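    # Cheap approximation of the support edges: pick one random reference sample per
    # class and, for each reference, keep the len(data) / scale_factor nearest samples
    # of the opposite class.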
# separating the lables
c1 = data[data.iloc[:,-1] == 1]
c2 = data[data.iloc[:,-1] == -1]
# Choosing one random reference sample from each class
c1_reference = c1.sample(n = 1)
c2_reference = c2.sample(n = 1)
# Compute the distance matrix between each sample and the opposite class
dist_c1 = distance_matrix(c2_reference, c1)
dist_c2 = distance_matrix(c1_reference, c2)
n_edges = int(data.shape[0] / scale_factor) # number of pseudo support edges
# Indices from the n smallests support edges
idx_c1 = np.argpartition(dist_c1, n_edges)
idx_c2 = np.argpartition(dist_c2, n_edges)
c1_support_edges = c1.iloc[idx_c1[0,:n_edges]]
c2_support_edges = c2.iloc[idx_c2[0,:n_edges]]
    pseudo_support_edges = np.array(pd.concat([c1_support_edges, c2_support_edges]))
import numpy as np
from numpy.random import randn
import pytest
from pandas import DataFrame, Series
import pandas._testing as tm
@pytest.mark.parametrize("name", ["var", "vol", "mean"])
def test_ewma_series(series, name):
series_result = getattr(series.ewm(com=10), name)()
assert isinstance(series_result, Series)
@pytest.mark.parametrize("name", ["var", "vol", "mean"])
def test_ewma_frame(frame, name):
frame_result = getattr(frame.ewm(com=10), name)()
assert isinstance(frame_result, DataFrame)
def test_ewma_adjust():
vals = Series(np.zeros(1000))
vals[5] = 1
result = vals.ewm(span=100, adjust=False).mean().sum()
assert np.abs(result - 1) < 1e-2
@pytest.mark.parametrize("adjust", [True, False])
@pytest.mark.parametrize("ignore_na", [True, False])
def test_ewma_cases(adjust, ignore_na):
# try adjust/ignore_na args matrix
s = Series([1.0, 2.0, 4.0, 8.0])
if adjust:
expected = Series([1.0, 1.6, 2.736842, 4.923077])
else:
expected = Series([1.0, 1.333333, 2.222222, 4.148148])
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
def test_ewma_nan_handling():
s = Series([1.0] + [np.nan] * 5 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([1.0] * len(s)))
s = Series([np.nan] * 2 + [1.0] + [np.nan] * 2 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([np.nan] * 2 + [1.0] * 4))
@pytest.mark.parametrize(
"s, adjust, ignore_na, w",
[
(
Series([np.nan, 1.0, 101.0]),
True,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],
),
(
Series([np.nan, 1.0, 101.0]),
True,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],
),
(
Series([np.nan, 1.0, 101.0]),
False,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],
),
(
Series([np.nan, 1.0, 101.0]),
False,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],
),
(
Series([1.0, np.nan, 101.0]),
True,
False,
[(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, 1.0],
),
(
Series([1.0, np.nan, 101.0]),
True,
True,
[(1.0 - (1.0 / (1.0 + 2.0))), np.nan, 1.0],
),
(
Series([1.0, np.nan, 101.0]),
False,
False,
[(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, (1.0 / (1.0 + 2.0))],
),
(
Series([1.0, np.nan, 101.0]),
False,
True,
[(1.0 - (1.0 / (1.0 + 2.0))), np.nan, (1.0 / (1.0 + 2.0))],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
True,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))) ** 3, np.nan, np.nan, 1.0, np.nan],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
True,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), np.nan, np.nan, 1.0, np.nan],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
False,
False,
[
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))) ** 3,
np.nan,
np.nan,
(1.0 / (1.0 + 2.0)),
np.nan,
],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
False,
True,
[
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))),
np.nan,
np.nan,
(1.0 / (1.0 + 2.0)),
np.nan,
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
True,
False,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 3,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))),
1.0,
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
True,
True,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 2,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))),
1.0,
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
False,
False,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 3,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)),
(1.0 / (1.0 + 2.0))
* ((1.0 - (1.0 / (1.0 + 2.0))) ** 2 + (1.0 / (1.0 + 2.0))),
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
False,
True,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 2,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)),
(1.0 / (1.0 + 2.0)),
],
),
],
)
def test_ewma_nan_handling_cases(s, adjust, ignore_na, w):
# GH 7603
expected = (s.multiply(w).cumsum() / Series(w).cumsum()).fillna(method="ffill")
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
if ignore_na is False:
# check that ignore_na defaults to False
result = s.ewm(com=2.0, adjust=adjust).mean()
        tm.assert_series_equal(result, expected)
# Import Libraries
import statistics
import numpy as np
import pandas as pd
import streamlit as st
# PREDICTION FUNCTION
def predict_AQI(city, week, year, multi_week, month):
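    # Look up predicted and actual AQI values from the per-city CSV: either for a set of
    # weeks (multi_week), a monthly average (month != '0'), or a single week of the year.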
if city == 'Chicago':
data = pd.read_csv("pages/data/chi_actual_pred.csv")
if multi_week:
result = []
actual = []
for i in week.values():
result_val = pd.DataFrame(data[(data["week"] == (i)) & (data["year"] == int(year))])
result_val = result_val.iloc[:, 1].values
actual_val = pd.DataFrame(data[(data["week"] == (i)) & (data["year"] == int(year))])
actual_val = actual_val.iloc[:, 6].values
result.append(np.array_repr(result_val))
actual.append(np.array_repr(actual_val))
f_r = []
f_a = []
for i in result:
i = i.replace('array([', '')
f_r.append(i.replace('])', ''))
for i in actual:
i = i.replace('array([', '')
f_a.append(i.replace('])', ''))
return f_r, f_a
elif month != '0':
result = pd.DataFrame(data[(data["month"] == int(month)) & (data["year"] == int(year))])
result = statistics.mean(result.iloc[:, 1].values)
actual = pd.DataFrame(data[(data["month"] == int(month)) & (data["year"] == int(year))])
actual = statistics.mean(actual.iloc[:, 6].values)
return result, actual
else:
result = pd.DataFrame(data[(data["week"] == int(week)) & (data["year"] == int(year))])
result = result.iloc[:, 1].values
actual = pd.DataFrame(data[(data["week"] == int(week)) & (data["year"] == int(year))])
actual = actual.iloc[:, 6].values
return result, actual
if city == 'Philadelphia':
        data = pd.read_csv("pages/data/phl_actual_pred.csv")
"""
Author: <NAME>, Czech Academy of Sciences
This script searches the High Energy Astrophysics Science Archive Research Center
(HEASARC) archive for any archival X-ray data available for a user-provided list
of targets.
- Input:
Targets can be provided in a text file, with one source identifier (or set of
coordinates) per line. A combination of identifiers + coordinates is allowed,
but only one unique value per source is needed.
Note: script resolves names with Simbad by default. Future change should be to
allow the user to utilise the "name_resolver" field in the Perl script,
assuming this is possible in the Java one?
Coordinates can be given in the following forms:
- DEGREES: e.g., "105.0 54.7"
- SEXAGESIMAL: e.g., "12 29 06.70,02 03 08.6"
Make sure the coordinates are one per line in the form "RA DEC".
- Method:
This script parallelizes the search to avoid overwhelming the servers used
to search. Thus currently, the source list is split into chunks with a
maximum number of 10 sources per chunk. The script then parallelizes queries
over each chunk and stitches the results back together at the end.
- User-defined tables:
Any tables available on HEASARC can be queried. The user just needs to add
these to the "INSTRUMENTS.csv" file present in the folder, along with the
instrument, instrument offset (in arcmin) and name of the exposure column
(can be found by clicking on the corresponding instrument and table here:
https://heasarc.gsfc.nasa.gov/cgi-bin/W3Browse/w3browse.pl).
Note: the constraints column can be left blank, or the user can add their
own (at their own risk!).
- Summary file:
The script finishes by creating a summary file -- "SUMMARY.csv" that
gives the details of all X-ray data found for every source, in the
original order provided by the user.
    - To make this script work on other machines:
Change setupDir to the directory containing users.jar and INSTRUMENTS.csv
Then change to the directory containing your source list and use the command:
>>> xq -t my_sources.txt -w y
(-w y makes sure wget commands are found)
where my_sources.txt is the file containing the source list
MAKE SURE coordinate RA and dec are separated by a space!
    - Other links:
Full user guide for the Java script: https://heasarc.gsfc.nasa.gov/xamin/doc/CLIUsersGuide.html
"""
import os,sys,argparse,datetime,glob,multiprocessing,uuid,re,requests
import pandas as pd
import numpy as np
from tqdm import *
from astropy.time import Time
setupDir = "/Users/pboorman/Dropbox/data/0_analysis/xrayQuery/"
workingDir = os.getcwd()
def createSummary(outputDir,srcList,TABLES):
"""
    Create a summary file listing the data found for all sources, in the
    original order they were given. Sources with no data found are included
    in the summary for clarity.
"""
print("Creating summary file for all sources in original order given...")
fullSrcList=pd.read_csv("../"+srcList,names=["0INPUT"])
fullSrcList.loc[:,"1MATCHED_TARGET"]=np.repeat("NOT_FOUND",len(fullSrcList))
ins_abbreviations=["NUS","CHA","SUZ","XRT","XMM"]
totObs=0.
totTime=0.
with tqdm(total=len(TABLES)*len(fullSrcList)) as pbar:
for i,table in enumerate(TABLES):
INSTR=ins_abbreviations[i]
nObs="N_%(INSTR)s" %locals()
medOffset="medSep_%(INSTR)s" %locals()
medExp="medks_%(INSTR)s" %locals()
totExp="totks_%(INSTR)s" %locals()
fullSrcList.loc[:,nObs]=int(0)
fullSrcList.loc[:,medOffset]=0.
fullSrcList.loc[:,medExp]=0.
fullSrcList.loc[:,totExp]=0.
for s,src in fullSrcList.iterrows():
tempInst=pd.read_csv("%(table)s.csv" %locals(), dtype = "str")
specificObs=tempInst.loc[tempInst["b_target"]==src["0INPUT"]]
if len(specificObs)>0:
fullSrcList.loc[s,"1MATCHED_TARGET"]=specificObs["a_name"].values[0]
fullSrcList.loc[s,nObs]=str(len(specificObs))
fullSrcList.loc[s,medOffset]=np.round(specificObs["OFFSET_ARCMIN"].astype(float).median(),2)
fullSrcList.loc[s,medExp]=np.round(specificObs["EXPOSURE"].astype(float).median()/10**3,2)
fullSrcList.loc[s,totExp]=np.round(specificObs["EXPOSURE"].astype(float).sum()/10**3,2)
pbar.update()
totObs+=fullSrcList[nObs].astype(int).sum()
totTime+=fullSrcList[totExp].sum()
avObs=np.round(totObs/len(fullSrcList),1)
fullSrcList=fullSrcList.reindex(sorted(fullSrcList.columns), axis=1)
fullSrcList=fullSrcList.rename(columns={
"0INPUT":'INPUT',
"1MATCHED_TARGET":'MATCHED_TARGET'
})
    fullSrcList.to_csv("SUMMARY.csv",na_rep="NaN",index=False)
print("Done.")
return int(totObs),avObs,np.round(totTime,2)
def getInstrument_df(chunk_df):
"""
This generates a dataframe from the tables specified in the external
"INSTRUMENTS.csv" file.
Users can add additional tables here to search over.
Additional values are appended to the dataframe to use later.
"""
ID=str(uuid.uuid4().hex)
info_df=pd.read_csv(setupDir + "/INSTRUMENTS.csv").fillna("")
info_df.loc[:,"constraint"]=""
info_df.loc[info_df.instrument=="nustar","constraint"]="constraint='exposure_a > 1000'"
info_df=info_df.loc[info_df.table==chunk_df.TABLE.values[0]]
javaPath = setupDir + "/users.jar"
javaCmd="java -jar %(javaPath)s" %locals()
cwd=os.getcwd()
info_df.loc[:,"javaUpload"]=cwd+"/"+ID+"_sources.txt"
info_df.loc[:,"javaOutput"]=cwd+"/"+ID+"_"+info_df["table"]+".xls"
info_df.loc[:,"JAVA_CMD"]=javaCmd + \
" table="+info_df["table"].astype(str) + \
" upload="+info_df["javaUpload"].values[0] + \
" offset=a:b:"+info_df["offset_arcmin"].astype(str) + \
" showoffsets " + \
info_df["constraint"].astype(str) + \
" fields=" + columns + " " + \
"output="+info_df["javaOutput"].astype(str) + \
" format=excel"
chunk_df["0INPUT"].to_csv(info_df["javaUpload"].values[0],header=False,index=False)
## alternatively, fields=standard will return other columns
return info_df
def chunkSearch(chunk_df):
"""
This searches for data on a specific chunk of the original source list.
First save source list chunk to unique name.
... Then create instrument df with java command featuring the specific unique name.
"""
instrument_df=getInstrument_df(chunk_df)
runJavaCmd=instrument_df.JAVA_CMD.values[0]
javaUpload=instrument_df.javaUpload.values[0]
javaOutput=instrument_df.javaOutput.values[0]
instrument=instrument_df.instrument.values[0]
os.system(runJavaCmd)
xlsResFile=instrument_df.javaOutput.values[0]
if os.stat(xlsResFile).st_size != 0:
temp=pd.read_excel(xlsResFile,skiprows=1, dtype = "str").dropna(subset=["b_target"])
temp=temp.rename(columns={
instrument_df.exposure_column.values[0]:'EXPOSURE',
"a_obsid":'OBSID',
"_delta_ab":'OFFSET_ARCMIN',
"_delta_ba":'OFFSET_ARCMIN'})
csvResFile=xlsResFile.replace(".xls",".csv")
temp.to_csv(csvResFile,index=False)
os.remove(xlsResFile)
def parallelizeQueries(searchNames_df,FUNCTION=chunkSearch,NCORES=4):
"""
Split entire source list into chunks of maximum length maxChunkLen.
The function then parallelizes the queries of multiple chunks
depending on how many cores are available.
"""
maxChunkLen=10 ## maximum number of rows in a dataframe chunk
nChunk=int(np.ceil(len(searchNames_df)/maxChunkLen))
tab=searchNames_df.TABLE.values[0]
print("Searching %(tab)s..." %locals())
DATA_SPLIT=np.array_split(searchNames_df,nChunk)
with multiprocessing.Pool(processes=NCORES) as p:
with tqdm(total=nChunk) as pbar:
for i,_ in enumerate(p.imap_unordered(FUNCTION,DATA_SPLIT)):
pbar.update()
for file in glob.glob("*_sources.txt"):
os.remove(file)
def create_obstime_column(tab, df_in):
"""
Used to convert the MJD date of observation to human-readable form
"""
if tab == "swiftmastr":
time_column = "a_start_time"
else:
time_column = "a_time"
    df = df_in.copy()
    # Convert the MJD values once, then derive the individual date columns from it.
    obs_datetime = df.dropna(subset = [time_column])[time_column].apply(
        lambda x: Time(float(x), format = "mjd").to_datetime()
    )
    df.loc[:, "obs_year"] = obs_datetime.apply(lambda d: d.year)
    df.loc[:, "obs_month"] = obs_datetime.apply(lambda d: d.month)
    df.loc[:, "obs_day"] = obs_datetime.apply(lambda d: d.day)
    df.loc[:, "OBSTIME"] = obs_datetime.apply(lambda d: d.strftime("%Y%b%d"))
    return df
def startQueries(searchFile, wgetLocs = "n", filterData = "y"):
"""
Main function for running the query.
Reads the input target list, and starts the parallelization.
Finishes by creating summary file for every target searched,
irrespective of whether any data was found or not.
"""
start=datetime.datetime.now()
instruments= | pd.read_csv(setupDir + "/INSTRUMENTS.csv") | pandas.read_csv |
import os
from multiprocessing.pool import Pool
import pandas as pd
from lob_data_utils import lob, model
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
def svm_classification(df, gdf_columns) -> dict:
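    # Note: despite the function name, the estimator used here is
    # LogisticRegression (scores are saved under method='logistic' below).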
clf = LogisticRegression()
X = df.loc[:, gdf_columns]
y = df['mid_price_indicator'].values.reshape(-1, 1)
scores = model.validate_model(clf, X, y)
return scores
def main(stock):
"""
This gets gdf_data
:return:
"""
K = 50
length = 15000
rr = [0.01, 0.05, 0.1, 0.5, 1.0]
ss = [0.01, 0.05, 0.1, 0.5, 1.0]
gdf_data_dir = 'data_gdf'
results_dir = 'data_res_logistic'
gdf_start = 0
gdf_end = 50
algorithm = 'logistic'
results = []
results_filename = os.path.join(
results_dir, 'res_log_{}_len{}_K{}-{}.csv'.format(stock, length, gdf_start, gdf_end))
results_partial_filename = os.path.join(
results_dir, 'res_log_{}_len{}_K{}-{}_partial.csv'.format(stock, length, gdf_start, gdf_end))
for r in rr:
for s in ss:
gdf_filename = 'gdf_{}_len{}_r{}_s{}_K{}'.format(stock, length, r, s, K)
dfs, dfs_test = lob.load_prepared_data(gdf_filename, data_dir=gdf_data_dir, cv=False, length=length)
gdf_columns = ['gdf_' + str(i) for i in range(gdf_start, gdf_end)]
res = {'r': r, 's': s, 'stock': stock, 'K': K, 'method': algorithm}
print('********************************************')
print(res)
try:
scores = svm_classification(dfs, gdf_columns)
print(res, scores)
results.append({**res, **scores})
except Exception as e:
print('Exception', e, res)
results.append(res)
| pd.DataFrame(results) | pandas.DataFrame |
import numpy as _np
from scipy.stats import sem as _sem
import pandas as _pd
import matplotlib.pyplot as _plt
from nicepy import format_fig as _ff, format_ax as _fa
class TofData:
"""
General class for TOF data
"""
def __init__(self, filename, params, norm=True, noise_range=(3, 8), bkg_range=(3, 8), fluor=True, factor=0.92152588, offset=-0.36290086):
"""
:param filename:
:param params:
:param norm:
:param noise_range:
:param bkg_range:
:param fluor:
:param factor:
:param offset:
"""
self.filename = filename
self.idx = False
self.norm = norm
self.noise_range = noise_range
self.bkg_range = bkg_range
self.factor = factor
self.offset = offset
self.fluor = fluor
self._get_data(filename)
self._subtract_bkg()
self._get_noise()
self._get_params(filename, params)
self.peaks = None
def _get_data(self, filename):
"""
:param filename:
:return:
"""
dat = list(_np.loadtxt(filename))
fluor = dat.pop()
if self.fluor is False:
fluor = 1
center = int(len(dat) / 2)
time = _np.array([s for s in dat[:center]])
mass = self._time_to_mass(time, self.factor, self.offset)
raw = _np.array([-i / fluor for i in dat[center:]]) / fluor
raw = _pd.DataFrame({'Time': time, 'Mass': mass, 'Volts': raw})
if self.norm is True:
tot = raw['Volts'].sum()
raw['Volts'] = raw['Volts']/tot
self.raw = raw
def _subtract_bkg(self):
"""
:return:
"""
temp = self._select_range('Mass', self.bkg_range[0], self.bkg_range[1])['Volts']
m = temp.mean()
self.raw['Volts'] = self.raw['Volts'] - m
def _get_noise(self):
"""
:return:
"""
temp = self._select_range('Mass', self.noise_range[0], self.noise_range[1])['Volts']
n = temp.std()
self.noise = n
def _get_params(self, filename, params):
"""
:param filename:
:param params:
:return:
"""
listed = filename.replace('.txt', '').split('_')
temp = {key: listed[val] for key, val in params.items()}
self.params = {}
for key, val in temp.items():
if key.lower() == 'version':
val = val.lower()
val = val.replace('v', '')
else:
pass
if '.' in val or 'e' in val:
try:
val = float(val)
except ValueError:
pass
else:
try:
val = int(val)
except ValueError:
val = val.lower()
self.params[key] = val
self.params = _pd.Series(self.params)
def _select_range(self, column, lower, upper):
"""
Selects part of data that is between values upper and lower in column
        :param column: column name used to bound the selection
:param lower: lower value in column
:param upper: upper value in column
:return: parsed data frame
"""
temp = self.raw[(self.raw[column] <= upper) & (self.raw[column] >= lower)]
return temp
def _get_closest(self, column, value):
"""
:param column:
:param value:
:return:
"""
temp = self.raw.loc[(self.raw[column] - value).abs().idxmin()]
return temp
def _get_range(self, mass, pk_range=(-80, 80)):
"""
:param mass:
:param pk_range:
:return:
"""
idx = self._get_closest('Mass', mass).name
lower = idx + pk_range[0]
if lower < 0:
lower = 0
upper = idx + pk_range[1]
if upper > self.raw.index.max():
upper = self.raw.index.max()
return lower, upper
def _get_peak(self, lower, upper):
"""
:param lower:
:param upper:
:return:
"""
temp = self.raw.loc[range(lower, upper + 1)]
p = temp['Volts'].sum()
if p < self.noise:
p = 0
return p
def get_peaks(self, masses, **kwargs):
"""
:param masses:
:param kwargs:
:return:
"""
self.peaks = {}
self.idx = {}
for key, val in masses.items():
lower, upper = self._get_range(val, **kwargs)
self.peaks[key] = self._get_peak(lower, upper)
self.idx[key] = (lower, val, upper)
self.peaks = _pd.Series(self.peaks)
self.idx = _pd.Series(self.idx)
@staticmethod
def _time_to_mass(time, factor, offset):
mass = [(t - factor) ** 2 + offset for t in time]
return mass
def show(self, x='Mass', shade=True, **kwargs):
"""
:param x:
:param shade:
:param kwargs:
:return:
"""
fig, ax = _plt.subplots()
title = {key: val for key, val in self.params.items()}
self.raw.plot.line(x=x, y='Volts', title='%s' % title, color='black', ax=ax, **kwargs)
if shade is True:
if self.idx is not False:
for key, val in self.idx.items():
idx_range = self.raw.loc[range(val[0], val[2] + 1)]
ax.fill_between(idx_range[x], idx_range['Volts'], label=key, alpha=0.5)
ax.legend(loc=0)
else:
pass
else:
pass
ax.legend(loc=0)
return fig, ax
class TofSet:
def __init__(self, filenames, params, **kwargs):
"""
:param filenames:
:param params:
:param kwargs:
"""
self.filenames = filenames
self.params = params
self._get_tofs(**kwargs)
self._get_raw()
self.idx = False
self.peaks = None
def _get_tofs(self, **kwargs):
"""
:param kwargs:
:return:
"""
tof_list = []
for filename in self.filenames:
t = TofData(filename, self.params, **kwargs)
tof_list.append(t)
tof_objs = []
for t in tof_list:
temp = t.params.copy()
temp['tof'] = t
tof_objs.append(temp)
temp = _pd.DataFrame(tof_objs)
temp.set_index(list(self.params.keys()), inplace=True)
self.tof_objs = temp.sort_index()
def _get_raw(self):
"""
:return:
"""
# temp = self.tof_objs.copy()
# temp['raw'] = [t.raw for t in temp['tof']]
# self.raw = temp.drop('tof', axis=1)
temp = self.tof_objs.copy()
raw = []
for tof in temp['tof']:
t = tof.raw
for key, val in tof.params.items():
t[key] = val
raw.append(t)
self.raw = _pd.concat(raw)
self.raw.set_index(list(self.params.keys()), inplace=True)
self.raw.sort_index(inplace=True)
def _get_tof_peaks(self, masses, **kwargs):
"""
:param masses:
:param kwargs:
:return:
"""
for t in self.tof_objs['tof']:
t.get_peaks(masses, **kwargs)
self.idx = t.idx
def get_peaks(self, masses, **kwargs):
"""
:param masses:
:param kwargs:
:return:
"""
self._get_tof_peaks(masses, **kwargs)
temp_list = []
for t in self.tof_objs['tof']:
temp = _pd.concat([t.peaks, t.params])
temp_list.append(temp)
temp = _pd.concat(temp_list, axis=1)
self.peaks = temp.transpose()
self.peaks.set_index(list(self.params.keys()), inplace=True)
self.peaks.sort_index(inplace=True)
# self.peaks['total'] = self.peaks.sum(axis=1)
def get_raw_means(self, levels=None):
if levels is None:
levels = []
for key in ['version', 'delay']:
if key in self.params.keys():
levels.append(key)
else:
pass
self.levels = levels
grouped = self.tof_objs.groupby(levels)
temp_mean = []
temp_error = []
for indices, group in grouped:
times = _np.mean([tof.raw['Time'] for tof in group['tof']], axis=0)
masses = _np.mean([tof.raw['Mass'] for tof in group['tof']], axis=0)
volts = _np.mean([tof.raw['Volts'] for tof in group['tof']], axis=0)
errors = _sem([tof.raw['Volts'] for tof in group['tof']], axis=0)
df_mean = _pd.DataFrame({'Time': times, 'Mass': masses, 'Volts': volts})
df_error = _pd.DataFrame({'Time': times, 'Mass': masses, 'Volts': errors})
if type(indices) is not tuple:
indices = [indices]
for key, index in zip(levels, indices):
df_mean[key] = index
df_error[key] = index
temp_mean.append(df_mean)
temp_error.append(df_error)
self.raw_means = _pd.concat(temp_mean)
self.raw_errors = | _pd.concat(temp_error) | pandas.concat |
from typing import cast
import pandas as pd
from hooqu.constraints import (
AnalysisBasedConstraint,
completeness_constraint,
compliance_constraint,
max_constraint,
mean_constraint,
min_constraint,
quantile_constraint,
size_constraint,
standard_deviation_constraint,
sum_constraint,
uniqueness_constraint,
)
from hooqu.constraints.constraint import (
ConstraintDecorator,
ConstraintResult,
ConstraintStatus,
)
def calculate(constraint: AnalysisBasedConstraint, df) -> ConstraintResult:
if isinstance(constraint, ConstraintDecorator):
constraint = cast(AnalysisBasedConstraint, constraint.inner)
return constraint.calculate_and_evaluate(df)
def test_completeness_constraint(df_missing):
df = df_missing
assert (
calculate(completeness_constraint("att1", lambda v: v == 0.5), df).status
== ConstraintStatus.SUCCESS
)
assert (
calculate(completeness_constraint("att1", lambda v: v != 0.5), df).status
== ConstraintStatus.FAILURE
)
assert (
calculate(completeness_constraint("att2", lambda v: v == 0.75), df).status
== ConstraintStatus.SUCCESS
)
assert (
calculate(completeness_constraint("att2", lambda v: v != 0.75), df).status
== ConstraintStatus.FAILURE
)
def test_basic_stats_constraints(df_with_numeric_values):
df = df_with_numeric_values
assert (
calculate(min_constraint("att1", lambda v: v == 1.0), df).status
== ConstraintStatus.SUCCESS
)
assert (
calculate(max_constraint("att1", lambda v: v == 6.0), df).status
== ConstraintStatus.SUCCESS
)
assert (
calculate(mean_constraint("att1", lambda v: v == 3.5), df).status
== ConstraintStatus.SUCCESS
)
assert (
calculate(sum_constraint("att1", lambda v: v == 21.0), df).status
== ConstraintStatus.SUCCESS
)
assert (
calculate(mean_constraint("att1", lambda v: v == 3.5), df).status
== ConstraintStatus.SUCCESS
)
assert (
calculate(
standard_deviation_constraint("att1", lambda v: v == 1.707825127659933), df
).status
== ConstraintStatus.SUCCESS
)
assert (
calculate(quantile_constraint("att1", 0.5, lambda v: v == 3.0), df).status
== ConstraintStatus.SUCCESS
)
def test_size_constraint(df_missing):
df = df_missing
assert (
calculate(size_constraint(lambda v: v == len(df)), df).status
== ConstraintStatus.SUCCESS
)
def test_compliance_constraint(df_with_numeric_values):
df = df_with_numeric_values
assert (
calculate(
compliance_constraint("rule1", "att1 > 2 ", lambda pct: pct >= 0.6), df
).status
== ConstraintStatus.SUCCESS
)
assert (
calculate(
compliance_constraint("rule1", "att1 > 2 ", lambda pct: pct >= 0.9), df
).status
== ConstraintStatus.FAILURE
)
def test_uniqueness_constraint():
df_nunique = | pd.DataFrame({"att1": [0, 1, 2, 5, 5]}) | pandas.DataFrame |
#!/usr/bin/env python
'''Run a reblocking analysis on pauxy QMC output files.'''
import glob
import h5py
import json
import numpy
import pandas as pd
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import pyblock
import scipy.stats
from pauxy.analysis.extraction import (
extract_mixed_estimates,
extract_data,
get_metadata, set_info,
extract_rdm
)
from pauxy.utils.misc import get_from_dict
from pauxy.utils.linalg import get_ortho_ao_mod
from pauxy.analysis.autocorr import reblock_by_autocorr
def average_single(frame, delete=True, multi_sym=False):
if multi_sym:
short = frame.groupby('Iteration')
else:
short = frame
means = short.mean()
err = short.aggregate(lambda x: scipy.stats.sem(x, ddof=1))
averaged = means.merge(err, left_index=True, right_index=True,
suffixes=('', '_error'))
columns = [c for c in averaged.columns.values if '_error' not in c]
columns = [[c, c+'_error'] for c in columns]
columns = [item for sublist in columns for item in sublist]
averaged.reset_index(inplace=True)
delcol = ['Weight', 'Weight_error']
for d in delcol:
if delete:
columns.remove(d)
return averaged[columns]
def average_ratio(numerator, denominator):
re_num = numerator.real
re_den = denominator.real
im_num = numerator.imag
im_den = denominator.imag
# When doing FP we need to compute E = \bar{ENumer} / \bar{EDenom}
# Only compute real part of the energy
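    # using Re(N/D) = (Re N * Re D + Im N * Im D) / ((Re D)^2 + (Im D)^2),
    # i.e. multiplying through by conj(D)/|D|^2 and keeping the real part.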
num_av = (re_num.mean()*re_den.mean()+im_num.mean()*im_den.mean())
den_av = (re_den.mean()**2 + im_den.mean()**2)
mean = num_av / den_av
# Doing error analysis properly is complicated. This is not correct.
re_nume = scipy.stats.sem(re_num)
re_dene = scipy.stats.sem(re_den)
# Ignoring the fact that the mean includes complex components.
cov = numpy.cov(re_num, re_den)[0,1]
nsmpl = len(re_num)
error = abs(mean) * ((re_nume/re_num.mean())**2 +
(re_dene/re_den.mean())**2 -
2*cov/(nsmpl*re_num.mean()*re_den.mean()))**0.5
return (mean, error)
def average_fp(frame):
iteration = numpy.real(frame['Iteration'].values)
frame = frame.drop('Iteration', axis=1)
real_df = frame.apply(lambda x: x.real)
imag_df = frame.apply(lambda x: x.imag)
real_df['Iteration'] = iteration
imag_df['Iteration'] = iteration
real = average_single(real_df, multi_sym=True)
imag = average_single(imag_df, multi_sym=True)
results = pd.DataFrame()
re_num = real.ENumer.values
re_den = real.EDenom.values
im_num = imag.ENumer.values
im_den = imag.EDenom.values
results['Iteration'] = sorted(real_df.groupby('Iteration').groups.keys())
# When doing FP we need to compute E = \bar{ENumer} / \bar{EDenom}
# Only compute real part of the energy
results['E'] = (re_num*re_den+im_num*im_den) / (re_den**2 + im_den**2)
# Doing error analysis properly is complicated. This is not correct.
re_nume = real.ENumer_error.values
re_dene = real.EDenom_error.values
# Ignoring the fact that the mean includes complex components.
cov_nd = real_df.groupby('Iteration').apply(lambda x: x['ENumer'].cov(x['EDenom'])).values
nsamp = len(re_nume)
results['E_error'] = numpy.abs(results.E) * ((re_nume/re_num)**2 +
(re_dene/re_den)**2 -
2*cov_nd/(nsamp*re_num*re_den))**0.5
return results
def reblock_mixed(groupby, columns, verbose=False):
analysed = []
for group, frame in groupby:
drop = ['index', 'Time', 'EDenom', 'ENumer', 'Weight', 'Overlap',
'WeightFactor', 'EHybrid']
if not verbose:
drop += ['E1Body', 'E2Body']
short = frame.reset_index()
try:
short = short.drop(columns+drop, axis=1)
except KeyError:
short = short.drop(columns+['index'], axis=1)
(data_len, blocked_data, covariance) = pyblock.pd_utils.reblock(short)
reblocked = pd.DataFrame({'ETotal': [0.0]})
for c in short.columns:
try:
rb = pyblock.pd_utils.reblock_summary(blocked_data.loc[:,c])
reblocked[c] = rb['mean'].values[0]
reblocked[c+'_error'] = rb['standard error'].values
reblocked[c+'_error_error'] = rb['standard error error'].values
ix = list(blocked_data[c]['optimal block']).index('<--- ')
reblocked[c+'_nsamp'] = data_len.values[ix]
except KeyError:
if verbose:
print("Reblocking of {:4} failed. Insufficient "
"statistics.".format(c))
for i, v in enumerate(group):
reblocked[columns[i]] = v
analysed.append(reblocked)
final = pd.concat(analysed, sort=True)
y = short["ETotal"].values
reblocked_ac = reblock_by_autocorr(y)
for c in reblocked_ac.columns:
final[c] = reblocked_ac[c].values
return final
def reblock_free_projection(frame):
short = frame.drop(['Time', 'Weight', 'ETotal'], axis=1)
analysed = []
(data_len, blocked_data, covariance) = pyblock.pd_utils.reblock(short)
reblocked = | pd.DataFrame() | pandas.DataFrame |
from can_tools.models import Base
import os
import pickle
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Optional, Type
import pandas as pd
import us
from sqlalchemy.engine.base import Engine
# `us` v2.0 removed DC from the `us.STATES` list, so we are creating
# our own which includes DC. In v3.0, there will be an env option to
# include `DC` in the `us.STATES` list and, if we upgrade, we should
# activate that option and replace this with just `us.STATES`
ALL_STATES_PLUS_DC = us.STATES + [us.states.DC]
class CMU:
def __init__(
self,
category="cases",
measurement="cumulative",
unit="people",
age="all",
race="all",
ethnicity="all",
sex="all",
):
self.category = category
self.measurement = measurement
self.unit = unit
self.age = age
self.race = race
self.ethnicity = ethnicity
self.sex = sex
class DatasetBase(ABC):
"""
Attributes
----------
autodag: bool = True
Whether an airflow dag should be automatically generated for this class
data_type: str = "general"
The type of data for this scraper. This is often set to "covid"
by subclasses
table: Type[Base]
The SQLAlchemy base table where this data should be inserted
location_type: Optional[str]
Optional information used when a scraper only retrieves data about a
single type of geography. It will set the `"location_type"` column
to this value (when performing the `put`) if `"location_type"` is not
already set in the df
"""
autodag: bool = True
data_type: str = "general"
table: Type[Base]
location_type: Optional[str]
base_path: Path
def __init__(self, execution_dt: pd.Timestamp = | pd.Timestamp.utcnow() | pandas.Timestamp.utcnow |
"""Provides a :class:`BaseMapper` class for mapping stock and mutual
fund data from the SEC."""
import time
from collections import defaultdict
from pathlib import Path
from typing import ClassVar, Dict, List, Union, cast
import pandas as pd
import requests
from .retrievers import MutualFundRetriever, StockRetriever
from .types import CompanyData, FieldIndices, Fields, KeyToValueSet
from .utils import with_cache
class BaseMapper:
"""A :class:`BaseMapper` object."""
_headers: ClassVar[Dict[str, str]] = {
"User-Agent": f"{int(time.time())} {int(time.time())}<EMAIL>",
"Accept-Encoding": "gzip, deflate",
"Host": "www.sec.gov",
}
def __init__(self, retriever: Union[StockRetriever, MutualFundRetriever]) -> None:
"""Constructor for the :class:`BaseMapper` class."""
self.retriever = retriever
self.mapping_metadata = self._get_mapping_metadata_from_sec()
def __new__(cls, *args, **kwargs):
"""BaseMapper should not be directly instantiated,
so throw an error on instantiation.
More info: https://stackoverflow.com/a/7990308/3820660
"""
if cls is BaseMapper:
raise TypeError(
f"{cls.__name__} cannot be directly instantiated. "
"Please instantiate the StockMapper and/or MutualFundMapper "
"classes instead."
)
        return object.__new__(cls)
def _get_indices_from_fields(self, fields: Fields) -> FieldIndices:
"""Get list indices from field names."""
field_indices = {field: fields.index(field) for field in fields}
return cast(FieldIndices, field_indices)
def _get_mapping_metadata_from_sec(self) -> pd.DataFrame:
"""Get company mapping metadata from the SEC as a pandas dataframe,
sorted by CIK and ticker.
"""
resp = requests.get(self.retriever.source_url, headers=BaseMapper._headers)
resp.raise_for_status()
data = resp.json()
fields: Fields = data["fields"]
field_indices: FieldIndices = self._get_indices_from_fields(fields)
company_data: CompanyData = data["data"]
transformed_data: List[Dict[str, str]] = []
for cd in company_data:
transformed_data.append(
self.retriever.transform(field_indices, cd),
)
df = | pd.DataFrame(transformed_data) | pandas.DataFrame |
# Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import Any, Dict, Union
import numpy as np
import pandas as pd
from google.protobuf.json_format import MessageToDict
from feast.protos.feast.types.Value_pb2 import (
BoolList,
BytesList,
DoubleList,
FloatList,
Int32List,
Int64List,
StringList,
)
from feast.protos.feast.types.Value_pb2 import Value as ProtoValue
from feast.value_type import ValueType
def feast_value_type_to_python_type(field_value_proto: ProtoValue) -> Any:
"""
Converts field value Proto to Dict and returns each field's Feast Value Type value
in their respective Python value.
Args:
field_value_proto: Field value Proto
Returns:
Python native type representation/version of the given field_value_proto
"""
field_value_dict = MessageToDict(field_value_proto)
for k, v in field_value_dict.items():
if k == "int64Val":
return int(v)
if k == "bytesVal":
return bytes(v)
if (k == "int64ListVal") or (k == "int32ListVal"):
return [int(item) for item in v["val"]]
if (k == "floatListVal") or (k == "doubleListVal"):
return [float(item) for item in v["val"]]
if k == "stringListVal":
return [str(item) for item in v["val"]]
if k == "bytesListVal":
return [bytes(item) for item in v["val"]]
if k == "boolListVal":
return [bool(item) for item in v["val"]]
if k in ["int32Val", "floatVal", "doubleVal", "stringVal", "boolVal"]:
return v
else:
raise TypeError(
f"Casting to Python native type for type {k} failed. "
f"Type {k} not found"
)
def python_type_to_feast_value_type(
name: str, value, recurse: bool = True
) -> ValueType:
"""
Finds the equivalent Feast Value Type for a Python value. Both native
and Pandas types are supported. This function will recursively look
for nested types when arrays are detected. All types must be homogenous.
Args:
name: Name of the value or field
value: Value that will be inspected
recurse: Whether to recursively look for nested types in arrays
Returns:
Feast Value Type
"""
type_name = type(value).__name__
type_map = {
"int": ValueType.INT64,
"str": ValueType.STRING,
"float": ValueType.DOUBLE,
"bytes": ValueType.BYTES,
"float64": ValueType.DOUBLE,
"float32": ValueType.FLOAT,
"int64": ValueType.INT64,
"uint64": ValueType.INT64,
"int32": ValueType.INT32,
"uint32": ValueType.INT32,
"uint8": ValueType.INT32,
"int8": ValueType.INT32,
"bool": ValueType.BOOL,
"timedelta": ValueType.UNIX_TIMESTAMP,
"datetime64[ns]": ValueType.UNIX_TIMESTAMP,
"datetime64[ns, tz]": ValueType.UNIX_TIMESTAMP,
"category": ValueType.STRING,
}
if type_name in type_map:
return type_map[type_name]
if type_name == "ndarray" or isinstance(value, list):
if recurse:
# Convert to list type
list_items = | pd.core.series.Series(value) | pandas.core.series.Series |
import os
import copy
import pytest
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather as pf
from pyarrow import parquet as pq
from time_series_transform.io.base import io_base
from time_series_transform.io.numpy import (
from_numpy,
to_numpy
)
from time_series_transform.io.pandas import (
from_pandas,
to_pandas
)
from time_series_transform.io.arrow import (
from_arrow_record_batch,
from_arrow_table,
to_arrow_record_batch,
to_arrow_table
)
from time_series_transform.transform_core_api.base import (
Time_Series_Data,
Time_Series_Data_Collection
)
from time_series_transform.io.parquet import (
from_parquet,
to_parquet
)
from time_series_transform.io.feather import (
from_feather,
to_feather
)
@pytest.fixture(scope = 'class')
def dictList_single():
return {
'time': [1, 2],
'data': [1, 2]
}
@pytest.fixture(scope = 'class')
def dictList_collection():
return {
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_single_expandTime():
return {
'data_1':[1],
'data_2':[2]
}
@pytest.fixture(scope = 'class')
def expect_single_seperateLabel():
return [{
'time': [1, 2],
'data': [1, 2]
},
{
'data_label': [1, 2]
}]
@pytest.fixture(scope = 'class')
def expect_collection_seperateLabel():
return [{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
{
'data_label':[1,2,1,2]
}
]
@pytest.fixture(scope = 'class')
def expect_collection_expandTime():
return {
'pad': {
'data_1':[1,1],
'data_2':[2,np.nan],
'data_3':[np.nan,2],
'category':[1,2]
},
'remove': {
'data_1':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandCategory():
return {
'pad': {
'time':[1,2,3],
'data_1':[1,2,np.nan],
'data_2':[1,np.nan,2]
},
'remove': {
'time':[1],
'data_1':[1],
'data_2':[1]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandFull():
return {
'pad': {
'data_1_1':[1],
'data_2_1':[1],
'data_1_2':[2],
'data_2_2':[np.nan],
'data_1_3':[np.nan],
'data_2_3':[2]
},
'remove': {
'data_1_1':[1],
'data_2_1':[1],
}
}
@pytest.fixture(scope = 'class')
def expect_collection_noExpand():
return {
'ignore':{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
'pad': {
'time': [1,2,3,1,2,3],
'data':[1,2,np.nan,1,np.nan,2],
'category':[1,1,1,2,2,2]
},
'remove': {
'time': [1,1],
'data':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def seq_single():
return {
'time':[1,2,3],
'data':[[1,2,3],[11,12,13],[21,22,23]]
}
@pytest.fixture(scope = 'class')
def seq_collection():
return {
'time':[1,2,1,2],
'data':[[1,2],[1,2],[2,2],[2,2]],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_seq_collection():
return {
'data_1_1':[[1,2]],
'data_2_1':[[2,2]],
'data_1_2':[[1,2]],
'data_2_2':[[2,2]]
}
class Test_base_io:
def test_base_io_from_single(self, dictList_single,expect_single_expandTime):
ExpandTimeAns = expect_single_expandTime
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(ts, 'time', None)
timeSeries = io.from_single(False)
for i in timeSeries:
assert timeSeries[i].tolist() == data[i]
timeSeries = io.from_single(True)
for i in timeSeries:
assert timeSeries[i] == ExpandTimeAns[i]
def test_base_io_to_single(self, dictList_single):
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(data, 'time', None)
assert io.to_single() == ts
def test_base_io_from_collection_expandTime(self, dictList_collection,expect_collection_expandTime):
noChange = dictList_collection
expand = expect_collection_expandTime
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(False,True,'ignore')
timeSeries = io.from_collection(False,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandCategory(self, dictList_collection,expect_collection_expandCategory):
noChange = dictList_collection
expand = expect_collection_expandCategory
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(True,False,'ignore')
timeSeries = io.from_collection(True,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandFull(self, dictList_collection,expect_collection_expandFull):
noChange = dictList_collection
expand = expect_collection_expandFull
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(True,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_to_collection(self, dictList_collection):
dataList = dictList_collection
io = io_base(dataList, 'time', 'category')
testData = io.to_collection()
tsd = Time_Series_Data(dataList,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
assert testData== tsc
def test_base_io_from_collection_no_expand(self,dictList_collection,expect_collection_noExpand):
noChange = dictList_collection
expand = expect_collection_noExpand
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(False,False,'ignore')
for i in timeSeries:
np.testing.assert_array_equal(timeSeries[i],expand['ignore'][i])
timeSeries = io.from_collection(False,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
class Test_Pandas_IO:
def test_from_pandas_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
tsd = Time_Series_Data(data,'time')
testData = from_pandas(df,'time',None)
assert tsd == testData
def test_from_pandas_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = from_pandas(df,'time','category')
assert tsc == testData
def test_to_pandas_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_pandas_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_pandas_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_pandas_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_pandas_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
)
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_pandas_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_pandas(tsd,False,False,'ignore',True)
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_pandas(tsc,False,False,'ignore',True)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
test = to_pandas(tsd,False,False,'ignore',False)
pd.testing.assert_frame_equal(test,df,False)
def test_to_pandas_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_pandas(tsc,False,False,'ignore')
pd.testing.assert_frame_equal(df,test,False)
test = to_pandas(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
test = test.reindex(sorted(df.columns), axis=1)
full = full.reindex(sorted(df.columns), axis=1)
pd.testing.assert_frame_equal(test,full,False)
class Test_Numpy_IO:
def test_from_numpy_single(self,dictList_single):
data = dictList_single
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
numpydata = pd.DataFrame(dictList_single).values
testData = from_numpy(numpydata,0,None)
assert tsd == testData
def test_from_numpy_collection(self,dictList_collection):
data = dictList_collection
numpyData = pd.DataFrame(data).values
numpyDataDict = pd.DataFrame(pd.DataFrame(data).values).to_dict('list')
testData = from_numpy(numpyData,0,2)
tsd = Time_Series_Data(numpyDataDict,0)
assert testData == Time_Series_Data_Collection(tsd,0,2)
def test_to_numpy_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
numpyData = pd.DataFrame(data).values
expandTime = pd.DataFrame(expect_single_expandTime).values
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
np.testing.assert_equal(testData,numpyData)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
np.testing.assert_equal(testData,expandTime)
def test_to_numpy_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
results = expect_collection_expandTime
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
results = expect_collection_expandCategory
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
results = expect_collection_expandFull
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
def test_to_numpy_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
results = expect_collection_noExpand
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
ignore_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='ignore'
)
np.testing.assert_equal(ignore_numpy,pd.DataFrame(results['ignore']).values)
def test_to_numpy_seperateLabel_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_numpy(tsd,False,False,'ignore',True)
print(x)
print(y)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_seperateLabel_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_numpy(tsc,False,False,'ignore',True)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
test = to_numpy(tsd,False,False,'ignore',False)
np.testing.assert_equal(df,test)
def test_to_numpy_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_numpy(tsc,False,False,'ignore')
for i in range(len(test)):
if isinstance(test[i][1],np.ndarray):
test[i][1] = test[i][1].tolist()
np.testing.assert_equal(df,test)
test = to_numpy(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection).values
for i in range(len(test[0])):
if isinstance(test[0][i],np.ndarray):
test[0][i] = test[0][i].tolist()
np.testing.assert_equal(full,test)
class Test_Arrow_IO:
def test_from_arrow_table_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_table_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time','category')
assert tsc == testData
def test_from_arrow_batch_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_batch_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time','category')
assert tsc == testData
def test_to_arrow_table_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_arrow_table_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
tsc = Time_Series_Data_Collection(tsd,'time','category')
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_arrow_table_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_arrow_table_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_arrow_table_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_arrow_table_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_arrow_table(tsd,False,False,'ignore',True)
x = x.to_pandas()
y = y.to_pandas()
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_arrow_table(tsc,False,False,'ignore',True)
x = x.to_pandas()
y = y.to_pandas()
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
test = to_arrow_table(tsd,False,False,'ignore',False).to_pandas()
pd.testing.assert_frame_equal(test,df,False)
def test_to_arrow_table_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_arrow_table(tsc,False,False,'ignore').to_pandas()
pd.testing.assert_frame_equal(df,test,False)
test = to_arrow_table(tsc,True,True,'ignore').to_pandas()
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
test = test.reindex(sorted(df.columns), axis=1)
full = full.reindex(sorted(df.columns), axis=1)
pd.testing.assert_frame_equal(test,full,False)
###
def record_batch_to_pandas(self,batchList):
df = None
for i in batchList:
if df is None:
df = i.to_pandas()
continue
df = df.append(i.to_pandas(),ignore_index = True)
return df
def test_to_arrow_batch_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_arrow_record_batch(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None,
max_chunksize = 1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_arrow_record_batch(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None,
max_chunksize = 1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_arrow_batch_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_arrow_batch_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_arrow_batch_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_arrow_batch_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_arrow_batch_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_arrow_record_batch(tsd,1,False,False,'ignore',True)
x = self.record_batch_to_pandas(x)
y = self.record_batch_to_pandas(y)
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
    def test_to_arrow_batch_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_arrow_record_batch(tsc,1,False,False,'ignore',True)
x = self.record_batch_to_pandas(x)
y = self.record_batch_to_pandas(y)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
class Test_Parquet_IO:
def test_from_parquet_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
pq.write_table(table,'test.parquet')
testData = from_parquet('test.parquet','time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
os.remove('test.parquet')
def test_from_parquet_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.Table.from_pandas(df)
pq.write_table(table,'test_collection.parquet')
testData = from_parquet('test_collection.parquet','time','category')
assert tsc == testData
os.remove('test_collection.parquet')
###########
def test_to_parquet_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
to_parquet(
'test.parquet',
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
to_parquet(
'test.parquet',
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
os.remove('test.parquet')
def test_to_parquet_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_parquet('test.parquet',tsc,False,True,'ignore')
os.remove('test.parquet')
def test_to_parquet_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_parquet('test.parquet',tsc,True,False,'ignore')
os.remove('test.parquet')
def test_to_parquet_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
        with pytest.raises(ValueError):
            timeSeries = to_parquet('test.parquet',tsc,True,True,'ignore')
        os.remove('test.parquet')
def test_to_parquet_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
os.remove('test.parquet')
def test_to_parquet_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
to_parquet(['test.parquet','label.parquet'],tsd,False,False,'ignore',True)
x = pq.read_table('test.parquet').to_pandas()
y = pq.read_table('label.parquet').to_pandas()
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
os.remove('test.parquet')
os.remove('label.parquet')
def test_to_parquet_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
to_parquet(['test.parquet','label.parquet'],tsc,False,False,'ignore',True)
x = pq.read_table('test.parquet').to_pandas()
y = pq.read_table('label.parquet').to_pandas()
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
os.remove('test.parquet')
os.remove('label.parquet')
def test_to_parquet_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
to_parquet('test.parquet',tsd,False,False,'ignore',False)
test = pq.read_table('test.parquet').to_pandas()
        pd.testing.assert_frame_equal(test,df,check_dtype=False)
os.remove('test.parquet')
def test_to_parquet_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
to_parquet('test.parquet',tsc,False,False,'ignore')
test = pq.read_table('test.parquet').to_pandas()
        pd.testing.assert_frame_equal(df,test,check_dtype=False)
to_parquet('test.parquet',tsc,True,True,'ignore')
test = pq.read_table('test.parquet').to_pandas()
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
test = test.reindex(sorted(df.columns), axis=1)
full = full.reindex(sorted(df.columns), axis=1)
        pd.testing.assert_frame_equal(test,full,check_dtype=False)
os.remove('test.parquet')
class Test_Generator_IO:
def test_from_generator(self):
pass
def test_to_generator(self):
pass
class Test_Feather_IO:
def test_from_feather_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
pf.write_feather(table,'test.feather')
testData = from_feather('test.feather','time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
os.remove('test.feather')
def test_from_feather_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.Table.from_pandas(df)
pf.write_feather(table,'test_collection.feather')
testData = from_feather('test_collection.feather','time','category')
assert tsc == testData
os.remove('test_collection.feather')
###########
def test_to_feather_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
to_feather(
'test.feather',
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
testData = pf.read_table('test.feather').to_pandas()
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
to_feather(
'test.feather',
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
testData = pf.read_table('test.feather').to_pandas()
| pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False) | pandas.testing.assert_frame_equal |
import pandas as pd
import numpy as np
import datetime
import pycountry
def get_vacc_data():
vaccine_data = pd.read_csv('https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/vaccinations.csv')
vaccine_loc = pd.read_csv('https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/locations.csv')
df_vaccine = pd.merge(vaccine_data, vaccine_loc, on=["location", "iso_code"])
    df_vaccine = df_vaccine.drop(['daily_vaccinations_raw'], axis=1)
df_vaccine['date'] = pd.to_datetime(df_vaccine['date'])
df_vaccine = df_vaccine.sort_values('date', ascending=True)
df_vaccine['date'] = df_vaccine['date'].dt.strftime('%Y-%m-%d')
df_vaccine = df_vaccine.rename(columns={'location': 'country'})
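    # Forward-fill the cumulative vaccination columns within each country, then fill remaining leading NaNs with 0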
for iso_code in df_vaccine['iso_code'].unique():
df_vaccine.loc[df_vaccine['iso_code'] == iso_code, :] = df_vaccine.loc[df_vaccine['iso_code'] == iso_code, :].fillna(method='ffill').fillna(0)
df_vaccine.to_csv('data/df_vaccine.csv')
def aggregate(df: pd.Series, agg_col: str) -> pd.DataFrame:
data = df.groupby(["country"])[agg_col].max()
data = pd.DataFrame(data)
return data
def get_summ_data():
summary_data = pd.read_csv(r'C:\Users\Srijhak\Documents\Covid19-dash\data\worldometer_coronavirus_summary_data.csv')
df_vaccine = pd.read_csv(r'C:\Users\Srijhak\Documents\Covid19-dash\data\df_vaccine.csv')
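    # Harmonise country names so the vaccination data matches the worldometer summary naming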
    df_vaccine.country = df_vaccine.country.replace({
"Antigua and Barbuda": "Antigua And Barbuda",
"Bosnia and Herzegovina": "Bosnia And Herzegovina",
"Brunei": "Brunei Darussalam",
"Cape Verde": "Cabo Verde",
"Cote d'Ivoire": "Cote D Ivoire",
"Czechia": "Czech Republic",
"Democratic Republic of Congo": "Democratic Republic Of The Congo",
"Falkland Islands": "Falkland Islands Malvinas",
"Guinea-Bissau": "Guinea Bissau",
"Isle of Man": "Isle Of Man",
"North Macedonia": "Macedonia",
"Northern Cyprus": "Cyprus",
"Northern Ireland": "Ireland",
"Saint Kitts and Nevis": "Saint Kitts And Nevis",
"Saint Vincent and the Grenadines": "Saint Vincent And The Grenadines",
"Sao Tome and Principe": "Sao Tome And Principe",
"Sint Maarten (Dutch part)": "Sint Maarten",
"Timor": "Timor Leste",
"Trinidad and Tobago": "Trinidad And Tobago",
"Turks and Caicos Islands": "Turks And Caicos Islands",
"United Kingdom": "UK",
"United States": "USA",
"Vietnam": "Viet Nam",
"Wallis and Futuna": "Wallis And Futuna Islands"})
df_vaccine = df_vaccine[df_vaccine.country.apply(lambda x: x not in ['Bonaire Sint Eustatius and Saba','England','Eswatini','Guernsey','Hong Kong','Jersey','Kosovo','Macao',
'Nauru','Palestine','Pitcairn','Scotland','Tonga','Turkmenistan','Tuvalu', 'Wales'])]
summary = summary_data.set_index("country")
vaccines = df_vaccine[['country', 'vaccines']].drop_duplicates().set_index('country')
summary = summary.join(vaccines)
for cols in df_vaccine.columns[4:-4]:
summary = summary.join(aggregate(df_vaccine,cols))
summary['vaccinated_percent'] = summary.total_vaccinations / summary.population * 100
summary['tested_positive'] = summary.total_confirmed / summary.total_tests * 100
summary.to_csv('data/summary_df.csv')
def get_daily_data():
df_daily = pd.read_csv(r'C:\Users\Srijhak\Documents\Covid19-dash\data\worldometer_coronavirus_daily_data.csv')
df_vaccine = pd.read_csv(r'C:\Users\Srijhak\Documents\Covid19-dash\data\df_vaccine.csv')
# use only common countries and dates
countries = df_vaccine.dropna(subset=['daily_vaccinations'])['country'].unique()
dates = df_vaccine.dropna(subset=['daily_vaccinations'])['date'].unique()
country_mask = df_daily.country.apply(lambda x: x in countries)
date_mask = df_daily.date.apply(lambda x: x in dates)
# generate the visualization data
columns_to_sum = ['daily_new_cases', 'cumulative_total_cases', 'cumulative_total_deaths', 'active_cases']
daily_cases = df_daily[country_mask & date_mask].groupby('date')[columns_to_sum].sum()
daily_vaccs = df_vaccine.groupby('date')[[ 'daily_vaccinations']].sum()
# make it a dataframe for convenience
data = pd.DataFrame(daily_cases).join( | pd.DataFrame(daily_vaccs) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
import pytest
from featuretools import list_primitives
from featuretools.primitives import (
Age,
Count,
Day,
GreaterThan,
Haversine,
Last,
Max,
Mean,
Min,
Mode,
Month,
NumCharacters,
NumUnique,
NumWords,
PercentTrue,
Skew,
Std,
Sum,
Weekday,
Year,
get_aggregation_primitives,
get_default_aggregation_primitives,
get_default_transform_primitives,
get_transform_primitives
)
from featuretools.primitives.base import PrimitiveBase
from featuretools.primitives.utils import (
_apply_roll_with_offset_gap,
_get_descriptions,
_get_rolled_series_without_gap,
_get_unique_input_types,
_roll_series_with_gap,
list_primitive_files,
load_primitive_from_file
)
from featuretools.tests.primitive_tests.utils import get_number_from_offset
from featuretools.utils.gen_utils import Library
def test_list_primitives_order():
df = list_primitives()
all_primitives = get_transform_primitives()
all_primitives.update(get_aggregation_primitives())
for name, primitive in all_primitives.items():
assert name in df['name'].values
row = df.loc[df['name'] == name].iloc[0]
actual_desc = _get_descriptions([primitive])[0]
if actual_desc:
assert actual_desc == row['description']
assert row['dask_compatible'] == (Library.DASK in primitive.compatibility)
assert row['valid_inputs'] == ', '.join(_get_unique_input_types(primitive.input_types))
assert row['return_type'] == getattr(primitive.return_type, '__name__', None)
types = df['type'].values
assert 'aggregation' in types
assert 'transform' in types
def test_valid_input_types():
actual = _get_unique_input_types(Haversine.input_types)
assert actual == {'<ColumnSchema (Logical Type = LatLong)>'}
actual = _get_unique_input_types(GreaterThan.input_types)
assert actual == {'<ColumnSchema (Logical Type = Datetime)>',
"<ColumnSchema (Semantic Tags = ['numeric'])>",
'<ColumnSchema (Logical Type = Ordinal)>'}
actual = _get_unique_input_types(Sum.input_types)
assert actual == {"<ColumnSchema (Semantic Tags = ['numeric'])>"}
def test_descriptions():
primitives = {NumCharacters: 'Calculates the number of characters in a string.',
Day: 'Determines the day of the month from a datetime.',
Last: 'Determines the last value in a list.',
GreaterThan: 'Determines if values in one list are greater than another list.'}
assert _get_descriptions(list(primitives.keys())) == list(primitives.values())
def test_get_default_aggregation_primitives():
primitives = get_default_aggregation_primitives()
expected_primitives = [Sum, Std, Max, Skew, Min, Mean, Count, PercentTrue,
NumUnique, Mode]
assert set(primitives) == set(expected_primitives)
def test_get_default_transform_primitives():
primitives = get_default_transform_primitives()
expected_primitives = [Age, Day, Year, Month, Weekday, Haversine, NumWords,
NumCharacters]
assert set(primitives) == set(expected_primitives)
@pytest.fixture
def this_dir():
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def primitives_to_install_dir(this_dir):
return os.path.join(this_dir, "primitives_to_install")
@pytest.fixture
def bad_primitives_files_dir(this_dir):
return os.path.join(this_dir, "bad_primitive_files")
def test_list_primitive_files(primitives_to_install_dir):
files = list_primitive_files(primitives_to_install_dir)
custom_max_file = os.path.join(primitives_to_install_dir, "custom_max.py")
custom_mean_file = os.path.join(primitives_to_install_dir, "custom_mean.py")
custom_sum_file = os.path.join(primitives_to_install_dir, "custom_sum.py")
assert {custom_max_file, custom_mean_file, custom_sum_file}.issubset(set(files))
def test_load_primitive_from_file(primitives_to_install_dir):
primitve_file = os.path.join(primitives_to_install_dir, "custom_max.py")
primitive_name, primitive_obj = load_primitive_from_file(primitve_file)
assert issubclass(primitive_obj, PrimitiveBase)
def test_errors_more_than_one_primitive_in_file(bad_primitives_files_dir):
primitive_file = os.path.join(bad_primitives_files_dir, "multiple_primitives.py")
error_text = "More than one primitive defined in file {}".format(primitive_file)
with pytest.raises(RuntimeError) as excinfo:
load_primitive_from_file(primitive_file)
assert str(excinfo.value) == error_text
def test_errors_no_primitive_in_file(bad_primitives_files_dir):
primitive_file = os.path.join(bad_primitives_files_dir, "no_primitives.py")
error_text = "No primitive defined in file {}".format(primitive_file)
with pytest.raises(RuntimeError) as excinfo:
load_primitive_from_file(primitive_file)
assert str(excinfo.value) == error_text
def test_get_rolled_series_without_gap(rolling_series_pd):
# Data is daily, so number of rows should be number of days not included in the gap
assert len(_get_rolled_series_without_gap(rolling_series_pd, "11D")) == 9
assert len(_get_rolled_series_without_gap(rolling_series_pd, "0D")) == 20
assert len(_get_rolled_series_without_gap(rolling_series_pd, "48H")) == 18
assert len(_get_rolled_series_without_gap(rolling_series_pd, "4H")) == 19
def test_get_rolled_series_without_gap_not_uniform(rolling_series_pd):
non_uniform_series = rolling_series_pd.iloc[[0, 2, 5, 6, 8, 9]]
assert len(_get_rolled_series_without_gap(non_uniform_series, "10D")) == 0
assert len(_get_rolled_series_without_gap(non_uniform_series, "0D")) == 6
assert len(_get_rolled_series_without_gap(non_uniform_series, "48H")) == 4
assert len(_get_rolled_series_without_gap(non_uniform_series, "4H")) == 5
assert len(_get_rolled_series_without_gap(non_uniform_series, "4D")) == 3
assert len(_get_rolled_series_without_gap(non_uniform_series, "4D2H")) == 2
def test_get_rolled_series_without_gap_empty_series(rolling_series_pd):
empty_series = pd.Series()
assert len(_get_rolled_series_without_gap(empty_series, "1D")) == 0
assert len(_get_rolled_series_without_gap(empty_series, "0D")) == 0
def test_get_rolled_series_without_gap_large_bound(rolling_series_pd):
assert len(_get_rolled_series_without_gap(rolling_series_pd, "100D")) == 0
assert len(_get_rolled_series_without_gap(rolling_series_pd.iloc[[0, 2, 5, 6, 8, 9]], "20D")) == 0
@pytest.mark.parametrize(
"window_length, gap",
[
(3, 2),
(3, 4), # gap larger than window
(2, 0), # gap explicitly set to 0
('3d', '2d'), # using offset aliases
('3d', '4d'),
('4d', '0d'),
],
)
def test_roll_series_with_gap(window_length, gap, rolling_series_pd):
rolling_max = _roll_series_with_gap(rolling_series_pd, window_length, gap=gap).max()
rolling_min = _roll_series_with_gap(rolling_series_pd, window_length, gap=gap).min()
assert len(rolling_max) == len(rolling_series_pd)
assert len(rolling_min) == len(rolling_series_pd)
gap_num = get_number_from_offset(gap)
window_length_num = get_number_from_offset(window_length)
for i in range(len(rolling_series_pd)):
start_idx = i - gap_num - window_length_num + 1
if isinstance(gap, str):
            # No gap handling happens here, so the gap isn't taken into account in the end index;
            # it behaves as if the gap were 0 and includes the row itself
end_idx = i
else:
end_idx = i - gap_num
# If start and end are negative, they're entirely before
if start_idx < 0 and end_idx < 0:
assert pd.isnull(rolling_max.iloc[i])
assert pd.isnull(rolling_min.iloc[i])
continue
if start_idx < 0:
start_idx = 0
# Because the row values are a range from 0 to 20, the rolling min will be the start index
# and the rolling max will be the end idx
assert rolling_min.iloc[i] == start_idx
assert rolling_max.iloc[i] == end_idx
@pytest.mark.parametrize("window_length", [3, "3d"])
def test_roll_series_with_no_gap(window_length, rolling_series_pd):
actual_rolling = _roll_series_with_gap(rolling_series_pd, window_length).mean()
expected_rolling = rolling_series_pd.rolling(window_length, min_periods=1).mean()
pd.testing.assert_series_equal(actual_rolling, expected_rolling)
@pytest.mark.parametrize(
"window_length, gap",
[
(6, 2),
(6, 0), # No gap - changes early values
('6d', '0d'), # Uses offset aliases
('6d', '2d')
]
)
def test_roll_series_with_gap_early_values(window_length, gap, rolling_series_pd):
gap_num = get_number_from_offset(gap)
window_length_num = get_number_from_offset(window_length)
# Default min periods is 1 - will include all
default_partial_values = _roll_series_with_gap(rolling_series_pd,
window_length,
gap=gap).count()
num_empty_aggregates = len(default_partial_values.loc[default_partial_values == 0])
num_partial_aggregates = len((default_partial_values
.loc[default_partial_values != 0])
.loc[default_partial_values < window_length_num])
assert num_partial_aggregates == window_length_num - 1
if isinstance(gap, str):
# gap isn't handled, so we'll always at least include the row itself
assert num_empty_aggregates == 0
else:
assert num_empty_aggregates == gap_num
# Make min periods the size of the window
no_partial_values = _roll_series_with_gap(rolling_series_pd,
window_length,
gap=gap,
min_periods=window_length_num).count()
num_null_aggregates = len(no_partial_values.loc[pd.isna(no_partial_values)])
num_partial_aggregates = len(no_partial_values.loc[no_partial_values < window_length_num])
# because we shift, gap is included as nan values in the series.
# Count treats nans in a window as values that don't get counted,
# so the gap rows get included in the count for whether a window has "min periods".
# This is different than max, for example, which does not count nans in a window as values towards "min periods"
assert num_null_aggregates == window_length_num - 1
if isinstance(gap, str):
# gap isn't handled, so we'll never have any partial aggregates
assert num_partial_aggregates == 0
else:
assert num_partial_aggregates == gap_num
def test_roll_series_with_gap_nullable_types(rolling_series_pd):
window_length = 3
gap = 2
# Because we're inserting nans, confirm that nullability of the dtype doesn't have an impact on the results
nullable_series = rolling_series_pd.astype('Int64')
non_nullable_series = rolling_series_pd.astype('int64')
nullable_rolling_max = _roll_series_with_gap(nullable_series, window_length, gap=gap).max()
non_nullable_rolling_max = _roll_series_with_gap(non_nullable_series, window_length, gap=gap).max()
pd.testing.assert_series_equal(nullable_rolling_max, non_nullable_rolling_max)
def test_roll_series_with_gap_nullable_types_with_nans(rolling_series_pd):
window_length = 3
gap = 2
nullable_floats = rolling_series_pd.astype('float64').replace({1: np.nan, 3: np.nan})
nullable_ints = nullable_floats.astype('Int64')
nullable_ints_rolling_max = _roll_series_with_gap(nullable_ints, window_length, gap=gap).max()
nullable_floats_rolling_max = _roll_series_with_gap(nullable_floats, window_length, gap=gap).max()
pd.testing.assert_series_equal(nullable_ints_rolling_max, nullable_floats_rolling_max)
expected_early_values = ([np.nan, np.nan, 0, 0, 2, 2, 4] +
list(range(7 - gap, len(rolling_series_pd) - gap)))
for i in range(len(rolling_series_pd)):
actual = nullable_floats_rolling_max.iloc[i]
expected = expected_early_values[i]
if pd.isnull(actual):
assert pd.isnull(expected)
else:
assert actual == expected
@pytest.mark.parametrize(
"window_length, gap",
[
('3d', '2d'),
('3d', '4d'),
('4d', '0d'),
],
)
def test_apply_roll_with_offset_gap(window_length, gap, rolling_series_pd):
def max_wrapper(sub_s):
return _apply_roll_with_offset_gap(sub_s, gap, max, min_periods=1)
rolling_max_obj = _roll_series_with_gap(rolling_series_pd, window_length, gap=gap)
rolling_max_series = rolling_max_obj.apply(max_wrapper)
def min_wrapper(sub_s):
return _apply_roll_with_offset_gap(sub_s, gap, min, min_periods=1)
rolling_min_obj = _roll_series_with_gap(rolling_series_pd, window_length, gap=gap)
rolling_min_series = rolling_min_obj.apply(min_wrapper)
assert len(rolling_max_series) == len(rolling_series_pd)
assert len(rolling_min_series) == len(rolling_series_pd)
gap_num = get_number_from_offset(gap)
window_length_num = get_number_from_offset(window_length)
for i in range(len(rolling_series_pd)):
start_idx = i - gap_num - window_length_num + 1
# Now that we have the _apply call, this acts as expected
end_idx = i - gap_num
# If start and end are negative, they're entirely before
if start_idx < 0 and end_idx < 0:
assert pd.isnull(rolling_max_series.iloc[i])
assert pd.isnull(rolling_min_series.iloc[i])
continue
if start_idx < 0:
start_idx = 0
# Because the row values are a range from 0 to 20, the rolling min will be the start index
# and the rolling max will be the end idx
assert rolling_min_series.iloc[i] == start_idx
assert rolling_max_series.iloc[i] == end_idx
@pytest.mark.parametrize(
"min_periods",
[1, 0, None],
)
def test_apply_roll_with_offset_gap_default_min_periods(min_periods, rolling_series_pd):
window_length = '5d'
window_length_num = 5
gap = '3d'
gap_num = 3
def count_wrapper(sub_s):
return _apply_roll_with_offset_gap(sub_s, gap, len, min_periods=min_periods)
rolling_count_obj = _roll_series_with_gap(rolling_series_pd, window_length, gap=gap)
rolling_count_series = rolling_count_obj.apply(count_wrapper)
# gap essentially creates a rolling series that has no elements; which should be nan
# to differentiate from when a window only has null values
num_empty_aggregates = rolling_count_series.isna().sum()
num_partial_aggregates = len((rolling_count_series
.loc[rolling_count_series != 0])
.loc[rolling_count_series < window_length_num])
assert num_empty_aggregates == gap_num
assert num_partial_aggregates == window_length_num - 1
@pytest.mark.parametrize(
"min_periods",
[2, 3, 4, 5],
)
def test_apply_roll_with_offset_gap_min_periods(min_periods, rolling_series_pd):
window_length = '5d'
window_length_num = 5
gap = '3d'
gap_num = 3
def count_wrapper(sub_s):
return _apply_roll_with_offset_gap(sub_s, gap, len, min_periods=min_periods)
rolling_count_obj = _roll_series_with_gap(rolling_series_pd, window_length, gap=gap)
rolling_count_series = rolling_count_obj.apply(count_wrapper)
# gap essentially creates rolling series that have no elements; which should be nan
# to differentiate from when a window only has null values
num_empty_aggregates = rolling_count_series.isna().sum()
num_partial_aggregates = len((rolling_count_series
.loc[rolling_count_series != 0])
.loc[rolling_count_series < window_length_num])
assert num_empty_aggregates == min_periods - 1 + gap_num
assert num_partial_aggregates == window_length_num - min_periods
def test_apply_roll_with_offset_gap_non_uniform():
window_length = '3d'
gap = '3d'
# When the data isn't uniform, this impacts the number of values in each rolling window
datetimes = (list(pd.date_range(start='2017-01-01', freq='1d', periods=7)) +
list(pd.date_range(start='2017-02-01', freq='2d', periods=7)) +
list(pd.date_range(start='2017-03-01', freq='1d', periods=7)))
no_freq_series = pd.Series(range(len(datetimes)), index=datetimes)
assert pd.infer_freq(no_freq_series.index) is None
expected_series = pd.Series([None, None, None, 1, 2, 3, 3] +
[None, None, 1, 1, 1, 1, 1] +
[None, None, None, 1, 2, 3, 3], index=datetimes)
def count_wrapper(sub_s):
return _apply_roll_with_offset_gap(sub_s, gap, len, min_periods=1)
rolling_count_obj = _roll_series_with_gap(no_freq_series, window_length, gap=gap)
rolling_count_series = rolling_count_obj.apply(count_wrapper)
| pd.testing.assert_series_equal(rolling_count_series, expected_series) | pandas.testing.assert_series_equal |
from abc import abstractmethod, ABC
from typing import Any
import numpy as np
import pandas as pd
from sklearn.base import clone
from resources.backend_scripts.feature_selection import FeatureSelection
from resources.backend_scripts.is_data import DataEnsurer
from resources.backend_scripts.parameter_search import ParameterSearch
from resources.backend_scripts.score import CVScore, CVModelScore
from resources.backend_scripts.split_data import SplitterReturner
from resources.backend_scripts.switcher import Switch
DataFrame = pd.DataFrame
NpArray = np.ndarray
class SBSMachineLearning(ABC):
_data_frame = pd.DataFrame()
_feature_selector: FeatureSelection = None
_parameter_selector: ParameterSearch = None
_best_features: NpArray = None
_best_params: dict = None
_initial_params: dict = None
_clf: Any = None
_cv_score: CVModelScore = CVScore()
@property
def data_frame(self) -> DataFrame:
return self._data_frame
@data_frame.setter
def data_frame(self, value: DataFrame) -> None:
self._data_frame = value
@property
def feature_selector(self) -> FeatureSelection:
return self._feature_selector
@feature_selector.setter
def feature_selector(self, value: FeatureSelection) -> None:
self._feature_selector = value
@property
def parameter_selector(self) -> ParameterSearch:
return self._parameter_selector
@parameter_selector.setter
def parameter_selector(self, value: ParameterSearch) -> None:
self._parameter_selector = value
@property
def best_features(self) -> NpArray:
return self._best_features
@best_features.setter
def best_features(self, value: NpArray) -> None:
self._best_features = value
@property
def best_parameters(self) -> dict:
return self._best_params
@best_parameters.setter
def best_parameters(self, value: dict) -> None:
self._best_params = value
@property
def initial_parameters(self) -> dict:
return self._initial_params
@initial_parameters.setter
def initial_parameters(self, value: dict) -> None:
self._initial_params = value
@property
def estimator(self) -> Any:
return self._clf
@estimator.setter
def estimator(self, value: Any) -> None:
self._clf = value
@abstractmethod
def score_model(self, score_type: str, n_folds_validation: int) -> float:
pass
class SimpleSBS(SBSMachineLearning):
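    # Baseline variant: keeps the initial parameters and all features (no parameter search, no feature selection)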
def score_model(self, score_type: str, n_folds_validation: int) -> float:
# get x and y from df
x, y = SplitterReturner.split_x_y_from_df(self.data_frame)
self.best_parameters = self.initial_parameters # they are the same in a simple model
# set clf params. ** because it accepts key-value one by one, not a big dictionary
self.estimator.set_params(**self.best_parameters)
self.best_features = x.columns.values # get features as numpy data
# return the cv score
score = self._cv_score.get_score(x, y, clone(self.estimator), score_type, n_folds_validation)
self.estimator.fit(x, y)
return score
class OnlyFeatureSelectionSBS(SBSMachineLearning):
def score_model(self, score_type: str, n_folds_validation: int) -> float:
# get x and y from df. Ravel is set to False so we can save the original y with its column
x, y = SplitterReturner.split_x_y_from_df(self.data_frame, ravel_data=False)
self.best_parameters = self.initial_parameters # they are the same in a only feature selection model
# set clf params. ** because it accepts key-value one by one, not a big dictionary
self.estimator.set_params(**self.best_parameters)
# get best features
best_features_dataframe, score = self.feature_selector.select_features(x, y.values.ravel(),
clone(self.estimator),
score_type, n_folds_validation)
self.best_features = best_features_dataframe.columns.values # get features as numpy data
self.data_frame = pd.concat([best_features_dataframe, y], axis=1)
self.estimator.fit(best_features_dataframe, y)
return score
class OnlyParameterSearchSBS(SBSMachineLearning):
def score_model(self, score_type: str, n_folds_validation: int) -> float:
# get x and y from df
x, y = SplitterReturner.split_x_y_from_df(self.data_frame)
# transform initial params grid into a simple dict which is best_params
self.best_parameters, score = self.parameter_selector.search_parameters(x, y, self.initial_parameters,
n_folds_validation,
clone(self.estimator),
score_type)
self.best_features = x.columns.values # get features as numpy data
# set clf params from the previous search. ** because it accepts key-value one by one, not a big dictionary
self.estimator.set_params(**self.best_parameters)
self.estimator.fit(x, y)
return score
class FeatureAndParameterSearchSBS(SBSMachineLearning):
def score_model(self, score_type: str, n_folds_validation: int) -> float:
# get x and y from df. Ravel is set to False so we can save the original y with its column
x, y = SplitterReturner.split_x_y_from_df(self.data_frame, ravel_data=False)
# transform best params grid into a simple dict
self.best_parameters, _ = self.parameter_selector.search_parameters(x, y.values.ravel(),
self.initial_parameters,
n_folds_validation,
clone(self.estimator),
score_type)
# set clf params from the previous search. ** because it accepts key-value one by one, not a big dictionary
self.estimator.set_params(**self.best_parameters)
# get best features
best_features_dataframe, score = self.feature_selector.select_features(x, y.values.ravel(),
clone(self.estimator),
score_type, n_folds_validation)
self.best_features = best_features_dataframe.columns.values # get features as numpy data
self.data_frame = | pd.concat([best_features_dataframe, y], axis=1) | pandas.concat |
import sys
import datetime
import random as r
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pandas import read_csv, DataFrame
from scipy.optimize import curve_fit
def cubic(x, a, b, c, d):
"""
@type x: number
@type a: number
@type b: number
@type c: number
@type d: number
Calculates cubic function a*x^3+b*x^2+c*x+d
@rtype: number
@return: result of cubic function calculation
"""
return a * (x ** 3) + b * (x ** 2) + c * x + d
def normalize(points, min):
"""
@type points: array
@param points: array to be normalized
@type min: number
@param min: min value of normalized array
Normalizes array
"""
for i in range(len(points)):
if points[i] < min:
points[i] = min + 0.1
def load_data(csv_file):
"""
@type csv_file: string
@param csv_file: path to csv file
Loads data from specified csv file
@rtype: pandas.DataFrame
@return: DataFrame from csv file without Month column
"""
return pd.read_csv(csv_file).drop('Month', 1)
def fill_with_NaNs(amount, data_frame):
"""
@type amount: integer
@param amount: numbers of rows to be filled with NaNs
@type data_frame: pandas.DataFrame
@param data_frame: dataFrame to be filled by NaNs
Fills DataFrame with NaN's
"""
for column in range(amount):
data_frame.loc[len(data_frame)] = [None for i in range(len(data_frame.columns))]
def generate_date_rows(base_date, amount):
"""
@type base_date: datetime
@param base_date: initial date
@type amount: integer
@param amount: amount of rows(months) to be generated
Generate dates(Year-Month) for all rows from specified initial date
@rtype: numpy.Array
@return: array of generate dates
"""
return np.array([(base_date + datetime.timedelta(i*365/12)).strftime("%Y-%m") for i in range(amount)])
def extrapolate(data_frame):
"""
@type data_frame: pandas.DataFrame
@param data_frame: dataFrame to be extrapolated
Extrapolates specified dataFrame (NaN values)
"""
# Create copy of data to remove NaNs for curve fitting
fit_df = data_frame.dropna()
# Place to store function parameters for each column
col_params = {}
# Curve fit each column
for col in fit_df.columns:
# Get x & y
x = fit_df.index.astype(float).values
y = fit_df[col].values
# Curve fit column and get curve parameters
params = curve_fit(cubic, x, y)
# Store optimized parameters
col_params[col] = params[0]
for col in data_frame.columns:
# Get the index values for NaNs in the column
x = data_frame[pd.isnull(data_frame[col])] \
.index.astype(float).values
# Extrapolate those points with the fitted function
points = cubic(x, *col_params[col])
normalize(points, 0)
# Add random changes
for i in range(len(points)):
            if int(points[i]) % int(r.random() * 3 + 1) == 0:
points[i] += r.random() * 4 - 2
normalize(points, 0)
data_frame[col][x] = points
if __name__ == "__main__":
plt.style.use('ggplot')
csv_file = "web-frameworks-trends.csv"
# Loads data from CSV file
df = load_data(csv_file)
# Months
    months = 24 # Two years
# Fill specified amount of rows of dataframe with NaN's
fill_with_NaNs(months, df)
# Interpolate
    df = df.interpolate()
# Extrapolate dataframe
extrapolate(df)
# Generate dates(Year-Month) for all rows from specified initial date
date_rows = generate_date_rows(datetime.datetime(2004, 1, 1), len(df.index))
# Add date to dataframe
df['Month'] = | pd.Series(date_rows, index=df.index) | pandas.Series |
"""Custom pandas accessors.
!!! note
The underlying Series/DataFrame must already be a signal series.
    Input arrays must be of boolean dtype (`np.bool_`).
```python-repl
>>> import vectorbt as vbt
>>> import numpy as np
>>> import pandas as pd
>>> from numba import njit
>>> from datetime import datetime
>>> sig = pd.DataFrame({
... 'a': [True, False, False, False, False],
... 'b': [True, False, True, False, True],
... 'c': [True, True, True, False, False]
... }, index=pd.Index([
... datetime(2020, 1, 1),
... datetime(2020, 1, 2),
... datetime(2020, 1, 3),
... datetime(2020, 1, 4),
... datetime(2020, 1, 5)
... ]))
>>> sig
a b c
2020-01-01 True True True
2020-01-02 False False True
2020-01-03 False True True
2020-01-04 False False False
2020-01-05 False True False
```"""
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from vectorbt.defaults import contrast_color_schema
from vectorbt.root_accessors import register_dataframe_accessor, register_series_accessor
from vectorbt.utils import checks
from vectorbt.utils.config import merge_kwargs
from vectorbt.utils.colors import adjust_lightness
from vectorbt.utils.decorators import cached_property
from vectorbt.base import reshape_fns, index_fns
from vectorbt.base.common import add_nb_methods
from vectorbt.generic.accessors import Generic_Accessor, Generic_SRAccessor, Generic_DFAccessor
from vectorbt.signals import nb
from vectorbt.utils.widgets import CustomFigureWidget
@add_nb_methods([
nb.shuffle_nb,
nb.fshift_nb,
], module_name='vectorbt.signals.nb')
class Signals_Accessor(Generic_Accessor):
"""Accessor on top of signal series. For both, Series and DataFrames.
Accessible through `pd.Series.vbt.signals` and `pd.DataFrame.vbt.signals`."""
def __init__(self, obj, freq=None):
if not checks.is_pandas(obj): # parent accessor
obj = obj._obj
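        # Signal arrays must be boolean; validate the dtype before initializing the generic accessor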
        checks.assert_dtype(obj, np.bool_)
Generic_Accessor.__init__(self, obj, freq=freq)
@classmethod
def empty(cls, *args, fill_value=False, **kwargs):
"""`vectorbt.base.accessors.Base_Accessor.empty` with `fill_value=False`.
Example:
```python-repl
>>> pd.Series.vbt.signals.empty(5, index=sig.index, name=sig['a'].name)
2020-01-01 False
2020-01-02 False
2020-01-03 False
2020-01-04 False
2020-01-05 False
Name: a, dtype: bool
>>> pd.DataFrame.vbt.signals.empty((5, 3), index=sig.index, columns=sig.columns)
a b c
2020-01-01 False False False
2020-01-02 False False False
2020-01-03 False False False
2020-01-04 False False False
2020-01-05 False False False
```"""
return Generic_Accessor.empty(*args, fill_value=fill_value, **kwargs)
@classmethod
def empty_like(cls, *args, fill_value=False, **kwargs):
"""`vectorbt.base.accessors.Base_Accessor.empty_like` with `fill_value=False`.
Example:
```python-repl
>>> pd.Series.vbt.signals.empty_like(sig['a'])
2020-01-01 False
2020-01-02 False
2020-01-03 False
2020-01-04 False
2020-01-05 False
Name: a, dtype: bool
>>> pd.DataFrame.vbt.signals.empty_like(sig)
a b c
2020-01-01 False False False
2020-01-02 False False False
2020-01-03 False False False
2020-01-04 False False False
2020-01-05 False False False
```"""
return Generic_Accessor.empty_like(*args, fill_value=fill_value, **kwargs)
# ############# Signal generation ############# #
@classmethod
def generate(cls, shape, choice_func_nb, *args, **kwargs):
"""See `vectorbt.signals.nb.generate_nb`.
`**kwargs` will be passed to pandas constructor.
Example:
Generate random signals manually:
```python-repl
>>> @njit
... def choice_func_nb(col, from_i, to_i):
... return col + from_i
>>> pd.DataFrame.vbt.signals.generate((5, 3),
... choice_func_nb, index=sig.index, columns=sig.columns)
a b c
2020-01-01 True False False
2020-01-02 False True False
2020-01-03 False False True
2020-01-04 False False False
2020-01-05 False False False
```"""
checks.assert_numba_func(choice_func_nb)
if not isinstance(shape, tuple):
shape = (shape, 1)
elif isinstance(shape, tuple) and len(shape) == 1:
shape = (shape[0], 1)
result = nb.generate_nb(shape, choice_func_nb, *args)
if cls.is_series():
if shape[1] > 1:
raise ValueError("Use DataFrame accessor")
return pd.Series(result[:, 0], **kwargs)
return pd.DataFrame(result, **kwargs)
@classmethod
def generate_both(cls, shape, entry_choice_func_nb, exit_choice_func_nb,
entry_args, exit_args, **kwargs):
"""See `vectorbt.signals.nb.generate_enex_nb`.
`**kwargs` will be passed to pandas constructor.
Example:
Generate entry and exit signals one after another:
```python-repl
>>> @njit
... def entry_choice_func_nb(col, from_i, to_i, wait1):
... next_pos = col + from_i + wait1
... if next_pos < to_i:
... return np.array([next_pos])
... return np.empty(0, dtype=np.int_)
>>> @njit
... def exit_choice_func_nb(col, from_i, to_i, wait2):
... next_pos = col + from_i + wait2
... if next_pos < to_i:
... return np.array([next_pos])
... return np.empty(0, dtype=np.int_)
>>> en, ex = pd.DataFrame.vbt.signals.generate_both(
... (5, 3), entry_choice_func_nb, exit_choice_func_nb, (0,), (1,),
... index=sig.index, columns=sig.columns)
>>> en
a b c
2020-01-01 True False False
2020-01-02 False True False
2020-01-03 False False True
2020-01-04 True False False
2020-01-05 False False False
>>> ex
a b c
2020-01-01 False False False
2020-01-02 False False False
2020-01-03 True False False
2020-01-04 False False False
2020-01-05 False True False
```"""
checks.assert_numba_func(entry_choice_func_nb)
checks.assert_numba_func(exit_choice_func_nb)
if not isinstance(shape, tuple):
shape = (shape, 1)
elif isinstance(shape, tuple) and len(shape) == 1:
shape = (shape[0], 1)
result1, result2 = nb.generate_enex_nb(
shape,
entry_choice_func_nb,
exit_choice_func_nb,
entry_args,
exit_args
)
if cls.is_series():
if shape[1] > 1:
raise ValueError("Use DataFrame accessor")
return pd.Series(result1[:, 0], **kwargs), pd.Series(result2[:, 0], **kwargs)
return pd.DataFrame(result1, **kwargs), pd.DataFrame(result2, **kwargs)
def generate_exits(self, exit_choice_func_nb, *args):
"""See `vectorbt.signals.nb.generate_ex_nb`.
Example:
Fill all space between signals in `sig`:
```python-repl
>>> @njit
... def exit_choice_func_nb(col, from_i, to_i):
... return np.arange(from_i, to_i)
>>> sig.vbt.signals.generate_exits(exit_choice_func_nb)
a b c
2020-01-01 False False False
2020-01-02 True True False
2020-01-03 True False False
2020-01-04 True True True
2020-01-05 True False True
```"""
checks.assert_numba_func(exit_choice_func_nb)
return self.wrap(nb.generate_ex_nb(self.to_2d_array(), exit_choice_func_nb, *args))
# ############# Random ############# #
@classmethod
def generate_random(cls, shape, n=None, prob=None, seed=None, **kwargs):
"""Generate signals randomly.
If `n` is set, see `vectorbt.signals.nb.generate_rand_nb`.
If `prob` is set, see `vectorbt.signals.nb.generate_rand_by_prob_nb`.
`prob` must be either a single number or an array that will be broadcast to match `shape`.
`**kwargs` will be passed to pandas constructor.
Example:
For each column, generate two signals randomly:
```python-repl
>>> pd.DataFrame.vbt.signals.generate_random((5, 3), n=2,
... seed=42, index=sig.index, columns=sig.columns)
a b c
2020-01-01 False False True
2020-01-02 True True True
2020-01-03 False False False
2020-01-04 False True False
2020-01-05 True False False
```
For each column and time step, pick a signal with 50% probability:
```python-repl
>>> pd.DataFrame.vbt.signals.generate_random((5, 3), prob=0.5,
... seed=42, index=sig.index, columns=sig.columns)
a b c
2020-01-01 True True True
2020-01-02 False True False
2020-01-03 False False False
2020-01-04 False False True
2020-01-05 True False True
```"""
if not isinstance(shape, tuple):
shape = (shape, 1)
elif isinstance(shape, tuple) and len(shape) == 1:
shape = (shape[0], 1)
if n is not None:
result = nb.generate_rand_nb(shape, n, seed=seed)
elif prob is not None:
probs = np.broadcast_to(prob, shape)
result = nb.generate_rand_by_prob_nb(shape, probs, seed=seed)
else:
raise ValueError("At least n or prob must be set")
if cls.is_series():
if shape[1] > 1:
raise ValueError("Use DataFrame accessor")
return pd.Series(result[:, 0], **kwargs)
return pd.DataFrame(result, **kwargs)
# ############# Exits ############# #
@classmethod
def generate_random_both(cls, shape, n=None, entry_prob=None, exit_prob=None, seed=None, **kwargs):
"""Generate entry and exit signals randomly and iteratively.
If `n` is set, see `vectorbt.signals.nb.generate_rand_enex_nb`.
If `prob` is set, see `vectorbt.signals.nb.generate_rand_enex_by_prob_nb`.
`entry_prob` and `exit_prob` must be either a single number or an array that will be
broadcast to match `shape`. `**kwargs` will be passed to pandas constructor.
Example:
For each column, generate two entries and exits randomly:
```python-repl
>>> en, ex = pd.DataFrame.vbt.signals.generate_random_both(
... (5, 3), n=2, seed=42, index=sig.index, columns=sig.columns)
>>> en
a b c
2020-01-01 True True True
2020-01-02 False False False
2020-01-03 True True False
2020-01-04 False False True
2020-01-05 False False False
>>> ex
a b c
2020-01-01 False False False
2020-01-02 True True True
2020-01-03 False False False
2020-01-04 False True False
2020-01-05 True False True
```
For each column and time step, pick entry with 50% probability and exit right after:
```python-repl
>>> en, ex = pd.DataFrame.vbt.signals.generate_random_both(
... (5, 3), entry_prob=0.5, exit_prob=1.,
... seed=42, index=sig.index, columns=sig.columns)
>>> en
a b c
2020-01-01 True True False
2020-01-02 False False True
2020-01-03 False True False
2020-01-04 True False True
2020-01-05 False False False
>>> ex
a b c
2020-01-01 False False False
2020-01-02 True True False
2020-01-03 False False True
2020-01-04 False True False
2020-01-05 True False True
```"""
if not isinstance(shape, tuple):
shape = (shape, 1)
elif isinstance(shape, tuple) and len(shape) == 1:
shape = (shape[0], 1)
if n is not None:
entries, exits = nb.generate_rand_enex_nb(shape, n, seed=seed)
elif entry_prob is not None and exit_prob is not None:
entry_prob = np.broadcast_to(entry_prob, shape)
exit_prob = np.broadcast_to(exit_prob, shape)
entries, exits = nb.generate_rand_enex_by_prob_nb(shape, entry_prob, exit_prob, seed=seed)
else:
raise ValueError("At least n, or entry_prob and exit_prob must be set")
if cls.is_series():
if shape[1] > 1:
raise ValueError("Use DataFrame accessor")
return pd.Series(entries[:, 0], **kwargs), pd.Series(exits[:, 0], **kwargs)
return | pd.DataFrame(entries, **kwargs) | pandas.DataFrame |
import pandas as pd
import numpy as np
from data import Data
import pickle
class Stats():
def __init__(self, data):
        '''Accepts either a Data object or a pandas DataFrame'''
if isinstance(data, Data):
self.df = data.df
elif isinstance(data, pd.DataFrame):
self.df = data
self.totalsparsity = self.calc_sparsity()
self.featuresparsity = self.calc_featuresparsity()
self.constants = self.constantvalues()
self.corrfeatures = self.correlation()
self.mean = self.calc_mean()
self.nonzero = self.calc_nonzero()
self.zero = self.calc_zero()
self.min = self.calc_min()
self.max = self.calc_max()
self.stddv = self.calc_stddv()
self.q1 = self.calc_q1()
self.median = self.calc_median()
self.q3 = self.calc_q3()
def calc_sparsity(self):
'''Calculate the sparsity of the selected data'''
zeroes = 0
for column in self.df.columns:
zeroes += np.count_nonzero(self.df[column] == 0)
return zeroes / (self.df.shape[0] * self.df.shape[1])
def calc_featuresparsity(self):
'''Calculate sparsity per feature'''
df = self.df
result = pd.DataFrame()
result['sparsity'] = df.apply(lambda x: np.count_nonzero(x == 0)/len(x))
return result
def constantvalues(self):
'''Collect the variables which have a contant value'''
constant_columns = [column for column in self.df.columns if len(self.df[column].unique()) < 2]
return constant_columns
def correlation(self):
'''Collect the high correlation variables (> 0.90)'''
corr = self.df.corr(method='pearson').abs()
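        # Keep only the upper triangle of the correlation matrix so each feature pair is considered once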
        upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(bool))
high_correlations = [column for column in upper.columns if any(upper[column] > 0.90)]
return high_correlations
def calc_mean(self):
df = self.df
result = pd.DataFrame()
result['mean'] = df.apply(lambda x: np.mean(x))
return result
def calc_nonzero(self):
df = self.df
result = pd.DataFrame()
result['nonzero'] = df.apply(lambda x: (np.count_nonzero(x)))
return result
def calc_zero(self):
df = self.df
result = pd.DataFrame()
result['zero'] = df.apply(lambda x: (np.count_nonzero(x == 0)))
return result
def calc_min(self):
df = self.df
result = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import statistics
import pandas as pd
from pandas import DataFrame
from tabulate import tabulate
from base import BaseObject
class ZScoreCalculator(BaseObject):
""" Compute Z-Scores on Dimensions for a Single Record """
def __init__(self,
df_schema_weights: DataFrame,
is_debug: bool = False):
"""
Created:
7-Aug-2019
<EMAIL>
* refactored out of 'process-single-record'
:param df_schema_weights:
Schema Weight
cloud 1.0
system administrator 9.0
database 5.0
data science 4.0
hard skill 11.0
other 7.0
soft skill 9.0
project management 7.0
service management 8.0
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self.df_schema_weights = df_schema_weights
def process(self) -> DataFrame or None:
"""
Purpose:
Take a Schema Weights DataFrame and generate zScores
:return:
Schema Weight zScore zScoreNorm
cloud 1.0 -1.9 0.0
system administrator 9.0 0.7 2.6
database 5.0 -0.6 1.3
data science 4.0 -0.9 1.0
hard skill 11.0 1.4 3.3
other 7.0 0.1 2.0
soft skill 9.0 0.7 2.6
project management 7.0 0.1 2.0
service management 8.0 0.4 2.3
"""
weights = list(self.df_schema_weights.Weight)
def stdev():
a_stdev = statistics.stdev(weights)
if a_stdev == 0:
return 0.005
return a_stdev
_stdev = stdev()
_mean = statistics.mean(weights)
zscores = []
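        # Standard z-score: how many standard deviations each schema weight lies from the mean weight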
for weight in weights:
zscore = (weight - _mean) / _stdev
zscores.append(round(zscore, 2))
results = []
for i, row in self.df_schema_weights.iterrows():
results.append({
"Schema": row["Schema"],
"Weight": row["Weight"],
"zScore": zscores[i],
"zScoreNorm": round(zscores[i] - min(zscores), 1)})
df_zscore = | pd.DataFrame(results) | pandas.DataFrame |
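# A compact sketch of the same z-score computation as ZScoreCalculator.process()
# above, assuming only a plain DataFrame with 'Schema' and 'Weight' columns; the
# three rows below are taken from the docstring example.
import statistics
import pandas as pd

df = pd.DataFrame({
    "Schema": ["cloud", "system administrator", "database"],
    "Weight": [1.0, 9.0, 5.0],
})

mean = statistics.mean(df["Weight"])
stdev = statistics.stdev(df["Weight"]) or 0.005       # same zero-stdev guard as above
df["zScore"] = ((df["Weight"] - mean) / stdev).round(2)
df["zScoreNorm"] = (df["zScore"] - df["zScore"].min()).round(1)
print(df)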
""" Class modelling discrete and finite distribution
extending pandas DataFrame."""
# Imported libraries
import pkg_resources
# For computations on data
import numpy as np
import pandas as pd
from .DiscreteVariable import DiscreteVariable
from ..utils import ddomain_equals, pdInterval_series_from_string
# For graph plot
installed_pkg = {pkg.key for pkg in pkg_resources.working_set}
if 'ipdb' in installed_pkg:
import ipdb
if "plotly" in installed_pkg:
import plotly.io as pio
import plotly.offline as pof
import plotly.graph_objects as go
import plotly.express as px
# Class used to freeze direct attribute assignment
class FrozenClass(object):
__isfrozen = False
def __setattr__(self, key, value):
if self.__isfrozen and not hasattr(self, key):
raise TypeError(
"No new Attributes can be added to the Discretedistribution")
object.__setattr__(self, key, value)
def _freeze(self):
self.__isfrozen = True
# ===============================================#
# Definition of the DiscreteDistribution object #
# ===============================================#
# The discrete distribution class aims to represent probability distribution for DiscreteVariable object.
# Therefore DiscreteDistribution inherits from pandas.DataFrame for probabilities storage and has a DiscreteVariable
# attribute to characterise the underlying discrete and finite random variable.
class DiscreteDistribution(FrozenClass, pd.DataFrame):
# Class constructor
def __init__(self, probs=None, name=None,
domain=[], bins=[], unit=None,
include_lowest=False,
**df_specs):
self.variable = DiscreteVariable(name=name,
domain=domain,
bins=bins,
unit=unit,
include_lowest=include_lowest)
# Remove domain_type from df_specs if specified to avoid
# error in the following __init__ call
df_specs.pop("domain_type", None)
# For interval domains: convert the list of bins defining the domain into a list of pandas Intervals.
if self.variable.domain_type == "interval":
domain = | pd.IntervalIndex.from_breaks(self.variable.bins) | pandas.IntervalIndex.from_breaks |
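# A minimal sketch of the attribute-freezing pattern used by FrozenClass above:
# after _freeze() is called, assigning a *new* attribute raises TypeError while
# existing attributes stay writable. The Point class is made up for illustration
# and assumes FrozenClass above is in scope.
class Point(FrozenClass):
    def __init__(self, x, y):
        self.x = x
        self.y = y
        self._freeze()

p = Point(1, 2)
p.x = 10              # fine: the attribute already exists
try:
    p.z = 3           # rejected: no new attributes after freezing
except TypeError as err:
    print(err)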
import pandas as pd
import numpy as np
import os
import datetime
import scipy.io
def loadfiles(path, dirs, pkup = 0):
filedir = dirs[pkup]
flag = False
fpath = path + filedir + "/"
files = [d for d in os.listdir(fpath) if d.startswith("AppTag")]
files = sorted(files)
if "withlabel" in os.listdir(path):
vfiles = [d for d in os.listdir(path+"withlabel") if d.startswith("vtime") and filedir.rsplit("_",1)[1] in d and "edited" in d]
if len(vfiles) > 0:
flag = True
df = pd.read_csv(path +"withlabel/"+ vfiles[0], index_col=0)
print(vfiles)
df['label'] = pd.to_numeric(df['label'], errors='coerce')
ls = df['label'].tolist()
labels = np.array([0 if np.isnan(i) else int(i) for i in ls])
vfiles = [d for d in os.listdir(path) if d.startswith("vtime") and filedir.rsplit("_",1)[1] in d][0]
print(vfiles)
stamp = []
for j in range(len(files)):
df = pd.read_csv(fpath + files[j], index_col=0)
ax_ = df["AccelerationX"]
ay_ = df["AccelerationY"]
az_ = df["AccelerationZ"]
temp = np.array([np.float64(ax_),np.float64(ay_),np.float64(az_)])
stamp_ = [datetime.datetime.strptime(i, '\t%Y/%m/%d %H:%M:%S.%f') for i in df.index if len(str(i))>4]
if j == 0:
acc = np.array(temp)
time0 = stamp_[0]
else:
acc = np.hstack((acc, temp))
stamp_ = [(st - time0).total_seconds() for st in stamp_]
xs = np.arange(0,len(stamp_)*10,10)
xs2 = np.arange(len(df.index))
stamp.extend(np.interp(xs2,xs,stamp_))
df = | pd.read_csv(path + vfiles, index_col=0) | pandas.read_csv |
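# A small sketch of the timestamp-interpolation step in loadfiles() above: the
# loader assumes one wall-clock timestamp per 10 accelerometer samples, so the
# in-between samples get linearly interpolated times. The values are made up.
import numpy as np

stamped_seconds = np.array([0.0, 1.0, 2.1])                # parsed timestamps (s)
xs = np.arange(0, len(stamped_seconds) * 10, 10)           # stamped rows: 0, 10, 20
xs2 = np.arange(25)                                        # every sample index
interp_seconds = np.interp(xs2, xs, stamped_seconds)
print(interp_seconds[:12])   # 0.0, 0.1, ..., 1.0, 1.11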
# pylint: disable=E1101
from pandas.util.py3compat import StringIO, BytesIO, PY3
from datetime import datetime
from os.path import split as psplit
import csv
import os
import sys
import re
import unittest
import nose
from numpy import nan
import numpy as np
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
ExcelFile, TextFileReader, TextParser)
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
network,
ensure_clean)
import pandas.util.testing as tm
import pandas as pd
import pandas.lib as lib
from pandas.util import py3compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
from pandas._parser import OverflowError
from pandas.io.parsers import (ExcelFile, ExcelWriter, read_csv)
def _skip_if_no_xlrd():
try:
import xlrd
ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2]))
if ver < (0, 9):
raise nose.SkipTest('xlrd < 0.9, skipping')
except ImportError:
raise nose.SkipTest('xlrd not installed, skipping')
def _skip_if_no_xlwt():
try:
import xlwt
except ImportError:
raise nose.SkipTest('xlwt not installed, skipping')
def _skip_if_no_openpyxl():
try:
import openpyxl
except ImportError:
raise nose.SkipTest('openpyxl not installed, skipping')
def _skip_if_no_excelsuite():
_skip_if_no_xlrd()
_skip_if_no_xlwt()
_skip_if_no_openpyxl()
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)[:10]
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])[:10]
_tsframe = tm.makeTimeDataFrame()[:5]
_mixed_frame = _frame.copy()
_mixed_frame['foo'] = 'bar'
class ExcelTests(unittest.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
def test_parse_cols_int(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
suffix = ['', 'x']
for s in suffix:
pth = os.path.join(self.dirpath, 'test.xls%s' % s)
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols=3)
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['A', 'B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True, parse_cols=3)
tm.assert_frame_equal(df, df2, check_names=False) # TODO add index to xls file)
tm.assert_frame_equal(df3, df2, check_names=False)
def test_parse_cols_list(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
suffix = ['', 'x']
for s in suffix:
pth = os.path.join(self.dirpath, 'test.xls%s' % s)
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols=[0, 2, 3])
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True,
parse_cols=[0, 2, 3])
tm.assert_frame_equal(df, df2, check_names=False) # TODO add index to xls file
tm.assert_frame_equal(df3, df2, check_names=False)
def test_parse_cols_str(self):
_skip_if_no_openpyxl()
_skip_if_no_xlrd()
suffix = ['', 'x']
for s in suffix:
pth = os.path.join(self.dirpath, 'test.xls%s' % s)
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols='A:D')
df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['A', 'B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True, parse_cols='A:D')
tm.assert_frame_equal(df, df2, check_names=False) # TODO add index to xls, read xls ignores index name ?
tm.assert_frame_equal(df3, df2, check_names=False)
del df, df2, df3
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols='A,C,D')
df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True,
parse_cols='A,C,D')
tm.assert_frame_equal(df, df2, check_names=False) # TODO add index to xls file
tm.assert_frame_equal(df3, df2, check_names=False)
del df, df2, df3
df = xls.parse('Sheet1', index_col=0, parse_dates=True,
parse_cols='A,C:D')
df2 = read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = df2.reindex(columns=['B', 'C'])
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0,
parse_dates=True,
parse_cols='A,C:D')
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
def test_excel_stop_iterator(self):
_skip_if_no_xlrd()
excel_data = ExcelFile(os.path.join(self.dirpath, 'test2.xls'))
parsed = excel_data.parse('Sheet1')
expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1'])
tm.assert_frame_equal(parsed, expected)
def test_excel_cell_error_na(self):
_skip_if_no_xlrd()
excel_data = ExcelFile(os.path.join(self.dirpath, 'test3.xls'))
parsed = excel_data.parse('Sheet1')
expected = DataFrame([[np.nan]], columns=['Test'])
tm.assert_frame_equal(parsed, expected)
def test_excel_table(self):
_skip_if_no_xlrd()
pth = os.path.join(self.dirpath, 'test.xls')
xls = ExcelFile(pth)
df = xls.parse('Sheet1', index_col=0, parse_dates=True)
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df3 = xls.parse('Sheet2', skiprows=[1], index_col=0, parse_dates=True)
tm.assert_frame_equal(df, df2, check_names=False)
tm.assert_frame_equal(df3, df2, check_names=False)
df4 = xls.parse('Sheet1', index_col=0, parse_dates=True,
skipfooter=1)
df5 = xls.parse('Sheet1', index_col=0, parse_dates=True,
skip_footer=1)
tm.assert_frame_equal(df4, df.ix[:-1])
tm.assert_frame_equal(df4, df5)
def test_excel_read_buffer(self):
_skip_if_no_xlrd()
_skip_if_no_openpyxl()
pth = os.path.join(self.dirpath, 'test.xls')
f = open(pth, 'rb')
xls = ExcelFile(f)
# it works
xls.parse('Sheet1', index_col=0, parse_dates=True)
pth = os.path.join(self.dirpath, 'test.xlsx')
f = open(pth, 'rb')
xl = ExcelFile(f)
df = xl.parse('Sheet1', index_col=0, parse_dates=True)
def test_xlsx_table(self):
_skip_if_no_xlrd()
_skip_if_no_openpyxl()
pth = os.path.join(self.dirpath, 'test.xlsx')
xlsx = ExcelFile(pth)
df = xlsx.parse('Sheet1', index_col=0, parse_dates=True)
df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df3 = xlsx.parse('Sheet2', skiprows=[1], index_col=0, parse_dates=True)
tm.assert_frame_equal(df, df2, check_names=False) # TODO add index to xlsx file
tm.assert_frame_equal(df3, df2, check_names=False)
df4 = xlsx.parse('Sheet1', index_col=0, parse_dates=True,
skipfooter=1)
df5 = xlsx.parse('Sheet1', index_col=0, parse_dates=True,
skip_footer=1)
tm.assert_frame_equal(df4, df.ix[:-1])
tm.assert_frame_equal(df4, df5)
def test_specify_kind_xls(self):
_skip_if_no_xlrd()
xlsx_file = os.path.join(self.dirpath, 'test.xlsx')
xls_file = os.path.join(self.dirpath, 'test.xls')
# succeeds with xlrd 0.8.0, weird
# self.assertRaises(Exception, ExcelFile, xlsx_file, kind='xls')
# ExcelFile(open(xls_file, 'rb'), kind='xls')
# self.assertRaises(Exception, ExcelFile, open(xlsx_file, 'rb'),
# kind='xls')
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return | read_csv(*args, **kwds) | pandas.io.parsers.read_csv |
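# The tests above exercise the old ExcelFile.parse(..., parse_cols=...) API.
# As a hedged aside, the equivalent column selection in current pandas goes
# through read_excel's usecols argument; the path below stands in for the test
# fixture used above and is assumed to exist.
import os
import pandas as pd

pth = os.path.join("data", "test.xlsx")
df = pd.read_excel(pth, sheet_name="Sheet1", index_col=0,
                   parse_dates=True, usecols="A:D")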
import pandas as pd
from unittest2 import TestCase # or `from unittest import ...` if on Python 3.4+
import numpy as np
import category_encoders.tests.helpers as th
import category_encoders as encoders
np_X = th.create_array(n_rows=100)
np_X_t = th.create_array(n_rows=50, extras=True)
np_y = np.random.randn(np_X.shape[0]) > 0.5
np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5
X = th.create_dataset(n_rows=100)
X_t = th.create_dataset(n_rows=50, extras=True)
y = pd.DataFrame(np_y)
y_t = pd.DataFrame(np_y_t)
class TestOneHotEncoderTestCase(TestCase):
def test_one_hot(self):
enc = encoders.OneHotEncoder(verbose=1, return_df=False)
enc.fit(X)
self.assertEqual(enc.transform(X_t).shape[1],
enc.transform(X).shape[1],
'We have to get the same count of columns despite the presence of a new value')
enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='indicator')
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('extra_-1', out.columns.values)
enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='return_nan')
enc.fit(X)
out = enc.transform(X_t)
self.assertEqual(len([x for x in out.columns.values if str(x).startswith('extra_')]), 3)
enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='error')
# The exception is already raised in fit() because transform() is called there to get
# feature_names right.
enc.fit(X)
with self.assertRaises(ValueError):
enc.transform(X_t)
enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='return_nan', use_cat_names=True)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('extra_A', out.columns.values)
enc = encoders.OneHotEncoder(verbose=1, return_df=True, use_cat_names=True, handle_unknown='indicator')
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('extra_-1', out.columns.values)
# test inverse_transform
X_i = th.create_dataset(n_rows=100, has_none=False)
X_i_t = th.create_dataset(n_rows=50, has_none=False)
cols = ['underscore', 'none', 'extra', 321, 'categorical']
enc = encoders.OneHotEncoder(verbose=1, use_cat_names=True, cols=cols)
enc.fit(X_i)
obtained = enc.inverse_transform(enc.transform(X_i_t))
th.verify_inverse_transform(X_i_t, obtained)
def test_fit_transform_HaveMissingValuesAndUseCatNames_ExpectCorrectValue(self):
encoder = encoders.OneHotEncoder(cols=[0], use_cat_names=True, handle_unknown='indicator')
result = encoder.fit_transform([[-1]])
self.assertListEqual([[1, 0]], result.get_values().tolist())
def test_inverse_transform_HaveDedupedColumns_ExpectCorrectInverseTransform(self):
encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True)
value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)})
transformed = encoder.fit_transform(value)
inverse_transformed = encoder.inverse_transform(transformed)
assert value.equals(inverse_transformed)
def test_inverse_transform_HaveNoCatNames_ExpectCorrectInverseTransform(self):
encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=False)
value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)})
transformed = encoder.fit_transform(value)
inverse_transformed = encoder.inverse_transform(transformed)
assert value.equals(inverse_transformed)
def test_fit_transform_HaveColumnAppearTwice_ExpectColumnsDeduped(self):
encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True, handle_unknown='indicator')
value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series('-1')})
result = encoder.fit_transform(value)
columns = result.columns.tolist()
self.assertSetEqual({'match_box_-1', 'match_-1', 'match_box_-1#', 'match_box_-1##'}, set(columns))
def test_fit_transform_HaveHandleUnknownValueAndUnseenValues_ExpectAllZeroes(self):
train = | pd.DataFrame({'city': ['Chicago', 'Seattle']}) | pandas.DataFrame |
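# A brief usage sketch of the handle_unknown behaviour exercised above, using
# the same category_encoders API; the city values below are made up.
import pandas as pd
import category_encoders as encoders

train = pd.DataFrame({"city": ["Chicago", "Seattle"]})
test = pd.DataFrame({"city": ["Chicago", "Detroit"]})     # 'Detroit' is unseen

enc = encoders.OneHotEncoder(cols=["city"], use_cat_names=True,
                             handle_unknown="indicator")
enc.fit(train)
print(enc.transform(test))    # the unseen value lands in the 'city_-1' column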
"""
Module containing the core system of encoding and creation
of understandable dataset for the recommender system.
"""
import joblib
import pandas as pd
from recipe_tagger import recipe_waterfootprint as wf
from recipe_tagger import util
from sklearn import cluster
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
from stop_words import get_stop_words
from tqdm import tqdm
from cf_recommender import CFRecommender
from configuration import load_configuration
class Encoder:
"""
Class that contains all the encoding and the
creation of data files from the dataset provided
by the user in the input directory and the
mapping specified into the configuration file.
:param language: the language of the dataset.
"""
def __init__(self, language):
"""
Constructor method for the class.
It loads all the necessary paths for the files
provided in the configuration file.
"""
config = load_configuration()
self.language = language
self.path_orders = config["path_orders"]
self.path_recipes = config["path_recipes"]
self.path_embedding = config["path_embedding"]
self.path_user_scores = config["path_user_scores"]
self.input_path_orders = config["input_path_orders"]
self.input_path_recipes = config["input_path_recipes"]
print(f">> Initialize encoder, language: {self.language} <<")
def __process_ingredients(self, ingredients):
"""
Process the provided ingredients string.
:param ingredients: a string composed by ingredients comma separated.
:return: a list containing the processed ingredients.
"""
return [
util.process_ingredients(ing, language=self.language, stem=False)
for ing in ingredients.split(",")
]
def __generate_ingredients_embedding(self, column_name="ingredient"):
"""
Generate the ingredients embedding TF-IDF matrix and save it
to a pickle file in the default folder.
:param column_name: the name of the column that contains the
ingredients in the recipe dataset.
:return: None
"""
recipes_df = pd.read_csv(self.input_path_recipes)[[column_name]]
recipes_df[column_name] = recipes_df[column_name].apply(
self.__process_ingredients
)
recipes_df[column_name] = recipes_df[column_name].apply(", ".join)
tfidf = TfidfVectorizer(stop_words=get_stop_words(self.language))
matrix = tfidf.fit_transform(recipes_df[column_name])
joblib.dump(matrix, self.path_embedding)
def __get_user_order_quantity(self, df):
"""
Reformat a dataset containing order history into a
dictionary containing user ratings for recipes that
they have ordered. The rating is computed from
how many times the user has ordered the recipe, relative to
that user's most-ordered recipe.
:param df: the dataframe containing user orders.
:return: a dictionary containing user recipes ratings.
"""
data = {"user_id": [], "item_id": [], "rating": []}
for user in df["user_id"].unique():
user_df = df.query(f"user_id == {user}")
user_df = user_df.groupby("item_id").count().reset_index()
max_rating = user_df["user_id"].max()
user_df["rating"] = user_df.apply(
lambda x: int((x["user_id"] * 4) / max_rating) + 1, axis=1
)
data["user_id"].extend([user] * user_df.shape[0])
data["item_id"].extend(user_df["item_id"])
data["rating"].extend(user_df["rating"])
return data
def __get_wf(self, ingredients, quantities):
"""
Return the total water footprint of a single recipe
based on its ingredients and their quantities.
:param ingredients: a list containing all the ingredients.
:param quantities: a list containing all ingredients quantities.
:return: the water footprint of the recipe.
"""
while len(ingredients) > len(quantities):
quantities.append("5ml")
return wf.get_recipe_waterfootprint(
ingredients, quantities, online_search=False, language=self.language
)
def __get_recipe_category(self, index, total):
"""
Return the category of a recipe based on its position
in the sorted dataset.
:param index: the index of the recipe in the dataset.
:param total: the total number of recipes in the dataset.
:return: the category of the recipe. (A, B, C, D, E)
"""
categories = ["A", "B", "C", "D", "E"]
threshold = total / len(categories)
return categories[int(index / threshold)]
def __get_dataset_reduced(self, df, min_user_orders=5, min_recipe_orders=3):
"""
Return the dataset without recipes and orders that don't
match the restrictions. Restrictions are on minimum
orders made by user and minimum orders for a recipe.
:param df: the dataframe containing all the orders.
:param min_user_orders: the minimum number of user orders. Default is 5.
:param min_recipe_orders: the minimum number of recipe orders. Default is 3.
:return: a dataframe without orders that don't match guidelines.
"""
filter_recipe = df["item_id"].value_counts() > min_recipe_orders
filter_recipe = filter_recipe[filter_recipe].index.tolist()
filter_user = df["user_id"].value_counts() > min_user_orders
filter_user = filter_user[filter_user].index.tolist()
return df[
(df["user_id"].isin(filter_user)) & (df["item_id"].isin(filter_recipe))
]
def __generate_orders(
self,
columns_map,
rating=True,
):
"""
Generate and save to pickle file the new orders dataset, formatted
and reduced following the previous guidelines. If the input
dataframe doesn't yet contain the user ratings, it will be transformed
into a rating dataset.
:param columns_map: a dictionary containing the mapping of the
column in the input dataset.
:param rating: the presence or not of user ratings.
:return: None
"""
df = | pd.read_csv(self.input_path_orders) | pandas.read_csv |
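# A worked sketch of the implicit-rating rule in __get_user_order_quantity()
# above: a user's order count per recipe is rescaled to a 1-5 rating relative
# to that user's most-ordered recipe. The counts below are made up.
order_counts = {"pizza": 10, "salad": 5, "soup": 1}   # one user's orders
max_count = max(order_counts.values())
ratings = {item: int((count * 4) / max_count) + 1
           for item, count in order_counts.items()}
print(ratings)    # {'pizza': 5, 'salad': 3, 'soup': 1}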
import os
import numpy as np
import pandas as pd
from covid19model.data.utils import convert_age_stratified_property
class QALY_model():
def __init__(self, comorbidity_distribution):
self.comorbidity_distribution = comorbidity_distribution
# Define absolute path
abs_dir = os.path.dirname(__file__)
# Import life table (q_x)
self.life_table = pd.read_csv(os.path.join(abs_dir, '../../../data/interim/QALYs/Life_table_Belgium_2019.csv'),sep=';',index_col=0)
# Compute the vector mu_x and append to life table
self.life_table['mu_x']= self.compute_death_rate(self.life_table['q_x'])
# Define mu_x explictly to enhance readability of the code
self.mu_x = self.life_table['mu_x']
# Load comorbidity QoL scores for the Belgian population from <NAME>
QoL_Van_Wilder=pd.read_excel(os.path.join(abs_dir,"../../../data/interim/QALYs/De_Wilder_QoL_scores.xlsx"),index_col=0,sheet_name='QoL_scores')
QoL_Van_Wilder.columns = ['0','1','2','3+']
QoL_Van_Wilder.index = pd.IntervalIndex.from_tuples([(0,10),(10,20),(20,30),(30,40),(40,50),(50,60),(60,70),(70,80),(80,120)], closed='left')
self.QoL_Van_Wilder = QoL_Van_Wilder
# Define overall Belgian QoL scores
self.QoL_Belgium = pd.Series(index=pd.IntervalIndex.from_tuples([(0,10),(10,20),(20,30),(30,40),(40,50),(50,60),(60,70),(70,80),(80,120)], closed='left'), data=[0.85, 0.85, 0.84, 0.83, 0.805, 0.78, 0.75, 0.72, 0.72])
# Convert Belgian QoL and Van Wilder QoL to age bins of self.comorbidity_distribution
self.QoL_Belgium = convert_age_stratified_property(self.QoL_Belgium, self.comorbidity_distribution.index)
tmp_QoL_Van_Wilder = pd.DataFrame(index=self.comorbidity_distribution.index, columns=self.QoL_Van_Wilder.columns)
for column in self.QoL_Van_Wilder.columns:
tmp_QoL_Van_Wilder[column] = convert_age_stratified_property(self.QoL_Van_Wilder[column], self.comorbidity_distribution.index)
self.QoL_Van_Wilder = tmp_QoL_Van_Wilder
# Compute the QoL scores of the studied population
self.QoL_df = self.build_comorbidity_QoL(self.comorbidity_distribution, self.QoL_Van_Wilder, self.QoL_Belgium)
# Load comorbidity SMR estimates
SMR_pop_df=pd.read_excel(os.path.join(abs_dir,"../../../data/interim/QALYs/De_Wilder_QoL_scores.xlsx"), index_col=0, sheet_name='SMR')
SMR_pop_df.columns = ['0','1','2','3+']
SMR_pop_df.index = pd.IntervalIndex.from_tuples([(0,10),(10,20),(20,30),(30,40),(40,50),(50,60),(60,70),(70,80),(80,120)], closed='left')
self.SMR_pop_df = SMR_pop_df
# Convert comorbidity SMR estimates to age bins of self.comorbidity_distribution
tmp_SMR_pop_df = pd.DataFrame(index=self.comorbidity_distribution.index, columns=self.SMR_pop_df.columns)
for column in self.SMR_pop_df.columns:
tmp_SMR_pop_df[column] = convert_age_stratified_property(self.SMR_pop_df[column], self.comorbidity_distribution.index)
self.SMR_pop_df = tmp_SMR_pop_df
# Compute the SMR of the studied population
self.SMR_df = self.build_comorbidity_SMR(self.comorbidity_distribution, self.SMR_pop_df)
def build_comorbidity_SMR(self, comorbidity_distribution, population_SMR):
""" A function to compute the Standardized Mortality Ratios (SMRs) in a studied population, based on the comorbidity distribution of the studied population and the comorbidity distribution of the Belgian population
Parameters
----------
comorbidity_distribution : pd.Dataframe
A dataframe containing the studied population fraction with x comorbidities.
This dataframe is the input of the comorbidity-QALY model. The studied population are usually recovered or deceased COVID-19 patients in hospitals.
The dataframe must have the age group as its index and make use of a pandas multicolumn, where the first level denotes the population (usually R or D, but the code is written to use n populations).
The second level denotes the number of comorbidities, which must be equal to 0, 1, 2 or 3+.
population_SMR : pd.Dataframe
A dataframe containing the age-stratified SMRs for individuals with 0, 1, 2 or 3+ comorbidities in the general Belgian population.
Computed using the comorbidity distributions for the general Belgian population obtained from <NAME>, and the relative risk of dying by Charlson et al. (computation performed in MS Excel).
Returns
-------
SMR_df: pd.DataFrame
The weighted Standardized Mortality Ratios (SMRs) in the studied population.
An SMR > 1 indicates the studied population is less healthy than the general Belgian population.
"""
# Extract names of populations
populations = list(comorbidity_distribution.columns.get_level_values(0).unique())
# Initialize dataframe
df = pd.DataFrame(index=population_SMR.index, columns=populations)
# Fill dataframe
for idx,age_group in enumerate(df.index):
for jdx,pop in enumerate(populations):
df.loc[age_group, pop] = sum(comorbidity_distribution.loc[age_group, pop]*population_SMR.loc[age_group])
# Append SMR of average Belgian
df.loc[slice(None), 'BE'] = 1
return df
def build_comorbidity_QoL(self, comorbidity_distribution, comorbidity_QoL, average_QoL):
""" A function to compute the QoL scores in a studied population, based on the comorbidity distribution of the studied population and the QoL scores for 0, 1, 2, 3+ comorbidities for the Belgian population
Parameters
----------
comorbidity_distribution : pd.Dataframe
A dataframe containing the studied population fraction with x comorbidities.
This dataframe is the input of the comorbidity-QALY model. The studied population are usually recovered or deceased COVID-19 patients in hospitals.
The dataframe must have the age group as its index and make use of a pandas multicolumn, where the first level denotes the population (usually R or D, but the code is written to use n populations).
The second level denotes the number of comorbidities, which must be equal to 0, 1, 2 or 3+.
comorbidity_QoL : pd.Dataframe
A dataframe containing the age-stratified QoL scores for individuals with 0, 1, 2 or 3+ comorbidities in the general Belgian population.
Obtained from <NAME>.
average_QoL : pd.Series
A series containing the average QoL score for the (Belgian) population
Returns
-------
QoL_df: pd.DataFrame
The comorbidity-weighted QoL scores of the studied population.
"""
# Extract names of populations
populations = list(comorbidity_distribution.columns.get_level_values(0).unique())
# Initialize dataframe
df = pd.DataFrame(index=comorbidity_QoL.index, columns=populations)
# Fill dataframe
for idx,age_group in enumerate(df.index):
for jdx,pop in enumerate(populations):
df.loc[age_group, pop] = sum(comorbidity_distribution.loc[age_group, pop]*comorbidity_QoL.loc[age_group])
# Append SMR of average Belgian
df.loc[slice(None), 'BE'] = average_QoL
return df
def compute_death_rate(self, q_x):
""" A function to compute the force of mortality (instantaneous death rate at age x)
Parameters
----------
q_x : list or np.array
Probability of dying between age x and age x+1.
Returns
-------
mu_x : np.array
Instantaneous death rate at age x
"""
# Pre-allocate
mu_x = np.zeros(len(q_x))
# Compute first entry
mu_x[0] = -np.log(1-q_x[0])
# Loop over remaining entries
for age in range(1,len(q_x)):
mu_x[age] = -0.5*(np.log(1-q_x[age])+np.log(1-q_x[age-1]))
return mu_x
def survival_function(self, SMR=1):
""" A function to compute the probability of surviving until age x
Parameters
----------
self.mu_x : list or np.array
Instantaneous death rate at age x
SMR : float
"Standardized mortality ratio" (SMR) is the ratio of observed deaths in a study group to expected deaths in the general population.
An SMR of 1 corresponds to an average life expectancy, an increase in SMR shortens the expected lifespan.
Returns
-------
S_x : pd.Series
Survival function, i.e. the probability of surviving up until age x
"""
# Pre-allocate as np.array
S_x = np.zeros(len(self.mu_x))
# Survival rate at age 0 is 100%
S_x[0] = 1
# Loop
for age in range(1,len(self.mu_x)):
S_x[age] = S_x[age-1]*np.exp(-SMR*self.mu_x[age])
# Post-allocate as pd.Series object
S_x = pd.Series(index=range(len(self.mu_x)), data=S_x)
S_x.index.name = 'x'
return S_x
def life_expectancy(self,SMR=1):
""" A function to compute the life expectancy at age x
Parameters
----------
SMR : float
"Standardized mortality ratio" (SMR) is the ratio of observed deaths in a study group to expected deaths in the general population.
An SMR of 1 corresponds to an average life expectancy, an increase in SMR shortens the expected lifespan.
Returns
-------
LE_x : pd.Series
Life expectancy at age x
"""
# Compute survival function
S_x = self.survival_function(SMR)
# First compute inner sum
tmp = np.zeros(len(S_x))
for age in range(len(S_x)-1):
tmp[age] = 0.5*(S_x[age]+S_x[age+1])
# Then sum from x to the end of the table to obtain life expectancy
LE_x = np.zeros(len(S_x))
for x in range(len(S_x)):
LE_x[x] = np.sum(tmp[x:])
# Post-allocate to pd.Series object
LE_x = pd.Series(index=range(len(self.mu_x)), data=LE_x)
LE_x.index.name = 'x'
return LE_x
def compute_QALE_x(self, population='BE', SMR_method='convergent'):
""" A function to compute the quality-adjusted life expectancy at age x
Parameters
----------
self.mu_x : list or np.array
Instantaneous death rate at age x
self.SMR_df : pd.Dataframe
"Standardized mortality ratio" (SMR) is the ratio of observed deaths in a study group to expected deaths in the general population.
An SMR of 1 corresponds to an average life expectancy, an increase in SMR shortens the expected lifespan.
self.QoL_df : pd.Dataframe
Quality-of-life utility weights, as imported from `~/data/interim/QALYs/QoL_scores_Belgium_2018_v3.csv`.
Must contain two columns: "group_limit" and "QoL_score"
population : string
Choice of QoL scores. Valid options are 'Belgium', 'R' and 'D'.
'Belgium' : Overall QoL scores for the Belgian population by De Wilder et. al and an SMR=1 are applied (this represents average QALY loss)
'R' : QoL scores and SMR for those recovering from COVID-19 in the hospital (most likely higher quality than average)
'D' : QoL scores and SMR for those dying from COVID-19 in the hospital (most likely lower quality than average)
SMR_method : string
Choice of SMR model for remainder of life. Valid options are 'convergent' and 'constant'.
'convergent' : the SMR gradually converges to SMR=1 by the end of the subjects life.
If a person is expected to be healthy (SMR<1), this method represents the heuristic that we do not know how healthy this person will be in the future.
We just assume his "healthiness" converges back to the population average as time goes by.
'constant' : the SMR used to compute the QALEs remains equal to the expected value for the rest of the subjects life.
If a person is expected to be healthy (SMR<1), this method assumes the person will remain equally healthy for his entire life.
Returns
-------
QALE_x : pd.Series
Quality-adjusted life expectancy at age x
"""
# Pre-allocate results
QALE_x = np.zeros(len(self.mu_x))
# Loop over x
for x in range(len(self.mu_x)):
# Pre-allocate dQALY
dQALE = np.zeros([len(self.mu_x)-x-1])
# Set age-dependant utility weights to lowest possible
j=0
age_limit=self.QoL_df.index[j].right - 1
QoL_x=self.QoL_df[population].values[j]
# Calculate the SMR at age x
if ((SMR_method == 'convergent')|(SMR_method == 'constant')):
k = np.where(self.QoL_df.index.contains(x))[0][-1]
age_limit = self.QoL_df.index[k].right - 1
SMR_x = self.SMR_df[population].values[k]
# Loop over years remaining after year x
for i in range(x,len(self.mu_x)-1):
# Find the right age bin
j = np.where(self.QoL_df.index.contains(i))[0][-1]
age_limit = self.QoL_df.index[j].right - 1
# Choose the right QoL score
QoL_x = self.QoL_df[population].values[j]
# Choose the right SMR
if SMR_method == 'convergent':
# SMR gradually converges to one by end of life
SMR = 1 + (SMR_x-1)*((len(self.mu_x)-1-i)/(len(self.mu_x)-1-x))
elif SMR_method == 'constant':
# SMR is equal to SMR at age x for remainder of life
SMR = SMR_x
# Compute the survival function
S_x = self.survival_function(SMR)
# Then compute the quality-adjusted life years lived between age x and x+1
dQALE[i-x] = QoL_x*0.5*(S_x[i] + S_x[i+1])
# Sum dQALY to obtain QALY_x
QALE_x[x] = np.sum(dQALE)
# Post-allocate to pd.Series object
QALE_x = pd.Series(index=range(len(self.mu_x)), data=QALE_x)
QALE_x.index.name = 'x'
return QALE_x
def compute_QALY_x(self, population='BE', r=0.03, SMR_method='convergent'):
""" A function to compute the quality-adjusted life years remaining at age x
Parameters
----------
self.mu_x : list or np.array
Instantaneous death rate at age x
self.SMR_df : pd.Dataframe
"Standardized mortality ratio" (SMR) is the ratio of observed deaths in a study group to expected deaths in the general population.
An SMR of 1 corresponds to an average life expectancy, an increase in SMR shortens the expected lifespan.
self.QoL_df : pd.Dataframe
Quality-of-life utility weights, as imported from `~/data/interim/QALYs/QoL_scores_Belgium_2018_v3.csv`.
Must contain two columns: "group_limit" and "QoL_score"
population : string
Choice of QoL scores. Valid options are 'Belgium', 'R' and 'D'.
'Belgium' : Overall QoL scores for the Belgian population by De Wilder et. al and an SMR=1 are applied (this represents average QALY loss)
'R' : QoL scores and SMR for those recovering from COVID-19 in the hospital (most likely higher quality than average)
'D' : QoL scores and SMR for those dying from COVID-19 in the hospital (most likely lower quality than average)
r : float
Discount rate (default 3%)
SMR_method : string
Choice of SMR model for remainder of life. Valid options are 'convergent' and 'constant'.
'convergent' : the SMR gradually converges to SMR=1 by the end of the subjects life.
If a person is expected to be healthy (SMR<1), this method represents the heuristic that we do not know how healthy this person will be in the future.
We just assume his "healthiness" converges back to the population average as time goes by.
'constant' : the SMR used to compute the QALEs remains equal to the expected value for the rest of the subjects life.
If a person is expected to be healthy (SMR<1), this method assumes the person will remain equally healthy for his entire life.
Returns
-------
QALY_x : pd.Series
Quality-adjusted life years remaining at age x
"""
# Pre-allocate results
QALY_x = np.zeros(len(self.mu_x))
# Loop over x
for x in range(len(self.mu_x)):
# Pre-allocate dQALY
dQALY = np.zeros([len(self.mu_x)-x-1])
# Set age-dependant utility weights to lowest possible
j=0
age_limit=self.QoL_df.index[j].right -1
QoL_x=self.QoL_df[population].values[j]
# Calculate the SMR at age x
if ((SMR_method == 'convergent')|(SMR_method == 'constant')):
k = np.where(self.QoL_df.index.contains(x))[0][-1]
age_limit = self.QoL_df.index[k].right - 1
SMR_x = self.SMR_df[population].values[k]
# Loop over years remaining after year x
for i in range(x,len(self.mu_x)-1):
# Find the right age bin
j = np.where(self.QoL_df.index.contains(i))[0][-1]
age_limit = self.QoL_df.index[j].right - 1
# Choose the right QoL score
QoL_x = self.QoL_df[population].values[j]
# Choose the right SMR
if SMR_method == 'convergent':
# SMR gradually converges to one by end of life
SMR = 1 + (SMR_x-1)*((len(self.mu_x)-1-i)/(len(self.mu_x)-1-x))
elif SMR_method == 'constant':
# SMR is equal to SMR at age x for remainder of life
SMR = SMR_x
# Compute the survival function
S_x = self.survival_function(SMR)
# Then compute the quality-adjusted life years lived between age x and x+1
dQALY[i-x] = QoL_x*0.5*(S_x[i] + S_x[i+1])*(1+r)**(x-i)
# Sum dQALY to obtain QALY_x
QALY_x[x] = np.sum(dQALY)
# Post-allocate to pd.Series object
QALY_x = pd.Series(index=range(len(self.mu_x)), data=QALY_x)
QALY_x.index.name = 'x'
return QALY_x
def bin_QALY_x(self, QALY_x, model_bins=pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')):
""" A function to bin the vector QALY_x according to the age groups in the COVID-19 SEIQRD
Parameters
----------
QALY_x : np.array
Quality-adjusted life years remaining at age x
model_bins : pd.IntervalIndex
Desired age bins
Returns
-------
QALY_binned: pd.Series
Quality-adjusted life years lost upon death for every age bin of the COVID-19 SEIQRD model
"""
# Pre-allocate results vector
QALY_binned = np.zeros(len(model_bins))
# Loop over model bins
for i in range(len(model_bins)):
# Map QALY_x to model bins
QALY_binned[i] = np.mean(QALY_x[model_bins[i].left:model_bins[i].right-1])
# Post-allocate to pd.Series object
QALY_binned = pd.Series(index=model_bins, data=QALY_binned)
QALY_binned.index.name = 'age_group'
return QALY_binned
def build_binned_QALY_df(self, r=0.03, SMR_method='convergent', model_bins=pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')):
# Extract names of populations
populations = list(self.SMR_df.columns.get_level_values(0).unique())
# Initialize empty dataframe
df = pd.DataFrame()
# Loop over populations
for pop in populations:
QALY_x = self.compute_QALY_x(population=pop, SMR_method='convergent',r=r)
binned_QALY = self.bin_QALY_x(QALY_x, model_bins)
binned_QALY.name = pop
df = df.append(binned_QALY)
return df.T
def append_acute_QALY_losses(self, out, binned_QALY_df):
# https://link.springer.com/content/pdf/10.1007/s40271-021-00509-z.pdf
##################
## Mild disease ##
##################
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4690729/ --> Table A2 --> utility weight = 0.659
# https://www.valueinhealthjournal.com/article/S1098-3015(21)00034-6/fulltext --> Table 2 --> 1-0.43=0.57
out['QALYs_mild'] = out['M']*np.expand_dims(np.expand_dims((self.QoL_df['Belgium']-0.659)/365,axis=1),axis=0)
#####################
## Hospitalization ##
#####################
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4690729/ --> Table A2 --> utility weight = 0.514
# https://www.valueinhealthjournal.com/article/S1098-3015(21)00034-6/fulltext --> Table 2 --> 1-0.50 = 0.50
out['QALYs_cohort'] = out['C']*np.expand_dims(np.expand_dims((self.QoL_df['Belgium']-0.50)/365,axis=1),axis=0) + out['C_icurec']*np.expand_dims(np.expand_dims((self.QoL_df['Belgium']-0.50)/365,axis=1),axis=0)
# https://www.valueinhealthjournal.com/article/S1098-3015(21)00034-6/fulltext --> Table 2 --> 1-0.60 = 0.40
out['QALYs_ICU'] = out['ICU']*np.expand_dims(np.expand_dims((self.QoL_df['Belgium']-0.40)/365,axis=1),axis=0)
###########
## Death ##
###########
m_C_nt = 0.4
m_ICU_nt = 0.8
out['QALYs_death'] = out['D']*np.expand_dims(np.expand_dims(binned_QALY_df['D'],axis=1),axis=0)
out['QALYs_treatment'] = (m_C_nt*out['R_C'] + m_ICU_nt*out['R_C'])*np.expand_dims(np.expand_dims(binned_QALY_df['R'],axis=1),axis=0)
return out
def lost_QALYs_hospital_care(reduction, granular=False):
"""
This function calculates the expected number of QALYs lost due to a given
percentage reduction in regular (non Covid-19 related) hospital care.
The calculation is an approximation based on the reported hospital costs in Belgium per
disease group and the average cost per QALY gained per disease group (calculated from cost-effectiveness
thresholds reported for the Netherlands)
Parameters
----------
reduction: np.array
Percentage reduction in hospital care. if granular = True, reduction per disease group
granular: bool
If True, calculations are performed per disease group. If False, calculations are performed
on an average basis
Returns
-------
lost_QALYs float or pd.DataFrame
Total number of QALYs lost per day caused by a given reduction in hospital care.
if granular = True, results are given per disease group
"""
# Import hospital care cost per disease group and cost per QALY
# cost per QALY (EUR), total spent (million EUR)
hospital_data= | pd.read_excel("../../data/interim/QALYs/hospital_data_qalys.xlsx", sheet_name='hospital_data') | pandas.read_excel |
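# A compact numerical sketch of the recursions used by QALY_model above:
# S_0 = 1, S_x = S_{x-1} * exp(-SMR * mu_x), and each year lived between i and
# i+1 contributes QoL * 0.5 * (S_i + S_{i+1}) discounted by (1 + r)**(x - i).
# The toy mortality rates, SMR, QoL score and discount rate below are made up.
import numpy as np

mu_x = np.array([0.002, 0.001, 0.001, 0.002, 0.003])
SMR, QoL, r = 1.2, 0.8, 0.03

S = np.ones(len(mu_x))
for age in range(1, len(mu_x)):
    S[age] = S[age - 1] * np.exp(-SMR * mu_x[age])

x = 0                                # QALYs remaining at age 0
dQALY = [QoL * 0.5 * (S[i] + S[i + 1]) * (1 + r) ** (x - i)
         for i in range(x, len(mu_x) - 1)]
print(sum(dQALY))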
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale, LabelEncoder
from sklearn.linear_model import LinearRegression
############################################
# Section 1 - Importing and Combining Data
############################################
np.random.seed(12345)
# Getting company names and sector labels
df = pd.read_csv('D:/ML/book1.csv', header=0, index_col='Ticker')
to_keep = ['Name', 'Sector']
dfA = df[to_keep]
# Getting financial ratio data
dfB = pd.read_csv('D:/ML/ratios.csv', header=0, index_col='Ticker')
ratioNames = np.array(dfB.columns.values)
# Concatenating dataframes to get primary dataset
companyData = dfA.join(dfB).drop(['BF-B', 'CTVA', 'FRC'])
companyData = companyData.fillna(0)
clusterData = np.array(companyData)
companies = np.array(companyData.index)
############################################
# Section 2 - Computing Ranked Measures
############################################
# Storing sector-wise means of ratios
dt = companyData.groupby('Sector').mean()
# Function to get industry-relative ratios
def getRelative(ratioData):
ratios = ratioData[:, 2:]
sector = ratioData[:, 1]
for i in range(len(sector)):
# Get sector of company and sector-wise averages of ratios
ind = sector[i]
indAvgs = dt.loc[ind]
for j in range(len(indAvgs)):
ratios[i, j] = ratios[i, j] / indAvgs[j]
return ratios
# Storing the relative ratios for future use
finalData = pd.DataFrame(getRelative(clusterData), index=companies, columns=ratioNames).fillna(0)
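# A small sketch of the industry-relative scaling that getRelative() performs
# above: each company's ratio is divided by its sector's mean for that ratio
# before clustering. The tickers, sectors and ROE values below are made up, and
# the pandas import at the top of this script is reused.
demo = pd.DataFrame({
    "Sector": ["Tech", "Tech", "Energy"],
    "ROE":    [0.20,   0.10,   0.05],
}, index=["AAA", "BBB", "CCC"])

sector_means = demo.groupby("Sector").mean()
relative_roe = demo["ROE"] / demo["Sector"].map(sector_means["ROE"])
print(relative_roe)   # AAA 1.33, BBB 0.67, CCC 1.00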
####################################################
# Section 3 - Identifying Optimal Number of Clusters
###################################################
# Loading the feature dataset
X = np.array(finalData)
comp = clusterData[:, 1]
# Encoding output labels
lab = LabelEncoder()
labels = lab.fit_transform(comp)
# Algorithm to compare cluster sizes (adapted from Scikit-learn's documentation)
def bench_k_means(classifier, name, data):
# Prints labels of measures used
print('init\t\ttime\tinertia\thomo\tcompl\tv-meas\tARI\tAMI\tsilhouette')
t0 = time()
classifier.fit(data)
print('%-9s\t%.2fs\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
% (name, (time() - t0), classifier.inertia_,
metrics.homogeneity_score(labels, classifier.labels_),
metrics.completeness_score(labels, classifier.labels_),
metrics.v_measure_score(labels, classifier.labels_),
metrics.adjusted_rand_score(labels, classifier.labels_),
metrics.adjusted_mutual_info_score(labels, classifier.labels_, average_method='arithmetic'),
metrics.silhouette_score(data, classifier.labels_, metric='euclidean', sample_size=497))
)
return classifier.inertia_
# List to store inertia for Elbow Method of cluster size identification
wcss = []
# Comparing multiple values of k (chose to use 4)
for i in range(2, 12):
print("Calculating measurement scores for Cluster Size {}".format(i))
cluster = KMeans(n_clusters=i, init='k-means++', n_init=10, max_iter=300,
precompute_distances=True, random_state=3)
inert = bench_k_means(classifier=cluster, name = "k-means++", data = X)
print('')
wcss.append(inert)
# Plotting inertia for different values of k to identify 'elbow'
plt.figure(figsize=(10, 10))
plt.plot(range(2, 12), wcss)
plt.title('Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.xticks(range(1, 12))
plt.show()
# Function to visualise two most populous clusters on two random axes
def plotClusters(kmeans_out, dimA, dimB):
(values, counts) = np.unique(kmeans_out, return_counts=True)
filled = np.stack((values, counts), axis=1)
sortedFill = filled[filled[:, 1].argsort()]
# Pick the last two clusters, i.e. most populous
for i in [-1, -2]:
cID = sortedFill[i][0]
if i == -1:
plt.scatter(X[kmeans_out == cID, dimA], X[kmeans_out == cID, dimB], s=50, c='lightblue',
marker='o', edgecolor='black', label='cluster 1')
else:
plt.scatter(X[kmeans_out == cID, dimA], X[kmeans_out == cID, dimB], s=50, c='lightgreen',
marker='s', edgecolor='black', label='cluster 2')
plt.legend(scatterpoints=1)
plt.grid()
plt.xlabel('Dimension A')
plt.ylabel('Dimension B')
plt.title('Visual Decomposition of Clustering')
plt.xlim(-1, 2)
plt.ylim(-1, 2)
plt.show()
return sortedFill
# Cluster Size chosen from previous section
size = 7
# Visualising k-means using two random axes
kmeans = KMeans(n_clusters=size, init='random', n_init=20, max_iter=300,
precompute_distances=True, random_state=3)
kmeans_out = kmeans.fit_predict(X)
idlist = plotClusters(kmeans_out, 1, 7)
####################################################
# Section 4 - Using PCA to visualise clusters
###################################################
# Fitting K-means to reduced-form data
pca = PCA(n_components=size-1).fit_transform(X)
cluster = KMeans(init='random', n_clusters=size, n_init=20)
pca_out = cluster.fit_predict(pca)
plotClusters(pca_out, 0, 1)
clusterID = pd.DataFrame(kmeans_out, index=companies, columns=['ClusterID'])
xData = pd.concat((finalData, clusterID), axis=1)
clusterID.to_csv('D:/ML/companylist.csv')
####################################################
# Section 5 - Creating Datasets for Regression
###################################################
stockData = np.array(pd.read_csv('D:/ML/tickerdata.csv', index_col='Date').drop(['BF-B', 'CTVA', 'FRC'], axis=1).fillna(0)).T
clusterStock = np.concatenate((stockData, np.array(clusterID)), axis=1)
# List of popular clusters
cIDA = idlist[-1, 0]
cIDB = idlist[-2, 0]
cIDC = idlist[-3, 0]
cIDD = idlist[-4, 0]
# Boolean conditions to split data
condA = [row for row in clusterStock if row[-1] == cIDA]
condB = [row for row in clusterStock if row[-1] == cIDB]
condC = [row for row in clusterStock if row[-1] == cIDC]
condD = [row for row in clusterStock if row[-1] == cIDD]
# Creating separate datasets
regDataA = np.array(condA).reshape((len(condA), len(condA[0])))[:, :-1].T
regDataB = np.array(condB).reshape((len(condB), len(condB[0])))[:, :-1].T
regDataC = np.array(condC).reshape((len(condC), len(condC[0])))[:, :-1].T
regDataD = np.array(condD).reshape((len(condD), len(condD[0])))[:, :-1].T
# Function to apply Linear Regression to every company inside a given cluster
def runRegression(regDataA, regDataB, regDataC, regDataD):
for numerics in [regDataA, regDataB, regDataC, regDataD]:
numComp = np.shape(numerics)[1]
numEx = np.shape(numerics)[0]
# Array to store weights
if numComp >= 240:
weightsA = np.ndarray((numComp, numComp-1))
elif numComp >= 200:
weightsB = np.ndarray((numComp, numComp-1))
elif numComp >= 6:
weightsC = np.ndarray((numComp, numComp-1))
else:
weightsD = np.ndarray((numComp, numComp-1))
# Get features and output values for linear regression
for i in range(numComp):
yData = numerics[:, i]
xData = np.delete(numerics[:], i, axis=1)
linReg = LinearRegression()
linReg.fit(xData, yData)
beta = linReg.coef_
if numComp >= 240:
weightsA[i] = beta
elif numComp >= 200:
weightsB[i] = beta
elif numComp >= 6:
weightsC[i] = beta
else:
weightsD[i] = beta
print("Done for one cluster.")
return weightsA, weightsB, weightsC, weightsD
# Storing weights as CSV files
wMatA, wMatB, wMatC, wMatD = runRegression(regDataA, regDataB, regDataC, regDataD)
# Getting list of companies in cluster A and B
indA = xData[xData['ClusterID'] == cIDA].index
indB = xData[xData['ClusterID'] == cIDB].index
indC = xData[xData['ClusterID'] == cIDC].index
indD = xData[xData['ClusterID'] == cIDD].index
pd.DataFrame(wMatA, index=indA).to_csv('D:/ML/clusterA.csv')
pd.DataFrame(wMatB, index=indB).to_csv('D:/ML/clusterB.csv')
pd.DataFrame(wMatC, index=indC).to_csv('D:/ML/clusterC.csv')
pd.DataFrame(wMatD, index=indD).to_csv('D:/ML/clusterD.csv')
pd.DataFrame(idlist, columns=['ClusterID', 'Count']).to_csv('D:/ML/popclusters.csv')
####################################################
# Section 6 - Listing similar companies
###################################################
# Loading necessary data
tickers = | pd.read_csv('D:/ML/companylist.csv', index_col=0) | pandas.read_csv |
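# A hedged sketch of one way the stored per-cluster weights could be used: each
# row of clusterA.csv holds the regression coefficients of one company's price
# series on its cluster peers, so a peer-implied series is the dot product of
# those coefficients with the peers' prices. This assumes the CSV row/column
# ordering matches the ordering used when the weights were fitted, ignores the
# regression intercept (only coef_ was saved above), and reuses the numpy and
# pandas imports at the top of this script.
weightsA = pd.read_csv('D:/ML/clusterA.csv', index_col=0)
prices = pd.read_csv('D:/ML/tickerdata.csv', index_col='Date').drop(
    ['BF-B', 'CTVA', 'FRC'], axis=1).fillna(0)

target = weightsA.index[0]
peers = [t for t in weightsA.index if t != target]
implied = prices[peers].to_numpy() @ weightsA.loc[target].to_numpy()
print(np.corrcoef(implied, prices[target])[0, 1])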
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next one are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
        cat_unordered = cat.set_ordered(False)
        self.assertFalse((cat > cat).any())
        def f():
            cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
        # comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
        # Make sure that unequal comparisons take the categories order into
        # account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
        # unicode option should not affect Categorical, as it doesn't care
        # about the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
    def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
        # deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
        # all "pointers" to '4' must be changed from 3 to 0,...
        c = c.set_categories([4, 3, 2, 1])
        # positions are changed
        self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3]))
        # categories are now in new order
        self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1]))
        # output is the same
        self.assert_numpy_array_equal(c.get_values(),
                                      np.array([1, 2, 3, 4, 1]))
        self.assertEqual(c.min(), 4)
        self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
        # Categorical returns np.array like pd.Series, but different from
# np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
        # eggs after donuts, after switching milk and donuts
        exp = np.array([3, 5])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
        # whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
        # whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name
) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
| tm.assert_almost_equal(result, expected) | pandas.util.testing.assert_almost_equal |
# -*- coding: utf-8 -*-
import copy
import sys
import click
import six
from six import print_
from six import iteritems
import pandas as pd
from .analyser.simulation_exchange import SimuExchange
from .const import EVENT_TYPE, EXECUTION_PHASE
from .data import BarMap
from .events import SimulatorAStockTradingEventSource
from .utils import ExecutionContext, dummy_func
from .scheduler import scheduler
from .analyser.commission import AStockCommission
from .analyser.slippage import FixedPercentSlippageDecider
class StrategyContext(object):
def __init__(self):
self.__last_portfolio_update_dt = None
@property
def now(self):
return ExecutionContext.get_current_dt()
@property
def slippage(self):
return copy.deepcopy(ExecutionContext.get_exchange().account.slippage_decider)
@slippage.setter
@ExecutionContext.enforce_phase(EXECUTION_PHASE.INIT)
def slippage(self, value):
assert isinstance(value, (int, float))
ExecutionContext.get_exchange().account.slippage_decider = FixedPercentSlippageDecider(rate=value)
@property
def commission(self):
return copy.deepcopy(ExecutionContext.get_exchange().account.commission_decider)
@commission.setter
@ExecutionContext.enforce_phase(EXECUTION_PHASE.INIT)
def commission(self, value):
assert isinstance(value, (int, float))
ExecutionContext.get_exchange().account.commission_decider = AStockCommission(commission_rate=value)
@property
def benchmark(self):
return copy.deepcopy(ExecutionContext.get_trading_params().benchmark)
@benchmark.setter
@ExecutionContext.enforce_phase(EXECUTION_PHASE.INIT)
def benchmark(self, value):
assert isinstance(value, six.string_types)
ExecutionContext.get_trading_params().benchmark = value
@property
def short_selling_allowed(self):
raise NotImplementedError
@short_selling_allowed.setter
def short_selling_allowed(self):
raise NotImplementedError
@property
def portfolio(self):
dt = self.now
# if self.__last_portfolio_update_dt != dt:
        # FIXME need to use cache, or might use proxy rather than copy
if True:
self.__portfolio = copy.deepcopy(ExecutionContext.get_exchange().account.portfolio)
self.__last_portfolio_update_dt = dt
return self.__portfolio
def __repr__(self):
items = ("%s = %r" % (k, v)
for k, v in self.__dict__.items()
if not callable(v) and not k.startswith("_"))
return "Context({%s})" % (', '.join(items), )
class StrategyExecutor(object):
def __init__(self, trading_params, data_proxy, **kwargs):
"""init
:param Strategy strategy: current user strategy object
:param TradingParams trading_params: current trading params
:param DataProxy data_proxy: current data proxy to access data
"""
self.trading_params = trading_params
self._data_proxy = data_proxy
self._strategy_context = kwargs.get("strategy_context")
if self._strategy_context is None:
self._strategy_context = StrategyContext()
self._user_init = kwargs.get("init", dummy_func)
self._user_handle_bar = kwargs.get("handle_bar", dummy_func)
self._user_before_trading = kwargs.get("before_trading", dummy_func)
self._simu_exchange = kwargs.get("simu_exchange")
if self._simu_exchange is None:
self._simu_exchange = SimuExchange(data_proxy, trading_params)
self._event_source = SimulatorAStockTradingEventSource(trading_params)
self._current_dt = None
self.current_universe = set()
self.progress_bar = click.progressbar(length=len(self.trading_params.trading_calendar), show_eta=False)
def execute(self):
"""run strategy
:returns: performance results
:rtype: pandas.DataFrame
"""
# use local variable for performance
data_proxy = self.data_proxy
strategy_context = self.strategy_context
simu_exchange = self.exchange
init = self._user_init
before_trading = self._user_before_trading
handle_bar = self._user_handle_bar
exchange_on_dt_change = simu_exchange.on_dt_change
exchange_on_bar_close = simu_exchange.on_bar_close
exchange_on_day_open = simu_exchange.on_day_open
exchange_on_day_close = simu_exchange.on_day_close
exchange_update_portfolio = simu_exchange.update_portfolio
is_show_progress_bar = self.trading_params.show_progress
def on_dt_change(dt):
self._current_dt = dt
exchange_on_dt_change(dt)
with ExecutionContext(self, EXECUTION_PHASE.INIT):
init(strategy_context)
try:
for dt, event in self._event_source:
on_dt_change(dt)
bar_dict = BarMap(dt, self.current_universe, data_proxy)
if event == EVENT_TYPE.DAY_START:
with ExecutionContext(self, EXECUTION_PHASE.BEFORE_TRADING, bar_dict):
exchange_on_day_open()
before_trading(strategy_context, None)
elif event == EVENT_TYPE.HANDLE_BAR:
with ExecutionContext(self, EXECUTION_PHASE.HANDLE_BAR, bar_dict):
exchange_update_portfolio(bar_dict)
handle_bar(strategy_context, bar_dict)
scheduler.next_day(dt, strategy_context, bar_dict)
exchange_on_bar_close(bar_dict)
elif event == EVENT_TYPE.DAY_END:
with ExecutionContext(self, EXECUTION_PHASE.FINALIZED, bar_dict):
exchange_on_day_close()
if is_show_progress_bar:
self.progress_bar.update(1)
finally:
self.progress_bar.render_finish()
results_df = self.generate_result(simu_exchange)
return results_df
def generate_result(self, simu_exchange):
"""generate result dataframe
:param simu_exchange:
        :returns: result dataframe that contains daily portfolio, risk and trades
:rtype: pd.DataFrame
"""
account = simu_exchange.account
risk_cal = simu_exchange.risk_cal
columns = [
"daily_returns",
"total_returns",
"annualized_returns",
"market_value",
"portfolio_value",
"total_commission",
"total_tax",
"pnl",
"positions",
"cash",
]
risk_keys = [
"volatility", "max_drawdown",
"alpha", "beta", "sharpe",
"information_rate", "downside_risk",
"tracking_error", "sortino",
]
data = []
for date, portfolio in iteritems(simu_exchange.daily_portfolios):
# portfolio
items = {"date": | pd.Timestamp(date) | pandas.Timestamp |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 14 00:07:42 2019
@author: saugata
"""
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# Activities are the class labels
# It is a 6 class classification
ACTIVITIES = {
0: 'WALKING',
1: 'WALKING_UPSTAIRS',
2: 'WALKING_DOWNSTAIRS',
3: 'SITTING',
4: 'STANDING',
5: 'LAYING',
}
# Utility function to print the confusion matrix
def confusion_matrix(Y_true, Y_pred):
Y_true = pd.Series([ACTIVITIES[y] for y in np.argmax(Y_true, axis=1)])
Y_pred = pd.Series([ACTIVITIES[y] for y in np.argmax(Y_pred, axis=1)])
return pd.crosstab(Y_true, Y_pred, rownames=['True'], colnames=['Pred'])
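# Illustrative sketch (assumed inputs, not part of the original script): with
# one-hot encoded arrays of shape (n_samples, 6), the helper above can be used
# to inspect per-class errors, e.g.
#   y_true_demo = np.eye(6)[[0, 1, 2]]           # hypothetical ground truth
#   y_pred_demo = np.eye(6)[[0, 2, 2]]           # hypothetical predictions
#   print(confusion_matrix(y_true_demo, y_pred_demo))  # rows: True, cols: Pred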
# Data directory
DATADIR = 'UCI_HAR_Dataset'
# Raw data signals
# Signals are from Accelerometer and Gyroscope
# The signals are in x,y,z directions
# Sensor signals are filtered to have only body acceleration
# excluding the acceleration due to gravity
# Triaxial acceleration from the accelerometer is total acceleration
SIGNALS = [
"body_acc_x",
"body_acc_y",
"body_acc_z",
"body_gyro_x",
"body_gyro_y",
"body_gyro_z",
"total_acc_x",
"total_acc_y",
"total_acc_z"
]
# Utility function to read the data from csv file
def _read_csv(filename):
return pd.read_csv(filename, delim_whitespace=True, header=None)
# Utility function to load the raw signal data
def load_signals(subset):
signals_data = []
for signal in SIGNALS:
filename = f'UCI_HAR_Dataset/{subset}/Inertial Signals/{signal}_{subset}.txt'
signals_data.append(_read_csv(filename).as_matrix())
# Transpose is used to change the dimensionality of the output,
# aggregating the signals by combination of sample/timestep.
# Resultant shape is (7352 train/2947 test samples, 128 timesteps, 9 signals)
return np.transpose(signals_data, (1, 2, 0))
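# Shape sketch (illustrative only): np.transpose with axes (1, 2, 0) turns a
# list of 9 arrays of shape (n_samples, 128) into (n_samples, 128, 9), e.g.
#   demo = [np.zeros((4, 128)) for _ in range(9)]
#   np.transpose(demo, (1, 2, 0)).shape   # -> (4, 128, 9)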
def load_y(subset):
"""
    The objective that we are trying to predict is an integer, from 1 to 6,
    that represents a human activity. We return a binary representation of
    every sample objective as a 6-bit vector using One-Hot Encoding
(https://pandas.pydata.org/pandas-docs/stable/generated/pandas.get_dummies.html)
"""
filename = f'UCI_HAR_Dataset/{subset}/y_{subset}.txt'
y = _read_csv(filename)[0]
return | pd.get_dummies(y) | pandas.get_dummies |
import pandas as pd
import numpy as np
from .cross_validation import CrossValidation
from dask import delayed
from threading import Lock
class Data(object):
""" This class represents the set "Train plus Test" datasets.
This "union" is necessary throughout the ensemble. """
def __init__(self, train_ds=None, test_ds=None):
""" Train and test datasets. """
self.pre_train_status = None
self.pos_train_status = None
self.pre_pred_status = None
self.pos_pred_status = None
if train_ds is None:
self.train_ds = Dataset()
else:
self.train_ds = train_ds
if test_ds is None:
self.test_ds = Dataset()
else:
self.test_ds = test_ds
def __hash__(self):
return hash((self.train_ds, self.test_ds))
def __eq__(self, other):
return (self.train_ds, self.test_ds) == (other.train_ds,
other.test_ds)
@delayed
def pre_train(self, *arg):
""" Define actions to be made before training the nodes that
have this Data as source. """
self.train_ds.create_cv()
@delayed
def pos_train(self, *arg):
print("POS_training")
@delayed
def pre_pred(self, *arg):
print("PRE_predict")
@delayed
def pos_pred(self, *arg):
print("POS_predict")
class Dataset(object):
""" This class represents a dataset. At least
these requirements are addressed here:
- Necessary methods to handle its data.
- Control under multitask access.
- Management of the target column.
"""
def __init__(self, dataset=None, target=None):
""" We prevent the target from being assigned twice. This situation
may happen when more than one node is assigning data here. """
self.cv = {}
self.lock_ds = Lock()
self.lock_target = Lock()
self._target_assigned = False
if dataset is None:
self.ds = | pd.DataFrame() | pandas.DataFrame |
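# Illustrative sketch (not part of the original class): the locks above are intended
# to serialise concurrent access from several nodes, e.g. a hypothetical setter could
# guard the one-time target assignment like this:
#
#   def set_target(self, target):
#       with self.lock_target:
#           if not self._target_assigned:
#               self.target = target
#               self._target_assigned = True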
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import seaborn as sns
from scipy import stats
import math
def clean_data(df):
"""
INPUT
df_listings - pandas dataframe
OUTPUT
X - A matrix holding all of the variables you want to consider when predicting the response
y - the corresponding response vector
This function cleans df_listings using the following steps to produce X and y:
1. Drop rows with 0 price and outlier prices (prices above 2950)
2. Create y as the price column, transformed by log
3. Create X from selected columns
4. Deal with missing values
5. Create dummy variables for selected categorical variables, drop the original columns
"""
# Drop rows with 0 price
df = df[df.price > 0]
df = df[df.price < 2950]
# Create y
y = df['price'].apply(math.log)
# Select columns for X
potential_vars = ['host_listings_count',
'calculated_host_listings_count_private_rooms',
'neighbourhood_cleansed',
'room_type',
'property_type',
'beds',
'availability_365',
'number_of_reviews',
'neighborhood_overview',
'space',
'notes',
'transit',
'access',
'interaction',
'house_rules',
'host_about',
'host_is_superhost',
'host_has_profile_pic',
'host_identity_verified',
'instant_bookable',
'require_guest_profile_picture',
'require_guest_phone_verification',]
bool_vars = ['host_is_superhost',
'host_has_profile_pic',
'host_identity_verified',
'instant_bookable',
'require_guest_profile_picture',
'require_guest_phone_verification']
free_text_vars = ['neighborhood_overview',
'space',
'notes',
'transit',
'access',
'interaction',
'house_rules',
'host_about']
df = df[potential_vars]
# Deal with missing values
    df['number_of_reviews'] = df['number_of_reviews'].fillna(0)
    df[bool_vars] = df[bool_vars].fillna('f')
    df[free_text_vars] = df[free_text_vars].fillna('')
def translate_bool(col):
        for index, value in col.items():
col[index] = 1 if value == 't' else 0
return col
def create_bool(col):
        for index, value in col.items():
col[index] = 0 if value == '' else 1
return col
fill_mean = lambda col: col.fillna(col.mean())
num_vars = df.select_dtypes(include=['int', 'float']).columns
df[num_vars] = df[num_vars].apply(fill_mean, axis=0)
df[bool_vars] = df[bool_vars].apply(translate_bool, axis=0)
    df[bool_vars] = df[bool_vars].astype(int)
df[free_text_vars] = df[free_text_vars].apply(create_bool, axis=0)
    df[free_text_vars] = df[free_text_vars].astype(int)
# Dummy the categorical variables
cat_vars = ['neighbourhood_cleansed', 'room_type', 'property_type']
for var in cat_vars:
# for each cat add dummy var, drop original column
df = pd.concat([df.drop(var, axis=1), | pd.get_dummies(df[var], prefix=var, prefix_sep='_', drop_first=True) | pandas.get_dummies |
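# Note on the missing-value handling above (illustrative toy frame, not project data):
# fillna(..., inplace=True) on a column subset such as df[bool_vars] acts on a
# temporary copy and leaves df unchanged, which is why the cleaning code assigns the
# result back with df[cols] = df[cols].fillna(value):
# _tmp = pd.DataFrame({'a': [None, 't']})
# _tmp[['a']].fillna('f', inplace=True)   # _tmp still contains the NaN
# _tmp[['a']] = _tmp[['a']].fillna('f')   # _tmp now holds 'f'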
import os
import joblib
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from Fuzzy_clustering.version2.common_utils.logging import create_logger
from Fuzzy_clustering.version2.dataset_manager.common_utils import check_empty_nwp
from Fuzzy_clustering.version2.dataset_manager.common_utils import rescale_mean
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_2d_dense
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_3d
class DatasetCreatorDense:
def __init__(self, projects_group, projects, data, path_nwp, nwp_model, nwp_resolution, data_variables, njobs=1,
test=False, dates=None):
self.projects = projects
self.is_for_test = test
self.projects_group = projects_group
self.data = data
self.path_nwp = path_nwp
self.nwp_model = nwp_model
self.nwp_resolution = nwp_resolution
self.compress = True if self.nwp_resolution == 0.05 else False
self.n_jobs = njobs
self.variables = data_variables
self.logger = create_logger(logger_name=__name__, abs_path=self.path_nwp,
logger_path=f'log_{self.projects_group}.log', write_type='a')
        if self.data is not None:
            self.dates = self.check_dates()
        elif dates is not None:
            self.dates = dates
def check_dates(self):
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates are checked. Number of time samples is %s', str(len(dates)))
return pd.DatetimeIndex(dates)
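    # Small worked example of the pattern used in check_dates (dates are assumptions):
    # _all = pd.date_range('2020-01-01', '2020-01-03')          # candidate days
    # _have = pd.to_datetime(['2020-01-01', '2020-01-03'])      # days with data
    # [d for d in _all if d in _have]
    # -> [Timestamp('2020-01-01'), Timestamp('2020-01-03')], i.e. only the calendar
    # days that actually appear in the measurement index are kept.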
def correct_nwps(self, nwp, variables):
if nwp['lat'].shape[0] == 0:
area_group = self.projects[0]['static_data']['area_group']
resolution = self.projects[0]['static_data']['NWP_resolution']
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2,
resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2,
resolution).reshape(-1, 1).T
for var in nwp.keys():
if not var in {'lat', 'long'}:
if nwp['lat'].shape[0] != nwp[var].shape[0]:
nwp[var] = nwp[var].T
if 'WS' in variables and not 'WS' in nwp.keys():
if 'Uwind' in nwp.keys() and 'Vwind' in nwp.keys():
if nwp['Uwind'].shape[0] > 0 and nwp['Vwind'].shape[0] > 0:
r2d = 45.0 / np.arctan(1.0)
wspeed = np.sqrt(np.square(nwp['Uwind']) + np.square(nwp['Vwind']))
wdir = np.arctan2(nwp['Uwind'], nwp['Vwind']) * r2d + 180
nwp['WS'] = wspeed
nwp['WD'] = wdir
if 'Temp' in nwp.keys():
nwp['Temperature'] = nwp['Temp']
del nwp['Temp']
return nwp
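    # Worked example of the U/V-to-wind conversion in correct_nwps (numbers invented):
    # for Uwind = 3 m/s and Vwind = 4 m/s,
    #   WS = sqrt(3**2 + 4**2) = 5.0 m/s
    #   WD = arctan2(3, 4) * (180/pi) + 180 ~= 36.87 + 180 = 216.87 degrees
    # where r2d = 45.0 / arctan(1.0) is simply the radians-to-degrees factor 180/pi.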
def stack_by_sample(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables, predictions):
timestep = 60
x = dict()
y = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
preds = predictions[project['_id']]
hor = preds.columns[-1] + timestep
p_dates = [t + pd.DateOffset(minutes=hor)]
preds = preds.loc[t].to_frame().T
dates_pred = [t + pd.DateOffset(minutes=h) for h in preds.columns]
pred = pd.DataFrame(preds.values.ravel(), index=dates_pred, columns=[project['_id']])
data_temp = pd.concat([data[project['_id']].iloc[np.where(data.index < t)].to_frame(), pred])
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
y[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
try:
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data_temp.loc[(date - pd.DateOffset(hours=1))].values
inp['Obs_lag2'] = data_temp.loc[(date - pd.DateOffset(hours=2))].values
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
y[project_id] = pd.concat([y[project_id], pd.DataFrame(data.loc[date, project_id],
columns=['target'],
index=[date])])
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data_temp.loc[(date - pd.DateOffset(hours=1)), project_id].values
inp['Obs_lag2'] = data_temp.loc[(date - pd.DateOffset(hours=2)), project_id].values
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
y[project['_id']] = pd.concat(
[y[project['_id']], pd.DataFrame(data.loc[date, project['_id']],
columns=['target'], index=[date])])
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
for project in projects:
if len(x_3d[project['_id']].shape) == 3:
x_3d[project['_id']] = x_3d[project['_id']][np.newaxis, :, :, :]
return x, y, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables):
x = dict()
y = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
if project['static_data']['horizon'] == 'day_ahead':
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
else:
p_dates = pd.date_range(t + pd.DateOffset(hours=1), t + pd.DateOffset(hours=24), freq='H')
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
y[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
if not self.is_for_test:
inp['Obs_lag1'] = inp['Obs_lag1'] + np.random.normal(0, 0.05) * inp['Obs_lag1']
inp['Obs_lag2'] = inp['Obs_lag2'] + np.random.normal(0, 0.05) * inp['Obs_lag2']
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
y[project_id] = pd.concat([y[project_id], pd.DataFrame(data.loc[date, project_id],
columns=['target'],
index=[date])])
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
                            date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
y[project['_id']] = pd.concat(
[y[project['_id']], pd.DataFrame(data.loc[date, project['_id']],
columns=['target'], index=[date])])
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
return x, y, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps_rabbitmq(self, t, path_nwp, nwp_model, project, variables):
x = dict()
x_3d = dict()
nwps = project['nwp']
p_dates = pd.date_range(t, t + pd.DateOffset(days=3) - pd.DateOffset(hours=1), freq='H')
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_rabbitmq(date, nwp, nwp_prev, nwp_next, project['static_data']['type'])
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
                    date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
return x, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps_online(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables):
x = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
if project['static_data']['horizon'] == 'day_ahead':
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
else:
p_dates = pd.date_range(t + pd.DateOffset(hours=1), t + pd.DateOffset(hours=24), freq='15min')
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
                            date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
return x, x_3d, t.strftime('%d%m%y%H%M')
def get_lats_longs(self):
lats = dict()
longs = dict()
nwp_found = False
for t in self.dates: # Try to load at least one file ??
file_name = os.path.join(self.path_nwp, f"{self.nwp_model}_{t.strftime('%d%m%y')}.pickle")
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=48), freq='H').strftime(
'%d%m%y%H%M')
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for date in p_dates:
if date in nwps:
nwp = nwps[date]
nwp_found = True
break
if nwp_found:
break
print(nwp_found)
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
if nwp['lat'].shape[0] == 0:
area_group = self.projects[0]['static_data']['area_group']
resolution = self.projects[0]['static_data']['NWP_resolution']
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2, resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2, resolution).reshape(-1, 1).T
for project in self.projects:
areas = project['static_data']['areas'] # The final area is a 5x5 grid
project_id = project['_id']
lat, long = nwp['lat'], nwp['long']
if isinstance(areas, list):
# Is this guaranteed to be 5x5 ? I think yes, because of the resolution. TODO: VERIFY
lats[project_id] = np.where((lat[:, 0] >= areas[0][0]) & (lat[:, 0] <= areas[1][0]))[0]
longs[project_id] = np.where((long[0, :] >= areas[0][1]) & (long[0, :] <= areas[1][1]))[0]
else:
lats[project_id] = dict()
longs[project_id] = dict()
for area in sorted(areas.keys()):
lats[project_id][area] = np.where((lat[:, 0] >= areas[0][0]) & (lat[:, 0] <= areas[1][0]))[0]
longs[project_id][area] = np.where((long[0, :] >= areas[0][1]) & (long[0, :] <= areas[1][1]))[0]
return lats, longs
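    # Illustrative example of the bounding-box selection above (grid values invented):
    # with lat = np.array([[34.00], [34.05], [34.10], [34.15]]) and an area whose
    # latitude corners are 34.05 and 34.10,
    #   np.where((lat[:, 0] >= 34.05) & (lat[:, 0] <= 34.10))[0] -> array([1, 2])
    # i.e. the indices of the NWP grid rows that fall inside the park's area.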
def make_dataset_res_short_term(self):
lats, longs = self.get_lats_longs()
predictions = dict()
for project in self.projects:
predictions[project['_id']] = joblib.load(os.path.join(project['static_data']['path_data']
, 'predictions_short_term.pickle'))
nwp = self.stack_by_sample(self.data.index[20], self.data, lats, longs, self.path_nwp, self.nwp_model, self.projects,
self.variables, predictions)
nwp_samples = Parallel(n_jobs=self.n_jobs)(
delayed(self.stack_by_sample)(t, self.data, lats, longs, self.path_nwp, self.nwp_model, self.projects,
self.variables, predictions) for t in self.data.index[20:])
x = dict()
y = dict()
x_3d = dict()
for project in self.projects:
x[project['_id']] = pd.DataFrame()
y[project['_id']] = pd.DataFrame()
x_3d[project['_id']] = np.array([])
for nwp in nwp_samples:
for project in self.projects:
if project['_id'] in nwp[2].keys():
if nwp[2][project['_id']].shape[0] != 0:
x[project['_id']] = pd.concat([x[project['_id']], nwp[0][project['_id']]])
y[project['_id']] = pd.concat([y[project['_id']], nwp[1][project['_id']]])
x_3d[project['_id']] = stack_3d(x_3d[project['_id']], nwp[2][project['_id']])
self.logger.info('All Inputs stacked')
dataset_x_csv = 'dataset_X_test.csv'
dataset_y_csv = 'dataset_y_test.csv'
dataset_cnn_pickle = 'dataset_cnn_test.pickle'
for project in self.projects:
project_id = project['_id']
data_path = project['static_data']['path_data']
dataset_x = x[project_id]
dataset_y = y[project_id]
if dataset_y.isna().any().values[0]:
dataset_x = dataset_x.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
                if len(x_3d[project_id].shape) > 1:
                    x_3d[project_id] = np.delete(x_3d[project_id], np.where(dataset_y.isna())[0], axis=0)
dataset_y = dataset_y.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
if dataset_x.isna().any().values[0]:
dataset_y = dataset_y.drop(dataset_x.index[np.where(dataset_x.isna())[0]])
                if len(x_3d[project_id].shape) > 1:
                    x_3d[project_id] = np.delete(x_3d[project_id], np.where(dataset_x.isna())[0], axis=0)
dataset_y = dataset_y.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
index = [d for d in dataset_x.index if d in dataset_y.index]
dataset_x = dataset_x.loc[index]
dataset_y = dataset_y.loc[index]
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset_x.columns[ind]
dataset_x = dataset_x[columns]
dataset_x.to_csv(os.path.join(data_path, dataset_x_csv))
dataset_y.to_csv(os.path.join(data_path, dataset_y_csv))
joblib.dump(x_3d[project_id], os.path.join(data_path, dataset_cnn_pickle))
self.logger.info('Datasets saved for project %s', project['_id'])
def make_dataset_res_rabbitmq(self):
project = self.projects[0]
nwp_daily = self.stack_daily_nwps_rabbitmq(self.dates[0], self.path_nwp, self.nwp_model, project,
self.variables)
x = nwp_daily[0][project['_id']]
x_3d = nwp_daily[1][project['_id']]
project_id = project['_id']
data_path = project['static_data']['path_data']
dataset_x = x
if os.path.exists(os.path.join(data_path, 'dataset_columns_order.pickle')):
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset_x.columns[ind]
dataset_x = dataset_x[columns]
return dataset_x, x_3d
def make_dataset_res_online(self):
project = self.projects[0]
lats, longs = self.get_lats_longs()
nwp_daily = Parallel(n_jobs=self.n_jobs)(
delayed(self.stack_daily_nwps_online)(t, self.data, lats, longs, self.path_nwp, self.nwp_model, self.projects,
self.variables) for t in self.dates)
x = pd.DataFrame()
y = pd.DataFrame()
x_3d = np.array([])
for nwp in nwp_daily:
if nwp[1][project['_id']].shape[0] != 0:
x = pd.concat([x, nwp[0][project['_id']]])
x_3d = stack_3d(x_3d, nwp[2][project['_id']])
project_id = project['_id']
data_path = project['static_data']['path_data']
dataset_x = x
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset_x.columns[ind]
dataset_x = dataset_x[columns]
return dataset_x, x_3d
def make_dataset_res(self):
lats, longs = self.get_lats_longs()
nwp = self.stack_daily_nwps(self.dates[4], self.data, lats, longs, self.path_nwp, self.nwp_model, self.projects,
self.variables)
nwp_daily = Parallel(n_jobs=self.n_jobs)(
delayed(self.stack_daily_nwps)(t, self.data, lats, longs, self.path_nwp, self.nwp_model, self.projects,
self.variables) for t in self.dates)
x = dict()
y = dict()
x_3d = dict()
for project in self.projects:
x[project['_id']] = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
import scipy.stats
from pyextremes import EVA, get_model
@pytest.fixture(scope="function")
def eva_model(battery_wl_preprocessed) -> EVA:
return EVA(data=battery_wl_preprocessed)
@pytest.fixture(scope="function")
def eva_model_bm(battery_wl_preprocessed) -> EVA:
eva_model = EVA(data=battery_wl_preprocessed)
eva_model.get_extremes(
method="BM",
extremes_type="high",
block_size="1Y",
errors="raise",
)
return eva_model
@pytest.fixture(scope="function")
def eva_model_pot(battery_wl_preprocessed) -> EVA:
eva_model = EVA(data=battery_wl_preprocessed)
eva_model.get_extremes(
method="POT",
extremes_type="high",
threshold=1.35,
r="24H",
)
return eva_model
@pytest.fixture(scope="function")
def eva_model_bm_mle(battery_wl_preprocessed) -> EVA:
eva_model = EVA(data=battery_wl_preprocessed)
eva_model.get_extremes(
method="BM",
extremes_type="high",
block_size="1Y",
errors="raise",
)
eva_model.fit_model("MLE")
return eva_model
@pytest.fixture(scope="function")
def eva_model_bm_emcee(battery_wl_preprocessed) -> EVA:
eva_model = EVA(data=battery_wl_preprocessed)
eva_model.get_extremes(
method="BM",
extremes_type="high",
block_size="1Y",
errors="raise",
)
eva_model.fit_model("Emcee", n_walkers=10, n_samples=100)
return eva_model
@pytest.fixture(scope="function")
def eva_model_pot_mle(battery_wl_preprocessed) -> EVA:
eva_model = EVA(data=battery_wl_preprocessed)
eva_model.get_extremes(
method="POT",
extremes_type="high",
threshold=1.35,
r="24H",
)
eva_model.fit_model("MLE")
return eva_model
class TestEVA:
def test_init_errors(self):
with pytest.raises(
TypeError, match=r"invalid type.*'data' argument.*pandas.Series"
):
EVA(data=1)
with pytest.warns(RuntimeWarning, match=r"'data'.*not numeric.*converted"):
eva_model = EVA(
data=pd.Series(
data=["1", "2", "3"],
index=pd.DatetimeIndex(["2020", "2021", "2022"]),
)
)
assert np.allclose(eva_model.data.values, [1, 2, 3])
with pytest.raises(TypeError, match=r"invalid dtype.*'data' argument.*numeric"):
EVA(
data=pd.Series(
data=["a", "b", "c"],
index=pd.DatetimeIndex(["2020", "2021", "2022"]),
)
)
with pytest.raises(TypeError, match=r"index of 'data'.*date-time.*not"):
EVA(data=pd.Series(data=[1, 2, 3], index=["2020", "2021", "2022"]))
with pytest.warns(RuntimeWarning, match=r"index is not sorted.*sorting"):
eva_model = EVA(
data=pd.Series(
data=[1, 2, 3],
index=pd.DatetimeIndex(["2022", "2021", "2020"]),
)
)
assert np.allclose(eva_model.data.index.year.values, [2020, 2021, 2022])
with pytest.warns(RuntimeWarning, match=r"Null values found.*removing invalid"):
eva_model = EVA(
data=pd.Series(
data=[1, 2, np.nan, 3],
index= | pd.DatetimeIndex(["2020", "2021", "2022", "2023"]) | pandas.DatetimeIndex |
# -*- coding: utf-8 -*-
# test/unit/stat/test_period.py
# Copyright (C) 2016 authors and contributors (see AUTHORS file)
#
# This module is released under the MIT License.
"""Test Period class"""
# ============================================================================
# Imports
# ============================================================================
# Stdlib imports
from asyncio import Lock
from threading import Lock as TLock
# Third-party imports
import pandas as pd
import pytest
# Local imports
from loadlimit.stat import Period
from loadlimit.util import aiter
# ============================================================================
# Test total()
# ============================================================================
def test_total():
"""Returns total number of datapoints in the data structure"""
p = Period()
for i in range(5):
p[i]['timedata'].extend(range(5))
expected = 25
assert p.total() == expected
assert p.numdata == expected
@pytest.mark.asyncio
async def test_atotal():
"""Async version of total()"""
p = Period()
async for i in aiter(range(5)):
p[i]['timedata'].extend(range(5))
expected = 25
result = await p.atotal()
assert result == expected
assert p.numdata == expected
# ============================================================================
# Test clearvals
# ============================================================================
def test_clearvals_all():
"""Clearvals empties every list in the container"""
p = Period()
for i in range(5):
p[i]['timedata'].extend(range(5))
p.clearvals()
assert p.numdata == 0
for v in p.values():
assert len(v['timedata']) == 0
def test_clearvals_key():
"""Clearvals empties only the list for the specific key"""
p = Period()
for i in range(5):
p[i]['timedata'].extend(range(5))
p.clearvals(4)
assert p.numdata == 20
for i, v in p.items():
if i == 4:
assert len(v['timedata']) == 0
else:
assert len(v['timedata']) == 5
# ============================================================================
# Test aclearvals()
# ============================================================================
@pytest.mark.asyncio
async def test_aclearvals_all():
"""Clearvals empties every list in the container"""
p = Period()
async for i in aiter(range(5)):
p[i]['timedata'].extend(range(5))
await p.aclearvals()
assert p.numdata == 0
async for v in aiter(p.values()):
assert len(v['timedata']) == 0
@pytest.mark.asyncio
async def test_aclearvals_key():
"""Clearvals empties only the list for the specific key"""
p = Period()
async for i in aiter(range(5)):
p[i]['timedata'].extend(range(5))
await p.aclearvals(4)
assert p.numdata == 20
async for i, v in aiter(p.items()):
if i == 4:
assert len(v['timedata']) == 0
else:
assert len(v['timedata']) == 5
# ============================================================================
# Test period lock
# ============================================================================
def test_period_lockarg():
"""Use custom Lock instance with Period"""
mylock = Lock()
p = Period(lock=mylock)
assert p.lock is mylock
def test_period_defaultlock():
"""Create new Lock object if lock not specified"""
p = Period()
assert p.lock
assert isinstance(p.lock, Lock)
assert not p.lock.locked()
@pytest.mark.parametrize('obj', [42, 4.2, '42', [42], (4.2, ), TLock])
def test_period_lockarg_notlock(obj):
"""Non- asyncio.Lock objects raises an error"""
expected = ('lock expected asyncio.Lock, got {} instead'.
format(type(obj).__name__))
with pytest.raises(TypeError) as err:
Period(lock=obj)
assert err.value.args == (expected, )
# ============================================================================
# Test addtimedata
# ============================================================================
@pytest.mark.parametrize('val', [42, 4.2, '42', [42]])
def test_addtimedata_not_series(val):
"""Raise error if the data arg is not a pandas.Series object"""
stat = Period()
expected = ('data expected pandas.Series, got {} instead'.
format(type(val).__name__))
with pytest.raises(TypeError) as err:
stat.addtimedata(42, val)
assert err.value.args == (expected, )
# ============================================================================
# Test adderror
# ============================================================================
@pytest.mark.parametrize('val', [42, 4.2, '42', [42]])
def test_adderror_not_series(val):
"""Raise error if the data arg is not a pandas.Series object"""
stat = Period()
expected = ('data expected pandas.Series, got {} instead'.
format(type(val).__name__))
with pytest.raises(TypeError) as err:
stat.adderror(42, val)
assert err.value.args == (expected, )
def test_adderror_series():
"""Add a series to the dict"""
stat = Period()
error = Exception('i am an error')
s = pd.Series([1, 1, 0, repr(error)])
stat.adderror('42', s)
errors = list(stat.error('42'))
assert len(errors) == 1
assert errors[0] is s
# ============================================================================
# Test addfailure
# ============================================================================
@pytest.mark.parametrize('val', [42, 4.2, '42', [42]])
def test_addfailure_not_series(val):
"""Raise error if the data arg is not a pandas.Series object"""
stat = Period()
expected = ('data expected pandas.Series, got {} instead'.
format(type(val).__name__))
with pytest.raises(TypeError) as err:
stat.addfailure(42, val)
assert err.value.args == (expected, )
def test_addfailure_series():
"""Add a series to the dict"""
stat = Period()
error = 'i am a failure'
s = pd.Series([1, 1, 0, error])
stat.addfailure('42', s)
failures = list(stat.failure('42'))
assert len(failures) == 1
assert failures[0] is s
# ============================================================================
# Test numtimedata
# ============================================================================
@pytest.mark.parametrize('maxnum', list(range(1, 6)))
def test_numtimedata(maxnum):
"""Return number of time data stored"""
key = 'hello'
stat = Period()
for i in range(maxnum):
s = pd.Series([1, 1, i])
stat.addtimedata(key, s)
assert stat.numtimedata(key) == maxnum
# ============================================================================
# Test numerror
# ============================================================================
@pytest.mark.parametrize('maxnum', list(range(1, 6)))
def test_numerror(maxnum):
"""Return number of errors stored"""
key = 'hello'
stat = Period()
err = Exception(key)
for i in range(maxnum):
s = pd.Series([1, 1, i, repr(err)])
stat.adderror(key, s)
assert stat.numerror(key) == maxnum
# ============================================================================
# Test numfailure
# ============================================================================
@pytest.mark.parametrize('maxnum', list(range(1, 6)))
def test_numfailure(maxnum):
"""Return number of failures stored"""
key = 'hello'
err = 'world'
stat = Period()
for i in range(maxnum):
s = | pd.Series([1, 1, i, err]) | pandas.Series |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `transform` package."""
import pytest
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
from generic_strategy_optimization.transform import HA, gen_HA, downsample
@pytest.fixture
def candles_5m_3rows():
arr = [
[1504927800,303.11,305.1,305.02,304.39,452.56688104],
[1504928100,303.99,304.4,304.39,303.99,508.66859951],
[1504928400,303.04,304.4,303.99,303.88,526.7675571],
]
rv = pd.DataFrame(arr, columns=('ts', 'low', 'high', 'open', 'close', 'volume')).set_index('ts')
return rv
@pytest.fixture
def candles_5m_40rows():
arr = [
[1513930800,628.53,635.46,629.37,635.24,205.97453957],
[1513931100,631,635.68,635.24,634.21,74.75369283],
[1513931400,633.58,653.76,633.58,653.36,128.6972842],
[1513931700,650.22,664.66,653.42,654.77,189.07645963],
[1513932000,650.26,653.93,653.19,650.26,85.44704107],
[1513932300,641.64,651.06,650.26,647.68,169.78892461],
[1513932600,646.55,653.74,647.67,650,103.72501261],
[1513932900,649.9,653.68,649.99,651.99,113.93623401],
[1513933200,652,667.97,652.33,664.46,355.26597024],
[1513933500,664.56,671.93,667.77,669,240.7953687],
[1513933800,657.33,669.62,669.12,666.19,96.33266856],
[1513934100,666.19,673.76,667.29,670.25,123.36833368],
[1513934400,669.59,675,670.19,675,145.24845981],
[1513934700,675,688.99,675,688.12,316.32329402],
[1513935000,679.93,688.11,688.11,683,181.08651939],
[1513935300,683.04,697.96,683.04,694,264.23576236],
[1513935600,694,701,695.72,698.84,345.12446683],
[1513935900,698.01,704.98,698.84,700.99,210.90028967],
[1513936200,685.67,703.51,701.99,686.21,138.88844197],
[1513936500,672.92,690.01,685.79,689.15,213.06416502],
[1513936800,689.14,697.43,689.14,694.98,160.43862044],
[1513937100,688,694.84,694.84,694,195.78840035],
[1513937400,691.03,694,694,691.26,104.32096822],
[1513937700,690.01,697.33,690.01,697.33,236.45494184],
[1513938000,697.32,709.99,697.33,709.99,252.18568871],
[1513938300,708.67,713.01,709.99,709.8,167.70473181],
[1513938600,705.65,712,709.8,711.7,195.73482987],
[1513938900,711.02,712.59,711.7,711.02,170.57843675],
[1513939200,710.02,714.99,711.01,710.02,171.52601447],
[1513939500,706.01,715.01,706.01,712.03,155.97484788],
[1513939800,693.12,712.03,712.03,698.77,165.75384247],
[1513940100,688.74,703.29,699.54,690.21,170.95883755],
[1513940400,675,691.06,690.36,678.32,312.7283785],
[1513940700,655,682,678.31,681.99,606.61319273],
[1513941000,675.79,687.91,681.58,687.21,119.44389932],
[1513941300,672.77,687.13,686.29,673.98,195.66746861],
[1513941600,667.42,679.97,674,679.65,224.30705334],
[1513941900,672.89,679.64,679.64,676.94,163.50389894],
[1513942200,674.09,675.81,675.81,675,195.68584249],
[1513942500,664.15,675,675,664.16,129.93857446],
]
rv = pd.DataFrame(arr, columns=('ts', 'low', 'high', 'open', 'close', 'volume')).set_index('ts')
return rv
def test_HA_adds_heikin_ashi_column_to_dataframe(candles_5m_3rows):
"""Sample pytest test function with the pytest fixture as an argument."""
candles = candles_5m_3rows
df = HA(candles)
assert 'heikin_ashi' in df
assert len(df) == len(candles)
assert_series_equal(
df.heikin_ashi,
pd.Series([304.405, 304.1925, 303.8275], index=df.index),
check_names=False)
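# Worked check of the expected values above (the formula is inferred from the fixture,
# not taken from the transform module): the first candle gives
# (low + high + open + close) / 4 = (303.11 + 305.1 + 305.02 + 304.39) / 4 = 304.405,
# which matches the first asserted heikin_ashi value.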
def test_coarse_downsample_works_with_8row_candles(candles_5m_40rows):
"""
Input:
low high open close volume
ts
1513930800 628.53 635.46 629.37 635.24 205.974540
1513931100 631.00 635.68 635.24 634.21 74.753693
1513931400 633.58 653.76 633.58 653.36 128.697284
1513931700 650.22 664.66 653.42 654.77 189.076460
1513932000 650.26 653.93 653.19 650.26 85.447041
1513932300 641.64 651.06 650.26 647.68 169.788925
1513932600 646.55 653.74 647.67 650.00 103.725013
1513932900 649.90 653.68 649.99 651.99 113.936234
Output:
low high open close volume
ts
1513930800 628.53 664.66 629.37 654.77 598.501976
1513932000 641.64 653.93 653.19 651.99 472.897212
"""
candles = candles_5m_40rows.iloc[:8]
df = downsample(candles, 4, 'coarse')
assert len(df) == 2
assert (df.index == pd.Int64Index([1513930800, 1513932000])).all()
assert_frame_equal(
df,
pd.DataFrame(
[
[1513930800, 628.53, 664.66, 629.37, 654.77, 598.501976],
[1513932000, 641.64, 653.93, 653.19, 651.99, 472.897212],
],
columns=('ts', 'low', 'high', 'open', 'close', 'volume')).set_index('ts')
)
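# Sketch of an aggregation consistent with the expected frame above (an assumption
# about downsample(), not its actual implementation): every group of n consecutive
# candles is collapsed with low -> min, high -> max, open -> first, close -> last,
# volume -> sum, the group keeps the timestamp of its first candle, and when
# len(candles) is not divisible by n the oldest leftover rows are dropped (see the
# 9-row and 10-row cases below).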
def test_coarse_downsample_works_with_9row_candles(candles_5m_40rows):
"""
Input:
low high open close volume
ts
1513930800 628.53 635.46 629.37 635.24 205.974540
1513931100 631.00 635.68 635.24 634.21 74.753693
1513931400 633.58 653.76 633.58 653.36 128.697284
1513931700 650.22 664.66 653.42 654.77 189.076460
1513932000 650.26 653.93 653.19 650.26 85.447041
1513932300 641.64 651.06 650.26 647.68 169.788925
1513932600 646.55 653.74 647.67 650.00 103.725013
1513932900 649.90 653.68 649.99 651.99 113.936234
1513933200 652.00 667.97 652.33 664.46 355.265970
Output:
low high open close volume
ts
1513931100 631.00 664.66 635.24 650.26 477.974478
1513932300 641.64 667.97 650.26 664.46 742.716141
"""
candles = candles_5m_40rows.iloc[:9]
df = downsample(candles, 4, 'coarse')
assert len(df) == 2
assert (df.index == pd.Int64Index([1513931100, 1513932300])).all()
assert_frame_equal(
df,
pd.DataFrame(
[
[1513931100, 631.00, 664.66, 635.24, 650.26, 477.974478],
[1513932300, 641.64, 667.97, 650.26, 664.46, 742.716141],
],
columns=('ts', 'low', 'high', 'open', 'close', 'volume')).set_index('ts')
)
def test_coarse_downsample_works_with_10row_candles(candles_5m_40rows):
"""
Input:
low high open close volume
ts
1513930800 628.53 635.46 629.37 635.24 205.974540
1513931100 631.00 635.68 635.24 634.21 74.753693
1513931400 633.58 653.76 633.58 653.36 128.697284 <--+
1513931700 650.22 664.66 653.42 654.77 189.076460 |
1513932000 650.26 653.93 653.19 650.26 85.447041 |
1513932300 641.64 651.06 650.26 647.68 169.788925 v
1513932600 646.55 653.74 647.67 650.00 103.725013 <--+
1513932900 649.90 653.68 649.99 651.99 113.936234 |
1513933200 652.00 667.97 652.33 664.46 355.265970 |
1513933500 664.56 671.93 667.77 669.00 240.795369 v
Output:
low high open close volume
ts
1513931400 633.58 664.66 633.58 647.68 573.009710
1513932600 646.55 671.93 647.67 669.00 813.722586
"""
candles = candles_5m_40rows.iloc[:10]
df = downsample(candles, 4, 'coarse')
assert len(df) == 2
assert (df.index == | pd.Int64Index([1513931400, 1513932600]) | pandas.Int64Index |
import pandas as pd
import json
import os
import numpy
import glob
from zipfile import ZipFile
from functools import partial
from multiprocessing import Pool
### -------------------------------------Test and Help function -------------------------------------------------------
def test_me():
print("Hello World")
def version():
print("safegraph_py v1.1.0")
def help():
print('''
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
.d8888b. .d888 .d8888b. 888 8888888b. 888 888 888 d8b 888
d88P Y88b d88P" d88P Y88b 888 888 Y88b 888 888 888 Y8P 888
Y88b. 888 888 888 888 888 888 888 888 888 888
"Y888b. 8888b. 888888 .d88b. 888 888d888 8888b. 88888b. 88888b. 888 d88P 888 888 888888 88888b. .d88b. 88888b. 888 888 88888b. 888d888 8888b. 888d888 888 888
"Y88b. "88b 888 d8P Y8b 888 88888 888P" "88b 888 "88b 888 "88b 8888888P" 888 888 888 888 "88b d88""88b 888 "88b 888 888 888 "88b 888P" "88b 888P" 888 888
"888 .d888888 888 88888888 888 888 888 .d888888 888 888 888 888 888 888 888 888 888 888 888 888 888 888 888 888 888 888 888 .d888888 888 888 888
Y88b d88P 888 888 888 Y8b. Y88b d88P 888 888 888 888 d88P 888 888 888 Y88b 888 Y88b. 888 888 Y88..88P 888 888 888 888 888 d88P 888 888 888 888 Y88b 888
"Y8888P" "Y888888 888 "Y8888 "Y8888P88 888 "Y888888 88888P" 888 888 888 "Y88888 "Y888 888 888 "Y88P" 888 888 88888888 888 88888P" 888 "Y888888 888 "Y88888
888 888 888
888 Y8b d88P Y8b d88P
888 "Y88P" "Y88P"
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
HELP:
Welcome to the safegraph helper function. Below you will find a list of functions and their arguments to aid in your datascience journey. If you have further questions that cannot
be answered by this help command, please do not hesitate to ask for assistance in the #python_troubleshooting slack channel.
Key:
* - Required Argument
& - Boolean value
$ - Pandas *args and **kwargs are activated
Available Functions:
    + test_me() - A function to test the Python Library
----------------------[JSON Section]----------------------
+ unpack_json() - a function to explode JSON objects within pandas vertically into a new DF
**Arguments:
df*
json_column
key_col_name
value_col_name
+ unpack_json_and_merge() - a function to explode JSON objects within pandas vertically and add it to the current DF
**Arguments:
df*
json_column
key_col_name
value_col_name
keep_index (&)
+ explode_json_array() - This function vertically explodes an array column in SafeGraph data and creates a second new column indicating the index value from the array
**Arguments:
df*
array_column
value_col_name
place_key
file_key
array_sequence
keep_index (&)
zero_index (&)
----------------------[JSON Fast Section]----------------------
+ unpack_json_fast() - Multi-threaded version of unpack_json(). Reference unpack_json() for details and arguments.
+ unpack_json_and_merge_fast() - Multi-threaded version of unpack_json_and_merge(). Reference unpack_json_and_merge() for details and arguments.
+ explode_json_array_fast() - Multi-threaded version of explode_json_array(). Reference explode_json_array() for details and arguments.
-----------------[CORE, GEO, and PATTERNS section]----------------------
+ read_core_folder() - a function that concats the core files together into 1 dataframe
**Arguments:
path_to_core*
compression
$
+ read_core_folder_zip() - used to read in the Core data from the zipped core file
**Arguments:
path_to_core*
compression
$
+ read_geo_zip() - used to read in the Core Geo data from a zipped file
**Arguments:
path_to_geo*
compression
$
+ read_pattern_single() - used to read in SafeGraph data pre June 15th
**Arguments:
f_path*
compression
$
+ read_pattern_multi() - used to read in SafeGraph pattern data that is broken into multiple files
**Arguments:
path_to_pattern*
compression
$
+ merge_core_pattern() - used to combine the core file and the pattern files on the SafeGraph ID
**Arguments:
core_df*
patterns_df*
how
$
-----------------[Social Distancing section]----------------------
+ merge_socialDist_by_dates() - a function that concats the multiple different dates of social distancing data together into 1 dataframe
**Arguments:
path_to_social_dist*
start_date* (date as string "year-month-day")
end_date* (date as string "year-month-day")
$
''')
### dtype dict
sg_dtypes = {'postal_code': str, 'phone_number': str, 'naics_code': str, 'latitude': float, 'longitude': float, 'poi_cbg': str, 'census_block_group': str,'primary_number': str}
### -------------------------------------- JSON Functions ---------------------------------------------------------------
# json.loads() but handling of missing/nan/non-string data.
def load_json_nan(df, json_col):
return df[json_col].apply(lambda x: json.loads(x) if type(x) == str else x)
def unpack_json(df, json_column='visitor_home_cbgs', index_name= None, key_col_name=None,
value_col_name=None):
    # these checks are a bit inefficient for multithreading, but it's not a big deal
if key_col_name is None:
key_col_name = json_column + '_key'
if value_col_name is None:
value_col_name = json_column + '_value'
if (df.index.unique().shape[0] < df.shape[0]):
        raise ValueError("ERROR -- non-unique index found")
df = df.copy()
df[json_column + '_dict'] = load_json_nan(df,json_column)
all_sgpid_cbg_data = [] # each cbg data point will be one element in this list
if index_name is None:
for index, row in df.iterrows():
this_sgpid_cbg_data = [{'orig_index': index, key_col_name: key, value_col_name: value} for key, value in
row[json_column + '_dict'].items()]
all_sgpid_cbg_data = all_sgpid_cbg_data + this_sgpid_cbg_data
else:
for index, row in df.iterrows():
temp = row[index_name]
this_sgpid_cbg_data = [{'orig_index': index, index_name:temp, key_col_name: key, value_col_name: value} for key, value in
row[json_column + '_dict'].items()]
all_sgpid_cbg_data = all_sgpid_cbg_data + this_sgpid_cbg_data
all_sgpid_cbg_data = pd.DataFrame(all_sgpid_cbg_data)
all_sgpid_cbg_data.set_index('orig_index', inplace=True)
return all_sgpid_cbg_data
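# Illustrative usage of unpack_json (toy JSON string, not real SafeGraph output):
# _df = pd.DataFrame({'visitor_home_cbgs': ['{"010010201001": 4, "010010202002": 6}']})
# unpack_json(_df)
# -> two rows sharing orig_index 0, with visitor_home_cbgs_key holding the CBG ids
#    and visitor_home_cbgs_value holding the visitor counts 4 and 6.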
def unpack_json_and_merge(df, json_column='visitor_home_cbgs', key_col_name=None,
value_col_name=None, keep_index=False):
if (keep_index):
df['index_original'] = df.index
df.reset_index(drop=True, inplace=True) # Every row must have a unique index
df_exp = unpack_json(df, json_column=json_column, key_col_name=key_col_name, value_col_name=value_col_name)
df = df.merge(df_exp, left_index=True, right_index=True).reset_index(drop=True)
return df
def explode_json_array(df, array_column = 'visits_by_day', value_col_name=None, place_key='safegraph_place_id', file_key='date_range_start', array_sequence=None, keep_index=False, zero_index=False):
if (array_sequence is None):
array_sequence = array_column + '_sequence'
if (value_col_name is None):
value_col_name = array_column + '_value'
if(keep_index):
df['index_original'] = df.index
df = df.copy()
df.reset_index(drop=True, inplace=True) # THIS IS IMPORTANT; explode will not work correctly if index is not unique
df[array_column + '_json'] = load_json_nan(df,array_column)
day_visits_exp = df[[place_key, file_key, array_column+'_json']].explode(array_column+'_json')
day_visits_exp['dummy_key'] = day_visits_exp.index
day_visits_exp[array_sequence] = day_visits_exp.groupby([place_key, file_key])['dummy_key'].rank(method='first', ascending=True).astype('int64')
if(zero_index):
day_visits_exp[array_sequence] = day_visits_exp[array_sequence] -1
day_visits_exp.drop(['dummy_key'], axis=1, inplace=True)
day_visits_exp.rename(columns={array_column+'_json': value_col_name}, inplace=True)
day_visits_exp[value_col_name] = day_visits_exp[value_col_name].astype('int64')
df.drop([array_column+'_json'], axis=1, inplace=True)
return pd.merge(df, day_visits_exp, on=[place_key,file_key])
### ------------------------------------------ END JSON SECTION--------------------------------------------------------
### ------------------------------------------ JSON FAST SECTION--------------------------------------------------------
# index_name if you want your index (such as CBG) to be it's own column, then provide this
def unpack_json_fast(df, json_column = 'visitor_home_cbgs', index_name = None, key_col_name = None, value_col_name = None, chunk_n = 1000):
if index_name is None:
df = df[[json_column]]
else:
df = df[[json_column, index_name]]
chunks_list = [df[i:i+chunk_n] for i in range(0,df.shape[0],chunk_n)]
partial_unpack_json = partial(unpack_json, json_column=json_column, index_name= index_name, key_col_name= key_col_name, value_col_name= value_col_name)
with Pool() as pool:
results = pool.map(partial_unpack_json,chunks_list)
return pd.concat(results)
def unpack_json_and_merge_fast(df, json_column='visitor_home_cbgs', key_col_name=None,
value_col_name=None, keep_index=False, chunk_n = 1000):
if (keep_index):
df['index_original'] = df.index
df.reset_index(drop=True, inplace=True) # Every row must have a unique index
df_exp = df[[json_column]]
df_exp = unpack_json_fast(df_exp, json_column=json_column, key_col_name=key_col_name, value_col_name=value_col_name, chunk_n=chunk_n)
df = df.merge(df_exp, left_index=True, right_index=True).reset_index(drop=True)
return df
def explode_json_array_fast(df, array_column = 'visits_by_day', place_key='safegraph_place_id', file_key='date_range_start', value_col_name=None, array_sequence=None, keep_index=False, zero_index=False, chunk_n = 1000):
df_subset = df[[array_column,place_key,file_key]] # send only what we need
chunks_list = [df_subset[i:i+chunk_n] for i in range(0,df_subset.shape[0],chunk_n)]
partial_explode_json = partial(explode_json_array, array_column=array_column, value_col_name= value_col_name, place_key= place_key,
file_key = file_key, array_sequence = array_sequence, zero_index = zero_index)
with Pool() as pool:
results = pool.map(partial_explode_json,chunks_list)
df_subset = pd.concat(results)
df_subset.drop([array_column],axis=1,inplace=True) # preparing to merge by dropping duplicates
return df.merge(df_subset, on=[place_key,file_key])
### ------------------------------------------ END JSON FAST SECTION--------------------------------------------------------
### ---------------------------------------CORE, GEO, AND PATTERNS SECTION -----------------------------------------------
def read_core_folder(path_to_core, compression='gzip', dtype=sg_dtypes, *args, **kwargs):
core_files = glob.glob(os.path.join(path_to_core, "*.csv.gz"))
li = []
for core in core_files:
print(core)
df = pd.read_csv(core, compression=compression, dtype=dtype, *args, **kwargs)
li.append(df)
SG_core = pd.concat(li, axis=0)
return SG_core
### added a new core read that takes the information straight from the zipped file (like you get it from the catelog)
def read_core_folder_zip(path_to_core, compression='gzip', dtype=sg_dtypes, *args, **kwargs):
zip_file = ZipFile(path_to_core)
dfs = {text_file.filename: pd.read_csv(zip_file.open(text_file.filename), compression=compression, dtype=dtype, *args, **kwargs)
for text_file in zip_file.infolist()
if text_file.filename.endswith('.csv.gz')}
SG_core = | pd.concat(dfs, axis=0, ignore_index=True) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# # Monte-carlo simulations
# In[1]:
# %load imports.py
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('reload_kedro', '')
get_ipython().run_line_magic('config', 'Completer.use_jedi = False ## (To fix autocomplete)')
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
from src.models.vmm import ModelSimulator
import matplotlib.pyplot as plt
from src.visualization.plot import track_plots, plot, captive_plot, plot_parameters
import kedro
import numpy as np
import os.path
import anyconfig
import matplotlib
plt.style.use('presentation')
from myst_nb import glue
from wPCC_pipeline.paper import glue_table
from src.symbols import *
import src.symbols as symbols
from src.system_equations import *
from IPython.display import display, Math, Latex, Markdown
from sympy.physics.vector.printing import vpprint, vlatex
from src.parameters import df_parameters
p = df_parameters["symbol"]
# Read configs:
conf_path = os.path.join("../../conf/base/")
runs_globals_path = os.path.join(
conf_path,
"runs_globals.yml",
)
runs_globals = anyconfig.load(runs_globals_path)
model_test_ids = runs_globals["model_test_ids"]
join_globals_path = os.path.join(
conf_path,
"join_globals.yml",
)
joins = runs_globals["joins"]
join_runs_dict = anyconfig.load(join_globals_path)
globals_path = os.path.join(
conf_path,
"globals.yml",
)
global_variables = anyconfig.load(globals_path)
vmm_names = global_variables["vmms"]
only_joined = global_variables[
"only_joined"
] # (regress/predict with only models from joined runs)
ship_data = catalog.load("ship_data")
# In[2]:
from sympy import latex
from scipy.stats import norm, multivariate_normal
from wPCC_pipeline.pipelines.prediction.nodes import simulate_euler
import tqdm
import sys
from wPCC_pipeline.turning_circle import TurningCircle
import seaborn as sns
# In[9]:
#vmm_name = 'vmm_abkowitz_simple'
vmm_name = 'vmm_martins_simple'
#vmm_name = 'vmm_abkowitz'
model = catalog.load(f"{ vmm_name }.motion_regression.joined.model")
ek = catalog.load(f"{ vmm_name }.ek")
id = 22774
regression = catalog.load(f"{ vmm_name }.motion_regression.no_circle.regression")
df_smooth = catalog.load(f"{ id }.data_ek_smooth")
# In[10]:
def monte_carlo(data_smooth, df_parameter_variation, model, ek):
dataframes = {}
with tqdm.tqdm(total=len(df_parameter_variation), file=sys.stdout) as pbar:
for index, parameters_ in df_parameter_variation.iterrows():
model_ = model.copy()
model_.parameters.update(parameters_)
df_ = simulate_euler(data=data_smooth, model=model_,ek=ek, solver='Radau')
dataframes[index] = df_
pbar.update(1)
return dataframes
# In[18]:
means = regression.parameters['regressed']
stds = regression.std
cov = regression.covs.values
rv = multivariate_normal(mean=means, cov=cov, allow_singular=True)
np.random.seed(42)
N_=1000
df_parameter_variation = pd.DataFrame(data=rv.rvs(N_), columns=means.index)
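# Shape note (illustrative): with len(means) regressed parameters, rv.rvs(N_) returns
# an (N_, len(means)) array, so df_parameter_variation holds one jointly sampled
# parameter set per row; each row is later applied to a copy of the model in
# monte_carlo() above.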
# In[19]:
dataframes = monte_carlo(df_smooth, df_parameter_variation, model=model, ek=ek)
# In[ ]:
dataframes['model test'] = df_smooth
dataframes['VMM all'] = catalog.load(f"{ vmm_name }.motion_regression.joined.{ id }.data_resimulate")
dataframes['VMM circle hold out'] = catalog.load(f"{ vmm_name }.motion_regression.no_circle.{ id }.data_resimulate")
# In[41]:
dataframes_ = dataframes.copy()
displays = []
displays.append({key:value for key,value in dataframes.items() if key not in ['model test', 'VMM all', 'VMM circle hold out']})
displays.append({key:value for key,value in dataframes.items() if key not in ['model test', 'VMM all']})
displays.append({key:value for key,value in dataframes.items() if key not in ['VMM all']})
displays.append(dataframes)
# In[56]:
styles={'model test':{'style':'k-','lw':2},
'VMM all':{'style':'r-','lw':2},
'VMM circle hold out':{'style':'g-','lw':2},
}
for index, parameters_ in df_parameter_variation.iterrows():
styles[index] = {'style':'b-',
'alpha':0.1,
'label':'_Hidden label'}
for dataframes_ in displays:
fig,ax=plt.subplots()
    fig.set_size_inches(0.7*np.array(matplotlib.rcParams["figure.figsize"]))
ax = track_plots(dataframes_, lpp=ship_data['L'], beam=ship_data['B'], plot_boats=False, flip=True, N=7, styles=styles, ax=ax)
ax.set_xlim(0,25)
ax.set_ylim(-20,5)
# In[53]:
df_turning_results = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import matplotlib
import scipy
import netCDF4 as nc4
import numpy.ma as ma
import matplotlib.pyplot as plt
from netCDF4 import Dataset
import struct
import glob
import pandas as pd
from numpy import convolve
import datetime
import atmos
import matplotlib.dates as mdates
#"""
#Created on Wed Nov 13 10:41:35 2019
#functions to define, for each day of the dataset, which is the interval of time
#to consider to select typical boundary layer clouds and exclude other cloud types
#which can mess up the statistics. The filter is based on human selection based
#on visual inspection of cloudnet target classification and model cloud mask.
#As a general rule: we exclude all clouds with cloud base above 2000 mt and vertical
#extension larger than 1000 m associated with a mixed / ice phase.
#@author: cacquist
#"""
import xarray as xr
def f_selectingPBLcloudWindow(date):
""" function to select time intervals for processing cloud data in each day.
Also, we pick which algorithm to use with each type of data.
in particular:
- minmax correspond to select as cloud top the max of the cloud tops found.
Clouds above 5000 mt are filtered out (done with the function
f_calcCloudBaseTopPBLclouds)
- version2: selects boundary layer clouds with cloud base below 2500 mt and cloud tops below
CB+600mt.
"""
if date == '20130414':
timeStart = datetime.datetime(2013,4,14,6,0,0)
timeEnd = datetime.datetime(2013,4,14,23,59,59)
PBLheight = 2500.
if date == '20130420':
timeStart = datetime.datetime(2013,4,20,6,0,0)
timeEnd = datetime.datetime(2013,4,20,23,59,59)
PBLheight = 2000.
if date == '20130424':
timeStart = datetime.datetime(2013,4,24,6,0,0)
timeEnd = datetime.datetime(2013,4,24,23,59,59)
PBLheight = 2000.
if date == '20130425':
timeStart = datetime.datetime(2013,4,25,6,0,0)
timeEnd = datetime.datetime(2013,4,25,23,59,59)
PBLheight = 5000.
if date == '20130426':
timeStart = datetime.datetime(2013,4,26,6,0,0)
timeEnd = datetime.datetime(2013,4,26,23,59,59)
PBLheight = 5000.
if date == '20130427':
timeStart = datetime.datetime(2013,4,27,6,0,0)
timeEnd = datetime.datetime(2013,4,27,23,59,59)
PBLheight = 3000.
if date == '20130428':
timeStart = datetime.datetime(2013,4,28,6,0,0)
timeEnd = datetime.datetime(2013,4,28,23,59,59)
PBLheight = 3500.
if date == '20130429':
timeStart = datetime.datetime(2013,4,29,6,0,0)
timeEnd = datetime.datetime(2013,4,29,23,59,59)
PBLheight = 3000.
if date == '20130430':
timeStart = datetime.datetime(2013,4,30,6,0,0)
timeEnd = datetime.datetime(2013,4,30,23,59,59)
PBLheight = 3000.
if date == '20130501':
timeStart = datetime.datetime(2013,5,1,6,0,0)
timeEnd = datetime.datetime(2013,5,1,23,59,59)
PBLheight = 2500.
if date == '20130502':
timeStart = datetime.datetime(2013,5,2,6,0,0)
timeEnd = datetime.datetime(2013,5,2,23,59,59)
PBLheight = 4000.
if date == '20130503':
timeStart = datetime.datetime(2013,5,3,6,0,0)
timeEnd = datetime.datetime(2013,5,3,23,59,59)
PBLheight = 3000.
if date == '20130504':
timeStart = datetime.datetime(2013,5,4,6,0,0)
timeEnd = datetime.datetime(2013,5,4,23,59,59)
PBLheight = 2500.
if date == '20130505':
timeStart = datetime.datetime(2013,5,5,6,0,0)
timeEnd = datetime.datetime(2013,5,5,23,59,59)
PBLheight = 2500.
if date == '20130506':
timeStart = datetime.datetime(2013,5,6,6,0,0)
timeEnd = datetime.datetime(2013,5,6,23,59,59)
PBLheight = 3000.
if date == '20130509':
timeStart = datetime.datetime(2013,5,9,6,0,0)
timeEnd = datetime.datetime(2013,5,9,23,59,59)
PBLheight = 3000.
if date == '20130510':
timeStart = datetime.datetime(2013,5,10,6,0,0)
timeEnd = datetime.datetime(2013,5,10,23,59,59)
PBLheight = 3000.
if date == '20130518':
timeStart = datetime.datetime(2013,5,18,6,0,0)
timeEnd = datetime.datetime(2013,5,18,23,59,59)
PBLheight = 2500.
if date == '20130524':
timeStart = datetime.datetime(2013,5,24,6,0,0)
timeEnd = datetime.datetime(2013,5,24,23,59,59)
PBLheight = 4500.
if date == '20130525':
timeStart = datetime.datetime(2013,5,25,6,0,0)
timeEnd = datetime.datetime(2013,5,25,23,59,59)
PBLheight = 3000.
if date == '20130527':
timeStart = datetime.datetime(2013,5,27,6,0,0)
timeEnd = datetime.datetime(2013,5,27,23,59,59)
PBLheight = 3000.
if date == '20130528':
timeStart = datetime.datetime(2013,5,28,6,0,0)
timeEnd = datetime.datetime(2013,5,28,23,59,59)
PBLheight = 4000.
dictOut = {'timeStart':timeStart, 'timeEnd':timeEnd, 'heightPBL':PBLheight}
return(dictOut)
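# --- usage sketch (illustrative only, not part of the processing chain) --------------------
def _example_selectingPBLcloudWindow():
    """Minimal sketch: read the human-defined PBL cloud window for one day of the dataset."""
    humanInfo = f_selectingPBLcloudWindow('20130502')
    print('PBL window:', humanInfo['timeStart'], '-', humanInfo['timeEnd'],
          '| max cloud top considered:', humanInfo['heightPBL'], 'm')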
#-------------------------------------------------------------------------------------
def f_calculateCloudBaseTopThickness(cloudMask, time, height, humanInfo):
"""
date : wednesday 13 may 2020
author: <NAME>
goal: build a function to identify all cloud base and cloud top of clouds in the vertical profile at the same time.
Human observations for the day distinguish manually PBL from non-PBL clouds. An additional dataset of
PBL clouds is delivered based on this information.
Concept of the code:
step 1: given the cloud mask, find all cloud base and cloud tops.
step 2: build cloud database with all clouds saved as xarray dataset
step 3: identify cloud properties of PBL clouds using timeStart, timeEnd, MaxCTheight
input: cloudmask,
time,
height,
humanInfo (dictionary including timeStart, timeEnd, PBLheight from human obs on the day)
output: AllCloudDataset (xarray Dataset including cloud base, cloud top, cloud thickness, level number)
PBLcloudDataset (xarray Dataset for PBL clouds with cloud base, cloud top, cloud thickness, level number)
"""
dimTime = len(time)
dimHeight = len(height)
heightPBL = humanInfo['heightPBL']
timeStart = humanInfo['timeStart']
timeEnd = humanInfo['timeEnd']
# STEP 1: identifying all cloud bases and tops
# ---------------------------------------------------
# converting cloud mask to 1 / 0 matrices
BinaryMatrix = np.zeros((dimTime, dimHeight))
for itime in range(dimTime):
for iH in range(dimHeight):
if cloudMask[itime, iH] != 0.:
BinaryMatrix[itime, iH] = 1
# calculating gradient of binary cloud mask
gradBinary = np.diff(BinaryMatrix, axis=1)
# counting max number of cloud base/cloud top found
numberCB = []
numberCT = []
for itime in range(dimTime):
column = gradBinary[itime, :]
numberCB.append(len(np.where(column == -1.)[0][:]))
numberCT.append(len(np.where(column == 1.)[0][:]))
NCB = max(numberCB)
NCT = max(numberCT)
# generating cloud base and cloud top arrays
CBarray = np.zeros((dimTime, NCB))
CBarray.fill(np.nan)
CTarray = np.zeros((dimTime, NCT))
CTarray.fill(np.nan)
NlayersArray = np.zeros((dimTime))
NlayersArray.fill(np.nan)
    # if no cloud bases or no cloud tops are found, CB and CT stay filled with nan
    if (NCB == 0) or (NCT == 0):
        CBarray[:, :] = np.nan
        CTarray[:, :] = np.nan
else:
# if some cloud base / cloud tops are found, all the found values are stored
# storing cloud base and cloud top arrays
for iTime in range(dimTime):
column = gradBinary[iTime, :]
indCB = np.where(column == -1.)[0][:]
NfoundCB = len(indCB)
indCT = np.where(column == 1.)[0][:]
NfoundCT = len(indCT)
CBarray[iTime, 0:NfoundCB] = height[indCB]
CTarray[iTime, 0:NfoundCT] = height[indCT]
NlayersArray[iTime] = numberCB[iTime]
# calculating cloud thickness based on the cloud base and tops found ( 2d array (time, Nlevels))
cloudThicknessDatabase = CTarray - CBarray
# generating array of levels
levels = np.arange(NCB)
# step 2: build cloud database with all clouds saved as xarray dataset
clouds = xr.Dataset(
data_vars = {'cloudBase' : (('time', 'levels'), CBarray),
'cloudTop' : (('time', 'levels'), CTarray),
'cloudThick': (('time', 'levels'), cloudThicknessDatabase)},
coords = {'levels': levels,
'time' : time})
# step 3: identify cloud properties of PBL clouds using timeStart, timeEnd, MaxCTheight
cloudsTimeWindow = clouds.sel(time=slice(timeStart, timeEnd))
PBLclouds = cloudsTimeWindow.where(cloudsTimeWindow.cloudTop < heightPBL)
return(clouds, PBLclouds)
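# --- usage sketch (illustrative only): synthetic single-layer cloud ------------------------
def _example_calculateCloudBaseTopThickness():
    """Minimal sketch with synthetic data. The height grid is descending (model-style,
    top of atmosphere first), which is what the gradient sign convention above assumes."""
    time = pd.date_range('2013-05-02 06:00:00', periods=120, freq='9s')
    height = np.linspace(5000., 0., 51)           # descending heights [m]
    cloudMask = np.zeros((len(time), len(height)))
    cloudMask[:, 20:25] = 1.                      # one persistent cloud layer
    humanInfo = f_selectingPBLcloudWindow('20130502')
    clouds, PBLclouds = f_calculateCloudBaseTopThickness(cloudMask, time, height, humanInfo)
    print(clouds.cloudBase.values[0, :], clouds.cloudTop.values[0, :])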
#--------------------------------------------------------------------------
def f_calculateMinCloudBaseTop(clouds, PBLclouds, date_arr):
"""author: <NAME>
date: 18/05/2020
goal: function to calculate the minimum cloud base for the PBL and the corresponding cloud top
input: clouds - list of xarray datasets of cloud properties
PBLclouds - list of xarray datasets of PBL cloud properties
date_arr - array of days to be processed
output: minimum cloud base and corresponding cloud tops in matrices of dimtime, Nfiles dimensions
"""
# definition of output matrices
dimTime = 9600
Nfiles = len(date_arr)
CBarr_obs = np.zeros((dimTime, Nfiles))
CTarr_obs = np.zeros((dimTime, Nfiles))
TKarr_obs = np.zeros((dimTime, Nfiles))
CBarr_PBL_obs = np.zeros((dimTime, Nfiles))
CTarr_PBL_obs = np.zeros((dimTime, Nfiles))
TKarr_PBL_obs = np.zeros((dimTime, Nfiles))
# for each day, reading and saving minimum cloud base and corresponding cloud top
for indFile in range(Nfiles):
        # reading the date
date = date_arr[indFile]
yy = int(date[0:4])
mm = int(date[4:6])
dd = int(date[6:8])
timeStandard = pd.date_range(start=datetime.datetime(yy,mm,dd,0,0,0), \
end=datetime.datetime(yy,mm,dd,23,59,59), freq='9s')
# reading xarray datasets of the day
PBLcloud_dataset = PBLclouds[indFile]
cloud_dataset = clouds[indFile]
PBLCloudsStandard = PBLcloud_dataset.reindex({'time':timeStandard})
meanCB_obs = np.nanmin(cloud_dataset.cloudBase.values, axis=1)
meanCT_obs = np.nanmin(cloud_dataset.cloudTop.values, axis=1)
meanTK_obs = np.nanmin(cloud_dataset.cloudThick.values, axis=1)
meanCB_PBL_obs = np.nanmin(PBLCloudsStandard.cloudBase.values, axis=1)
meanCT_PBL_obs = np.nanmin(PBLCloudsStandard.cloudTop.values, axis=1)
meanTK_PBL_obs = np.nanmin(PBLCloudsStandard.cloudThick.values, axis=1)
CBarr_obs[:, indFile] = meanCB_obs
CTarr_obs[:, indFile] = meanCT_obs
TKarr_obs[:, indFile] = meanTK_obs
CBarr_PBL_obs[:, indFile] = meanCB_PBL_obs
CTarr_PBL_obs[:, indFile] = meanCT_PBL_obs
TKarr_PBL_obs[:, indFile] = meanTK_PBL_obs
return (CBarr_obs, CTarr_obs, TKarr_obs, CBarr_PBL_obs)
#---------------------------------------------------------------------------------
def f_resampleArrays2StandardData(A, index, strDate):
"""
author : <NAME>
date : 10/04/2020
    goal   : resample data with some missing times (arrays of dimT < 9600) to the standard length of 9600 time steps
input : matrix of data to resize, datetime_array, height_array
output : ndarray of resampled matrix with nans wherever missing data are located
"""
import numpy as np
import pandas as pd
DF = pd.Series(A, index=index)
# I first construct my regular time index every 9s
# Obviously I put a little more periods (entire day would be 9600)
index = pd.date_range(strDate, periods=9600, freq='9s')
# There you go, by default missing values should be NaN
    DFresampled = DF.reindex(index)
return(DFresampled.values)
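# --- usage sketch (illustrative only) --------------------------------------------------------
def _example_resampleArrays2StandardData():
    """Minimal sketch: a series with gaps is put on the standard 9 s grid (9600 steps per
    day); time stamps that are missing in the input come back as NaN."""
    index = pd.date_range('2013-05-02', periods=4800, freq='18s')   # only every second stamp
    A = np.arange(len(index), dtype=float)
    resampled = f_resampleArrays2StandardData(A, index, '2013-05-02')
    print(resampled.shape, np.sum(np.isnan(resampled)))             # (9600,) 4800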
#---------------------------------------------------------------------------------
def f_resample2StandardData(A, index, cols, strDate):
"""
author : <NAME>
date : 10/04/2020
goal : resample data with some missing times (matrices of dimT < 9600) to the standard size (9600,150)
input : matrix of data to resize, datetime_array, height_array
output : ndarray of resampled matrix with nans wherever missing data are located
"""
import numpy as np
import pandas as pd
DF = pd.DataFrame(A, index=index, columns=cols)
# I first construct my regular time index every 9s
# Obviously I put a little more periods (entire day would be 9600)
index = pd.date_range(strDate, periods=9600, freq='9s')
# There you go, by default missing values should be NaN
    DFresampled = DF.reindex(index)
return(DFresampled.values)
# closest function
#---------------------------------------------------------------------------------
# date : 16.10.2017
# author: <NAME>
# goal: return the index of the element of the input array that in closest to the value provided to the function
def f_closest(array,value):
idx = (np.abs(array-value)).argmin()
return idx
def getNearestIndex(timeRef, timeStamp):
"""this function finds the nearest element of timeRef array to the value timeStamp within the given tolerance
and returns the index of the element found. If non is found within the given tolerance, it returns nan."""
try:
index = timeRef.index.get_loc(timeStamp, method='nearest')
except:
index = np.nan
return index
def getIndexList(dataTable, reference):
"""this function reads the less resolved time array (dataTable) and the time array to be used as reference (reference)
and the tolerance. then for every value in the reference array, it finds the index of the nearest element of
dataTable for a fixed tolerance. It provides as output the list of indeces of dataTable corresponding
to the closest elements of the reference array. """
#print(len(reference))
indexList = []
for value in reference:
#print(value)
index = getNearestIndex(dataTable, value)
indexList.append(index)
return indexList
def getIndexListsuka(dataTable, reference):
indexList = []
for value in reference:
#print(value)
index = dataTable.index.get_loc(value, method='nearest')
indexList.append(index)
return indexList
def getResampledDataPd(emptyDataFrame, LessResolvedDataFrame, indexList):
# it reads the dataframe to be filled with the resampled data (emptyDataFrame), then the originally less resolved
# data (dataDataFrame) and the list of indeces of the less resolved time array upsampled
# to the highly resolved resolutions. Then, with a loop on the indeces of the indexList,
# It assigns to the emptydataframe the values of the less resolved dataframe called by the
# corresponding index of the indexlist. The output is the filled emptydataFrame
for i, index in enumerate(indexList):
try:
emptyDataFrame.iloc[i]=LessResolvedDataFrame.iloc[index]
except:
pass
return emptyDataFrame
# function to calculate LWC from radar reflectivity and LWP (Frisch approach)
#---------------------------------------------------------------------------------
# date : 23.10.2019
# author: <NAME>
# goal: calculate LWC using standard Frisch approach
# input: linear reflectivity matrix, radar range gate resolution (assumed constant), \
# time array, height array, LWP time series
# output: LWC matrix (time, height)
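# The profile below follows the Frisch approach: the column-integrated LWP is distributed
# along the range gates proportionally to the square root of the linear reflectivity,
#     LWC(z_i) = LWP * sqrt(Ze_i) / (deltaZ * sum_j sqrt(Ze_j))
# so that deltaZ * sum_i LWC(z_i) reproduces the measured LWP.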
def f_calculateLWCFrisch(Ze_lin, deltaZ, datetime_ICON, height_ICON, LWP_obs_res):
LWC_Frisch = np.zeros((len(datetime_ICON), len(height_ICON)))
LWC_Frisch.fill(np.nan)
LWP_obs_res = np.insert(LWP_obs_res,0,np.nan)
for indT in range(len(datetime_ICON)):
for indH in range(len(height_ICON)):
num = LWP_obs_res[indT] * np.sqrt(Ze_lin[indT,indH])
den = deltaZ*np.nansum(np.sqrt(Ze_lin[indT,:]))
#print(den)
#print(num)
LWC_Frisch[indT,indH] = num/den
return(LWC_Frisch)
# function to define cb and ct of the first cloud layer in a column appearing when reading from the top
#---------------------------------------------------------------------------------
# date : 31.01.2019
# author: <NAME>
# goal: derive the cloud base and cloud top heights of each cloud layer in the column from the cloud mask
def f_calcCloudBaseTop(cloudMask, dimTime, dimHeight, height):
# converting cloud mask to 1 / 0 matrices
BinaryMatrix = np.zeros((dimTime, dimHeight))
for itime in range(dimTime):
for iH in range(dimHeight):
if cloudMask[itime,iH] != 0.:
BinaryMatrix[itime,iH] = 1
# calculating gradient of binary cloud mask
gradBinary = np.diff(BinaryMatrix,axis=1)
# counting max number of cloud base/cloud top found
numberCB = []
numberCT = []
for itime in range(dimTime):
column = gradBinary[itime,:]
numberCB.append(len(np.where(column == 1.)[0][:]))
numberCT.append(len(np.where(column ==-1.)[0][:]))
NCB=max(numberCB)
NCT=max(numberCT)
# generating cloud base and cloud top arrays
CBarray = np.zeros((dimTime,NCB))
CBarray.fill(np.nan)
CTarray = np.zeros((dimTime,NCT))
CTarray.fill(np.nan)
# storing cloud base and cloud top arrays
for iTime in range(dimTime):
column = gradBinary[iTime,:]
indCB=np.where(column == -1.)[0][:]
NfoundCB=len(indCB)
indCT=np.where(column == 1.)[0][:]
NfoundCT=len(indCT)
CBarray[iTime,0:NfoundCB]=height[indCB]
CTarray[iTime,0:NfoundCT]=height[indCT]
return (CBarray,CTarray)
# function to define cb and ct of boundary layer clouds in a column appearing when reading from the top
#---------------------------------------------------------------------------------
# date : 22.10.2019
# author: <NAME>
# input: cloudMask, dimension of time array, dimension of height array, height from model/obs (cloudnet)
# output: array of:
# - CBarray: time array having 4 dimensions to record four different cloud base heights per time stamp
# - CTarray: time array having 4 dimensions to record four different cloud top heights per time stamp
# - NlayersArray: number of distinct cloud layers identified per time stamp
# - CB_collective: minimum cloud base identified per time stamp
# - CT_collective: maximum cloud top identified per time stamp
# goal:
def f_calcCloudBaseTopPBLclouds(cloudMask, dimTime, dimHeight, height, cloudTimeArray, time):
# cloud mask for identifying cloud base and cloud top of PBL clouds
#CloudMaskCut = cloudMask
# filtering clouds above 5000mt
# ind_above = np.where(height > 5000.)
#CloudMaskCut[:, ind_above] = 0.
# converting cloud mask to 1 / 0 matrices
BinaryMatrix = np.zeros((dimTime, dimHeight))
for itime in range(dimTime):
for iH in range(dimHeight):
if cloudMask[itime,iH] != 0.:
BinaryMatrix[itime,iH] = 1
# calculating gradient of binary cloud mask
gradBinary = np.diff(BinaryMatrix,axis=1)
# counting max number of cloud base/cloud top found
numberCB = []
numberCT = []
for itime in range(dimTime):
column = gradBinary[itime,:]
numberCB.append(len(np.where(column == 1.)[0][:]))
numberCT.append(len(np.where(column ==-1.)[0][:]))
NCB=max(numberCB)
NCT=max(numberCT)
# generating cloud base and cloud top arrays
CBarray = np.zeros((dimTime,NCB))
CBarray.fill(np.nan)
CTarray = np.zeros((dimTime,NCT))
CTarray.fill(np.nan)
NlayersArray = np.zeros((dimTime))
NlayersArray.fill(np.nan)
# if no cloud bases or no cloud tops are found, then CB and CT are assigned to nan
if (NCB == 0) or (NCT == 0):
CB_collective = np.zeros((dimTime))
CB_collective.fill(np.nan)
CT_collective = np.zeros((dimTime))
CT_collective.fill(np.nan)
CB_PBL_out = np.zeros((dimTime))
CB_PBL_out.fill(np.nan)
CT_PBL_out = np.zeros((dimTime))
CT_PBL_out.fill(np.nan)
else:
# if some cloud base / cloud tops are found, all the found values are stored
# storing cloud base and cloud top arrays
for iTime in range(dimTime):
column = gradBinary[iTime,:]
indCB = np.where(column == -1.)[0][:]
NfoundCB = len(indCB)
indCT = np.where(column == 1.)[0][:]
NfoundCT = len(indCT)
CBarray[iTime,0:NfoundCB] = height[indCB]
CTarray[iTime,0:NfoundCT] = height[indCT]
NlayersArray[iTime] = numberCB[iTime]
# we define a collective cloud base/top to consider multilayer PBL clouds as one
# we assign min CB and max CT for each PBl cloud found.
CB_collective = np.asarray(CBarray[:,0])
CT_collective = np.asarray(CTarray[:,0])
for ind in range(dimTime):
# if (np.isnan(CB[ind,0]) == True):
CB_collective[ind] = np.nanmin(CBarray[ind,:])
CT_collective[ind] = np.nanmax(CTarray[ind,:])
# filtering clouds in PBL using human filtering for hours
#if np.count_nonzero(~np.isnan(CB_collective)) != 0:
timeStart = cloudTimeArray[0]
timeEnd = cloudTimeArray[1]
CB_PBL = pd.Series(np.repeat(np.nan, len(time)), index=time)
maskt = (CB_PBL.index > timeStart) * (CB_PBL.index < timeEnd)
CB_PBL.loc[maskt] = CB_collective[maskt]
CT_PBL = pd.Series(np.repeat(np.nan, len(time)), index=time)
maskt = (CT_PBL.index > timeStart) * (CT_PBL.index < timeEnd)
CT_PBL.loc[maskt] = CT_collective[maskt]
CT_PBL_out = CT_PBL.values
CB_PBL_out = CB_PBL.values
return (CBarray, CTarray, NlayersArray, CB_PBL_out, CT_PBL_out, CB_collective, CT_collective)
def f_calcCloudBaseTopPBLcloudsV2(cloudMask, dimTime, dimHeight, height, cloudTimeArray, \
time):
"""
@ author: cacquist
@ date : 10 November 2019
@ goal : this function corresponds to the version2 processing mode. It has been
generated to detect PBL clouds over JOYCE and it has been tuned with
statistical observed mean PBL cloud properties from the site.
INPUT:
- cloudMask : matrix of 0/1 containing cloud mask
- dimTime : dimension of time array
- dimHeight : dimension of height array
- cloudTimeArry :
- time : time array
OUTPUTS:
- CBarray : array containing all cloud bases found with the gradient method
- CTarray : array containing all cloud tops found with the gradient method
        - NlayersArray  : number of cloud base/top levels found for each time
- CB_PBL_out : array of boundary layer cloud bases found
- CT_PBL_out : array of boundary layer cloud tops found
- CB_collective : array of minimum cloud base found
- CT_collective : array of maximum cloud top found
Methodology:
It sets the cloud base to be below 2500mt and the cloud geometrical thickness
to be 600 mt.
    Check that the cloud base height is below 2500 m:
        If the cloud base does not fulfil the condition, no PBL cloud
        base and top are found and the function returns nans.
        If the cloud base fulfils the condition, then it checks the cloud tops.
            If the maximum cloud top is found above CB + 600 m, lower cloud
            tops are searched among the cloud tops below that height and the
            minimum is taken.
            If none are found, cloud top and cloud base are assigned to nan.
"""
meanCloudThickness = 600.
minCBheight = 2500.
# cloud mask for identifying cloud base and cloud top of PBL clouds
# filtering clouds above 5000mt
#cloudMaskCut = cloudMask
#ind_above = np.where(height > 5000.)
#cloudMaskCut[:, ind_above] = 0.
# converting cloud mask to 1 / 0 matrices
BinaryMatrix = np.zeros((dimTime, dimHeight))
for itime in range(dimTime):
for iH in range(dimHeight):
if cloudMask[itime,iH] != 0.:
BinaryMatrix[itime,iH] = 1
# calculating gradient of binary cloud mask
gradBinary = np.diff(BinaryMatrix,axis=1)
# counting max number of cloud base/cloud top found
numberCB = []
numberCT = []
for itime in range(dimTime):
column = gradBinary[itime,:]
numberCB.append(len(np.where(column == 1.)[0][:]))
numberCT.append(len(np.where(column ==-1.)[0][:]))
NCB=max(numberCB)
NCT=max(numberCT)
# generating cloud base and cloud top arrays
CBarray = np.zeros((dimTime,NCB))
CBarray.fill(np.nan)
CTarray = np.zeros((dimTime,NCT))
CTarray.fill(np.nan)
NlayersArray = np.zeros((dimTime))
NlayersArray.fill(np.nan)
# if no cloud bases or no cloud tops are found, then CB and CT are assigned to nan
if (NCB == 0) or (NCT == 0):
CB_collective = np.zeros((dimTime))
CB_collective.fill(np.nan)
CT_collective = np.zeros((dimTime))
CT_collective.fill(np.nan)
CB_PBL_out = np.zeros((dimTime))
CB_PBL_out.fill(np.nan)
CT_PBL_out = np.zeros((dimTime))
CT_PBL_out.fill(np.nan)
else:
# if some cloud base / cloud tops are found, all the found values are stored
# storing cloud base and cloud top arrays
for iTime in range(dimTime):
column = gradBinary[iTime,:]
indCB = np.where(column == -1.)[0][:]
NfoundCB = len(indCB)
indCT = np.where(column == 1.)[0][:]
NfoundCT = len(indCT)
CBarray[iTime,0:NfoundCB] = height[indCB]
CTarray[iTime,0:NfoundCT] = height[indCT]
NlayersArray[iTime] = numberCB[iTime]
# we define a collective cloud base/top to consider multilayer PBL clouds as one
# we assign min CB and max CT for each PBl cloud found.
CB_collective = np.asarray(CBarray[:,0])
CT_collective = np.asarray(CTarray[:,0])
CB_PBL_out = np.repeat(np.nan, len(time))
CT_PBL_out = np.repeat(np.nan, len(time))
for ind in range(dimTime):
# if (np.isnan(CB[ind,0]) == True):
CB_collective[ind] = np.nanmin(CBarray[ind,:])
CT_collective[ind] = np.nanmax(CTarray[ind,:])
#selecting temporal window in which cloud top and base for PBL clouds have to be calculated
if (time[ind] > cloudTimeArray[0]) * (time[ind] < cloudTimeArray[1]):
if (CB_collective[ind] < minCBheight):
                # for boundary layer clouds, we can assume the lowest cloud base is correct.
                # We can also assume that, from the lowest cloud base, the cloud does not extend
                # vertically for more than meanCloudThickness (600 m). If the max cloud top is above
                # CB + meanCloudThickness, we select among the cloud tops those located within such
                # distance from the cloud base.
maxCTheightPBL = np.nanmin(CBarray[ind,:]) + meanCloudThickness
#print('max cloud top', maxCTheightPBL)
if (np.nanmax(CTarray[ind,:]) > maxCTheightPBL):
findLowerCT = np.where(CTarray[ind,:] < maxCTheightPBL)
if (len(findLowerCT[0]) == 0): # no elements are found below the maximum allowed height for cloud top
CT_PBL_out[ind] = np.nan
CB_PBL_out[ind] = np.nan
else:
#print('sono qui')
CT_PBL_out[ind] = np.nanmin(CTarray[ind,findLowerCT]) # assigning minmum cloud top
CB_PBL_out[ind] = CB_collective[ind] # assigning cloud base if it is below 2500 mt
return (CBarray, CTarray, NlayersArray, CB_PBL_out, CT_PBL_out, CB_collective, CT_collective)
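# --- usage sketch (illustrative only; argument names are placeholders) ----------------------
#   window = f_selectingPBLcloudWindow(date)
#   CB, CT, Nlayers, CB_PBL, CT_PBL, CB_all, CT_all = f_calcCloudBaseTopPBLcloudsV2(
#       cloudMask, len(time), len(height), height, [window['timeStart'], window['timeEnd']], time)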
#---------------------------------------------------------------------------------
# date : 28.01.2019
# author: <NAME>
# goal: function that calculates cloud fraction over 30 minutes of time for the whole day for ICON-LEM
# input:
# QI_ICON_LEM, \
# QC_ICON_LEM,
# datetime_ICON,
# height_2_ICON_LEM,
# QiThreshold,
# QcThreshold
#
# output:
#          dict_CF: dictionary containing 'TotalCloudFraction', 'LiquidCloudFraction' and
#          'IceCloudFraction' (each of shape (48, n_heights)), plus 'height' and 'time'
#--------------------------------------------------------------------------------
def f_calculateCloudFractionICON(QI, QC, yy, mm, dd, time, height, QiThreshold, QcThreshold):
# calculation of cloud fraction for ICON_LEM
# creating a dataframe with for Qi and Qc with time and height using pandas dataframe
QI_ICON_DF = pd.DataFrame(QI, index=time, columns=height)
QC_ICON_DF = pd.DataFrame(QC, index=time, columns=height)
# defining mean cloud fraction matrices to contain average profile every hour for the supersite
mean_CF_liquid_ICON = np.zeros((48,150))
mean_CF_ice_ICON = np.zeros((48,150))
mean_CF_tot_ICON = np.zeros((48,150))
deltaT = datetime.timedelta(minutes=30)
indInt = 0
datetime_out = []
# --- loop on hours to calculate the mean hourly profile
for itime in range(0,48):
if indInt == 0:
HourInf = datetime.datetime(int(yy), int(mm), int(dd), 0, 0, 0)
else:
HourInf = HourInf + deltaT
HourSup = HourInf + deltaT
datetime_out.append(HourInf)
indInt = indInt + 1
Qi_sliced_t = QI_ICON_DF.loc[(QI_ICON_DF.index < HourSup) * (QI_ICON_DF.index > HourInf),:]
Qc_sliced_t = QC_ICON_DF.loc[(QC_ICON_DF.index < HourSup) * (QC_ICON_DF.index > HourInf),:]
# ---- loop on heights: for each height counting the number of elements
# larger than the threshold and
# calculating the cloud fraction as the ratio between this number and
# the number of elements counted in the hour
#print len(DF_qi_hour[DF_qi_hour.iloc[:,0] > QiThreshold])
#print len(DF_qi_hour.iloc[:,0])
for iheight in range(len(height)-1):
#for iheight in range(2):
# extracting array
DF_qi_arr = Qi_sliced_t.loc[:,height[iheight]]
DF_qc_arr = Qc_sliced_t.loc[:,height[iheight]]
NelemTot = len(DF_qi_arr)
# posing conditions on cloud fraction for liquid only
Cond_iceClouds=np.isfinite(DF_qi_arr[DF_qi_arr > QiThreshold] * DF_qc_arr[DF_qc_arr < QcThreshold])
Cond_iceClouds.apply(int)
Num_iceCloud=Cond_iceClouds.sum()
Cond_LiquidClouds=np.isfinite(DF_qc_arr[DF_qi_arr < QiThreshold] * DF_qc_arr[DF_qc_arr > QcThreshold])
Cond_LiquidClouds.apply(int)
Num_liquidCloud=Cond_LiquidClouds.sum()
#print(Num_liquidCloud)
#print(Num_iceCloud)
if float(NelemTot) == 0:
print('Houston, we have a problem!')
else:
mean_CF_ice_ICON[itime,iheight]=float(Num_iceCloud)/float(NelemTot)
mean_CF_liquid_ICON[itime,iheight]=float(Num_liquidCloud)/float(NelemTot)
mean_CF_tot_ICON[itime,iheight]=float(Num_iceCloud+Num_liquidCloud)/float(NelemTot)
# defining dictionary containing data to have as output
dict_CF = {}
# filling dictionaries with data
dict_CF = {
'TotalCloudFraction':mean_CF_tot_ICON,
'LiquidCloudFraction':mean_CF_liquid_ICON,
'IceCloudFraction':mean_CF_ice_ICON,
'height':height,
'time':datetime_out,
}
return(dict_CF)
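# --- usage sketch (illustrative only; the Qi/Qc thresholds below are hypothetical) ----------
#   dict_CF = f_calculateCloudFractionICON(QI, QC, '2013', '05', '02', datetime_ICON,
#                                          height_ICON, 1e-7, 1e-7)
#   CF_tot = dict_CF['TotalCloudFraction']     # (48, 150): one profile per 30 min interval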
def f_plotTest(matrix, time, height, figname):
pathFig = '/work/cacquist/HDCP2_S2/statistics/figs/patch003/figures_JAMES/'
fig, ax = plt.subplots(figsize=(10, 5))
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
matplotlib.rc('xtick', labelsize=10) # sets dimension of ticks in the plots
matplotlib.rc('ytick', labelsize=10) # sets dimension of ticks in the plots
ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
ax.xaxis.set_minor_formatter(mdates.DateFormatter("%H:%M"))
cax = ax.pcolormesh(time, height, matrix, vmin=0, vmax=3,
cmap=plt.cm.get_cmap("GnBu", 4))
ax.set_ylim(0., 15000) # limits of the y-axes
# ax.set_xlim(timeStart, timeEnd) # limits of the x-axes
ax.set_title("cloud mask model", fontsize=10)
ax.set_xlabel("time [hh:mm]", fontsize=10)
ax.set_ylabel("height [m]", fontsize=10)
cbar = fig.colorbar(cax, ticks=[0, 1, 2, 3], orientation='vertical', aspect=10)
cbar.ticks = ([0, 1, 2, 3])
cbar.ax.set_yticklabels(['no cloud', 'liquid', 'ice', 'mixed phase'])
cbar.set_label(label="cloud type", size=10)
cbar.ax.tick_params(labelsize=10)
plt.tight_layout()
plt.savefig(pathFig + figname + '_cloudMask.png')
return ()
""" function to derive wind speed and direction
#---------------------------------------------------------------------------------
date : 17.12.2018
author: <NAME> (<EMAIL>)
goal: derive wind speed and direction in form of list and matrices
input:
- datetime_ICON: time array
- height_ICON: height array
- u_ms: zonal wind
- v_ms: meridional wind
output:
- ws: list of wind speed
- wd: list of wind directions
- wind_abs: matrix of wind speed
- wind_dir_trig_from_degrees: matrix of wind direction in degrees indicating
the direction from where wind is coming
"""
#--------------------------------------------------------------------------------
def f_calcWindSpeed_Dir(datetime_ICON, height_ICON, u_ms, v_ms):
import math
wind_abs = np.sqrt(u_ms**2 + v_ms**2)
wind_dir_trig_to = np.zeros((len(datetime_ICON),len(height_ICON)))
wind_dir_trig_to_degrees = np.zeros((len(datetime_ICON),len(height_ICON)))
wind_dir_trig_from_degrees = np.zeros((len(datetime_ICON),len(height_ICON)))
wind_dir_cardinal = np.zeros((len(datetime_ICON),len(height_ICON)))
ws = []
wd = []
for itime in range(len(datetime_ICON)):
for iHeight in range(len(height_ICON)):
# wind dir in unit circle coordinates (wind_dir_trig_to), which increase counterclockwise and have a zero on the x-axis
wind_dir_trig_to[itime, iHeight] = math.atan2(v_ms[itime, iHeight],u_ms[itime, iHeight])
# wind dir in degrees (wind_dir_trig_to_degrees) dir where wind goes
wind_dir_trig_to_degrees[itime, iHeight] = wind_dir_trig_to[itime, iHeight] * 180/math.pi ## -111.6 degrees
# wind dir in degrees (wind_dir_trig_to_degrees) dir from where wind comes
wind_dir_trig_from_degrees[itime, iHeight] = wind_dir_trig_to_degrees[itime, iHeight] + 180 ## 68.38 degrees
# wind dir in cardinal coordinates from the wind dir in degrees (wind_dir_trig_to_degrees) dir from where wind comes
wind_dir_cardinal[itime, iHeight] = 90 - wind_dir_trig_from_degrees[itime, iHeight]
if np.isfinite(wind_dir_trig_from_degrees[itime, iHeight]) and \
np.isfinite(wind_abs[itime, iHeight]) and \
(wind_abs[itime, iHeight] != 0.):
wd.append(wind_dir_trig_from_degrees[itime, iHeight])
ws.append(wind_abs[itime, iHeight])
    WindDictionary={'windDirection':wd,
                    'windSpeed':ws,
                    }
return(WindDictionary)
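# --- usage sketch (illustrative only) ---------------------------------------------------------
def _example_calcWindSpeed_Dir():
    """Minimal sketch with a single u/v sample: u = v = 1 m/s corresponds to a wind speed of
    sqrt(2) m/s blowing towards the north-east, i.e. coming from 225 degrees (south-west)."""
    t = [datetime.datetime(2013, 5, 2, 12, 0, 0)]
    h = [100.]
    u = np.array([[1.]])
    v = np.array([[1.]])
    print(f_calcWindSpeed_Dir(t, h, u, v))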
# function to plot color time heigth maps from a dictionary of initial data
#---------------------------------------------------------------------------------
# date : 14.12.2018
# author: <NAME> (<EMAIL>)
# goal: function to derive pdfs of vertical and horizontal wind below cloud base
# check for vertical wind values observed below cloud base. for every time stamp.
# methodology: for observations:
# if there is cloud base in observations, store vertical wind values recorded in the 300m below cloud base.
# if there is no cloud, store vertical wind values in the 5 bins below mean estimated cloud base.
# input : vertical wind, horizontal wind, time, height
# output: verticalWindPDF_cloud, verticalWindPDF_nocloud, horizontalWindPDF_cloud, horizontalWindPDF_nocloud
#--------------------------------------------------------------------------------
def f_pdfsBelowCloudBase(w_ICON, Hwind, height, datetime_ICON, datetimeHourArr, height_ICON, mean_CB_arr_OBS, CB_array_OBS, timeStart, timeEnd):
verticalWindPDF_cloud = []
horizontalWindPDF_cloud = []
verticalWindPDF_nocloud = []
horizontalWindPDF_nocloud = []
distHeight = 400.
vertWind_ICON_DF = pd.DataFrame(w_ICON, index=datetime_ICON, columns=height)
HorWind_ICON_DF = pd.DataFrame(Hwind, index=datetime_ICON, columns=height_ICON)
limTimeInf = timeStart
limTimeSup = timeEnd
# establishing height below which to check for wind
for indTime in range(len(datetime_ICON)):
if (datetime_ICON[indTime] > limTimeInf) * (datetime_ICON[indTime] < limTimeSup):
# case of no clouds, read mean cloud base height in the hour and extract height
if np.isfinite(CB_array_OBS[indTime]) == False:
findHourInd = f_closest(np.asarray(datetimeHourArr), datetime_ICON[indTime])
CBHeight = mean_CB_arr_OBS[findHourInd]
mask_h_vertWind = (vertWind_ICON_DF.columns < CBHeight) * (vertWind_ICON_DF.columns > CBHeight-distHeight)
valuesWwind = vertWind_ICON_DF.values[indTime, mask_h_vertWind].flatten()
mask_h_horwind = (HorWind_ICON_DF.columns < CBHeight) * (HorWind_ICON_DF.columns > CBHeight-distHeight)
valuesHwind = HorWind_ICON_DF.values[indTime, mask_h_horwind].flatten()
for indValw in range(len(valuesWwind)):
verticalWindPDF_nocloud.append(valuesWwind[indValw])
for indValh in range(len(valuesHwind)):
horizontalWindPDF_nocloud.append(valuesHwind[indValh])
# case of clouds: read cloud base height and extract bins below.
else:
CBHeight = CB_array_OBS[indTime]
mask_h_vertWind = (vertWind_ICON_DF.columns < CBHeight) * (vertWind_ICON_DF.columns > CBHeight-distHeight)
valuesWwind = vertWind_ICON_DF.values[indTime, mask_h_vertWind].flatten()
mask_h_horwind = (HorWind_ICON_DF.columns < CBHeight) * (HorWind_ICON_DF.columns > CBHeight-distHeight)
valuesHwind = HorWind_ICON_DF.values[indTime, mask_h_horwind].flatten()
for indValw in range(len(valuesWwind)):
verticalWindPDF_cloud.append(valuesWwind[indValw])
for indValh in range(len(valuesHwind)):
horizontalWindPDF_cloud.append(valuesHwind[indValh])
return(verticalWindPDF_cloud, verticalWindPDF_nocloud, horizontalWindPDF_cloud, horizontalWindPDF_nocloud)
def f_calcPblHeightRN(thetaV,Uwind,Vwind,height,time,device):
"""
PBL height calculation function
--------------------------------------------------------------------------------
date created : 15.01.2018
date modifed : 05.12.2019
author: <NAME>
goal: calculate the boundary layer height following the richardson number
derivation according to Seidel Et al, 2010
#---------------------------------------------------------------------------------
"""
g = 9.8 # gravity constant
Rithreshold = 0.25 # Threshold values for Ri
#Rithreshold2 = 0.2
dimTime = len(time)
dimHeight = len(height)
if (device == 'mod'):
zs = height[149] # height of the surface reference
if (device == 'obs'):
zs = height[0]
RiMatrix = np.zeros((dimTime, dimHeight)) # Richardson number matrix
PBLheightArr = []
RiCol = np.zeros((dimHeight))
# calculating richardson number matrix
for iTime in range(dimTime):
thetaS = thetaV[iTime,149]
for iHeight in range(dimHeight):
den = ((Uwind[iTime,iHeight])**2 + (Vwind[iTime,iHeight])**2)
if den == 0.:
RiMatrix[iTime,iHeight] = 0.
else:
RiMatrix[iTime,iHeight] = (1/den) * (g/thetaS) * (thetaV[iTime,iHeight]-thetaS)*(height[iHeight]-zs)
# find index in height where Ri > Rithreshold
for iTime in range(dimTime):
RiCol=RiMatrix[iTime,:]
#print(RiCol)
#print(np.where(RiCol > Rithreshold2)[0][:])
#print(len(np.where(RiCol > Rithreshold)[0][:]))
if len(np.where(RiCol > Rithreshold)[0][:]) != 0:
PBLheightArr.append(height[np.where(RiCol > Rithreshold)[0][-1]] - height[dimHeight-1])
else:
PBLheightArr.append(0)
return PBLheightArr
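# The bulk Richardson number evaluated above at every level z is
#     Ri(z) = (g / theta_s) * (theta_v(z) - theta_s) * (z - z_s) / (U(z)**2 + V(z)**2)
# with theta_s and z_s taken at the surface level. Note that the surface level is hard-coded
# as index 149 for the model grid ('mod'), i.e. a 150-level profile ordered from the top of
# the atmosphere downwards is assumed.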
def f_calcPblHeightTW(stdWmatrix,sigmaThreshold,height2,time, device):
"""
PBL height calculation function based on threshold on std w method
--------------------------------------------------------------------------------
date created : 05.12.2019
author: <NAME>
goal: calculate the boundary layer height following the method of a threshold on sigma w
as indicated in Schween et al., 2014. The algorithm takes the maximum of the heights below 2000m
at which the sigma values is larger than 0.4. 2000m is a conservative value
threshold obtained from the paper from Schween et al., 2014 on MLH at JOYCE
#---------------------------------------------------------------------------------
"""
dimTime = len(time)
PBLheightTW = np.zeros((dimTime))
PBLheightTW.fill(np.nan)
#std_matrix[:,height < height[142]] = 0.
for ind in range(len(time)):
if device == 'mod':
column = stdWmatrix[ind,:]
aboveThr = column > sigmaThreshold
#selecting heights below 2000
Hsel = height2[aboveThr]
Hbelow = Hsel[Hsel < 2000.]
if np.count_nonzero((Hbelow)) != 0:
PBLheightTW[ind] = np.nanmax(Hbelow)
return(PBLheightTW)
# function to calculate the convective condensation level height and temperature
#---------------------------------------------------------------------------------
# date : 17.05.2018
# author: <NAME>
# goal: function that calculates the convective condensation level (CCL) height and temperature.
# for the definition of the CCL check this: https://en.wikipedia.org/wiki/Convective_condensation_level
# input:
# - T field (time, height)
# - RH field (time, height) ( es: 75,0)
# - P field (time, height)
# - height [in m]
# - datetime [in datetime format]
# output:
# - Z_ccl (time) time serie of the height of the CCL
# - T_CCl (time) time serie of the temperature a parcel should have to reach the height for condensation
# - T_dew point (time, height ) dew point temperature field for every time, height
# method: the function first calculates the dew point temperature field and the dew point at the surface for every time. Then, we derive the saturation mixing ratio at the surface for T=Td. Then, we calculate the mixing ratio field
#--------------------------------------------------------------------------------
def f_CCL(T_ICON, P_ICON, RH_ICON, height_ICON, datetime_ICON, Hsurf):
# temperature has to be provided in K
# RH in % ( 70.14)
# P In Kpa
dimHeight = len(height_ICON)
# defining constants
cost_rvl = np.power(5423,-1.) #K
E0 = 0.611 # Kpa
T0 = 273. # K
Rv = 461 # J K^-1 Kg^-1
epsilon = 0.622
Ad_rate = -9.8 # K/Km
# ---- substituting RH = 0. to RH = nan to avoid log(0) cases
RH_ICON [ RH_ICON == 0.] = np.nan
T_ICON [ T_ICON == 0.] = np.nan
    # ---- calculating dew point temperature profile for each time (The dew point is \
# the temperature to which air must be cooled to become saturated with water vapor. )
Td = np.power(np.power(T_ICON,-1.)-cost_rvl*np.log(RH_ICON/100.),-1.)
# ---- calculating mixing ratio at the surface for T dew point
Td_surf = Td[:, dimHeight-1]
P_surf = P_ICON[:,dimHeight-1]
RH_surf = RH_ICON[:,dimHeight-1]
for indtime in range(len(datetime_ICON)):
if (~np.isfinite(RH_surf[indtime])):
RH_surf[indtime] = RH_ICON[indtime,dimHeight-2]
if (~np.isfinite(Td_surf[indtime])):
Td_surf[indtime] = Td[indtime,dimHeight-2]
# ---- calculating the saturation mixing ratio for td at the surface (assuming RH =100%)
M0 = epsilon*E0*np.exp((1./Rv)*(T0**(-1.)-Td_surf**(-1.))) / (P_surf - E0*np.exp((1./Rv)*(T0**(-1.)-Td_surf**(-1.))))
# ---- calculating mixing ratio profile for each P,T, RH using profile RH
m = (RH_ICON/100.)*epsilon*E0*np.exp((1./Rv)*(T0**(-1.)-T_ICON**(-1.))) / (P_ICON - (RH_ICON/100.)*E0*np.exp((1./Rv)*(T0**(-1.)-T_ICON**(-1.))))
#fig, ax = plt.subplots(figsize=(12,5))
#plt.plot(m[5000,:],height_ICON)
#plt.plot(np.repeat(M0[5000],len(height_ICON)), height_ICON)
#plt.ylim(0,6000)
# ---- calculating indeces of height of Z_ccl and Z_CCL height
ind_CCL = []
dimHeight=len(height_ICON)
#print(dimHeight)
for indTime in range(len(datetime_ICON)):
        for indHeight in range(dimHeight-2,1,-1):
            #print(height_ICON[indHeight])
            if (m[indTime,indHeight] < M0[indTime] and m[indTime,indHeight-1] > M0[indTime] ):
                ind_CCL.append(indHeight)
                break
        else:
            # no crossing found in the profile: fall back to the lowest level, so that
            # ind_CCL stays aligned with datetime_ICON
            ind_CCL.append(dimHeight-1)
print(len(ind_CCL))
z_ccl = height_ICON[ind_CCL]
# ---- finding z(CCL) using the dry adiabatic lapse rate
T_ground_CCL = []
for indTime in range(len(ind_CCL)):
T_top = T_ICON[indTime, ind_CCL[indTime]]
T_ground_CCL.append(T_top - Ad_rate* z_ccl[indTime]*10.**(-3))
dict_out={'z_ccl':z_ccl,
'T_ccl':T_ground_CCL,
'Td':Td
}
return(dict_out)
def f_CCL_new(T, P, RH, height, time, date):
"""
function to calculate convective condensation level (CCL). For more info on definitions of this level, read pp.250
of Petty : A first course in atmospheric thermodynamics
input: T: temperature , to be provided in K
relative humidity, in % (es: 70.14)
pressure, in Kpa
device, string for "model" or "obs"
procedure:
step 1: calculate dew point T
step 2: calculate saturation mixing ratio m0 at t=Td, P=Psurf
step 3: calculate, for every value of P, Td(m=m0, P)
step 4: check, for every level of P, if there's a level i of P for which T(P)i-1 < Td(m0,P)i < T(P)i+1.
If the level is found, assign T_star = T(P)i and Z_ccl as the height corresponding to that pressure height.
step 5: calculate Tc using adiabatic lapse rate to come back at the height of the surface.
output: T_ccl, z_ccl
"""
#pathFig = '/work/cacquist/HDCP2_S2/statistics/figs/' + patch + '/figures_JAMES/debugging/'
print('calculating CCL height and T_CCL')
# defining constants
cost_rvl = np.power(5423, -1.) # K
E0 = 0.611 # Kpa
T0 = 273. # K
Rv = 461 # J K^-1 Kg^-1
    L = 2.5 * 10 ** 6  # J/kg, latent heat of vaporisation (consistent with Rv/L = 1/5423 K used in f_CCL)
epsilon = 0.622
Ad_rate = -9.8 # K/Km
# assigning dimensions:
dimHeight = len(height)
dimTime = len(time)
    # step 1: calculating dew point temperature profile for each time (The dew point is \
# the temperature to which air must be cooled to become saturated with water vapor. )
# substituting RH = 0. to RH = nan to avoid log(0) cases
RH[RH == 0.] = np.nan
T[T == 0.] = np.nan
# calculating Td
Td = np.power(np.power(T, -1.) - cost_rvl * np.log(RH / 100.), -1.)
# step 2: calculating mixing ratio at the surface for T = Td and P=Psurf
# finding index of height corresponding to lowest level in height
indHmin = np.nanargmin((height))
# reading values of P, T, RH at the corresponding height
Td_surf = Td[:, indHmin]
P_surf = P[:, indHmin]
RH_surf = RH[:, indHmin]
m0 = epsilon * E0 * np.exp((1. / Rv) * (T0 ** (-1.) - Td_surf ** (-1.))) / (
P_surf - E0 * np.exp((1. / Rv) * (T0 ** (-1.) - Td_surf ** (-1.))))
#print(Td_surf, P_surf, RH_surf, m0)
# step 3: calculating Td(m=m0, P) for every P value
z_ccl = np.zeros((dimTime))
T_cclTop = np.zeros((dimTime))
z_ccl.fill(np.nan)
# indPlotCount = 0
for indTime in range(dimTime):
Tdm0_profile = np.zeros((dimHeight))
Tdm0_profile.fill(np.nan)
indCCLprofile = []
Tm0_surface = 1 / ((1 / T0) - ((1 / L) * Rv * np.log((m0 * P[:, indHmin]) / (E0 * epsilon))))
for indHeight in range(dimHeight - 1):
Tdm0_profile[indHeight] = 1 / (
(1 / T0) - ((1 / L) * Rv * np.log((m0[indTime] * P[indTime, indHeight]) / (E0 * epsilon))))
# print(T[indTime, indHmin])
if (T[indTime, indHeight] < Tdm0_profile[indHeight]) and (
T[indTime, indHeight + 1] > Tdm0_profile[indHeight]):
indCCLprofile.append(indHeight)
# print(Tdm0_profile[indHmin])
# print(T[indTime, indHmin])
#print(indCCLprofile)
##fig, ax = plt.subplots(figsize=(12, 5))
# plt.plot(Tdm0_profile, height, label='TDm0')
# plt.plot(T[indTime, :], height, label='T')
# plt.legend()
# plt.plot(time, z_ccl3)
# plt.plot(np.repeat(M0[5000],len(height)), height)
# plt.ylim(0, 6000)
# plt.savefig(pathFig + str(indPlotCount) + 'Tm0_profile_Check.png', format='png')
# indPlotCount = indPlotCount +1
# print(len(indCCLprofile))
if len(indCCLprofile) == 0:
z_ccl[indTime] = np.nan
T_cclTop[indTime] = np.nan
else:
z_ccl[indTime] = np.nanmin(height[indCCLprofile])
T_cclTop[indTime] = np.nanmin(T[indTime, np.nanargmin(height[indCCLprofile])])
# fig, ax = plt.subplots(figsize=(12,5))
# plt.plot(time, z_ccl)
# plt.ylim(0,6000)
# plt.savefig(pathFig+date+'_z_ccl_mod.png', format='png')
print(z_ccl)
# ---- finding z(CCL) using the dry adiabatic lapse rate
T_ground_CCL = np.zeros((dimTime))
for indTime in range(dimTime):
T_ground_CCL[indTime] = (T_cclTop[indTime] - Ad_rate * z_ccl[indTime] * 10. ** (-3))
# providing output as standardized xarray output format
#DatasetOut = xr.Dataset(
# data_vars={'z_ccl' : (('time'), z_ccl),
# 't_ccltop': (('time'), T_cclTop),
# 't_ccl' : (('time'), T_ground_CCL),
# 'T_dew' : (('time', 'height'), Td)},
# coords={'time' : time,
# 'height': height})
#return (DatasetOut)
DatasetOut = {'time':time,
'height':height,
'z_ccl':z_ccl,
'T_ground_ccl':T_ground_CCL,
'T_top_ccl':T_cclTop,
'T_dew':Td}
return (DatasetOut)
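# --- usage sketch (illustrative only; variable names are placeholders) -----------------------
#   ccl = f_CCL_new(T_matrix, P_matrix, RH_matrix, height, time_array, '20130502')
#   z_ccl = ccl['z_ccl']            # CCL height [m] for every time step
#   T_c   = ccl['T_ground_ccl']     # convective temperature at the surface [K]
# T must be given in K, P in kPa and RH in % (see the docstring above).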
# moving variance calculation function
#---------------------------------------------------------------------------------
# date : 17.01.2018
# author: <NAME>
# goal: function that calculates the moving average of an array of values over a given window given as imput
#--------------------------------------------------------------------------------
def runningMeanVariance(x, N):
    # rolling standard deviation squared (pandas .rolling accessor)
    return np.power(pd.Series(x).rolling(N).std(), 2)
# variance of vertical velocity calculation
#---------------------------------------------------------------------------------
# date : 17.01.2018
# author: <NAME>
# goal: function that calculates the variance of the vertical velocity matrix
# input:
# - matrix of vertical velocity
# - time array
# - height array
# - time window for the running mean (30 min for comparing to obs)
#--------------------------------------------------------------------------------
def f_calcWvariance(Wwind,time,height,window, res):
"""
OBSOLETE FUNCTION NOT USED ANYMORE
author: <NAME>
date: 05.12.2019
goal: calculation of variance of vertical velocity. The function performs the
calculation of the standard deviation of vertical velocity as in the paper
from Schween et al., 2014., AMT, doi:10.5194/amt-7-3685-2014
input:
- Wwind: vertical velocity matrix (time, height)
- time: time array
- height: height array
- window: time window over which to calculate the standard deviation
- res: resolution at which calculate the standard deviation (in the
paper it is 5 min)
"""
dimTime = len(time)
dimHeight = len(height)
variance = np.zeros((dimTime,dimHeight))
for iheight in range(dimHeight):
# reading array of w values at a given height
Warray = Wwind[:,iheight]
s = | pd.Series(Warray) | pandas.Series |
# ----------------------------------------------------------------------------
# Copyright (c) 2018-2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from qiime2.plugin.testing import TestPluginBase
from q2_diversity_lib import (faith_pd, pielou_evenness, observed_features,
shannon_entropy)
import io
import biom
import skbio
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import copy
nonphylogenetic_measures = [observed_features, pielou_evenness,
shannon_entropy]
class SmokeTests(TestPluginBase):
package = 'q2_diversity_lib.tests'
def setUp(self):
super().setUp()
self.empty_table = biom.Table(np.array([]), [], [])
def test_non_phylogenetic_passed_empty_table(self):
for measure in nonphylogenetic_measures:
with self.assertRaisesRegex(ValueError, "empty"):
measure(table=self.empty_table)
class FaithPDTests(TestPluginBase):
package = 'q2_diversity_lib.tests'
def setUp(self):
super().setUp()
self.input_table = biom.Table(np.array([[1, 0, .5, 999, 1],
[0, 1, 2, 0, 1],
[0, 0, 0, 1, 1]]),
['A', 'B', 'C'],
['S1', 'S2', 'S3', 'S4', 'S5'])
self.input_tree = skbio.TreeNode.read(io.StringIO(
'((A:0.3, B:0.50):0.2, C:100)root;'))
self.faith_pd_expected = pd.Series({'S1': 0.5, 'S2': 0.7, 'S3': 1.0,
'S4': 100.5, 'S5': 101},
name='faith_pd')
def test_receives_empty_table(self):
empty_table = biom.Table(np.array([]), [], [])
with self.assertRaisesRegex(ValueError, "empty"):
faith_pd(table=empty_table, phylogeny=self.input_tree)
def test_method(self):
actual = faith_pd(table=self.input_table, phylogeny=self.input_tree)
pdt.assert_series_equal(actual, self.faith_pd_expected)
def test_accepted_types_have_consistent_behavior(self):
freq_table = self.input_table
rel_freq_table = copy.deepcopy(self.input_table).norm(axis='sample',
inplace=False)
p_a_table = copy.deepcopy(self.input_table).pa()
accepted_tables = [freq_table, rel_freq_table, p_a_table]
for table in accepted_tables:
actual = faith_pd(table=table, phylogeny=self.input_tree)
pdt.assert_series_equal(actual, self.faith_pd_expected)
def test_error_rewriting(self):
tree = skbio.TreeNode.read(io.StringIO(
'((A:0.3):0.2, C:100)root;'))
with self.assertRaisesRegex(skbio.tree.MissingNodeError,
'feature_ids.*phylogeny'):
faith_pd(table=self.input_table, phylogeny=tree)
class ObservedFeaturesTests(TestPluginBase):
package = 'q2_diversity_lib.tests'
def setUp(self):
super().setUp()
self.input_table = biom.Table(np.array([[1, 0, .5, 999, 1],
[0, 1, 2, 0, 5],
[0, 0, 0, 1, 10]]),
['A', 'B', 'C'],
['S1', 'S2', 'S3', 'S4', 'S5'])
# Calculated by hand:
self.observed_features_expected = pd.Series(
{'S1': 1, 'S2': 1, 'S3': 2, 'S4': 2,
'S5': 3},
name='observed_features')
def test_method(self):
actual = observed_features(table=self.input_table)
pdt.assert_series_equal(actual, self.observed_features_expected)
def test_accepted_types_have_consistent_behavior(self):
freq_table = self.input_table
rel_freq_table = copy.deepcopy(self.input_table).norm(axis='sample',
inplace=False)
p_a_table = copy.deepcopy(self.input_table).pa()
accepted_tables = [freq_table, rel_freq_table, p_a_table]
for table in accepted_tables:
actual = observed_features(table)
pdt.assert_series_equal(actual, self.observed_features_expected)
class PielouEvennessTests(TestPluginBase):
package = 'q2_diversity_lib.tests'
def setUp(self):
super().setUp()
self.input_table = biom.Table(np.array([[0, 1, 1, 1, 999, 1],
[0, 0, 1, 1, 999, 1],
[0, 0, 0, 1, 999, 2]]),
['A', 'B', 'C'],
['S1', 'S2', 'S3', 'S4', 'S5', 'S6'])
# Calculated by hand:
self.pielou_evenness_expected = pd.Series(
{'S1': np.NaN, 'S2': np.NaN, 'S3': 1, 'S4': 1,
'S5': 1, 'S6': 0.946394630357186},
name='pielou_evenness')
def test_method(self):
actual = pielou_evenness(table=self.input_table)
| pdt.assert_series_equal(actual, self.pielou_evenness_expected) | pandas.util.testing.assert_series_equal |
import queue
import logging
import numpy as np
import pandas as pd
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_array, check_is_fitted
ps = PorterStemmer()
class BaseDatasetDependentTSTransformer(BaseEstimator, TransformerMixin):
""" A base time series transformer for time series mapping methods that need to
be fitted on train data set before being able to map text samples into time series.
This kind of transformers will be fitted on each fold's train set and then transform
sample texts from both train and test sets of each fold.
"""
pass
class BaseDatasetIndependentTSTransformer(BaseEstimator, TransformerMixin):
""" A base time series transformer for time series mapping methods that do not need to
be fitted on train data set before being able to map text samples into time series.
This kind of transformers will directly transform text samples from the whole dataset
into time series.
"""
pass
def to_stemmed_tokens(text):
''' Split a text into stemmed tokens without converting capital letters to lowercase.
Parameters
----------
text : str
The text sample to be splitted.
Returns
-------
tokens : list
The tokens splitted from the text sample.
'''
# without lowering all letters
tokens = []
for token in word_tokenize(text):
        # because the Porter stemmer automatically lowercases all capital letters, we need to bring back any capital letters that were present
stem = ps.stem(token)
if stem != token:
stem_char_list = list(stem)
length = np.minimum(len(stem), len(token))
for i in range(length):
# if stemmer lowered a letter, bring it back to capital
if (stem[i] != token[i]) and (stem[i].upper() == token[i]):
stem_char_list[i] = token[i]
stem = "".join(stem_char_list)
tokens.append(stem)
return tokens
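# --- usage sketch (illustrative only) ---------------------------------------------------------
def _example_to_stemmed_tokens():
    """Minimal sketch: stemming keeps the original capitalisation pattern, so a leading
    capital letter survives the (otherwise lowercasing) Porter stemmer."""
    print(to_stemmed_tokens("Running dogs run quickly"))   # e.g. 'Running' -> 'Run'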
class TokenLenSeqTransformer(BaseDatasetIndependentTSTransformer):
""" An implementaion of token length sequence time series mapping method
Parameters
----------
name : str, default='tokenlenseq'
Name (in short) of the time series mapping method, also used as the name of the
column storing time series values in the time series dataframe output from the
transform method.
split_function : func, default=nltk.tokenize.word_tokenize
The function used to split a text into a list of tokens.
"""
def __init__(self, name="tokenlenseq", split_function=word_tokenize):
self.name = name
self.split_function = split_function
def fit(self, X, y=None):
""" Not used
"""
# Return the classifier
return self
def token_length_sequence(self, text):
""" Split the text sample and compute the length sequence.
Parameters
----------
text : str
The text to be splitted and computed into length sequence
Returns
-------
length_sequence : list
The token length sequence of the sample text.
"""
length_sequence = []
tokens = self.split_function(text)
if len(tokens) == 0: # if no token can be split from the text, the ts has only one value 0
length_sequence.append(0)
else:
for token in tokens:
length_sequence.append(len(token))
return length_sequence
def transform(self, X):
""" Map text samples into token length sequence time series
Parameters
----------
X : array-like, shape (n_samples,)
The input sample texts.
Returns
-------
ts : pandas.DataFrame, shape (n_datapoints, 2)
The dataframe containing the time series, first column stores the data points,
second column stores the ids of the time series
"""
# Input validation
check_array(np.reshape(X, (-1, 1)), dtype=str)
# map to ts
seq = []
for counter, text in enumerate(X):
seq.append(pd.DataFrame({ self.name : np.array(self.token_length_sequence(text)) }))
seq[-1]['id'] = counter
return pd.concat(seq)
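# --- usage sketch (illustrative only) ---------------------------------------------------------
def _example_token_len_seq():
    """Minimal sketch: two short texts mapped to token-length time series; the result is a
    long-format dataframe with one row per token and an 'id' column identifying the text."""
    ts = TokenLenSeqTransformer().fit_transform(["A short sentence.", "Another one."])
    print(ts.head())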
class TokenFreqSeqTransformer(BaseDatasetDependentTSTransformer):
""" An implementaion of token frequency sequence time series mapping method
Parameters
----------
name : str, default='tokenfreqseq'
Name (in short) of the time series mapping method, also used as the name of the
column storing time series values in the time series dataframe output from the
transform method.
split_function : func, default=authorshipattribution.ts.to_stemmed_tokens
The function used to split a text into a list of tokens.
"""
def __init__(self, name="tokenfreqseq", split_function=to_stemmed_tokens):
self.name = name
self.split_function = split_function
def token_frequency_dictionary(self, texts):
''' Split an array of texts into words and build a token frequency dictionary from them.
The token frequency dictionary will have all unique tokens in the texts as keys and
the number of occurrence of them in the texts.
Parameters
----------
texts : array-like, shape (n_samples,)
The text samples the dictionary would be built from.
Returns
-------
tokenfreq_dict : dict
The token frequency dictionary built from the text samples.
'''
tokenfreq_dict = {}
for text in texts:
features = self.split_function(text)
for feature in features:
if feature not in tokenfreq_dict:
tokenfreq_dict[feature] = 1
else:
tokenfreq_dict[feature] += 1
return tokenfreq_dict
def fit(self, X, y=None):
""" Compute the token frequency dictionary.
Parameters
----------
X : array-like, shape (n_samples,)
The input sample texts.
y : Not used
Returns
-------
self : TokenFreqSeqTransformer
The fitted transformer.
"""
# compute token frequency dictionary
self.tokenfreq_dict = self.token_frequency_dictionary(X)
# Return the classifier
return self
def token_frequencies(self, text):
''' Splits text into tokens using the split_function and returns the corresponding
frequencies of the tokens in the token frequency dictionary. Tokens that are not found in
the token frequency dictionary will be given values of 0.
Parameters
----------
text : str
The text to be split and transform into frequencies.
Returns
-------
frequencies : list
The token frequency sequence of the sample text.
'''
tokens = self.split_function(text)
frequencies = []
for token in tokens:
try:
frequencies.append(self.tokenfreq_dict[token])
except KeyError:
frequencies.append(0)
return frequencies
def transform(self, X):
""" Map text samples into token frequency sequence time series
Parameters
----------
X : array-like, shape (n_samples,)
The input sample texts.
Returns
-------
ts : pandas.DataFrame, shape (n_datapoints, 2)
The dataframe containing the time series, first column stores the data points,
second column stores the ids of the time series
"""
# Input validation
check_array(np.reshape(X, (-1, 1)), dtype=str)
# fitted validation
check_is_fitted(self, 'tokenfreq_dict')
# map to ts
seq = []
for counter, text in enumerate(X):
frequencies = self.token_frequencies(text)
if len(frequencies) == 0:
seq.append(pd.DataFrame({ self.name : np.array([0]) }))
else:
seq.append(pd.DataFrame({ self.name : np.log1p(np.array(frequencies)) }))
seq[-1]['id'] = counter
return pd.concat(seq)
def get_classifiers(self):
""" Get the fitted classifiers/transformers.
Returns
-------
: dict
A dictionary contains names of all classifiers/transformers along with the
classifers/transformers objects.
"""
return {}
def get_fitted_data(self):
""" Get the fitted data.
Returns
-------
: dict
A dictionary contains names of all fitted data along with the actual data.
"""
return { 'tokenfreq_dict' : self.tokenfreq_dict }
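# --- usage sketch (illustrative only) ---------------------------------------------------------
def _example_token_freq_seq():
    """Minimal sketch: fit the token frequency dictionary on a tiny corpus, then map each
    text to the log1p of the corpus-wide frequencies of its tokens."""
    corpus = ["the cat sat on the mat", "the dog sat"]
    transformer = TokenFreqSeqTransformer().fit(corpus)
    print(transformer.get_fitted_data()['tokenfreq_dict'])
    print(transformer.transform(corpus).head())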
class TokenRankSeqTransformer(BaseDatasetDependentTSTransformer):
""" An implementaion of token rank time series mapping method
Parameters
----------
name : str, default='tokenrankseq'
Name (in short) of the time series mapping method, also used as the name of the
column storing time series values in the time series dataframe output from the
transform method.
split_function : func, default=authorshipattribution.ts.to_stemmed_tokens
The function used to split a text into a list of tokens.
"""
def __init__(self, name="tokenrankseq", split_function=to_stemmed_tokens):
self.name = name
self.split_function = split_function
def token_rank_dictionary(self, tokenfreq_dict):
''' Rank the tokens in the token frequency dictionary.
The tokens in the token frequency dictionary are ranked by their number of
occurrences; tokens with the same number of occurrences are given an arbitrary
rank within their shared rank interval.
Parameters
----------
tokenfreq_dict : dict
The token frequency dictionary built from text samples.
Returns
-------
tokenrank_dict : dict
The token rank dictionary built from the token frequency dictionary.
'''
freq_to_rank_dict = dict.fromkeys(tokenfreq_dict.values())
# sort the frequency dictionary
token_freq_sorted = sorted([freq for freq in tokenfreq_dict.values()], reverse=True)
# Give ranks to tokens; for multiple occurrences of the same frequency, keep incrementing
# the rank and store these ranks in a queue for the corresponding frequency
for rank, freq in enumerate(token_freq_sorted):
if freq_to_rank_dict[freq] is None:
freq_to_rank_dict[freq] = queue.Queue()
freq_to_rank_dict[freq].put(rank+1)
# Give each token its rank based on its frequency; for tokens with the same frequency, the ranks stored
# in the corresponding queue are handed out one by one. The assignment is effectively random.
tokenrank_dict = {}
for token, freq in tokenfreq_dict.items():
tokenrank_dict[token] = freq_to_rank_dict[freq].get()
return tokenrank_dict
def fit(self, X, y=None):
""" Compute the token frequency dictionary and token rank dictionary.
Parameters
----------
X : array-like, shape (n_samples,)
The input sample texts.
y : Not used
Returns
-------
self : TokenRankSeqTransformer
The fitted transformer.
"""
# compute token frequency dictionary and token rank dictionary
tft = TokenFreqSeqTransformer()
self.tokenfreq_dict = tft.token_frequency_dictionary(X)
self.tokenrank_dict = self.token_rank_dictionary(self.tokenfreq_dict)
# Return the classifier
return self
def transform(self, X):
""" Map text samples into token frequency sequence time series
Parameters
----------
X : array-like, shape (n_samples,)
The input sample texts.
Returns
-------
ts : pandas.DataFrame, shape (n_datapoints, 2)
The dataframe containing the time series, first column stores the data points,
second column stores the ids of the time series
"""
# Input validation
check_array(np.reshape(X, (-1, 1)), dtype=str)
# fitted validation
check_is_fitted(self, ['tokenfreq_dict', 'tokenrank_dict'])
# map to ts
token_max_rank = max(self.tokenrank_dict.values())
seq = []
for counter, text in enumerate(X):
features = self.split_function(text)
ranks = []
for feature in features:
try:
ranks.append(self.tokenrank_dict[feature])
except KeyError: # if the word is new, give the maximum rank + 1
ranks.append(token_max_rank + 1)
if len(ranks) == 0:
seq.append(pd.DataFrame({ self.name : np.array([0]) }))
else:
seq.append(pd.DataFrame({ self.name : np.log1p(np.array(ranks)) }))
seq[-1]['id'] = counter
return pd.concat(seq)
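# --- Illustrative usage (not part of the original module) ---------------------
# A minimal sketch showing how the two transformers above are meant to be used:
# fit on a corpus, then map each text to a long-format time series dataframe
# with one value column (named after the transformer) and an 'id' column.
# The sample texts below are invented; the default split_function is assumed
# to be importable in this module as shown in the class definitions.
def _token_seq_demo():
    texts = ["the cat sat on the mat", "the dog chased the cat"]
    freq_ts = TokenFreqSeqTransformer().fit(texts).transform(texts)
    rank_ts = TokenRankSeqTransformer().fit(texts).transform(texts)
    return freq_ts.head(), rank_ts.head()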
from typing import List
import astral
import numpy as np
import pandas as pd
from quantities.date_time import Time, Date, DateTime, TimeDelta
from quantities.geometry import Angle
from nummath import interpolation, graphing
from sun.horizon import HorizonProfile
class Location:
def __init__(self, name: str, region: str, latitude: float, longitude: float, altitude: float,
timezone: str = 'UTC'):
self.name = name
self.region = region
self.latitude = latitude
self.longitude = longitude
self.timezone = timezone # see Wikipedia: list of tz database time zones
self.altitude = altitude
self.astral_location = astral.Location((
self.name,
self.region,
self.latitude,
self.longitude,
self.timezone,
self.altitude
))
class SunPosition:
def __init__(self, azimuth, elevation):
self.azimuth = Angle(azimuth, 'deg')
self.elevation = Angle(elevation, 'deg')
self.zenith = Angle(90.0 - elevation, 'deg')
def __str__(self):
return f"(azimuth {self.azimuth('deg'):.1f}°, " \
f"elevation {self.elevation('deg'):.1f}°, " \
f"zenith {self.zenith('deg'):.1f}°)"
@property
def coordinate(self):
return self.azimuth, self.elevation, self.zenith
class SunPositionCalculator:
@staticmethod
def calculate_position(location: Location, date: Date, time: Time):
loc = location.astral_location
py_datetime = DateTime(date=date, time=time).py_datetime
return SunPosition(
azimuth=loc.solar_azimuth(py_datetime),
elevation=loc.solar_elevation(py_datetime)
)
@staticmethod
def sunrise(location: Location, date: Date) -> Time:
loc = location.astral_location
sunrise = loc.sunrise(date.py_date)
return Time(sunrise.hour, sunrise.minute, sunrise.second)
@staticmethod
def sunset(location: Location, date: Date) -> Time:
loc = location.astral_location
sunset = loc.sunset(date.py_date)
return Time(sunset.hour, sunset.minute, sunset.second)
@staticmethod
def solar_noon(location: Location, date: Date) -> Time:
loc = location.astral_location
solar_noon = loc.solar_noon(date.py_date)
return Time(solar_noon.hour, solar_noon.minute, solar_noon.second)
@staticmethod
def daylight_time(location: Location, date: Date) -> TimeDelta:
loc = location.astral_location
start_time, end_time = loc.daylight(date.py_date)
return TimeDelta(DateTime.from_py_datetime(start_time), DateTime.from_py_datetime(end_time))
class SunPath:
def __init__(self, location: Location, date: Date):
self.label = date.py_date.strftime('%b %d') # date format string example: 'Jun 21'
self.t_ax = [Time(h, 0, 0) for h in range(24)]
sun_positions = [SunPositionCalculator.calculate_position(location, date, t) for t in self.t_ax]
self.azi_ax = [sp.azimuth('deg') for sp in sun_positions]
self.elev_ax = [sp.elevation('deg') for sp in sun_positions]
self.spi = interpolation.CubicSplineInterPol(x_data=self.azi_ax, y_data=self.elev_ax)
data = np.array([[elem[0], elem[1], elem[2]] for elem in zip(self.t_ax, self.azi_ax, self.elev_ax)])
cols = ['time', 'azimuth(°)', 'elevation(°)']
self.dataframe = pd.DataFrame(data=data, columns=cols)
import os
import math
import pandas as pd
import datetime
variables = {
'East Region Hospitals': 'resource_type',
'Current Census': 'cnt_used',
'Total Capacity': 'cnt_capacity',
'Available*': 'cnt_available',
'Current Utilization': 'pct_used',
'Available Capacity': 'pct_available'
}
def cleanData(data, fileName):
# source data frame from csv file
df = pd.DataFrame(data)
# -*- coding: utf-8 -*-
from copy import deepcopy
import warnings
from itertools import chain, combinations
from collections import Counter
from typing import Dict, Iterable, Iterator, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from scipy.stats import (pearsonr as pearsonR,
spearmanr as spearmanR,
kendalltau as kendallTau)
from tqdm.auto import tqdm
import xgboost
from sklearn.base import RegressorMixin, ClassifierMixin, ClusterMixin, TransformerMixin
from sklearn.model_selection import train_test_split, BaseCrossValidator, KFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.metrics import (r2_score as R2,
mean_squared_error as MSE,
roc_auc_score as ROCAUC,
confusion_matrix,
multilabel_confusion_matrix,
matthews_corrcoef as MCC,
explained_variance_score as eVar,
max_error as maxE,
mean_absolute_error as MAE,
mean_squared_log_error as MSLE,
mean_poisson_deviance as MPD,
mean_gamma_deviance as MGD,
)
from prodec.Descriptor import Descriptor
from prodec.Transform import Transform
from .reader import read_molecular_descriptors, read_protein_descriptors
from .preprocess import yscrambling
from .neuralnet import (BaseNN,
SingleTaskNNClassifier,
SingleTaskNNRegressor,
MultiTaskNNRegressor,
MultiTaskNNClassifier
)
pd.set_option('mode.chained_assignment', None)
def filter_molecular_descriptors(data: Union[pd.DataFrame, Iterator],
column_name: str,
keep_values: Iterable,
progress: bool = True,
total: Optional[int] = None) -> pd.DataFrame:
"""Filter the data so that the desired column contains only the desired data.
:param data: data to be filtered, either a dataframe or an iterator of chunks
:param column_name: name of the column to apply the filter on
:param keep_values: allowed values
:return: a pandas dataframe
"""
if isinstance(data, pd.DataFrame):
return data[data[column_name].isin(keep_values)]
elif progress:
return pd.concat([chunk[chunk[column_name].isin(keep_values)]
for chunk in tqdm(data, total=total, desc='Loading molecular descriptors')],
axis=0)
else:
return pd.concat([chunk[chunk[column_name].isin(keep_values)]
for chunk in data],
axis=0)
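# --- Illustrative usage (not part of the original API) ------------------------
# A small, self-contained sketch of both accepted input types: a plain
# DataFrame and an iterator of chunks. The descriptor values and connectivity
# keys are made up for demonstration only.
def _filter_molecular_descriptors_demo():
    demo = pd.DataFrame({'connectivity': ['AAA', 'BBB', 'CCC'], 'D001': [1, 2, 3]})
    kept = filter_molecular_descriptors(demo, 'connectivity', {'AAA', 'CCC'})
    chunks = iter([demo.iloc[:2], demo.iloc[2:]])
    kept_chunked = filter_molecular_descriptors(chunks, 'connectivity', {'AAA', 'CCC'},
                                                progress=False)
    return kept, kept_chunked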
def model_metrics(model, y_true, x_test) -> dict:
"""Determine performance metrics of a model
Beware R2 = 1 - (Residual sum of squares) / (Total sum of squares) != (Pearson r)²
R2_0, R2_0_prime, K and k_prime are derived from
<NAME>., & <NAME>. (2010).
Predictive Quantitative Structure–Activity Relationships Modeling.
In <NAME> & <NAME> (Eds.),
Handbook of Chemoinformatics Algorithms.
Chapman and Hall/CRC.
https://www.taylorfrancis.com/books/9781420082999
:param model: model to check the performance of
:param y_true: true labels
:param x_test: testing set of features
:return: a dictionary of metrics
"""
y_pred = model.predict(x_test)
# Regression metrics
if isinstance(model, (RegressorMixin, SingleTaskNNRegressor, MultiTaskNNRegressor)):
# Slope of predicted vs observed
k = sum(xi * yi for xi, yi in zip(y_true, y_pred)) / sum(xi ** 2 for xi in y_true)
# Slope of observed vs predicted
k_prime = sum(xi * yi for xi, yi in zip(y_true, y_pred)) / sum(yi ** 2 for yi in y_pred)
# Mean averages
y_true_mean = y_true.mean()
y_pred_mean = y_pred.mean()
return {'number' : y_true.size,
'R2' : R2(y_true, y_pred) if len(y_pred) >= 2 else 0,
'MSE' : MSE(y_true, y_pred, squared=True) if len(y_pred) >= 2 else 0,
'RMSE' : MSE(y_true, y_pred, squared=False) if len(y_pred) >= 2 else 0,
'MSLE' : MSLE(y_true, y_pred) if len(y_pred) >= 2 else 0,
'RMSLE' : np.sqrt(MSLE(y_true, y_pred)) if len(y_pred) >= 2 else 0,
'MAE' : MAE(y_true, y_pred) if len(y_pred) >= 2 else 0,
'Explained Variance' : eVar(y_true, y_pred) if len(y_pred) >= 2 else 0,
'Max Error' : maxE(y_true, y_pred) if len(y_pred) >= 2 else 0,
'Mean Poisson Distrib' : MPD(y_true, y_pred) if len(y_pred) >= 2 else 0,
'Mean Gamma Distrib' : MGD(y_true, y_pred) if len(y_pred) >= 2 else 0,
'Pearson r': pearsonR(y_true, y_pred)[0] if len(y_pred) >= 2 else 0,
'Spearman r' : spearmanR(y_true, y_pred)[0] if len(y_pred) >= 2 else 0,
'Kendall tau': kendallTau(y_true, y_pred)[0] if len(y_pred) >= 2 else 0,
'R2_0 (pred. vs. obs.)' : 1 - (sum((xi - k_prime * yi) **2 for xi, yi in zip(y_true, y_pred)) / sum((xi - y_true_mean) ** 2 for xi in y_true)) if len(y_pred) >= 2 else 0,
'R\'2_0 (obs. vs. pred.)' : 1 - (sum((yi - k * xi) **2 for xi, yi in zip(y_true, y_pred)) / sum((yi - y_pred_mean) ** 2 for yi in y_pred)) if len(y_pred) >= 2 else 0,
'k slope (pred. vs obs.)' : k,
'k\' slope (obs. vs pred.)' : k_prime,
}
# Classification
elif isinstance(model, (ClassifierMixin, SingleTaskNNClassifier, MultiTaskNNClassifier)):
# Binary classification
if len(model.classes_) == 2:
tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=model.classes_).ravel()
values = {}
try:
mcc = MCC(y_true, y_pred)
values['MCC'] = mcc
except RuntimeWarning:
pass
values[':'.join(str(x) for x in model.classes_)] = ':'.join([str(int(sum(y_true == class_))) for class_ in model.classes_])
values['ACC'] = (tp + tn) / (tp + tn + fp + fn) if (tp + tn + fp + fn) != 0 else 0
values['BACC'] = (tp / (tp + fn) + tn / (tn + fp)) / 2
values['Sensitivity'] = tp / (tp + fn) if tp + fn != 0 else 0
values['Specificity'] = tn / (tn + fp) if tn + fp != 0 else 0
values['PPV'] = tp / (tp + fp) if tp + fp != 0 else 0
values['NPV'] = tn / (tn + fn) if tn + fn != 0 else 0
values['F1'] = 2 * values['Sensitivity'] * values['PPV'] / (values['Sensitivity'] + values['PPV']) if (values['Sensitivity'] + values['PPV']) != 0 else 0
if hasattr(model, "predict_proba"): # able to predict probability
y_probas = model.predict_proba(x_test)
if y_probas.shape[1] == 1:
y_proba = y_probas.ravel()
values['AUC 1'] = ROCAUC(y_true, y_probas)
else:
for i in range(len(model.classes_)):
y_proba = y_probas[:, i].ravel()
try:
values['AUC %s' % model.classes_[i]] = ROCAUC(y_true, y_proba)
except ValueError:
warnings.warn('Only one class present in y_true. ROC AUC score is not defined in that case. '
'Stratify your folds to avoid such warning.')
values['AUC %s' % model.classes_[i]] = np.nan
# Multiclasses
else:
i = 0
values = {}
for contingency_matrix in multilabel_confusion_matrix(y_true, y_pred):
tn, fp, fn, tp = contingency_matrix.ravel()
try:
mcc = MCC(y_true, y_pred)
values['%s|MCC' % model.classes_[i]] = mcc
except RuntimeWarning:
pass
values['%s|number' % model.classes_[i]] = int(sum(y_true == model.classes_[i]))
values['%s|ACC' % model.classes_[i]] = (tp + tn) / (tp + tn + fp + fn) if (
tp + tn + fp + fn) != 0 else 0
values['%s|BACC' % model.classes_[i]] = (tp / (tp + fn) + tn / (tn + fp)) / 2
values['%s|Sensitivity' % model.classes_[i]] = tp / (tp + fn) if tp + fn != 0 else 0
values['%s|Specificity' % model.classes_[i]] = tn / (tn + fp) if tn + fp != 0 else 0
values['%s|PPV' % model.classes_[i]] = tp / (tp + fp) if tp + fp != 0 else 0
values['%s|NPV' % model.classes_[i]] = tn / (tn + fn) if tn + fn != 0 else 0
values['%s|F1' % model.classes_[i]] = 2 * values['%s|Sensitivity' % model.classes_[i]] * values[
'%s|PPV' % model.classes_[i]] / (values['%s|Sensitivity' % model.classes_[i]] + values[
'%s|PPV' % model.classes_[i]]) if (values['%s|Sensitivity' % model.classes_[i]] + values[
'%s|PPV' % model.classes_[i]]) != 0 else 0
i += 1
if hasattr(model, "predict_proba"): # able to predict probability
y_probas = model.predict_proba(x_test)
try:
values['AUC 1 vs 1'] = ROCAUC(y_true, y_probas, average="macro", multi_class="ovo")
values['AUC 1 vs All'] = ROCAUC(y_true, y_probas, average="macro", multi_class="ovr")
except ValueError:
warnings.warn('Only one class present in y_true. ROC AUC score is not defined in that case. '
'Stratify your folds to avoid such warning.')
values['AUC 1 vs 1'] = np.nan
values['AUC 1 vs All'] = np.nan
return values
else:
raise ValueError('model can only be classifier or regressor.')
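# --- Illustrative usage (not part of the original API) ------------------------
# Computing the regression metrics dictionary for a scikit-learn regressor
# fitted on random toy data; the numbers carry no meaning beyond showing the
# expected call signature (fitted model, true labels, feature matrix). Targets
# are kept strictly positive so the deviance/log-error metrics are defined.
def _model_metrics_demo():
    from sklearn.ensemble import RandomForestRegressor
    rng = np.random.default_rng(0)
    X = pd.DataFrame(rng.normal(size=(50, 3)), columns=['f1', 'f2', 'f3'])
    y = pd.Series(rng.uniform(5.0, 9.0, size=50))
    reg = RandomForestRegressor(n_estimators=10, random_state=0).fit(X, y)
    return model_metrics(reg, y, X)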
def crossvalidate_model(data: pd.DataFrame,
model: Union[RegressorMixin, ClassifierMixin],
folds: BaseCrossValidator,
groups: List[int] = None,
verbose: bool = False
) -> Tuple[pd.DataFrame, Dict[str, Union[RegressorMixin, ClassifierMixin]]]:
"""Create a machine learning model predicting values in the first column
:param data: data containing the dependent variable (in the first column) and other features
:param model: estimator (may be classifier or regressor) to use for model building
:param folds: cross-validator
:param groups: groups to split the labels according to
:param verbose: whether to show fold progression
:return: cross-validated performance and model trained on the entire dataset
"""
X, y = data.iloc[:, 1:], data.iloc[:, 0].values.ravel()
performance = []
if verbose:
pbar = tqdm(desc='Fitting model', total=folds.n_splits + 1)
models = {}
# Perform cross-validation
for i, (train, test) in enumerate(folds.split(X, y, groups)):
if verbose:
pbar.set_description(f'Fitting model on fold {i + 1}', refresh=True)
model.fit(X.iloc[train, :], y[train])
models[f'Fold {i + 1}'] = deepcopy(model)
performance.append(model_metrics(model, y[test], X.iloc[test, :]))
if verbose:
pbar.update()
# Organize result in a dataframe
performance = pd.DataFrame(performance)
performance.index = [f'Fold {i + 1}' for i in range(folds.n_splits)]
# Add average and sd of performance
performance.loc['Mean'] = [np.mean(performance[col]) if ':' not in col else '-' for col in performance]
performance.loc['SD'] = [np.std(performance[col]) if ':' not in col else '-' for col in performance]
# Fit model on the entire dataset
if verbose:
pbar.set_description('Fitting model on entire training set', refresh=True)
model.fit(X, y)
models['Full model'] = deepcopy(model)
if verbose:
pbar.update()
return performance, models
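# --- Illustrative usage (not part of the original API) ------------------------
# Cross-validating a default XGBoost regressor on a random toy frame laid out
# the way crossvalidate_model expects: endpoint in the first column, features
# in the remaining columns. All values are synthetic.
def _crossvalidate_demo():
    rng = np.random.default_rng(42)
    toy = pd.DataFrame(rng.normal(size=(60, 4)),
                       columns=['pchembl_value_Mean', 'f1', 'f2', 'f3'])
    toy['pchembl_value_Mean'] = rng.uniform(5.0, 9.0, size=60)
    kfold = KFold(n_splits=5, shuffle=True, random_state=1234)
    performance, fitted = crossvalidate_model(toy, xgboost.XGBRegressor(verbosity=0), kfold)
    return performance.loc[['Mean', 'SD']], fitted['Full model']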
def train_test_proportional_group_split(data: pd.DataFrame,
groups: List[int],
test_size: float = 0.30,
verbose: bool = False
) -> Tuple[pd.DataFrame, pd.DataFrame, List[int], List[int]]:
"""Split the data into training and test sets according to the groups that respect most test_size
:param data: the data to be split up into training and test sets
:param groups: groups to split the data according to
:param test_size: approximate proportion of the input dataset to determine the test set
:param verbose: whether to log to stdout or not
:return: training and test sets and training and test groups
"""
counts = Counter(groups)
size = sum(counts.values())
# Get ordered permutations of groups without repetitions
permutations = list(chain.from_iterable(combinations(counts.keys(), r) for r in range(len(counts))))
# Get proportion of each permutation
proportions = [sum(counts[x] for x in p) / size for p in permutations]
# Get permutation minimizing difference to test_size
best, proportion = min(zip(permutations, proportions), key=lambda x: (x[1] - test_size) ** 2)
del counts, permutations, proportions
if verbose:
print(f'Best group permutation corresponds to {proportion:.2%} of the data')
# Get test set assignment
assignment = np.asarray([group in best for group in groups])
opposite = np.logical_not(assignment)
# Get training groups
t_groups = [x for x in groups if x not in best]
return data[opposite], data[assignment], t_groups, best
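# --- Illustrative usage (not part of the original API) ------------------------
# Splitting a toy frame so that whole groups land in either the training or the
# test set, with the test fraction as close to 30% as the group sizes allow.
# The group labels below are invented for demonstration only.
def _group_split_demo():
    toy = pd.DataFrame({'value': range(10)})
    groups = [0, 0, 0, 1, 1, 1, 1, 2, 2, 2]
    train, test, train_groups, test_groups = train_test_proportional_group_split(
        toy, groups, test_size=0.30, verbose=False)
    return train.shape, test.shape, test_groups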
def qsar(data: pd.DataFrame,
endpoint: str = 'pchembl_value_Mean',
num_points: int = 30,
delta_activity: float = 2,
version: str = 'latest',
descriptors: str = 'mold2',
descriptor_path: Optional[str] = None,
descriptor_chunksize: Optional[int] = 50000,
activity_threshold: float = 6.5,
model: Union[RegressorMixin, ClassifierMixin] = xgboost.XGBRegressor(verbosity=0),
folds: int = 5,
stratify: bool = False,
split_by: str = 'Year',
split_year: int = 2013,
test_set_size: float = 0.30,
cluster_method: ClusterMixin = None,
custom_groups: pd.DataFrame = None,
scale: bool = False,
scale_method: TransformerMixin = StandardScaler(),
yscramble: bool = False,
random_state: int = 1234,
verbose: bool = True
) -> Tuple[pd.DataFrame,
Dict[str,
Optional[Union[TransformerMixin,
LabelEncoder,
BaseCrossValidator,
Dict[str,
Union[RegressorMixin,
ClassifierMixin]]]]]]:
"""Create QSAR models for as many targets with selected data source(s),
data quality, minimum number of datapoints and minimum activity amplitude.
:param data: Papyrus activity data
:param endpoint: value to be predicted or to derive classes from
:param num_points: minimum number of points for the activity of a target to be modelled
:param delta_activity: minimum difference between most and least active compounds for a target to be modelled
:param descriptors: type of descriptors to be used for model training
:param descriptor_path: path to Papyrus descriptors (default: pystow's default path)
:param descriptor_chunksize: chunk size of molecular descriptors to be iteratively loaded (None disables chunking)
:param activity_threshold: threshold activity between active and inactive compounds (ignored if using a regressor)
:param model: machine learning model to be used for QSAR modelling
:param folds: number of cross-validation folds to be performed
:param stratify: whether to stratify folds for cross validation, ignored if model is RegressorMixin
:param split_by: how should folds be determined {'random', 'Year', 'cluster', 'custom'}
If 'random', exactly test_set_size is extracted for test set.
If 'Year', the size of the test and training set are not looked at
If 'cluster' or 'custom', the groups giving the proportion closest to test_set_size will be used to define the test set
:param split_year: Year from which on the test set is extracted (ignored if split_by is not 'Year')
:param test_set_size: proportion of the dataset to be used as test set
:param cluster_method: clustering method to use to extract test set and cross-validation folds (ignored if split_by is not 'cluster')
:param custom_groups: custom groups to use to extract test set and cross-validation fold (ignored if split_by is not 'custom').
Groups must be a pandas DataFrame with only two Series. The first Series is either InChIKey or connectivity
(depending on whether stereochemistry data are being use or not). The second Series must be the group assignment
of each compound.
:param scale: should the features be scaled using the custom scaling_method
:param scale_method: scaling method to be applied to features (ignored if scale is False)
:param yscramble: should the endpoint be shuffled to compare performance to the unshuffled endpoint
:param random_state: seed to use for train/test splitting and KFold shuffling
:param verbose: log details to stdout
:return: both:
- a dataframe of the cross-validation results where each line is a fold of QSAR modelling of an accession
- a dictionary of the feature scaler (if used), label encoder (if mode is a classifier),
the data splitter for cross-validation, and for each accession in the data:
the fitted models on each cross-validation fold and the model fitted on the complete training set.
"""
if split_by.lower() not in ['year', 'random', 'cluster', 'custom']:
raise ValueError("split not supported, must be one of {'Year', 'random', 'cluster', 'custom'}")
if not isinstance(model, (RegressorMixin, ClassifierMixin)):
raise ValueError('model type can only be a Scikit-Learn compliant regressor or classifier')
warnings.filterwarnings("ignore", category=RuntimeWarning)
if isinstance(model, (xgboost.XGBRegressor, xgboost.XGBClassifier)):
warnings.filterwarnings("ignore", category=UserWarning)
model_type = 'regressor' if isinstance(model, RegressorMixin) else 'classifier'
# Keep only required fields
merge_on = 'connectivity' if 'connectivity' in data.columns else 'InChIKey'
if model_type == 'regressor':
features_to_ignore = [merge_on, 'target_id', endpoint, 'Year']
data = data[data['relation'] == '='][features_to_ignore]
else:
features_to_ignore = [merge_on, 'target_id', 'Activity_class', 'Year']
preserved = data[~data['Activity_class'].isna()]
preserved = preserved.drop(
columns=[col for col in preserved if col not in [merge_on, 'target_id', 'Activity_class', 'Year']])
active = data[data['Activity_class'].isna() & (data[endpoint] > activity_threshold)]
active = active[~active['relation'].str.contains('<')][features_to_ignore]
active.loc[:, 'Activity_class'] = 'A'
inactive = data[data['Activity_class'].isna() & (data[endpoint] <= activity_threshold)]
inactive = inactive[~inactive['relation'].str.contains('>')][features_to_ignore]
inactive.loc[:, 'Activity_class'] = 'N'
data = pd.concat([preserved, active, inactive])
# Change endpoint
endpoint = 'Activity_class'
del preserved, active, inactive
# Get and merge molecular descriptors
descs = read_molecular_descriptors(descriptors, 'connectivity' not in data.columns,
version, descriptor_chunksize, descriptor_path)
descs = filter_molecular_descriptors(descs, merge_on, data[merge_on].unique())
data = data.merge(descs, on=merge_on)
data = data.drop(columns=[merge_on])
del descs
# Table of results
results, models = [], {}
targets = list(data['target_id'].unique())
n_targets = len(targets)
if verbose:
pbar = tqdm(total=n_targets, smoothing=0.1)
# Build QSAR model for targets reaching criteria
for i_target in range(n_targets - 1, -1, -1):
tmp_data = data[data['target_id'] == targets[i_target]]
if verbose:
pbar.set_description(f'Building QSAR for target: {targets[i_target]} #datapoints {tmp_data.shape[0]}',
refresh=True)
# Insufficient data points
if tmp_data.shape[0] < num_points:
if model_type == 'regressor':
results.append(pd.DataFrame([[targets[i_target],
tmp_data.shape[0],
f'Number of points {tmp_data.shape[0]} < {num_points}']],
columns=['target', 'number', 'error']))
else:
data_classes = Counter(tmp_data[endpoint])
results.append(
pd.DataFrame([[targets[i_target],
':'.join(str(data_classes.get(x, 0)) for x in ['A', 'N']),
f'Number of points {tmp_data.shape[0]} < {num_points}']],
columns=['target', 'A:N', 'error']))
if verbose:
pbar.update()
models[targets[i_target]] = None
continue
if model_type == 'regressor':
min_activity = tmp_data[endpoint].min()
max_activity = tmp_data[endpoint].max()
delta = max_activity - min_activity
# Not enough activity amplitude
if delta < delta_activity:
results.append(pd.DataFrame([[targets[i_target],
tmp_data.shape[0],
f'Delta activity {delta} < {delta_activity}']],
columns=['target', 'number', 'error']))
if verbose:
pbar.update()
models[targets[i_target]] = None
continue
else:
data_classes = Counter(tmp_data[endpoint])
# Only one activity class
if len(data_classes) == 1:
results.append(
pd.DataFrame([[targets[i_target],
':'.join(str(data_classes.get(x, 0)) for x in ['A', 'N']),
'Only one activity class']],
columns=['target', 'A:N', 'error']))
if verbose:
pbar.update()
models[targets[i_target]] = None
continue
# Not enough data in minority class for all folds
elif not all(x >= folds for x in data_classes.values()):
results.append(
pd.DataFrame([[targets[i_target],
':'.join(str(data_classes.get(x, 0)) for x in ['A', 'N']),
f'Not enough data in minority class for all {folds} folds']],
columns=['target', 'A:N', 'error']))
if verbose:
pbar.update()
models[targets[i_target]] = None
continue
# Set groups for fold enumerator and extract test set
if split_by.lower() == 'year':
groups = tmp_data['Year']
test_set = tmp_data[tmp_data['Year'] >= split_year]
if test_set.empty:
if model_type == 'regressor':
results.append(pd.DataFrame([[targets[i_target],
tmp_data.shape[0],
f'No test data for temporal split at {split_year}']],
columns=['target', 'number', 'error']))
else:
data_classes = Counter(tmp_data[endpoint])
results.append(
pd.DataFrame([[targets[i_target],
':'.join(str(data_classes.get(x, 0)) for x in ['A', 'N']),
f'No test data for temporal split at {split_year}']],
columns=['target', 'A:N', 'error']))
if verbose:
pbar.update()
models[targets[i_target]] = None
continue
training_set = tmp_data[~tmp_data.index.isin(test_set.index)]
if training_set.empty or training_set.shape[0] < folds:
if model_type == 'regressor':
results.append(pd.DataFrame([[targets[i_target],
tmp_data.shape[0],
f'Not enough training data for temporal split at {split_year}']],
columns=['target', 'number', 'error']))
else:
data_classes = Counter(tmp_data[endpoint])
results.append(
pd.DataFrame([[targets[i_target],
':'.join(str(data_classes.get(x, 0)) for x in ['A', 'N']),
f'Not enough training data for temporal split at {split_year}']],
columns=['target', 'A:N', 'error']))
if verbose:
pbar.update()
models[targets[i_target]] = None
continue
if model_type == 'classifier':
train_data_classes = Counter(training_set[endpoint])
test_data_classes = Counter(test_set[endpoint])
if len(train_data_classes) < 2:
results.append(pd.DataFrame([[targets[i_target],
':'.join(str(train_data_classes.get(x, 0)) for x in ['A', 'N']),
f'Only one activity class in training set for temporal split at {split_year}']],
columns=['target', 'A:N', 'error']))
if verbose:
pbar.update()
continue
elif len(test_data_classes) < 2:
results.append(pd.DataFrame([[targets[i_target],
':'.join(str(test_data_classes.get(x, 0)) for x in ['A', 'N']),
f'Only one activity class in test set for temporal split at {split_year}']],
columns=['target', 'A:N', 'error']))
if verbose:
pbar.update()
models[targets[i_target]] = None
continue
training_groups = training_set['Year']
elif split_by.lower() == 'random':
training_groups = None
training_set, test_set = train_test_split(tmp_data, test_size=test_set_size, random_state=random_state)
elif split_by.lower() == 'cluster':
groups = cluster_method.fit_predict(tmp_data.drop(columns=features_to_ignore))
training_set, test_set, training_groups, _ = train_test_proportional_group_split(tmp_data, groups,
test_set_size,
verbose=verbose)
elif split_by.lower() == 'custom':
# Merge from custom split DataFrame
groups = tmp_data[[merge_on]].merge(custom_groups, on=merge_on).iloc[:, 1].tolist()
training_set, test_set, training_groups, _ = train_test_proportional_group_split(tmp_data, groups,
test_set_size,
verbose=verbose)
# Drop columns not used for training
training_set = training_set.drop(columns=['Year', 'target_id'])
test_set = test_set.drop(columns=['Year', 'target_id'])
X_train, y_train = training_set.drop(columns=[endpoint]), training_set.loc[:, endpoint]
X_test, y_test = test_set.drop(columns=[endpoint]), test_set.loc[:, endpoint]
# Scale data
if scale:
X_train.loc[X_train.index, X_train.columns] = scale_method.fit_transform(X_train)
X_test.loc[X_test.index, X_test.columns] = scale_method.transform(X_test)
# Encode labels
if model_type == 'classifier':
lblenc = LabelEncoder()
y_train = pd.Series(data=lblenc.fit_transform(y_train),
index=y_train.index, dtype=y_train.dtype,
name=y_train.name)
y_test = pd.Series(data=lblenc.transform(y_test),
index=y_test.index, dtype=y_test.dtype,
name=y_test.name)
y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)
# Reorganize data
training_set = pd.concat([y_train, X_train], axis=1)
test_set = pd.concat([y_test, X_test], axis=1)
del X_train, y_train, X_test, y_test
# Y-scrambling
if yscramble:
training_set = yscrambling(data=training_set, y_var=endpoint, random_state=random_state)
test_set = yscrambling(data=test_set, y_var=endpoint, random_state=random_state)
# Make sure enough data
if model_type == 'classifier':
train_data_classes = Counter(training_set['Activity_class'])
train_enough_data = np.all(np.array(list(train_data_classes.values())) > folds)
test_data_classes = Counter(test_set['Activity_class'])
test_enough_data = np.all(np.array(list(test_data_classes.values())) > folds)
if not train_enough_data:
results.append(pd.DataFrame([[targets[i_target],
':'.join(str(train_data_classes.get(x, 0)) for x in ['A', 'N']),
f'Not enough data in minority class of the training set for all {folds} folds']],
columns=['target', 'A:N', 'error']))
if verbose:
pbar.update()
models[targets[i_target]] = None
continue
elif not test_enough_data:
results.append(pd.DataFrame([[targets[i_target],
':'.join(str(test_data_classes.get(x, 0)) for x in ['A', 'N']),
f'Not enough data in minority class of the test set for all {folds} folds']],
columns=['target', 'A:N', 'error']))
if verbose:
pbar.update()
models[targets[i_target]] = None
continue
# Define folding scheme for cross validation
if stratify and model_type == 'classifier':
kfold = StratifiedKFold(n_splits=folds, shuffle=True, random_state=random_state)
else:
kfold = KFold(n_splits=folds, shuffle=True, random_state=random_state)
performance, cv_models = crossvalidate_model(training_set, model, kfold, training_groups)
full_model = cv_models['Full model']
X_test, y_test = test_set.iloc[:, 1:], test_set.iloc[:, 0].values.ravel()
performance.loc['Test set'] = model_metrics(full_model, y_test, X_test)
performance.loc[:, 'target'] = targets[i_target]
results.append(performance.reset_index())
models[targets[i_target]] = cv_models
if verbose:
pbar.update()
if isinstance(model, (xgboost.XGBRegressor, xgboost.XGBClassifier)):
warnings.filterwarnings("default", category=UserWarning)
warnings.filterwarnings("default", category=RuntimeWarning)
# Formatting return values
return_val = {}
if scale:
return_val['scaler'] = deepcopy(scale_method)
if model_type == 'classifier':
return_val['label_encoder'] = deepcopy(lblenc)
if stratify:
return_val['data_splitter'] = StratifiedKFold(n_splits=folds, shuffle=True, random_state=random_state)
else:
return_val['data_splitter'] = KFold(n_splits=folds, shuffle=True, random_state=random_state)
return_val = {**return_val, **models}
if len(results) == 0:
return pd.DataFrame()
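# --- Illustrative call (commented out; not part of the original module) -------
# Assumes `activities` is a Papyrus activity dataframe already loaded by the
# caller, with at least 'connectivity', 'target_id', 'pchembl_value_Mean',
# 'relation' and 'Year' columns; all settings shown simply restate the defaults
# documented above.
# results, artifacts = qsar(activities,
#                           endpoint='pchembl_value_Mean',
#                           descriptors='mold2',
#                           model=xgboost.XGBRegressor(verbosity=0),
#                           folds=5,
#                           split_by='Year',
#                           split_year=2013)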
from os import path
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from pathlib import Path
import ptitprince as pt
# ----------
# Loss Plots
# ----------
def save_loss_plot(path, loss_function, v_path=None, show=True):
df = pd.read_csv(path)
if v_path is not None:
vdf = pd.read_csv(v_path)
else:
vdf = None
p = Path(path)
n = p.stem
d = p.parents[0]
out_path = os.path.join(d, n + '_loss.png')
fig, ax = plot_loss(df, vdf=vdf, x_lab='Iteration', y_lab=loss_function, save=out_path, show=show)
def plot_loss(df, vdf=None, x_lab='Iteration', y_lab='BCE Loss', save=None, show=True):
x = df['Unnamed: 0'].values
y = df['loss'].values
epochs = len(df['epoch'].unique())
no_batches = int(len(x) / epochs)
epoch_ends = np.array([((i + 1) * no_batches) - 1 for i in range(epochs)])
epoch_end_x = x[epoch_ends]
epoch_end_y = y[epoch_ends]
fig, ax = plt.subplots()
leg = ['loss',]
ax.plot(x, y, linewidth=2)
ax.scatter(epoch_end_x, epoch_end_y)
title = 'Training loss'
if vdf is not None:
if len(vdf) > epochs:
vy = vdf.groupby('batch_id').mean()['validation_loss'].values
vx = vdf['batch_id'].unique()
else:
vy = vdf['validation_loss'].values
vx = epoch_end_x
title = title + ' with validation loss'
leg.append('validation loss')
if len(vdf) > epochs:
#vy_err = v_df.groupby('batch_id').sem()['validation_loss'].values
#ax.errorbar(vx, vy, vy_err, marker='.')
ax.plot(vx, vy, linewidth=2, marker='o')
else:
ax.plot(vx, vy, linewidth=2, marker='o')
ax.set(xlabel=x_lab, ylabel=y_lab)
ax.set_title(title)
ax.legend(leg)
fig.set_size_inches(13, 9)
if save is not None:
plt.savefig(save, dpi=300)
if show:
plt.show()
return fig, ax
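# --- Illustrative usage (not part of the original module) ---------------------
# plot_loss expects the frame produced by reading the training-log CSV, i.e.
# columns 'Unnamed: 0' (iteration), 'loss' and 'epoch'. The numbers below are
# synthetic and only demonstrate the expected layout.
def _plot_loss_demo(show=False):
    iters = np.arange(20)
    demo = pd.DataFrame({'Unnamed: 0': iters,
                         'loss': np.exp(-iters / 10.0),
                         'epoch': np.repeat(np.arange(4), 5)})
    return plot_loss(demo, x_lab='Iteration', y_lab='BCE Loss', show=show)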
def save_channel_loss_plot(path, show=True):
df = pd.read_csv(path)
p = Path(path)
n = p.stem
d = p.parents[0]
out_path = os.path.join(d, n + '_channel-loss.png')
fig, ax = plot_channel_losses(df, save=out_path, show=show)
def plot_channel_losses(df, x_lab='Iteration', y_lab='BCE Loss', save=None, show=True):
cols = list(df.columns)
x = df['Unnamed: 0'].values
non_channel_cols = ['Unnamed: 0', 'epoch', 'batch_num', 'loss', 'data_id']
channel_losses = [col for col in cols if col not in non_channel_cols]
#print(channel_losses)
if len(channel_losses) > 5:
#print('four plots')
fig, axs = plt.subplots(2, 2)
zs, ys, xs, cs = [], [], [], []
for col in channel_losses:
y = df[col].values
if col.startswith('z'):
ls = _get_linestyle(zs)
axs[0, 0].plot(x, y, linewidth=1, linestyle=ls)
zs.append(col)
if col.startswith('y'):
ls = _get_linestyle(ys)
axs[0, 1].plot(x, y, linewidth=1, linestyle=ls)
ys.append(col)
if col.startswith('x'):
ls = _get_linestyle(xs)
axs[1, 0].plot(x, y, linewidth=1, linestyle=ls)
xs.append(col)
if col.startswith('cent') or col == 'mask':
ls = _get_linestyle(cs)
axs[1, 1].plot(x, y, linewidth=1, linestyle=ls)
cs.append(col)
axs[0, 0].set_title('Z affinities losses')
axs[0, 0].legend(zs)
axs[0, 1].set_title('Y affinities losses')
axs[0, 1].legend(ys)
axs[1, 0].set_title('X affinities losses')
axs[1, 0].legend(xs)
axs[1, 1].set_title('Object interior losses')
axs[1, 1].legend(cs)
fig.set_size_inches(13, 9)
elif len(channel_losses) <= 5:
#print('two plots')
fig, axs = plt.subplots(2, 1)
affs, cs = [], []
for col in channel_losses:
y = df[col].values
if col.startswith('z') or col.startswith('y') or col.startswith('x'):
ls = _get_linestyle(affs)
axs[0].plot(x, y, linewidth=2, linestyle=ls)
affs.append(col)
if col.startswith('cent') or col == 'mask':
axs[1].plot(x, y, linewidth=2)
cs.append(col)
axs[0].set_title('Affinities losses')
axs[0].legend(affs)
axs[1].set_title('Object interior losses')
axs[1].legend(cs)
fig.set_size_inches(14, 14)
for ax in axs.flat:
ax.set(xlabel=x_lab, ylabel=y_lab)
if save is not None:
plt.savefig(save, dpi=300)
if show:
plt.show()
return fig, axs
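# --- Illustrative usage (not part of the original module) ---------------------
# plot_channel_losses keys on column-name prefixes: 'z*'/'y*'/'x*' columns are
# treated as affinity channels and 'cent*'/'mask' as object-interior channels.
# The toy frame below exercises the two-panel branch with synthetic values.
def _plot_channel_losses_demo(show=False):
    iters = np.arange(30)
    demo = pd.DataFrame({'Unnamed: 0': iters,
                         'epoch': np.repeat(np.arange(3), 10),
                         'batch_num': np.tile(np.arange(10), 3),
                         'loss': np.exp(-iters / 15.0),
                         'z-1': np.exp(-iters / 12.0),
                         'y-1': np.exp(-iters / 14.0),
                         'x-1': np.exp(-iters / 16.0),
                         'mask': np.exp(-iters / 18.0)})
    return plot_channel_losses(demo, save=None, show=show)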
def _get_linestyle(lis):
if len(lis) == 0:
ls = '-'
elif len(lis) == 1:
ls = '--'
else:
ls = ':'
return ls
# --------
# VI Plots
# --------
def VI_plot(
path,
cond_ent_over="GT | Output",
cond_ent_under="Output | GT",
lab="",
save=False,
show=True):
df = pd.read_csv(path)
overseg = df[cond_ent_over].values
o_groups = [cond_ent_over] * len(overseg)
underseg = df[cond_ent_under].values
u_groups = [cond_ent_under] * len(underseg)
groups = o_groups + u_groups
x = 'Variation of information'
y = 'Conditional entropy'
data = {
x : groups,
y : np.concatenate([overseg, underseg])
}
data = pd.DataFrame(data)
o = 'h'
pal = 'Set2'
sigma = .2
f, ax = plt.subplots(figsize=(12, 10))
pt.RainCloud(x = x, y = y, data = data, palette = pal, bw = sigma,
width_viol = .6, ax = ax, orient = o)
p = Path(path)
plt.title(p.stem)
if save:
save_path = os.path.join(p.parents[0], p.stem + lab + '_VI_raincloud_plot.png')
plt.savefig(save_path, bbox_inches='tight')
if show:
plt.show()
def experiment_VI_plots(
paths,
names,
title,
out_name,
out_dir,
cond_ent_over="GT | Output",
cond_ent_under="Output | GT",
show=True
):
plt.rcParams.update({'font.size': 16})
groups = []
ce0 = []
ce1 = []
for i, p in enumerate(paths):
df = pd.read_csv(p)
ce0.append(df[cond_ent_over].values)
ce1.append(df[cond_ent_under].values)
groups += [names[i]] * len(df)
x = 'Experiment'
data = {
x : groups,
cond_ent_over : np.concatenate(ce0),
cond_ent_under : np.concatenate(ce1)
}
data = pd.DataFrame(data)
o = 'h'
pal = 'Set2'
sigma = .2
f, axs = plt.subplots(1, 2, figsize=(14, 10)) #, sharex=True) #, sharey=True)
ax0 = axs[0]
ax1 = axs[1]
pt.RainCloud(x = x, y = cond_ent_over, data = data, palette = pal, bw = sigma,
width_viol = .6, ax = ax0, orient = o)
ax0.set_title('Over-segmentation conditional entropy')
pt.RainCloud(x = x, y = cond_ent_under, data = data, palette = pal, bw = sigma,
width_viol = .6, ax = ax1, orient = o)
ax1.set_title('Under-segmentation conditional entropy')
f.suptitle(title)
os.makedirs(out_dir, exist_ok=True)
save_path = os.path.join(out_dir, out_name + '_VI_raincloud_plots.png')
plt.savefig(save_path, bbox_inches='tight')
if show:
plt.show()
# -----------------------
# Average Precision Plots
# -----------------------
def plot_experiment_APs(paths, names, title, out_dir, out_name, show=True):
dfs = [pd.read_csv(path) for path in paths]
os.makedirs(out_dir, exist_ok=True)
out_path = os.path.join(out_dir, out_name)
plot_AP(dfs, names, out_path, title, show=show)
def plot_AP(dfs, names, out_path, title, thresh_name='threshold', ap_name='average_precision', show=True):
plt.rcParams.update({'font.size': 16})
plt.rcParams["figure.figsize"] = (10,10)
fig = plt.figure()
for df in dfs:
plt.plot(df[thresh_name].values, df[ap_name].values)
plt.xlabel('IoU threshold')
plt.ylabel('Average precision')
plt.title(title)
plt.legend(names)
fig.savefig(out_path)
if show:
plt.show()
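# --- Illustrative usage (not part of the original module) ---------------------
# plot_AP expects one dataframe per experiment with a 'threshold' column and an
# 'average_precision' column; the values below are invented and the output
# filename is a throwaway placeholder.
def _plot_AP_demo(show=False):
    thresholds = np.linspace(0.5, 0.95, 10)
    dfs = [pd.DataFrame({'threshold': thresholds,
                         'average_precision': np.clip(1.0 - s * thresholds, 0.0, 1.0)})
           for s in (0.8, 1.0)]
    plot_AP(dfs, ['model A', 'model B'], 'demo_AP.png', 'Average precision demo', show=show)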
# ------------------------------
# Object Number Difference Plots
# ------------------------------
def plot_experiment_no_diff(paths, names, title, out_dir, out_name, col_name='n_diff', show=True):
dfs = [pd.read_csv(path) for path in paths]
plt.rcParams.update({'font.size': 16})
os.makedirs(out_dir, exist_ok=True)
out_path = os.path.join(out_dir, out_name)
groups = []
n_diff = []
for i, df in enumerate(dfs):
vals = df[col_name].values
n_diff.append(vals)
groups += [names[i]] * len(df)
x = 'Experiment'
data = {
x : groups,
'n_diff' : np.concatenate(n_diff),
}
data = pd.DataFrame(data)
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
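    # 0b00101101 has bits 0, 2, 3 and 5 set, so rows 0, 2, 3 and 5 of `data`
    # are valid and rows 1 and 4 are null (cf. `validids` below).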
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
# valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
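        # Expand the packed bitmask to a per-row boolean mask so the host
        # reference values can be patched to match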
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
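    # 20 of the 100 values are now NaN; nan_as_null decides whether they
    # surface as nulls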
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
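    # Expand the packed bitmask so validity can be checked row by row below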
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
    if dtype1 != dtype2 and ("datetime" in dtype1 or "datetime" in dtype2):
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # cudf.DataFrame.to_pandas() upcasts numerical columns to float and
    # casts nan to 0 in non-float numerical columns, so cast to float64
    # and fill nulls with NaN before comparing
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
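    # The assigned values should adopt gdf's existing (non-default) index
    # rather than a fresh RangeIndex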
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
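    # replace_schema_metadata(None) drops the pandas metadata embedded by
    # from_pandas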
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
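    # Use a named, non-default index so preserve_index has an index to
    # round-trip through Arrow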
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash of a string name varies between runs, which sometimes
    # makes enc_with_name_arr and enc_arr come out the same, and there is
    # no better way to force a stable hash value. Use an integer name so
    # hash() returns a constant value.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
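    # randint(1, 2) always yields 1, so the mask keeps every row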
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
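    # After masking, the frame's index and each column's index should share
    # the same device buffer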
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
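    # np.digitize expects monotonically increasing bin edges, hence the
    # sort + unique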
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguious():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
# Typecast Pandas because None will return `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(gdf_new_name, pdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_mulitindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas only added ignore_index to sort_index in 1.0, so emulate it
    # below with reset_index
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
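    # pa.array(..., from_pandas=True) converts NaN to null, mirroring
    # cudf's nan_as_null=True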
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
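    # the host copy (to_array) and device copy (to_gpu_array) should fill
    # nulls with the same value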
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
    for c in gdf.columns:
        assert np.array_equal(
            gdf[c].nullmask.to_array(), result[c].nullmask.to_array()
        )
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
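    # the frame's __sizeof__ should equal the index size plus the sum of the
    # column sizes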
rows = int(1e6)
index = list(i for i in range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
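    # sort by index before comparing since value_counts ordering is not
    # guaranteed; check_dtype=False because the count dtype can differ
    # from pandas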
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
except ValueError as e:
if str(e) == "Lengths must match.":
pytest.xfail(
not PANDAS_GE_110,
"https://github.com/pandas-dev/pandas/issues/34256",
)
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
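    # casting an empty DataFrame should be a no-op for any target dtype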
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
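    # pass ddof explicitly for var/std so pandas and cudf apply the same
    # normalization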
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
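    # every row contains at least one null, so with skipna=False each
    # row-wise result should be null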
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
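    # with skipna=False a null in column "b" makes the row result null, while
    # NaN (kept as a value via nan_as_null=False) propagates as NaN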
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
# Special Case: Assume RangeIndex size == 0
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
# Pandas uses NaN and typecasts to float64 if there's missing values on
# alignment, so need to typecast to float64 for equality comparison
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
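    # constructing a DataFrame from a Table built without an index should
    # not raise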
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": | pd.Series([1, 2, 3, 2, 5]) | pandas.Series |