prompt (stringlengths 19-1.03M) | completion (stringlengths 4-2.12k) | api (stringlengths 8-90) |
---|---|---|
import os
import geopandas as gpd
import pandas as pd
import pytest
import trackintel as ti
from geopandas.testing import assert_geodataframe_equal
from pandas.testing import assert_frame_equal, assert_index_equal
from shapely.geometry import Point, Polygon, MultiPoint
from trackintel.io.from_geopandas import (
_trackintel_model,
read_locations_gpd,
read_positionfixes_gpd,
read_staypoints_gpd,
read_tours_gpd,
read_triplegs_gpd,
read_trips_gpd,
)
@pytest.fixture()
def example_positionfixes():
"""Model conform positionfixes to test with."""
p1 = Point(8.5067847, 47.4)
p2 = Point(8.5067847, 47.5)
p3 = Point(8.5067847, 47.6)
t1 = pd.Timestamp("1971-01-01 04:00:00", tz="utc")
t2 = pd.Timestamp("1971-01-01 05:00:00", tz="utc")
t3 = pd.Timestamp("1971-01-02 07:00:00", tz="utc")
list_dict = [
{"user_id": 0, "tracked_at": t1, "geom": p1},
{"user_id": 0, "tracked_at": t2, "geom": p2},
{"user_id": 1, "tracked_at": t3, "geom": p3},
]
pfs = gpd.GeoDataFrame(data=list_dict, geometry="geom", crs="EPSG:4326")
pfs.index.name = "id"
assert pfs.as_positionfixes
return pfs
class Test_Trackintel_Model:
"""Test `_trackintel_model()` function."""
def test_renaming(self, example_positionfixes):
"""Test renaming of columns."""
example_positionfixes["additional_col"] = [11, 22, 33]
pfs = example_positionfixes.copy()
# create new column mapping and revert it
columns = {"user_id": "_user_id", "tracked_at": "_tracked_at", "additional_col": "_additional_col"}
columns_rev = {val: key for key, val in columns.items()}
# check if columns get renamed correctly
pfs.rename(columns=columns, inplace=True)
pfs = _trackintel_model(pfs, columns_rev)
assert_geodataframe_equal(example_positionfixes, pfs)
def test_setting_geometry(self, example_positionfixes):
"""Test the setting of the geometry."""
# create pfs as dataframe
pfs = pd.DataFrame(example_positionfixes[["user_id", "tracked_at"]], copy=True)
pfs["geom"] = example_positionfixes.geometry
# check if geom column gets assigned to geometry
pfs = _trackintel_model(pfs, geom_col="geom")
assert_geodataframe_equal(example_positionfixes, pfs)
def test_set_crs(self, example_positionfixes):
"""Test if crs will be set."""
pfs = example_positionfixes.copy()
example_positionfixes.crs = "EPSG:2056"
# check if the crs is correctly set
pfs.crs = None
pfs = _trackintel_model(pfs, crs="EPSG:2056")
assert_geodataframe_equal(example_positionfixes, pfs)
def test_already_set_geometry(self, example_positionfixes):
"""Test if default checks if GeoDataFrame already has a geometry."""
pfs = _trackintel_model(example_positionfixes)
assert_geodataframe_equal(pfs, example_positionfixes)
def test_error_no_set_geometry(self, example_positionfixes):
"""Test if AttributeError will be raised if no geom_col is provided and GeoDataFrame has no geometry."""
pfs = gpd.GeoDataFrame(example_positionfixes[["user_id", "tracked_at"]])
with pytest.raises(AttributeError):
_trackintel_model(pfs)
def test_tz_cols(self, example_positionfixes):
"""Test if columns get casted to datetimes."""
pfs = example_positionfixes.copy()
pfs["tracked_at"] = ["1971-01-01 04:00:00", "1971-01-01 05:00:00", "1971-01-02 07:00:00"]
pfs = _trackintel_model(pfs, tz_cols=["tracked_at"], tz="UTC")
assert_geodataframe_equal(pfs, example_positionfixes)
def test_multiple_timezones_in_col(self, example_positionfixes):
"""Test if datetimes in column don't have the same timezone get casted to UTC."""
example_positionfixes["tracked_at"] = [
| pd.Timestamp("2021-08-01 16:00:00", tz="Europe/Amsterdam") | pandas.Timestamp |
import scipy
import pandas
import numpy
import sys
import unittest
import unittest.mock
import tempfile
import os
import io
import copy
import warnings
import random
import string
import plotly
from datetime import datetime, timedelta
sys.path.append("..")
import nPYc
from nPYc.enumerations import VariableType, AssayRole, SampleType, QuantificationType, CalibrationMethod
from generateTestDataset import generateTestDataset
class test_plotting(unittest.TestCase):
def setUp(self):
# Feature1 has lowest LLOQ and ULOQ in batch1, feature2 has lowest LLOQ and ULOQ in batch2
# On feature1 and feature2, Sample1 will be <LLOQ, Sample2 >ULOQ, Sample3 same as input
self.targetedDataset = nPYc.TargetedDataset('', fileType='empty')
self.targetedDataset.name = 'unittest'
self.targetedDataset.sampleMetadata = pandas.DataFrame({'Sample File Name': ['UnitTest_targeted_file_001',
'UnitTest_targeted_file_002',
'UnitTest_targeted_file_003'],
'Sample Name': ['Sample1-B1', 'Sample2-B2',
'Sample3-B2'],
'Sample Type': ['Analyte', 'Analyte', 'Analyte'],
'Acqu Date': ['10-Sep-16', '10-Sep-16', '10-Sep-16'],
'Acqu Time': ['03:23:02', '04:52:35', '05:46:40'],
'Vial': ['1:A,2', '1:A,3', '1:A,4'],
'Instrument': ['XEVO-TQS#UnitTest', 'XEVO-TQS#UnitTest',
'XEVO-TQS#UnitTest'],
'Acquired Time': [datetime(2016, 9, 10, 3, 23, 2),
datetime(2016, 9, 10, 4, 52, 35),
datetime(2016, 9, 10, 5, 46, 40)],
'Run Order': [0, 1, 2], 'Batch': [1, 2, 2],
'AssayRole': [AssayRole.Assay, AssayRole.Assay,
AssayRole.Assay],
'SampleType': [SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample],
'Dilution': [numpy.nan, numpy.nan, numpy.nan],
'Correction Batch': [numpy.nan, numpy.nan, numpy.nan],
'Subject ID': ['', '', ''], 'Sample ID': ['', '', ''],
'Sample Base Name': ['', '', ''],
'Exclusion Details': ['', '', '']})
self.targetedDataset.sampleMetadata['Acquired Time'] = self.targetedDataset.sampleMetadata['Acquired Time'].dt.to_pydatetime()
self.targetedDataset.featureMetadata = pandas.DataFrame(
    {'Feature Name': ['Feature1', 'Feature2'],
     'TargetLynx Feature ID': [1, 2],
     'calibrationMethod': [CalibrationMethod.backcalculatedIS, CalibrationMethod.backcalculatedIS],
     'quantificationType': [QuantificationType.QuantAltLabeledAnalogue, QuantificationType.QuantAltLabeledAnalogue],
     'unitCorrectionFactor': [1., 1],
     'Unit': ['a Unit', 'pg/uL'],
     'Cpd Info': ['info cpd1', 'info cpd2'],
     'LLOQ_batch1': [5., 20.],
     'LLOQ_batch2': [20., 5.],
     'ULOQ_batch1': [80., 100.],
     'ULOQ_batch2': [100., 80.],
     'LLOQ': [20., 20.],
     'ULOQ': [80., 80.],
     'extID1': ['F1', 'F2'],
     'extID2': ['ID1', 'ID2']})
self.targetedDataset._intensityData = numpy.array([[10., 10.], [90., 90.], [25., 70.]])
self.targetedDataset.expectedConcentration = pandas.DataFrame(numpy.array([[40., 60.], [40., 60.], [40., 60.]]), columns=self.targetedDataset.featureMetadata['Feature Name'].values.tolist())
self.targetedDataset.sampleMetadataExcluded = []
self.targetedDataset.featureMetadataExcluded = []
self.targetedDataset.intensityDataExcluded = []
self.targetedDataset.expectedConcentrationExcluded = []
self.targetedDataset.excludedFlag = []
self.targetedDataset.calibration = dict()
self.targetedDataset.calibration['calibIntensityData'] = numpy.ndarray((0, 2))
self.targetedDataset.calibration['calibSampleMetadata'] = pandas.DataFrame()
self.targetedDataset.calibration['calibFeatureMetadata'] = | pandas.DataFrame(index=['Feature1', 'Feature2'], columns=['Feature Name']) | pandas.DataFrame |
import numpy as np
import pandas.util.testing as tm
from pandas import (DataFrame, Series, DatetimeIndex, MultiIndex, Index,
date_range)
from .pandas_vb_common import lib
class Reindex(object):
goal_time = 0.2
def setup(self):
rng = DatetimeIndex(start='1/1/1970', periods=10000, freq='1min')
self.df = DataFrame(np.random.rand(10000, 10), index=rng,
columns=range(10))
self.df['foo'] = 'bar'
self.rng_subset = | Index(rng[::2]) | pandas.Index |
__author__ = 'tomrunner21'
from PIL import Image
import requests
import os
import numpy as np
import pandas as pd
from googlesearch import search
from bs4 import BeautifulSoup as soup
from urllib.request import Request, urlopen, urlretrieve
from urllib.parse import urlparse
from time import sleep
################################################################
def image_save(link, area):
    # download the image from `link` to the path `area`, then open and display it
    urlretrieve(link, area)
    img = Image.open(area)
    img.show()
################################################################
def just_image(page_url, save_name, count):
# count = 0
html_page = requests.get(page_url)
soup_ = soup(html_page.content, 'html.parser')
warning = soup_.find('h3', class_="auch3")
book_container = warning.nextSibling.nextSibling
images = book_container.findAll('img')
image = book_container.findAll(attrs = {'class': 'img-responsive imgnobdr'})
for i in image:
testing = i.attrs['src']
j = str(testing)
saved_file = str(save_name) + "_" + str(count) + ".jpg"
area = "/content/drive/MyDrive/auction image upload tests/" + str(save_name) + "/" + saved_file
# image_save(j, area)
count += 1
sleep(2)
return count
################################################################
def single_page(url):
'''
url: input URL
returns: list of auctions
'''
titles = []
req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
html = soup(webpage, "html.parser")
title_blocks = html.find_all(attrs = {'class': 'text'})
for names in title_blocks:
test = (names.get_text(separator="\t"))
tt = test.split('\t')
titles.append(tt[0])
return titles
################################################################
def clean_currency(x):
"""
If the value is a string, remove the currency symbol and delimiters;
otherwise the value is already numeric and can be converted directly.
"""
if isinstance(x, str):
return(x.replace('$', '').replace(',', ''))
return(x)
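# Minimal usage sketch (synthetic values; the column name 'price' is an
# assumption for illustration only) showing clean_currency feeding a numeric cast:
def _demo_clean_currency():
    demo = pd.DataFrame({'price': ['$1,200.50', 950, '$87']})
    demo['price'] = demo['price'].apply(clean_currency).astype(float)
    return demo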
################################################################
def get_multiple_pages(url, max_pages):
titles = []
pages = np.arange(1, (int(max_pages)+1), 1)
for page in pages:
url2 = (url + "?Page=" + str(page))
req = Request(url2, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
html = soup(webpage, "html.parser")
title_blocks = html.find_all(attrs = {'class': 'text'})
for names2 in title_blocks:
test = (names2.get_text(separator="\t"))
tt = test.split('\t')
titles.append(tt[0])
# sleep(7) #took me about 6-7 seconds to slowly scroll on computer at a slow to average speed
return titles
################################################################
def get_multiple_prices(url, max_pages):
list_of_prices = []
pages = np.arange(1, (int(max_pages)+1), 1)
for page in pages:
wide_url = (url + "?Page=" + str(page))
req = Request(wide_url, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
html = soup(webpage, "html.parser")
price_blocks = html.find_all(attrs = {'class': 'current-top'})
for price in price_blocks:
text = price.findChild().findChild().get_text()
list_of_prices.append(text)
# sleep(7)
return list_of_prices
################################################################
def get_google(dfIn, column):
'''
dfIn: input DataFrame
column: a column containing search terms
'''
dfOut = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 4 05:29:48 2020
@author: rahikalantari
"""
import pandas as pd
import numpy as np
from datetime import datetime as dt
task = 'future'  # 'historic' or 'future'
event = 'cases'  # 'cases' or 'death'
if task == 'historic':
foldername = 'Historic'
else:
foldername = 'Future'
TP = -23
if event == 'death':
death_mean = pd.read_csv('results/'+foldername+'_prediction/death_mean_.csv')
death_mean.drop(death_mean.loc[death_mean['Var1']=='StateX'].index, inplace=True)
death_mean =death_mean.reset_index(drop=True)
death_mean = death_mean.rename(columns=death_mean.loc[0,:])
death_mean = death_mean.iloc[:,:]
death_mean = (death_mean.drop(index=0))#.drop('01/22/0020',axis=1)
death_lower = pd.read_csv('results/'+foldername+'_prediction/death_lowerBound_.csv')
death_lower.drop(death_lower.loc[death_lower['Var1']=='StateX'].index, inplace=True)
death_lower =death_lower.reset_index(drop=True)
death_lower = death_lower.rename(columns=death_lower.loc[0,:])
death_lower = (death_lower.drop(index=0))#.drop('01/22/0020',axis=1)
death_lower = death_lower.iloc[:,:]
death_higher = pd.read_csv('results/'+foldername+'_prediction/death_upperBound_.csv')
death_higher.drop(death_higher.loc[death_higher['Var1']=='StateX'].index, inplace=True)
death_higher =death_higher.reset_index(drop=True)
death_higher = death_higher.rename(columns=death_higher.loc[0,:])
death_higher = (death_higher.drop(index=0))
death_higher= death_higher.iloc[:,:]
# death_mean=(death_mean.drop(index=0))#.drop('01/22/0020',axis=1)
# death_lower = pd.read_csv('results/'+foldername+'_prediction/death_lowerBound_.csv')
# death_lower = death_lower.rename(columns=death_lower.loc[0,:])
# death_lower = (death_lower.drop(index=0))#.drop('01/22/0020',axis=1)
# death_higher = pd.read_csv('results/'+foldername+'_prediction/death_upperBound_.csv')
# death_higher = death_higher.rename(columns=death_higher.loc[0,:])
# death_higher = (death_higher.drop(index=0))#.drop('01/22/0020',axis=1)
realdata_death = realdata = pd.read_csv('data/new_death_cases_2020_08_23.csv')
for i in range(1,53):
death_mean.loc[i,'3/15/20':] = pd.to_numeric(death_mean.loc[i,'3/15/20':],errors='coerce')
death_lower.loc[i,'3/15/20':] = pd.to_numeric(death_lower.loc[i,'3/15/20':],errors='coerce')
death_higher.loc[i,'3/15/20':] = pd.to_numeric(death_higher.loc[i,'3/15/20':],errors='coerce')
death_mean.to_csv ('results/'+foldername+'_prediction/death_mean_2020_08_31.csv', index = False, header=True)
death_lower.to_csv ('results/'+foldername+'_prediction/death_lowerBound_2020_08_31.csv', index = False, header=True)
death_higher.to_csv ('results/'+foldername+'_prediction/death_upperBound_2020_08_31.csv', index = False, header=True)
realdata_death.loc[51] = realdata_death.sum(axis=0)
realdata_death["Province_State"].loc[51] = "US"
real_death_col = pd.melt(realdata_death, id_vars=['Province_State'], var_name='date', value_name='real_number_of_deaths')
# results_death.to_csv ('results/Future_prediction/furture_death_.csv', index = True, header=True)
death_mean = pd.read_csv('results/'+foldername+'_prediction/death_mean_2020_08_31.csv')
death_mean.loc[51] = death_mean.loc[0:50].sum(axis=0)
death_mean["Province_State"].loc[51] = "US"
death_lower = pd.read_csv('results/'+foldername+'_prediction/death_lowerBound_2020_08_31.csv')
death_higher = pd.read_csv('results/'+foldername+'_prediction/death_upperBound_2020_08_31.csv')
death_mean_col = pd.melt(death_mean, id_vars=['Province_State'], var_name='date', value_name='number_of_deaths')
death_lower_col = pd.melt(death_lower, id_vars=['Province_State'], var_name='date', value_name='number_of_deaths_lower')
death_higher_col = pd.melt(death_higher, id_vars=['Province_State'], var_name='date', value_name='number_of_deaths_higher')
results_death= pd.merge(real_death_col, death_mean_col, how='outer', on=['Province_State', 'date'])
results_death= pd.merge(results_death, death_lower_col, how='outer', on=['Province_State', 'date'])
results_death= pd.merge(results_death, death_higher_col, how='outer', on=['Province_State', 'date'])
#results_death= pd.merge(results_death, real_death_col, how='outer', on=['Province_State', 'date'])
results_death.to_csv ('results/'+foldername+'_prediction/'+foldername+'__death_2020_08_31.csv', index = True, header=True)
else:
cases_mean = pd.read_csv('results/'+foldername+'_prediction/daily_cases_mean_.csv')
cases_mean.drop(cases_mean.loc[cases_mean['Var1']=='StateX'].index, inplace=True)
cases_mean = cases_mean.reset_index(drop=True)
cases_mean = cases_mean.rename(columns=cases_mean.iloc[0,:])
cases_mean = cases_mean.iloc[:,:]
cases_mean=(cases_mean.drop(index=0))#.drop('01/22/0020',axis=1)
cases_lower = pd.read_csv('results/'+foldername+'_prediction/daily_cases_lowerBound_.csv')
cases_lower.drop(cases_lower.loc[cases_lower['Var1']=='StateX'].index, inplace=True)
cases_lower = cases_lower.reset_index(drop=True)
cases_lower = cases_lower.rename(columns=cases_lower.iloc[0,:])
cases_lower = cases_lower.iloc[:,:]
cases_lower = (cases_lower.drop(index=0))#.drop('01/22/0020',axis=1)
cases_higher = pd.read_csv('results/'+foldername+'_prediction/daily_cases_upperBound_.csv')
cases_higher.drop(cases_higher.loc[cases_higher['Var1']=='StateX'].index, inplace=True)
cases_higher = cases_higher.reset_index(drop=True)
cases_higher = cases_higher.rename(columns=cases_higher.iloc[0,:])
cases_higher = cases_higher.iloc[:,:]
cases_higher = (cases_higher.drop(index=0))#.drop('01/22/0020',axis=1)
realdata_cases = realdata = pd.read_csv('data/new_daily_cases_2020_08_23.csv')
for i in range(1,53):
cases_mean.loc[i,'3/15/20':] = pd.to_numeric(cases_mean.loc[i,'3/15/20':],errors='coerce')
cases_lower.loc[i,'3/15/20':] = pd.to_numeric(cases_lower.loc[i,'3/15/20':],errors='coerce')
cases_higher.loc[i,'3/15/20':] = pd.to_numeric(cases_higher.loc[i,'3/15/20':],errors='coerce')
cases_mean.to_csv ('results/'+foldername+'_prediction/daily_cases_mean_2020_08_31.csv', index = False, header=True)
cases_lower.to_csv ('results/'+foldername+'_prediction/daily_cases_lowerBound_2020_08_31.csv', index = False, header=True)
cases_higher.to_csv ('results/'+foldername+'_prediction/daily_cases_upperBound_2020_08_31.csv', index = False, header=True)
realdata_cases.loc[51] = realdata_cases.sum(axis=0)
realdata_cases["Province_State"].loc[51] = "US"
real_cases_col = pd.melt(realdata_cases, id_vars=['Province_State'], var_name='date', value_name='real_number_of_daily_cases')
# cases_mean_col = pd.melt(cases_mean, id_vars=['Province_State'], var_name='date', value_name='number_of_deaths')
# cases_lower_col = pd.melt(cases_lower, id_vars=['Province_State'], var_name='date', value_name='number_of_deaths_lower')
# cases_higher_col = pd.melt(cases_higher, id_vars=['Province_State'], var_name='date', value_name='number_of_deaths_higher')
# results_cases= pd.merge(real_cases_col, cases_mean_col, how='outer', on=['Province_State', 'date'])
# results_cases= pd.merge(results_cases, cases_lower_col, how='outer', on=['Province_State', 'date'])
# results_cases= pd.merge(results_cases, cases_higher_col, how='outer', on=['Province_State', 'date'])
# #results_death= pd.merge(results_death, real_death_col, how='outer', on=['Province_State', 'date'])
# results_cases.to_csv ('results/'+foldername+'_prediction/furture_death_.csv', index = True, header=True)
cases_mean = pd.read_csv('results/'+foldername+'_prediction/daily_cases_mean_2020_08_31.csv')
cases_mean.loc[51] = cases_mean.loc[0:50].sum(axis=0)
cases_mean["Province_State"].loc[51] = "US"
cases_lower = | pd.read_csv('results/'+foldername+'_prediction/daily_cases_lowerBound_2020_08_31.csv') | pandas.read_csv |
import pandas as pd
import requests
from pathlib import Path
from tqdm.auto import tqdm
tqdm.pandas()
rki_to_iso = {0: 'DE',
1: 'DE-SH',
2: 'DE-HH',
3: 'DE-NI',
4: 'DE-HB',
5: 'DE-NW',
6: 'DE-HE',
7: 'DE-RP',
8: 'DE-BW',
9: 'DE-BY',
10: 'DE-SL',
11: 'DE-BE',
12: 'DE-BB',
13: 'DE-MV',
14: 'DE-SN',
15: 'DE-ST',
16: 'DE-TH'}
def process_data(df):
df['location'] = df.Bundesland_Id.replace(rki_to_iso)
df.drop(columns = ['Bundesland', 'Bundesland_Id', '7T_Hospitalisierung_Inzidenz'], inplace = True, errors = 'ignore')
df.rename({'Datum': 'date', 'Altersgruppe': 'age_group','7T_Hospitalisierung_Faelle': 'value'},
axis = 'columns', inplace = True)
df = df[['date', 'location', 'age_group', 'value']]
return df
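# Minimal sketch (synthetic row; the values are assumptions for illustration)
# of what process_data yields for one archive record:
def _demo_process_data():
    demo = pd.DataFrame({'Datum': ['2021-11-01'],
                         'Bundesland': ['Bayern'],
                         'Bundesland_Id': [9],
                         'Altersgruppe': ['00+'],
                         '7T_Hospitalisierung_Faelle': [1234],
                         '7T_Hospitalisierung_Inzidenz': [9.4]})
    # -> columns ['date', 'location', 'age_group', 'value'] with location 'DE-BY'
    return process_data(demo)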
# get path of all available files
url = "https://api.github.com/repos/robert-koch-institut/COVID-19-Hospitalisierungen_in_Deutschland/git/trees/master?recursive=1"
r = requests.get(url)
res = r.json()
files = [file["path"] for file in res["tree"] if (file["path"].startswith('Archiv/') and file["path"].endswith('Deutschland_COVID-19-Hospitalisierungen.csv'))]
df_files = pd.DataFrame({'filename':files})
# extract dates from filenames
df_files['date'] = df_files.filename.apply(lambda f: f.split('/')[1][:10])
df_files.date = | pd.to_datetime(df_files.date) | pandas.to_datetime |
"""
Created on 21 Mar 2018
@author: clebson
"""
import os
import calendar as ca
import numpy as np
import pandas as pd
from hidrocomp.files.fileRead import FileRead
from hydro_api.ana.hidro import Stations, SerieTemporal
from hydro_api.ana.sar import Reservoirs
class Ana(FileRead):
"""
class files read: Agência Nacinal de Águas - ANA
"""
typesData = {'FLUVIOMÉTRICO': ['Vazao{:02}', 'vazoes', '3'],
'PLUVIOMÉTRICO': ['Chuva{:02}', 'chuvas', '2'],
'COTA': ['Cota{:02}', 'cotas', '1']}
source = "ANA"
extension = "txt"
def __init__(self, path_file=None, station=None, type_data='FLUVIOMÉTRICO', consistence='1', date_start='',
date_end='', *args, **kwargs):
super().__init__(path_file=path_file, station=station, *args, **kwargs)
self.consistence = consistence
self.date_start = date_start
self.date_end = date_end
self.type_data = type_data.upper()
if 'tz' in kwargs:
self.tz = kwargs['tz']
else:
self.tz = None
self.data, inf = self.read(self.name)
self.inf_stations = inf
try:
self.mean = kwargs["mean"]
except KeyError:
self.mean = None
def list_files(self):
return super().list_files()
def read(self, name=None):
if self.api:
if name is None:
self.name = self.path
return super().read()
else:
self.name = name
_data, inf = self.hydro_series_historical()
inf_stations = {self.name: inf}
data = self.__excludes_duplicates(_data)
return data, inf_stations
else:
if name is None or name is not list:
self.name = self.list_files()
return super().read()
else:
self.name = name
data = self.__excludes_duplicates(self.__readTxt())
return data
def __lines(self):
list_lines = []
with open(os.path.join(self.path, self.name + '.' + Ana.extension),
encoding="Latin-1") as file:
l = 0
for line in file.readlines():
if line.split(";")[0] == "EstacaoCodigo":
l = 1
list_lines.append(line.split(";"))
elif l == 1:
list_lines.append(line.split(";"))
return list_lines
def __multIndex(self, date, days, consistence):
if date.day == 1:
n_days = days
else:
n_days = days - date.day
list_date = pd.date_range(date, periods=n_days, freq="D")
list_cons = [int(consistence)] * n_days
index_multi = list(zip(*[list_date, list_cons]))
return pd.MultiIndex.from_tuples(index_multi, names=["Date", "Consistence"])
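# note: the helper above expands one monthly record into a (Date, Consistence)
# MultiIndex covering the remaining days of that month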
def __readTxt(self):
list_lines = self.__lines()
data_flow = list()
count = 0
for line in list_lines:
count += 1
if count == 1:
idx_code = line.index("EstacaoCodigo")
start_flow = line.index(Ana.typesData[self.type_data][0].format(1))
idx_date = line.index("Data")
idx_cons = line.index("NivelConsistencia")
elif count >= 2:
code = line[idx_code]
date = pd.to_datetime(line[idx_date], dayfirst=True)
days = ca.monthrange(date.year, date.month)[1]
consistence = line[idx_cons]
index = self.__multIndex(date, days, consistence)
idx_flow = [i for i in range(start_flow, start_flow + days)]
list_flow = [np.NaN if line[i] == "" else float(
line[i].replace(",", ".")) for i in idx_flow]
data_flow.append(
pd.Series(list_flow, index=index, name="{}_{}".format(code, self.type_data[:3])))
data_flow = pd.DataFrame( | pd.concat(data_flow) | pandas.concat |
import datetime
import matplotlib
import numpy as np
import pandas as pd
import pytz
from finrl.config import config
from finrl.marketdata.utils import fetch_and_store, load
from finrl.preprocessing.preprocessors import FeatureEngineer
from finrl.preprocessing.data import calculate_split, data_split
from finrl.env.env_stocktrading import StockTradingEnv
from finrl.model.models import DRLAgent
from finrl.trade.backtest import backtest_stats, backtest_plot
matplotlib.use("Agg")
def train_one(fetch=False):
"""
train an agent
"""
if fetch:
df = fetch_and_store()
else:
df = load()
counts = df[['date', 'tic']].groupby(['date']).count().tic
assert counts.min() == counts.max()
print("==============Start Feature Engineering===========")
fe = FeatureEngineer(
use_technical_indicator=True,
tech_indicator_list=config.TECHNICAL_INDICATORS_LIST,
use_turbulence=True,
# use_turbulence=False,
user_defined_feature=False,
)
processed = fe.preprocess_data(df)
# Training & Trading data split
start_date, trade_date, end_date = calculate_split(df, start=config.START_DATE)
print(start_date, trade_date, end_date)
train = data_split(processed, start_date, trade_date)
trade = data_split(processed, trade_date, end_date)
print(f'\n******\nRunning from {start_date} to {end_date} for:\n{", ".join(config.CRYPTO_TICKER)}\n******\n')
# calculate state action space
stock_dimension = len(train.tic.unique())
state_space = (1 + (2 * stock_dimension) + (len(config.TECHNICAL_INDICATORS_LIST) * stock_dimension))
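# (a reading of the formula above, noted here: one cash entry, plus a price
#  and a holdings entry per asset, plus one value per technical indicator per asset)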
env_kwargs = {
"hmax": 100,
"initial_amount": 100000,
"buy_cost_pct": 0.0026,
"sell_cost_pct": 0.0026,
"state_space": state_space,
"stock_dim": stock_dimension,
"tech_indicator_list": config.TECHNICAL_INDICATORS_LIST,
"action_space": stock_dimension,
"reward_scaling": 1e-4
}
e_train_gym = StockTradingEnv(df=train, **env_kwargs)
e_trade_gym = StockTradingEnv(df=trade, turbulence_threshold=250, make_plots=True, **env_kwargs)
env_train, _ = e_train_gym.get_sb_env()
env_trade, obs_trade = e_trade_gym.get_sb_env()
agent = DRLAgent(env=env_train)
print("==============Model Training===========")
now = datetime.datetime.now().strftime(config.DATETIME_FMT)
model_sac = agent.get_model("sac")
trained_sac = agent.train_model(
model=model_sac,
tb_log_name="sac",
# total_timesteps=100
total_timesteps=80000
)
print("==============Start Trading===========")
df_account_value, df_actions = DRLAgent.DRL_prediction(
# model=trained_sac, test_data=trade, test_env=env_trade, test_obs=obs_trade
trained_sac,
e_trade_gym)
df_account_value.to_csv(f"./{config.RESULTS_DIR}/df_account_value_{now}.csv")
df_actions.to_csv(f"./{config.RESULTS_DIR}/df_actions_{now}.csv")
df_txns = pd.DataFrame(e_trade_gym.transactions, columns=['date', 'amount', 'price', 'symbol'])
df_txns = df_txns.set_index(pd.DatetimeIndex(df_txns['date'], tz=pytz.utc))
df_txns.to_csv(f'./{config.RESULTS_DIR}/df_txns_{now}.csv')
df_positions = pd.DataFrame(e_trade_gym.positions, columns=['date', 'cash'] + config.CRYPTO_TICKER)
df_positions = df_positions.set_index(pd.DatetimeIndex(df_positions['date'], tz=pytz.utc)).drop(columns=['date'])
df_positions = df_positions.astype({col: np.float64 for col in df_positions.columns})  # cast all position columns to float
df_positions.to_csv(f'./{config.RESULTS_DIR}/df_positions_{now}.csv')
print("==============Get Backtest Results===========")
perf_stats_all = backtest_stats(df_account_value, transactions=df_txns, positions=df_positions)
perf_stats_all = | pd.DataFrame(perf_stats_all) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# ## openFDA Drug Event data parsing, processing, and output
# import libraries
# In[1]:
import os
import io
import urllib
import requests
import zipfile
import json
import time
import numpy as np
import pandas as pd
from pandas.io.json import json_normalize
# read in api token and put in header for api call
# In[2]:
api_token = pd.read_csv('../../.openFDA.params').api_key.values[0]
# In[3]:
headers = {'Content-Type': 'application/json',
'Authorization': 'Bearer {0}'.format(api_token)}
# get openFDA drug event links
# In[4]:
filehandle, _ = urllib.request.urlretrieve("https://api.fda.gov/download.json")
# In[5]:
with open(filehandle) as json_file:
data = json.load(json_file)
# how mmany records are there?
# In[6]:
data['results']['drug']['event']['total_records']
# how many files do we have?
# In[7]:
len(data['results']['drug']['event']['partitions'])
# put all files into a list
# In[8]:
drug_event_files = [x['file'] for x in data['results']['drug']['event']['partitions']]
# create output directory
# In[9]:
data_dir = "../../data/"
try:
os.mkdir(data_dir)
except:
print(data_dir+' exists')
out=data_dir+'openFDA_drug_event/'
try:
os.mkdir(out)
except:
print(out+' exists')
out_report=out+'report/'
try:
os.mkdir(out_report)
except:
print(out_report+' exists')
out_meta=out+'meta/'
try:
os.mkdir(out_meta)
except:
print(out_meta+' exists')
out_patient=out+'patient/'
try:
os.mkdir(out_patient)
except:
print(out_patient+' exists')
out_patient_drug=out+'patient_drug/'
try:
os.mkdir(out_patient_drug)
except:
print(out_patient_drug+' exists')
out_patient_drug_openfda=out+'patient_drug_openfda/'
try:
os.mkdir(out_patient_drug_openfda)
except:
print(out_patient_drug_openfda+' exists')
out_patient_drug_openfda_rxcui=out+'patient_drug_openfda_rxcui/'
try:
os.mkdir(out_patient_drug_openfda_rxcui)
except:
print(out_patient_drug_openfda_rxcui+' exists')
out_patient_reaction=out+'patient_reaction/'
try:
os.mkdir(out_patient_reaction)
except:
print(out_patient_reaction+' exists')
# ## drug event attributes
# ### get attributes
# In[10]:
filehandle, _ = urllib.request.urlretrieve('https://open.fda.gov/fields/drugevent.yaml')
# In[11]:
import yaml
with open(filehandle, 'r') as stream:
try:
attribute_map = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
# In[12]:
attribute_map['properties']
# ## functions
# ### retrive data from files
# In[13]:
def request_and_generate_data(drug_event_file,headers=headers,stream=True):
response = requests.get(drug_event_file,headers=headers,stream=True)
zip_file_object = zipfile.ZipFile(io.BytesIO(response.content))
first_file = zip_file_object.namelist()[0]
file = zip_file_object.open(first_file)
content = file.read()
data = json.loads(content.decode())
return data
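# Usage sketch (not from the original notebook): fetch and decode the first
# quarterly file listed above, e.g.
#   data = request_and_generate_data(drug_event_files[0])
#   results = json_normalize(data['results'])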
# ### report data formatting/mapping function
# In[14]:
def report_formatter(df):
attributes_dict = attribute_map['properties']
cols = np.intersect1d(list(attributes_dict.keys()),df.columns)
for col in cols:
try:
if attributes_dict[col]['possible_values']['type']=='one_of':
attributes_dict_col = attributes_dict[col]['possible_values']['value']
df[col] = df[col].astype(float)
df[col] = (df[col].
apply(lambda x : str(int(x)) if (x>=0) else x).
map(attributes_dict_col)
)
except:
pass
return df
# ### report primarysource formatting/mapping function
# In[15]:
def primarysource_formatter(df):
keyword = 'primarysource'
attributes_dict = attribute_map['properties'][keyword]['properties']
cols = np.intersect1d(list(attributes_dict.keys()),[x.replace(keyword+'.','') for x in df.columns])
for col in cols:
try:
if attributes_dict[col]['possible_values']['type']=='one_of':
attributes_dict_col = attributes_dict[col]['possible_values']['value']
df[keyword+'.'+col] = df[keyword+'.'+col].astype(float)
df[keyword+'.'+col] = (df[keyword+'.'+col].
apply(lambda x : str(int(x)) if (x>=0) else x).
map(attributes_dict_col)
)
except:
pass
return df
# ### report serious formatting/mapping function
# In[16]:
def report_serious_formatter(df):
attributes_dict = attribute_map['properties']
col = 'serious'
try:
attributes_dict_col = attributes_dict[col]['possible_values']['value']
df[col] = df[col].astype(float)
df[col] = (df[col].
apply(lambda x : str(int(x)) if (x>=0) else x).
map(attributes_dict_col)
)
except:
pass
return df
# ### patient data formatting/mapping function
# In[17]:
def patient_formatter(df):
attributes_dict = attribute_map['properties']['patient']['properties']
cols = np.intersect1d(list(attributes_dict.keys()),[x.replace('patient.','') for x in df.columns])
for col in cols:
try:
if attributes_dict[col]['possible_values']['type']=='one_of':
attributes_dict_col = attributes_dict[col]['possible_values']['value']
df['patient.'+col] = df['patient.'+col].astype(float)
df['patient.'+col] = (df['patient.'+col].
apply(lambda x : str(int(x)) if (x>=0) else x).
map(attributes_dict_col)
)
except:
pass
if 'date' in col:
df[col] = pd.to_datetime(df[col],infer_datetime_format=True)
aged = df.copy()
aged = aged[['patient.patientonsetage','patient.patientonsetageunit']].dropna()
year_reports = (aged[aged['patient.patientonsetageunit'].astype(str)=='Year']).index.values
month_reports = (aged[aged['patient.patientonsetageunit'].astype(str)=='Month']).index.values
day_reports = (aged[aged['patient.patientonsetageunit'].astype(str)=='Day']).index.values
decade_reports = (aged[aged['patient.patientonsetageunit'].astype(str)=='Decade']).index.values
week_reports = (aged[aged['patient.patientonsetageunit'].astype(str)=='Week']).index.values
hour_reports = (aged[aged['patient.patientonsetageunit'].astype(str)=='Hour']).index.values
aged['master_age'] = np.nan
aged['master_age'].loc[year_reports] = aged['patient.patientonsetage'].loc[year_reports].astype(int)
aged['master_age'].loc[month_reports] = aged['patient.patientonsetage'].loc[month_reports].astype(int)/12.
aged['master_age'].loc[week_reports] = aged['patient.patientonsetage'].loc[week_reports].astype(int)*7/365.  # weeks -> years
aged['master_age'].loc[day_reports] = aged['patient.patientonsetage'].loc[day_reports].astype(int)/365.
aged['master_age'].loc[decade_reports] = aged['patient.patientonsetage'].loc[decade_reports].astype(int)*10.
aged['master_age'].loc[hour_reports] = aged['patient.patientonsetage'].loc[hour_reports].astype(int)/365./24.
return df.join(aged[['master_age']])
# ### parse patient.drug data formatting/mapping function
# #### patient.drug formatting/mapping function
# In[18]:
def patient_drug_formatter(df):
attributes_dict = attribute_map['properties']['patient']['properties']['drug']['items']['properties']
cols = np.intersect1d(list(attributes_dict.keys()),df.columns)
for col in cols:
try:
if attributes_dict[col]['possible_values']['type']=='one_of':
attributes_dict_col = attributes_dict[col]['possible_values']['value']
df[col] = df[col].astype(float)
if col=='drugadministrationroute':
df[col] = (df[col].
apply(lambda x : ''.join(np.repeat('0',3-len(str(int(x)))))+str(int(x)) if (x>=0) else x).
map(attributes_dict_col)
)
else:
df[col] = (df[col].
apply(lambda x : str(int(x)) if (x>=0) else x).
map(attributes_dict_col)
)
except:
pass
return df
# #### main parser formatting/mapping function
# In[19]:
def parse_patient_drug_data(results):
dict_name = 'patient.drug'
patientdrugs = []
for reportid in results['safetyreportid'].unique():
lst = []
dict_or_list = results[[dict_name]].loc[reportid].values[0]
if type(dict_or_list)==dict:
lst.extend(dict_or_list)
if type(dict_or_list)==list:
lst = dict_or_list
if type(dict_or_list)==np.ndarray:
lst = dict_or_list[0]
for i,l in enumerate(lst):
l = l.copy()
dict_ = {}
try:
del l['openfda']
except:
pass
dict_[str(reportid)] = l
patientdrug = (pd.DataFrame(dict_).
T.
rename_axis('safetyreportid').
reset_index()
)
patientdrug['entry'] = i
patientdrugs.append(patientdrug)
allpatientdrugs = pd.concat(patientdrugs,sort=True)
cols_to_keep = allpatientdrugs.columns[[type(x)==str for x in allpatientdrugs.columns]]
return patient_drug_formatter(allpatientdrugs[cols_to_keep])
# ### patient.drug.openfda formatting/mapping function
# #### main parser formatting/mapping function
# In[20]:
def parse_patient_drug_openfda_data(results):
dict_name = 'patient.drug'
openfdas = []
for reportid in results['safetyreportid'].unique():
lst = []
dict_or_list = results[[dict_name]].loc[reportid].values[0]
if type(dict_or_list)==dict:
lst.extend(dict_or_list)
if type(dict_or_list)==list:
lst = dict_or_list
if type(dict_or_list)==np.ndarray:
lst = dict_or_list[0]
for i,l in enumerate(lst):
try:
openfda = (pd.concat(
{k: pd.Series(v) for k, v in l['openfda'].items()}
).
reset_index().
drop('level_1',axis=1).
rename(columns={'level_0' : 'key',0 : 'value'})
)
openfda['safetyreportid']=np.repeat(reportid,openfda.shape[0])
openfda['entry'] = i
openfdas.append(openfda)
except:
pass
openfdas_df = | pd.concat(openfdas,sort=True) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Usage: %(scriptName) <feature_files_prefix>
Performs grid search for capacity parameter of svm rank as in "Mapping Bug Reports to Relevant Files:
A Ranking Model, a Fine-Grained Benchmark, and Feature Evaluation"
Requires "svm_rank_learn" in the same folder
Requires results of save_normalized_fold_dataframes.py
Converts data to svm rank format:
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12
https://www.cs.cornell.edu/people/tj/svm_light/svm_rank.html
"""
import json
import numpy as np
import pandas as pd
import subprocess
import sys
from sklearn.datasets import dump_svmlight_file
from metrics import calculate_metrics, calculate_metric_results
def main():
file_prefix = sys.argv[1]
process(file_prefix)
def load_fold_number(file_prefix):
with open(file_prefix + '_fold_info', 'r') as f:
fold_info = json.load(f)
fold_number = fold_info['fold_number']
print('fold number', fold_number)
return fold_number
def process(file_prefix):
fold_number = load_fold_number(file_prefix)
fold_training = {}
fold_testing = {}
for k in range(fold_number + 1):
fold_training[k] = pd.read_pickle(file_prefix + '_normalized_training_fold_' + str(k))
fold_testing[k] = pd.read_pickle(file_prefix + '_normalized_testing_fold_' + str(k))
print('fold_training', str(k), 'shape', fold_training[k].shape)
print('fold_testing', str(k), 'shape', fold_testing[k].shape)
grid_search_data_training = fold_training[0].copy()
grid_search_data_testing = fold_testing[0].copy()
print('grid search data training shape', grid_search_data_training.shape)
print('grid search data testing shape', grid_search_data_testing.shape)
print('grid search data used in fix equal 1.0',
grid_search_data_training['used_in_fix'][(grid_search_data_training['used_in_fix'] == 1.0)].count())
print('grid search data used in fix equal 0.0',
grid_search_data_training['used_in_fix'][(grid_search_data_training['used_in_fix'] == 0.0)].count())
grid_search_data_null_columns = grid_search_data_training.columns[grid_search_data_training.isnull().any()]
print('grid search data null columns', grid_search_data_null_columns)
# exit(0)
k_range = range(1, 21)
c = run_grid_search(grid_search_data_training, grid_search_data_testing, file_prefix, k_range)
evaluate_algorithm(c, fold_training, fold_testing, fold_number, file_prefix, k_range)
def evaluate_algorithm(c, fold_training, fold_testing, fold_number, file_prefix, k_range):
print('Using svm rank c', c)
mean_accuracy_at_k = dict.fromkeys(k_range, 0)
mean_mean_average_precision = 0.0
mean_mean_reciprocal_rank = 0.0
weights_at_fold = {}
for i in range(fold_number):
print('training on fold', i)
current_training_file = save_svm_rank_data(fold_training[i], file_prefix + '_fold_training_' + str(i))
model_file_name = file_prefix + '_fold_model_' + str(i)
run_svm_rank(c, current_training_file, model_file_name)
weights = read_weights(model_file_name, 19)
weights_at_fold[i] = weights
print('testing on fold', i + 1)
testing_df = fold_testing[i + 1]
accuracy_at_k, mean_average_precision, mean_reciprocal_rank = check_average_precision(testing_df.copy(),
weights, k_range)
for k in k_range:
mean_accuracy_at_k[k] += accuracy_at_k[k]
mean_mean_average_precision += mean_average_precision
mean_mean_reciprocal_rank += mean_reciprocal_rank
print("Accuracy at k in k range", k_range)
for k in k_range:
mean_accuracy_at_k[k] = mean_accuracy_at_k[k] / fold_number
print(mean_accuracy_at_k[k])
mean_mean_average_precision = mean_mean_average_precision / fold_number
print("Mean mean average prediction", mean_mean_average_precision)
mean_mean_reciprocal_rank = mean_mean_reciprocal_rank / fold_number
print("Mean mean reciprocal rank", mean_mean_reciprocal_rank)
print("Evaluate on whole dataset")
all_data = []
for i in range(fold_number):
df = apply_weights(fold_testing[i + 1].copy(), weights_at_fold[i])
all_data.append(df)
all_data_df = | pd.concat(all_data) | pandas.concat |
from typing import List, Tuple
import pandas as pd
import altair as alt
import glob
import itertools as it
import hydra
from src.common.utils import log_hyperparameters, PROJECT_ROOT
from hydra.core.hydra_config import HydraConfig
import omegaconf
from omegaconf import DictConfig, OmegaConf
#Amazon 64d 1/2l
def run(cfg: DictConfig):
for m,ds,day in it.product(cfg.charts.models, cfg.charts.dataset, cfg.charts.days):
file = f"{m}_{ds}_{day}d_*.csv"
whole_df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import sys
sys.path.insert(0,"../../src")
import math
import functools
import time
import torch
import numpy as np
from scipy.special import gamma
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import emcee
import pandas as pd
import seaborn as sns
import pymc3
from source_1d_likelihood_fn import compute_log_likelihood
np.random.seed(100)
torch.manual_seed(100)
#%% This part of the code is not important.
def logit_t(x,a=0,b=1):
return torch.log(((x-a)/(b-a))/(1.0-(x-a)/(b-a)))
def sigmoid(x,a=0,b=1):
return (b-a)*1.0/(1.0+np.exp(-x)) + a
def dsigmoid(x,a=0,b=1):
return (b-a)*np.exp(x)/((1+np.exp(x))**2)
def exp(x):
return np.exp(x)
def dexp(x):
return np.exp(x)
def unwarped_logjoint_np(x0,Ts,q0,rho):
# x0,Ts,q0,rho = x[0],x[1],x[2],x[3]
ll = compute_log_likelihood(x0,Ts,q0,rho)
ll += -np.log(1+(q0/10.0)**2)
ll += -np.log(1+(rho/0.1)**2)
# ll += -lambd_q*q0
# ll += np.exp(-lambd_rho*rho) + np.exp(-lambd_q*q0)
return ll
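# logjoint_np below evaluates the same log-joint in an unconstrained space:
# x0 and Ts are sigmoid-warped to (0, 1) and (0, 0.4), q0 and rho are
# exp-warped to (0, inf), and the log-Jacobian of each transform is added
# (a descriptive note on the code, not a change to it)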
def logjoint_np(x):
x0,Ts,q0,rho = x[0],x[1],x[2],x[3]
ll = unwarped_logjoint_np(sigmoid(x0),sigmoid(Ts,b=0.4),
exp(q0),exp(rho)) + \
np.log(dsigmoid(x0)) + np.log(dsigmoid(Ts,b=0.4)) + \
np.log(dexp(q0)) + np.log(dexp(rho))
return ll
counter=0
def logjoint_emcee(x):
global counter
counter += 1
return logjoint_np(x)
#%%
ndim, nwalkers = 4, 10
p0 = [np.random.rand(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, logjoint_emcee)
sampler.run_mcmc(p0, 1)
#np.savez("testheat_1g_emcee_3",sampler=sampler)
#%% Data generation start here
sampler = np.load("testheat_1a_emcee.npz")["sampler"][()]
def sample_sampler(nsamples,sampler,burnin):
chain = sampler.flatchain[burnin:,:]
choices = np.random.choice(chain.shape[0],nsamples)
return chain[choices,:]
samples = sample_sampler(5000,sampler,10000)
samples[:,0] = sigmoid(samples[:,0])
samples[:,1] = sigmoid(samples[:,1],b=0.4)
samples[:,2] = exp(samples[:,2])
samples[:,3] = exp(samples[:,3])
for i in range(4):
print("Data %i"%i)
mean = samples[:,i].mean()
print(pymc3.stats.hpd(samples[:,i],0.3))
names = [r"$x_0$",r"$t_s$",r"$q_0$",r"$\rho$"]
datadict = dict([(names[i],samples[:,i]) for i in range(len(names))])
dataframe = | pd.DataFrame(datadict) | pandas.DataFrame |
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
import pytest
import numpy as np
import numpy.ma as ma
import pandas as pd
import scipy as sp
import math
from itertools import repeat, chain
from ..bin import *
from ..bin import _process_column_initial, _encode_categorical_existing, _process_continuous
class StringHolder:
def __init__(self, internal_str):
self.internal_str = internal_str
def __str__(self):
return self.internal_str
def __lt__(self, other):
return True # make all objects of this type identical to detect sorting failures
def __hash__(self):
return 0 # make all objects of this type identical to detect hashing failures
def __eq__(self,other):
return True # make all objects of this type identical to detect hashing failures
class DerivedStringHolder(StringHolder):
def __init__(self, internal_str):
StringHolder.__init__(self, internal_str)
class FloatHolder:
def __init__(self, internal_float):
self.internal_float = internal_float
def __float__(self):
return self.internal_float
def __lt__(self, other):
return True # make all objects of this type identical to detect sorting failures
def __hash__(self):
return 0 # make all objects of this type identical to detect hashing failures
def __eq__(self,other):
return True # make all objects of this type identical to detect hashing failures
class DerivedFloatHolder(FloatHolder):
def __init__(self, internal_float):
FloatHolder.__init__(self, internal_float)
class FloatAndStringHolder:
def __init__(self, internal_float, internal_str):
self.internal_float = internal_float
self.internal_str = internal_str
def __float__(self):
return self.internal_float
def __str__(self):
return self.internal_str
def __lt__(self, other):
return True # make all objects of this type identical to detect sorting failures
def __hash__(self):
return 0 # make all objects of this type identical to detect hashing failures
def __eq__(self,other):
return True # make all objects of this type identical to detect hashing failures
class DerivedFloatAndStringHolder(FloatAndStringHolder):
def __init__(self, internal_float, internal_str):
FloatAndStringHolder.__init__(self, internal_float, internal_str)
class NothingHolder:
# the result of calling str(..) includes the memory address, so they won't be dependable categories
def __init__(self, internal_str):
self.internal_str = internal_str
def check_pandas_normal(dtype, val1, val2):
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([val1, val2], dtype=np.object_), dtype=dtype)
feature_types_given = ['nominal']
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
X_cols = list(unify_columns(X, [(0, None)], feature_names_in, None))
assert(len(X_cols) == 1)
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 2)
assert(X_cols[0][1][0] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val2)])
c1 = {str(val1) : 1, str(val2) : 2}
X_cols = list(unify_columns(X, [(0, c1)], feature_names_in, feature_types_given))
assert(len(X_cols) == 1)
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c1)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 2)
assert(X_cols[0][1][0] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val2)])
c2 = {str(val2) : 1, str(val1) : 2}
X_cols = list(unify_columns(X, [(0, c2)], feature_names_in, feature_types_given))
assert(len(X_cols) == 1)
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 2)
assert(X_cols[0][1][0] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val2)])
def check_pandas_missings(dtype, val1, val2):
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([val2, val1, val1], dtype=np.object_), dtype=dtype)
X["feature2"] = pd.Series(np.array([None, val2, val1], dtype=np.object_), dtype=dtype)
X["feature3"] = pd.Series(np.array([val1, None, val2], dtype=np.object_), dtype=dtype)
X["feature4"] = pd.Series(np.array([val2, val1, None], dtype=np.object_), dtype=dtype)
c1 = {str(val1) : 1, str(val2) : 2}
c2 = {str(val2) : 1, str(val1) : 2}
feature_types_given = ['nominal', 'nominal', 'nominal', 'nominal']
X, n_samples = clean_X(X)
assert(n_samples == 3)
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None), (3, None)], feature_names_in, None))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(len(X_cols[1][2]) == 2)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(len(X_cols[2][2]) == 2)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(len(X_cols[3][2]) == 2)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
X_cols = list(unify_columns(X, [(0, c1), (1, c1), (2, c1), (3, c1)], feature_names_in, feature_types_given))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c1)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is c1)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is c1)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is c1)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
X_cols = list(unify_columns(X, [(0, c2), (1, c2), (2, c2), (3, c2)], feature_names_in, feature_types_given))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is c2)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is c2)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is c2)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
X_cols = list(unify_columns(X, [(0, c1), (1, c2), (2, c1), (3, c2)], feature_names_in, feature_types_given))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c1)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is c2)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is c1)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is c2)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
def check_pandas_float(dtype, val1, val2):
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([val2, val1, val1], dtype=np.object_), dtype=dtype)
X["feature2"] = pd.Series(np.array([None, val2, val1], dtype=np.object_), dtype=dtype)
X["feature3"] = pd.Series(np.array([val1, None, val2], dtype=np.object_), dtype=dtype)
X["feature4"] = pd.Series(np.array([val2, val1, None], dtype=np.object_), dtype=dtype)
X, n_samples = clean_X(X)
assert(n_samples == 3)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in, min_unique_continuous=0))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'continuous')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is None)
assert(X_cols[0][1].dtype == np.float64)
assert(X_cols[0][1][0] == np.float64(dtype(val2)))
assert(X_cols[0][1][1] == np.float64(dtype(val1)))
assert(X_cols[0][1][2] == np.float64(dtype(val1)))
assert(X_cols[1][0] == 'continuous')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is None)
assert(X_cols[1][1].dtype == np.float64)
assert(np.isnan(X_cols[1][1][0]))
assert(X_cols[1][1][1] == np.float64(dtype(val2)))
assert(X_cols[1][1][2] == np.float64(dtype(val1)))
assert(X_cols[2][0] == 'continuous')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is None)
assert(X_cols[2][1].dtype == np.float64)
assert(X_cols[2][1][0] == np.float64(dtype(val1)))
assert(np.isnan(X_cols[2][1][1]))
assert(X_cols[2][1][2] == np.float64(dtype(val2)))
assert(X_cols[3][0] == 'continuous')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is None)
assert(X_cols[3][1].dtype == np.float64)
assert(X_cols[3][1][0] == np.float64(dtype(val2)))
assert(X_cols[3][1][1] == np.float64(dtype(val1)))
assert(np.isnan(X_cols[3][1][2]))
def check_numpy_throws(dtype_src, val1, val2):
X = np.array([[val1, val2], [val1, val2]], dtype=dtype_src)
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
try:
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(False)
except:
pass
def test_process_continuous_float64():
vals, bad = _process_continuous(np.array([3.5, 4.5], dtype=np.float64), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([3.5, 4.5], dtype=np.float64)))
def test_process_continuous_float32():
vals, bad = _process_continuous(np.array([3.1, np.nan], dtype=np.float32), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 2)
assert(vals[0] == 3.0999999046325684)
assert(np.isnan(vals[1]))
def test_process_continuous_int8():
vals, bad = _process_continuous(np.array([7, -9], dtype=np.int8), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([7, -9], dtype=np.float64)))
def test_process_continuous_uint16_missing():
vals, bad = _process_continuous(np.array([7], dtype=np.uint16), np.array([True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 2)
assert(vals[0] == 7)
assert(np.isnan(vals[1]))
def test_process_continuous_bool():
vals, bad = _process_continuous(np.array([False, True], dtype=np.bool_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([0, 1], dtype=np.float64)))
def test_process_continuous_bool_missing():
vals, bad = _process_continuous(np.array([False, True], dtype=np.bool_), np.array([True, False, True], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 3)
assert(vals[0] == 0)
assert(np.isnan(vals[1]))
assert(vals[2] == 1)
def test_process_continuous_obj_simple():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5")], dtype=np.object_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5, 3, 4.5, 5.5], dtype=np.float64)))
def test_process_continuous_obj_simple_missing():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5")], dtype=np.object_), np.array([True, True, True, True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 6)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(np.isnan(vals[5]))
def test_process_continuous_obj_hard():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5"), DerivedStringHolder("7.5"), FloatHolder(8.5), DerivedFloatHolder(9.5), FloatAndStringHolder(10.5, "88"), DerivedFloatAndStringHolder(11.5, "99")], dtype=np.object_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5, 3, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5], dtype=np.float64)))
def test_process_continuous_obj_hard_missing():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5")], dtype=np.object_), np.array([True, True, True, True, True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 7)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(vals[5] == 6.5)
assert(np.isnan(vals[6]))
def test_process_continuous_obj_hard_bad():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5"), "bad", StringHolder("bad2"), NothingHolder("bad3")], dtype=np.object_), np.array([True, True, True, True, True, True, True, False, True, True], dtype=np.bool_))
assert(len(bad) == 10)
assert(bad[0] is None)
assert(bad[1] is None)
assert(bad[2] is None)
assert(bad[3] is None)
assert(bad[4] is None)
assert(bad[5] is None)
assert(bad[6] == "bad")
assert(bad[7] is None)
assert(bad[8] == "bad2")
assert(isinstance(bad[9], str))
assert(vals.dtype == np.float64)
assert(len(vals) == 10)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(vals[5] == 6.5)
assert(np.isnan(vals[7]))
def test_process_continuous_str_simple():
vals, bad = _process_continuous(np.array(["1", "2.5"], dtype=np.unicode_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5], dtype=np.float64)))
def test_process_continuous_str_simple_missing():
vals, bad = _process_continuous(np.array(["1", "2.5"], dtype=np.unicode_), np.array([True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 3)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(np.isnan(vals[2]))
def test_process_continuous_str_hard_bad():
vals, bad = _process_continuous(np.array(["1", "2.5", "bad"], dtype=np.unicode_), np.array([True, True, True, False], dtype=np.bool_))
assert(len(bad) == 4)
assert(bad[0] is None)
assert(bad[1] is None)
assert(bad[2] == "bad")
assert(bad[3] is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 4)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(np.isnan(vals[3]))
def test_process_column_initial_int_float():
# this test is hard since np.unique seems to think int(4) == float(4.0) so naively it returns just "4"
encoded, c = _process_column_initial(np.array([4, 4.0], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["4"] == 1)
assert(c["4.0"] == 2)
assert(np.array_equal(encoded, np.array([c["4"], c["4.0"]], dtype=np.int64)))
def test_process_column_initial_float32_float64():
    # np.float64(np.float32(0.1)) != np.float64(0.1) since the float32 to float64 version has the lower mantissa bits
    # all set to zero, and there will be another float64 that is closer to "0.1" in float64 representation, so
    # they aren't the same, but if we convert them to strings first then they are identical. Strings are the
    # ultimate arbiter of categorical membership since strings are cross-platform and JSON encodable. np.unique
    # will tend to separate the float32 and the float64 values since they aren't the same, but then serialize
    # them to the same string. Then our model would have ["0.1", "0.1"] as the categories if we didn't convert to float64!
encoded, c = _process_column_initial(np.array([np.float32(0.1), np.float64(0.1)], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["0.1"] == 1)
assert(c["0.10000000149011612"] == 2)
assert(np.array_equal(encoded, np.array([c["0.10000000149011612"], c["0.1"]], dtype=np.int64)))
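# A minimal illustration of the comment above, using only NumPy (already imported by
# this module); the digit strings are exactly the ones asserted in the test:
# str(np.float64(0.1))             -> "0.1"
# str(np.float64(np.float32(0.1))) -> "0.10000000149011612"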
def test_process_column_initial_obj_obj():
encoded, c = _process_column_initial(np.array([StringHolder("abc"), StringHolder("def")], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["def"] == 2)
assert(np.array_equal(encoded, np.array([c["abc"], c["def"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), None, 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["xyz"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], c["xyz"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["xyz"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], 0, c["xyz"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), None, 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["xyz"] == 1)
assert(c["abc"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], c["xyz"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["xyz"] == 1)
assert(c["abc"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], 0, c["xyz"]], dtype=np.int64)))
def test_process_column_initial_float64_nomissing():
encoded, c = _process_column_initial(np.array(["11.1", "2.2", "11.1"], dtype=np.unicode_), None, 'ANYTHING_ELSE', None)
assert(len(c) == 2)
assert(c["2.2"] == 1)
assert(c["11.1"] == 2)
assert(np.array_equal(encoded, np.array([c["11.1"], c["2.2"], c["11.1"]], dtype=np.int64)))
def test_process_column_initial_float64_missing():
encoded, c = _process_column_initial(np.array(["11.1", "2.2", "11.1"], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), 'ANYTHING_ELSE', None)
assert(len(c) == 2)
assert(c["2.2"] == 1)
assert(c["11.1"] == 2)
assert(np.array_equal(encoded, np.array([c["11.1"], c["2.2"], 0, c["11.1"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), None, 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["-1"] == 1)
assert(c["1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], c["1"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), np.array([True, True, False, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["-1"] == 1)
assert(c["1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], 0, c["1"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), None, 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["1"] == 1)
assert(c["-1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], c["1"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing_int8():
encoded, c = _process_column_initial(np.array([1, -1, 1], dtype=np.int8), np.array([True, True, False, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["1"] == 1)
assert(c["-1"] == 2)
assert(np.array_equal(encoded, np.array([c["1"], c["-1"], 0, c["1"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), None, 'nominal_alphabetical', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["True"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), None, 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["False"] == 1)
assert(c["True"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["False"], c["True"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["True"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), np.array([True, True, False, True, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["False"] == 1)
assert(c["True"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["False"], c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), None, 'nominal_prevalence', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), None, 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["True"] == 1)
assert(c["False"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], c["False"], c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing_one_bool():
encoded, c = _process_column_initial(np.array([True, True, True], dtype=np.bool_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 1)
assert(c["True"] == 1)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["True"]], dtype=np.int64)))
def test_process_column_initial_prevalence_missing_two_bool():
encoded, c = _process_column_initial(np.array([True, True, False, True], dtype=np.bool_), np.array([True, True, False, True, True], dtype=np.bool_), 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["True"] == 1)
assert(c["False"] == 2)
assert(np.array_equal(encoded, np.array([c["True"], c["True"], 0, c["False"], c["True"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str():
c = {"cd": 1, "ab": 2}
encoded, bad = _encode_categorical_existing(np.array(["ab", "cd"], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["ab"], c["cd"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_bool():
c = {"True": 1, "False": 2}
encoded, bad = _encode_categorical_existing(np.array([True, False], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["True"], c["False"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_int_small():
c = {"-2": 1, "3": 2, "1": 3}
encoded, bad = _encode_categorical_existing(np.array([int(1), np.int8(-2), np.uint64(3)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["1"], c["-2"], c["3"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_int_big():
c = {"-2": 1, "18446744073709551615": 2, "1": 3}
encoded, bad = _encode_categorical_existing(np.array([int(1), np.int8(-2), np.uint64("18446744073709551615")], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["1"], c["-2"], c["18446744073709551615"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_floats():
    # np.float64(np.float32(0.1)) != np.float64(0.1) since the float32 to float64 version has the lower mantissa bits
    # all set to zero, and there will be another float64 that is closer to "0.1" in float64 representation, so
    # they aren't the same, but if we convert them to strings first then they are identical. Strings are the
    # ultimate arbiter of categorical membership since strings are cross-platform and JSON encodable. np.unique
    # will tend to separate the float32 and the float64 values since they aren't the same, but then serialize
    # them to the same string. Then our model would have ["0.1", "0.1"] as the categories if we didn't convert to float64!
c = {"1.1": 1, "2.19921875": 2, "3.299999952316284": 3, "4.4": 4, "5.5": 5}
encoded, bad = _encode_categorical_existing(np.array([float(1.1), np.float16(2.2), np.float32(3.3), np.float64(4.4), np.longfloat(5.5)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["1.1"], c["2.19921875"], c["3.299999952316284"], c["4.4"], c["5.5"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_int():
c = {"abc": 1, "1": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", int(1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_float():
c = {"abc": 1, "1.1": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", float(1.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1.1"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_float64():
c = {"abc": 1, "1.1": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", np.float64(1.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1.1"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_str_float32():
c = {"abc": 1, "1.100000023841858": 2}
encoded, bad = _encode_categorical_existing(np.array(["abc", np.float32(1.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["1.100000023841858"]], dtype=np.int64)))
def test_encode_categorical_existing_int_float():
# this test is hard since np.unique seems to think int(4) == float(4) so naively it returns just "4"
c = {"4": 1, "4.0": 2}
encoded, bad = _encode_categorical_existing(np.array([int(4), 4.0], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["4"], c["4.0"]], dtype=np.int64)))
def test_encode_categorical_existing_int_float32():
    # np.float64(np.float32(0.1)) != np.float64(0.1) since the float32 version has the lower mantissa
    # bits all set to zero, and there will be another float64 that is closer to "0.1" for float64s, so
    # they aren't the same, but if we convert them to strings first then they are identical. I tend to think
    # of strings as the ultimate arbiter of categorical membership since strings are cross-platform.
    # np.unique will tend to separate the float32 and the float64 values since they aren't the same, but then
    # serialize them to the same string. Then our model would have ["0.1", "0.1"] as the categories!!
c = {"4": 1, "0.10000000149011612": 2}
encoded, bad = _encode_categorical_existing(np.array([int(4), np.float32(0.1)], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["4"], c["0.10000000149011612"]], dtype=np.int64)))
def test_encode_categorical_existing_obj_obj():
c = {"abc": 1, "def": 2}
encoded, bad = _encode_categorical_existing(np.array([StringHolder("abc"), StringHolder("def")], dtype=np.object_), None, c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["abc"], c["def"]], dtype=np.int64)))
def test_encode_categorical_existing_str():
c = {"abc": 1, "def": 2, "ghi": 3}
encoded, bad = _encode_categorical_existing(np.array(["abc", "ghi", "def", "something"], dtype=np.unicode_), np.array([True, True, False, True, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array([None, None, None, None, "something"], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([c["abc"], c["ghi"], 0, c["def"], -1], dtype=np.int64)))
def test_encode_categorical_existing_int8():
c = {"5": 1, "0": 2, "-9": 3}
encoded, bad = _encode_categorical_existing(np.array([5, -9, 0, 0, -9, 5, 99], dtype=np.int8), np.array([True, True, True, False, True, True, True, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array([None, None, None, None, None, None, None, "99"], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([c["5"], c["-9"], c["0"], 0, c["0"], c["-9"], c["5"], -1], dtype=np.int64)))
def test_encode_categorical_existing_bool():
c = {"False": 1, "True": 2}
encoded, bad = _encode_categorical_existing(np.array([False, True, False], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), c)
assert(bad is None)
assert(np.array_equal(encoded, np.array([c["False"], c["True"], 0, c["False"]], dtype=np.int64)))
def test_encode_categorical_existing_bool_true():
c = {"True": 1}
encoded, bad = _encode_categorical_existing(np.array([False, True, False], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array(["False", None, None, "False"], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([-1, c["True"], 0, -1], dtype=np.int64)))
def test_encode_categorical_existing_bool_false():
c = {"False": 1}
encoded, bad = _encode_categorical_existing(np.array([False, True, False], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), c)
assert(np.array_equal(bad, np.array([None, "True", None, None], dtype=np.object_)))
assert(np.array_equal(encoded, np.array([c["False"], -1, 0, c["False"]], dtype=np.int64)))
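# The assertions in the _encode_categorical_existing tests above suggest a consistent
# encoding convention: 0 marks a missing value, positive codes come from the supplied
# category dictionary, and -1 flags a value absent from that dictionary, with the
# offending string reported at the same position in the returned `bad` array.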
def test_process_column_initial_choose_floatcategories():
encoded, c = _process_column_initial(np.array([11.11, 2.2, np.float32(2.2), "2.2", StringHolder("2.2")], dtype=np.object_), None, None, 4)
assert(c["2.2"] == 1)
assert(c["2.200000047683716"] == 2)
assert(c["11.11"] == 3)
assert(np.array_equal(encoded, np.array([c["11.11"], c["2.2"], c["2.200000047683716"], c["2.2"], c["2.2"]], dtype=np.int64)))
def test_process_column_initial_choose_floats():
encoded, c = _process_column_initial(np.array([11.11, 2.2, np.float32(2.2), "2.2", StringHolder("2.2"), 3.3, 3.3], dtype=np.object_), None, None, 3)
assert(c is None)
assert(np.array_equal(encoded, np.array([11.11, 2.2, 2.200000047683716, 2.2, 2.2, 3.3, 3.3], dtype=np.float64)))
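# Taken together, the two tests above suggest that the final argument of
# _process_column_initial caps how many distinct values may stay categorical: with a
# cap of 4 the three distinct stringified floats remain categories, while the four
# distinct values in the second test exceed the cap of 3, so the column falls back to
# a plain continuous float64 array and the returned categories are None.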
def test_unify_columns_numpy1():
X = np.array([1, 2, 3])
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["1"]], dtype=np.int64)))
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"]], dtype=np.int64)))
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["3"]], dtype=np.int64)))
def test_unify_columns_numpy2():
X = np.array([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["1"], X_cols[0][2]["4"]], dtype=np.int64)))
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"], X_cols[1][2]["5"]], dtype=np.int64)))
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["3"], X_cols[2][2]["6"]], dtype=np.int64)))
def test_unify_columns_numpy_ignore():
X = np.array([["abc", None, "def"], ["ghi", "jkl", None]])
feature_types_given=['ignore', 'ignore', 'ignore']
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in, feature_types_given))
assert(3 == len(X_cols))
assert(X_cols[0][0] == 'ignore')
assert(X_cols[0][2] is None)
assert(X_cols[0][1] is None)
assert(np.array_equal(X_cols[0][3], np.array(["abc", "ghi"], dtype=np.object_)))
assert(X_cols[1][0] == 'ignore')
assert(X_cols[1][2] is None)
assert(X_cols[1][1] is None)
assert(np.array_equal(X_cols[1][3], np.array([None, "jkl"], dtype=np.object_)))
assert(X_cols[2][0] == 'ignore')
assert(X_cols[2][2] is None)
assert(X_cols[2][1] is None)
assert(np.array_equal(X_cols[2][3], np.array(["def", None], dtype=np.object_)))
def test_unify_columns_scipy():
X = sp.sparse.csc_matrix([[1, 2, 3], [4, 5, 6]])
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["1"], X_cols[0][2]["4"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"], X_cols[1][2]["5"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["3"], X_cols[2][2]["6"]], dtype=np.int64)))
def test_unify_columns_dict1():
X = {"feature1" : [1], "feature2" : "hi", "feature3" : None}
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X, feature_names_given=["feature3", "feature2", "feature1"])
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == 0)
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["hi"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["1"])
def test_unify_columns_dict2():
X = {"feature1" : [1, 4], "feature2" : [2, 5], "feature3" : [3, 6]}
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_names_given=["feature3", "feature2", "feature1"])
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([X_cols[0][2]["3"], X_cols[0][2]["6"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
assert(np.array_equal(X_cols[1][1], np.array([X_cols[1][2]["2"], X_cols[1][2]["5"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
assert(np.array_equal(X_cols[2][1], np.array([X_cols[2][2]["1"], X_cols[2][2]["4"]], dtype=np.int64)))
def test_unify_columns_list1():
X = [1, 2.0, "hi", None]
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(4 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == X_cols[0][2]["1"])
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["2.0"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["hi"])
assert(X_cols[3][1].dtype == np.int64)
assert(X_cols[3][1][0] == 0)
def test_unify_columns_list2():
P1 = pd.DataFrame()
P1["feature1"] = pd.Series(np.array([1, None, np.nan], dtype=np.object_))
P2 = pd.DataFrame()
P2["feature1"] = pd.Series(np.array([1], dtype=np.float32))
P2["feature2"] = pd.Series(np.array([None], dtype=np.object_))
P2["feature3"] = pd.Series(np.array([np.nan], dtype=np.object_))
S1 = sp.sparse.csc_matrix([[1, 2, 3]])
S2 = sp.sparse.csc_matrix([[1], [2], [3]])
X = [np.array([1, 2, 3], dtype=np.int8), pd.Series([4.0, None, np.nan]), [1, 2.0, "hi"], (np.double(4.0), "bye", None), {1, 2, 3}, {"abc": 1, "def": 2, "ghi":3}.keys(), {"abc": 1, "def": 2, "ghi":3}.values(), range(1, 4), (x for x in [1, 2, 3]), np.array([1, 2, 3], dtype=np.object_), np.array([[1, 2, 3]], dtype=np.int8), np.array([[1], [2], [3]], dtype=np.int8), P1, P2, S1, S2]
X, n_samples = clean_X(X)
assert(n_samples == 16)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([c["1"], c["4.0"], c["1"], c["4.0"], c["1"], c["abc"], c["1"], c["1"], c["1"], c["1"], c["1"], c["1"], c["1"], c["1.0"], c["1"], c["1"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
c = X_cols[1][2]
assert(np.array_equal(X_cols[1][1], np.array([c["2"], 0, c["2.0"], c["bye"], c["2"], c["def"], c["2"], c["2"], c["2"], c["2"], c["2"], c["2"], 0, 0, c["2"], c["2"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
c = X_cols[2][2]
assert(np.array_equal(X_cols[2][1], np.array([c["3"], 0, c["hi"], 0, c["3"], c["ghi"], c["3"], c["3"], c["3"], c["3"], c["3"], c["3"], 0, 0, c["3"], c["3"]], dtype=np.int64)))
def test_unify_columns_tuple1():
X = (1, 2.0, "hi", None)
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(4 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == X_cols[0][2]["1"])
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["2.0"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["hi"])
assert(X_cols[3][1].dtype == np.int64)
assert(X_cols[3][1][0] == 0)
def test_unify_columns_tuple2():
X = (np.array([1, 2, 3], dtype=np.int8), pd.Series([4, 5, 6]), [1, 2.0, "hi"], (np.double(4.0), "bye", None), {1, 2, 3}, {"abc": 1, "def": 2, "ghi":3}.keys(), {"abc": 1, "def": 2, "ghi":3}.values(), range(1, 4), (x for x in [1, 2, 3]), np.array([1, 2, 3], dtype=np.object_))
X, n_samples = clean_X(X)
assert(n_samples == 10)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([c["1"], c["4"], c["1"], c["4.0"], c["1"], c["abc"], c["1"], c["1"], c["1"], c["1"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
c = X_cols[1][2]
assert(np.array_equal(X_cols[1][1], np.array([c["2"], c["5"], c["2.0"], c["bye"], c["2"], c["def"], c["2"], c["2"], c["2"], c["2"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
c = X_cols[2][2]
assert(np.array_equal(X_cols[2][1], np.array([c["3"], c["6"], c["hi"], 0, c["3"], c["ghi"], c["3"], c["3"], c["3"], c["3"]], dtype=np.int64)))
def test_unify_columns_generator1():
X = (x for x in [1, 2.0, "hi", None])
X, n_samples = clean_X(X)
assert(n_samples == 1)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(4 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
assert(X_cols[0][1][0] == X_cols[0][2]["1"])
assert(X_cols[1][1].dtype == np.int64)
assert(X_cols[1][1][0] == X_cols[1][2]["2.0"])
assert(X_cols[2][1].dtype == np.int64)
assert(X_cols[2][1][0] == X_cols[2][2]["hi"])
assert(X_cols[3][1].dtype == np.int64)
assert(X_cols[3][1][0] == 0)
def test_unify_columns_generator2():
X = (x for x in [np.array([1, 2, 3], dtype=np.int8), pd.Series([4, 5, 6]), [1, 2.0, "hi"], (np.double(4.0), "bye", None), {1, 2, 3}, {"abc": 1, "def": 2, "ghi":3}.keys(), {"abc": 1, "def": 2, "ghi":3}.values(), range(1, 4), (x for x in [1, 2, 3]), np.array([1, 2, 3], dtype=np.object_)])
X, n_samples = clean_X(X)
assert(n_samples == 10)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(3 == len(X_cols))
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([c["1"], c["4"], c["1"], c["4.0"], c["1"], c["abc"], c["1"], c["1"], c["1"], c["1"]], dtype=np.int64)))
assert(X_cols[1][1].dtype == np.int64)
c = X_cols[1][2]
assert(np.array_equal(X_cols[1][1], np.array([c["2"], c["5"], c["2.0"], c["bye"], c["2"], c["def"], c["2"], c["2"], c["2"], c["2"]], dtype=np.int64)))
assert(X_cols[2][1].dtype == np.int64)
c = X_cols[2][2]
assert(np.array_equal(X_cols[2][1], np.array([c["3"], c["6"], c["hi"], 0, c["3"], c["ghi"], c["3"], c["3"], c["3"], c["3"]], dtype=np.int64)))
def test_unify_columns_pandas_normal_int8():
check_pandas_normal(np.int8, -128, 127)
def test_unify_columns_pandas_normal_uint8():
check_pandas_normal(np.uint8, 0, 255)
def test_unify_columns_pandas_normal_int16():
check_pandas_normal(np.int16, -32768, 32767)
def test_unify_columns_pandas_normal_uint16():
check_pandas_normal(np.uint16, 0, 65535)
def test_unify_columns_pandas_normal_int32():
check_pandas_normal(np.int32, -2147483648, 2147483647)
def test_unify_columns_pandas_normal_uint32():
check_pandas_normal(np.uint32, 0, 4294967295)
def test_unify_columns_pandas_normal_int64():
check_pandas_normal(np.int64, -9223372036854775808, 9223372036854775807)
def test_unify_columns_pandas_normal_uint64():
check_pandas_normal(np.uint64, np.uint64("0"), np.uint64("18446744073709551615"))
def test_unify_columns_pandas_normal_bool():
check_pandas_normal(np.bool_, False, True)
def test_unify_columns_pandas_missings_float64():
check_pandas_float(np.float64, -1.1, 2.2)
def test_unify_columns_pandas_missings_longfloat():
check_pandas_float(np.longfloat, -1.1, 2.2)
def test_unify_columns_pandas_missings_float32():
check_pandas_float(np.float32, -1.1, 2.2)
def test_unify_columns_pandas_missings_float16():
check_pandas_float(np.float16, -1.1, 2.2)
def test_unify_columns_pandas_missings_Int8Dtype():
check_pandas_missings(pd.Int8Dtype(), -128, 127)
def test_unify_columns_pandas_missings_UInt8Dtype():
check_pandas_missings(pd.UInt8Dtype(), 0, 255)
def test_unify_columns_pandas_missings_Int16Dtype():
check_pandas_missings(pd.Int16Dtype(), -32768, 32767)
def test_unify_columns_pandas_missings_UInt16Dtype():
check_pandas_missings(pd.UInt16Dtype(), 0, 65535)
def test_unify_columns_pandas_missings_Int32Dtype():
check_pandas_missings(pd.Int32Dtype(), -2147483648, 2147483647)
def test_unify_columns_pandas_missings_UInt32Dtype():
check_pandas_missings(pd.UInt32Dtype(), 0, 4294967295)
def test_unify_columns_pandas_missings_Int64Dtype():
check_pandas_missings(pd.Int64Dtype(), -9223372036854775808, 9223372036854775807)
def test_unify_columns_pandas_missings_UInt64Dtype():
check_pandas_missings(pd.UInt64Dtype(), np.uint64("0"), np.uint64("18446744073709551615"))
def test_unify_columns_pandas_missings_BooleanDtype():
check_pandas_missings(pd.BooleanDtype(), False, True)
def test_unify_columns_pandas_missings_str():
check_pandas_missings(np.object_, "abc", "def")
def test_unify_columns_pandas_missings_nice_str():
check_pandas_missings(np.object_, StringHolder("abc"), "def")
def test_unify_columns_pandas_missings_pure_ints():
check_pandas_missings(np.object_, 1, 2)
def test_unify_columns_pandas_missings_pure_floats():
check_pandas_missings(np.object_, 1.1, 2.2)
def test_unify_columns_pandas_missings_mixed_floats():
check_pandas_missings(np.object_, 1.1, "2.2")
def test_unify_columns_pandas_missings_mixed_floats2():
check_pandas_missings(np.object_, StringHolder("1.1"), "2.2")
def test_unify_columns_str_throw():
X = "abc"
try:
X, n_samples = clean_X(X)
assert(False)
except:
pass
try:
feature_names_in = unify_feature_names(X)
assert(False)
except:
pass
try:
feature_names_in = ["ANYTHING"]
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(False)
except:
pass
def test_unify_columns_int_throw():
X = 1
try:
X, n_samples = clean_X(X)
assert(False)
except:
pass
try:
feature_names_in = unify_feature_names(X)
assert(False)
except:
pass
try:
feature_names_in = ["ANYTHING"]
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(False)
except:
pass
def test_unify_columns_duplicate_colnames_throw():
X = pd.DataFrame()
X["0"] = [1, 2]
X[0] = [3, 4]
try:
feature_names_in = unify_feature_names(X)
assert(False)
except:
pass
try:
feature_names_in = ["ANYTHING"]
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(False)
except:
pass
def test_unify_columns_opaque_str_throw():
    # this should fail since the default object-to-string conversion makes a string that is useless as a category, like:
# <interpret.glassbox.ebm.test.test_bin.NothingHolder object at 0x0000019525E9FE48>
check_numpy_throws(np.object_, NothingHolder("abc"), "def")
def test_unify_columns_list_throw():
check_numpy_throws(np.object_, ["abc", "bcd"], "def")
def test_unify_columns_tuple_throw():
check_numpy_throws(np.object_, ("abc", "bcd"), "def")
def test_unify_columns_set_throw():
check_numpy_throws(np.object_, {"abc", "bcd"}, "def")
def test_unify_columns_dict_throw():
check_numpy_throws(np.object_, {"abc": 1, "bcd": 2}, "def")
def test_unify_columns_keys_throw():
check_numpy_throws(np.object_, {"abc": 1, "bcd": 2}.keys(), "def")
def test_unify_columns_values_throw():
check_numpy_throws(np.object_, {"abc": 1, "bcd": 2}.values(), "def")
def test_unify_columns_range_throw():
check_numpy_throws(np.object_, range(1, 2), "def")
def test_unify_columns_generator_throw():
check_numpy_throws(np.object_, (x for x in [1, 2]), "def")
def test_unify_columns_ndarray_throw():
check_numpy_throws(np.object_, np.array([1, "abc"], dtype=np.object_), "def")
def test_unify_columns_pandas_obj_to_float():
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([None, np.nan, np.float16(np.nan), 0, -1, 2.2, "-3.3", np.float16("4.4"), StringHolder("-5.5"), np.float32("6.6").item()], dtype=np.object_), dtype=np.object_)
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 10)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(X_cols[0][0] == 'continuous')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is None)
assert(X_cols[0][1].dtype == np.float64)
assert(np.isnan(X_cols[0][1][0]))
assert(np.isnan(X_cols[0][1][1]))
assert(np.isnan(X_cols[0][1][2]))
assert(X_cols[0][1][3] == 0)
assert(X_cols[0][1][4] == -1)
assert(X_cols[0][1][5] == 2.2)
assert(X_cols[0][1][6] == -3.3)
assert(X_cols[0][1][7] == 4.3984375)
assert(X_cols[0][1][8] == -5.5)
assert(X_cols[0][1][9] == 6.5999999046325684) # python internal objects are float64
def test_unify_columns_pandas_obj_to_str():
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([None, np.nan, np.float16(np.nan), 0, -1, 2.2, "-3.3", np.float16("4.4"), StringHolder("-5.5"), 5.6843418860808014e-14, "None", "nan"], dtype=np.object_), dtype=np.object_)
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 12)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
# For "5.684341886080802e-14", we need to round the 16th digit up for this to be the shortest string since
# "5.684341886080801e-14" doesn't work
# https://www.exploringbinary.com/the-shortest-decimal-string-that-round-trips-may-not-be-the-nearest/
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["0"], c["-1"], c["2.2"], c["-3.3"], c["4.3984375"], c["-5.5"], c["5.684341886080802e-14"], c["None"], c["nan"]], dtype=np.int64)))
assert(np.array_equal(na, X_cols[0][1] == 0))
def test_unify_columns_pandas_categorical():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 3)
assert(X_cols[0][2]["a"] == 1)
assert(X_cols[0][2]["0"] == 2)
assert(X_cols[0][2]["bcd"] == 3)
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_ordinal():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd"], ordered=True))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'ordinal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 3)
assert(X_cols[0][2]["a"] == 1)
assert(X_cols[0][2]["0"] == 2)
assert(X_cols[0][2]["bcd"] == 3)
assert(X_cols[0][1].dtype == np.int64)
c = X_cols[0][2]
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_shorter():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "0"], dtype=pd.CategoricalDtype(categories=["a", "0"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 5)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_equals():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_longer():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "in_categories", "a", "bcd", "0"], dtype=pd.CategoricalDtype(categories=["a", "0", "bcd", "in_categories"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:2]))
assert(all(~na[2:]))
X, n_samples = clean_X(X)
assert(n_samples == 6)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(np.array_equal(X_cols[0][3], np.array([None, None, "in_categories", None, None, None], dtype=np.object_)))
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, -1, c["a"], c["bcd"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_reordered_shorter():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "0"], dtype=pd.CategoricalDtype(categories=["0", "a"], ordered=False))
na = X["feature1"].isna()
assert(all(na[0:3]))
assert(all(~na[3:]))
X, n_samples = clean_X(X)
assert(n_samples == 5)
feature_names_in = unify_feature_names(X)
c = {"a": 1, "0": 2, "bcd": 3}
X_cols = list(unify_columns(X, [(0, c)], feature_names_in))
assert(1 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c)
assert(X_cols[0][1].dtype == np.int64)
assert(np.array_equal(X_cols[0][1], np.array([0, 0, 0, c["a"], c["0"]], dtype=np.int64)))
def test_unify_columns_pandas_categorical_reordered_equals():
X = pd.DataFrame()
X["feature1"] = pd.Series([None, np.nan, "not_in_categories", "a", "bcd", "0"], dtype= | pd.CategoricalDtype(categories=["a", "bcd", "0"], ordered=False) | pandas.CategoricalDtype |
import os
import numpy as np, sys, os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.io import loadmat
import wfdb
import tarfile
from sklearn import preprocessing
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import StratifiedKFold
# from keras.preprocessing.sequence import pad_sequences
import math
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
from sklearn.utils.class_weight import compute_class_weight
# import tensorflow_addons as tfa
# import tensorflow as tf
# from tensorflow import keras
# from keras.utils import plot_model
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.signal import butter, lfilter, filtfilt
from scipy.signal import find_peaks
from scipy.signal import peak_widths
from scipy.signal import savgol_filter
def load_challenge_data(filename):
x = loadmat(filename)
data = np.asarray(x['val'], dtype=np.float64)
new_file = filename.replace('.mat', '.hea')
input_header_file = os.path.join(new_file)
with open(input_header_file, 'r') as f:
header_data = f.readlines()
    return data, header_data
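# Usage sketch for load_challenge_data (the record name below is an assumed example,
# not a file shipped with this snippet):
# data, header_data = load_challenge_data("Training_WFDB/A0001.mat")
# data.shape -> (n_leads, n_samples) as float64; header_data is the list of .hea lines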
def clean_up_gender_data(gender):
gender = np.asarray(gender)
gender[np.where(gender == "Male")] = 0
gender[np.where(gender == "male")] = 0
gender[np.where(gender == "M")] = 0
gender[np.where(gender == "Female")] = 1
gender[np.where(gender == "female")] = 1
gender[np.where(gender == "F")] = 1
gender[np.where(gender == "NaN")] = 2
np.unique(gender)
    gender = gender.astype(int)
return gender
def clean_up_age_data(age):
age = np.asarray(age)
age[np.where(age == "NaN")] = -1
np.unique(age)
    age = age.astype(int)
return age
def import_gender_and_age(age, gender):
gender_binary = clean_up_gender_data(gender)
age_clean = clean_up_age_data(age)
print("gender data shape: {}".format(gender_binary.shape[0]))
print("age data shape: {}".format(age_clean.shape[0]))
return age_clean, gender_binary
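# Toy example of the cleaning helpers above (inputs are illustrative strings of the
# kind parsed out of the .hea headers, not real patient data):
# clean_up_gender_data(["Male", "F", "NaN"]) -> array([0, 1, 2])
# clean_up_age_data(["57", "NaN"])           -> array([57, -1])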
def import_key_data(path):
gender = []
age = []
labels = []
ecg_filenames = []
for subdir, dirs, files in sorted(os.walk(path)):
for filename in files:
filepath = subdir + os.sep + filename
if filepath.endswith(".mat"):
data, header_data = load_challenge_data(filepath)
labels.append(header_data[15][5:-1])
ecg_filenames.append(filepath)
gender.append(header_data[14][6:-1])
age.append(header_data[13][6:-1])
return gender, age, labels, ecg_filenames
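# Usage sketch (the directory name is an assumption; point it at a folder containing
# the PhysioNet/CinC 2020 .mat/.hea record pairs):
# gender, age, labels, ecg_filenames = import_key_data("./Training_WFDB")
# age, gender = import_gender_and_age(age, gender)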
def get_signal_lengths(path, title):
signal_lenght = []
for subdir, dirs, files in sorted(os.walk(path)):
for filename in files:
filepath = subdir + os.sep + filename
if filepath.endswith(".mat"):
data, header_data = load_challenge_data(filepath)
splitted = header_data[0].split()
signal_lenght.append(splitted[3])
    signal_lenght_df = pd.DataFrame(signal_lenght)
import os
import shutil
import pickle
from typing import Dict, Optional
from transformers import EvalPrediction
import numpy as np
import pandas as pd
def delete_checkpoint_files_except_the_best(file_path="."):
"""
    The checkpoint with the largest step number is the best one, so we delete every
    checkpoint except that largest one. Its files are then moved into this directory,
    which can afterwards be loaded directly with from_pretrained().
"""
file_list = []
for root, dirs, files in os.walk(file_path):
if "checkpoint" in root:
file_list.append(root)
# import IPython; IPython.embed(); exit(1)
file_list.sort(key = lambda x: int(x.split('-')[-1]))
best_file_path = file_list[-1]
# import IPython; IPython.embed(); exit(1)
    # delete every checkpoint directory except the last (best) one
for file in file_list[:-1]:
shutil.rmtree(file)
file_best = os.listdir(best_file_path)
for f in file_best:
print(f)
shutil.move(os.path.join(best_file_path,f), os.path.join(file_path,f))
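# Usage sketch (assumes a Trainer output directory that contains checkpoint-* subfolders;
# the path below is an example, not one created by this module):
# delete_checkpoint_files_except_the_best(file_path="./results")
# ...after which "./results" can be passed directly to from_pretrained().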
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def compute_metrics(p: EvalPrediction) -> Dict:
preds = np.argmax(p.predictions, axis=1)
return {"acc": simple_accuracy(preds, p.label_ids)}
def write_answer_to_file(answer):
name = "subtask1.csv" if "task1" in args.data_dir else "subtask2.csv"
file_path = os.path.join("./answer_file", name)
# turn to Int
answer = answer.astype(int)
    b = pd.DataFrame(answer, columns=['a'])
import pandas as pd
import matplotlib.pyplot as plt
import sys # to determine Python version number
import matplotlib # to determine Matplotlib version number
print('Python version ' + sys.version)
print('Pandas version ' + pd.__version__)
print('Matplotlib version ' + matplotlib.__version__)
print()
# Create Data --------------------------
# http://www.onthesnow.com/montana/bridger-bowl/historical-snowfall.html
years = [2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018] # bridger bowl year
total_snowfall = [253, 304, 388, 265, 283, 209, 194, 271, 177] # inches
largest_snowfall = [19, 16, 19, 25, 20, 14, 13, 20, 15] # inches
BridgerDataSet = list(zip(years, total_snowfall, largest_snowfall))
print("BridgerDataSet:", BridgerDataSet, "\n")
data = pd.DataFrame(data = BridgerDataSet, columns=["Year", "Total", "Largest"])
print("Bridger DataFrame")
print("-----------------")
print(data)
data.to_csv('bridger.csv',index=False,header=False)
# Get Data -----------------------------
bridger = pd.read_csv('bridger.csv', names=['Year', 'Total', 'Largest'])
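# At this point `bridger` should mirror the DataFrame built above, e.g.:
# bridger.head(2)
#    Year  Total  Largest
# 0  2010    253       19
# 1  2011    304       16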
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
        # nothing to iterate over, so the values defined above should remain
        # unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
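# repeat accepts either a single count applied to every element or a sequence of counts aligned with the values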
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single series name is preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
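# the pattern below captures three named groups (user, domain, tld); every match within a
# subject string becomes one row of the result, keyed by a MultiIndex whose last level is 'match'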
subject_list = [
'dave@google.com',
'tdhock5@gmail.com',
'maudelaperriere@gmail.com',
'rob@gmail.com some text steve@gmail.com',
'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# an Index should give the same result as the Series with the default index;
# index.name does not affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
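# each accessor method should return an empty result of the appropriate dtype when applied to an empty Series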
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
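# expected masks mirror Python's str.is* predicates for each sample value;
# cross-checked against the built-ins at the end of the test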
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: ３ FULLWIDTH DIGIT THREE
values = ['A', '3', u'¼', u'★', u'፸', u'３', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'３', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'３', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
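# get_dummies splits each element on the separator and returns one indicator column per distinct token;
# missing values yield all-zero rows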
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
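# find/rfind mirror str.find/str.rfind: they return -1 when the substring is absent
# and honor optional start/end bounds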
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
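# index/rindex behave like find/rfind but raise ValueError when the substring is not found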
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
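# pad fills each value to the requested width on the chosen side(s);
# values already at least that wide are returned unchanged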
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
# If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
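# zfill left-pads with zeros up to the given width, mirroring str.zfill; NaN propagates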
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
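# rsplit splits from the right and, unlike split, treats the pattern literally rather than as a regex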
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# with a max number of splits, make sure splitting starts from the right
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
s = Series(['Wes McKinney', 'Travis  Oliphant'])
result = s.str.split()
expected = ['Travis', 'Oliphant']
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
def test_split_maxsplit(self):
# n=0 and n=-1 both mean "no limit": re.split uses maxsplit=0, str.split uses maxsplit=-1
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'one'],
1: ['unequal', 'of'],
2: ['splits', 'these'],
3: [NA, 'things'],
4: [NA, 'is'],
5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assert_raises_regex(ValueError, "expand must be"):
s.str.split('_', expand="not_a_boolean")
def test_split_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_unequal_splits', 'one_of_these_things_is_not'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA
), ('one', 'of', 'these', 'things',
'is', 'not')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with tm.assert_raises_regex(ValueError, "expand must be"):
idx.str.split('_', expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=2)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=1)
exp = DataFrame({0: ['some_equal', 'with_no'], 1: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.rsplit('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True, n=1)
exp = MultiIndex.from_tuples([('some_equal', 'splits'),
('with_no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(self):
# gh-18450
s = Series(["foo,bar,baz", NA])
result = s.str.split(",", expand=True)
exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]])
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
assert all(np.isnan(x) for x in result.iloc[1])
def test_split_with_name(self):
# GH 12617
# should preserve name
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.split(',')
exp = Series([['a', 'b'], ['c', 'd']], name='xxx')
tm.assert_series_equal(res, exp)
res = s.str.split(',', expand=True)
exp = DataFrame([['a', 'b'], ['c', 'd']])
tm.assert_frame_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.split(',')
exp = Index([['a', 'b'], ['c', 'd']], name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(',', expand=True)
exp = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(self):
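# partition/rpartition return (head, separator, tail) 3-tuples;
# when the separator is absent, the unused slots are empty strings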
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([('a', '_', 'b_c'), ('c', '_', 'd_e'), NA,
('f', '_', 'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('a_b', '_', 'c'), ('c_d', '_', 'e'), NA,
('f_g', '_', 'h')])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.partition('__', expand=False)
exp = Series([('a', '__', 'b__c'), ('c', '__', 'd__e'), NA,
('f', '__', 'g__h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('__', expand=False)
exp = Series([('a__b', '__', 'c'), ('c__d', '__', 'e'), NA,
('f__g', '__', 'h')])
tm.assert_series_equal(result, exp)
# default separator (a single space)
values = Series(['a b c', 'c d e', NA, 'f g h'])
result = values.str.partition(expand=False)
exp = Series([('a', ' ', 'b c'), ('c', ' ', 'd e'), NA,
('f', ' ', 'g h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition(expand=False)
exp = Series([('a b', ' ', 'c'), ('c d', ' ', 'e'), NA,
('f g', ' ', 'h')])
tm.assert_series_equal(result, exp)
# Not split (separator not found)
values = Series(['abc', 'cde', NA, 'fgh'])
result = values.str.partition('_', expand=False)
exp = Series([('abc', '', ''), ('cde', '', ''), NA, ('fgh', '', '')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('', '', 'abc'), ('', '', 'cde'), NA, ('', '', 'fgh')])
tm.assert_series_equal(result, exp)
# unicode
values = Series([u'a_b_c', u'c_d_e', NA, u'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([(u'a', u'_', u'b_c'), (u'c', u'_', u'd_e'),
NA, (u'f', u'_', u'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([(u'a_b', u'_', u'c'), (u'c_d', u'_', u'e'),
NA, (u'f_g', u'_', u'h')])
tm.assert_series_equal(result, exp)
# compare to standard lib
values = Series(['A_B_C', 'B_C_D', 'E_F_G', 'EFGHEF'])
result = values.str.partition('_', expand=False).tolist()
assert result == [v.partition('_') for v in values]
result = values.str.rpartition('_', expand=False).tolist()
assert result == [v.rpartition('_') for v in values]
def test_partition_index(self):
values = Index(['a_b_c', 'c_d_e', 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Index(np.array([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_',
'g_h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition('_', expand=False)
exp = Index(np.array([('a_b', '_', 'c'), ('c_d', '_', 'e'), (
'f_g', '_', 'h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition('_')
exp = Index([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_', 'g_h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition('_')
exp = Index([('a_b', '_', 'c'), ('c_d', '_', 'e'), ('f_g', '_', 'h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_')
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_')
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=True)
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_', expand=True)
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
def test_partition_with_name(self):
# GH 12617
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.partition(',')
exp = DataFrame({0: ['a', 'c'], 1: [',', ','], 2: ['b', 'd']})
tm.assert_frame_equal(res, exp)
# should preserve name
res = s.str.partition(',', expand=False)
exp = Series([('a', ',', 'b'), ('c', ',', 'd')], name='xxx')
tm.assert_series_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.partition(',')
exp = MultiIndex.from_tuples([('a', ',', 'b'), ('c', ',', 'd')])
assert res.nlevels == 3
tm.assert_index_equal(res, exp)
# should preserve name
res = idx.str.partition(',', expand=False)
exp = Index(np.array([('a', ',', 'b'), ('c', ',', 'd')]), name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
def test_pipe_failures(self):
# #2119
s = Series(['A|B|C'])
result = s.str.split('|')
exp = Series([['A', 'B', 'C']])
tm.assert_series_equal(result, exp)
result = s.str.replace('|', ' ')
exp = Series(['A B C'])
tm.assert_series_equal(result, exp)
def test_slice(self):
values = Series(['aafootwo', 'aabartwo', NA, 'aabazqux'])
result = values.str.slice(2, 5)
exp = Series(['foo', 'bar', NA, 'baz'])
tm.assert_series_equal(result, exp)
for start, stop, step in [(0, 3, -1), (None, None, -1), (3, 10, 2),
(3, 0, -1)]:
try:
result = values.str.slice(start, stop, step)
expected = Series([s[start:stop:step] if not isna(s) else NA
for s in values])
tm.assert_series_equal(result, expected)
except:
print('failed on %s:%s:%s' % (start, stop, step))
raise
# mixed
mixed = Series(['aafootwo', NA, 'aabartwo', True, datetime.today(),
None, 1, 2.])
rs = Series(mixed).str.slice(2, 5)
xp = Series(['foo', NA, 'bar', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.slice(2, 5, -1)
xp = Series(['oof', NA, 'rab', NA, NA, NA, NA, NA])
# unicode
values = Series([u('aafootwo'), u('aabartwo'), NA, u('aabazqux')])
result = values.str.slice(2, 5)
exp = Series([u('foo'), u('bar'), NA, u('baz')])
tm.assert_series_equal(result, exp)
result = values.str.slice(0, -1, 2)
exp = Series([u('afow'), u('abrw'), NA, u('abzu')])
tm.assert_series_equal(result, exp)
def test_slice_replace(self):
values = Series(['short', 'a bit longer', 'evenlongerthanthat', '', NA
])
exp = Series(['shrt', 'a it longer', 'evnlongerthanthat', '', NA])
result = values.str.slice_replace(2, 3)
tm.assert_series_equal(result, exp)
exp = Series(['shzrt', 'a zit longer', 'evznlongerthanthat', 'z', NA])
result = values.str.slice_replace(2, 3, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 1, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shorz', 'a bit longez', 'evenlongerthanthaz', 'z', NA])
result = values.str.slice_replace(-1, None, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'zer', 'zat', 'z', NA])
result = values.str.slice_replace(None, -2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shortz', 'a bit znger', 'evenlozerthanthat', 'z', NA])
result = values.str.slice_replace(6, 8, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'a zit longer', 'evenlongzerthanthat', 'z', NA])
result = values.str.slice_replace(-10, 3, 'z')
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip(self):
values = Series([' aa ', ' bb \n', NA, 'cc '])
result = values.str.strip()
exp = Series(['aa', 'bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series(['aa ', 'bb \n', NA, 'cc '])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([' aa', ' bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_mixed(self):
# mixed
mixed = Series([' aa ', NA, ' bb \t\n', True, datetime.today(), None,
1, 2.])
rs = Series(mixed).str.strip()
xp = Series(['aa', NA, 'bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.lstrip()
xp = Series(['aa ', NA, 'bb \t\n', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rstrip()
xp = Series([' aa', NA, ' bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
def test_strip_lstrip_rstrip_unicode(self):
# unicode
values = Series([u(' aa '), u(' bb \n'), NA, u('cc ')])
result = values.str.strip()
exp = Series([u('aa'), u('bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series([u('aa '), u('bb \n'), NA, u('cc ')])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([u(' aa'), u(' bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_args(self):
values = Series(['xxABCxx', 'xx BNSD', 'LDFJH xx'])
rs = values.str.strip('x')
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip('x')
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip('x')
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_strip_lstrip_rstrip_args_unicode(self):
values = Series([u('xxABCxx'), u('xx BNSD'), u('LDFJH xx')])
rs = values.str.strip(u('x'))
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip(u('x'))
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip(u('x'))
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_wrap(self):
# test values are: two words less than width, two words equal to width,
# two words greater than width, one word less than width, one word
# equal to width, one word greater than width, multiple tokens with
# trailing whitespace equal to width
values = Series([u('hello world'), u('hello world!'), u(
'hello world!!'), u('abcdefabcde'), u('abcdefabcdef'), u(
'abcdefabcdefa'), u('ab ab ab ab '), u('ab ab ab ab a'), u(
'\t')])
# expected values
xp = Series([u('hello world'), u('hello world!'), u('hello\nworld!!'),
u('abcdefabcde'), u('abcdefabcdef'), u('abcdefabcdef\na'),
u('ab ab ab ab'), u('ab ab ab ab\na'), u('')])
rs = values.str.wrap(12, break_long_words=True)
assert_series_equal(rs, xp)
# test with pre and post whitespace (non-unicode), NaN, and non-ascii
# Unicode
values = Series([' pre ', np.nan, u('\xac\u20ac\U00008000 abadcafe')
])
xp = Series([' pre', NA, u('\xac\u20ac\U00008000 ab\nadcafe')])
rs = values.str.wrap(6)
assert_series_equal(rs, xp)
def test_get(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.get(1)
expected = Series(['b', 'd', np.nan, 'g'])
tm.assert_series_equal(result, expected)
# mixed
mixed = Series(['a_b_c', NA, 'c_d_e', True, datetime.today(), None, 1,
2.])
rs = Series(mixed).str.split('_').str.get(1)
xp = Series(['b', NA, 'd', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.get(1)
expected = Series([u('b'), u('d'), np.nan, u('g')])
tm.assert_series_equal(result, expected)
# bounds testing
values = Series(['1_2_3_4_5', '6_7_8_9_10', '11_12'])
# positive index
result = values.str.split('_').str.get(2)
expected = Series(['3', '8', np.nan])
tm.assert_series_equal(result, expected)
# negative index
result = values.str.split('_').str.get(-3)
expected = Series(['3', '8', np.nan])
tm.assert_series_equal(result, expected)
def test_more_contains(self):
# PR #1179
s = Series(['A', 'B', 'C', 'Aaba', 'Baca', '', NA,
'CABA', 'dog', 'cat'])
result = s.str.contains('a')
expected = Series([False, False, False, True, True, False, np.nan,
False, False, True])
assert_series_equal(result, expected)
result = s.str.contains('a', case=False)
expected = Series([True, False, False, True, True, False, np.nan, True,
False, True])
assert_series_equal(result, expected)
result = s.str.contains('Aa')
expected = Series([False, False, False, True, False, False, np.nan,
False, False, False])
assert_series_equal(result, expected)
result = s.str.contains('ba')
expected = Series([False, False, False, True, False, False, np.nan,
False, False, False])
assert_series_equal(result, expected)
result = s.str.contains('ba', case=False)
expected = Series([False, False, False, True, True, False, np.nan,
True, False, False])
assert_series_equal(result, expected)
def test_contains_nan(self):
# PR #14171
s = Series([np.nan, np.nan, np.nan], dtype=np.object_)
result = s.str.contains('foo', na=False)
expected = Series([False, False, False], dtype=np.bool_)
assert_series_equal(result, expected)
result = s.str.contains('foo', na=True)
expected = Series([True, True, True], dtype=np.bool_)
assert_series_equal(result, expected)
result = s.str.contains('foo', na="foo")
expected = Series(["foo", "foo", "foo"], dtype=np.object_)
assert_series_equal(result, expected)
result = s.str.contains('foo')
expected = Series([np.nan, np.nan, np.nan], dtype=np.object_)
assert_series_equal(result, expected)
def test_more_replace(self):
# PR #1179
s = Series(['A', 'B', 'C', 'Aaba', 'Baca', '', NA, 'CABA',
'dog', 'cat'])
result = s.str.replace('A', 'YYY')
expected = Series(['YYY', 'B', 'C', 'YYYaba', 'Baca', '', NA,
'CYYYBYYY', 'dog', 'cat'])
assert_series_equal(result, expected)
result = s.str.replace('A', 'YYY', case=False)
expected = Series(['YYY', 'B', 'C', 'YYYYYYbYYY', 'BYYYcYYY', '', NA,
'CYYYBYYY', 'dog', 'cYYYt'])
assert_series_equal(result, expected)
result = s.str.replace('^.a|dog', 'XX-XX ', case=False)
expected = Series(['A', 'B', 'C', 'XX-XX ba', 'XX-XX ca', '', NA,
'XX-XX BA', 'XX-XX ', 'XX-XX t'])
assert_series_equal(result, expected)
def test_string_slice_get_syntax(self):
s = Series(['YYY', 'B', 'C', 'YYYYYYbYYY', 'BYYYcYYY', NA, 'CYYYBYYY',
'dog', 'cYYYt'])
result = s.str[0]
expected = s.str.get(0)
assert_series_equal(result, expected)
result = s.str[:3]
expected = s.str.slice(stop=3)
assert_series_equal(result, expected)
result = s.str[2::-1]
expected = s.str.slice(start=2, step=-1)
assert_series_equal(result, expected)
def test_string_slice_out_of_bounds(self):
s = Series([(1, 2), (1, ), (3, 4, 5)])
result = s.str[1]
expected = Series([2, np.nan, 4])
assert_series_equal(result, expected)
s = Series(['foo', 'b', 'ba'])
result = s.str[1]
expected = Series(['o', np.nan, 'a'])
assert_series_equal(result, expected)
def test_match_findall_flags(self):
data = {'Dave': '<EMAIL>',
'Steve': '<EMAIL>',
'Rob': '<EMAIL>',
'Wes': np.nan}
data = Series(data)
pat = r'([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\.([A-Z]{2,4})'
result = data.str.extract(pat, flags=re.IGNORECASE, expand=True)
assert result.iloc[0].tolist() == ['dave', 'google', 'com']
result = data.str.match(pat, flags=re.IGNORECASE)
assert result[0]
result = data.str.findall(pat, flags=re.IGNORECASE)
assert result[0][0] == ('dave', 'google', 'com')
result = data.str.count(pat, flags=re.IGNORECASE)
assert result[0] == 1
with tm.assert_produces_warning(UserWarning):
result = data.str.contains(pat, flags=re.IGNORECASE)
assert result[0]
def test_encode_decode(self):
base = Series([u('a'), u('b'), u('a\xe4')])
series = base.str.encode('utf-8')
f = lambda x: x.decode('utf-8')
result = series.str.decode('utf-8')
exp = series.map(f)
tm.assert_series_equal(result, exp)
def test_encode_decode_errors(self):
encodeBase = Series([u('a'), u('b'), u('a\x9d')])
pytest.raises(UnicodeEncodeError, encodeBase.str.encode, 'cp1252')
f = lambda x: x.encode('cp1252', 'ignore')
result = encodeBase.str.encode('cp1252', 'ignore')
exp = encodeBase.map(f)
tm.assert_series_equal(result, exp)
decodeBase = Series([b'a', b'b', b'a\x9d'])
pytest.raises(UnicodeDecodeError, decodeBase.str.decode, 'cp1252')
f = lambda x: x.decode('cp1252', 'ignore')
result = decodeBase.str.decode('cp1252', 'ignore')
exp = decodeBase.map(f)
tm.assert_series_equal(result, exp)
def test_normalize(self):
values = ['ABC', u'ABC', u'123', np.nan, u'アイエ']
s = Series(values, index=['a', 'b', 'c', 'd', 'e'])
normed = [u'ABC', u'ABC', u'123', np.nan, u'アイエ']
expected = Series(normed, index=['a', 'b', 'c', 'd', 'e'])
result = s.str.normalize('NFKC')
tm.assert_series_equal(result, expected)
expected = Series([u'ABC', u'ABC', u'123', np.nan, u'アイエ'],
index=['a', 'b', 'c', 'd', 'e'])
result = s.str.normalize('NFC')
tm.assert_series_equal(result, expected)
with tm.assert_raises_regex(ValueError,
"invalid normalization form"):
s.str.normalize('xxx')
s = Index([u'ABC', u'123', u'アイエ'])
expected = Index([u'ABC', u'123', u'アイエ'])
result = s.str.normalize('NFKC')
tm.assert_index_equal(result, expected)
def test_cat_on_filtered_index(self):
df = DataFrame(index=MultiIndex.from_product(
[[2011, 2012], [1, 2, 3]], names=['year', 'month']))
df = df.reset_index()
df = df[df.month > 1]
str_year = df.year.astype('str')
str_month = df.month.astype('str')
str_both = str_year.str.cat(str_month, sep=' ')
assert str_both.loc[1] == '2011 2'
str_multiple = str_year.str.cat([str_month, str_month], sep=' ')
assert str_multiple.loc[1] == '2011 2 2'
def test_str_cat_raises_intuitive_error(self):
# https://github.com/pandas-dev/pandas/issues/11334
s = Series(['a', 'b', 'c', 'd'])
message = "Did you mean to supply a `sep` keyword?"
with tm.assert_raises_regex(ValueError, message):
s.str.cat('|')
with tm.assert_raises_regex(ValueError, message):
s.str.cat(' ')
def test_index_str_accessor_visibility(self):
from pandas.core.strings import StringMethods
if not compat.PY3:
cases = [(['a', 'b'], 'string'), (['a', u('b')], 'mixed'),
([u('a'), u('b')], 'unicode'),
(['a', 'b', 1], 'mixed-integer'),
(['a', 'b', 1.3], 'mixed'),
(['a', 'b', 1.3, 1], 'mixed-integer'),
(['aa', datetime(2011, 1, 1)], 'mixed')]
else:
cases = [(['a', 'b'], 'string'), (['a', u('b')], 'string'),
([u('a'), u('b')], 'string'),
(['a', 'b', 1], 'mixed-integer'),
(['a', 'b', 1.3], 'mixed'),
(['a', 'b', 1.3, 1], 'mixed-integer'),
(['aa', datetime(2011, 1, 1)], 'mixed')]
for values, tp in cases:
idx = Index(values)
assert isinstance(Series(values).str, StringMethods)
assert isinstance(idx.str, StringMethods)
assert idx.inferred_type == tp
for values, tp in cases:
idx = Index(values)
assert isinstance(Series(values).str, StringMethods)
assert isinstance(idx.str, StringMethods)
assert idx.inferred_type == tp
cases = [([1, np.nan], 'floating'),
([datetime(2011, 1, 1)], 'datetime64'),
([timedelta(1)], 'timedelta64')]
for values, tp in cases:
idx = Index(values)
message = 'Can only use .str accessor with string values'
with | tm.assert_raises_regex(AttributeError, message) | pandas.util.testing.assert_raises_regex |
import functools
import re
from typing import Any
import pandas as pd
class ScoreTable:
def __repr__(self) -> str:
return "table"
def Male_areaA(self, D: Any, prefix: str) -> Any:
wquant = 15 - D[[f"{prefix}1", f"{prefix}2", f"{prefix}3"]].sum(axis=1)
wquant = wquant.replace(
{3: 5, 4: 5, 5: 5, 6: 4, 7: 4, 8: 3, 9: 3, 10: 2, 11: 2, 12: 1}
)
wquant.name = "心理的な仕事の負担(量)"
wquali = 15 - D[[f"{prefix}4", f"{prefix}5", f"{prefix}6"]].sum(axis=1)
wquali = wquali.replace(
{3: 5, 4: 5, 5: 5, 6: 4, 7: 4, 8: 3, 9: 3, 10: 2, 11: 2, 12: 1}
)
wquali.name = "心理的な仕事の負担(質)"
body = 5 - D[f"{prefix}7"]
body = body.replace({1: 4, 2: 3, 3: 2, 4: 1})
body.name = "自覚的な身体的負担度"
human = 10 - D[[f"{prefix}12", f"{prefix}13"]].sum(axis=1) + D[f"{prefix}14"]
human = human.replace(
{3: 5, 4: 4, 5: 4, 6: 3, 7: 3, 8: 2, 9: 2, 10: 1, 11: 1, 12: 1}
)
human.name = "職場の対人関係でのストレス"
env = 5 - D[f"{prefix}15"]
env = env.replace({1: 4, 2: 3, 3: 2, 4: 1})
env.name = "職場環境によるストレス"
ctrl = 15 - D[[f"{prefix}8", f"{prefix}9", f"{prefix}10"]].sum(axis=1)
ctrl = ctrl.replace(
{3: 1, 4: 1, 5: 2, 6: 2, 7: 3, 8: 3, 9: 4, 10: 4, 11: 5, 12: 5}
)
ctrl.name = "仕事のコントロール度"
utl = D[f"{prefix}11"]
utl.name = "技能の活用度"
apt = 5 - D[f"{prefix}16"]
apt = apt.replace({4: 5})
apt.name = "仕事の適性度"
pleasure = 5 - D[f"{prefix}17"]
pleasure = pleasure.replace({4: 5})
pleasure.name = "働きがい"
r = pd.concat(
[wquant, wquali, body, human, env, ctrl, utl, apt, pleasure],
axis=1,
)
return r
def Female_areaA(self, D: Any, prefix: str) -> Any:
wquant = 15 - D[[f"{prefix}1", f"{prefix}2", f"{prefix}3"]].sum(axis=1)
wquant = wquant.replace(
{3: 5, 4: 5, 5: 4, 6: 4, 7: 3, 8: 3, 9: 3, 10: 2, 11: 2, 12: 1}
)
wquant.name = "心理的な仕事の負担(量)"
wquali = 15 - D[[f"{prefix}4", f"{prefix}5", f"{prefix}6"]].sum(axis=1)
wquali = wquali.replace(
{3: 5, 4: 5, 5: 4, 6: 4, 7: 3, 8: 3, 9: 2, 10: 2, 11: 1, 12: 1}
)
wquali.name = "心理的な仕事の負担(質)"
body = 5 - D[f"{prefix}7"]
body = body.replace({1: 4, 2: 3, 3: 2, 4: 1})
body.name = "自覚的な身体的負担度"
human = 10 - D[[f"{prefix}12", f"{prefix}13"]].sum(axis=1) + D[f"{prefix}14"]
human = human.replace(
{3: 5, 4: 4, 5: 4, 6: 3, 7: 3, 8: 2, 9: 2, 10: 1, 11: 1, 12: 1}
)
human.name = "職場の対人関係でのストレス"
env = 5 - D[f"{prefix}15"]
env = env.replace({1: 5, 2: 3, 3: 2, 4: 1})
env.name = "職場環境によるストレス"
ctrl = 15 - D[[f"{prefix}8", f"{prefix}9", f"{prefix}10"]].sum(axis=1)
ctrl = ctrl.replace(
{3: 1, 4: 2, 5: 2, 6: 3, 7: 3, 8: 3, 9: 4, 10: 4, 11: 5, 12: 5}
)
ctrl.name = "仕事のコントロール度"
utl = D[f"{prefix}11"]
utl.name = "技能の活用度"
apt = 5 - D[f"{prefix}16"]
apt = apt.replace({4: 5})
apt.name = "仕事の適性度"
pleasure = 5 - D[f"{prefix}17"]
pleasure = pleasure.replace({4: 5})
pleasure.name = "働きがい"
r = pd.concat(
[wquant, wquali, body, human, env, ctrl, utl, apt, pleasure],
axis=1,
)
return r
def Male_areaB(self, D: Any, prefix: str) -> Any:
lively = D[[f"{prefix}18", f"{prefix}19", f"{prefix}20"]].sum(axis=1)
lively = lively.replace(
{3: 1, 4: 2, 5: 2, 6: 3, 7: 3, 8: 4, 9: 4, 10: 5, 11: 5, 12: 5}
)
lively.name = "活気"
frust = D[[f"{prefix}21", f"{prefix}22", f"{prefix}23"]].sum(axis=1)
frust = frust.replace(
{3: 5, 4: 4, 5: 4, 6: 3, 7: 3, 8: 2, 9: 2, 10: 1, 11: 1, 12: 1}
)
frust.name = "イライラ感"
fatigue = D[[f"{prefix}24", f"{prefix}25", f"{prefix}26"]].sum(axis=1)
fatigue = fatigue.replace(
{3: 5, 4: 4, 5: 3, 6: 3, 7: 3, 8: 2, 9: 2, 10: 2, 11: 1, 12: 1}
)
fatigue.name = "疲労感"
anxious = D[[f"{prefix}27", f"{prefix}28", f"{prefix}29"]].sum(axis=1)
anxious = anxious.replace(
{3: 5, 4: 4, 5: 3, 6: 3, 7: 3, 8: 2, 9: 2, 10: 1, 11: 1, 12: 1}
)
anxious.name = "不安感"
dep = D[
[
f"{prefix}30",
f"{prefix}31",
f"{prefix}32",
f"{prefix}33",
f"{prefix}34",
f"{prefix}35",
]
].sum(axis=1)
dep = dep.replace({6: 5, 7: 4, 8: 4})
dep[(dep >= 9) & (dep <= 12)] = 3
dep[(dep >= 13) & (dep <= 16)] = 2
dep[(dep >= 17) & (dep <= 24)] = 1
dep.name = "抑うつ感"
bc = D[
[
f"{prefix}36",
f"{prefix}37",
f"{prefix}38",
f"{prefix}39",
f"{prefix}40",
f"{prefix}41",
f"{prefix}42",
f"{prefix}43",
f"{prefix}44",
f"{prefix}45",
f"{prefix}46",
]
].sum(axis=1)
bc = bc.replace({11: 5})
bc[(bc >= 12) & (bc <= 15)] = 4
bc[(bc >= 16) & (bc <= 21)] = 3
bc[(bc >= 22) & (bc <= 26)] = 2
bc[(bc >= 27) & (bc <= 44)] = 1
bc.name = "身体愁訴"
r = pd.concat([lively, frust, fatigue, anxious, dep, bc], axis=1)
return r
def Female_areaB(self, D: Any, prefix: str) -> Any:
lively = D[[f"{prefix}18", f"{prefix}19", f"{prefix}20"]].sum(axis=1)
lively = lively.replace(
{3: 1, 4: 2, 5: 2, 6: 3, 7: 3, 8: 4, 9: 4, 10: 5, 11: 5, 12: 5}
)
lively.name = "活気"
frust = D[[f"{prefix}21", f"{prefix}22", f"{prefix}23"]].sum(axis=1)
frust = frust.replace(
{3: 5, 4: 4, 5: 4, 6: 3, 7: 3, 8: 3, 9: 2, 10: 2, 11: 1, 12: 1}
)
frust.name = "イライラ感"
fatigue = D[[f"{prefix}24", f"{prefix}25", f"{prefix}26"]].sum(axis=1)
fatigue = fatigue.replace(
{3: 5, 4: 4, 5: 4, 6: 3, 7: 3, 8: 3, 9: 2, 10: 2, 11: 2, 12: 1}
)
fatigue.name = "疲労感"
anxious = D[[f"{prefix}27", f"{prefix}28", f"{prefix}29"]].sum(axis=1)
anxious = anxious.replace(
{3: 5, 4: 4, 5: 3, 6: 3, 7: 3, 8: 2, 9: 2, 10: 2, 11: 1, 12: 1}
)
anxious.name = "不安感"
dep = D[
[
f"{prefix}30",
f"{prefix}31",
f"{prefix}32",
f"{prefix}33",
f"{prefix}34",
f"{prefix}35",
]
].sum(axis=1)
dep = dep.replace({6: 5, 7: 4, 8: 4})
dep[(dep >= 9) & (dep <= 12)] = 3
dep[(dep >= 13) & (dep <= 17)] = 2
dep[(dep >= 18) & (dep <= 24)] = 1
dep.name = "抑うつ感"
bc = D[
[
f"{prefix}36",
f"{prefix}37",
f"{prefix}38",
f"{prefix}39",
f"{prefix}40",
f"{prefix}41",
f"{prefix}42",
f"{prefix}43",
f"{prefix}44",
f"{prefix}45",
f"{prefix}46",
]
].sum(axis=1)
bc = bc.replace({11: 5, 12: 5, 13: 5})
bc[(bc >= 14) & (bc <= 17)] = 4
bc[(bc >= 18) & (bc <= 23)] = 3
bc[(bc >= 24) & (bc <= 29)] = 2
bc[(bc >= 30) & (bc <= 44)] = 1
bc.name = "身体愁訴"
r = pd.concat([lively, frust, fatigue, anxious, dep, bc], axis=1)
return r
def Male_areaC(self, D: Any, prefix: str) -> Any:
boss = 15 - D[[f"{prefix}47", f"{prefix}50", f"{prefix}53"]].sum(axis=1)
boss = boss.replace(
{3: 1, 4: 1, 5: 2, 6: 2, 7: 3, 8: 3, 9: 4, 10: 4, 11: 5, 12: 5}
)
boss.name = "上司からのサポート"
coll = 15 - D[[f"{prefix}48", f"{prefix}51", f"{prefix}54"]].sum(axis=1)
coll = coll.replace(
{3: 1, 4: 1, 5: 1, 6: 2, 7: 2, 8: 3, 9: 3, 10: 4, 11: 4, 12: 5}
)
coll.name = "同僚からのサポート"
fam = 15 - D[[f"{prefix}49", f"{prefix}52", f"{prefix}55"]].sum(axis=1)
fam[(fam >= 3) & (fam <= 6)] = 1
fam = fam.replace({7: 2, 8: 2, 9: 3, 10: 4, 11: 4, 12: 5})
fam.name = "家族・友人からのサポート"
r = pd.concat([boss, coll, fam], axis=1)
return r
def Female_areaC(self, D: Any, prefix: str) -> Any:
boss = 15 - D[[f"{prefix}47", f"{prefix}50", f"{prefix}53"]].sum(axis=1)
boss = boss.replace(
{3: 1, 4: 2, 5: 2, 6: 3, 7: 3, 8: 4, 9: 4, 10: 4, 11: 5, 12: 5}
)
boss.name = "上司からのサポート"
coll = 15 - D[[f"{prefix}48", f"{prefix}51", f"{prefix}54"]].sum(axis=1)
coll = coll.replace(
{3: 1, 4: 1, 5: 1, 6: 2, 7: 2, 8: 3, 9: 3, 10: 4, 11: 4, 12: 5}
)
coll.name = "同僚からのサポート"
fam = 15 - D[[f"{prefix}49", f"{prefix}52", f"{prefix}55"]].sum(axis=1)
fam[(fam >= 3) & (fam <= 6)] = 1
fam = fam.replace({7: 2, 8: 2, 9: 3, 10: 4, 11: 4, 12: 5})
fam.name = "家族・友人からのサポート"
r = | pd.concat([boss, coll, fam], axis=1) | pandas.concat |
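# Minimal usage sketch (illustrative data and column names): the scoring
# methods expect a DataFrame whose columns follow the "<prefix><item number>"
# convention, e.g. Q1 .. Q55, holding the raw 1-4 questionnaire answers.
def _demo_score_table():
    answers = pd.DataFrame({f"Q{i}": [1, 2, 3, 4] for i in range(1, 56)})
    table = ScoreTable()
    area_a = table.Male_areaA(answers, "Q")  # job stressor scores (1-5)
    area_b = table.Male_areaB(answers, "Q")  # stress reaction scores (1-5)
    area_c = table.Male_areaC(answers, "Q")  # support scores (1-5)
    return pd.concat([area_a, area_b, area_c], axis=1)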
from io import StringIO
import requests
import os
import pandas as pd
import regex as re
import numpy as np
import collections
import textwrap
from bs4 import BeautifulSoup, UnicodeDammit
SECTION_NAMES = ['Business', 'Risk Factors', 'Unresolved Staff Comments', 'Properties', 'Legal Proceedings',
'Mine Safety Disclosures', "Market for the Registrant's Common Equity",
'Related Stockholder Matters and Issuer Purchases of Equity Securities', 'Selected Financial Data',
"Management's Discussion and Analysis of Financial Condition and Results of Operations",
'Quantitative and Qualitative Disclosures About Market Risk',
'Financial Statements and Supplementary Data',
'Changes in and Disagreements With Accountants on Accounting and Financial Disclosures',
'Controls and Procedures', 'Other Information', 'Directors and Executive Officers of the Registrant',
'Executive Compensation', 'Security and Ownership of Certain Beneficial Owners and Management']
SECTION_LIST = ['1', '1a', '1b', '2', '3', '4', '5', '6', '7', '7a', '8', '9', '9a', '9b', '10', '11', '12',
'13', '14', '15']
DESKTOP_PATH = r'C:/Users/<NAME>/Desktop'
FOLDER_PATH = r'/quant-finance-main'
CIK_CSV = r'/cik_ticker.csv'
FINAL_PATH = DESKTOP_PATH + '/Proof_Of_Concept/2020'
CIK_SAVE_CSV = r'/2020_Company.csv'
BASE_URL = r"https://www.sec.gov/cgi-bin/browse-edgar"
BASE_URL2 = r"https://www.sec.gov"
HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/90.0.4430.93 Safari/537.36'}
PARAMETERS = {'action': 'getcompany',
'CIK': None,
'type': '10-K',
'owner': 'exclude',
'count': '100'}
YEARS = ['2020']
TEST_START = 0
TEST_END = -1
def CIK_Processing(file_path):
CIK_numbers = []
Names = []
Ticker = []
CIK_File = pd.read_csv(file_path, sep='|')
for num, company in enumerate(CIK_File['Exchange']):
if company == 'NYSE':
CIK_numbers.append(CIK_File['CIK'][num])
Names.append(CIK_File['Name'][num])
Ticker.append(CIK_File['Ticker'][num])
CIK_numbers = CIK_numbers[0:]
return Names, Ticker, CIK_numbers
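# CIK_Processing expects a pipe-delimited listing along these lines (header and
# sample rows are illustrative only; extra columns are simply ignored):
#
#   CIK|Ticker|Name|Exchange
#   320193|AAPL|Apple Inc.|NASDAQ
#   19617|JPM|JPMorgan Chase & Co|NYSE
#
# Only rows whose Exchange is 'NYSE' are kept, and the parallel lists
# (Names, Ticker, CIK_numbers) are returned for those companies.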
def get_10K_doc_link(table):
link_list = []
table_row = table.find_all('tr')
for tr in table_row[1:]:
table_columns = tr.find_all('td')
filing_type = table_columns[0].text
if filing_type == '10-K':
for td in table_columns:
document_href = td.find('a', dict(href=True, id='documentsbutton'))
if document_href is not None:
filing_doc_link = BASE_URL2 + document_href['href']
link_list.append(filing_doc_link)
return link_list
def get_10K_references(link_list):
text_file_link = ''
filing_date = ''
for link in link_list:
response_link = requests.get(link, headers=HEADERS)
pre_txt_link = BeautifulSoup(response_link.content, features='lxml')
time_period = pre_txt_link.find('div', class_='formContent')
txt_table = pre_txt_link.find('table', class_='tableFile')
xlbr_checker_column = txt_table.findAll('tr')[-1].findAll('td')[2]
txt_href = xlbr_checker_column.find('a')['href']
txt_url = BASE_URL2 + txt_href
period_time = time_period.findAll('div', class_='formGrouping')[1].findAll('div')[1].text
filing_time = time_period.findAll('div', class_='formGrouping')[0].findAll('div')[1].text
for year in YEARS:
if year in period_time:
text_file_link = txt_url
filing_date = filing_time
return text_file_link, filing_date
if __name__ == "__main__":
data_dict = collections.defaultdict(list)
processing_path = DESKTOP_PATH + FOLDER_PATH + CIK_CSV
Name_List = CIK_Processing(processing_path)[0]
Ticker_List = CIK_Processing(processing_path)[1]
CIK_List = CIK_Processing(processing_path)[2]
Twenty_file = | pd.read_csv(FINAL_PATH+r'/0_1000.csv', sep=',') | pandas.read_csv |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import operator
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.apply import frame_apply
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_deprecate_reduce(self):
with warnings.catch_warnings(record=True):
x = []
self.empty.apply(x.append, axis=1, result_type='reduce')
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
self.frame.apply(np.mean, broadcast=True)
def test_apply_broadcast(self):
# scalars
result = self.frame.apply(np.mean, result_type='broadcast')
expected = DataFrame([self.frame.mean()], index=self.frame.index)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(np.mean, axis=1, result_type='broadcast')
m = self.frame.mean(axis=1)
expected = DataFrame({c: m for c in self.frame.columns})
tm.assert_frame_equal(result, expected)
# lists
result = self.frame.apply(
lambda x: list(range(len(self.frame.columns))),
axis=1,
result_type='broadcast')
m = list(range(len(self.frame.columns)))
expected = DataFrame([m] * len(self.frame.index),
dtype='float64',
index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: list(range(len(self.frame.index))),
result_type='broadcast')
m = list(range(len(self.frame.index)))
expected = DataFrame({c: m for c in self.frame.columns},
dtype='float64',
index=self.frame.index)
tm.assert_frame_equal(result, expected)
# preserve columns
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: [1, 2, 3],
axis=1,
result_type='broadcast')
tm.assert_frame_equal(result, df)
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: Series([1, 2, 3], index=list('abc')),
axis=1,
result_type='broadcast')
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_apply_broadcast_error(self):
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
# > 1 ndim
with pytest.raises(ValueError):
df.apply(lambda x: np.array([1, 2]).reshape(-1, 2),
axis=1,
result_type='broadcast')
# cannot broadcast
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2],
axis=1,
result_type='broadcast')
with pytest.raises(ValueError):
df.apply(lambda x: Series([1, 2]),
axis=1,
result_type='broadcast')
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(self.frame.xs(d))
def test_apply_ignore_failures(self):
result = frame_apply(self.mixed_frame,
np.mean, 0,
ignore_failures=True).apply_standard()
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(res, Series)
assert res.index is agg_axis
else:
assert isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), result_type='broadcast')
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.loc[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notna(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = Series(np.repeat(t[0], len(self.frame.columns))
for t in self.frame.itertuples())
expected.index = self.frame.index
assert_series_equal(result, expected)
def test_apply_multi_index(self):
index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['col1', 'col2'])
result = s.apply(
lambda x: Series({'min': min(x), 'max': max(x)}), 1)
expected = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['min', 'max'])
assert_frame_equal(result, expected, check_like=True)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
A_dicts = Series([dict([(0, 'foo'), (1, 'spam')]),
dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
B_dicts = Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
reduce_true = df.apply(fn, result_type='reduce')
reduce_false = df.apply(fn, result_type='expand')
reduce_none = df.apply(fn)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
assert_series_equal(reduce_none, dicts)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
tm.assert_frame_equal(applied, self.frame * 2)
self.frame.applymap(type)
# gh-465: function returning tuples
result = self.frame.applymap(lambda x: (x, x))
assert isinstance(result['A'][0], tuple)
# gh-2909: object conversion to float in constructor?
df = DataFrame(data=[1, 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
df = DataFrame(data=[1., 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
# see gh-2786
df = DataFrame(np.random.random((3, 4)))
df2 = df.copy()
cols = ['a', 'a', 'a', 'a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
tm.assert_frame_equal(result, expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = | pd.Timedelta('1 min') | pandas.Timedelta |
"""
Helper functions to convert the data to the format expected by run_robot.py
"""
import sys
import seir
import pandas as pd
import numpy as np
import numpy.linalg as la
import os.path as path
# To use PyJulia
print('Loading PyJulia module...')
from julia.api import Julia
jl = Julia(compiled_modules=False)
from julia import Main as Julia
Julia.eval('ENV["OMP_NUM_THREADS"] = 8')
print('Loading PyJulia module... Ok!')
print('Loading Robot-dance Julia module...')
Julia.eval('include("robot_dance.jl")')
print('Loading Robot-dance Julia module... Ok!')
def save_basic_parameters(tinc=5.2, tinf=2.9, rep=2.5, ndays=400, window=14, min_level=1.0):
"""Save the basic_paramters.csv file using the data used in the report.
All values are optional. If not present the values used in the report wihtout
an initial hammer phase are used.
"""
    basic_prm = pd.Series(dtype=np.float64)
basic_prm["tinc"] = tinc
basic_prm["tinf"] = tinf
basic_prm["rep"] = rep
basic_prm["ndays"] = ndays
basic_prm["window"] = window
basic_prm["min_level"] = min_level
basic_prm.to_csv(path.join("data", "basic_parameters.csv"), header=False)
return basic_prm
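# With the defaults above, data/basic_parameters.csv ends up as a small
# key/value file (values stored as floats), roughly:
#
#   tinc,5.2
#   tinf,2.9
#   rep,2.5
#   ndays,400.0
#   window,14.0
#   min_level,1.0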
def initial_conditions(basic_prm, city_data, min_days, Julia, correction=1.0):
"""Fits data and define initial contidions of the SEIR model.
"""
population = city_data["estimated_population_2019"].iloc[0]
confirmed = city_data["confirmed"]
# Compute the new cases from the confirmed sum
new_cases = confirmed.values[1:] - confirmed.values[:-1]
# Use a mean in a week to smooth the data (specially to deal with weekends)
observed_I = np.convolve(new_cases, np.ones(7, dtype=int), 'valid') / 7.0
# Now accumulate in the inf_window
inf_window = int(round(basic_prm["tinf"]))
observed_I = np.convolve(observed_I, np.ones(inf_window, dtype=int), 'valid')
ndays = len(observed_I)
if ndays >= min_days and sum(observed_I) > 0:
observed_I /= population
Julia.observed_I = correction*observed_I
Julia.tinc = basic_prm["tinc"]
Julia.tinf = basic_prm["tinf"]
Julia.rep = basic_prm["rep"]
Julia.eval('initialc = fit_initial(tinc, tinf, rep, observed_I)')
S1 = Julia.initialc[0]
E1 = Julia.initialc[1]
I1 = Julia.initialc[2]
R1 = Julia.initialc[3]
return (S1, E1, I1, R1, ndays), observed_I, population
else:
raise ValueError("Not enough data for %s only %d days available" %
(city_data["city"].iloc[0], len(observed_I)))
def simulate(parameters, city_data, min_days):
"""Simulate from the computed initial parameters until the last day.
"""
c = city_data["city"].iloc[0]
last_day = city_data["date"].iloc[-1]
S1, E1, I1, R1, ndays = parameters[c]
covid = seir.seir(ndays)
print("Simulating", c, "until", last_day)
result = covid.run((S1, E1, I1, R1))
return result[:, -1], last_day
def compute_initial_condition_evolve_and_save(basic_prm, state, large_cities, min_pop, correction,
raw_name="data/covid_with_cities.csv"):
"""Compute the initial conditions and population and save it to data/cities_data.csv.
The population andinitial condition is estimated from a file with the information on
the total number of confimed cases for the cities. See the example in
data/covid_with_cities.csv.
Parameters: large_cities: list with the name of cities tha are pre_selected.
basic_prm: basic paramters for SEIR model.
state: state to subselect or None.
large_cinties: minimal subset of cities do be selected.
min_pop: minimal population to select more cities.
correction: a constant to multiply the observed cases to try to correct
subnotification.
raw_name: name of the file with the accumulated infected data to estimate the
initial conditions.
"""
raw_epi_data = pd.read_csv(raw_name)
if state is not None:
raw_epi_data = raw_epi_data[raw_epi_data["state"] == state]
large_cities.extend(
raw_epi_data[raw_epi_data["estimated_population_2019"] > min_pop]["city"].unique()
)
large_cities = list(set(large_cities))
large_cities.sort()
# Create a new Dataframe with only the needed information
raw_epi_data = raw_epi_data[["city", "date", "confirmed", "estimated_population_2019"]]
epi_data = raw_epi_data[raw_epi_data["city"] == large_cities[0]].copy()
epi_data.sort_values(by=["date"], inplace=True)
for city_name in large_cities[1:]:
city = raw_epi_data[raw_epi_data["city"] == city_name].copy()
city.sort_values(by = ["date"], inplace=True)
epi_data = epi_data.append(city)
epi_data.reset_index(inplace=True, drop=True)
# Compute initial parameters fitting the data
min_days = 5
parameters = {}
ignored = []
population = []
n_cities = len(large_cities)
for i in range(n_cities):
city_name = large_cities[i]
print("%d/%d" %(i + 1, n_cities), city_name)
try:
city_data = epi_data[epi_data["city"] == city_name]
parameters[city_name], observed_I, city_pop = initial_conditions(basic_prm,
city_data, min_days, Julia, correction)
population.append(city_pop)
except ValueError:
print("Ignoring ", city_name, "not enough data.")
ignored.append(city_name)
# Simulate the data until the last day to start the optimization phase.
cities_data = {}
for city_name in large_cities:
if city_name in ignored:
continue
city_data = epi_data[epi_data["city"] == city_name]
cities_data[city_name], last_day = simulate(parameters, city_data, min_days)
# Save results
cities_data = pd.DataFrame.from_dict(cities_data,
orient="index", columns=["S1", "E1", "I1", "R1"])
cities_data["population"] = population
cities_data.to_csv(path.join("data", "cities_data.csv"))
return cities_data
def convert_mobility_matrix_and_save(cities_data, max_neighbors, drs=False):
"""Read the mobility matrix data given by Pedro and save it in the format needed by
robot_dance.
    cities_data: a data frame in the format of cities_data.csv.
max_neighbors: maximum number of neighbors allowed in the mobility matrix.
"""
# Read the mobility_matrix
large_cities = cities_data.index
if drs:
mobility_matrix = pd.read_csv("data/drs_mobility.csv", index_col=0).T
mobility_matrix = mobility_matrix.mask(
mobility_matrix.rank(axis=1, method='min', ascending=False) > max_neighbors + 1, 0
)
elif path.exists("data/move_mat_SÃO PAULO_SP-Municipios_norm.csv"):
mobility_matrix = pd.read_csv("data/move_mat_SÃO PAULO_SP-Municipios_norm.csv",
header=None, sep=" ")
cities_names = pd.read_csv("data/move_mat_SÃO PAULO_SP-Municipios_reg_names.txt",
header=None)
# Cut the matrix to see only the desired cities
cities_names = [i.title() for i in cities_names[0]]
mobility_matrix.index = cities_names
mobility_matrix.columns = cities_names
mobility_matrix = mobility_matrix.loc[large_cities, large_cities].T
mobility_matrix = mobility_matrix.mask(
mobility_matrix.rank(axis=1, method='min', ascending=False) > max_neighbors + 1, 0
)
else:
ncities = len(large_cities)
pre_M = np.zeros((ncities, ncities))
mobility_matrix = pd.DataFrame(data=pre_M, index=large_cities, columns=large_cities)
# Adjust the mobility matrix
np.fill_diagonal(mobility_matrix.values, 0.0)
# out vector has at entry i the proportion of the population of city i that leaves the
# city during the day
out = mobility_matrix.sum(axis = 1)
# The M matrix has at entry [i, j] the proportion, with respect to the population of j,
# of people from i that spend the day in j
population = cities_data["population"]
for i in mobility_matrix.index:
mobility_matrix.loc[i] = (mobility_matrix.loc[i] * population[i] /
population)
mobility_matrix["out"] = out
mobility_matrix.to_csv(path.join("data", "mobility_matrix.csv"))
return mobility_matrix
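def _demo_mobility_normalization():
    """Two-city toy example (made-up numbers) of the normalization applied in
    convert_mobility_matrix_and_save: entry [i, j] starts as the fraction of
    i's population that spends the day in j and is rescaled by pop_i / pop_j,
    while the extra 'out' column keeps the fraction that leaves each city."""
    population = pd.Series({"A": 1000.0, "B": 100.0})
    raw = pd.DataFrame([[0.0, 0.1],
                        [0.3, 0.0]], index=["A", "B"], columns=["A", "B"])
    out = raw.sum(axis=1)
    M = raw.copy()
    for i in M.index:
        M.loc[i] = M.loc[i] * population[i] / population
    # M.loc["A", "B"] == 1.0: commuters from A equal B's whole population here.
    M["out"] = out
    return M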
def save_target(cities_data, target):
"""Save the target for maximum level of inffected.
"""
large_cities = cities_data.index
_ncities, ndays = target.shape
days = list(range(1, ndays + 1))
target_df = | pd.DataFrame(data=target, index=cities_data.index, columns=days) | pandas.DataFrame |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# You can run this test by first running `nPython.exe` (with mono or otherwise):
# $ ./nPython.exe ReportChartTests.py
import numpy as np
import pandas as pd
from datetime import datetime
from ReportCharts import ReportCharts
charts = ReportCharts()
## Test GetReturnsPerTrade
backtest = list(np.random.normal(0, 1, 1000))
live = list(np.random.normal(0.5, 1, 400))
result = charts.GetReturnsPerTrade([], [])
result = charts.GetReturnsPerTrade(backtest, [])
result = charts.GetReturnsPerTrade(backtest, live)
## Test GetCumulativeReturnsPlot
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2012-10-01T00:00:00', periods=365)]
strategy = np.linspace(1, 25, 365)
benchmark = np.linspace(2, 26, 365)
backtest = [time, strategy, time, benchmark]
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2013-10-01T00:00:00', periods=50)]
strategy = np.linspace(25, 29, 50)
benchmark = np.linspace(26, 30, 50)
live = [time, strategy, time, benchmark]
result = charts.GetCumulativeReturns()
result = charts.GetCumulativeReturns(backtest)
result = charts.GetCumulativeReturns(backtest, live)
## Test GetDailyReturnsPlot
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2012-10-01T00:00:00', periods=365)]
data = list(np.random.normal(0, 1, 365))
backtest = [time, data]
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2013-10-01T00:00:00', periods=120)]
data = list(np.random.normal(0.5, 1.5, 120))
live = [time, data]
empty = [[], []]
result = charts.GetDailyReturns(empty, empty)
result = charts.GetDailyReturns(backtest, empty)
result = charts.GetDailyReturns(backtest, live)
## Test GetMonthlyReturnsPlot
backtest = {'2016': [0.5, 0.7, 0.2, 0.23, 1.3, 1.45, 1.67, -2.3, -0.5, 1.23, 1.23, -3.5],
'2017': [0.5, 0.7, 0.2, 0.23, 1.3, 1.45, 1.67, -2.3, -0.5, 1.23, 1.23, -3.5][::-1]}
live = {'2018': [0.5, 0.7, 0.2, 0.23, 1.3, 1.45, 1.67, -2.3, -0.5, 1.23, 1.23, -3.5],
'2019': [1.5, 2.7, -3.2, -0.23, 4.3, -2.45, -1.67, 2.3, np.nan, np.nan, np.nan, np.nan]}
result = charts.GetMonthlyReturns({}, {})
result = charts.GetMonthlyReturns(backtest, pd.DataFrame())
result = charts.GetMonthlyReturns(backtest, live)
## Test GetAnnualReturnsPlot
time = ['2012', '2013', '2014', '2015', '2016']
strategy = list(np.random.normal(0, 1, 5))
backtest = [time, strategy]
time = ['2017', '2018']
strategy = list(np.random.normal(0.5, 1.5, 2))
live = [time, strategy]
empty = [[], []]
result = charts.GetAnnualReturns()
result = charts.GetAnnualReturns(backtest)
result = charts.GetAnnualReturns(backtest, live)
## Test GetDrawdownPlot
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2012-10-01', periods=365)]
data = list(np.random.uniform(-5, 0, 365))
backtest = [time, data]
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2013-10-01', periods=100)]
data = list(np.random.uniform(-5, 0, 100))
live = [time, data]
worst = [{'Begin': datetime(2012, 10, 1), 'End': datetime(2012, 10, 11)},
{'Begin': datetime(2012, 12, 1), 'End': datetime(2012, 12, 11)},
{'Begin': datetime(2013, 3, 1), 'End': datetime(2013, 3, 11)},
{'Begin': datetime(2013, 4, 1), 'End': datetime(2013, 4, 1)},
{'Begin': datetime(2013, 6, 1), 'End': datetime(2013, 6, 11)}]
empty = [[], []]
result = charts.GetDrawdown(empty, empty, {})
result = charts.GetDrawdown(backtest, empty, worst)
result = charts.GetDrawdown(backtest, live, worst)
## Test GetCrisisPlots (backtest only)
equity = list(np.linspace(1, 25, 365))
benchmark = list(np.linspace(2, 26, 365))
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2012-10-01 00:00:00', periods=365)]
backtest = [time, equity, benchmark]
empty = [[], [], []]
result = charts.GetCrisisEventsPlots(empty, 'empty_crisis')
result = charts.GetCrisisEventsPlots(backtest, 'dummy_crisis')
## Test GetRollingBetaPlot
empty = [[], [], [], []]
twelve = [np.nan for x in range(180)] + list(np.random.uniform(-1, 1, 185))
six = list(np.random.uniform(-1, 1, 365))
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2012-10-01 00:00:00', periods=365)]
backtest = [time, six, twelve]
result = charts.GetRollingBeta([time, six, time, twelve], empty)
result = charts.GetRollingBeta([time, six, [], []], empty)
result = charts.GetRollingBeta(empty, empty)
twelve = [np.nan for x in range(180)] + list(np.random.uniform(-1, 1, 185))
six = list(np.random.uniform(-1, 1, 365))
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2013-10-01 00:00:00', periods=365)]
live = [time, six, time, twelve]
result = charts.GetRollingBeta(live)
## Test GetRollingSharpeRatioPlot
data = list(np.random.uniform(1, 3, 365 * 2))
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2012-10-01 00:00:00', periods=365 * 2)]
backtest = [time, data]
data = list(np.random.uniform(1, 3, 365))
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2014-10-01 00:00:00', periods=365)]
live = [time, data]
empty = [[], []]
result = charts.GetRollingSharpeRatio(empty, empty)
result = charts.GetRollingSharpeRatio(backtest, empty)
result = charts.GetRollingSharpeRatio(backtest, live)
## Test GetAssetAllocationPlot
backtest = [['SPY', 'IBM', 'NFLX', 'AAPL'], [0.50, 0.25, 0.125, 0.125]]
live = [['SPY', 'IBM', 'AAPL'], [0.4, 0.4, 0.2]]
empty = [[], []]
result = charts.GetAssetAllocation(empty, empty)
result = charts.GetAssetAllocation(backtest, empty)
result = charts.GetAssetAllocation(backtest, live)
## Test GetLeveragePlot
backtest = [[pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2014-10-01', periods=365)],
list(np.random.uniform(0.5, 1.5, 365))]
live = [[pd.Timestamp(x).to_pydatetime() for x in | pd.date_range('2015-10-01', periods=100) | pandas.date_range |
import pandas as pd
import ast
main_data = pd.read_csv('Data/Stack+Twitter+Ocean Data.csv')
twitter = | pd.read_csv('Data/twitter_data.csv') | pandas.read_csv |
import os
import tempfile
import logging
from azureml.core.model import Model
import pickle
import pandas as pd
from azureml.core import Run
import os
import mlflow
def init():
global model
    model_dir = os.getenv('AZUREML_MODEL_DIR')
model_file = os.listdir(model_dir)[0]
model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), model_file)
model = mlflow.sklearn.load_model(model_path)
def run(mini_batch):
print(f"run method start: {__file__}, run({mini_batch})")
resultList = []
    # Score each file assigned to this worker
for batch in mini_batch:
        # read the input file into a DataFrame
data = pd.read_json(batch)
predictions = model.predict(data)
data["prediction"] =predictions
resultList.append(data)
result = | pd.concat(resultList) | pandas.concat |
import pandas as pd
import numpy as np
# Choose the number of weeks
w = 4*6
# Setup a database of appropriate length
db = | pd.DataFrame(columns=['weeks', 'cost']) | pandas.DataFrame |
import pytest
from ..dataset import (
magnitude_and_scale,
get_type,
_try_import,
indent,
get_df_type,
cast_and_clean_df,
sql_dataset,
)
import pandas as pd
import numpy as np
import datetime
import pyodbc
import requests
CMD_DROP_TEST_TABLE_IF_EXISTS = "IF OBJECT_ID('test_table', 'U') IS NOT NULL DROP TABLE test_table;"
CMD_CREATE_TEST_TABLE = """
CREATE TABLE test_table (
[dt] datetime NULL,
[dt2] date NOT NULL,
[uid] nvarchar(10) NOT NULL,
[strcol] nvarchar(max) NOT NULL,
[name] nvarchar(10) NULL,
[empty_col] nvarchar(100) NULL,
[float] decimal(22,3) NULL,
[float_k] decimal(22,3) NULL,
[float_m] decimal(22,13) NULL,
[float_b] decimal(22,9) NULL,
[float_na] decimal(22,3) NULL,
[bit] bit NULL,
[bit_na] bit NULL,
[tinyint] tinyint NULL,
[tinyint_na] tinyint NULL,
[smallint] smallint NOT NULL,
[smallint_na] smallint NULL,
[int] int NOT NULL,
[int_na] int NULL,
[bigint] bigint NULL,
[bigint_na] bigint NULL,
[bool] bit NULL,
[bool_na] bit NULL,
[empty_str_col] nvarchar(100) NULL
);
"""
expected_schema = [
['dt', 'datetime', [], True, ''],
['dt2', 'date', [], False, ''],
['uid', 'nvarchar', [10], False, ''],
['strcol', 'nvarchar', ['max'], False, ''],
['name', 'nvarchar', [10], True, ''],
['empty_col', 'nvarchar', [100], True, ''],
['float', 'decimal', [22,3], True, ''],
['float_k', 'decimal', [22,3], True, ''],
['float_m', 'decimal', [22,13], True, ''],
['float_b', 'decimal', [22,9], True, ''],
['float_na', 'decimal', [22,3], True, ''],
['bit', 'bit', [], True, ''],
['bit_na', 'bit', [], True, ''],
['tinyint', 'tinyint', [], True, ''],
['tinyint_na', 'tinyint', [], True, ''],
['smallint', 'smallint', [], False, ''],
['smallint_na', 'smallint', [], True, ''],
['int', 'int', [], False, ''],
['int_na', 'int', [], True, ''],
['bigint', 'bigint', [], True, ''],
['bigint_na', 'bigint', [], True, ''],
['bool', 'bit', [], True, ''],
['bool_na', 'bit', [], True, ''],
['empty_str_col', 'nvarchar', [100], True, ''],
]
# dataset.magnitude_and_scale
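# magnitude = max number of digits before the decimal point, scale = max number of digits
# after it (judging by the expected values in the tests below).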
def test_magnitude_and_scale_int():
mag, scale = magnitude_and_scale(pd.Series([1, 2, 3]).astype(int))
assert mag == 1
assert scale == 0
def test_magnitude_and_scale_float_type_int():
mag, scale = magnitude_and_scale(pd.Series([123.0, 1.0, 1234.0, np.nan]))
assert mag == 4
assert scale == 0
def test_magnitude_and_scale_float_with_inf():
mag, scale = magnitude_and_scale(pd.Series([1.0, 2.0, np.inf, -np.inf]))
assert mag == 1
assert scale == 0
def test_magnitude_and_scale_zero():
mag, scale = magnitude_and_scale(pd.Series([0]))
assert mag == 1
assert scale == 0
def test_magnitude_and_scale_float():
mag, scale = magnitude_and_scale(pd.Series([123.1234, 12345.1234567, 12.1234567800]))
assert mag == 5
assert scale == 8
def test_magnitude_and_scale_only_frac_part():
mag, scale = magnitude_and_scale(pd.Series([0.12345, 0.123456, 0.123]))
assert mag == 1
assert scale == 6
def test_magnitude_and_scale_empty_raises_error():
with pytest.raises(ValueError) as e_info:
mag, scale = magnitude_and_scale(pd.Series([], dtype='float64'))
def test_magnitude_and_scale_nan_raises_error():
with pytest.raises(ValueError) as e_info:
mag, scale = magnitude_and_scale(pd.Series([np.nan]))
def test_magnitude_and_scale_inf_raises_error():
with pytest.raises(ValueError) as e_info:
mag, scale = magnitude_and_scale(pd.Series([np.inf]))
# dataset.get_type
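# get_type infers a SQL column spec from a pandas Series, returning (dtype, params, has_null, comment).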
def test_get_type_decimal():
dtype, params, has_null, comment = get_type(pd.Series([1.1, 2.1, 3.0]))
assert dtype == 'decimal'
assert params == [2, 1]
assert has_null == False
assert comment == ''
dtype, params, has_null, comment = get_type(pd.Series([123.1234, 12345.1234567, 12.1234567800]))
assert dtype == 'decimal'
assert params == [19, 12]
assert has_null == False
assert comment == ''
dtype, params, has_null, comment = get_type(pd.Series([0.12345, 0.123456, 0.123]))
assert dtype == 'decimal'
assert params == [10, 9]
assert has_null == False
assert comment == ''
def test_get_type_decimal_na_inf():
dtype, params, has_null, comment = get_type(pd.Series([1.1, 2.1, 3.0, np.nan]))
assert dtype == 'decimal'
assert params == [2, 1]
assert has_null == True
assert comment == ''
dtype, params, has_null, comment = get_type(pd.Series([1.1, 2.1, 3.0, np.nan, np.inf]))
assert dtype == 'decimal'
assert params == [2, 1]
assert has_null == True
assert comment == ''
def test_get_type_str():
dtype, params, has_null, comment = get_type(pd.Series(['123']))
assert dtype == 'nvarchar'
assert params == [6]
assert has_null == False
assert comment == ''
dtype, params, has_null, comment = get_type(pd.Series(['a' * 1000]))
assert dtype == 'nvarchar'
assert params == [2000]
assert has_null == False
assert comment == ''
dtype, params, has_null, comment = get_type(pd.Series(['a' * 2001]))
assert dtype == 'nvarchar'
assert params == [4000]
assert has_null == False
assert comment == ''
def test_get_type_str_max():
with pytest.warns(None):
dtype, params, has_null, comment = get_type(pd.Series(['a' * 4001]))
assert dtype == 'nvarchar'
assert params == ['max']
assert has_null == False
assert comment == 'Maximum string length is 4001. Using nvarchar(max).'
def test_get_type_str_na():
dtype, params, has_null, comment = get_type(pd.Series(['a', 'b', 'c', 'def', np.nan]))
assert dtype == 'nvarchar'
assert params == [6]
assert has_null == True
assert comment == ''
def test_get_type_str_empty():
dtype, params, has_null, comment = get_type(pd.Series(['', '', '', '', '']))
assert dtype == 'nvarchar'
assert params == [255]
assert has_null == False
assert comment == 'zero-length string column, defaulting to nvarchar(255)'
def test_get_type_bool():
dtype, params, has_null, comment = get_type( | pd.Series([True, False]) | pandas.Series |
import os
import pandas as pd
import matplotlib.pyplot as plt
import sys
sys.path.append('../')
from load_paths import load_box_paths
import matplotlib as mpl
import matplotlib.dates as mdates
from datetime import date, timedelta, datetime
import seaborn as sns
from processing_helpers import *
mpl.rcParams['pdf.fonttype'] = 42
def plot_over_time(df, time_var):
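    # Line: median f_inf_det over time; shaded band: the p25 and p975 columns
    # (presumably the 2.5th and 97.5th percentiles).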
fig = plt.figure(figsize=(10, 5))
fig.subplots_adjust(right=0.97, left=0.05, hspace=0.4, wspace=0.2, top=0.95, bottom=0.09)
palette = sns.color_palette('husl', 8)
ax = fig.add_subplot(1, 1, 1)
ax.plot(df[time_var], df['median_f_inf_det'], color=palette[0])
ax.fill_between(df[time_var].values, df['p25_f_inf_det'], df['p975_f_inf_det'], color=palette[0], linewidth=0, alpha=0.2)
ax.set_title('d_Sys')
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d\n%b'))
ax.set_ylim(0, 1.5)
fig.savefig(os.path.join(wdir, 'inputs', 'testing', f'dSym_by_{time_var}_levincsv.png'))
if __name__ == '__main__':
fname_levin = 'f_inf_det_Illinois_201124_excessdeaths_and_nonstationary.csv'
fname_odriscoll = 'f_inf_det_Illinois_201124_excessdeaths_and_nonstationary_odriscoll.csv'
fname=fname_levin
df = pd.read_csv(os.path.join(projectpath, 'Plots + Graphs', 'detection_estimation', fname))
df.columns
df['date'] = | pd.to_datetime(df['date']) | pandas.to_datetime |
####################################################################################################
"""
dashboard.py
This script implements a dashboard application for the efficient planning of the municipal
enforcement process, based on housing fraud signals, within the municipality of Amsterdam.
<NAME> & <NAME> 2019
Basic intro on working with Dash: https://dash.plot.ly/getting-started
Example dashboards using maps in Dash (from dash-gallery.plotly.host/Portal):
github.com/plotly/dash-sample-apps/blob/master/apps/dash-oil-and-gas/app.py
github.com/plotly/dash-oil-gas-ternary
This dashboard took some inspiration from this video:
https://www.youtube.com/watch?v=lu0PtsMor4E
Inspiration has also been taken from the corresponding codebase:
https://github.com/amyoshino/Dash_Tutorial_Series (careful: this repo seems to be full of errors!!)
"""
####################################################################################################
#############
## Imports ##
#############
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table as dt
import dash_table.FormatTemplate as FormatTemplate
from dash.dependencies import Input, Output, State, ClientsideFunction
import pandas as pd
import urllib
import json
import sys
import os
import re
import q
from copy import deepcopy
import plotly.graph_objs as go
# Add the parent paths to sys.path, so our own modules on the root dir can also be imported.
SCRIPT_PATH = os.path.abspath(__file__)
SCRIPT_DIR = os.path.dirname(SCRIPT_PATH)
PARENT_PATH = os.path.join(SCRIPT_DIR, os.path.pardir)
sys.path.append(PARENT_PATH)
# Import own modules.
import config
import dashboard_helper
#################################
## Load server or mock-up data ##
#################################
# Try to create a list of 100 meldingen from the data.
try:
df = dashboard_helper.process_recent_signals()
print('Succesfully created prediction for recent signals.')
except:
df = pd.read_csv(os.path.join(SCRIPT_DIR, 'mockup_dataset.csv'), sep=';', skipinitialspace=True)
print('Cannot generate predictions from the data. Falling back to using the mockup_dataset.csv')
df_proactief = pd.read_csv(os.path.join(SCRIPT_DIR, 'mockup_dataset_proactief.csv'), sep=';', skipinitialspace=True)
df_unsupervised = pd.read_csv(os.path.join(SCRIPT_DIR, 'mockup_dataset_unsupervised.csv'), sep=';', skipinitialspace=True)
#########################
## Define site visuals ##
#########################
colors = {'paper': '#DDDDDD',
'background': '#F2F2F2',
'container_background': '#F9F9F9',
'text': '#1E4363',
'marker': '#1E4363',
'fraud': 'rgb(200, 50, 50)',
'no_fraud': 'rgb(150, 150, 150)',
'selected': 'rgb(75, 75, 75)',
}
###############################
## Set some global variables ##
###############################
# Get dictionary of columns for DataTable.
SELECTED_COLUMNS = ['fraude_kans', 'woonfraude', 'adres_id', 'sdl_naam', 'categorie', 'eigenaar']
TABLE_COLUMNS = [{'name': i, 'id': i} for i in SELECTED_COLUMNS]
# Define styling for the first column (fraude_kans), to reduce the decimals after comma.
TABLE_COLUMNS[0]['name'] = 'Fraude kans (%)'
TABLE_COLUMNS[0]['type'] = 'numeric'
TABLE_COLUMNS[0]['format'] = FormatTemplate.percentage(2)
##########################
## Define the dashboard ##
##########################
# external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
# app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app = dash.Dash(__name__)
server = app.server
app.title = 'Woonfraude Dashboard'
# Defines the meldingen tab.
meldingen_tab = html.Div(
[
# Div containing a selection of the data based on dropdown selection.
html.Div(id='intermediate_value', style={'display': 'none'}),
# Divs contain a lists of points which have been selected with on-clicks on the map.
html.Div(id='point_selection', style={'display': 'none'}),
html.Div(id='filtered_point_selection', style={'display': 'none'}),
# Row containing filters, info boxes, and map.
html.Div(
[
# Filters div.
html.Div(
[
# Create drop down filter for categories.
html.P('Selecteer categorieën:', className="control_label"),
dcc.Dropdown(
id='categorie_dropdown',
placeholder='Selecteer categorieën',
options=[{'label': x, 'value': x} for x in sorted(df.categorie.unique())],
multi=True,
value=df.categorie.unique(),
),
# Create drop down filter for city parts.
html.P('Selecteer stadsdelen:', className="control_label"),
dcc.Dropdown(
id='stadsdeel_dropdown',
placeholder='Selecteer stadsdelen',
options=[{'label': x, 'value': x} for x in sorted(df.sdl_naam.unique())],
multi=True,
value=sorted(df.sdl_naam.unique()),
),
# Show info of items selected on map (using click).
html.Div(
[
html.P('Geselecteerde adressen:', className="control_label"),
dt.DataTable(
id='filtered_point_selection_table',
columns = TABLE_COLUMNS[1:-1],
sort_action='native',
sort_by=[{'column_id': 'fraude_kans', 'direction': 'desc'}],
page_action='native',
page_current=0,
page_size=20,
style_data_conditional=[
{
'if': {
'column_id': 'woonfraude',
'filter_query': '{woonfraude} eq True'
},
'backgroundColor': colors['fraud'],
},
{
'if': {
'column_id': 'woonfraude',
'filter_query': '{woonfraude} eq False'
},
'backgroundColor': colors['no_fraud'],
},
{
'if': {
'column_id': 'fraude_kans',
'filter_query': '{fraude_kans} ge 0.5'
},
'backgroundColor': colors['fraud'],
},
{
'if': {
'column_id': 'fraude_kans',
'filter_query': '{fraude_kans} lt 0.5'
},
'backgroundColor': colors['no_fraud'],
},
]
),
],
),
# Link to download csv with all selected addresses.
html.A(
'Download lijst geselecteerde adressen (CSV)',
id='download_selected_addresses_list',
download="geselecteerde_adressen.csv",
href="",
target="_blank",
),
# Button test.
html.P(''),
html.Button('Test', id='button'),
html.P('', id='button_n_clicks')
],
id='leftCol',
className="pretty_container four columns",
),
# Widgets and map div.
html.Div(
[
# # Row with 4 statistics widgets
# html.Div(
# [
# # Aantal meldingen (info box).
# html.Div(
# [
# html.P("Aantal meldingen"),
# html.H6(
# id="aantal_meldingen",
# className="info_text"
# )
# ],
# className="pretty_container"
# ),
# # Percentage fraude verwacht (info box).
# html.Div(
# [
# html.P("% Fraude verwacht"),
# html.H6(
# id="percentage_fraude_verwacht",
# className="info_text"
# )
# ],
# className="pretty_container"
# ),
# # Aantal geselecteerde meldingen (info box).
# html.Div(
# [
# html.P("Aantal geselecteerde meldingen"),
# html.H6(
# id="aantal_geselecteerde_meldingen",
# className="info_text"
# )
# ],
# className="pretty_container",
# style={'backgroundColor': '#F7D7D7'}
# ),
# # Percentage fraude verwacht bij geselecteerde meldingen (info box).
# html.Div(
# [
# html.P("% Fraude verwacht bij geselecteerde meldingen"),
# html.H6(
# id="percentage_fraude_verwacht_geselecteerd",
# className="info_text"
# )
# ],
# className="pretty_container",
# style={'backgroundColor': '#F7D7D7'}
# ),
# ],
# id="infoContainer",
# className="row"
# ),
# Map with selectable points.
html.Div(
dcc.Graph(
id='map',
config={'displayModeBar': False}, # Turned off to disable selection with box/lasso etc.
),
className="pretty_container",
# style={'height': 500}
),
],
id="rightCol",
className="eight columns"
),
],
className="row",
),
# Data table div.
html.Div(
[
# Filtered entries data table.
html.Div(
[
html.P('Gefilterde meldingen'),
dt.DataTable(
id='filtered_table',
columns = TABLE_COLUMNS,
sort_action='native',
sort_by=[{'column_id': 'fraude_kans', 'direction': 'desc'}],
# filter_action='native', # Maybe turn off? A text field to filter feels clunky..
# row_selectable='multi',
# selected_rows=[],
page_action='native',
page_current=0,
page_size=20,
style_data_conditional=[
{
'if': {
'column_id': 'woonfraude',
'filter_query': '{woonfraude} eq True'
},
'backgroundColor': colors['fraud'],
},
{
'if': {
'column_id': 'woonfraude',
'filter_query': '{woonfraude} eq False'
},
'backgroundColor': colors['no_fraud'],
},
{
'if': {
'column_id': 'fraude_kans',
'filter_query': '{fraude_kans} ge 0.5'
},
'backgroundColor': colors['fraud'],
},
{
'if': {
'column_id': 'fraude_kans',
'filter_query': '{fraude_kans} lt 0.5'
},
'backgroundColor': colors['no_fraud'],
}
]
),
],
className="pretty_container eight columns",
),
# Filtered entries stadsdeel split (pie chart).
html.Div(
[
html.P("Gefilterde meldingen - Stadsdeel split"),
dcc.Graph(
id="stadsdeel_split",
config={'displayModeBar': False},
)
],
id="stadsdeel",
className="pretty_container two columns"
),
# Filtered entries categorie split (pie chart).
html.Div(
[
html.P("Gefilterde meldingen - Categorie split"),
dcc.Graph(
id="categorie_split",
config={'displayModeBar': False},
)
],
id="categorie",
className="pretty_container two columns"
),
],
className="row"
),
],
id="mainContainer",
style={
"display": "flex",
"flex-direction": "column"
}
)
# Defines the proactief tab.
proactief_tab = html.Div(
[
# For creating a map_proactief callback function with an empty input.
html.Div(id='none_proactief',children=[],style={'display': 'none'}),
# Div for containing a selection of the data based on filters.
html.Div(id='intermediate_value_proactief', style={'display': 'none'}),
# Row containing filters, info boxes, and map.
html.Div(
[
# Filters div.
html.Div(
[
# Create range slider for number of meldingen.
html.P('Minimaal aantal meldingen op adres:', className="control_label"),
dcc.RangeSlider(
id='aantal_meldingen_rangeslider_proactief',
min=min(df_proactief.aantal_meldingen),
max=max(df_proactief.aantal_meldingen),
marks={i: f"{i}" for i in range(min(df_proactief.aantal_meldingen), max(df_proactief.aantal_meldingen)+1)},
value=[min(df_proactief.aantal_meldingen), max(df_proactief.aantal_meldingen)]
),
# Padding (temporary hack)
html.P(' '),
# Create slider for number of adults.
html.P('Aantal volwassenen', className="control_label"),
dcc.RangeSlider(
id='aantal_volwassenen_rangeslider_proactief',
min=min(df_proactief.aantal_volwassenen),
max=max(df_proactief.aantal_volwassenen),
marks={i: f"{i}" for i in range(min(df_proactief.aantal_volwassenen), max(df_proactief.aantal_volwassenen)+1)},
value=[min(df_proactief.aantal_volwassenen), max(df_proactief.aantal_volwassenen)]
),
# Padding (temporary hack)
html.P(' '),
# Create m2 per person slider.
html.P('Aantal m2 per persoon:', className="control_label"),
dcc.RangeSlider(
id='aantal_m2_per_persoon_rangeslider_proactief',
min=min(df_proactief.m2_per_persoon),
max=max(df_proactief.m2_per_persoon),
marks={i: f"{i}" for i in range(min(df_proactief.m2_per_persoon), max(df_proactief.m2_per_persoon)+1, 3)},
value=[min(df_proactief.m2_per_persoon), max(df_proactief.m2_per_persoon)]
),
# Padding (temporary hack)
html.P(' '),
# Create drop down filter for city parts.
html.P('Selecteer stadsdelen:', className="control_label"),
dcc.Dropdown(
id='stadsdeel_dropdown_proactief',
placeholder='Selecteer stadsdelen',
options=[{'label': x, 'value': x} for x in sorted(df_proactief.sdl_naam.unique())],
multi=True,
value=sorted(df_proactief.sdl_naam.unique()),
),
# Create hotline dropdown.
html.P('Is hotline melding:', className="control_label"),
dcc.Dropdown(
id='hotline_dropdown_proactief',
placeholder='Selecteer waarden',
options=[{'label': 'Ja', 'value': 'True'}, {'label': 'Nee', 'value': 'False'}],
multi=True,
value=['True', 'False']
),
# Create gebruikersdoel dropdown.
html.P('Selecteer gebruikersdoel:', className="control_label"),
dcc.Dropdown(
id='gebruikersdoel_dropdown_proactief',
placeholder='Selecteer gebruikersdoel',
options=[{'label': x, 'value': x} for x in sorted(df_proactief.gebruikersdoel.unique())],
multi=True,
value=sorted(df_proactief.gebruikersdoel.unique()),
),
# Create profiel dropdown.
html.P('Selecteer profiel:', className="control_label"),
dcc.Dropdown(
id='profiel_dropdown_proactief',
placeholder='Selecteer profiel',
options=[{'label': x, 'value': x} for x in sorted(df_proactief.profiel.unique())],
multi=True,
value=sorted(df_proactief.profiel.unique()),
),
],
id='leftCol_proactief',
className="pretty_container four columns",
),
# Map div.
html.Div(
[
# Map with selectable points.
html.Div(
dcc.Graph(
id='map_proactief',
config={'displayModeBar': False}, # Turned off to disable selection with box/lasso etc.
),
className="pretty_container",
# style={'height': 500}
),
],
id="rightCol_proactief",
className="eight columns"
),
],
className="row",
),
# Data table div.
html.Div(
[
# Filtered entries data table.
html.Div(
[
html.P('Gefilterde meldingen'),
dt.DataTable(
id='filtered_table_proactief',
columns = TABLE_COLUMNS,
sort_action='native',
sort_by=[{'column_id': 'fraude_kans', 'direction': 'desc'}],
# filter_action='native', # Maybe turn off? A text field to filter feels clunky..
# row_selectable='multi',
# selected_rows=[],
page_action='native',
page_current=0,
page_size=20,
style_data_conditional=[
{
'if': {
'column_id': 'woonfraude',
'filter_query': '{woonfraude} eq True'
},
'backgroundColor': colors['fraud'],
},
{
'if': {
'column_id': 'woonfraude',
'filter_query': '{woonfraude} eq False'
},
'backgroundColor': colors['no_fraud'],
},
{
'if': {
'column_id': 'fraude_kans',
'filter_query': '{fraude_kans} ge 0.5'
},
'backgroundColor': colors['fraud'],
},
{
'if': {
'column_id': 'fraude_kans',
'filter_query': '{fraude_kans} lt 0.5'
},
'backgroundColor': colors['no_fraud'],
}
]
),
],
className="pretty_container ten columns",
),
# Filtered entries stadsdeel split (pie chart).
html.Div(
[
html.P("Gefilterde meldingen - Stadsdeel split"),
dcc.Graph(
id="stadsdeel_split_proactief",
config={'displayModeBar': False},
)
],
id="stadsdeel_proactief",
className="pretty_container two columns"
),
],
className="row"
),
# html.Div(
# dcc.Graph(
# id='map_proactief',
# config={'displayModeBar': False}, # Turned off to disable selection with box/lasso etc.
# ),
# className="pretty_container",
# ),
],
style={
"display": "flex",
"flex-direction": "column"
}
)
# Defines the unsupervised tab.
unsupervised_tab = html.Div(
[
# For creating a map_unsupervised callback function with an empty input.
html.Div(id='none_unsupervised',children=[],style={'display': 'none'}),
# Div for containing a selection of the data based on filters.
html.Div(id='intermediate_value_unsupervised', style={'display': 'none'}),
html.Div(
dcc.Graph(
id='map_unsupervised',
config={'displayModeBar': False}, # Turned off to disable selection with box/lasso etc.
),
className="pretty_container",
),
],
style={
"display": "flex",
"flex-direction": "column"
}
)
# Combines the two tabs into a single app.
app.layout = html.Div([
# Title
html.H1("Woonfraude Dashboard", style={'textAlign': 'center'}),
# Tabs for meldingen & proactieve handhaving.
dcc.Tabs(id='tabs', value='meldingen_tab', children=[
dcc.Tab(label='Meldingen', value='meldingen_tab', children=[meldingen_tab]),
dcc.Tab(label='Proactieve handhaving', value='proactief_tab', children=[proactief_tab]),
dcc.Tab(label='Unsupervised', value='unsupervised_tab', children=[unsupervised_tab]),
])
])
# Updates the intermediate data based on the dropdown selection.
@app.callback(
Output('intermediate_value', 'children'),
[Input('categorie_dropdown', 'value'),
Input('stadsdeel_dropdown', 'value')]
)
def create_data_selection(selected_categories, selected_stadsdelen):
# Create a copy of the original dataframe.
df_filtered = deepcopy(df)
# Filter the original dataframe by selected categories.
df_filtered = df_filtered[df_filtered.categorie.isin(selected_categories)]
# Filter the dataframe by selected stadsdelen.
df_filtered = df_filtered[df_filtered.sdl_naam.isin(selected_stadsdelen)]
return df_filtered.to_json(date_format='iso', orient='split')
'''
# Updates the aantal_meldingen info box.
@app.callback(
Output('aantal_meldingen', 'children'),
[Input('intermediate_value', 'children')]
)
def count_items(intermediate_value):
# Load the pre-filtered version of the dataframe.
df = pd.read_json(intermediate_value, orient='split')
return len(df)
# Updates the percentage_fraude_verwacht info box.
@app.callback(
Output('percentage_fraude_verwacht', 'children'),
[Input('intermediate_value', 'children')]
)
def compute_fraud_percentage(intermediate_value):
# Load the pre-filtered version of the dataframe.
df = pd.read_json(intermediate_value, orient='split')
# Compute what percentage of cases is expected to be fraudulent. If/else to prevent division by 0.
if len(df.woonfraude) > 0:
fraude_percentage = len(df.woonfraude[df.woonfraude == True]) / len(df.woonfraude) * 100
else:
fraude_percentage = 0
# Return truncated value (better for printing on dashboard)
return round(fraude_percentage, 1)
# Updates the aantal_geselecteerde_meldingen info box.
@app.callback(
Output('aantal_geselecteerde_meldingen', 'children'),
[Input('filtered_point_selection', 'children')]
)
def count_items_selected(filtered_point_selection):
# Just return the amount of filtered selected points.
return len(filtered_point_selection)
# Updates the percentage_fraude_verwacht_geselecteerd info box.
@app.callback(
Output('percentage_fraude_verwacht_geselecteerd', 'children'),
[Input('intermediate_value', 'children'),
Input('filtered_point_selection', 'children')]
)
def compute_fraud_percentage_selected(intermediate_value, filtered_point_selection):
# Load the pre-filtered version of the dataframe.
df = pd.read_json(intermediate_value, orient='split')
# Reduce the dataframe using the point selection.
df = df[df.adres_id.isin(filtered_point_selection)]
# Compute what percentage of cases is expected to be fraudulent. If/else to prevent division by 0.
if len(df.woonfraude) > 0:
fraude_percentage = len(df.woonfraude[df.woonfraude == True]) / len(df.woonfraude) * 100
else:
fraude_percentage = 0
# Return truncated value (better for printing on dashboard)
return round(fraude_percentage, 1)
'''
# Updates the map based on dropdown-selections.
@app.callback(
Output('map', 'figure'),
[Input('intermediate_value', 'children'),
Input('point_selection', 'children')],
[State('map', 'figure')]
)
def plot_map(intermediate_value, point_selection, map_state):
# Define which input triggers the callback (map.figure or intermediate_value.children).
trigger_event = dash.callback_context.triggered[0]['prop_id']
# Load the pre-filtered version of the dataframe.
df_map = pd.read_json(intermediate_value, orient='split')
# Select positive and negative samples for plotting.
pos = df_map[df_map.woonfraude==True]
neg = df_map[df_map.woonfraude==False]
# Create a df of the selected points, for highlighting.
selected_point_ids = [int(x) for x in point_selection]
sel = df_map.loc[df_map.adres_id.isin(selected_point_ids)]
# Create texts for when hovering the mouse over items.
def make_hover_string(row):
return f"Adres id: {row.adres_id}\
<br>Categorie: {row.categorie}\
<br>Aantal inwoners: {row.aantal_personen}\
<br>Aantal achternamen: {row.aantal_achternamen}\
<br>Eigenaar: {row.eigenaar}"
pos_text = pos.apply(make_hover_string, axis=1)
neg_text = neg.apply(make_hover_string, axis=1)
sel_text = sel.apply(make_hover_string, axis=1)
figure={
'data': [
# Plot border for selected samples (plot first, so its behind the pos/neg samples).
go.Scattermapbox(
name='Geselecteerd',
lat=sel['wzs_lat'],
lon=sel['wzs_lon'],
text=sel_text,
mode='markers',
marker=dict(
size=17,
color=colors['selected'],
),
),
# Plot positive samples.
go.Scattermapbox(
name='Woonfraude verwacht',
lat=pos['wzs_lat'],
lon=pos['wzs_lon'],
text=pos_text,
hoverinfo='text',
mode='markers',
marker=dict(
size=12,
color=colors['fraud'],
),
),
# Plot negative samples.
go.Scattermapbox(
name='<NAME>',
lat=neg['wzs_lat'],
lon=neg['wzs_lon'],
text=neg_text,
hoverinfo='text',
mode='markers',
marker=dict(
size=12,
color=colors['no_fraud'],
),
),
],
'layout': go.Layout(
uirevision='never',
autosize=True,
hovermode='closest',
# width=1000,
height=700,
margin=go.layout.Margin(l=0, r=0, b=0, t=0, pad=0),
showlegend=False, # Set to False, since legend selection breaks custom point selection.
legend=dict(orientation='h'),
plot_bgcolor=colors['background'],
paper_bgcolor=colors['paper'],
mapbox=dict(
accesstoken=config.mapbox_access_token,
style="light",
center=dict(
lat=52.36,
lon=4.89
),
zoom=11,
),
)
}
return figure
# Updates the table showing all data points after dropdown-selections.
@app.callback(
Output('filtered_table', 'data'),
[Input('intermediate_value', 'children')]
)
def generate_filtered_table(intermediate_value):
# Load the pre-filtered version of the dataframe.
df_table = pd.read_json(intermediate_value, orient='split')
# Transform True and False boolean values to strings.
df_table.woonfraude = df_table.woonfraude.replace({True: 'True', False: 'False'})
# Only use a selection of the columns.
df_table = df_table[SELECTED_COLUMNS]
# Create a table, with all positive woonfraude examples at the top.
columns = [{"name": i, "id": i} for i in df_table.columns]
data = df_table.to_dict('records')
return data
# Enable the selection of map points using click-events.
@app.callback(
Output('point_selection', 'children'),
[Input('map', 'clickData'),
Input('intermediate_value', 'children')],
[State('point_selection', 'children')]
)
def update_point_selection_on_click(clickData, intermediate_value, existing_point_selection):
"""
Update point selection with newly selected points, or according to dropdown filters.
The input "intermediate_value:children" is only used to activate a callback.
"""
# Define which input triggers the callback (map.clickData or intermediate_value.children).
trigger_event = dash.callback_context.triggered[0]['prop_id']
# Re-use previous point selection (if it already existed).
point_selection = []
if existing_point_selection != None:
point_selection = existing_point_selection
# Add a clicked point to the selection, or remove it when it already existed in the selection.
if trigger_event == 'map.clickData':
if clickData != None:
            point_id = re.match(r"Adres id: (\d+)", clickData['points'][0]['text']).group(1)
if point_id in point_selection:
point_selection.remove(point_id)
else:
point_selection.append(point_id)
return point_selection
# Create a filtered version of the point_selection, based on the categorie and stadsdeel filters.
@app.callback(
Output('filtered_point_selection', 'children'),
[Input('point_selection', 'children'),
Input('intermediate_value', 'children')]
)
def show_selected(existing_point_selection, intermediate_value):
# Re-use previous point selection (if it already existed).
point_selection = []
if existing_point_selection != None:
point_selection = existing_point_selection
# Filter any previously selected points, if the dropdown selections rule them out.
df = pd.read_json(intermediate_value, orient='split') # Load the pre-filtered version of the dataframe.
point_ids_list = [str(x) for x in list(df.adres_id)]
for point_id in point_selection:
if point_id not in point_ids_list:
point_selection.remove(point_id)
return point_selection
# Updates the table showing a list of the selected & filtered points.
@app.callback(
Output('filtered_point_selection_table', 'data'),
[Input('intermediate_value', 'children'),
Input('filtered_point_selection', 'children')]
)
def generate_filtered_point_selection_table(intermediate_value, filtered_point_selection):
# First check if any points have been selected.
if filtered_point_selection == []:
return []
else:
# Turn list of point_ids into a list of numbers instead of strings
point_selection = [int(x) for x in filtered_point_selection]
# Load the pre-filtered version of the dataframe.
df = pd.read_json(intermediate_value, orient='split')
# Reduce the dataframe using the point selection.
df = df[df.adres_id.isin(point_selection)]
# Transform True and False boolean values to strings.
df.woonfraude = df.woonfraude.replace({True: 'True', False: 'False'})
# Only use a selection of the columns.
df = df[SELECTED_COLUMNS]
# Create a table, with all positive woonfraude examples at the top.
columns = [{"name": i, "id": i} for i in df.columns]
data = df.to_dict('records')
return data
# TODO: CHANGE WHEN THE DOWNLOAD LINK IS UPDATED WITH NEW DATA.
# NOW THIS CODE BELOW IS RAN EVERY TIME A POINT IS (DE)SELECTED,
# THIS IS TERRIBLY INEFFICIENT. ACCEPTABLE FOR THE MVP, BUT SHOULD BE CHANGED.
# Creates a download link for the filtered_point_selection_table data.
@app.callback(
Output('download_selected_addresses_list', 'href'),
[Input('filtered_point_selection_table', 'data')])
def update_download_link(filtered_point_selection_table):
"""Updates the csv download link with the data in the filtered point selection table."""
if filtered_point_selection_table == []:
point_selection = []
else:
# Turn list of point_ids into a list of numbers instead of strings
point_selection = filtered_point_selection_table
# Convert to df, then to csv string, then return for downloading.
df = | pd.DataFrame(point_selection) | pandas.DataFrame |
import matplotlib
import numpy as np
matplotlib.use("Agg")
import numpy
import pandas
import logging
logger = logging.getLogger('gssnng')
def error_checking(
adata,
samp_neighbors,
gs_obj,
score_method,
ranked
):
"""
QC on the adata. Need to make sure there's enough neighbors available given the sampling size.
:param adata: the AnnData object
:param samp_neighbors: integer, number of neighbors to sample
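    :param gs_obj: the gene set collection to score (not validated here)
    :param score_method: string, name of the scoring method (e.g. 'singscore')
    :param ranked: boolean, whether the expression data have been rank-transformed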
"""
if ranked == False and score_method == 'singscore':
return('ERROR: singscore requires ranked data, set ranked parameter to True')
    n_neighbors = adata.uns['neighbors']['params']['n_neighbors']  # [0] indexing may be needed for older AnnData versions
if n_neighbors < samp_neighbors:
print('*******')
print('WARNING: Number of neighbors too low for sampling parameter!')
print('Please reduce number of neighbor samples or recompute neighbor graph.')
return('ERROR')
else:
return('OK')
def read_gene_sets(filepath):
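    # Assumes a .gmt-style file: one gene set per line, tab-separated as
    # "<name>\t<description>\t<gene1>\t<gene2>...". Returns {name: [genes]}.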
txt = open(filepath).read().split('\n')
gd = dict()
for line in txt:
bits = line.split('\t')
if len(bits) >= 3:
gd[bits[0]] = bits[2:]
return(gd)
def add_noise(df, n, noise_low, noise_high):
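    # Returns a (len(df) x n) DataFrame: the gene_counts column repeated n times, each copy
    # perturbed with independent uniform noise drawn from [noise_low, noise_high).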
#df2 = df.copy()
# draw all random noise in one call
dfs = pandas.Series(df.gene_counts)
randmat = pandas.DataFrame(numpy.random.uniform(noise_low, noise_high, (dfs.size, n) ) )
randmat.index = df.index
randmat = randmat.apply(lambda x: x + dfs)
return(randmat)
def get_conn_dist(q, celli, nn):
# q is an adata
# celli is cell i in q
# nn is the number of neighbors
jdx = numpy.where(q.obsp['distances'][celli].todense() > 0)[1]
y = [q.obsp['distances'][celli].todense().tolist()[0][ i ] for i in jdx]
x = [q.obsp['connectivities'][celli].todense().tolist()[0][ i ] for i in jdx]
df = | pandas.DataFrame({'idx':jdx, 'conn':x, 'dist':y}) | pandas.DataFrame |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
    # Equivalent to cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
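# Illustrative sketch (helper added for clarity, not collected by pytest) of
# the invariant asserted above: partition_by_hash sends every row with the
# same key value to the same partition, so key sets are pairwise disjoint.
def _sketch_partition_by_hash_invariant():
    gdf = cudf.DataFrame({"key": [1, 2, 1, 3, 2], "val": [10, 20, 30, 40, 50]})
    parts = gdf.partition_by_hash(["key"], nparts=2)
    keysets = [set(p["key"].to_pandas()) for p in parts if len(p)]
    for i, left in enumerate(keysets):
        for other in keysets[i + 1 :]:
            assert left.isdisjoint(other)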
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
    if dtype1 != dtype2 and ("datetime" in dtype1 or "datetime" in dtype2):
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # numerical columns are upcasted to float in cudf.DataFrame.to_pandas(),
    # which casts nan to 0 in non-float numerical columns, so normalize the
    # cudf result to float64 with NaN before comparing
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
    # PyArrow's to_pandas() converts to a numpy array, which gives better
    # type compatibility for this comparison
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
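# Illustrative sketch (helper added for clarity, not collected by pytest):
# for plain numeric columns the Arrow conversion round-trips losslessly,
# which is the property the to/from_arrow tests above rely on.
def _sketch_arrow_round_trip():
    gdf = cudf.DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
    table = gdf.to_arrow(preserve_index=False)
    assert_eq(gdf, cudf.DataFrame.from_arrow(table))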
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
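# Illustrative sketch (pandas-only helper added for clarity, not collected by
# pytest) of the min_count semantics compared above: when fewer than
# min_count valid values remain after skipping NA, the reduction returns NA.
def _sketch_sum_min_count():
    s = pd.Series([1.0, np.nan])
    assert s.sum(min_count=1) == 1.0
    assert np.isnan(s.sum(min_count=2))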
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
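# Illustrative sketch (helper added for clarity, not collected by pytest):
# a scalar q yields a scalar result while a list of quantiles yields a
# Series indexed by q -- the shapes the comparison above relies on. Assumes
# the default "linear" interpolation.
def _sketch_quantile_scalar_vs_list():
    s = cudf.Series([1, 2, 3, 4])
    assert s.quantile(0.5) == 2.5  # interpolated between 2 and 3
    assert list(s.quantile([0.0, 1.0]).to_array()) == [1.0, 4.0]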
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash of a string name differs between runs, which can make
    # enc_with_name_arr and enc_arr coincide by accident. Using an integer
    # name gives a constant hash value, so the two encodings stay distinct.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
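# Illustrative sketch (helper added for clarity, not collected by pytest):
# hash_encode maps every value into the half-open range [0, num_features),
# assuming the positional argument is the number of buckets as in the test
# above.
def _sketch_hash_encode_range():
    enc = cudf.Series([10, 20, 30]).hash_encode(16)
    assert ((enc >= 0) & (enc < 16)).all()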
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
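# Illustrative sketch (helper added for clarity, not collected by pytest) of
# the np.digitize semantics mirrored above: `right` controls whether a value
# equal to a bin edge falls into the lower or the upper bin.
def _sketch_digitize_right():
    s = cudf.Series([1, 5, 10])
    bins = np.array([5], dtype=s.dtype)
    assert list(s.digitize(bins, False).to_array()) == [0, 1, 1]
    assert list(s.digitize(bins, True).to_array()) == [0, 0, 1]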
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguious():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Typecast the pandas Series because None would otherwise produce an
    # `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(pdf_new_name, gdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_mulitindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas' sort_index only gained ignore_index in 1.0, so compute the
    # expected result without it and reset the index manually below
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
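# Illustrative sketch (helper added for clarity, not collected by pytest):
# nan_as_null decides whether a floating-point NaN becomes a null entry or
# stays a NaN value, which is what drives the Arrow comparison above.
def _sketch_nan_as_null():
    data = [1.0, np.nan]
    assert cudf.Series(data, nan_as_null=True).null_count == 1
    assert cudf.Series(data, nan_as_null=False).null_count == 0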
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
    for c in gdf.columns:
        # the null mask of each rounded column should match the input's
        assert np.array_equal(
            gdf[c].nullmask.to_array(), result[c].nullmask.to_array()
        )
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
    # Pandas treats `None` in object-dtype columns as True for some reason,
    # so replace it with `False` before comparing
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
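# `any` mirrors the `all` test above, additionally parametrizing over axis;
# calling it on a Series with axis=1 is expected to raise NotImplementedError.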
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
    index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
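# astype coverage: Series are cast between every pair of numeric dtypes and
# then into str/category/datetime targets, comparing with pandas wherever the
# semantics overlap (null-containing casts are pinned explicitly below).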
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
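# Null propagation through astype: each block below constructs the expected
# Series directly in the target dtype and checks that null positions are
# preserved across numeric, categorical, string, and datetime casts.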
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
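# Constructing a DataFrame from a tuple or list of pandas Series should behave
# like the pandas constructor, with and without an explicit index.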
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
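# value_counts results are sorted by index before comparison (the ordering of
# ties can differ from pandas) and dtypes are not checked.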
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
                reason="pandas's failure here seems like a bug (in < 1.2) "
                "given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
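# MultiIndex.isin: `level` restricts matching to a single level; flat
# (non-tuple) values with level=None are expected to raise a TypeError in both
# pandas and cudf.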
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
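        # NOTE: the misspelled "squences" below is intentional; the expected
        # message is matched against the error text raised by cudf.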
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
except ValueError as e:
            if str(e) == "Lengths must match." and not PANDAS_GE_110:
                # pandas < 1.1 raises here for length-mismatched inputs:
                # https://github.com/pandas-dev/pandas/issues/34256
                pytest.xfail(
                    "https://github.com/pandas-dev/pandas/issues/34256"
                )
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
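# The pandas constructor protocol under test:
#   DataFrame._constructor        -> DataFrame
#   DataFrame._constructor_sliced -> Series (a single column)
#   Series._constructor_expanddim -> DataFrame (promotion to 2-D)
# The combinations cudf does not define are expected to raise
# NotImplementedError.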
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
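# The row-wise reduction tests below pin their expected outputs explicitly
# (rather than comparing with pandas) so that null vs. NaN handling under
# nan_as_null=False is checked precisely.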
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
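# cov is computed on the GPU via cupy, which (per the xfail below) does not
# currently handle null values.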
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
# Special Case: Assume RangeIndex size == 0
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
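# The memory_usage variants below cover string, categorical, list, and
# MultiIndex data; the string and MultiIndex cases are currently xfailed,
# while the categorical and list cases compare against sizes computed from the
# underlying cudf columns.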
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas uses NaN and typecasts to float64 if there are missing values on
    # alignment, so we need to typecast to float64 for equality comparison
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
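# Assigning with a tuple key should behave like pandas, both for the stored
# values and for the resulting column index.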
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
if pd.api.types.is_categorical_dtype(expect_where):
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
assert_exceptions_equal(
lfunc=ps_where.where,
rfunc=gs_where.where,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False
if error is NotImplementedError
else True,
)
assert_exceptions_equal(
lfunc=ps_mask.mask,
rfunc=gs_mask.mask,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False,
)
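# where/mask on string and categorical frames: when no categorical columns are
# involved the pandas result is cast to str and both sides are compared after
# fillna; otherwise the categorical results are compared directly.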
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
ps = data
gs = cudf.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
pdf = pd.DataFrame({"one_col": data})
if error == NotImplementedError:
with pytest.raises(error):
cudf.from_pandas(data)
with pytest.raises(error):
cudf.Series(data)
with pytest.raises(error):
cudf.from_pandas(pdf)
with pytest.raises(error):
cudf.DataFrame(pdf)
else:
df = cudf.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = cudf.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
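# nan_as_null controls whether NaN coming from pandas is imported as a cudf
# null; the expected frames below are built column-by-column via
# column.as_column so that the conversion is explicit.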
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
data = [np.nan, 2.0, 3.0]
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
        expected = cudf.DataFrame(
            {
                "a": column.as_column(data, nan_as_null=nan_as_null),
                "b": column.as_column(data, nan_as_null=nan_as_null),
            }
        )
expected = expected.set_index(index)
got = cudf.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = cudf.Series(column.as_column(data, nan_as_null=nan_as_null))
got = cudf.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
assert_exceptions_equal(
lfunc=psr.astype,
rfunc=gsr.astype,
lfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
rfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
expect = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(data, columns=columns)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
expect = pd.DataFrame(data, columns=None)
actual = cudf.DataFrame(data, columns=None)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
gd_data = data
if isinstance(data, cupy.core.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
# additional test for building DataFrame with gpu array whose
# cuda array interface has no `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
pdf = pd.DataFrame(pd_data, columns=cols, index=index)
gdf = cudf.DataFrame(gd_data, columns=cols, index=index)
assert_eq(pdf, gdf, check_dtype=False)
# verify with columns
pdf = pd.DataFrame(pd_data, columns=cols)
gdf = cudf.DataFrame(gd_data, columns=cols)
assert_eq(pdf, gdf, check_dtype=False)
pdf = pd.DataFrame(pd_data)
gdf = cudf.DataFrame(gd_data)
assert_eq(pdf, gdf, check_dtype=False)
if numba_data is not None:
gdf = cudf.DataFrame(numba_data)
assert_eq(pdf, gdf, check_dtype=False)
@pytest.mark.parametrize(
"col_data",
[
range(5),
["a", "b", "x", "y", "z"],
[1.0, 0.213, 0.34332],
["a"],
[1],
[0.2323],
[],
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar(col_data, assign_val):
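    # assigning a host or device scalar to a new column should broadcast it
    # across the existing index, matching pandas (cupy scalars are copied to
    # host for the pandas reference result)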
pdf = pd.DataFrame({"a": col_data})
gdf = cudf.DataFrame({"a": col_data})
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"col_data",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar_with_scalar_cols(col_data, assign_val):
pdf = pd.DataFrame(
{
"a": cupy.asnumpy(col_data)
if isinstance(col_data, cupy.ndarray)
else col_data
},
index=["dummy_mandatory_index"],
)
gdf = cudf.DataFrame({"a": col_data}, index=["dummy_mandatory_index"])
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
def test_dataframe_info_basic():
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 10 entries, a to 1111
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 0 10 non-null float64
1 1 10 non-null float64
2 2 10 non-null float64
3 3 10 non-null float64
4 4 10 non-null float64
5 5 10 non-null float64
6 6 10 non-null float64
7 7 10 non-null float64
8 8 10 non-null float64
9 9 10 non-null float64
dtypes: float64(10)
memory usage: 859.0+ bytes
"""
)
df = pd.DataFrame(
np.random.randn(10, 10),
index=["a", "2", "3", "4", "5", "6", "7", "8", "100", "1111"],
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
def test_dataframe_info_verbose_mem_usage():
buffer = io.StringIO()
df = pd.DataFrame({"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]})
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Columns: 2 entries, a to b
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=False)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]},
index=["sdfdsf", "sdfsdfds", "dsfdf"],
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 3 entries, sdfdsf to dsfdf
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 91.0 bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True, memory_usage="deep")
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0 bytes
"""
)
df.info(buf=buffer, verbose=True, memory_usage="deep")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
def test_dataframe_info_null_counts():
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Dtype
--- ------ -----
0 int_col int64
1 text_col object
2 float_col float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
"""
)
df.info(buf=buffer, verbose=True, null_counts=False)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, verbose=True, max_cols=0)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 0 entries
Empty DataFrame"""
)
df.info(buf=buffer, verbose=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame(
{
"a": [1, 2, 3, None, 10, 11, 12, None],
"b": ["a", "b", "c", "sd", "sdf", "sd", None, None],
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Dtype
--- ------ -----
0 a int64
1 b object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
pd.options.display.max_info_rows = 2
df.info(buf=buffer, max_cols=2, null_counts=None)
pd.reset_option("display.max_info_rows")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 6 non-null int64
1 b 6 non-null object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
df.info(buf=buffer, max_cols=2, null_counts=None)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, null_counts=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
@pytest.mark.parametrize(
"data1",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize("rtol", [0, 0.01, 1e-05, 1e-08, 5e-1, 50.12])
@pytest.mark.parametrize("atol", [0, 0.01, 1e-05, 1e-08, 50.12])
def test_cudf_isclose(data1, data2, rtol, atol):
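    # use cupy.isclose on device arrays as the reference result, then check
    # that cudf.isclose accepts Series, plain lists, cupy/numpy arrays and
    # pandas Series and produces the same answer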
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, rtol=rtol, atol=atol))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(data1, data2, rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = cudf.isclose(
cupy.array(data1), cupy.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
np.array(data1), np.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
pd.Series(data1), pd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data1",
[
[
-1.9876543,
-2.9876654,
np.nan,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
np.nan,
-21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
np.nan,
np.nan,
np.nan,
21.1212,
],
],
)
@pytest.mark.parametrize("equal_nan", [True, False])
def test_cudf_isclose_nulls(data1, data2, equal_nan):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, equal_nan=equal_nan))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), equal_nan=equal_nan
)
assert_eq(expected, actual, check_dtype=False)
actual = cudf.isclose(data1, data2, equal_nan=equal_nan)
assert_eq(expected, actual, check_dtype=False)
def test_cudf_isclose_different_index():
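    # cudf.isclose should align the two Series on their indices before
    # comparing, so reordered or partially mismatched indices are compared
    # by label rather than by position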
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 3, 4, 2],
)
expected = cudf.Series([True] * 6, index=s1.index)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 10, 4, 2],
)
expected = cudf.Series(
[True, True, True, False, True, True], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[100, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 100, 10, 4, 2],
)
expected = cudf.Series(
[False, True, True, False, True, False], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
def test_dataframe_to_dict_error():
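    # to_dict() would require copying to host memory, which cuDF deliberately
    # refuses; both DataFrame.to_dict and Series.to_dict should raise a
    # descriptive TypeError pointing at .to_pandas().to_dict()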
df = cudf.DataFrame({"a": [1, 2, 3], "b": [9, 5, 3]})
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df.to_dict()
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df["a"].to_dict()
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]}),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
}
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=[10, 20, 30, 40, 50, 60],
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=["a", "b", "c", "d", "e", "f"],
),
pd.DataFrame(index=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(columns=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(index=[10, 11, 12]),
pd.DataFrame(columns=[10, 11, 12]),
pd.DataFrame(),
pd.DataFrame({"one": [], "two": []}),
pd.DataFrame({2: [], 1: []}),
pd.DataFrame(
{
0: [1, 2, 3, 4, 5, 10],
1: ["abc", "def", "ghi", "xyz", "pqr", "abc"],
100: ["a", "b", "b", "x", "z", "a"],
},
index=[10, 20, 30, 40, 50, 60],
),
],
)
def test_dataframe_keys(df):
gdf = cudf.from_pandas(df)
assert_eq(df.keys(), gdf.keys())
@pytest.mark.parametrize(
"ps",
[
| pd.Series([1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]) | pandas.Series |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
##find parent directory and import model
#parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
#sys.path.append(parentddir)
from ..agdrift_exe import Agdrift
test = {}
class TestAgdrift(unittest.TestCase):
"""
    Agdrift unit tests.
"""
def setUp(self):
"""
setup the test as needed
e.g. pandas to open agdrift qaqc csv
Read qaqc csv and create pandas DataFrames for inputs and expected outputs
:return:
"""
pass
def tearDown(self):
"""
teardown called after each test
e.g. maybe write test results to some text file
:return:
"""
pass
def create_agdrift_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty agdrift object
agdrift_empty = Agdrift(df_empty, df_empty)
return agdrift_empty
def test_validate_sim_scenarios(self):
"""
:description determines if user defined scenarios are valid for processing
:param application_method: type of Tier I application method employed
        :param aquatic_body_type: type of endpoint of concern (e.g., pond, wetland); implies whether
        : endpoint of concern parameters (e.g., pond width) are set (i.e., by user or EPA standard)
:param drop_size_*: qualitative description of spray droplet size for aerial & ground applications
:param boom_height: qualitative height above ground of spray boom
:param airblast_type: type of orchard being sprayed
:NOTE we perform an additional validation check related to distances later in the code just before integration
:return
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
agdrift_empty.out_sim_scenario_chk = pd.Series([], dtype='object')
expected_result = pd.Series([
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Aquatic Ground Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Aquatic Ground Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Valid Tier I Aquatic Airblast Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Valid Tier I Aquatic Airblast Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Invalid Tier I Aquatic Aerial Scenario',
'Invalid Tier I Aquatic Ground Scenario',
'Invalid Tier I Aquatic Airblast Scenario',
'Invalid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Invalid scenario ecosystem_type',
'Invalid Tier I Aquatic Assessment application method',
'Invalid Tier I Terrestrial Assessment application method'],dtype='object')
try:
#set test data
agdrift_empty.num_simulations = len(expected_result)
agdrift_empty.application_method = pd.Series(
['tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_aerial',
'tier_1_ground',
'tier_1_airblast',
'tier_1_aerial',
'tier_1_ground',
'tier_1_airblast',
'tier_1_aerial',
'Tier II Aerial',
'Tier III Aerial'], dtype='object')
agdrift_empty.ecosystem_type = pd.Series(
['aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'terrestrial_assessment',
'terrestrial_assessment',
'Field Assessment',
'aquatic_assessment',
'terrestrial_assessment'], dtype='object')
agdrift_empty.aquatic_body_type = pd.Series(
['epa_defined_pond',
'NaN',
'epa_defined_wetland',
'NaN',
'user_defined_pond',
'NaN',
'user_defined_wetland',
'NaN',
'epa_defined_wetland',
'NaN',
'user_defined_pond',
'NaN',
'user_defined_wetland',
'NaN',
'Defined Pond',
'user_defined_pond',
'epa_defined_pond',
'NaN',
'NaN',
'NaN',
'epa_defined_pond',
'user_defined_wetland',
'user_defined_pond'], dtype='object')
agdrift_empty.terrestrial_field_type = pd.Series(
['NaN',
'user_defined_terrestrial',
'NaN',
'epa_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'epa_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'NaN',
'NaN',
'user_defined_terrestrial',
'user_defined_terrestrial',
'user_defined_terrestrial',
'NaN',
'NaN',
'user_defined_terrestrial'], dtype='object')
agdrift_empty.drop_size_aerial = pd.Series(
['very_fine_to_fine',
'fine_to_medium',
'medium_to_coarse',
'coarse_to_very_coarse',
'fine_to_medium',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'medium_to_coarse',
'NaN',
'very_fine_to_medium',
'NaN',
'very_fine Indeed',
'NaN',
'very_fine_to_medium',
'medium_to_coarse',
'NaN'], dtype='object')
agdrift_empty.drop_size_ground = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'fine_to_medium-coarse',
'very_fine',
'fine_to_medium-coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'NaN',
'fine_to_medium-coarse',
'very_fine',
'NaN',
'very_fine_to_medium',
'NaN',
'very_fine'], dtype='object')
agdrift_empty.boom_height = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'high',
'low',
'high',
'low',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'high',
'NaN',
'NaN',
'NaN',
'NaN'],dtype='object')
agdrift_empty.airblast_type = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'normal',
'dense',
'sparse',
'orchard',
'vineyard',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'vineyard',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.validate_sim_scenarios()
result = agdrift_empty.out_sim_scenario_chk
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_set_sim_scenario_id(self):
"""
:description provides scenario ids per simulation that match scenario names (i.e., column_names) from SQL database
:param out_sim_scenario_id: scenario name as assigned to individual simulations
:param num_simulations: number of simulations to assign scenario names
:param out_sim_scenario_chk: from previous method where scenarios were checked for validity
:param application_method: application method of scenario
:param drop_size_*: qualitative description of spray droplet size for aerial and ground applications
:param boom_height: qualitative height above ground of spray boom
:param airblast_type: type of airblast application (e.g., vineyard, orchard)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series(['aerial_vf2f',
'aerial_f2m',
'aerial_m2c',
'aerial_c2vc',
'ground_low_vf',
'ground_low_fmc',
'ground_high_vf',
'ground_high_fmc',
'airblast_normal',
'airblast_dense',
'airblast_sparse',
'airblast_vineyard',
'airblast_orchard',
'Invalid'], dtype='object')
try:
agdrift_empty.num_simulations = len(expected_result)
agdrift_empty.out_sim_scenario_chk = pd.Series(['Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Invalid Scenario'], dtype='object')
agdrift_empty.application_method = pd.Series(['tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_aerial'], dtype='object')
agdrift_empty.drop_size_aerial = pd.Series(['very_fine_to_fine',
'fine_to_medium',
'medium_to_coarse',
'coarse_to_very_coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.drop_size_ground = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'fine_to_medium-coarse',
'very_fine',
'fine_to_medium-coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.boom_height = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'low',
'low',
'high',
'high',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.airblast_type = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'normal',
'dense',
'sparse',
'vineyard',
'orchard',
'NaN'], dtype='object')
agdrift_empty.set_sim_scenario_id()
result = agdrift_empty.out_sim_scenario_id
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_assign_column_names(self):
"""
        :description assigns column names (except distance column) from sql database to internal scenario names
        :param column_name: short name for pesticide application scenario for which distance vs deposition data is provided
        :param scenario_name: internal variable for holding scenario names
        :param scenario_number: index for scenario_name (this method assumes the distance values could occur in any column)
:param distance_name: internal name for the column holding distance data
:NOTE to test both outputs of this method I simply appended them together
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
agdrift_empty.scenario_name = pd.Series([], dtype='object')
expected_result = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc',
'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse',
'airblast_vineyard', 'airblast_orchard'], dtype='object')
try:
agdrift_empty.column_names = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc',
'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse',
'airblast_vineyard', 'airblast_orchard', 'distance_ft'])
#call method to assign scenario names
agdrift_empty.assign_column_names()
result = agdrift_empty.scenario_name
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_distances(self):
"""
:description retrieves distance values for deposition scenario datasets
: all scenarios use same distances
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result = pd.Series([], dtype='float')
try:
expected_result = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016,997.3632]
agdrift_empty.distance_name = 'distance_ft'
agdrift_empty.num_db_values = len(expected_result)
result = agdrift_empty.get_distances(agdrift_empty.num_db_values)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_scenario_deposition_data(self):
"""
:description retrieves deposition data for all scenarios from sql database
: and checks that for each the first, last, and total number of values
: are correct
:param scenario: name of scenario for which data is to be retrieved
:param num_values: number of values included in scenario datasets
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
#scenario_data = pd.Series([[]], dtype='float')
result = pd.Series([], dtype='float')
#changing expected values to the 161st
expected_result = [0.50013,0.041273,161.0, #aerial_vf2f
0.49997,0.011741,161.0, #aerial_f2m
0.4999,0.0053241,161.0, #aerial_m2c
0.49988,0.0031189,161.0, #aerial_c2vc
1.019339,9.66E-04,161.0, #ground_low_vf
1.007885,6.13E-04,161.0, #ground_low_fmc
1.055205,1.41E-03,161.0, #ground_high_vf
1.012828,7.72E-04,161.0, #ground_high_fmc
8.91E-03,3.87E-05,161.0, #airblast_normal
0.1155276,4.66E-04,161.0, #airblast_dense
0.4762651,5.14E-05,161.0, #airblast_sparse
3.76E-02,3.10E-05,161.0, #airblast_vineyard
0.2223051,3.58E-04,161.0] #airblast_orchard
try:
agdrift_empty.num_db_values = 161 #set number of data values in sql db
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
#agdrift_empty.db_name = 'sqlite_agdrift_distance.db'
#this is the list of scenario names (column names) in sql db (the order here is important because
#the expected values are ordered in this manner
agdrift_empty.scenario_name = ['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard']
#cycle through reading scenarios and building result list
for i in range(len(agdrift_empty.scenario_name)):
#get scenario data
scenario_data = agdrift_empty.get_scenario_deposition_data(agdrift_empty.scenario_name[i],
agdrift_empty.num_db_values)
print(scenario_data)
#extract 1st and last values of scenario data and build result list (including how many values are
#retrieved for each scenario
if i == 0:
#fix this
result = [scenario_data[0], scenario_data[agdrift_empty.num_db_values - 1],
float(len(scenario_data))]
else:
result.extend([scenario_data[0], scenario_data[agdrift_empty.num_db_values - 1],
float(len(scenario_data))])
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_column_names(self):
"""
:description retrieves column names from sql database (sqlite_agdrift_distance.db)
: (each column name refers to a specific deposition scenario;
: the scenario name is used later to retrieve the deposition data)
:parameter output name of sql database table from which to retrieve requested data
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
result = pd.Series([], dtype='object')
expected_result = ['distance_ft','aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard']
try:
result = agdrift_empty.get_column_names()
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_filter_arrays(self):
"""
:description eliminate blank data cells (i.e., distances for which no deposition value is provided)
(and thus reduce the number of x,y values to be used)
:parameter x_in: array of distance values associated with values for a deposition scenario (e.g., Aerial/EPA Defined Pond)
:parameter y_in: array of deposition values associated with a deposition scenario (e.g., Aerial/EPA Defined Pond)
:parameter x_out: processed array of x_in values eliminating indices of blank distance/deposition values
:parameter y_out: processed array of y_in values eliminating indices of blank distance/deposition values
:NOTE y_in array is assumed to be populated by values >= 0. except for the blanks as 'nan' entries
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([0.,1.,4.,5.,6.,7.], dtype='float')
expected_result_y = pd.Series([10.,11.,14.,15.,16.,17.], dtype='float')
try:
x_in = pd.Series([0.,1.,2.,3.,4.,5.,6.,7.], dtype='float')
y_in = pd.Series([10.,11.,'nan','nan',14.,15.,16.,17.], dtype='float')
x_out, y_out = agdrift_empty.filter_arrays(x_in, y_in)
result_x = x_out
result_y = y_out
npt.assert_allclose(result_x, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(result_y, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
            tab = [result_x, expected_result_x, result_y, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_list_sims_per_scenario(self):
"""
:description scan simulations and count number and indices of simulations that apply to each scenario
:parameter num_scenarios number of deposition scenarios included in SQL database
:parameter num_simulations number of simulations included in this model execution
:parameter scenario_name name of deposition scenario as recorded in SQL database
:parameter out_sim_scenario_id identification of deposition scenario specified per model run simulation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_num_sims = pd.Series([2,2,2,2,2,2,2,2,2,2,2,2,2], dtype='int')
expected_sim_indices = pd.Series([[0,13,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[1,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[2,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[3,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[4,17,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[5,18,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[6,19,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[7,20,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[8,21,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[9,22,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[10,23,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[11,24,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[12,25,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]], dtype='int')
try:
agdrift_empty.scenario_name = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard'], dtype='object')
agdrift_empty.out_sim_scenario_id = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard','aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard'], dtype='object')
agdrift_empty.num_simulations = len(agdrift_empty.out_sim_scenario_id)
agdrift_empty.num_scenarios = len(agdrift_empty.scenario_name)
result_num_sims, result_sim_indices = agdrift_empty.list_sims_per_scenario()
npt.assert_array_equal(result_num_sims, expected_num_sims, err_msg='', verbose=True)
npt.assert_array_equal(result_sim_indices, expected_sim_indices, err_msg='', verbose=True)
finally:
tab = [result_num_sims, expected_num_sims, result_sim_indices, expected_sim_indices]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_determine_area_dimensions(self):
"""
:description determine relevant area/length/depth of waterbody or terrestrial area
:param i: simulation number
:param ecosystem_type: type of assessment to be conducted
:param aquatic_body_type: source of dimensional data for area (EPA or User defined)
:param terrestrial_field_type: source of dimensional data for area (EPA or User defined)
:param *_width: default or user specified width of waterbody or terrestrial field
:param *_length: default or user specified length of waterbody or terrestrial field
:param *_depth: default or user specified depth of waterbody or terrestrial field
:NOTE all areas, i.e., ponds, wetlands, and terrestrial fields are of 1 hectare size; the user can elect
to specify a width other than the default width but it won't change the area size; thus for
user specified areas the length is calculated and not specified by the user)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_width = pd.Series([208.7, 208.7, 100., 400., 150., 0.], dtype='float')
expected_length = pd.Series([515.8, 515.8, 1076.39, 269.098, 717.593, 0.], dtype='float')
expected_depth = pd.Series([6.56, 0.4921, 7., 23., 0., 0.], dtype='float')
try:
agdrift_empty.ecosystem_type = pd.Series(['aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'terrestrial_assessment'], dtype='object')
agdrift_empty.aquatic_body_type = pd.Series(['epa_defined_pond',
'epa_defined_wetland',
'user_defined_pond',
'user_defined_wetland',
'NaN',
'NaN'], dtype='object')
agdrift_empty.terrestrial_field_type = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'user_defined_terrestrial',
'epa_defined_terrestrial'], dtype='object')
num_simulations = len(agdrift_empty.ecosystem_type)
agdrift_empty.default_width = 208.7
agdrift_empty.default_length = 515.8
agdrift_empty.default_pond_depth = 6.56
agdrift_empty.default_wetland_depth = 0.4921
agdrift_empty.user_pond_width = pd.Series(['NaN', 'NaN', 100., 'NaN', 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_pond_depth = pd.Series(['NaN', 'NaN', 7., 'NaN', 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_wetland_width = pd.Series(['NaN', 'NaN', 'NaN', 400., 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_wetland_depth = pd.Series(['NaN','NaN', 'NaN', 23., 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_terrestrial_width = pd.Series(['NaN', 'NaN', 'NaN', 'NaN', 150., 'NaN'], dtype='float')
width_result = pd.Series(num_simulations * ['NaN'], dtype='float')
length_result = pd.Series(num_simulations * ['NaN'], dtype='float')
depth_result = pd.Series(num_simulations * ['NaN'], dtype='float')
agdrift_empty.out_area_width = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.out_area_length = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.out_area_depth = pd.Series(num_simulations * ['nan'], dtype='float')
agdrift_empty.sqft_per_hectare = 107639
for i in range(num_simulations):
width_result[i], length_result[i], depth_result[i] = agdrift_empty.determine_area_dimensions(i)
npt.assert_allclose(width_result, expected_width, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(length_result, expected_length, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(depth_result, expected_depth, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [width_result, expected_width, length_result, expected_length, depth_result, expected_depth]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_foa(self):
"""
:description calculation of average deposition over width of water body
        :param integration_result: result of integration of deposition curve across the distance
        : beginning at the near distance and extending to the far distance of the water body
        :param integration_distance: effectively the width of the water body
        :param avg_dep_foa: average deposition rate across the width of the water body
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([0.1538462, 0.5, 240.])
try:
integration_result = pd.Series([1.,125.,3e5], dtype='float')
integration_distance = pd.Series([6.5,250.,1250.], dtype='float')
result = agdrift_empty.calc_avg_dep_foa(integration_result, integration_distance)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac(self):
"""
Deposition calculation.
:param avg_dep_foa: average deposition over width of water body as fraction of applied
:param application_rate: actual application rate
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([6.5, 3.125e4, 3.75e8])
try:
avg_dep_foa = pd.Series([1.,125.,3e5], dtype='float')
application_rate = pd.Series([6.5,250.,1250.], dtype='float')
result = agdrift_empty.calc_avg_dep_lbac(avg_dep_foa, application_rate)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_foa_from_lbac(self):
"""
Deposition calculation.
:param avg_dep_foa: average deposition over width of water body as fraction of applied
:param application_rate: actual application rate
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.553846e-01, 8.8e-06, 4.e-08])
try:
avg_dep_lbac = pd.Series([1.01, 0.0022, 0.00005], dtype='float')
application_rate = pd.Series([6.5,250.,1250.], dtype='float')
result = agdrift_empty.calc_avg_dep_foa_from_lbac(avg_dep_lbac, application_rate)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_gha(self):
"""
Deposition calculation.
:param avg_dep_gha: average deposition over width of water body in units of grams/hectare
:param gms_per_lb: conversion factor to convert lbs to grams
:param acres_per_hectare: conversion factor to convert hectares to acres
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([0.01516739, 0.111524, 0.267659])
try:
avg_dep_gha = pd.Series([17., 125., 3e2], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.471
result = agdrift_empty.calc_avg_dep_lbac_from_gha(avg_dep_gha)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_waterconc_ngl(self):
"""
:description calculate the average deposition onto the pond/wetland/field
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_width: average width of water body
        :param area_length: average length of water body
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
        :param ng_per_gram: conversion factor
        :param sqft_per_acre: conversion factor
        :param liters_per_ft3: conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([2.311455e-05, 2.209479e-03, 2.447423e-03])
try:
avg_waterconc_ngl = pd.Series([17., 125., 3e2], dtype='float')
area_width = pd.Series([50., 200., 500.], dtype='float')
area_length = pd.Series([6331., 538., 215.], dtype='float')
area_depth = pd.Series([0.5, 6.5, 3.], dtype='float')
agdrift_empty.liters_per_ft3 = 28.3168
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.ng_per_gram = 1.e9
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.471
result = agdrift_empty.calc_avg_dep_lbac_from_waterconc_ngl(avg_waterconc_ngl, area_width,
area_length, area_depth)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_lbac_from_mgcm2(self):
"""
:description calculate the average deposition of pesticide over the terrestrial field in lbs/acre
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
        :param mg_per_gram: conversion factor
        :param sqft_per_acre: conversion factor
        :param cm2_per_ft2: conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([2.676538e-02, 2.2304486, 44.608973])
try:
avg_fielddep_mgcm2 = pd.Series([3.e-4, 2.5e-2, 5.e-01])
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.cm2_per_ft2 = 929.03
agdrift_empty.mg_per_gram = 1.e3
result = agdrift_empty.calc_avg_dep_lbac_from_mgcm2(avg_fielddep_mgcm2)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_dep_gha(self):
"""
        :description average deposition over width of water body in grams per hectare
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param gms_per_lb: conversion factor to convert lbs to grams
:param acres_per_hectare: conversion factor to convert acres to hectares
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.401061, 0.3648362, 0.03362546])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.acres_per_hectare = 2.47105
result = agdrift_empty.calc_avg_dep_gha(avg_dep_lbac)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_waterconc_ngl(self):
"""
:description calculate the average concentration of pesticide in the pond/wetland
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_width: average width of water body
        :param area_length: average length of water body
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
        :param ng_per_gram: conversion factor
        :param sqft_per_acre: conversion factor
        :param liters_per_ft3: conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([70.07119, 18.24654, 22.41823])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
area_width = pd.Series([6.56, 208.7, 997.], dtype='float')
area_length = pd.Series([1.640838e4, 515.7595, 107.9629], dtype='float')
area_depth = pd.Series([6.56, 6.56, 0.4921], dtype='float')
agdrift_empty.ng_per_gram = 1.e9
agdrift_empty.liters_per_ft3 = 28.3168
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.sqft_per_acre = 43560.
result = agdrift_empty.calc_avg_waterconc_ngl(avg_dep_lbac ,area_width, area_length, area_depth)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_calc_avg_fielddep_mgcm2(self):
"""
:description calculate the average deposition of pesticide over the terrestrial field
:param avg_dep_lbac: average deposition over width of water body in lbs per acre
:param area_depth: average depth of water body
:param gms_per_lb: conversion factor to convert lbs to grams
        :param mg_per_gram: conversion factor
        :param sqft_per_acre: conversion factor
        :param cm2_per_ft2: conversion factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series([1.401063e-5, 3.648369e-6, 3.362552e-7])
try:
avg_dep_lbac = pd.Series([1.25e-3,3.255e-4,3e-5], dtype='float')
agdrift_empty.gms_per_lb = 453.592
agdrift_empty.sqft_per_acre = 43560.
agdrift_empty.mg_per_gram = 1.e3
agdrift_empty.cm2_per_ft2 = 929.03
result = agdrift_empty.calc_avg_fielddep_mgcm2(avg_dep_lbac)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_generate_running_avg(self):
"""
:description retrieves values for distance and the first deposition scenario from the sql database
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result_x = pd.Series([], dtype='float')
expected_result_y = pd.Series([], dtype='float')
expected_result_npts = pd.Series([], dtype='object')
x_array_in = pd.Series([], dtype='float')
y_array_in = | pd.Series([], dtype='float') | pandas.Series |
import sys
import os
import torch
import numpy as np
import torch_geometric.datasets
import pyximport
from torch_geometric.data import InMemoryDataset, download_url
import pandas as pd
from sklearn import preprocessing
pyximport.install(setup_args={'include_dirs': np.get_include()})
import os.path as osp
from torch_geometric.data import Data
import time
from torch_geometric.utils import add_self_loops, negative_sampling
from torch_geometric.data import Dataset
from functools import lru_cache
import copy
from fairseq.data import (
NestedDictionaryDataset,
NumSamplesDataset,
)
import json
import pathlib
from pathlib import Path
BASE = Path(os.path.realpath(__file__)).parent
GLOBAL_ROOT = str(BASE / 'graphormer_repo' / 'graphormer')
sys.path.insert(1, (GLOBAL_ROOT))
from data.wrapper import preprocess_item
import datetime
def find_part(hour):
    # map an hour of day (0-23) onto one of three coarse day periods
    # (note: hour == 11 falls through to part 3)
    if hour < 11:
        part = 1
    elif 11 < hour < 20:
        part = 2
    else:
        part = 3
    return part
def prepare_raw_dataset_edge(dataset_name):
if dataset_name == 'abakan':
raw_data = pd.read_csv('datasets/abakan/raw/abakan_full_routes_final_weather_L_NaN_filtered_FIXED.csv')
all_roads_graph = list(pd.read_pickle('datasets/abakan/raw/all_roads_graph.pickle').to_networkx().edges())
all_nodes = pd.read_pickle('datasets/abakan/raw/clear_nodes.pkl')
init = pd.read_csv('datasets/abakan/raw/graph_abakan_init.csv')
elif dataset_name == 'omsk':
raw_data = pd.read_csv('datasets/omsk/raw/omsk_full_routes_final_weather_L_NaN_filtered_FIXED.csv')
all_roads_graph = list(pd.read_pickle('datasets/omsk/raw/all_roads_graph.pickle').to_networkx().edges())
# all_nodes = pd.read_pickle('datasets/omsk/raw/clear_nodes.pkl')
init = pd.read_csv('datasets/omsk/raw/graph_omsk_init.csv')
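    # build a per-edge feature table covering every edge of the road graph,
    # with placeholder speed/length values and quartal-based shares for the
    # start and finish points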
all_roads_dataset = pd.DataFrame()
all_edge_list = [list((all_roads_graph)[i]) for i in range(0,len( (all_roads_graph)))]
all_roads_dataset['edge_id']= range(0,len(init['edge_id'].unique()))
all_roads_dataset['speed'] = ' 1'
all_roads_dataset['length'] = ' 1'
all_roads_dataset[' start_point_part'] = init['quartal_id'] / len(init['quartal_id'].unique())
all_roads_dataset['finish_point_part'] = init['quartal_id'] / len(init['quartal_id'].unique())
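    # unpack the graph's edge list into explicit (source, target) columns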
all_roads_dataset_edges = pd.DataFrame()
all_roads_dataset_edges['source'] = [x[0] for x in all_edge_list]
all_roads_dataset_edges['target'] = [x[1] for x in all_edge_list]
# all_roads_dataset_edges = all_roads_dataset_edges.drop_duplicates().reset_index(drop = True)
trip_part = all_roads_dataset[['edge_id', 'speed', 'length', ' start_point_part', 'finish_point_part']].copy()
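    # join the per-edge attributes onto both endpoints of every connection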
source_merge = pd.merge(all_roads_dataset_edges, trip_part.rename(columns = {'edge_id':'source'}), on = ['source'], how = 'left')
target_merge = pd.merge(all_roads_dataset_edges, trip_part.rename(columns = {'edge_id':'target'}), on = ['target'], how = 'left')
total_table = pd.DataFrame()
total_table['speed'] = (source_merge['speed'].apply(lambda x: [x]) + target_merge['speed'].apply(lambda x: [x]))
total_table['length'] = (source_merge['length'].apply(lambda x: [x]) + target_merge['length'].apply(lambda x: [x]))
total_table['edges'] = (source_merge['source'].apply(lambda x: [x]) + target_merge['target'].apply(lambda x: [x]))
total_table[' start_point_part'] = source_merge[' start_point_part']
total_table['finish_point_part'] = target_merge['finish_point_part']
    now = datetime.datetime.now()
    total_table['week_period'] = now.weekday()
    total_table['hour'] = now.hour
total_table['day_period'] = total_table['hour'].apply(lambda x: find_part(x))
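    # constant placeholder values for the traffic (RTA) and weather features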
total_table['RTA'] = 1
total_table['clouds'] = 1
total_table['snow'] = 0
total_table['temperature'] = 10
total_table['wind_dir'] = 180
total_table['wind_speed'] = 3
total_table['pressure'] = 747
total_table['source'] = source_merge['source']
total_table['target'] = source_merge['target']
# total_table = total_table.drop_duplicates().reset_index(drop = True)
return total_table
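# Example usage (a sketch, assuming the datasets/<city>/raw/ files referenced above are
# available locally; 'abakan' and 'omsk' are the two supported dataset names):
# >>> edge_table = prepare_raw_dataset_edge('abakan')
# >>> edge_table[['source', 'target', 'hour', 'day_period']].head()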
def prepare_raw_dataset_node(dataset_name):
if dataset_name == 'abakan':
raw_data = pd.read_csv('datasets/abakan/raw/abakan_full_routes_final_weather_L_NaN_filtered_FIXED.csv')
all_roads_graph = list(pd.read_pickle('datasets/abakan/raw/all_roads_graph.pickle').to_networkx().edges())
all_nodes = pd.read_pickle('datasets/abakan/raw/clear_nodes.pkl')
init = pd.read_csv('datasets/abakan/raw/graph_abakan_init.csv')
elif dataset_name == 'omsk':
raw_data = pd.read_csv('datasets/omsk/raw/omsk_full_routes_final_weather_L_NaN_filtered_FIXED.csv')
all_roads_graph = list(pd.read_pickle('datasets/omsk/raw/all_roads_graph.pickle').to_networkx().edges())
# all_nodes = pd.read_pickle('datasets/omsk/raw/clear_nodes.pkl')
init = | pd.read_csv('datasets/omsk/raw/graph_omsk_init.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 28 10:19:16 2019
@author: zl
"""
import os
import argparse
import glob
import shutil
from collections import defaultdict
import tqdm
import numpy as np
import pandas as pd
from PIL import Image
import imagehash
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', dest='data_dir',
help='the directory of the data',
default='data', type=str)
return parser.parse_args()
def main():
args = parse_args()
raw_images_dir = os.path.join(args.data_dir, 'rgby')
external_dir = os.path.join(raw_images_dir, 'external')
external_filenames = list(glob.glob(os.path.join(external_dir, '*.png')))
# print(external_filenames)
records = []
for file_name in external_filenames:
        name = os.path.basename(file_name)
# print('dir ', file_name, ' name ', name)
records.append((name, '1'))
# break
df = | pd.DataFrame.from_records(records, columns=['Id', 'x']) | pandas.DataFrame.from_records |
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
def plot_info(full_data, x_axis, y_axis, title):
kys = full_data.keys()
colors = cm.rainbow(np.linspace(0,1, len(kys)))
clr = 0
for i in kys:
plt.plot(range(len(full_data[i][y_axis])), full_data[i][y_axis], label=i,
color=colors[clr], marker=None, linestyle='-')
clr = clr + 1
plt.title(title, fontsize= 28, fontweight="bold")
plt.xlabel(x_axis, fontsize= 22)
plt.ylabel(y_axis, fontsize= 22)
plt.legend(fontsize=15)
    # plt.show()
plt.savefig(title + '.png')
plt.close()
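# Example usage (a sketch; the dict keys and the 'psnr' column are made-up names, the
# function only assumes that full_data maps a label to something indexable by y_axis):
# >>> curves = {'method_a': pd.DataFrame({'psnr': [30.1, 31.2, 32.0]}),
# ...           'method_b': pd.DataFrame({'psnr': [29.5, 30.8, 31.4]})}
# >>> plot_info(curves, 'iteration', 'psnr', 'PSNR per iteration')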
def main():
    df = pd.read_csv("data.csv")
print(df)
column_names = list(df)
methods = pd.unique(df[column_names[0]])
noise_levels = | pd.unique(df[column_names[1]]) | pandas.unique |
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from dask import dataframe as dd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Categorical, Datetime, Double, Integer
import featuretools as ft
from featuretools import Timedelta
from featuretools.computational_backends.feature_set import FeatureSet
from featuretools.computational_backends.feature_set_calculator import (
FeatureSetCalculator
)
from featuretools.entityset.relationship import RelationshipPath
from featuretools.feature_base import DirectFeature, IdentityFeature
from featuretools.primitives import (
And,
Count,
CumSum,
EqualScalar,
GreaterThanEqualToScalar,
GreaterThanScalar,
LessThanEqualToScalar,
LessThanScalar,
Mean,
Min,
Mode,
Negate,
NMostCommon,
NotEqualScalar,
NumTrue,
Sum,
TimeSinceLast,
Trend
)
from featuretools.primitives.base import AggregationPrimitive
from featuretools.tests.testing_utils import backward_path, to_pandas
from featuretools.utils import Trie
from featuretools.utils.gen_utils import Library
def test_make_identity(es):
f = IdentityFeature(es['log'].ww['datetime'])
feature_set = FeatureSet([f])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[f.get_name()][0]
assert (v == datetime(2011, 4, 9, 10, 30, 0))
def test_make_dfeat(es):
f = DirectFeature(ft.Feature(es['customers'].ww['age']),
child_dataframe_name='sessions')
feature_set = FeatureSet([f])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[f.get_name()][0]
assert (v == 33)
def test_make_agg_feat_of_identity_column(es):
agg_feat = ft.Feature(es['log'].ww['value'], parent_dataframe_name='sessions', primitive=Sum)
feature_set = FeatureSet([agg_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[agg_feat.get_name()][0]
assert (v == 50)
# full_dataframe not supported with Dask
def test_full_dataframe_trans_of_agg(pd_es):
agg_feat = ft.Feature(pd_es['log'].ww['value'], parent_dataframe_name='customers',
primitive=Sum)
trans_feat = ft.Feature(agg_feat, primitive=CumSum)
feature_set = FeatureSet([trans_feat])
calculator = FeatureSetCalculator(pd_es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([1]))
v = df[trans_feat.get_name()].values[0]
assert v == 82
def test_full_dataframe_error_dask(dask_es):
agg_feat = ft.Feature(dask_es['log'].ww['value'], parent_dataframe_name='customers',
primitive=Sum)
trans_feat = ft.Feature(agg_feat, primitive=CumSum)
feature_set = FeatureSet([trans_feat])
calculator = FeatureSetCalculator(dask_es,
time_last=None,
feature_set=feature_set)
error_text = "Cannot use primitives that require full dataframe with Dask"
with pytest.raises(ValueError, match=error_text):
calculator.run(np.array([1]))
def test_make_agg_feat_of_identity_index_column(es):
agg_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)
feature_set = FeatureSet([agg_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[agg_feat.get_name()][0]
assert (v == 5)
def test_make_agg_feat_where_count(es):
agg_feat = ft.Feature(es['log'].ww['id'],
parent_dataframe_name='sessions',
where=IdentityFeature(es['log'].ww['product_id']) == 'coke zero',
primitive=Count)
feature_set = FeatureSet([agg_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[agg_feat.get_name()][0]
assert (v == 3)
def test_make_agg_feat_using_prev_time(es):
agg_feat = ft.Feature(es['log'].ww['id'],
parent_dataframe_name='sessions',
use_previous=Timedelta(10, 's'),
primitive=Count)
feature_set = FeatureSet([agg_feat])
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 4, 9, 10, 30, 10),
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[agg_feat.get_name()][0]
assert (v == 2)
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 4, 9, 10, 30, 30),
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[agg_feat.get_name()][0]
assert (v == 1)
def test_make_agg_feat_using_prev_n_events(es):
if es.dataframe_type != Library.PANDAS.value:
        pytest.xfail('Distributed entitysets do not support use_previous')
agg_feat_1 = ft.Feature(es['log'].ww['value'],
parent_dataframe_name='sessions',
use_previous=Timedelta(1, 'observations'),
primitive=Min)
agg_feat_2 = ft.Feature(es['log'].ww['value'],
parent_dataframe_name='sessions',
use_previous=Timedelta(3, 'observations'),
primitive=Min)
assert agg_feat_1.get_name() != agg_feat_2.get_name(), \
'Features should have different names based on use_previous'
feature_set = FeatureSet([agg_feat_1, agg_feat_2])
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 4, 9, 10, 30, 6),
feature_set=feature_set)
df = calculator.run(np.array([0]))
# time_last is included by default
v1 = df[agg_feat_1.get_name()][0]
v2 = df[agg_feat_2.get_name()][0]
assert v1 == 5
assert v2 == 0
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 4, 9, 10, 30, 30),
feature_set=feature_set)
df = calculator.run(np.array([0]))
v1 = df[agg_feat_1.get_name()][0]
v2 = df[agg_feat_2.get_name()][0]
assert v1 == 20
assert v2 == 10
def test_make_agg_feat_multiple_dtypes(es):
if es.dataframe_type != Library.PANDAS.value:
pytest.xfail('Currently no Dask or Koalas compatible agg prims that use multiple dtypes')
compare_prod = IdentityFeature(es['log'].ww['product_id']) == 'coke zero'
agg_feat = ft.Feature(es['log'].ww['id'],
parent_dataframe_name='sessions',
where=compare_prod,
primitive=Count)
agg_feat2 = ft.Feature(es['log'].ww['product_id'],
parent_dataframe_name='sessions',
where=compare_prod,
primitive=Mode)
feature_set = FeatureSet([agg_feat, agg_feat2])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
v = df[agg_feat.get_name()][0]
v2 = df[agg_feat2.get_name()][0]
assert (v == 3)
assert (v2 == 'coke zero')
def test_make_agg_feat_where_different_identity_feat(es):
feats = []
where_cmps = [LessThanScalar, GreaterThanScalar, LessThanEqualToScalar,
GreaterThanEqualToScalar, EqualScalar, NotEqualScalar]
for where_cmp in where_cmps:
feats.append(ft.Feature(es['log'].ww['id'],
parent_dataframe_name='sessions',
where=ft.Feature(es['log'].ww['datetime'], primitive=where_cmp(datetime(2011, 4, 10, 10, 40, 1))),
primitive=Count))
df = ft.calculate_feature_matrix(entityset=es, features=feats, instance_ids=[0, 1, 2, 3])
df = to_pandas(df, index='id', sort_index=True)
for i, where_cmp in enumerate(where_cmps):
name = feats[i].get_name()
instances = df[name]
v0, v1, v2, v3 = instances[0:4]
if where_cmp == LessThanScalar:
assert (v0 == 5)
assert (v1 == 4)
assert (v2 == 1)
assert (v3 == 1)
elif where_cmp == GreaterThanScalar:
assert (v0 == 0)
assert (v1 == 0)
assert (v2 == 0)
assert (v3 == 0)
elif where_cmp == LessThanEqualToScalar:
assert (v0 == 5)
assert (v1 == 4)
assert (v2 == 1)
assert (v3 == 2)
elif where_cmp == GreaterThanEqualToScalar:
assert (v0 == 0)
assert (v1 == 0)
assert (v2 == 0)
assert (v3 == 1)
elif where_cmp == EqualScalar:
assert (v0 == 0)
assert (v1 == 0)
assert (v2 == 0)
assert (v3 == 1)
elif where_cmp == NotEqualScalar:
assert (v0 == 5)
assert (v1 == 4)
assert (v2 == 1)
assert (v3 == 1)
def test_make_agg_feat_of_grandchild_dataframe(es):
agg_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='customers', primitive=Count)
feature_set = FeatureSet([agg_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id')
v = df[agg_feat.get_name()].values[0]
assert (v == 10)
def test_make_agg_feat_where_count_feat(es):
"""
Feature we're creating is:
Number of sessions for each customer where the
number of logs in the session is less than 3
"""
log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)
feat = ft.Feature(es['sessions'].ww['id'],
parent_dataframe_name='customers',
where=log_count_feat > 1,
primitive=Count)
feature_set = FeatureSet([feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0, 1]))
df = to_pandas(df, index='id', sort_index=True)
name = feat.get_name()
instances = df[name]
v0, v1 = instances[0:2]
assert (v0 == 2)
assert (v1 == 2)
def test_make_compare_feat(es):
"""
Feature we're creating is:
Number of sessions for each customer where the
number of logs in the session is less than 3
"""
log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)
mean_agg_feat = ft.Feature(log_count_feat, parent_dataframe_name='customers', primitive=Mean)
mean_feat = DirectFeature(mean_agg_feat, child_dataframe_name='sessions')
feat = log_count_feat > mean_feat
feature_set = FeatureSet([feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0, 1, 2]))
df = to_pandas(df, index='id', sort_index=True)
name = feat.get_name()
instances = df[name]
v0, v1, v2 = instances[0:3]
assert v0
assert v1
assert not v2
def test_make_agg_feat_where_count_and_device_type_feat(es):
"""
Feature we're creating is:
Number of sessions for each customer where the
number of logs in the session is less than 3
"""
log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)
compare_count = log_count_feat == 1
compare_device_type = IdentityFeature(es['sessions'].ww['device_type']) == 1
and_feat = ft.Feature([compare_count, compare_device_type], primitive=And)
feat = ft.Feature(es['sessions'].ww['id'],
parent_dataframe_name='customers',
where=and_feat,
primitive=Count)
feature_set = FeatureSet([feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id')
name = feat.get_name()
instances = df[name]
assert (instances.values[0] == 1)
def test_make_agg_feat_where_count_or_device_type_feat(es):
"""
Feature we're creating is:
Number of sessions for each customer where the
number of logs in the session is less than 3
"""
log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)
compare_count = log_count_feat > 1
compare_device_type = IdentityFeature(es['sessions'].ww['device_type']) == 1
or_feat = compare_count.OR(compare_device_type)
feat = ft.Feature(es['sessions'].ww['id'],
parent_dataframe_name='customers',
where=or_feat,
primitive=Count)
feature_set = FeatureSet([feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id', int_index=True)
name = feat.get_name()
instances = df[name]
assert (instances.values[0] == 3)
def test_make_agg_feat_of_agg_feat(es):
log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)
customer_sum_feat = ft.Feature(log_count_feat, parent_dataframe_name='customers', primitive=Sum)
feature_set = FeatureSet([customer_sum_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id')
v = df[customer_sum_feat.get_name()].values[0]
assert (v == 10)
@pytest.fixture
def pd_df():
return pd.DataFrame({
"id": ["a", "b", "c", "d", "e"],
"e1": ["h", "h", "i", "i", "j"],
"e2": ["x", "x", "y", "y", "x"],
"e3": ["z", "z", "z", "z", "z"],
"val": [1, 1, 1, 1, 1]
})
@pytest.fixture
def dd_df(pd_df):
return dd.from_pandas(pd_df, npartitions=2)
@pytest.fixture
def ks_df(pd_df):
ks = pytest.importorskip('databricks.koalas', reason="Koalas not installed, skipping")
return ks.from_pandas(pd_df)
@pytest.fixture(params=['pd_df', 'dd_df', 'ks_df'])
def df(request):
return request.getfixturevalue(request.param)
def test_make_3_stacked_agg_feats(df):
"""
Tests stacking 3 agg features.
The test specifically uses non numeric indices to test how ancestor columns are handled
as dataframes are merged together
"""
if isinstance(df, dd.DataFrame):
        pytest.xfail('normalize_dataframe fails with dask DataFrame')
es = ft.EntitySet()
ltypes = {
'e1': Categorical,
'e2': Categorical,
'e3': Categorical,
'val': Double
}
es.add_dataframe(dataframe=df,
index="id",
dataframe_name="e0",
logical_types=ltypes)
es.normalize_dataframe(base_dataframe_name="e0",
new_dataframe_name="e1",
index="e1",
additional_columns=["e2", "e3"])
es.normalize_dataframe(base_dataframe_name="e1",
new_dataframe_name="e2",
index="e2",
additional_columns=["e3"])
es.normalize_dataframe(base_dataframe_name="e2",
new_dataframe_name="e3",
index="e3")
sum_1 = ft.Feature(es["e0"].ww["val"], parent_dataframe_name="e1", primitive=Sum)
sum_2 = ft.Feature(sum_1, parent_dataframe_name="e2", primitive=Sum)
sum_3 = ft.Feature(sum_2, parent_dataframe_name="e3", primitive=Sum)
feature_set = FeatureSet([sum_3])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array(["z"]))
v = df[sum_3.get_name()][0]
assert (v == 5)
def test_make_dfeat_of_agg_feat_on_self(es):
"""
The graph looks like this:
R R = Regions, a parent of customers
|
C C = Customers, the dataframe we're trying to predict on
|
etc.
We're trying to calculate a DFeat from C to R on an agg_feat of R on C.
"""
customer_count_feat = ft.Feature(es['customers'].ww['id'], parent_dataframe_name=u'régions', primitive=Count)
num_customers_feat = DirectFeature(customer_count_feat, child_dataframe_name='customers')
feature_set = FeatureSet([num_customers_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id')
v = df[num_customers_feat.get_name()].values[0]
assert (v == 3)
def test_make_dfeat_of_agg_feat_through_parent(es):
"""
The graph looks like this:
R C = Customers, the dataframe we're trying to predict on
/ \\ R = Regions, a parent of customers
S C S = Stores, a child of regions
|
etc.
We're trying to calculate a DFeat from C to R on an agg_feat of R on S.
"""
store_id_feat = IdentityFeature(es['stores'].ww['id'])
store_count_feat = ft.Feature(store_id_feat, parent_dataframe_name=u'régions', primitive=Count)
num_stores_feat = DirectFeature(store_count_feat, child_dataframe_name='customers')
feature_set = FeatureSet([num_stores_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id')
v = df[num_stores_feat.get_name()].values[0]
assert (v == 3)
def test_make_deep_agg_feat_of_dfeat_of_agg_feat(es):
"""
The graph looks like this (higher implies parent):
C C = Customers, the dataframe we're trying to predict on
| S = Sessions, a child of Customers
      P   S   L = Log, a child of both Sessions and Products
       \\ /    P = Products, a parent of Log which is not a descendant of customers
L
We're trying to calculate a DFeat from L to P on an agg_feat of P on L, and
then aggregate it with another agg_feat of C on L.
"""
log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='products', primitive=Count)
product_purchases_feat = DirectFeature(log_count_feat,
child_dataframe_name='log')
purchase_popularity = ft.Feature(product_purchases_feat, parent_dataframe_name='customers', primitive=Mean)
feature_set = FeatureSet([purchase_popularity])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id')
v = df[purchase_popularity.get_name()].values[0]
assert (v == 38.0 / 10.0)
def test_deep_agg_feat_chain(es):
"""
Agg feat of agg feat:
region.Mean(customer.Count(Log))
"""
customer_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='customers', primitive=Count)
region_avg_feat = ft.Feature(customer_count_feat, parent_dataframe_name=u'régions', primitive=Mean)
feature_set = FeatureSet([region_avg_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array(['United States']))
df = to_pandas(df, index='id')
v = df[region_avg_feat.get_name()][0]
assert (v == 17 / 3.)
# NMostCommon not supported with Dask or Koalas
def test_topn(pd_es):
topn = ft.Feature(pd_es['log'].ww['product_id'],
parent_dataframe_name='customers',
primitive=NMostCommon(n=2))
feature_set = FeatureSet([topn])
calculator = FeatureSetCalculator(pd_es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0, 1, 2]))
true_results = pd.DataFrame([
['toothpaste', 'coke zero'],
['coke zero', 'Haribo sugar-free gummy bears'],
['taco clock', np.nan]
])
assert ([name in df.columns for name in topn.get_feature_names()])
for i in range(df.shape[0]):
true = true_results.loc[i]
actual = df.loc[i]
if i == 0:
            # coke zero and toothpaste have same number of occurrences
assert set(true.values) == set(actual.values)
else:
for i1, i2 in zip(true, actual):
assert (pd.isnull(i1) and pd.isnull(i2)) or (i1 == i2)
# Trend not supported with Dask or Koalas
def test_trend(pd_es):
trend = ft.Feature([ft.Feature(pd_es['log'].ww['value']), ft.Feature(pd_es['log'].ww['datetime'])],
parent_dataframe_name='customers',
primitive=Trend)
feature_set = FeatureSet([trend])
calculator = FeatureSetCalculator(pd_es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0, 1, 2]))
true_results = [-0.812730, 4.870378, np.nan]
np.testing.assert_almost_equal(df[trend.get_name()].tolist(), true_results, decimal=5)
def test_direct_squared(es):
feature = IdentityFeature(es['log'].ww['value'])
squared = feature * feature
feature_set = FeatureSet([feature, squared])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0, 1, 2])))
for i, row in df.iterrows():
assert (row[0] * row[0]) == row[1]
def test_agg_empty_child(es):
customer_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='customers', primitive=Count)
feature_set = FeatureSet([customer_count_feat])
# time last before the customer had any events, so child frame is empty
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 4, 8),
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])), index='id')
assert df["COUNT(log)"].iloc[0] == 0
def test_diamond_entityset(diamond_es):
es = diamond_es
amount = ft.IdentityFeature(es['transactions'].ww['amount'])
path = backward_path(es, ['regions', 'customers', 'transactions'])
through_customers = ft.AggregationFeature(amount, 'regions',
primitive=ft.primitives.Sum,
relationship_path=path)
path = backward_path(es, ['regions', 'stores', 'transactions'])
through_stores = ft.AggregationFeature(amount, 'regions',
primitive=ft.primitives.Sum,
relationship_path=path)
feature_set = FeatureSet([through_customers, through_stores])
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 4, 8),
feature_set=feature_set)
df = calculator.run(np.array([0, 1, 2]))
df = to_pandas(df, index='id', sort_index=True)
assert (df['SUM(stores.transactions.amount)'] == [94, 261, 128]).all()
assert (df['SUM(customers.transactions.amount)'] == [72, 411, 0]).all()
def test_two_relationships_to_single_dataframe(games_es):
es = games_es
home_team, away_team = es.relationships
path = RelationshipPath([(False, home_team)])
mean_at_home = ft.AggregationFeature(ft.Feature(es['games'].ww['home_team_score']),
'teams',
relationship_path=path,
primitive=ft.primitives.Mean)
path = RelationshipPath([(False, away_team)])
mean_at_away = ft.AggregationFeature(ft.Feature(es['games'].ww['away_team_score']),
'teams',
relationship_path=path,
primitive=ft.primitives.Mean)
home_team_mean = ft.DirectFeature(mean_at_home, 'games',
relationship=home_team)
away_team_mean = ft.DirectFeature(mean_at_away, 'games',
relationship=away_team)
feature_set = FeatureSet([home_team_mean, away_team_mean])
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 8, 28),
feature_set=feature_set)
df = calculator.run(np.array(range(3)))
df = to_pandas(df, index='id', sort_index=True)
assert (df[home_team_mean.get_name()] == [1.5, 1.5, 2.5]).all()
assert (df[away_team_mean.get_name()] == [1, 0.5, 2]).all()
@pytest.fixture
def pd_parent_child():
parent_df = pd.DataFrame({"id": [1]})
child_df = pd.DataFrame({"id": [1, 2, 3],
"parent_id": [1, 1, 1],
"time_index": pd.date_range(start='1/1/2018', periods=3),
"value": [10, 5, 2],
"cat": ['a', 'a', 'b']}).astype({'cat': 'category'})
return (parent_df, child_df)
@pytest.fixture
def dd_parent_child(pd_parent_child):
parent_df, child_df = pd_parent_child
parent_df = dd.from_pandas(parent_df, npartitions=2)
child_df = dd.from_pandas(child_df, npartitions=2)
return (parent_df, child_df)
@pytest.fixture
def ks_parent_child(pd_parent_child):
ks = pytest.importorskip('databricks.koalas', reason="Koalas not installed, skipping")
parent_df, child_df = pd_parent_child
parent_df = ks.from_pandas(parent_df)
child_df = ks.from_pandas(child_df)
return (parent_df, child_df)
@pytest.fixture(params=['pd_parent_child', 'dd_parent_child', 'ks_parent_child'])
def parent_child(request):
return request.getfixturevalue(request.param)
def test_empty_child_dataframe(parent_child):
parent_df, child_df = parent_child
child_ltypes = {
'parent_id': Integer,
'time_index': Datetime,
'value': Double,
'cat': Categorical
}
es = ft.EntitySet(id="blah")
es.add_dataframe(dataframe_name="parent",
dataframe=parent_df,
index="id")
es.add_dataframe(dataframe_name="child",
dataframe=child_df,
index="id",
time_index="time_index",
logical_types=child_ltypes)
es.add_relationship("parent", "id", "child", "parent_id")
# create regular agg
count = ft.Feature(es["child"].ww["id"], parent_dataframe_name="parent", primitive=Count)
# create agg feature that requires multiple arguments
trend = ft.Feature([ft.Feature(es["child"].ww["value"]), ft.Feature(es["child"].ww['time_index'])],
parent_dataframe_name="parent",
primitive=Trend)
# create multi-output agg feature
n_most_common = ft.Feature(es["child"].ww["cat"], parent_dataframe_name="parent", primitive=NMostCommon)
# create aggs with where
where = ft.Feature(es["child"].ww["value"]) == 1
count_where = ft.Feature(es["child"].ww["id"], parent_dataframe_name="parent", where=where, primitive=Count)
trend_where = ft.Feature([ft.Feature(es["child"].ww["value"]), ft.Feature(es["child"].ww["time_index"])],
parent_dataframe_name="parent",
where=where,
primitive=Trend)
n_most_common_where = ft.Feature(es["child"].ww["cat"], parent_dataframe_name="parent", where=where, primitive=NMostCommon)
if isinstance(parent_df, pd.DataFrame):
features = [count, count_where, trend, trend_where, n_most_common, n_most_common_where]
data = {count.get_name(): pd.Series([0], dtype="Int64"),
count_where.get_name(): pd.Series([0], dtype="Int64"),
trend.get_name(): pd.Series([np.nan], dtype="float"),
trend_where.get_name(): pd.Series([np.nan], dtype="float")}
for name in n_most_common.get_feature_names():
data[name] = pd.Series([np.nan], dtype="category")
for name in n_most_common_where.get_feature_names():
data[name] = pd.Series([np.nan], dtype="category")
else:
features = [count, count_where]
data = {count.get_name(): pd.Series([0], dtype="Int64"),
count_where.get_name(): pd.Series([0], dtype="Int64")}
answer = pd.DataFrame(data)
# cutoff time before all rows
fm = ft.calculate_feature_matrix(entityset=es,
features=features,
cutoff_time=pd.Timestamp("12/31/2017"))
fm = to_pandas(fm)
for column in data.keys():
| pd.testing.assert_series_equal(fm[column], answer[column], check_names=False, check_index=False) | pandas.testing.assert_series_equal |
# -*- coding: utf-8 -*-
import tensorflow as tf
import scipy.sparse as sp
import numpy as np
import pandas as pd
def normalized_adj(adj):
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
    adj_normalized = adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
    adj_normalized = adj_normalized.astype(np.float32)
    return adj_normalized
def sparse_to_tuple(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
L = tf.SparseTensor(coords, mx.data, mx.shape)
return tf.sparse_reorder(L)
def calculate_laplacian(adj, lambda_max=1):
adj = normalized_adj(adj + sp.eye(adj.shape[0]))
adj = sp.csr_matrix(adj)
adj = adj.astype(np.float32)
return sparse_to_tuple(adj)
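# The normalization above computes D^(-1/2) (A + I) D^(-1/2), i.e. the renormalized
# adjacency used by GCN-style layers. A minimal sketch of a call (illustrative 2-node
# graph, assuming the TF1-style graph mode used throughout this module):
# >>> adj = np.array([[0., 1.], [1., 0.]], dtype=np.float32)
# >>> laplacian = calculate_laplacian(adj)   # a reordered tf.SparseTensor of shape (2, 2)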
def weight_variable_glorot(input_dim, output_dim, name=""):
init_range = np.sqrt(6.0 / (input_dim + output_dim))
initial = tf.random_uniform([input_dim, output_dim], minval=-init_range,
maxval=init_range, dtype=tf.float32)
return tf.Variable(initial,name=name)
def huanyuan(test1,real1,path,pre_len):
    # post-process the test split: collapse the scaled test matrix back to one value per road
    data_test1 = pd.DataFrame()
    a = test1.shape[0] - pre_len
    num = a // 354  # integer stride so it can be used as a row offset below
for j in range(432):
data1 = test1.iloc[:, j]
ser1 = []
for i in range(0, 354):
a = data1[i * num]
b = data1[i * num + 1]
mean = (a + b) / 2
ser1.append(mean)
data_one1 = | pd.DataFrame(ser1) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
DWD-Pilotstation software source code file
by <NAME>. Non-commercial use only.
'''
import numpy as np
import pandas as pd
import xarray as xr
import re
import datetime
import itertools as it
import operator as op
import warnings
# import packackes used for plotting quicklooks
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.colors as mcolors
import matplotlib.cm as cm
from matplotlib.ticker import MultipleLocator
from pathlib import Path
# import
from hpl2netCDF_client.hpl_files.hpl_files import hpl_files
from hpl2netCDF_client.config.config import config
from scipy.linalg import diagsvd, svdvals
### functions used for plotting
def cmap_discretize(cmap, N):
"""Return a discrete colormap from the continuous colormap cmap.
cmap: colormap instance, eg. cm.jet.
N: number of colors.
"""
if type(cmap) == str:
cmap = plt.get_cmap(cmap)
colors_i = np.concatenate((np.linspace(0, 1., N), (0.,0.,0.,0.)))
colors_rgba = cmap(colors_i)
indices = np.linspace(0, 1., N+1)
cdict = {}
for ki, key in enumerate(('red','green','blue')):
cdict[key] = [(indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki]) for i in range(N+1)]
# Return colormap object.
return mcolors.LinearSegmentedColormap(cmap.name + "_%d"%N, cdict, 1024)
### functions for the retrieval
def build_Amatrix(azimuth_vec,elevation_vec):
return np.einsum('ij -> ji',
np.vstack(
[
np.sin((np.pi/180)*(azimuth_vec))*np.sin((np.pi/180)*(90-elevation_vec))
,np.cos((np.pi/180)*(azimuth_vec))*np.sin((np.pi/180)*(90-elevation_vec))
,np.cos((np.pi/180)*(90-elevation_vec))
])
)
#Note: numpy's lstsq-function uses singular value decomposition already
def VAD_retrieval(azimuth_vec,elevation_vec,Vr):
# u, s, vh = np.linalg.svd(build_Amatrix(azimuth_vec,elevation_vec), full_matrices=True)
# A = build_Amatrix(azimuth_vec,elevation_vec)
# return vh.transpose() @ np.linalg.pinv(diagsvd(s,u.shape[0],vh.shape[0])) @ u.transpose() @ Vr
return np.linalg.lstsq(build_Amatrix(azimuth_vec,elevation_vec), Vr, rcond=-1)
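# Minimal sketch of a single VAD fit (synthetic values only): each row of the A matrix is
# [sin(az)*sin(zenith), cos(az)*sin(zenith), cos(zenith)], so the least-squares solution of
# A x = Vr recovers the (u, v, w) wind components for one range gate.
# >>> azi = np.array([0., 90., 180., 270.])
# >>> ele = np.full(4, 75.)
# >>> vr = build_Amatrix(azi, ele) @ np.array([3., 4., 0.])   # synthetic radial velocities
# >>> VAD_retrieval(azi, ele, vr)[0]                          # ~ array([3., 4., 0.])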
def uvw_2_spd(uvw,uvw_unc):
if (np.isfinite(uvw[0]) * np.isfinite(uvw[1])) & (~np.isnan(uvw[0]) * ~np.isnan(uvw[1])):
speed = np.sqrt((uvw[0])**2.+(uvw[1])**2.)
else:
speed = np.nan
if speed > 0:
df_du = uvw[0] * 1/speed
df_dv = uvw[1] * 1/speed
error = np.sqrt((df_du*uvw_unc[0])**2 + (df_dv*uvw_unc[1])**2)
else:
error = np.nan
return {'speed': speed, 'error': error}
def uvw_2_dir(uvw,uvw_unc):
if (np.isfinite(uvw[0]) * np.isfinite(uvw[1])) & (~np.isnan(uvw[0]) * ~np.isnan(uvw[1])):
wdir = np.arctan2(uvw[0],uvw[1])*180/np.pi + 180
else:
wdir = np.nan
if np.isfinite(wdir):
error = (180/np.pi)*np.sqrt((uvw[0]*uvw_unc[0])**2 + (uvw[1]*uvw_unc[1])**2)/(uvw[0]**2 + uvw[1]**2)
else:
error = np.nan
return {'wdir': wdir, 'error': error}
def calc_sigma_single(SNR_dB,Mpts,nsmpl,BW,delta_v):
'calculates the instrument uncertainty: SNR in dB!'
# SNR_dB = np.ma.masked_values(SNR_dB, np.nan)
# SNR_dB = np.ma.masked_invalid(SNR_dB)
SNR= 10**(SNR_dB/10)
bb = np.sqrt(2.*np.pi)*(delta_v/BW)
alpha = SNR/bb
Np = Mpts*nsmpl*SNR
# a1 = (2.*np.sqrt(np.sqrt(np.pi)/alpha)).filled(np.nan)
# a1 = 2.*np.sqrt( np.divide(np.sqrt(np.pi), alpha
# , out=np.full((alpha.shape), np.nan)
# , where=alpha!=0)
# )
a1 = 2.*(np.sqrt(np.ma.divide(np.sqrt(np.pi), alpha)))#.filled(np.nan)
a2 = (1+0.16*alpha)#.filled(np.nan)
a3 = np.ma.divide(delta_v, np.sqrt(Np))#.filled(np.nan) ##here, Cramer Rao lower bound!
SNR= SNR#.filled(np.nan)
sigma = np.ma.masked_where( SNR_dB > -5
, (a1*a2*a3).filled(np.nan)
).filled(a3.filled(np.nan))
# sigma= np.where(~np.isnan(SNR)
# ,np.where(SNR_dB <= -5., (a1*a2*a3), a3)
# ,np.nan)
return sigma
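# Illustrative call (a sketch with made-up instrument settings, not validated numbers):
# uncertainty of a single gate at -10 dB SNR, with 1024 points per gate, 10000 averaged
# samples, a 50 m/s detector bandwidth and a 3 m/s signal width:
# >>> calc_sigma_single(np.array([-10.0]), 1024, 10000, 50.0, 3.0)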
def log10_inf(x):
result = np.zeros(x.shape)
result[x>0] = np.log10(x[x>0])
result[x<0] = -float('Inf')
return result
# def in_dB(x):
# return np.real(10*log10_inf(np.float64(x)))
def consensus_mean(Vr,SNR,CNS_range,CNS_percentage,SNR_threshold):
if SNR_threshold < 0:
SNR_threshold= 10**(SNR_threshold/10)
with np.errstate(divide='ignore', invalid='ignore'):
Vr_X= np.expand_dims(Vr, axis=0)
AjdM = (abs(np.einsum('ij... -> ji...',Vr_X)-Vr_X)<CNS_range).astype(int)
SUMlt= np.sum(AjdM, axis=0)
X= np.sum( np.einsum('il...,lj... -> ij...',AjdM
, np.where( np.sum(SNR>SNR_threshold, axis=0)/SNR.shape[0] >= CNS_percentage/100
, np.apply_along_axis(np.diag, 0,(SUMlt/np.sum(SNR>SNR_threshold, axis=0) >= CNS_percentage/100).astype(int))
,0))#[:,:,kk]
, axis=0)#[:,kk]
W= np.where(X>0,X/np.sum(X, axis=0),np.nan)
mask= np.isnan(W)
Wm= np.ma.masked_where(mask,W)
Xm= np.ma.masked_where(mask,Vr)
OutCNS=Xm*Wm
MEAN= OutCNS.sum(axis=0).filled(np.nan)
diff= Vr- MEAN
mask_m= abs(diff)<3
Vr_m = np.ma.masked_where(~mask_m,Vr)
# Vr_m.mean(axis=0).filled(np.nan)
IDX= mask_m
UNC= (Vr_m.max(axis=0)-Vr_m.min(axis=0)).filled(np.nan)/2
return MEAN, IDX, UNC
def consensus_median(Vr,SNR,CNS_range,CNS_percentage,SNR_threshold):
if SNR_threshold < 0:
SNR_threshold= 10**(SNR_threshold/10)
with np.errstate(divide='ignore', invalid='ignore'):
Vr_X= np.expand_dims(Vr, axis=0)
AjdM = (abs(np.einsum('ij... -> ji...',Vr_X)-Vr_X)<CNS_range).astype(int)
SUMlt= np.sum(AjdM, axis=0)
X= np.sum(np.einsum('il...,lj... -> ij...',AjdM
,np.where(np.sum(SNR>SNR_threshold, axis=0)/SNR.shape[0] >= CNS_percentage/100
,np.apply_along_axis(np.diag, 0,(SUMlt/np.sum(SNR>SNR_threshold, axis=0) >= CNS_percentage/100).astype(int))
,0))#[:,:,kk]
, axis=0)#[:,kk]
W= np.where(X>0,X/np.sum(X, axis=0),np.nan)
mask= np.isnan(W)
Wm= np.ma.masked_where(mask,W)
Xm= np.ma.masked_where(mask,Vr)
OutCNS=Xm*Wm
MEAN= OutCNS.sum(axis=0).filled(np.nan)
diff= Vr- MEAN
diff= np.ma.masked_values(diff, np.nan)
mask_m= (abs(diff)<3)*(~np.isnan(diff))
Vr_m= np.ma.masked_where(~mask_m,Vr)
MEAN= np.ma.median(Vr_m, axis =0).filled(np.nan)
IDX= ~np.isnan(W)
UNC= (Vr_m.max(axis=0)-Vr_m.min(axis=0)).filled(np.nan)/2
return MEAN, IDX, UNC
###############################################################################################
# functions used to identify single cycles
###############################################################################################
def process(lst,mon):
# Guard clause against empty lists
if len(lst) < 1:
return lst
# use an object here to work around closure limitations
state = type('State', (object,), dict(prev=lst[0], n=0))
def grouper_proc(x):
if mon==1:
if x < state.prev:
state.n += 1
elif mon==-1:
if x > state.prev:
state.n += 1
state.prev = x
return state.n
return { k: list(g) for k, g in it.groupby(lst, grouper_proc) }
def get_cycles(lst,mon):
ll= 0
res= {}
for key, lst in process(lst,int(np.median(np.sign(np.diff(np.array(lst)))))).items():
# print(key,np.arange(ll,ll+len(lst)),lst)
id_tmp= np.arange(ll,ll+len(lst))
ll+= len(lst)
res.update( { key:{'indices': list(id_tmp), 'values': lst} } )
return res
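# Example (a sketch with synthetic azimuths): get_cycles splits a repeating sequence into
# individual scan cycles; note that the monotonicity argument `mon` is re-derived
# internally from the median sign of the azimuth increments.
# >>> get_cycles([0, 90, 180, 270, 0, 90, 180, 270], mon=1)
# {0: {'indices': [0, 1, 2, 3], 'values': [0, 90, 180, 270]},
#  1: {'indices': [4, 5, 6, 7], 'values': [0, 90, 180, 270]}}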
###############################################################################################
def grouper(iterable, n, fillvalue=None):
'''Collect data into fixed-length chunks or blocks'''
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return it.zip_longest(*args, fillvalue=fillvalue)
# def calc_node_degree(Vr,CNS_range):
# '''takes masked array as input'''
# f_abs_pairdiff = lambda x,y: op.abs(op.sub(x,y))<CNS_range
# with np.errstate(invalid='ignore'):
# return np.array(list(grouper(it.starmap(f_abs_pairdiff,((it.permutations(Vr.filled(np.nan),2)))),Vr.shape[0]-1))).sum(axis=1)
def calc_node_degree(Vr,CNS_range,B, metric='l1norm'):
'''takes masked array as input'''
if metric == 'l1norm':
f_abs_pairdiff = lambda x,y: op.abs(op.sub(x,y))<CNS_range
if metric == 'l1norm_aa':
f_abs_pairdiff = lambda x,y: op.sub(B,op.abs(op.sub(op.abs(op.sub(x,y)),B)))<CNS_range
with np.errstate(invalid='ignore'):
return np.array(list(grouper(it.starmap(f_abs_pairdiff,((it.permutations(Vr.filled(np.nan),2)))),Vr.shape[0]-1))).sum(axis=1)
def diff_aa(x,y,c):
'''calculate aliasing independent differences'''
return (c-abs(abs(x-y)-c))
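# For example, with a Nyquist velocity c = 19 m/s, diff_aa(18., -18., 19.) evaluates to 2.,
# i.e. the two radial velocities are treated as 2 m/s apart across the folding boundary.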
# def consensus(Vr,SNR,CNS_range,CNS_percentage,SNR_threshold):
def consensus(Vr,SNR,BETA,CNS_range,CNS_percentage,SNR_threshold,B):
'''
consensus(Vr,SNR,CNS_range,CNS_percentage,SNR_threshold)
Calculate consensus average:
--> row-wise, calculate the arithmetic mean using only the members of the most likely cluster
, i.e. the cluster with the maximum number of edges, if the total number of valid clusters is greater than a specified limit.
Parameters
----------
Vr(time,height) : array_like (intended for using numpy array)
n-dimensional array representing a signal.
SNR(time,height) : array_like (intended for using numpy array)
n-dimensional array of the same dimension as x representing the signal to noise ratio.
CNS_range(scalar) : scalar value, i.e. 0-dimensional
scalar value giving the radius for neighboring values in Vr.
CNS_percentage(scalar) : scalar value, i.e. 0-dimensional
        scalar value stating the minimum percentage for the relative number of valid clusters compared to the total number of clusters.
SNR_threshold(scalar) : scalar value, i.e. 0-dimensional
scalar value giving the lower bounded threshold of the signal to noise threshold.
order : {'Vr', 'SNR', 'CNS_range', 'CNS_percentage', 'SNR_threshold'}
Returns
-------
(MEAN, IDX, UNC) --> [numpy array, boolean array, numpy array]
MEAN - consensus average of array, ...
IDX - index of values used for the consensus, ...
UNC - standard deviation of centered around the consensus average of array, ...
...for each row
Dependencies
------------
functions : check_if_db(x), in_mag(snr), filter_by_snr(vr,snr,snr_threshold)
Notes
-----
All array inputs must have the same dimensions, namely (time,height).
If the input SNR is already given in dB, do NOT filter the input SNR in advance for missing values, because filtering will be done during the calculation
Translation between dB and magnitude can be done with the functions "in_dB(x)" and "in_mag(x)".
    This function implicitly uses machine epsilon (np.float32) for the numerical value of 0, see "filter_by_snr(vr,snr,snr_threshold)".
'''
condi_0 = SNR > 0
if SNR_threshold == 0:
condi_snr = condi_0
else:
condi_snr = (10*np.log10(SNR.astype(np.complex)).real > SNR_threshold) & (BETA > 0)
Vr_m = np.ma.masked_where( ~condi_snr, Vr)
condi_vr = (abs(Vr_m.filled(-999.)) <= B)
Vr_m = np.ma.masked_where( ~condi_vr, Vr_m)
### calculate the number of points within the consensusrange
## easy-to-understand way
# SUMlt= 1 + np.sum(
# np.einsum( 'ij...,ik...-> ij...'
# , (abs(np.einsum('ij... -> ji...', Vr_m[None,...]) - Vr_m[None,...]) < CNS_range).filled(False).astype(int)
# , np.apply_along_axis(np.diag, 0, condi_vr).astype(int))
# , axis=0) - (condi_vr).astype(int)
## performance strong way using iterators
SUMlt= 1 + calc_node_degree(Vr_m, CNS_range, B, metric='l1norm')
Vr_maxim= np.ma.masked_where( ~((100*np.max(SUMlt, axis=0)/CNS_percentage >= condi_vr.sum(axis=0)) & (condi_vr.sum(axis=0) >= Vr.shape[0]/100*60.))
# ~((100*np.max(SUMlt, axis=0)/condi_vr.sum(axis=0) >= CNS_percentage) & (100*condi_vr.sum(axis=0)/Vr.shape[0] > 60.))
, Vr_m[-(np.argmax(np.flipud(SUMlt),axis=0)+1), np.arange(0,SUMlt.shape[1])]
# , Vr_m[np.argmax(SUMlt,axis=0), np.arange(0,SUMlt.shape[1])]
)
mask_m= abs(Vr_m.filled(999.) - Vr_maxim.filled(-999.)) < CNS_range
Vr_m= np.ma.masked_where(~(mask_m), Vr_m.filled(-999.))
MEAN= Vr_m.sum(axis=0).filled(np.nan)/np.max(SUMlt, axis=0)
IDX= mask_m
UNC= np.nanstd(Vr_m-MEAN.T, axis=0)
UNC[np.isnan(MEAN)]= np.nan
### memory and time efficient option
# SUMlt= 1 + calc_node_degree(Vr_m, CNS_range, B, metric='l1norm')
### this code is more efficient, but less intuitive and accounts for one-time velocity folding
#SUMlt= 1 + calc_node_degree(Vr_m, CNS_range, B, metric='l1norm_aa')
# Vr_maxim= np.ma.masked_where( ~((100*np.max(SUMlt, axis=0)/condi_vr.sum(axis=0) >= CNS_percentage) & (100*condi_vr.sum(axis=0)/Vr.shape[0] > 60.))
# , Vr_m[-(np.argmax(np.flipud(SUMlt),axis=0)+1), np.arange(0,SUMlt.shape[1])]
# # , Vr_m[np.argmax(SUMlt,axis=0), np.arange(0,SUMlt.shape[1])]
# )
# mask_m= diff_aa(Vr_m, V_max, B) < 3
# Vr_m = np.ma.masked_where((mask_m), Vr).filled(Vr-np.sign(Vr-V_max)*2*B*np.heaviside(abs(Vr-V_max)-B, 1))
# Vr_m = np.ma.masked_where(~(mask_m), Vr_m)
# MEAN= Vr_m.mean(axis=0).filled(np.nan)
# IDX= mask_m
# UNC= np.nanstd(Vr_m-MEAN.T, axis=0)
# UNC[np.isnan(MEAN)]= np.nan
return np.round(MEAN, 4), IDX, UNC
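# Minimal usage sketch (synthetic numbers, not instrument data): Vr, SNR and BETA are
# (rays x gates) arrays from one averaging window, CNS_range/CNS_percentage follow the
# configuration keys of the same name and B is the Nyquist velocity.
# >>> vr = np.array([[2.0], [2.2], [1.9], [15.0]])   # one outlier ray in a single gate
# >>> snr = np.full((4, 1), 10.0)                    # linear SNR, i.e. 10 dB
# >>> beta = np.full((4, 1), 1e-6)
# >>> v_cns, idx, unc = consensus(vr, snr, beta, 3.0, 60.0, -24.0, 19.0)
# >>> v_cns                                          # ~ array([2.0333])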
def check_if_db(x):
'''
check_if_db(X)
Static method for checking if the input is in dB
Parameters
----------
x : array or scalar
representing the signal to noise of a lidar signal.
Returns
-------
bool
        stating whether the input is "likely" in dB.
Notes
-----
The method is only tested empirically and therefore not absolute.
'''
return np.any(x<-1)
def filter_by_snr(x,snr,snr_threshold):
'''
filter_by_snr(X,SNR,SNR_threshold)
Masking an n-dimensional array (X) according to a given signal to noise ratio (SNR) and specified threshold (SNR_threshold).
Parameters
----------
x : array_like (intended for using numpy array)
n-dimensional array representing a signal.
snr : array_like (intended for using numpy array)
n-dimensional array of the same dimension as x representing the signal to noise ratio.
snr_threshold : scalar value, i.e. 0-dimensional
scalar value giving the lower bounded threshold of the signal to noise threshold.
order : {'x', 'snr', 'snr_threshold'}
Returns
-------
masked_array, i.e. [data, mask]
Masked numpy array to be used in further processing.
Dependencies
------------
functions : check_if_db(x), in_mag(snr)
Notes
-----
If the input SNR is already given in dB, do NOT filter the input SNR in advance for missing values.
Translation between dB and magnitude can be done with the functions "in_dB(x)" and "in_mag(x)".
    This function uses machine epsilon (np.float32) for the numerical value of 0.
'''
if check_if_db(snr)==True:
print('SNR interpreted as dB')
print(snr.min(),snr.max())
snr= in_mag(snr)
if check_if_db(snr_threshold)==True:
print('SNR-threshold interpreted as dB')
snr_threshold= in_mag(snr_threshold)
snr_threshold+= np.finfo(np.float32).eps
return np.ma.masked_where(~(snr>snr_threshold), x)
def in_db(x):
'''
in_db(X)
Calculates dB values of a given input (X). The intended input is the signal to noise ratio of a Doppler lidar.
Parameters
----------
x : array_like (intended for using numpy array) OR numerical scalar
n-dimensional array
Returns
-------
X in dB
Dependencies
------------
functions : check_if_db(x)
Notes
-----
If the input X is already given in dB, X is returned without further processing.
Please, do NOT filter the input in advance for missing values.
    This function uses machine epsilon (np.float32) for the numerical value of 0.
'''
if check_if_db(x)==True:
print('Input already in dB')
return x
else:
epsilon_val= np.finfo(np.float32).eps
if np.ma.size(x)==0:
print('0-dimensional input!')
else:
if np.ma.size(x)>1:
x[x<=0]= epsilon_val
return 10*np.log10(np.ma.masked_where((x<= epsilon_val), x)).filled(10*np.log10(epsilon_val))
else:
if x<=0:
x= epsilon_val
return 10*np.log10(np.ma.masked_where((x<= epsilon_val), x)).filled(10*np.log10(epsilon_val))
def in_mag(x):
'''
in_mag(X)
Calculates the magnitude values of a given dB input (X). The intended input is the signal to noise ratio of a Doppler lidar.
Parameters
----------
x : array_like (intended for using numpy array) OR numerical scalar
n-dimensional array
Returns
-------
X in magnitude
Dependencies
------------
functions : check_if_db(x)
Notes
-----
    If the input X is already given in magnitude, X is returned without further processing.
    Please, do NOT filter the input in advance for missing values.
    This function uses machine epsilon (np.float32) for the numerical value of 0.
'''
if check_if_db(x)==False:
print('Input already in magnitude')
return x
else:
epsilon_val= np.finfo(np.float32).eps
if np.ma.size(x)==0:
print('0-dimensional input!')
else:
if np.ma.size(x)>1:
res= 10**(x/10)
res[res<epsilon_val]= epsilon_val
return res
else:
res= 10**(x/10)
if res<=epsilon_val:
res= epsilon_val
return res
def CN_est(X):
Fill_Val = 0
X_f = X.filled(Fill_Val)
if np.all(X_f == 0):
return np.inf
else:
max_val = svdvals(X_f).max()
min_val = svdvals(X_f).min()
if min_val == 0:
return np.inf
else:
return max_val/min_val
def check_num_dir(n_rays,calc_idx,azimuth,idx_valid):
h, be = np.histogram(np.mod(azimuth[calc_idx[idx_valid]],360), bins=2*n_rays, range=(0, 360))
counts = np.sum(np.r_[h[-1], h[:-1]].reshape(-1, 2), axis=1) # rotate and sum
edges = np.r_[np.r_[be[-2], be[:-2]][::2], be[-2]] # rotate and skip
kk_idx= counts >= 3
return kk_idx, np.arange(0,360,360//n_rays), edges
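# check_num_dir bins the azimuths of one averaging window into n_rays sectors (the bins are
# shifted by half a sector so that 0 deg falls in the middle of the first sector) and flags
# every sector that contains at least 3 rays; find_num_dir below halves n_rays until all
# sectors are sufficiently populated or gives up at 4 directions.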
def find_num_dir(n_rays,calc_idx,azimuth,idx_valid):
if np.all(check_num_dir(n_rays,calc_idx,azimuth,idx_valid)[0]):
return np.all(check_num_dir(n_rays,calc_idx,azimuth,idx_valid)[0]), n_rays, check_num_dir(n_rays,calc_idx,azimuth,idx_valid)[1], check_num_dir(n_rays,calc_idx,azimuth,idx_valid)[2]
elif ~np.all(check_num_dir(n_rays,calc_idx,azimuth,idx_valid)[0]):
if n_rays > 4:
            print('number of directions too high...try ' + str(n_rays//2) + ' instead of ' + str(n_rays))
return find_num_dir(n_rays//2,calc_idx,azimuth,idx_valid)
elif n_rays < 4:
            print('number of directions too low...try ' + str(4) + ' instead')
return find_num_dir(4,calc_idx,azimuth,idx_valid)
else:
print('not enough valid directions!-->skip non-convergent time windows' )
return np.all(check_num_dir(n_rays,calc_idx,azimuth,idx_valid)[0]), n_rays, check_num_dir(n_rays,calc_idx,azimuth,idx_valid)[1], check_num_dir(n_rays,calc_idx,azimuth,idx_valid)[2]
### the actual processing is done in this class
class hpl2netCDFClient(object):
def __init__(self, config_dir, cmd, date2proc):
self.config_dir = config_dir
self.cmd= cmd
self.date2proc= date2proc
def display_config_dir(self):
print('config-file taken from ' + self.config_dir)
def display_configDict(self):
confDict= config.gen_confDict(url= self.config_dir)
print(confDict)
def dailylvl1(self):
date_chosen = self.date2proc
confDict= config.gen_confDict(url= self.config_dir)
hpl_list= hpl_files.make_file_list(date_chosen, confDict, url=confDict['PROC_PATH'])
if not hpl_list.name:
print('no files found')
else:
print('combining files to daily lvl1...')
print(' ...')
## look at the previous and following day for potential files
# and add to hpl_list
print('looking at the previous day')
hpl_listm1 = hpl_files.make_file_list(date_chosen + datetime.timedelta(minutes=-30), confDict, url=confDict['PROC_PATH'])
print('looking at the following day')
hpl_listp1 = hpl_files.make_file_list(date_chosen + datetime.timedelta(days=+1, minutes=30), confDict, url=confDict['PROC_PATH'])
namelist = hpl_list.name
timelist = hpl_list.time
#print('check 1')
if len(hpl_listm1.time) > 0:
if date_chosen - hpl_listm1.time[-1] <= datetime.timedelta(minutes=30):
namelist = [hpl_listm1.name[-1]] + namelist
timelist = [hpl_listm1.time[-1]] + timelist
print('adding last file of previous day before')
#print('check 2')
if len(hpl_listp1.time) > 0:
if hpl_listp1.time[0] - date_chosen <= datetime.timedelta(days=1, minutes=30):
namelist = namelist + [hpl_listp1.name[0]]
timelist = timelist + [hpl_listp1.time[0]]
print('adding first file of following day after')
hpl_list = hpl_files(namelist, timelist)
# print('check 3')
# read_idx= hpl_files.reader_idx(hpl_list,confDict,chunks=False)
nc_name= hpl_files.combine_lvl1(hpl_list, confDict, date_chosen)
print(nc_name)
ds_tmp= xr.open_dataset(nc_name)
print(ds_tmp.info)
ds_tmp.close()
def dailylvl2(self):
date_chosen = self.date2proc
confDict= config.gen_confDict(url= self.config_dir)
path= Path(confDict['NC_L1_PATH'] + '/'
+ date_chosen.strftime("%Y") + '/'
+ date_chosen.strftime("%Y%m")
)
search_pattern = '**/' + confDict['NC_L1_BASENAME'] + '*' + date_chosen.strftime("%Y%m%d")+ '*.nc'
mylist= list(path.glob(search_pattern))
if not mylist:
raise FileNotFoundError(f"Could not find file matching pattern: {search_pattern}")
print(mylist[0])
if len(mylist)>1:
print('!!!multiple files found!!!, only first is processed!')
        try:
            ds_tmp = xr.open_dataset(mylist[0])
        except Exception:
            print('no such file exists: ' + path.name + '... .nc')
            ds_tmp = None
if not ds_tmp:
print('unable to continue processing!')
else:
print('processing lvl1 to lvl2...')
            ## do processing!!
# read lidar parameters
n_rays= int(confDict['NUMBER_OF_DIRECTIONS'])
# number of gates
n_gates= int(confDict['NUMBER_OF_GATES'])
            # number of pulses used in the data point acquisition
n= ds_tmp.prf.data
# number of points per range gate
M= ds_tmp.nsmpl.data
# half of detector bandwidth in velocity space
B= ds_tmp.nqv.data
# filter Stares within scan
elevation= 90-ds_tmp.zenith.data
azimuth= ds_tmp.azi.data[elevation < 89] % 360
time_ds = ds_tmp.time.data[elevation < 89]
dv= ds_tmp.dv.data[elevation < 89]
snr= ds_tmp.intensity.data[elevation < 89]-1
beta= ds_tmp.beta.data[elevation < 89]
height= ds_tmp.range.data*np.sin(np.nanmedian(elevation[elevation < 89])*np.pi/180)
width= ds_tmp.range.data*2*np.cos(np.nanmedian(elevation[elevation < 89])*np.pi/180)
height_bnds= ds_tmp.range_bnds.data
height_bnds[:,0]= np.sin(np.nanmedian(elevation[elevation < 89])*np.pi/180)*(height_bnds[:,0])
height_bnds[:,1]= np.sin(np.nanmedian(elevation[elevation < 89])*np.pi/180)*(height_bnds[:,1])
# define time chunks
## Look for UTC_OFFSET in config
if 'UTC_OFFSET' in confDict:
time_offset = np.timedelta64(int(confDict['UTC_OFFSET']), 'h')
time_delta = int(confDict['UTC_OFFSET'])
else:
time_offset = np.timedelta64(0, 'h')
time_delta = 0
time_vec= np.arange(date_chosen - datetime.timedelta(hours=time_delta)
,date_chosen+datetime.timedelta(days = 1) - datetime.timedelta(hours=time_delta)
+datetime.timedelta(minutes= int(confDict['AVG_MIN']))
,datetime.timedelta(minutes= int(confDict['AVG_MIN'])))
calc_idx= [np.where((ii <= time_ds)*(time_ds < iip1))
for ii,iip1 in zip(time_vec[0:-1],time_vec[1::])]
time_start= np.array([int(pd.to_datetime(time_ds[t[0][-1]]).replace(tzinfo=datetime.timezone.utc).timestamp())
if len(t[0]) != 0
else int(pd.to_datetime(time_vec[ii+1]).replace(tzinfo=datetime.timezone.utc).timestamp())
for ii,t in enumerate(calc_idx)
])
time_bnds= np.array([[ int(pd.to_datetime(time_ds[t[0][0]]).replace(tzinfo=datetime.timezone.utc).timestamp())
,int(pd.to_datetime(time_ds[t[0][-1]]).replace(tzinfo=datetime.timezone.utc).timestamp())]
if len(t[0]) != 0
else [int( | pd.to_datetime(time_vec[ii]) | pandas.to_datetime |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
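# Illustrative call (a sketch with made-up sids and dates): estimates that become known on
# different dates are forward-filled through the window ending at `end_date`.
# >>> create_expected_df_for_factor_compute(
# ...     pd.Timestamp('2015-01-05'), [0, 1],
# ...     [(0, 10., pd.Timestamp('2015-01-05')), (1, 20., pd.Timestamp('2015-01-07'))],
# ...     pd.Timestamp('2015-01-08'))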
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
    Tests
    ------
    test_load_one_day()
        Tests that the latest values of all estimate columns are loaded
        correctly for a single simulation day.
    """
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests the output of the previous quarter loader for a pipeline run over a
    single day with multiple estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests the output of the next quarter loader for a pipeline run over a
    single day with multiple estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that requests estimates for multiple quarters out and
        checks that the returned columns contain data for the correct number
        of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
        A dictionary mapping the number of quarters out to snapshots of how
        the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
| pd.Timestamp("2015-01-22") | pandas.Timestamp |
import pandas as pd
import numpy as np
def smape(y_true, y_pred):
numerator = np.abs(y_true - y_pred)
denominator = (np.abs(y_true) + np.abs(y_pred)) / 2
ratio = numerator / denominator
return (ratio.mean())
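# Illustrative usage of smape() (added example, not part of the original script):
# smape(pd.Series([100.0, 110.0]), pd.Series([90.0, 115.0])) returns roughly
# 0.075, i.e. the mean symmetric ratio; multiply by 100 for a percentage.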
def SES_model(data, horizon, alpha_high, alpha_low):
from statsmodels.tsa.holtwinters import SimpleExpSmoothing
ses_high = SimpleExpSmoothing(data['High'], initialization_method='legacy-heuristic')
res_high = ses_high.fit(smoothing_level=alpha_high, optimized=False)
fore_high = res_high.forecast(horizon)
fore_high = fore_high.to_frame()
fore_high.columns = ['Forecast_High']
pred_high = res_high.predict(start=data.index[0], end=data.index[-1])
smap_high = round(smape(data['High'], pred_high),3)
#pred_high = pred_high.to_frame()
#pred_high.columns = ['Pred_High']
ses_low = SimpleExpSmoothing(data['Low'], initialization_method='legacy-heuristic')
res_low = ses_low.fit(smoothing_level=alpha_low, optimized=False)
fore_low = res_low.forecast(horizon)
fore_low = fore_low.to_frame()
fore_low.columns = ['Forecast_Low']
pred_low = res_low.predict(start=data.index[0], end=data.index[-1])
smap_low = round(smape(data['Low'], pred_low),3)
#pred_low = pred_low.to_frame()
#pred_low.columns = ['Pred_Low']
data_final = pd.concat([data,pred_low,pred_high, fore_high, fore_low], axis=1)
data_final.loc[data.index[-1], 'Forecast_High'] = data_final.loc[data.index[-1], 'High']
data_final.loc[data.index[-1], 'Forecast_Low'] = data_final.loc[data.index[-1], 'Low']
optim_alpha_high = round(ses_high.fit().params['smoothing_level'],2)
optim_alpha_low = round(ses_low.fit().params['smoothing_level'],2)
return [data_final,smap_low,smap_high,optim_alpha_high,optim_alpha_low]
def Holt_model(data,horizon, level_high, level_low,trend_high,trend_low):
from statsmodels.tsa.holtwinters import Holt
holt_high = Holt(data['High'], initialization_method='legacy-heuristic')
res_high = holt_high.fit(smoothing_level=level_high,smoothing_trend= trend_high,optimized=False)
fore_high = res_high.forecast(horizon)
fore_high = fore_high.to_frame()
fore_high.columns = ['Forecast_High']
pred_high = res_high.predict(start=data.index[0], end=data.index[-1])
smap_high = round(smape(data['High'], pred_high), 3)
holt_low = Holt(data['Low'], initialization_method='legacy-heuristic')
res_low = holt_low.fit(smoothing_level= level_low,smoothing_trend= trend_low, optimized=False)
fore_low = res_low.forecast(horizon)
fore_low = fore_low.to_frame()
fore_low.columns = ['Forecast_Low']
pred_low = res_low.predict(start=data.index[0], end=data.index[-1])
smap_low = round(smape(data['Low'], pred_low), 3)
data_final = pd.concat([data,pred_low,pred_high, fore_high, fore_low], axis=1)
data_final.loc[data.index[-1], 'Forecast_High'] = data_final.loc[data.index[-1], 'High']
data_final.loc[data.index[-1], 'Forecast_Low'] = data_final.loc[data.index[-1], 'Low']
optim_level_high = round(holt_high.fit().params['smoothing_level'],2)
optim_level_low = round(holt_low.fit().params['smoothing_level'],2)
optim_trend_high = round(holt_high.fit().params['smoothing_trend'],2)
optim_trend_low = round(holt_low.fit().params['smoothing_trend'],2)
return [data_final,smap_low,smap_high,optim_level_high,optim_level_low,optim_trend_high,optim_trend_low]
def Holt_Winter_Model(data,horizon, level_high, level_low,trend_high,trend_low,season_high,season_low):
from statsmodels.tsa.holtwinters import ExponentialSmoothing
hw_high =ExponentialSmoothing(data['High'], initialization_method='legacy-heuristic',trend = 'add',seasonal='add')
res_high = hw_high.fit(smoothing_level=level_high, smoothing_trend=trend_high, smoothing_seasonal= season_high,optimized=False)
fore_high = res_high.forecast(horizon)
fore_high = fore_high.to_frame()
fore_high.columns = ['Forecast_High']
pred_high = res_high.predict(start=data.index[0], end=data.index[-1])
smap_high = round(smape(data['High'], pred_high), 3)
hw_low = ExponentialSmoothing(data['Low'], initialization_method='legacy-heuristic',trend = 'add',seasonal='add')
res_low = hw_low.fit(smoothing_level=level_low, smoothing_trend= trend_low, smoothing_seasonal= season_low ,optimized=False)
fore_low = res_low.forecast(horizon)
fore_low = fore_low.to_frame()
fore_low.columns = ['Forecast_Low']
pred_low = res_low.predict(start=data.index[0], end=data.index[-1])
smap_low = round(smape(data['Low'], pred_low), 3)
data_final = pd.concat([data, pred_low, pred_high, fore_high, fore_low], axis=1)
data_final.loc[data.index[-1], 'Forecast_High'] = data_final.loc[data.index[-1], 'High']
data_final.loc[data.index[-1], 'Forecast_Low'] = data_final.loc[data.index[-1], 'Low']
optim_model_high = hw_high.fit()
optim_model_low = hw_low.fit()
optim_level_high = round(optim_model_high.params['smoothing_level'], 2)
optim_level_low = round(optim_model_low.params['smoothing_level'], 2)
optim_trend_high = round(optim_model_high.params['smoothing_trend'], 2)
optim_trend_low = round(optim_model_low.params['smoothing_trend'], 2)
optim_season_high = round(optim_model_high.params['smoothing_seasonal'],2)
optim_season_low = round(optim_model_low.params['smoothing_seasonal'], 2)
return [data_final, smap_low, smap_high, optim_level_high, optim_level_low, optim_trend_high, optim_trend_low,optim_season_high,optim_season_low]
from preprocess import process_high, process_low
def AR_model(data,horizon, p_high,p_low):
from statsmodels.tsa.arima.model import ARIMA
ar_high = ARIMA(data['High'],order = (p_high,0,0))
res_high = ar_high.fit()
fore_high = res_high.forecast(horizon)
fore_high = fore_high.to_frame()
fore_high.columns = ['Forecast_High']
pred_high = res_high.predict(start=data.index[0], end=data.index[-1])
smap_high = round(smape(data['High'], pred_high), 3)
ar_low = ARIMA(data['Low'],order = (p_low,0,0))
res_low = ar_low.fit()
fore_low = res_low.forecast(horizon)
fore_low = fore_low.to_frame()
fore_low.columns = ['Forecast_Low']
pred_low = res_low.predict(start=data.index[0], end=data.index[-1])
smap_low = round(smape(data['Low'], pred_low), 3)
data_final = pd.concat([data, pred_low, pred_high, fore_high, fore_low], axis=1)
data_final.loc[data.index[-1], 'Forecast_High'] = data_final.loc[data.index[-1], 'High']
data_final.loc[data.index[-1], 'Forecast_Low'] = data_final.loc[data.index[-1], 'Low']
return [data_final,smap_high,smap_low]
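# Note (added comment): ARIMA(order=(p, 0, 0)) is a pure AR(p) model, so a call
# such as AR_model(data, horizon=5, p_high=2, p_low=2) would fit AR(2) models to
# the 'High' and 'Low' columns; the argument values shown are only illustrative.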
def MA_model(data,horizon, q_high,q_low):
from statsmodels.tsa.arima.model import ARIMA
ma_high = ARIMA(data['High'],order = (0,0,q_high))
res_high = ma_high.fit()
fore_high = res_high.forecast(horizon)
fore_high = fore_high.to_frame()
fore_high.columns = ['Forecast_High']
pred_high = res_high.predict(start=data.index[0], end=data.index[-1])
smap_high = round(smape(data['High'], pred_high), 3)
ma_low = ARIMA(data['Low'],order = (0,0,q_low))
res_low = ma_low.fit()
fore_low = res_low.forecast(horizon)
fore_low = fore_low.to_frame()
fore_low.columns = ['Forecast_Low']
pred_low = res_low.predict(start=data.index[0], end=data.index[-1])
smap_low = round(smape(data['Low'], pred_low), 3)
data_final = pd.concat([data, pred_low, pred_high, fore_high, fore_low], axis=1)
data_final.loc[data.index[-1], 'Forecast_High'] = data_final.loc[data.index[-1], 'High']
data_final.loc[data.index[-1], 'Forecast_Low'] = data_final.loc[data.index[-1], 'Low']
return [data_final,smap_high,smap_low]
def ARMA_model(data,horizon,p_high,p_low, q_high,q_low):
from statsmodels.tsa.arima.model import ARIMA
arma_high = ARIMA(data['High'],order = (p_high,0,q_high))
res_high = arma_high.fit()
fore_high = res_high.forecast(horizon)
fore_high = fore_high.to_frame()
fore_high.columns = ['Forecast_High']
pred_high = res_high.predict(start=data.index[0], end=data.index[-1])
smap_high = round(smape(data['High'], pred_high), 3)
arma_low = ARIMA(data['Low'],order = (p_low,0,q_low))
res_low = arma_low.fit()
fore_low = res_low.forecast(horizon)
fore_low = fore_low.to_frame()
fore_low.columns = ['Forecast_Low']
pred_low = res_low.predict(start=data.index[0], end=data.index[-1])
smap_low = round(smape(data['Low'], pred_low), 3)
data_final = pd.concat([data, pred_low, pred_high, fore_high, fore_low], axis=1)
data_final.loc[data.index[-1], 'Forecast_High'] = data_final.loc[data.index[-1], 'High']
data_final.loc[data.index[-1], 'Forecast_Low'] = data_final.loc[data.index[-1], 'Low']
return [data_final,smap_high,smap_low]
def ARIMA_model(data,horizon,p_high,p_low,q_high,q_low,i_high,i_low):
from statsmodels.tsa.arima.model import ARIMA
arima_high = ARIMA(data['High'], order=(p_high, i_high, q_high))
res_high = arima_high.fit()
fore_high = res_high.forecast(horizon)
fore_high = fore_high.to_frame()
fore_high.columns = ['Forecast_High']
pred_high = res_high.predict(start=data.index[0], end=data.index[-1])
smap_high = round(smape(data['High'], pred_high), 3)
arima_low = ARIMA(data['Low'], order=(p_low, i_low, q_low))
res_low = arima_low.fit()
fore_low = res_low.forecast(horizon)
fore_low = fore_low.to_frame()
fore_low.columns = ['Forecast_Low']
pred_low = res_low.predict(start=data.index[0], end=data.index[-1])
smap_low = round(smape(data['Low'], pred_low), 3)
data_final = pd.concat([data, pred_low, pred_high, fore_high, fore_low], axis=1)
data_final.loc[data.index[-1], 'Forecast_High'] = data_final.loc[data.index[-1], 'High']
data_final.loc[data.index[-1], 'Forecast_Low'] = data_final.loc[data.index[-1], 'Low']
return [data_final, smap_high, smap_low]
def Auto_Arima(data,horizon):
from pmdarima import auto_arima
index = pd.bdate_range(start=data.index[-1], periods=(horizon+1))
model_high = auto_arima(data['High'])
fore_high = model_high.predict(horizon)
fore_high = np.insert(fore_high, 0, data['High'][-1])
fore_high = pd.DataFrame(fore_high, index=index)
fore_high.columns = ['Forecast_High']
model_low = auto_arima(data['Low'])
fore_low = model_low.predict(horizon)
fore_low = np.insert(fore_low, 0, data['Low'][-1])
fore_low = pd.DataFrame(fore_low, index=index)
fore_low.columns = ['Forecast_Low']
data_final = | pd.concat([data,fore_high,fore_low],axis = 1) | pandas.concat |
#%%
import os
try:
os.chdir('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
print(os.getcwd())
except:
pass
#%%
import sys
sys.path.append("/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/")
import pandas as pd
import numpy as np
import connectome_tools.process_matrix as promat
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import pymaid
from pymaid_creds import url, name, password, token
# convert pair-sorted brain/sensories matrix to binary matrix based on synapse threshold
matrix_ad = pd.read_csv('data/axon-dendrite.csv', header=0, index_col=0)
matrix_dd = pd.read_csv('data/dendrite-dendrite.csv', header=0, index_col=0)
matrix_aa = pd.read_csv('data/axon-axon.csv', header=0, index_col=0)
matrix_da = pd.read_csv('data/dendrite-axon.csv', header=0, index_col=0)
# the columns are string by default and the indices int; now both are int
matrix_ad.columns = pd.to_numeric(matrix_ad.columns)
matrix_dd.columns = pd.to_numeric(matrix_dd.columns)
matrix_aa.columns = | pd.to_numeric(matrix_aa.columns) | pandas.to_numeric |
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath: str, categories_filepath: str) -> pd.DataFrame:
""" Load messages and category data from CSV files, merge data and return in
a dataframe
Args:
messages_filepath: str. Filepath to messages CSV file.
categories_filepath: str. Filepath to categories CSV file.
Returns:
df: DataFrame. A DataFrame of merged message and category data
"""
# Load messages and categories datasets from CSV files
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
# Merge messages and categories datasets on the common id field
df = | pd.merge(messages, categories, left_on='id', right_on='id', how='outer') | pandas.merge |
# -*- coding: utf-8 -*-
"""
@author: hkaneko
"""
# Collection of functions used by the sample programs
import matplotlib.figure as figure
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist
from sklearn import metrics
## Function that draws a scatter plot of measured vs. estimated y values and computes r2, RMSE, and MAE
# def performance_check_in_regression(y, estimated_y):
#     plt.rcParams['font.size'] = 18  # font size of axis names and tick labels
#     plt.figure(figsize=figure.figaspect(1))  # make the figure square
#     plt.scatter(y, estimated_y.iloc[:, 0], c='blue')  # measured vs. estimated plot
#     y_max = max(y.max(), estimated_y.iloc[:, 0].max())  # larger of the measured and estimated maxima
#     y_min = min(y.min(), estimated_y.iloc[:, 0].min())  # smaller of the measured and estimated minima
#     plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
#              [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')  # diagonal line from the minimum - 5% to the maximum + 5%
#     plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))  # y-axis range
#     plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))  # x-axis range
#     plt.xlabel('actual y')  # x-axis label
#     plt.ylabel('estimated y')  # y-axis label
#     plt.show()  # draw the figure with the settings above
#
#     r2 = metrics.r2_score(y, estimated_y)  # r2
#     rmse = metrics.mean_squared_error(y, estimated_y) ** 0.5  # RMSE
#     mae = metrics.mean_absolute_error(y, estimated_y)  # MAE
#     return (r2, rmse, mae)
def k3n_error(x_1, x_2, k):
"""
k-nearest neighbor normalized error (k3n-error)
When X1 is data of X-variables and X2 is data of Z-variables
(low-dimensional data), this is k3n error in visualization (k3n-Z-error).
    When X1 is data of Z-variables (low-dimensional data) and X2 is data of
    X-variables, this is k3n error in reconstruction (k3n-X-error).
k3n-error = k3n-Z-error + k3n-X-error
Parameters
----------
x_1: numpy.array or pandas.DataFrame
x_2: numpy.array or pandas.DataFrame
k: int
        The number of neighbors
Returns
-------
k3n_error : float
k3n-Z-error or k3n-X-error
"""
x_1 = np.array(x_1)
x_2 = np.array(x_2)
x_1_distance = cdist(x_1, x_1)
x_1_sorted_indexes = np.argsort(x_1_distance, axis=1)
x_2_distance = cdist(x_2, x_2)
for i in range(x_2.shape[0]):
_replace_zero_with_the_smallest_positive_values(x_2_distance[i, :])
identity_matrix = np.eye(len(x_1_distance), dtype=bool)
knn_distance_in_x_1 = np.sort(x_2_distance[:, x_1_sorted_indexes[:, 1:k + 1]][identity_matrix])
knn_distance_in_x_2 = np.sort(x_2_distance)[:, 1:k + 1]
sum_k3n_error = (
(knn_distance_in_x_1 - knn_distance_in_x_2) / knn_distance_in_x_2
).sum()
return sum_k3n_error / x_1.shape[0] / k
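# Illustrative usage (added comment): with X the original variables and Z a
# 2-D embedding (e.g. from PCA or t-SNE), the total k3n-error could be computed
# as k3n_error(X, Z, k=10) + k3n_error(Z, X, k=10); smaller values indicate a
# better embedding. The call shown here is a sketch, not part of the original script.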
def _replace_zero_with_the_smallest_positive_values(arr):
"""
Replace zeros in array with the smallest positive values.
Parameters
----------
arr: numpy.array
"""
arr[arr == 0] = np.min(arr[arr != 0])
def plot_and_selection_of_hyperparameter(hyperparameter_values, metrics_values, x_label, y_label):
    # Plot the statistic (e.g. r2 or accuracy after CV) for each value of the hyperparameter (number of components, k in k-NN, etc.)
plt.rcParams['font.size'] = 18
plt.scatter(hyperparameter_values, metrics_values, c='blue')
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.show()
    # Select the hyperparameter value (number of components, k in k-NN, etc.) at which the statistic (e.g. r2 or accuracy after CV) is largest
return hyperparameter_values[metrics_values.index(max(metrics_values))]
def estimation_and_performance_check_in_regression_train_and_test(model, x_train, y_train, x_test, y_test):
    # Estimation for the training data
    estimated_y_train = model.predict(x_train) * y_train.std() + y_train.mean()  # estimate y and rescale back to the original scale
    estimated_y_train = pd.DataFrame(estimated_y_train, index=x_train.index,
                                     columns=['estimated_y'])  # convert to a pandas DataFrame and set the index and column names
    # Plot of measured vs. estimated values for the training data
    plt.rcParams['font.size'] = 18  # font size of axis names and tick labels
    plt.figure(figsize=figure.figaspect(1))  # make the figure square
    plt.scatter(y_train, estimated_y_train.iloc[:, 0], c='blue')  # measured vs. estimated plot
    y_max = max(y_train.max(), estimated_y_train.iloc[:, 0].max())  # larger of the measured and estimated maxima
    y_min = min(y_train.min(), estimated_y_train.iloc[:, 0].min())  # smaller of the measured and estimated minima
    plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
             [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')  # diagonal line from the minimum - 5% to the maximum + 5%
    plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))  # y-axis range
    plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))  # x-axis range
    plt.xlabel('actual y')  # x-axis label
    plt.ylabel('estimated y')  # y-axis label
    plt.show()  # draw the figure with the settings above
    # r2, RMSE, and MAE for the training data
print('r^2 for training data :', metrics.r2_score(y_train, estimated_y_train))
print('RMSE for training data :', metrics.mean_squared_error(y_train, estimated_y_train) ** 0.5)
print('MAE for training data :', metrics.mean_absolute_error(y_train, estimated_y_train))
    # Save the training-data results
    y_train_for_save = pd.DataFrame(y_train)  # it is a Series, so rename the column separately
    y_train_for_save.columns = ['actual_y']
    y_error_train = y_train_for_save.iloc[:, 0] - estimated_y_train.iloc[:, 0]
    y_error_train = pd.DataFrame(y_error_train)  # it is a Series, so rename the column separately
    y_error_train.columns = ['error_of_y(actual_y-estimated_y)']
    results_train = pd.concat([estimated_y_train, y_train_for_save, y_error_train], axis=1)
    results_train.to_csv('estimated_y_train.csv')  # save the estimates to a csv file; an existing file with the same name will be overwritten
    # Estimation for the test data
    estimated_y_test = model.predict(x_test) * y_train.std() + y_train.mean()  # estimate y and rescale back to the original scale
    estimated_y_test = pd.DataFrame(estimated_y_test, index=x_test.index,
                                    columns=['estimated_y'])  # convert to a pandas DataFrame and set the index and column names
    # Plot of measured vs. estimated values for the test data
    plt.rcParams['font.size'] = 18  # font size of axis names and tick labels
    plt.figure(figsize=figure.figaspect(1))  # make the figure square
    plt.scatter(y_test, estimated_y_test.iloc[:, 0], c='blue')  # measured vs. estimated plot
    y_max = max(y_test.max(), estimated_y_test.iloc[:, 0].max())  # larger of the measured and estimated maxima
    y_min = min(y_test.min(), estimated_y_test.iloc[:, 0].min())  # smaller of the measured and estimated minima
    plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
             [y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')  # diagonal line from the minimum - 5% to the maximum + 5%
    plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))  # y-axis range
    plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))  # x-axis range
    plt.xlabel('actual y')  # x-axis label
    plt.ylabel('estimated y')  # y-axis label
    plt.show()  # draw the figure with the settings above
    # r2, RMSE, and MAE for the test data
print('r^2 for test data :', metrics.r2_score(y_test, estimated_y_test))
print('RMSE for test data :', metrics.mean_squared_error(y_test, estimated_y_test) ** 0.5)
print('MAE for test data :', metrics.mean_absolute_error(y_test, estimated_y_test))
    # Save the test-data results
    y_test_for_save = pd.DataFrame(y_test)  # it is a Series, so rename the column separately
y_test_for_save.columns = ['actual_y']
y_error_test = y_test_for_save.iloc[:, 0] - estimated_y_test.iloc[:, 0]
y_error_test = | pd.DataFrame(y_error_test) | pandas.DataFrame |
import pandas as pd
import numpy as np
# The arrays loaded are not in proper format so we modify them to x,y pairs. Also we filter irrelevant values.
def modify_array(arr):
# arrays to be returned
arr_x = []
arr_y = []
for item in arr:
# Get the items in proper format
item = item.strip('[').strip(']').split()
# Filter values
if int(item[0]) >= 640 : item[0] = 320
if int(item[1]) >= 480 : item[1] = 240
# Save to the lists to be returned
arr_x.append(int(item[0]))
arr_y.append(int(item[1]))
return arr_x, arr_y
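# Illustrative example (added comment): an entry such as '[650 300]' is parsed
# to x=650, y=300 and then clipped to the assumed 640x480 frame, so
# modify_array(['[650 300]']) would return ([320], [300]).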
# Opens a data file and imports values
# args: sk_data_path: path to the data folder
# data_file: file to be imported
# returns: df: a dataframe with all the imported values
# skeletal data files come in the following format:
# Frame: f Hip,Shoulder_Center,Left: lsx,lsy lex,ley lwx,lwy lhx,lhy Right: rsx,rsy rex,rey rwx,rwy rhx,rhy
def import_data(sk_data_path, data_file):
data_f = open(sk_data_path + '/' + data_file, 'r')
# Read the data from csv file
read_df = | pd.read_csv(data_f) | pandas.read_csv |
import docx
from docx.shared import Pt
from docx.enum.text import WD_ALIGN_PARAGRAPH, WD_BREAK
from docx.shared import Cm
import os
import math
import pandas as pd
import numpy as np
import re
from datetime import date
import streamlit as st
import json
import glob
from PIL import Image
import smtplib
import docx2pdf
import shutil
import zipfile
from datetime import datetime
import platform
import matplotlib.pyplot as plt
def User_validation():
f=open("Validation/Validation.json","r")
past=json.loads(f.read())
f.close()
now=datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M")
time_past=datetime.strptime(past['Acceso']["Hora"], "%d/%m/%Y %H:%M")
timesince = now - time_past
Time_min= int(timesince.total_seconds() / 60)
bool_negate = Time_min<120
if not bool_negate:
past['Acceso'].update({"Estado":"Negado"})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
bool_aprove= past['Acceso']["Estado"]=="Aprovado"
if not bool_aprove:
colums= st.columns([1,2,1])
with colums[1]:
#st.image("Imagenes/Escudo_unal.png")
st.subheader("Ingrese el usuario y contraseña")
Usuario=st.text_input("Usuario")
Clave=st.text_input("Contraseña",type="password")
Users=["Gestor Comercial"]
bool_user = Usuario in Users
bool_clave = (Clave)==("1234")
bool_user_email = past['Acceso']["User"] == Usuario
bool_time2 = Time_min<1000
bool_1 = bool_time2 and bool_user_email
bool_2 = bool_user and bool_clave
if not bool_user_email and bool_2:
past['Acceso'].update({"User":Usuario,"Estado":"Aprovado","Hora":dt_string})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
if not bool_2:
if (Usuario != "") and (Clave!=""):
with colums[1]:
st.warning("Usuario o contraseña incorrectos.\n\n Por favor intente nuevamente.")
elif bool_2 and not bool_1:
past['Acceso'].update({"User":Usuario,"Estado":"Aprovado","Hora":dt_string})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
EMAIL_ADDRESS = '<EMAIL>'
EMAIL_PASSWORD = '<PASSWORD>'
try:
with smtplib.SMTP('smtp.gmail.com', 587) as smtp:
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
subject = 'Acceso aplicacion Julia'
body = 'Acceso usuario ' + Usuario +' el '+dt_string
msg = f'Subject: {subject}\n\n{body}'
smtp.sendmail(EMAIL_ADDRESS, EMAIL_ADDRESS, msg)
except:
pass
with colums[1]:
st.button("Acceder a la aplicación")
elif bool_2:
past['Acceso'].update({"Estado":"Aprovado","Hora":dt_string,"User":Usuario})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
with colums[1]:
st.button("Acceder a la aplicación")
return bool_aprove
def Num_dias(leng):
if leng==1:
return "1 día"
else:
return str(leng) + " días"
def day_week(dia):
if dia ==0:
Dia="Lunes"
elif dia ==1:
Dia="Martes"
elif dia ==2:
Dia="Miércoles"
elif dia ==3:
Dia="Jueves"
elif dia ==4:
Dia="Viernes"
elif dia ==5:
Dia="Sábado"
elif dia ==6:
Dia="Domingo-Festivo"
return Dia
def remove_row(table, row):
tbl = table._tbl
tr = row._tr
tbl.remove(tr)
def Range_fecha(dates):
if len(dates)==1:
return pd.to_datetime(dates[0]).strftime('%Y-%m-%d')
else:
return pd.to_datetime(dates[0]).strftime('%Y-%m-%d')+" hasta "+ pd.to_datetime(dates[-1]).strftime('%Y-%m-%d')
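# Illustrative example (added comment): for the hypothetical dates 2022-03-01
# and 2022-03-05, Range_fecha returns "2022-03-01 hasta 2022-03-05"; with a
# single date it returns just "2022-03-01".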
def any2str(obj):
if isinstance(obj, str):
return obj
elif math.isnan(obj):
return ""
elif isinstance(obj, int):
return str(obj)
elif isinstance(obj, float):
return str(obj)
def dt_fechas(data,data_user,Fechas,tipo_dia):
dt_Final=pd.DataFrame(columns=["Dia","Fecha","Requerimiento","Respaldo"])
for dia in Fechas:
data_fecha=data_user[data_user["Fecha"]== dia]
data_dia_todos=data[data["Fecha"]==dia]
try:
            d_week=tipo_dia[tipo_dia["FECHA"]==dia]["TIPO D"].to_numpy()[0]
except:
st.warning("Actualizar el calendario del excel extra")
d_week=day_week(pd.Series(data=dia).dt.dayofweek.to_numpy()[0])
df=pd.DataFrame([[d_week,dia,data_dia_todos["CANTIDAD"].sum(),data_fecha["CANTIDAD"].sum()]],columns=["Dia","Fecha","Requerimiento","Respaldo"])
dt_Final=dt_Final.append(df, ignore_index=True)
return dt_Final
def dt_fechas_2(data,data_user,Fechas,tipo_dia):
dt_Final=pd.DataFrame(columns=["Dia","Fecha","Requerimiento","Respaldo"])
for dia in Fechas:
data_fecha=data_user[data_user["FECHA"]== dia]
data_dia_todos=data[data["FECHA"]==dia]
try:
            d_week=tipo_dia[tipo_dia["FECHA"]==dia]["TIPO D"].to_numpy()[0]
except:
st.warning("Actualizar el calendario del excel extra")
d_week=day_week(pd.Series(data=dia).dt.dayofweek.to_numpy()[0])
df=pd.DataFrame([[d_week,dia,data_dia_todos["CANTIDAD"].sum(),data_fecha["CANTIDAD"].sum()]],columns=["Dia","Fecha","Requerimiento","Respaldo"])
dt_Final=dt_Final.append(df, ignore_index=True)
return dt_Final
def dt_fechas_3(data,data_user,Fechas,tipo_dia):
dt_Final=pd.DataFrame(columns=["Dia","Fecha","Respaldo","P_neto","TRM","PRECIO PONDERADO"])
for dia in Fechas:
data_fecha=data_user[data_user["FECHA"]== dia]
try:
d_week=tipo_dia[Tipo_dia["FECHA"]==dia]["TIPO D"].to_numpy()[0]
except:
st.warning("Actualizar el calendario del excel extra")
d_week=day_week(pd.Series(data=dia).dt.dayofweek.to_numpy()[0])
df=pd.DataFrame([[d_week,dia,data_fecha["CANTIDAD"].sum(),data_fecha["P NETO"].sum(),round(data_fecha["TRM"].mean(),2),round(data_fecha["PRECIO PONDERADO"].mean(),2)]],
columns=["Dia","Fecha","Respaldo","P_neto","TRM","PRECIO PONDERADO"])
dt_Final=dt_Final.append(df, ignore_index=True)
return dt_Final
def Mes_espa(mes):
if mes =="01":
Mes="Enero"
elif mes =="02":
Mes="Febrero"
elif mes =="03":
Mes="Marzo"
elif mes =="04":
Mes="Abril"
elif mes =="05":
Mes="Mayo"
elif mes =="06":
Mes="Junio"
elif mes =="07":
Mes="Julio"
elif mes =="08":
Mes="Agosto"
elif mes =="09":
Mes="Septiembre"
elif mes =="10":
Mes="Octubre"
elif mes =="11":
Mes="Noviembre"
elif mes =="12":
Mes="Diciembre"
return Mes
def F_Liq_pag(mes,ano):
if mes%12 ==1:
Fecha ="Enero"
elif mes%12 ==2:
Fecha ="Febrero"
elif mes%12 ==3:
Fecha ="Marzo"
elif mes%12 ==4:
Fecha ="Abril"
elif mes%12 ==5:
Fecha ="Mayo"
elif mes%12 ==6:
Fecha ="Junio"
elif mes%12 ==7:
Fecha ="Julio"
elif mes%12 ==8:
Fecha="Agosto"
elif mes%12 ==9:
Fecha="Septiembre"
elif mes%12 ==10:
Fecha="Octubre"
elif mes%12 ==11:
Fecha="Noviembre"
elif mes%12 ==0:
Fecha="Diciembre"
if mes > 12:
Fecha += " "+ str(ano+1)
else:
Fecha += " "+ str(ano)
return Fecha
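# Illustrative example (added comment): F_Liq_pag(13, 2021) wraps past December
# and returns "Enero 2022", while F_Liq_pag(8, 2021) returns "Agosto 2021";
# the argument values shown are hypothetical.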
def num2money(num):
if num < 1e3:
return str(round(num,2))
elif num < 1e6:
return str(round(num*1e3/1e6,2))+ " miles."
elif num < 1e9:
return str(round(num*1e3/1e9,2))+ " mill."
elif num < 1e12:
return str(round(num*1e3/1e12,2))+ " mil mill."
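# Illustrative example (added comment): num2money(4500) -> "4.5 miles." and
# num2money(2500000) -> "2.5 mill."; the sample values are hypothetical.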
def mes_espa(mes):
if mes =="01":
Mes="enero"
elif mes =="02":
Mes="febrero"
elif mes =="03":
Mes="marzo"
elif mes =="04":
Mes="abril"
elif mes =="05":
Mes="mayo"
elif mes =="06":
Mes="junio"
elif mes =="07":
Mes="julio"
elif mes =="08":
Mes="agosto"
elif mes =="09":
Mes="septiembre"
elif mes =="10":
Mes="octubre"
elif mes =="11":
Mes="noviembre"
elif mes =="12":
Mes="diciembre"
return Mes
def mes_num(mes):
Opciones2=("Enero","Febrero","Marzo","Abril","Mayo","Junio","Julio","Agosto","Septiembre","Octubre","Noviembre","Diciembre")
if mes == Opciones2[0]:
Mes="01"
elif mes == Opciones2[1]:
Mes="02"
elif mes == Opciones2[2]:
Mes="03"
elif mes == Opciones2[3]:
Mes="04"
elif mes == Opciones2[4]:
Mes="05"
elif mes == Opciones2[5]:
Mes="06"
elif mes == Opciones2[6]:
Mes="07"
elif mes == Opciones2[7]:
Mes="08"
elif mes == Opciones2[8]:
Mes="09"
elif mes == Opciones2[9]:
Mes="10"
elif mes == Opciones2[10]:
Mes="11"
elif mes == Opciones2[11]:
Mes="12"
return Mes
def dia_esp(dia):
if dia =="01":
Dia="1"
elif dia =="02":
Dia="2"
elif dia =="03":
Dia="3"
elif dia =="04":
Dia="4"
elif dia =="05":
Dia="5"
elif dia =="06":
Dia="6"
elif dia =="07":
Dia="7"
elif dia =="08":
Dia="8"
elif dia =="09":
Dia="9"
else :
Dia = dia
return Dia
def set_font(rows,fila,col,size):
run=rows[fila].cells[col].paragraphs[0].runs
font = run[0].font
font.size= Pt(size)
font.name = 'Tahoma'
def replace_text_for_image(paragraph, key, value,wid,hei):
if key in paragraph.text:
inline = paragraph.runs
for item in inline:
if key in item.text:
item.text = item.text.replace(key, "")
for val in value:
r = paragraph.add_run()
r.add_picture(val,width=Cm(wid), height=Cm(hei))
def replace_text_in_paragraph(paragraph, key, value):
if key in paragraph.text:
inline = paragraph.runs
for item in inline:
if key in item.text:
item.text = item.text.replace(key, value)
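# Illustrative usage (added comment): both replace helpers walk the runs of a
# python-docx paragraph, e.g. replace_text_in_paragraph(p, "${MES}", "Enero")
# swaps a placeholder for plain text, while replace_text_for_image drops one or
# more pictures, sized in centimetres, where the placeholder was found. Note the
# placeholder must sit inside a single run for the replacement to trigger.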
def delete_columns(table, columns):
# sort columns descending
columns.sort(reverse=True)
grid = table._tbl.find("w:tblGrid", table._tbl.nsmap)
for ci in columns:
for cell in table.column_cells(ci):
cell._tc.getparent().remove(cell._tc)
# Delete column reference.
col_elem = grid[ci]
grid.remove(col_elem)
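# Illustrative usage (added comment): delete_columns(doc.tables[0], [2, 3])
# would drop the third and fourth columns of the first table, removing both the
# cells and their <w:gridCol> entries; the indices shown are hypothetical.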
st.set_page_config(
layout="centered", # Can be "centered" or "wide". In the future also "dashboard", etc.
initial_sidebar_state="auto", # Can be "auto", "expanded", "collapsed"
page_title="JULIA RD", # String or None. Strings get appended with "• Streamlit".
page_icon="📊", # String, anything supported by st.image, or None.
)
if User_validation():
#if True:
Opciones1=("Oferta Firme de Respaldo","Certificado de Reintegros","Informe Comercial")
eleccion=st.sidebar.selectbox('Seleccione el proyecto',Opciones1)
#if False:
if eleccion==Opciones1[0]:
st.header("Creación ofertas firmes de respaldo")
st.subheader("Introducción de los documentos")
colums= st.columns([1,1,1])
with colums[0]:
uploaded_file_1 = st.file_uploader("Suba el consolidado base")
with colums[1]:
uploaded_file_2 = st.file_uploader("Suba la plantilla del documento")
with colums[2]:
uploaded_file_3 = st.file_uploader("Suba el excel adicional")
if (uploaded_file_1 is not None) and (uploaded_file_2 is not None) and (uploaded_file_3 is not None):
try:
data=pd.read_excel(uploaded_file_1)
Extras=pd.read_excel(uploaded_file_3,sheet_name="Usuarios")
Tipo_dia=pd.read_excel(uploaded_file_3,sheet_name="Calendario")
except:
st.warning("Recuerde que el formato del Excel tiene que ser xls")
data["Fecha"]=data["FECHAINI"].dt.to_pydatetime()
if data["USUARIO"].isnull().values.any():
st.warning("Revisar archivo de consolidado base, usuario no encontrado.")
data.dropna(subset = ["USUARIO"], inplace=True)
Users=pd.unique(data["USUARIO"])
else:
Users=pd.unique(data["USUARIO"])
Extras=pd.read_excel(uploaded_file_3,sheet_name="Usuarios")
Tipo_dia=pd.read_excel(uploaded_file_3,sheet_name="Calendario")
template_file_path = uploaded_file_2
today = date.today()
fecha=dia_esp(today.strftime("%d")) +" de "+ mes_espa(today.strftime("%m")) +" de "+ today.strftime("%Y")
colums= st.columns([1,4,1])
with colums[1]:
st.subheader("Introducción de las variables")
P_bolsa=st.text_input("Introduzca el Precio de Escasez de Activación",value="10.00")
P_contrato=st.text_input("Introduzca el precio del contrato [USD]",value="10.00")
P_TMR=st.text_input("Introduzca el valor de la TRM",value="3,950.00")
F_TRM = st.date_input("Seleccione la fecha del valor de la TRM:",value=today).strftime("%Y-%m-%d")
Agente_extra = st.text_input("Introduzca el nombre particular del agente")
columns_2 = st.columns([1,2,2,1])
Opciones2=("Enero","Febrero","Marzo","Abril","Mayo","Junio","Julio","Agosto","Septiembre","Octubre","Noviembre","Diciembre")
Opciones3=("I","II","III","IV","V")
with columns_2[1]:
eleccion2=st.selectbox('Seleccione el mes de la OFR',Opciones2)
with columns_2[2]:
eleccion3=st.selectbox('Selecciona la semana de la OFR',Opciones3)
if Agente_extra:
Agente_extra="_"+Agente_extra
else:
Agente_extra=""
columns_3 = st.columns([2,1,2])
with columns_3[1]:
if platform.system()=='Windows':
b=st.checkbox("PDF")
else:
b=False
a=st.button("Crear los documentos")
Ruta="Documentos/OFR/"+str(today.year)+"/"+mes_num(eleccion2)+"-"+eleccion2 +"/"+ eleccion3
Ruta_x="Documentos_exportar/"
if os.path.exists(Ruta_x):
shutil.rmtree(Ruta_x)
Ruta_x=Ruta_x+"/"
Ruta_x=Ruta_x+"/"
os.makedirs(Ruta_x, exist_ok=True)
if a:
try:
path1 = os.path.join(Ruta)
shutil.rmtree(path1)
os.makedirs(Ruta, exist_ok=True)
except:
os.makedirs(Ruta, exist_ok=True)
Ruta_word=Ruta+"/Word"
Ruta_pdf=Ruta+"/PDF"
Info ={"Ruta": Ruta,
"File_names": None
}
File_names=[]
os.makedirs(Ruta_word, exist_ok=True)
if b:
os.makedirs(Ruta_pdf, exist_ok=True)
zf = zipfile.ZipFile(
"Resultado.zip", "w", zipfile.ZIP_DEFLATED)
my_bar=st.progress(0)
steps=len(Users)
steps_done=0
for usuario in Users:
data_user=data.copy()
data_user=data_user[data_user["USUARIO"]==usuario]
Empresas = pd.unique(data_user["agente1"])
Respaldo = data[data["USUARIO"]== usuario]["CANTIDAD"].sum()
Fechas = pd.unique(data_user["Fecha"])
R_fechas = Range_fecha(Fechas)
Data_frame_fechas=dt_fechas(data.copy(),data_user,Fechas,Tipo_dia)
try:
Email = str(Extras[Extras["USUARIO"] == usuario]["CORREO"].values)
Porc_come = Extras[Extras["USUARIO"] == usuario]["MARGEN"].values[0]
except:
Email = ""
Porc_come = 0.1
st.warning("No hay coincidencia en el Excel de usuarios para: "+usuario)
Email = re.sub("\[|\]|\'|0","",Email)
tx_empresas=""
for idx ,val in enumerate(Empresas):
if len(Empresas)<4:
val_2=val[0:3]
tx_empresas += val_2
if idx==len(Empresas)-1:
pass
else:
tx_empresas +=", "
else:
tx_empresas += "Los Generadores"
P_kwh=float(re.sub(",","",P_TMR))*float(P_contrato)/1000
Ingreso=int(P_kwh*Respaldo)
C_comer=int(Ingreso*Porc_come)
C_GMS=int(Ingreso*4/1000)
I_NETO=Ingreso-C_comer-C_GMS
if len(Data_frame_fechas.index.values)>13:
Enter="\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
else:
Enter=""
variables = {
"${FECHA}": fecha,
"${MES}": eleccion2,
"${AGENTES}": tx_empresas,
"${USUARIO}": usuario,
"${PRECIO_BOLSA}": P_bolsa,
"${PRECIO_CONTRATO}": P_contrato,
"${FECHA_TRM}": F_TRM,
"${PRECIO_TRM}": P_TMR,
"${EMAIL_USUARIO}": Email,
"${PRECIO_PKWH}":str(round(P_kwh,2)),
"${PORC_COMER}":str(int(Porc_come*100))+"%",
"${RESPALDO_TOT}":f'{Respaldo:,}',
"${INGRESO}":f'{Ingreso:,}',
"${COST_COME}":f'{C_comer:,}',
"${COST_GMS}":f'{C_GMS:,}',
"${INGRESO_NETO}": f'{I_NETO:,}',
"${NUM_DIAS}":Num_dias(len(Fechas)),
"${RANGO_FECHAS_1}": R_fechas,
"${ENTER}": Enter,
"${MES_LIQUIDACION}": F_Liq_pag(Opciones2.index(eleccion2)+2,int(today.strftime("%Y"))),
"${MES_PAGO}": F_Liq_pag(Opciones2.index(eleccion2)+3,int(today.strftime("%Y"))),
"${INDICADOR}": eleccion3
}
template_document = docx.Document(template_file_path)
for variable_key, variable_value in variables.items():
for section in template_document.sections:
for paragraph in section.header.paragraphs:
replace_text_in_paragraph(paragraph, variable_key, variable_value)
for paragraph in template_document.paragraphs:
replace_text_in_paragraph(paragraph, variable_key, variable_value)
for table in template_document.tables:
for col in table.columns:
for cell in col.cells:
for paragraph in cell.paragraphs:
replace_text_in_paragraph(paragraph, variable_key, variable_value)
rows = template_document.tables[1].rows
index_1=Data_frame_fechas.index.values
Acum_Req=0
Acum_Res=0
for idx in index_1:
rows[int(idx)+1].cells[0].text = Data_frame_fechas.iloc[idx]["Dia"]
rows[int(idx)+1].cells[1].text = Data_frame_fechas.iloc[idx]["Fecha"].strftime('%Y-%m-%d')
rows[int(idx)+1].cells[2].text = f'{Data_frame_fechas.iloc[idx]["Requerimiento"]:,}'
Acum_Req += Data_frame_fechas.iloc[idx]["Requerimiento"]
rows[int(idx)+1].cells[3].text = f'{Data_frame_fechas.iloc[idx]["Respaldo"]:,}'
Acum_Res += Data_frame_fechas.iloc[idx]["Respaldo"]
for idx_2 in range(0,4):
run=rows[int(idx)+1].cells[idx_2].paragraphs[0].runs
font = run[0].font
font.size= Pt(10)
font.name = 'Tahoma'
for idx in np.arange(len(index_1)+1,37):
remove_row(template_document.tables[1], rows[len(index_1)+1])
rows[-1].cells[1].text = Num_dias(len(Fechas))
rows[-1].cells[2].text = f'{Acum_Req:,}'
rows[-1].cells[3].text = f'{Acum_Res:,}'
version=1
template_document.save(Ruta_x+usuario+"_OFR"+Agente_extra+"_"+eleccion2+"_"+str(today.year)+".docx")
zf.write(Ruta_x+usuario+"_OFR"+Agente_extra+"_"+eleccion2+"_"+str(today.year)+".docx")
if b:
docx2pdf.convert(Ruta_x+usuario+"_OFR"+Agente_extra+"_"+eleccion2+"_"+str(today.year)+".docx",
Ruta_pdf+"/"+usuario+"_OFR"+Agente_extra+"_"+eleccion2+"_"+str(today.year)+".pdf")
zf.write(Ruta_pdf+"/"+usuario+"_OFR"+Agente_extra+"_"+eleccion2+"_"+str(today.year)+".pdf")
File_names.extend([usuario+"_OFR"+Agente_extra+"_"+eleccion2+"_"+str(today.year)+".docx"])
steps_done += 1
my_bar.progress(int(steps_done*100/steps))
Info.update({"File_names":File_names})
json_info = json.dumps(Info, indent = 4)
with open(Ruta_x+'/00_data.json', 'w') as f:
json.dump(json_info, f)
zf.write(Ruta_x+'/00_data.json')
zf.close()
with open("Resultado.zip", "rb") as fp:
with columns_3[1]:
btn = st.download_button(
label="Descargar resultados",
data=fp,
file_name="Resultado.zip",
mime="application/zip"
)
else:
st.warning("Necesita subir los tres archivos")
#elif False:
elif eleccion==Opciones1[1]:
st.header("Creación certificados de reintegros")
st.subheader("Introducción de los documentos")
if True:
colums= st.columns([1,1,1])
with colums[0]:
uploaded_file_1 = st.file_uploader("Suba el documento de liquidación")
with colums[1]:
uploaded_file_2 = st.file_uploader("Suba la plantilla del documento")
with colums[2]:
uploaded_file_3 = st.file_uploader("Suba el excel adicional")
else:
uploaded_file_1="Liquidacion_base.xlsm"
uploaded_file_2="Certificado_base.docx"
uploaded_file_3="Excel_extra_certificados.xls"
if (uploaded_file_1 is not None) and (uploaded_file_2 is not None) and (uploaded_file_3 is not None):
try:
data=pd.read_excel(uploaded_file_1)
Extras=pd.read_excel(uploaded_file_3,sheet_name="Usuarios")
Tipo_dia=pd.read_excel(uploaded_file_3,sheet_name="Calendario")
Agentes=pd.read_excel(uploaded_file_3,sheet_name="Agentes")
except:
st.warning("Recuerde que el formato del Excel tiene que ser xls")
data["FECHA"]=data["FECHA"].dt.to_pydatetime()
if data["USUARIO"].isnull().values.any():
st.warning("Revisar archivo de consolidado base, usuario no encontrado.")
data.dropna(subset = ["USUARIO"], inplace=True)
Users=pd.unique(data["USUARIO"])
else:
Users=pd.unique(data["USUARIO"])
template_file_path = uploaded_file_2
today = date.today()
fecha=dia_esp(today.strftime("%d")) +" de "+ mes_espa(today.strftime("%m")) +" de "+ today.strftime("%Y")
colums= st.columns([1,4,1])
with colums[1]:
st.subheader("Introducción de las variables")
F_TRM = st.date_input("Seleccione la fecha del valor de la TRM:",value=today).strftime("%Y-%m-%d")
P_TMR=str(round(data["TRM"].mean(),2))
columns_2 = st.columns([1,2,2,1])
Opciones2=("Enero","Febrero","Marzo","Abril","Mayo","Junio","Julio","Agosto","Septiembre","Octubre","Noviembre","Diciembre")
with columns_2[1]:
eleccion3=st.number_input('Seleccione el año del certificado',value=today.year)
with columns_2[2]:
eleccion2=st.selectbox('Seleccione el mes del certificado',Opciones2)
columns_3 = st.columns([2,1,2])
with columns_3[1]:
if platform.system()=='Windows':
b=st.checkbox("PDF")
else:
b=False
a=st.button("Crear los documentos")
Ruta="Documentos/Certificados/"+str(eleccion3) +"/"+ mes_num(eleccion2)+"-"+eleccion2
Ruta_x="Documentos_exportar"
if os.path.exists(Ruta_x):
shutil.rmtree(Ruta_x)
Ruta_x=Ruta_x+"/"
os.makedirs(Ruta_x, exist_ok=True)
if a:
try:
path1 = os.path.join(Ruta)
shutil.rmtree(path1)
os.makedirs(Ruta, exist_ok=True)
except:
os.makedirs(Ruta, exist_ok=True)
Ruta_word=Ruta+"/Word"
Ruta_pdf=Ruta+"/PDF"
Info ={"Ruta": Ruta,
"File_names": None
}
File_names=[]
os.makedirs(Ruta_word, exist_ok=True)
if b:
os.makedirs(Ruta_pdf, exist_ok=True)
zf = zipfile.ZipFile(
"Resultado.zip", "w", zipfile.ZIP_DEFLATED)
my_bar=st.progress(0)
steps=len(Users)
steps_done=0
for usuario in Users:
data_user=data.copy()
data_user=data_user[data_user["USUARIO"]==usuario]
Empresas = | pd.unique(data_user["COMPRADOR"]) | pandas.unique |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 8 17:29:59 2020
@author: nmei
"""
import os
import itertools
import numpy as np
import pandas as pd
from shutil import rmtree
verbose = 1
batch_size = 16
node = 1
core = 16
mem = 4
cput = 24
units = [2,5,10,20,50,100,300] # one unit hidden layer cannot learn
dropouts = [0,0.25,0.5,0.75]
activations = ['elu',
'relu',
'selu',
'sigmoid',
'tanh',
'linear',
]
models = ['vgg19_bn','resnet50','alexnet','densenet169','mobilenet']
output_activations = ['softmax','sigmoid',]
temp = np.array(list(itertools.product(*[models,units,dropouts,activations,output_activations])))
df = | pd.DataFrame(temp,columns = ['model_names','hidden_units','dropouts','hidden_activations','output_activations']) | pandas.DataFrame |
import datetime
import pandas as pd
import plotly.express as px
import streamlit as st
def clean_dataframe(df):
df = df.drop(columns=[0])
df.rename(
columns={
1: "errand_date",
2: "scrape_time",
3: "rekyl_id",
4: "status",
5: "reporter",
6: "apartment",
7: "kategori",
8: "detaljer",
},
inplace=True,
)
return df
def reformat_dataframe(cleaned_df):
reformat_df = (
cleaned_df.groupby(["rekyl_id", "status", "kategori", "reporter", "detaljer"])
.agg({"scrape_time": "min", "errand_date": "min"})
.sort_values(by=["scrape_time"], ascending=False)
.reset_index()
)
reformat_df["scrape_time"] = pd.to_datetime(reformat_df["scrape_time"])
reformat_df["errand_date"] = pd.to_datetime(reformat_df["errand_date"])
return reformat_df
def add_info_flags(reform_df):
pivoted = reform_df.pivot(
values=["scrape_time"],
index=["rekyl_id", "errand_date", "kategori", "reporter", "detaljer"],
columns=["status"],
).reset_index()
pivoted["time_to_complete"] = (
pivoted["scrape_time"]["Avslutad"] - pivoted["errand_date"]
).dt.days
pivoted["is_completed"] = pivoted.apply(
lambda row: "No" if pd.isnull(row.scrape_time.Avslutad) else "Yes", axis=1
)
start_date = datetime.datetime(2021, 9, 5)
pivoted["after_start_scrape"] = start_date < pivoted["errand_date"]
return pivoted
def get_closed_stats_per_category(df):
df4 = df[(df["is_completed"] == "Yes") & (df["after_start_scrape"] == True)]
df5 = df4[["rekyl_id", "kategori", "time_to_complete"]]
df5.columns = df5.columns.droplevel(level=1)
df5 = (
df5.groupby(["kategori"])
.agg({"time_to_complete": "mean", "rekyl_id": "count"})
.rename(columns={"time_to_complete": "avg_days", "rekyl_id": "Antal ärenden"})
.reset_index()
.sort_values(by=["kategori"])
)
df5["avg_days"] = df5["avg_days"].astype("int")
return df5
def get_open_stats_per_category(df):
open_errands_df = df[df["is_completed"] == "No"]
open_errands_df.columns = open_errands_df.columns.droplevel(level=1)
return (
open_errands_df.groupby(["kategori"])
.agg({"rekyl_id": "count"})
.rename(columns={"rekyl_id": "num errands"})
.reset_index()
.sort_values(by=["kategori"])
)
def transform_errands_per_date(raw_data):
df = pd.DataFrame(raw_data, columns=["Datum", "Antal ärenden"])
df["Datum"] = pd.to_datetime(df["Datum"])
df["datum_year_month"] = df["Datum"].apply(lambda x: x.strftime("%Y-%m"))
return df.groupby(["datum_year_month"])["Antal ärenden"].sum()
def transform_top_reporter_data(raw_data):
return pd.DataFrame(
raw_data, columns=["Medlem", "Antal skapade ärenden", "Senaste ärende"]
).set_index(["Medlem"])
def transform_errand_per_category(raw_data):
df = pd.DataFrame(raw_data, columns=["datum", "Kategori", "Antal ärenden"])
df["datum"] = | pd.to_datetime(df["datum"]) | pandas.to_datetime |
import pandas as pd
import numpy as np
import os
import datetime
import git
from pathlib import Path
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
inputdir = f"{homedir}" + "/data/us/mobility/"
inputdir2 = f"{homedir}" + "/data/google_mobility/"
outputdir = f"{homedir}" + "/models/data/us/mobility/"
Path(outputdir).mkdir(parents=True, exist_ok=True)
outputdir += "county_"
def edit_column_date(frame,index):
#Edits the date format of columns of dataframes
#index: index of the first column of dates + 1
i = 0
for col in frame:
i += 1
if i >= index:
new_d = date_format(col)
frame = frame.rename(columns={col : new_d})
return frame
def sort_dates(frame,index):
#Sorts the columns by date of a frame with many nonconsecutive dates (several factors per date)
Beg = list(frame.columns[:index]) #First four entries
End = list(np.sort(np.array(frame.columns[index:]))) #Every Date Sorted
cols = list(Beg + End) #Ordered Columns
frame = frame[cols]
return frame
def date_format(date):
d = datetime.datetime.strptime(date, '%Y-%m-%d')
return datetime.date.strftime(d, "%m/%d/%y")
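# Quick example of the helper above (illustrative, not called by the pipeline):
# date_format("2020-03-15") -> "03/15/20", which is the MM/DD/YY header format
# that edit_column_date() applies to the mobility frames below.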
def main():
#Loading in mobility data
DL_us_m50 = pd.read_csv(inputdir+'DL-us-m50.csv', encoding='latin1')
DL_us_m50_index = pd.read_csv(inputdir+'DL-us-m50_index.csv', encoding='latin1')
DL_us_samples = pd.read_csv(inputdir+'DL-us-samples.csv')
#Cleaning the datasets
DL_us_m50 = edit_column_date(DL_us_m50,6)
DL_us_m50_index = edit_column_date(DL_us_m50_index,6)
DL_us_samples = edit_column_date(DL_us_samples,6)
DL_us_m50 = DL_us_m50.drop(columns=['country_code','admin_level','admin1','admin2'])
DL_us_m50_index = DL_us_m50_index.drop(columns=['country_code','admin_level','admin1','admin2'])
DL_us_samples = DL_us_samples.drop(columns=['country_code','admin_level','admin1','admin2'])
#Separating data into county info
DL_us_m50_County = DL_us_m50[DL_us_m50.fips >= 1000]
DL_us_m50_index_County = DL_us_m50_index[DL_us_m50_index.fips >= 1000]
DL_us_samples_County = DL_us_samples[DL_us_samples.fips >= 1000]
#merging the 3 datasets together
Mobility_County = pd.merge(DL_us_m50_County, DL_us_m50_index_County, left_on='fips', right_on='fips', suffixes=('_M_m50', ''), sort=True)
Mobility_County = pd.merge(Mobility_County, DL_us_samples_County, left_on='fips', right_on='fips', suffixes=('_M_idx', '_M_samples'), sort=True)
Mobility_County = Mobility_County[Mobility_County.fips >= -1]
Mobility_County.columns = Mobility_County.columns.str.replace('fips','FIPS')
#saving datasets with 3 values not consecutive and then consecutive
Mobility_County_Nonconsecutive = Mobility_County
Mobility_County_Consecutive = sort_dates(Mobility_County,1)
#Making FIPS the main index
Mobility_County_Consecutive = Mobility_County_Consecutive.set_index('FIPS')
Mobility_County_Nonconsecutive = Mobility_County_Nonconsecutive.set_index('FIPS')
Mobility_County_Consecutive.to_csv(outputdir+'consecutive.csv')
Mobility_County_Nonconsecutive.to_csv(outputdir+'nonconsecutive.csv')
#New Google Mobility Data, must be processed
google_mobility = pd.read_csv(inputdir2+'mobility_report_US.csv', encoding='latin1')
#Taking only county data
google_mobility_county = google_mobility[google_mobility['Region'] != 'Total']
#Key to map counties to FIPS, and states to state abbreviations
Key = pd.read_csv('county_key.csv').sort_values(by=['FIPS'])
State_Abv = pd.read_csv('State_Abbrev.csv')
State_Abv = np.array(State_Abv)
#Dictionary from state names to state initials
State_Dict = dict((rows[0],rows[2]) for rows in State_Abv)
#Changing the state column of google mobility to its abbreviation code
google_mobility_county = google_mobility_county.replace({'State': State_Dict})
#Creating a location column, to make the google mobility locations unique
google_mobility_county['loc'] = google_mobility_county.Region.astype(str).str.cat(google_mobility_county.State.astype(str), sep=', ')
Key['loc'] = Key.COUNTY.astype(str).str.cat(Key.ST.astype(str), sep=', ')
#New google county mobility data, with fips codes attached
google_county = pd.merge(google_mobility_county, Key, left_on='loc', right_on='loc', sort=True)
#Removing unnecessary columns
google_county = google_county.drop(columns=['State','Region','ST','COUNTY','loc'])
#Splitting up this google county into its components to rejoin it later
google_residential = google_county.pivot(index='FIPS', columns='Date', values=['Residential'])
google_residential.to_csv(outputdir+'google_residential.csv')
#Reading in the split-up component and then resetting the header values
google_residential = pd.read_csv(outputdir+'google_residential.csv',header=1).iloc[1:].rename(columns={'Date':'FIPS'})
google_workplaces = google_county.pivot(index='FIPS', columns='Date', values=['Workplaces'])
google_workplaces.to_csv(outputdir+'google_workplaces.csv')
google_workplaces = pd.read_csv(outputdir+'google_workplaces.csv',header=1).iloc[1:].rename(columns={'Date':'FIPS'})
google_transit = google_county.pivot(index='FIPS', columns='Date', values=['Transit stations'])
google_transit.to_csv(outputdir+'google_transit.csv')
google_transit = pd.read_csv(outputdir+'google_transit.csv',header=1).iloc[1:].rename(columns={'Date':'FIPS'})
google_parks = google_county.pivot(index='FIPS', columns='Date', values=['Parks'])
google_parks.to_csv(outputdir+'google_parks.csv')
google_parks = pd.read_csv(outputdir+'google_parks.csv',header=1).iloc[1:].rename(columns={'Date':'FIPS'})
google_grocery = google_county.pivot(index='FIPS', columns='Date', values=['Grocery & pharmacy'])
google_grocery.to_csv(outputdir+'google_grocery.csv')
google_grocery = pd.read_csv(outputdir+'google_grocery.csv',header=1).iloc[1:].rename(columns={'Date':'FIPS'})
google_retail = google_county.pivot(index='FIPS', columns='Date', values=['Retail & recreation'])
google_retail.to_csv(outputdir+'google_retail.csv')
google_retail = pd.read_csv(outputdir+'google_retail.csv',header=1).iloc[1:].rename(columns={'Date':'FIPS'})
#Merging the data back together
google_county = pd.merge(google_residential, google_workplaces, left_on='FIPS', right_on='FIPS', suffixes=('_residential', ''))
google_county = | pd.merge(google_county, google_transit, left_on='FIPS', right_on='FIPS', suffixes=('_workplaces', '')) | pandas.merge |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
| tm.assert_numpy_array_equal(result, expected) | pandas.util.testing.assert_numpy_array_equal |
# Binance libraries
from log import Log
from binance_mod.client import Client
import os
import pandas as pd
import ta
from sklearn.preprocessing import RobustScaler
# TODO
# Place buy
# Load amount of euro you can buy with
# General Binance API class
class Binance:
# Initializes the class
# aks_buy: ask for permission from user to buy
# basic_coin: basic coin to calculate the assets
def __init__(self, ask_buy=True, basic_coin="EUR"):
api_key, api_secret = self.loadAPISecret()
self.client = Client(api_key=api_key, api_secret=api_secret)
self.ask_buy = ask_buy
self.basic_coin = basic_coin
self.update()
# Updates all the account variables
def update(self):
self.getAccountInfo()
self.getStatus()
self.getAssets()
# Returns the API key and API secret from the secret.txt file
def loadAPISecret(self):
with open ("secret.txt", "r") as myfile:
data = myfile.read().splitlines()
return data[0], data[1]
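# Assumed secret.txt layout (inferred from the parsing above, not an official format):
# two plain-text lines, the API key on the first line and the API secret on the second:
#   <BINANCE_API_KEY>
#   <BINANCE_API_SECRET>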
# Loads the account information
def getAccountInfo(self):
self.accountInfo = self.client.get_account()
self.accountStatus = self.client.get_account_status()
# Loads the account status
def getStatus(self):
self.status = self.client.get_system_status()["msg"]
# Loads the average value for all the coins in possession
def getAssets(self):
self.assets = []
self.assets.append({'asset': "Total " + self.basic_coin, 'value': 0.0})
balances = self.accountInfo["balances"]
for symbol in balances:
if float(symbol["free"]) > 0:
price = float(self.client.get_avg_price(symbol=symbol["asset"] + self.basic_coin)["price"])
value = float(symbol["free"]) * price
self.assets.append({'asset': symbol["asset"], 'amount': symbol["free"], 'price': price, 'value': value})
self.assets[0]["value"] += value
# Class for the coin itself
class Coin:
# Initializes the class
def __init__(self, symbol, binance, log=Log()):
self.symbol = symbol
self.binance = binance
self.log = log
# Data columns
self.org_columns = ['timestamp', 'open', 'high', 'low', 'close', 'volume', 'close_time', 'quote_av', 'trades', 'tb_base_av', 'tb_quote_av', 'ignore']
self.imp_columns = ['timestamp', 'open', 'high', 'low', 'close', 'volume']
self.update()
# Updates all the coin variables
def update(self):
self.getTradeFee()
self.getAllOrders()
self.getOpenOrders()
self.getTrades()
# Loads trade fee
def getTradeFee(self):
self.tradeFee = self.binance.client.get_trade_fee(symbol=self.symbol)
# Loads all orders
def getAllOrders(self):
self.allOrders = self.binance.client.get_all_orders(symbol=self.symbol)
# Loads open orders
def getOpenOrders(self):
self.openOrders = self.binance.client.get_open_orders(symbol=self.symbol)
# Loads trades
def getTrades(self):
self.trades = self.binance.client.get_my_trades(symbol=self.symbol)
# Returns data
# hours_ago: amount of hours to look back
# time_interval: time interval to get the data
def getData(self, hours_ago, time_interval=Client.KLINE_INTERVAL_1MINUTE):
hours_ago = str(hours_ago) + " hour ago UTC"
klines = self.binance.client.get_historical_klines(self.symbol, time_interval, hours_ago)
if len(klines) < 1:
print("Failed to download data from %s for %s." % (self.symbol, hours_ago))
return
print("Successfully downloaded %s lines of data from %s for %s" % (str(len(klines)), self.symbol, hours_ago))
df_org = pd.DataFrame(klines, columns=self.org_columns)
df_org['timestamp'] = pd.to_datetime(df_org['timestamp'], unit='ms')
df_org.set_index('timestamp', inplace=True)
df_org.drop(columns=[item for item in self.org_columns if item not in self.imp_columns], axis=1, inplace=True)
df_org.dropna(inplace=True)
return df_org
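# Usage sketch (symbol and hours are illustrative; assumes a valid secret.txt is present):
#     coin = Coin("BTCEUR", Binance(basic_coin="EUR"))
#     candles = coin.getData(hours_ago=12)   # 1-minute klines for the last 12 hours
#     candles[["open", "close"]].tail()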
# Loads scaled data in self.df
def scaleData(self, df_org):
df_org.to_csv("temp.csv")
self.df = | pd.read_csv("temp.csv") | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
"""
Created on Sun Jun 20 16:38:50 2021
@author: Team_16
"""
#%%
# =============================================================================
# Preliminary setup
# =============================================================================
'''
Import the required data processing and plotting packages
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import display
'''
Load the data
'''
df_train = pd.read_csv("train.csv")
df_test = pd.read_csv("test.csv")
df_data = df_train.append(df_test)
#Reset the index
df_data = df_data.reset_index()
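# Note (sketch): DataFrame.append is deprecated in recent pandas releases; an equivalent
# construction would be df_data = pd.concat([df_train, df_test]), followed by the same reset_index().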
#%%
# =============================================================================
# Missing value handling
# =============================================================================
'''
Count of missing values per column
'''
#Pclass (133), Age (189) and Cabin (690) have relatively many missing values
for col in df_data.columns.tolist():
print('{} column missing values: {}'.format(col, df_data[col].isnull().sum()))
'''
Impute Fare
'''
#Inspect the passenger with the missing Fare value
df_data[df_data['Fare'].isnull()]
#The passenger with the missing value is in 3rd class, so impute with the median fare for Pclass=3, SibSp=0, Parch=0
med_fare = df_data.groupby(['Pclass', 'Parch', 'SibSp']).Fare.median()[3][0][0]
df_data['Fare'] = df_data['Fare'].fillna(med_fare)
'''
Impute Pclass
'''
#Imputation step 1: impute by ticket, since passengers sharing a ticket share the same cabin class:
deplicate_ticket = []
for ticket in df_data.Ticket.unique():
tem = df_data.loc[df_data.Ticket == ticket, 'Fare']
if tem.count() > 1:
deplicate_ticket.append(df_data.loc[df_data.Ticket == ticket,['Name','Ticket','Fare','Pclass','Survived']])
deplicate_ticket = | pd.concat(deplicate_ticket) | pandas.concat |
"""Common types and functionalities for compute(...)."""
from typing import Any, Dict, List, Optional, Tuple, Union, cast
import dask
import dask.dataframe as dd
import numpy as np
import pandas as pd
from scipy.stats import gaussian_kde as gaussian_kde_
from scipy.stats import ks_2samp as ks_2samp_
from scipy.stats import normaltest as normaltest_
from scipy.stats import skewtest as skewtest_
from ...dtypes import drop_null
# Dictionary for mapping the time unit to its formatting. Each entry is of the
# form unit:(unit code for pd.Grouper freq parameter, pandas to_period strftime
# formatting for line charts, pandas to_period strftime formatting for box plot,
# label format).
DTMAP = {
"year": ("Y", "%Y", "%Y", "Year"),
"quarter": ("Q", "Q%q %Y", "Q%q %Y", "Quarter"),
"month": ("M", "%B %Y", "%b %Y", "Month"),
"week": ("W-SAT", "%d %B, %Y", "%d %b, %Y", "Week of"),
"day": ("D", "%d %B, %Y", "%d %b, %Y", "Date"),
"hour": ("H", "%d %B, %Y, %I %p", "%d %b, %Y, %I %p", "Hour"),
"minute": ("T", "%d %B, %Y, %I:%M %p", "%d %b, %Y, %I:%M %p", "Minute"),
"second": ("S", "%d %B, %Y, %I:%M:%S %p", "%d %b, %Y, %I:%M:%S %p", "Second"),
}
def _get_timeunit(min_time: pd.Timestamp, max_time: pd.Timestamp, dflt: int) -> str:
    """Auxiliary function to find an appropriate time unit. Finds the
    time unit such that the number of time units is closest to dflt."""
dt_secs = {
"year": 60 * 60 * 24 * 365,
"quarter": 60 * 60 * 24 * 91,
"month": 60 * 60 * 24 * 30,
"week": 60 * 60 * 24 * 7,
"day": 60 * 60 * 24,
"hour": 60 * 60,
"minute": 60,
"second": 1,
}
time_rng_secs = (max_time - min_time).total_seconds()
prev_bin_cnt, prev_unit = 0, "year"
for unit, secs_in_unit in dt_secs.items():
cur_bin_cnt = time_rng_secs / secs_in_unit
if abs(prev_bin_cnt - dflt) < abs(cur_bin_cnt - dflt):
return prev_unit
prev_bin_cnt = cur_bin_cnt
prev_unit = unit
return prev_unit
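# Worked example (sketch): with dflt=100, a 100-day range yields exactly 100 "day"
# bins (so "day" is returned), while stretching the range to roughly two years makes
# "week" (~104 bins) the closest unit and "week" is returned instead.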
def _calc_box_stats(grp_srs: dd.Series, grp: str, dlyd: bool = False) -> pd.DataFrame:
"""
Auxiliary function to calculate the Tukey box plot statistics
dlyd is for if this function is called when dask is computing in parallel (dask.delayed)
"""
stats: Dict[str, Any] = dict()
try: # this is a bad fix for the problem of when there is no data passed to this function
if dlyd:
qntls = np.round(grp_srs.quantile([0.25, 0.50, 0.75]), 3)
else:
qntls = np.round(grp_srs.quantile([0.25, 0.50, 0.75]).compute(), 3)
stats["q1"], stats["q2"], stats["q3"] = qntls[0.25], qntls[0.50], qntls[0.75]
except ValueError:
stats["q1"], stats["q2"], stats["q3"] = np.nan, np.nan, np.nan
iqr = stats["q3"] - stats["q1"]
stats["lw"] = grp_srs[grp_srs >= stats["q1"] - 1.5 * iqr].min()
stats["uw"] = grp_srs[grp_srs <= stats["q3"] + 1.5 * iqr].max()
if not dlyd:
stats["lw"], stats["uw"] = dask.compute(stats["lw"], stats["uw"])
otlrs = grp_srs[(grp_srs < stats["lw"]) | (grp_srs > stats["uw"])]
if len(otlrs) > 100: # sample 100 outliers
otlrs = otlrs.sample(frac=100 / len(otlrs))
stats["otlrs"] = list(otlrs) if dlyd else list(otlrs.compute())
return pd.DataFrame({grp: stats})
def _calc_box_otlrs(df: dd.DataFrame) -> Tuple[List[str], List[float]]:
"""
Calculate the outliers for a box plot
"""
outx: List[str] = [] # list for the outlier groups
outy: List[float] = [] # list for the outlier values
for ind in df.index:
otlrs = df.loc[ind]["otlrs"]
outx = outx + [df.loc[ind]["grp"]] * len(otlrs)
outy = outy + otlrs
return outx, outy
def _calc_line_dt(
df: dd.DataFrame,
unit: str,
agg: Optional[str] = None,
ngroups: Optional[int] = None,
largest: Optional[bool] = None,
) -> Union[
Tuple[pd.DataFrame, Dict[str, int], str],
Tuple[pd.DataFrame, str, float],
Tuple[pd.DataFrame, str],
]:
"""
Calculate a line or multiline chart with date on the x axis. If df contains
one datetime column, it will make a line chart of the frequency of values. If
df contains a datetime and categorical column, it will compute the frequency
of each categorical value in each time group. If df contains a datetime and
numerical column, it will compute the aggregate of the numerical column grouped
by the time groups. If df contains a datetime, categorical, and numerical column,
it will compute the aggregate of the numerical column for values in the categorical
column grouped by time.
Parameters
----------
df
A dataframe
unit
The unit of time over which to group the values
agg
Aggregate to use for the numerical column
ngroups
Number of groups for the categorical column
largest
Use the largest or smallest groups in the categorical column
"""
# pylint: disable=too-many-locals
x = df.columns[0] # time column
unit = _get_timeunit(df[x].min(), df[x].max(), 100) if unit == "auto" else unit
if unit not in DTMAP.keys():
raise ValueError
grouper = | pd.Grouper(key=x, freq=DTMAP[unit][0]) | pandas.Grouper |
#!/usr/bin/env python
"""This file contains functions to compute features."""
import numpy as np
import pandas as pd
from tqdm import tqdm
from .sound import Waveform
from .statistics import Barrel
def load_waveforms_from_paths(paths, sample_rate):
    """Loads waveforms from the given paths, one file at a time"""
progress_bar = tqdm(paths, desc='Loading waveforms...')
return [Waveform(path=p, sample_rate=sample_rate) for p in progress_bar]
def extract_features_from_paths(paths, components_list, statistics_list=None, sample_rate=44100):
"""Function which loads waveforms, computes the components and statistics and returns them,
without the need to store the waveforms in memory. This is to minimize the memory footprint
when running over multiple files.
Args:
paths (list of str): .wav to compute
components_list (list of str/dict): This is a list of the methods which
should be applied to all the waveform objects in waveforms. If a dict,
this also contains arguments to the sound.Waveform methods.
statistics_list (list of str): This is a list of the methods which
should be applied to all the time-dependent features computed
from the waveforms.
sample_rate (int > 0): sampling rate to load the waveforms
Returns:
pandas DataFrame: pandas dataframe where every row corresponds
to features extracted for one of the waveforms and columns
represent individual features.
"""
output_feats = []
paths = tqdm(paths, desc='Extracting features from paths...')
for path in paths:
wave = Waveform(path=path, sample_rate=sample_rate)
output_feats.append(
extract_features_from_waveform(
components_list, statistics_list, wave
)
)
return pd.DataFrame(output_feats)
def extract_features_from_waveform(components_list, statistics_list, waveform):
"""Given one waveform, a list of components and statistics, extract the
features from the waveform.
Args:
components_list (list of str or dict): This is a list of the methods which
should be applied to all the waveform objects in waveforms. If a dict,
this also contains arguments to the sound.Waveform methods.
statistics_list (list of str): This is a list of the methods which
should be applied to all the "time-dependent" components computed
from the waveforms.
waveform (Waveform): the waveform object to extract components from.
Returns:
dict: Dictionary mapping names to numerical components extracted
for this waveform.
"""
feats_this_waveform = {}
try:
# Compute components with surfboard.
components = waveform.compute_components(components_list)
# Loop over computed components to either prepare for output, or to apply statistics.
for component_name in components:
# Case of a dictionary: unpack dictionary and merge with existing set of components.
if isinstance(components[component_name], dict) and statistics_list is not None:
feats_this_waveform = {
**feats_this_waveform,
**components[component_name]
}
# Case of a float -- simply add that as a single value to the dictionary.
# Or: case of a np array when statistics list is None. In order to be able to obtain
# the numpy array from the pandas DataFrame, we must pass the np array as a list.
elif isinstance(components[component_name], float) or (
isinstance(components[component_name], np.ndarray) and statistics_list is None
):
feats_this_waveform[component_name] = components[component_name]
# Case of a np.array (the component is a time series). Apply Barrel.
elif isinstance(components[component_name], np.ndarray) and statistics_list is not None:
barrel = Barrel(components[component_name])
function_outputs = barrel.compute_statistics(statistics_list)
# Merge dictionaries...
feats_this_waveform = {
**feats_this_waveform,
**{"{}_{}".format(component_name, fun_name): v for fun_name, v in function_outputs.items()}
}
except Exception as extraction_exception:
print(f'Found exception "{extraction_exception}"... Skipping...')
return {}
except:
print('Unknown error. Skipping')
return {}
# Return an empty dict in the case of None.
feats_this_waveform = feats_this_waveform if feats_this_waveform is not None else {}
return feats_this_waveform
def extract_features(waveforms, components_list, statistics_list=None):
"""This is an important function. Given a list of Waveform objects, a list of
Waveform methods in the form of strings and a list of Barrel methods in the
form of strings, compute the resulting time-independent features. Note that the
waveforms are processed sequentially, one at a time.
Args:
waveforms (list of Waveform): This is a list of waveform objects
components_list (list of str/dict): This is a list of the methods which
should be applied to all the waveform objects in waveforms. If a dict,
this also contains arguments to the sound.Waveform methods.
statistics_list (list of str): This is a list of the methods which
should be applied to all the time-dependent features computed
from the waveforms.
Returns:
pandas DataFrame: pandas dataframe where every row corresponds
to features extracted for one of the waveforms and columns
represent individual features.
"""
output_feats = []
waveforms = tqdm(waveforms, desc='Extracting features...')
for wave in waveforms:
output_feats.append(
extract_features_from_waveform(
components_list, statistics_list, wave
)
)
return | pd.DataFrame(output_feats) | pandas.DataFrame |
# Fetch historical market/price data for tracked coins from the CoinGecko API
import os
from pycoingecko import CoinGeckoAPI
import pydash
from datetime import datetime
import pandas
from models import Token, TokenDailyStats
from utils.date_util import DateUtil
from utils.upload_csv_to_gsc import upload_csv_to_gsc
from utils.import_gsc_to_bigquery import import_gsc_to_bigquery
import json
import time
from config import project_config
cg = CoinGeckoAPI()
columns = [
'symbol',
'address',
'coin_gecko_id',
'day',
'trading_vol_24h',
'high_price_24h',
'low_price_24h',
'circulating_supply',
'facebook_likes',
'fdv_to_tvl_ratio',
'fully_diluted_valuation',
'market_cap',
'market_cap_rank',
'max_supply',
'mcap_to_tvl_ratio',
'price',
'reddit_accounts_active_48h',
'reddit_average_comments_48h',
'reddit_average_posts_48h',
'reddit_subscribers',
'telegram_channel_user_count',
'total_supply',
'total_value_locked',
'twitter_followers',
'created_at',
'updated_at'
]
day_times = [
]
name = 'token_daily_stats'
def get_coins():
coin_list = cg.get_coins_list()
return coin_list
def get_history_market_from_coin_gecko():
coins = get_history_data_token()
for day in day_times:
day_result = []
for coin in coins:
try:
value = handle_history_data(coin['id'], day)
if value:
day_result.append(value)
except Exception as e:
print('get coin detail fail', e)
print(day, coin['id'])
print(day_result)
df = | pandas.DataFrame(day_result, columns=columns) | pandas.DataFrame |
"""N-dimensional CSV writer
See :doc:`format` for format specs
"""
import io
import csv
import pandas
import pshell as sh
import xarray
from .proper_unstack import proper_unstack
def write_csv(array, path_or_buf=None):
"""Write an n-dimensional array to an NDCSV file.
Any number of dimensions are supported. If the array has more than two
dimensions, all dimensions beyond the first are automatically stacked
together on the columns of the CSV file; if you want to stack dimensions on
the rows you'll need to manually invoke :meth:`xarray.DataArray.stack`
beforehand.
This function is conceptually similar to :meth:`pandas.DataFrame.to_csv`,
except that none of the many configuration settings is made available to
the end user, in order to ensure consistency in the output file.
:param array:
One of:
- :class:`xarray.DataArray`
- :class:`pandas.Series`
- :class:`pandas.DataFrame`
:param path_or_buf:
One of:
- .csv file path
- .csv.gz / .csv.bz2 / .csv.xz file path (the compression algorithm
is inferred automatically)
- file-like object open for writing
- None (the result is returned as a string)
"""
if path_or_buf is None:
buf = io.StringIO()
write_csv(array, buf)
return buf.getvalue()
if isinstance(path_or_buf, str):
# Automatically detect .csv or .csv.gz extension
with sh.open(path_or_buf, 'w') as fh:
write_csv(array, fh)
return
if isinstance(array, xarray.DataArray):
_write_csv_dataarray(array, path_or_buf)
elif isinstance(array, (pandas.Series, pandas.DataFrame)):
_write_csv_pandas(array, path_or_buf)
else:
raise TypeError('Input data is not a xarray.DataArray, pandas.Series '
'or pandas.DataFrame')
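# Usage sketch (illustrative data, not part of the module):
#
#     import numpy, xarray
#     arr = xarray.DataArray(
#         numpy.arange(6).reshape(2, 3),
#         dims=["scenario", "year"],
#         coords={"scenario": ["low", "high"], "year": [2020, 2021, 2022]},
#     )
#     write_csv(arr, "demo.csv")    # or: text = write_csv(arr) to get a string back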
def _write_csv_dataarray(array, buf):
"""Write :class:`xarray.DataArray` to buffer
"""
if array.ndim == 0:
# 0D (scalar) array
buf.write('%s\n' % array.values)
return
# Keep track of non-index coordinates
# Note that scalar (a-dimensional) coords are silently discarded
coord_renames = {}
for k, v in array.coords.items():
if len(v.dims) > 1:
raise ValueError("Multi-dimensional coord '%s' is not supported "
"by the NDCSV format" % k)
if len(v.dims) == 1 and v.dims[0] != k:
coord_renames[k] = '%s (%s)' % (k, v.dims[0])
array = array.rename(coord_renames)
if array.ndim > 2:
# Automatically stack dims beyond the first.
# In the case where there's already a MultiIndex on a dim beyond
# the first, first unstack them and then stack them again back all
# together.
for dim in array.dims[1:]:
if isinstance(array.get_index(dim), pandas.MultiIndex):
# Note: unstacked dims end up on the right
array = proper_unstack(array, dim)
# The __columns__ label is completely arbitrary and we're going
# to lose it in a few moments when dumping to CSV.
array = array.stack(__columns__=array.dims[1:])
# non-index coords are lost when converting to pandas.
# Incorporate them into the MultiIndex
for dim in array.dims:
if isinstance(array.coords[dim].to_index(), pandas.MultiIndex):
array = array.reset_index(dim)
elif dim not in array.coords:
# Force default RangeIndex
array.coords[dim] = array.coords[dim]
if list(array[dim].coords) != [dim]:
array = array.set_index({dim: list(array[dim].coords)})
_write_csv_pandas(array.to_pandas(), buf)
def _write_csv_pandas(array, buf):
"""Write :class:`pandas.Series` or :class:`pandas.DataFrame` to buffer
"""
# Raise ValueError if there's empty strings in the header
_check_empty_index(array.index)
if array.ndim > 1:
_check_empty_index(array.columns)
writer = csv.writer(buf, lineterminator='\n')
if array.index.name is None:
array.index.name = 'dim_0'
if array.ndim == 1:
# pandas.Series. Write header by hand.
writer.writerow(list(array.index.names) + [''])
# First element is empty
if array.iloc[0] == '':
# An empty cell would confuse read_csv() below. Make it explicit.
array.iloc[0] = 'nan'
na_rep = 'nan'
elif pandas.isnull(array.iloc[0]):
na_rep = 'nan'
else:
# Keep the output CSV as clean as possible
na_rep = ''
array.to_csv(buf, header=None, na_rep=na_rep)
elif isinstance(array.columns, pandas.MultiIndex):
# pandas.DataFrame with a MultiIndex on the columns.
# Simplest case - works out of the box with Pandas!
array.to_csv(buf)
else:
# pandas.DataFrame without MultiIndex on the columns.
# Write header by hand.
if array.columns.name is None:
array.columns.name = 'dim_1'
header_cols = [array.columns.name]
if len(array.index.names) > 1:
header_cols += [''] * (len(array.index.names) - 1)
header_cols += array.columns.values.tolist()
writer.writerow(header_cols)
writer.writerow(list(array.index.names) + [''] * len(array.columns))
array.to_csv(buf, header=None)
def _check_empty_index(idx):
"""Check for empty strings and NaNs in pandas.Index
:param pandas.Index idx:
Series.index, DataFrame.index, or DataFrame.columns.
:raises ValueError:
If one or more cells of the index are empty strings or NaN
"""
if isinstance(idx, pandas.MultiIndex):
for level, label in zip(idx.levels, idx.labels):
# A MultiIndex with NaNs will have a levels and -1 labels
# In this example, x = [NaN, 1.0] y = [0, 1]
# MultiIndex(levels=[[1.0], [0, 1]],
# labels=[[-1, -1, 0, 0], [0, 1, 0, 1]],
# names=['x', 'y'])
if (label < 0).any():
raise ValueError('NaN in index')
# Check for empty strings
_check_empty_index(level)
else:
if (idx.dtype.kind in 'OU' # Object or Unicode
and pandas.Series(idx.str.contains('^$')).fillna(False).any()):
raise ValueError('Empty string in index')
if | pandas.isnull(idx) | pandas.isnull |
# Copyright 2021 Prayas Energy Group(https://www.prayaspune.org/peg/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from rumi.io import filemanager
from rumi.io import config
from rumi.io import common
from rumi.io import constant
from rumi.io import loaders
from rumi.io import utilities
import logging
import os
import functools
import numpy as np
import itertools
import math
logger = logging.getLogger(__name__)
def load_param(param_name, subfolder):
"""Loader function to be used by yaml framework. do not use this
directly.
"""
filepath = filemanager.find_filepath(param_name, subfolder)
logger.debug(f"Reading {param_name} from file {filepath}")
df = loaders.read_csv(param_name, filepath)
return df
def get_filtered_parameter(param_name):
"""Returns supply parameter at balancing time and balancing area.
This function will do necessary collapsing and expansion of
parameter data. It will do this operation on all float64 columns.
Other columns will be treated as categorical.
:param param_name: name of the supply parameter to load
:returns: DataFrame
"""
param_data_ = loaders.get_parameter(param_name)
if not isinstance(param_data_, pd.DataFrame) and param_data_ == None:
return param_data_
original_order = [c for c in param_data_.columns]
param_data = utilities.filter_empty(param_data_) # for test data
specs = filemanager.supply_specs()
if param_name in specs:
param_specs = specs[param_name]
folder = param_specs.get("nested")
geographic = param_specs.get("geographic")
time = param_specs.get("time")
if geographic:
param_data = filter_on_geography(
param_data, geographic, folder)
if time:
param_data = filter_on_time(param_data, time, folder)
param_data = preserve_column_order(
param_data, original_order)
return param_data.fillna("")
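# Usage sketch (the parameter name is illustrative; valid names come from the supply
# specs handled by filemanager.supply_specs()):
#
#     capacity = get_filtered_parameter("ECTLegacyCapacity")
#     # -> DataFrame reshaped to each carrier's balancing time/area, or None if absent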
def preserve_column_order(dataframe, original_order):
class DummyDFColumns:
"""A class to simulate df.columns from pa.DataFrame
"""
def __init__(self, cols):
self.columns = list(cols)
def indexof_geo(oldcols):
subset_cols = utilities.get_geographic_columns_from_dataframe(
oldcols)
return oldcols.columns.index(subset_cols[-1])+1
def indexof_time(oldcols):
subset_cols = utilities.get_time_columns_from_dataframe(oldcols)
return oldcols.columns.index(subset_cols[-1])+1
def extra_geo(dataframe, oldcols):
geo = utilities.get_geographic_columns_from_dataframe(dataframe)
return [c for c in geo if c not in oldcols.columns]
def extra_time(dataframe, oldcols):
time = utilities.get_time_columns_from_dataframe(dataframe)
return [c for c in time if c not in oldcols.columns]
def new_order(dataframe, oldcols):
cols = [c for c in oldcols]
oldcols_ = DummyDFColumns(cols)
if utilities.get_geographic_columns_from_dataframe(oldcols_):
for i, c in enumerate(extra_geo(dataframe, oldcols_),
start=indexof_geo(oldcols_)):
cols.insert(i, c)
oldcols_ = DummyDFColumns(cols)
if utilities.get_time_columns_from_dataframe(oldcols_):
for i, c in enumerate(extra_time(dataframe, oldcols_),
start=indexof_time(oldcols_)):
cols.insert(i, c)
return cols
return dataframe.reindex(columns=new_order(dataframe, original_order))
def filter_empty_columns(data, filtercols):
rows = len(data)
empty = [c for c in filtercols if data[c].isnull(
).sum() == rows or (data[c] == "").sum() == rows]
return data[[c for c in data.columns if c not in empty]]
def filter_empty_geography(data):
"""filter out empty geographic columns"""
return filter_empty_columns(data,
utilities.get_geographic_columns_from_dataframe(data))
def filter_empty_time(data):
"""filter out empty time columns"""
return filter_empty_columns(data,
utilities.get_time_columns_from_dataframe(data))
def finest_geography_from_balancing(entities):
g = [common.get_geographic_columns(
common.balancing_area(e)) for e in entities]
return max(g, key=len)
@functools.lru_cache(maxsize=1)
def get_all_carriers():
carrriers = ["PhysicalPrimaryCarriers",
"PhysicalDerivedCarriers", "NonPhysicalDerivedCarriers"]
allcarriers = []
for carrrier in carrriers:
allcarriers.extend(
list(loaders.get_parameter(carrrier)['EnergyCarrier']))
return allcarriers
def finest_time_from_balancing(entities):
t = [common.get_time_columns(common.balancing_time(e)) for e in entities]
return max(t, key=len)
@functools.lru_cache(maxsize=16)
def find_EC(entity, value):
if entity == 'EnergyCarrier':
return value
elif entity == 'EnergyConvTech':
EnergyConvTechnologies = loaders.get_parameter(
'EnergyConvTechnologies')
ect = EnergyConvTechnologies.set_index('EnergyConvTech')
return ect.loc[value]['OutputDEC']
else:
EnergyStorTechnologies = loaders.get_parameter(
'EnergyStorTechnologies')
est = EnergyStorTechnologies.set_index('EnergyStorTech')
return est.loc[value]['StoredEC']
def get_entity_type(folder):
if folder == "Carriers":
return 'EnergyCarrier'
elif folder == "Storage":
return 'EnergyStorTech'
else:
return 'EnergyConvTech'
def filter_on_time(data, granularity, folder):
"""granularity is either 'fine' or 'coarse' and folder is one of 'Carriers',
'Technologies', 'Storage'
"""
entity = get_entity_type(folder)
entities = get_all_carriers()
timecols = finest_time_from_balancing(entities)
dfs = []
if granularity == "fine":
for item in data[entity].unique():
q = f"{entity} == '{item}'"
d = filter_empty_time(data.query(q))
balancing_time = common.balancing_time(find_EC(entity, item))
d = group_by_time(d, balancing_time, timecols)
dfs.append(d)
else:
for item in data[entity].unique():
q = f"{entity} == '{item}'"
d = filter_empty_time(data.query(q))
balancing_time = common.balancing_time(find_EC(entity, item))
d = expand_by_time(d, entity, balancing_time, timecols)
dfs.append(d)
return pd.concat(dfs).reset_index(drop=True)
def get_nontime_columns(d):
return [c for c in d.columns if (not pd.api.types.is_float_dtype(d[c])) and c not in constant.TIME_SLICES]
def group_by_time(d, balancing_time, superset_cols):
timecols_ = common.get_time_columns(balancing_time)
othercols = get_nontime_columns(d)
d = utilities.groupby_time(d.fillna(""), othercols, balancing_time).copy()
rows = len(d)
diff = [c for c in superset_cols if c not in timecols_]
for c in diff:
d[c] = pd.Series([""]*rows, dtype=str, name=c)
return d[superset_cols + [c for c in d.columns if c not in superset_cols]]
def expand_by_time(d, entity, balancing_time, superset_cols):
timecols_ = common.get_time_columns(balancing_time)
label = d[entity].unique()[0]
base = utilities.base_dataframe_time(timecols_,
colname=entity,
val=label).reset_index()
d = d.merge(base, how='left')
rows = len(d)
diff = [c for c in superset_cols if c not in timecols_]
for c in diff:
d[c] = pd.Series([""]*rows, dtype=str, name=c)
return d
def filter_on_geography(data, granularity, folder):
"""granularity is either 'fine' or 'coarse' and folder is one of 'Carriers',
'Technologies', 'Storage'
"""
entity = get_entity_type(folder)
entities = get_all_carriers()
geocols = finest_geography_from_balancing(entities)
dfs = []
if granularity == "fine":
for item in data[entity].unique():
q = f"{entity} == '{item}'"
d = filter_empty_geography(data.query(q))
balancing_area = common.balancing_area(find_EC(entity, item))
d = group_by_geographic(d, balancing_area, geocols)
dfs.append(d)
else:
for item in data[entity].unique():
q = f"{entity} == '{item}'"
d = filter_empty_geography(data.query(q))
balancing_area = common.balancing_area(find_EC(entity, item))
d = expand_by_geographic(d, entity, balancing_area, geocols)
dfs.append(d)
return pd.concat(dfs).reset_index(drop=True)
def expand_by_geographic(d, entity, balancing_area, superset_cols):
geocols_ = common.get_geographic_columns(balancing_area)
label = d[entity].unique()[0]
base = utilities.base_dataframe_geography(geocols_,
colname=entity,
val=label).reset_index()
d = d.merge(base, how='left')
rows = len(d)
diff = [c for c in superset_cols if c not in geocols_]
for c in diff:
d[c] = | pd.Series([""]*rows, dtype=str, name=c) | pandas.Series |
import os
import json
from glob import glob
from base64 import b64decode, b64encode
from datetime import datetime
import pandas as pd
from omnisci.thrift.ttypes import TDashboard
def export_dashboard(con, id, dashboards_dir="dashboards"):
src = con.con._client.get_dashboard(session=con.con._session, dashboard_id=id)
filename = os.path.join(dashboards_dir, src.dashboard_name.strip() + ".json")
# print(src.dashboard_id, filename)
with open(filename, "w") as out:
try:
metadata = json.loads(src.dashboard_metadata)
        except (ValueError, TypeError):
            # dashboard_metadata is not always valid JSON; fall back to the raw string
            metadata = src.dashboard_metadata.strip()
obj = dict(
dashboard_id = src.dashboard_id,
name = src.dashboard_name.strip(),
metadata = metadata,
# image_hash = src.image_hash,
last_update_time = src.update_time,
owner = src.dashboard_owner,
is_dash_shared = src.is_dash_shared,
permissions = src.dashboard_permissions.__dict__,
state = json.loads(b64decode(src.dashboard_state).decode("utf-8")),
)
json.dump(obj, out, sort_keys=True, indent=4)
return filename
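# Side note (not from the original file): the dashboard_state handled above is a
# base64-encoded JSON blob. A minimal round trip with a made-up state looks like this:
def _demo_dashboard_state_roundtrip():
    state = {"dashboard": {"title": "demo", "charts": []}}
    encoded = b64encode(json.dumps(state).encode("utf-8"))      # what the server stores
    decoded = json.loads(b64decode(encoded).decode("utf-8"))    # what export_dashboard does
    assert decoded == state
    return decoded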
def export_dashboards(con, dashboards_dir="dashboards", delete_files=False):
"""
Export dashboards to a directory.
Existing json files will be deleted.
It is expected the dir is under source control (e.g. git), but that is not managed by this function.
"""
os.makedirs(dashboards_dir, exist_ok=True)
if delete_files:
for filename in glob(f"{dashboards_dir}/*.json"):
print("delete file", filename)
os.remove(filename)
return [export_dashboard(con, board.dashboard_id, dashboards_dir=dashboards_dir)
for board in con.con.get_dashboards()]
def read_dashboard(filename):
"""
    Read a dashboard file (written by export_dashboards) and return its contents as a dict.
"""
try:
with open(filename) as fin:
first_line = fin.readline()
if first_line.startswith("{"):
rest = fin.read()
return json.loads(first_line + rest)
else:
td = dict()
td['name'] = first_line.strip()
td['metadata'] = fin.readline().strip()
                state_str = fin.read()
                td['state'] = json.loads(state_str)
return td
except Exception as e:
raise Exception(filename) from e
def get_dashboards(con):
boards = dict()
for board in con.con.get_dashboards():
boards[board.dashboard_name.strip()] = board
return boards
def diff_dashboards(a, b):
ats = pd.to_datetime(a.update_time)
bts = | pd.to_datetime(b.update_time) | pandas.to_datetime |
# -*- coding: utf-8 -*-
from datetime import timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import (Timedelta,
period_range, Period, PeriodIndex,
_np_version_under1p10)
import pandas.core.indexes.period as period
class TestPeriodIndexArithmetic(object):
def test_pi_add_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = pi + offs
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
unanchored = np.array([pd.offsets.Hour(n=1),
pd.offsets.Minute(n=-2)])
with pytest.raises(period.IncompatibleFrequency):
pi + unanchored
with pytest.raises(TypeError):
unanchored + pi
    @pytest.mark.xfail(reason="GH#18824 radd doesn't implement this case")
def test_pi_radd_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = offs + pi
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
# offset
# DateOffset
rng = | pd.period_range('2014', '2024', freq='A') | pandas.period_range |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
import pandas as pd
from pandas import (Index, Series, _np_version_under1p9)
from pandas.tseries.index import Timestamp
from pandas.types.common import is_integer
import pandas.util.testing as tm
from .common import TestData
class TestSeriesQuantile(TestData, tm.TestCase):
def test_quantile(self):
from numpy import percentile
q = self.ts.quantile(0.1)
self.assertEqual(q, percentile(self.ts.valid(), 10))
q = self.ts.quantile(0.9)
self.assertEqual(q, percentile(self.ts.valid(), 90))
# object dtype
q = Series(self.ts, dtype=object).quantile(0.9)
self.assertEqual(q, percentile(self.ts.valid(), 90))
# datetime64[ns] dtype
dts = self.ts.index.to_series()
q = dts.quantile(.2)
self.assertEqual(q, Timestamp('2000-01-10 19:12:00'))
# timedelta64[ns] dtype
tds = dts.diff()
q = tds.quantile(.25)
self.assertEqual(q, pd.to_timedelta('24:00:00'))
# GH7661
result = Series([np.timedelta64('NaT')]).sum()
self.assertTrue(result is pd.NaT)
msg = 'percentiles should all be in the interval \\[0, 1\\]'
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
with tm.assertRaisesRegexp(ValueError, msg):
self.ts.quantile(invalid)
def test_quantile_multi(self):
from numpy import percentile
qs = [.1, .9]
result = self.ts.quantile(qs)
expected = pd.Series([percentile(self.ts.valid(), 10),
percentile(self.ts.valid(), 90)],
index=qs, name=self.ts.name)
tm.assert_series_equal(result, expected)
dts = self.ts.index.to_series()
dts.name = 'xxx'
result = dts.quantile((.2, .2))
expected = Series([Timestamp('2000-01-10 19:12:00'),
Timestamp('2000-01-10 19:12:00')],
index=[.2, .2], name='xxx')
tm.assert_series_equal(result, expected)
result = self.ts.quantile([])
expected = pd.Series([], name=self.ts.name, index=Index(
[], dtype=float))
tm.assert_series_equal(result, expected)
def test_quantile_interpolation(self):
# GH #10174
if _np_version_under1p9:
raise nose.SkipTest("Numpy version is under 1.9")
from numpy import percentile
# interpolation = linear (default case)
q = self.ts.quantile(0.1, interpolation='linear')
self.assertEqual(q, percentile(self.ts.valid(), 10))
q1 = self.ts.quantile(0.1)
self.assertEqual(q1, percentile(self.ts.valid(), 10))
# test with and without interpolation keyword
self.assertEqual(q, q1)
def test_quantile_interpolation_dtype(self):
# GH #10174
if _np_version_under1p9:
raise nose.SkipTest("Numpy version is under 1.9")
from numpy import percentile
# interpolation = linear (default case)
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='lower')
self.assertEqual(q, percentile(np.array([1, 3, 4]), 50))
self.assertTrue(is_integer(q))
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='higher')
self.assertEqual(q, percentile(np.array([1, 3, 4]), 50))
self.assertTrue(is_integer(q))
def test_quantile_interpolation_np_lt_1p9(self):
# GH #10174
if not _np_version_under1p9:
raise nose.SkipTest("Numpy version is greater than 1.9")
from numpy import percentile
# interpolation = linear (default case)
q = self.ts.quantile(0.1, interpolation='linear')
self.assertEqual(q, percentile(self.ts.valid(), 10))
q1 = self.ts.quantile(0.1)
self.assertEqual(q1, percentile(self.ts.valid(), 10))
# interpolation other than linear
expErrMsg = "Interpolation methods other than "
with tm.assertRaisesRegexp(ValueError, expErrMsg):
self.ts.quantile(0.9, interpolation='nearest')
# object dtype
with tm.assertRaisesRegexp(ValueError, expErrMsg):
q = Series(self.ts, dtype=object).quantile(0.7,
interpolation='higher')
def test_quantile_nan(self):
# GH 13098
s = pd.Series([1, 2, 3, 4, np.nan])
result = s.quantile(0.5)
expected = 2.5
self.assertEqual(result, expected)
# all nan/empty
cases = [Series([]), Series([np.nan, np.nan])]
for s in cases:
res = s.quantile(0.5)
self.assertTrue(np.isnan(res))
res = s.quantile([0.5])
tm.assert_series_equal(res, pd.Series([np.nan], index=[0.5]))
res = s.quantile([0.2, 0.3])
tm.assert_series_equal(res, pd.Series([np.nan, np.nan],
index=[0.2, 0.3]))
def test_quantile_box(self):
cases = [[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03')],
[pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern')],
[pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days')],
# NaT
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
| pd.Timestamp('2011-01-03') | pandas.Timestamp |
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To see moto-server logs
# pytest -s -p no:logging tests/test_aio_s3fs.py
import pytest
@pytest.mark.asyncio
async def test_pandas_s3_io(
aio_s3_bucket, aio_s3fs
):
import numpy as np
import pandas as pd
s3_file = f"s3://{aio_s3_bucket}/data.csv"
print(s3_file)
data = {"1": np.random.rand(5)}
df = pd.DataFrame(data=data)
df.to_csv(s3_file)
s3_df = pd.read_csv(s3_file, index_col=0)
assert isinstance(s3_df, pd.DataFrame)
| pd.testing.assert_frame_equal(df, s3_df) | pandas.testing.assert_frame_equal |
# coding: utf-8
"""
Summary
-------
Spatial interpolation functions for random forest interpolation using the scikit-learn package.
"""
# import
import statistics
import Eval as Eval
import make_blocks as mbk
import cluster_3d as c3d
import get_data as GD
from sklearn import metrics
from sklearn.model_selection import ShuffleSplit
from sklearn.ensemble import RandomForestRegressor
import geopandas as gpd
import pandas as pd
import numpy as np
import pyproj
import matplotlib.pyplot as plt
import warnings
# Runtime warning suppress, this suppresses the /0 warning
warnings.filterwarnings("ignore")
def random_forest_interpolator(latlon_dict, Cvar_dict, input_date, var_name, shapefile, show, \
file_path_elev, idx_list, expand_area, res = 10000):
'''Random forest interpolation
Parameters
----------
latlon_dict : dictionary
the latitude and longitudes of the stations
Cvar_dict : dictionary
dictionary of weather variable values for each station
input_date : string
the date you want to interpolate for
var_name : string
the name of the variable you are interpolating
shapefile : string
path to the study area shapefile, including its name
show : bool
whether you want to plot a map
file_path_elev : string
path to the elevation lookup file
idx_list : int
position of the elevation column in the lookup file
expand_area : bool
function will expand the study area so that more stations are taken into account (200 km)
Returns
----------
ndarray
- the array of values for the interpolated surface
list
- the bounds of the array surface, for use in other functions
'''
lat = []
lon = []
Cvar = []
na_map = gpd.read_file(shapefile)
bounds = na_map.bounds
if expand_area:
xmax = bounds['maxx']+200000
xmin = bounds['minx']-200000
ymax = bounds['maxy']+200000
ymin = bounds['miny']-200000
else:
xmax = bounds['maxx']
xmin = bounds['minx']
ymax = bounds['maxy']
ymin = bounds['miny']
for station_name in Cvar_dict.keys():
if station_name in latlon_dict.keys():
loc = latlon_dict[station_name]
latitude = loc[0]
longitude = loc[1]
# Filter out stations outside of grid
proj_coord = pyproj.Proj('esri:102001')(longitude, latitude)
if (proj_coord[1] <= float(ymax[0]) and proj_coord[1] >= float(ymin[0]) and proj_coord[0] <= float(xmax[0]) and proj_coord[0] >= float(xmin[0])):
cvar_val = Cvar_dict[station_name]
lat.append(float(latitude))
lon.append(float(longitude))
Cvar.append(cvar_val)
y = np.array(lat)
x = np.array(lon)
z = np.array(Cvar)
pixelHeight = res
pixelWidth = res
num_col = int((xmax - xmin) / pixelHeight)
num_row = int((ymax - ymin) / pixelWidth)
# We need to project to a projected system before making distance matrix
source_proj = pyproj.Proj(proj='latlong', datum='NAD83')
xProj, yProj = pyproj.Proj('esri:102001')(x, y)
df_trainX = pd.DataFrame({'xProj': xProj, 'yProj': yProj, 'var': z})
if expand_area:
yProj_extent = np.append(
yProj, [bounds['maxy']+200000, bounds['miny']-200000])
xProj_extent = np.append(
xProj, [bounds['maxx']+200000, bounds['minx']-200000])
else:
yProj_extent = np.append(yProj, [bounds['maxy'], bounds['miny']])
xProj_extent = np.append(xProj, [bounds['maxx'], bounds['minx']])
Yi = np.linspace(np.min(yProj_extent), np.max(yProj_extent), num_row+1)
Xi = np.linspace(np.min(xProj_extent), np.max(xProj_extent), num_col+1)
Xi, Yi = np.meshgrid(Xi, Yi)
Xi, Yi = Xi.flatten(), Yi.flatten()
maxmin = [np.min(yProj_extent), np.max(yProj_extent),
np.max(xProj_extent), np.min(xProj_extent)]
# Elevation
# Preparing the coordinates to send to the function that will get the elevation grid
concat = np.array((Xi.flatten(), Yi.flatten())).T
send_to_list = concat.tolist()
# The elevation function takes a tuple
send_to_tuple = [tuple(x) for x in send_to_list]
Xi1_grd = []
Yi1_grd = []
elev_grd = []
# Get the elevations from the lookup file
elev_grd_dict = GD.finding_data_frm_lookup(
send_to_tuple, file_path_elev, idx_list)
for keys in elev_grd_dict.keys(): # The keys are each lat lon pair
x = keys[0]
y = keys[1]
Xi1_grd.append(x)
Yi1_grd.append(y)
# Append the elevation data to the empty list
elev_grd.append(elev_grd_dict[keys])
elev_array = np.array(elev_grd) # make an elevation array
elev_dict = GD.finding_data_frm_lookup(zip(
xProj, yProj), file_path_elev, idx_list) # Get the elevations for the stations
xProj_input = []
yProj_input = []
e_input = []
for keys in zip(xProj, yProj): # Repeat process for just the stations not the whole grid
x = keys[0]
y = keys[1]
xProj_input.append(x)
yProj_input.append(y)
e_input.append(elev_dict[keys])
source_elev = np.array(e_input)
Xi1_grd = np.array(Xi1_grd)
Yi1_grd = np.array(Yi1_grd)
df_trainX = pd.DataFrame(
{'xProj': xProj, 'yProj': yProj, 'elevS': source_elev, 'var': z})
df_testX = pd.DataFrame(
{'Xi': Xi1_grd, 'Yi': Yi1_grd, 'elev': elev_array})
reg = RandomForestRegressor(
n_estimators=100, max_features='sqrt', random_state=1)
y = np.array(df_trainX['var']).reshape(-1, 1)
X_train = np.array(df_trainX[['xProj', 'yProj', 'elevS']])
X_test = np.array(df_testX[['Xi', 'Yi', 'elev']])
reg.fit(X_train, y)
Zi = reg.predict(X_test)
rf_grid = Zi.reshape(num_row+1, num_col+1)
if show:
fig, ax = plt.subplots(figsize=(15, 15))
crs = {'init': 'esri:102001'}
na_map = gpd.read_file(shapefile)
plt.imshow(rf_grid, extent=(xProj_extent.min(
)-1, xProj_extent.max()+1, yProj_extent.max()-1, yProj_extent.min()+1))
na_map.plot(ax= ax, color='white', edgecolor='k', linewidth=2, zorder=10, alpha=0.1)
plt.scatter(xProj, yProj, c=z, edgecolors='k')
plt.gca().invert_yaxis()
cbar = plt.colorbar()
cbar.set_label(var_name)
title = 'RF Interpolation for %s on %s' % (var_name, input_date)
fig.suptitle(title, fontsize=14)
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.show()
return rf_grid, maxmin
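# Minimal, self-contained sketch (not part of the original module) of the fit/predict
# pattern used above: train on station (x, y, elevation) triples and predict on a small
# synthetic grid. All numbers are made up; the real function additionally handles
# projections, shapefile bounds and plotting.
def _demo_rf_fit_predict():
    import numpy as np
    from sklearn.ensemble import RandomForestRegressor

    rng = np.random.RandomState(1)
    X_train = rng.uniform(0, 100000, size=(50, 3))          # xProj, yProj, elevation
    y_train = X_train[:, 2] * 0.01 + rng.normal(size=50)    # toy temperature signal
    X_grid = rng.uniform(0, 100000, size=(10, 3))
    reg = RandomForestRegressor(n_estimators=100, max_features='sqrt', random_state=1)
    reg.fit(X_train, y_train)
    return reg.predict(X_grid)    # one interpolated value per grid cell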
def cross_validate_rf(latlon_dict, Cvar_dict, shapefile, file_path_elev, elev_array, idx_list, pass_to_plot):
'''Leave-one-out cross-validation procedure for RF
Parameters
----------
latlon_dict : dictionary
the latitude and longitudes of the stations
Cvar_dict : dictionary
dictionary of weather variable values for each station
shapefile : string
path to the study area shapefile, including its name
file_path_elev : string
path to the elevation lookup file
elev_array : ndarray
array for elevation, create using IDEW interpolation (this is a trick to speed up code)
idx_list : int
position of the elevation column in the lookup file
pass_to_plot : bool
whether you will be plotting the error and need a version without absolute value error (i.e. fire season days)
Returns
----------
dictionary
- a dictionary of the absolute error at each station when it was left out
dictionary
- if pass_to_plot = True, returns a dictionary without the absolute value of the error, for example for plotting fire season error
'''
x_origin_list = []
y_origin_list = []
absolute_error_dictionary = {} # for plotting
no_absolute_value_dict = {} # to see whether under or over estimation
station_name_list = []
projected_lat_lon = {}
for station_name in Cvar_dict.keys():
if station_name in latlon_dict.keys():
station_name_list.append(station_name)
loc = latlon_dict[station_name]
latitude = loc[0]
longitude = loc[1]
Plat, Plon = pyproj.Proj('esri:102001')(longitude, latitude)
Plat = float(Plat)
Plon = float(Plon)
projected_lat_lon[station_name] = [Plat, Plon]
for station_name_hold_back in station_name_list:
lat = []
lon = []
Cvar = []
for station_name in sorted(Cvar_dict.keys()):
if station_name in latlon_dict.keys():
if station_name != station_name_hold_back:
loc = latlon_dict[station_name]
latitude = loc[0]
longitude = loc[1]
cvar_val = Cvar_dict[station_name]
lat.append(float(latitude))
lon.append(float(longitude))
Cvar.append(cvar_val)
else:
pass
y = np.array(lat)
x = np.array(lon)
z = np.array(Cvar)
na_map = gpd.read_file(shapefile)
bounds = na_map.bounds
xmax = bounds['maxx']
xmin = bounds['minx']
ymax = bounds['maxy']
ymin = bounds['miny']
pixelHeight = 10000
pixelWidth = 10000
num_col = int((xmax - xmin) / pixelHeight)
num_row = int((ymax - ymin) / pixelWidth)
# We need to project to a projected system before making distance matrix
source_proj = pyproj.Proj(proj='latlong', datum='NAD83')
xProj, yProj = pyproj.Proj('esri:102001')(x, y)
df_trainX = | pd.DataFrame({'xProj': xProj, 'yProj': yProj, 'var': z}) | pandas.DataFrame |
from .handler import function_handler
import yaml
import pytest
import pandas as pd
import numpy as np
from packaging import version
def transform_setup(function):
# read in file infos
with open("tests/test_yamls/test_transform.yml", "r") as stream:
file_infos = yaml.safe_load(stream)
if function == "decompress-content":
extract_infos = [file_info for file_info in file_infos if file_info['function'] == "decompress-content"]
transform_infos = []
for info in extract_infos:
# get file infos
input_file = info['input_file']
output_file = info['output_file']
# create input & output dfs
f = open(input_file, "rb")
binary_str = f.read()
input_df = pd.DataFrame({0: [binary_str]})
output_df = pd.read_csv(output_file)
transform_infos.append((input_df, output_df))
return transform_infos
if function == "transform-to-dataframe":
extract_infos = [file_info for file_info in file_infos if file_info['function'] == "transform-to-dataframe"]
transform_infos = []
for info in extract_infos:
# get file infos
input_file = info['input_file']
output_file = info['output_file']
str_type = info['str_type']
# create input & output dfs
input_df = pd.read_csv(input_file)
output_df = pd.read_csv(output_file)
transform_infos.append((input_df, output_df, str_type))
return transform_infos
if function == "split-dataframe-rows":
extract_infos = [file_info for file_info in file_infos if file_info['function'] == "split-dataframe-rows"]
transform_infos = []
for info in extract_infos:
# get file infos
input_file = info['input_file']
# create input & output dfs
input_df = pd.read_csv(input_file, header=None)
output_dfs = [pd.read_csv(output_df) for output_df in info['output_files']]
transform_infos.append((input_df, output_dfs))
return transform_infos
if function == "flatten-lists-to-dataframe":
extract_infos = [file_info for file_info in file_infos if file_info['function'] == "flatten-lists-to-dataframe"]
transform_infos = []
for info in extract_infos:
# get file infos
input_file = info['input_file']
output_file = info['output_file']
extract_field = info['extract_field']
preserve_origin_data = info['preserve_origin_data']
# create input & output dfs
input_df = pd.read_csv(input_file)
output_df = pd.read_csv(output_file)
transform_infos.append((input_df, output_df, extract_field, preserve_origin_data))
return transform_infos
if function == "string-injecting":
extract_infos = [file_info for file_info in file_infos if file_info['function'] == "string-injecting"]
transform_infos = []
for info in extract_infos:
# get file infos
input_file = info['input_file']
output_file = info['output_file']
# create input & output dfs
input_df = pd.read_csv(input_file, names=[0, 'gem_name'])
output_df = pd.read_csv(output_file)
transform_infos.append((input_df, output_df))
return transform_infos
if function == "rename-columns":
extract_infos = [file_info for file_info in file_infos if file_info['function'] == "rename-columns"]
transform_infos = []
for info in extract_infos:
# get file infos
input_file = info['input_file']
output_file = info['output_file']
map_dict = info['rename_map']
# create input & output dfs
input_df = pd.read_csv(input_file)
output_df = pd.read_csv(output_file)
transform_infos.append((input_df, output_df, map_dict))
return transform_infos
if function == "json-array-to-dataframe":
extract_infos = [file_info for file_info in file_infos if file_info['function'] == "json-array-to-dataframe"]
transform_infos = []
for info in extract_infos:
# get file infos
input_file = info['input_file']
output_file = info['output_file']
headers = info['headers']
extract_field = info['extract_field']
# create input & output dfs
input_df = pd.read_csv(input_file)
output_df = pd.read_csv(output_file)
transform_infos.append((input_df, output_df, extract_field, headers))
return transform_infos
if function == "sampling":
extract_infos = [file_info for file_info in file_infos if file_info['function'] == "sampling"]
transform_infos = []
for info in extract_infos:
# get file infos
input_file = info['input_file']
output_file = info['output_file']
sampling_nums = info['nums']
seed = info['seed']
# create input & output dfs
input_df = | pd.read_csv(input_file) | pandas.read_csv |
# coding: utf-8
"""
@author: Daan
Finds alignments between two thesauri by performing exact string matching on all possible combinations of types of labels (prefLabel and altLabel) and language (nl and en)
Does a second round of matching in which alignments are found by looking at the stemmed forms of strings that did not have a previous match
A numeric analysis of the alignments is exported to a csv file
"""
import xmltodict
import os.path
import pandas as pd
from nltk.stem import PorterStemmer
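# Illustrative sketch (labels are invented) of the second, stemmed matching round
# described in the module docstring: labels that do not match exactly can still
# match once both sides are reduced to their stems.
def _demo_stemmed_match():
    stemmer = PorterStemmer()
    labels_1 = ["paintings", "drawing"]
    labels_2 = ["painting", "drawings"]
    stems_1 = {stemmer.stem(lbl) for lbl in labels_1}
    stems_2 = {stemmer.stem(lbl) for lbl in labels_2}
    return stems_1 & stems_2    # {'paint', 'draw'}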
def main():
#Initializes the input and output files and directories
create_output_dir()
targeted_input_files = ["rma-skos-thesaurus-amalgame","rma-skos-materials-amalgame","rma-skos-lib-amalgame","rma-skos-thesaurus-amalgame-scheme-KEYWORD"]
input_file_1 = targeted_input_files[2]
source_file_1 = os.path.join(os.path.abspath('..\\xslt_mapping\\output'), input_file_1) + '.rdf'
with open(source_file_1, encoding='utf8') as fd:
doc_1 = xmltodict.parse(fd.read())
input_file_2 = targeted_input_files[0]
source_file_2 = os.path.join(os.path.abspath('..\\xslt_mapping\\output'), input_file_2) + '.rdf'
with open(source_file_2, encoding='utf8') as fd:
doc_2 = xmltodict.parse(fd.read())
"""
Can be used to check the length of the different input files, which is important when deciding a source and a target for the mapping
"""
#print(len(doc_1['rdf:RDF']['rdf:Description']))
#print(len(doc_2['rdf:RDF']['rdf:Description']))
#Perform the matching and export results to a csv
match_df, stemmed_match_df = full_matching_run(doc_1,doc_2)
match_df.to_csv ('output/' + 'Full_label_matches_stemmed_' + input_file_1 + "_" + input_file_2 + '.csv', index = None, header=True)
stemmed_match_df.to_csv ('output/' + 'Full_label_matches_stemmed_strings' + input_file_1 + "_" + input_file_2 + '.csv', index = None, header=True)
print('Analysis completed and placed in the output folder')
def create_output_dir():
if not os.path.exists('output'):
os.mkdir('output')
def full_matching_run(doc_1,doc_2):
#Initialize the dataframes and possible matches
matching_df = | pd.DataFrame(columns=["label_type","language","matches_doc_1","matches_doc_1_stemmed","matches_doc_2","matches_doc_2_stemmed"]) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime, timedelta
from pkg_resources import resource_stream
import numpy as np
import pandas as pd
import pytest
import pytz
from eemeter.transform import (
as_freq,
clean_caltrack_billing_data,
downsample_and_clean_caltrack_daily_data,
clean_caltrack_billing_daily_data,
day_counts,
get_baseline_data,
get_reporting_data,
get_terms,
remove_duplicates,
NoBaselineDataError,
NoReportingDataError,
overwrite_partial_rows_with_nan,
)
def test_as_freq_not_series(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
with pytest.raises(ValueError):
as_freq(meter_data, freq="H")
def test_as_freq_hourly(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_hourly = as_freq(meter_data.value, freq="H")
assert as_hourly.shape == (18961,)
assert round(meter_data.value.sum(), 1) == round(as_hourly.sum(), 1) == 21290.2
def test_as_freq_daily(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_daily = as_freq(meter_data.value, freq="D")
assert as_daily.shape == (792,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 21290.2
def test_as_freq_daily_all_nones_instantaneous(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
meter_data["value"] = np.nan
assert meter_data.shape == (27, 1)
as_daily = as_freq(meter_data.value, freq="D", series_type="instantaneous")
assert as_daily.shape == (792,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 0
def test_as_freq_daily_all_nones(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
meter_data["value"] = np.nan
assert meter_data.shape == (27, 1)
as_daily = as_freq(meter_data.value, freq="D")
assert as_daily.shape == (792,)
assert round(meter_data.value.sum(), 1) == round(as_daily.sum(), 1) == 0
def test_as_freq_month_start(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
assert meter_data.shape == (27, 1)
as_month_start = as_freq(meter_data.value, freq="MS")
assert as_month_start.shape == (28,)
assert round(meter_data.value.sum(), 1) == round(as_month_start.sum(), 1) == 21290.2
def test_as_freq_hourly_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417,)
as_hourly = as_freq(temperature_data, freq="H", series_type="instantaneous")
assert as_hourly.shape == (19417,)
assert round(temperature_data.mean(), 1) == round(as_hourly.mean(), 1) == 54.6
def test_as_freq_daily_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417,)
as_daily = as_freq(temperature_data, freq="D", series_type="instantaneous")
assert as_daily.shape == (811,)
assert abs(temperature_data.mean() - as_daily.mean()) <= 0.1
def test_as_freq_month_start_temperature(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
assert temperature_data.shape == (19417,)
as_month_start = as_freq(temperature_data, freq="MS", series_type="instantaneous")
assert as_month_start.shape == (29,)
assert round(as_month_start.mean(), 1) == 53.4
def test_as_freq_daily_temperature_monthly(il_electricity_cdd_hdd_billing_monthly):
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temperature_data = temperature_data.groupby(pd.Grouper(freq="MS")).mean()
assert temperature_data.shape == (28,)
as_daily = as_freq(temperature_data, freq="D", series_type="instantaneous")
assert as_daily.shape == (824,)
assert round(as_daily.mean(), 1) == 54.5
def test_as_freq_empty():
meter_data = pd.DataFrame({"value": []})
empty_meter_data = as_freq(meter_data.value, freq="H")
assert empty_meter_data.empty
def test_as_freq_perserves_nulls(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
monthly_with_nulls = meter_data[meter_data.index.year != 2016].reindex(
meter_data.index
)
daily_with_nulls = as_freq(monthly_with_nulls.value, freq="D")
assert (
round(monthly_with_nulls.value.sum(), 2)
== round(daily_with_nulls.sum(), 2)
== 11094.05
)
assert monthly_with_nulls.value.isnull().sum() == 13
assert daily_with_nulls.isnull().sum() == 365
def test_day_counts(il_electricity_cdd_hdd_billing_monthly):
data = il_electricity_cdd_hdd_billing_monthly["meter_data"].value
counts = day_counts(data.index)
assert counts.shape == (27,)
assert counts.iloc[0] == 29.0
assert pd.isnull(counts.iloc[-1])
assert counts.sum() == 790.0
def test_day_counts_empty_series():
index = pd.DatetimeIndex([])
index.freq = None
data = | pd.Series([], index=index) | pandas.Series |
"""
@author: <NAME>
"""
import os
import re
import numpy as np
import pandas as pd
import json
import pickle
from argparse import ArgumentParser
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, classification_report
import sys
import warnings
parser = ArgumentParser()
# For training & validation
parser.add_argument("--trainDir", type=str, default="./datasets/train", help="path to training data")
parser.add_argument("--validDir", type=str, default="./datasets/valid",help="path to validation data")
parser.add_argument("--outDir", type=str, default="experiments", help="directory to store trained model & classification report")
parser.add_argument("--features", type=str, default="lri", choices=["lri", "lr", "li", "ri"], help="combination of context feature vectors for the experiment")
parser.add_argument("--modelType", type=str, default="svm", choices=["knn", "svm", "mlp"], help="type of model to experiment")
# For testing
parser.add_argument('--test', action='store_true', help="indicator for test data")
parser.add_argument("--testDir", type=str, default="./datasets/test", help="path to test data")
parser.add_argument("--testOutDir", type=str, default="./test_out", help="path to output files for storing test predictions")
parser.add_argument("--modelsDir", type=str, help="path to trained models")
# Tunable params for weighted k-NN
parser.add_argument("--k", type=int, default=5, help="[k-NN] no. of neighbors")
parser.add_argument("--beta", type=float, default=1, help="[k-NN] relative weight of right context vector")
parser.add_argument("--gamma", type=float, default=1, help="[k-NN] relative weight of interplay context vector")
# Tunable params for MLP
parser.add_argument("--numNeurons", type=int, default=100, help="[MLP] no. of neurons in hidden layer")
# Tunable params for SVM
parser.add_argument("--C", type=float, default=1, help="[SVM] regularization parameter")
class PSDExperiment:
def __init__(self, model_type, params, features, train_path, valid_path, output_dir, test=False):
self.features = features
self.model_type = model_type
self.params = params
if not test:
print("Initializing experiment...")
self.train_dir = train_path
self.valid_dir = valid_path
self.model_identifier = f"{features}_{model_type}"
for key, value in params.items():
self.model_identifier += f"_{key}={value}"
self.output_dir = output_dir
self.models_dir = os.path.join(self.output_dir, "models", self.model_identifier)
self.reports_dir = os.path.join(self.output_dir, "reports")
for folder in [self.models_dir, self.reports_dir ]:
if not os.path.exists(folder):
os.makedirs(folder)
self.prepositions_with_one_sense = {}
else:
with open("sense_mapping.json", "r") as f:
self.sense_mappings = json.load(f)
def __initializeModel(self):
if self.model_type == "knn":
return KNeighborsClassifier(n_neighbors=self.params["k"])
elif self.model_type == "mlp":
return MLPClassifier(hidden_layer_sizes=(self.params["num_neurons"],), random_state=1, max_iter=200)
elif self.model_type == "svm":
return SVC(kernel="linear", C=self.params["C"])
else:
print("Invalid model! Exitting...")
exit(1)
def __getFeatures(self, df):
X_vl = np.stack(df["vl"].values, axis=0)
X_vr = np.stack(df["vr"].values, axis=0)
X_vi = np.stack(df["vi"].values, axis=0)
if self.features == "lri":
if self.model_type == "knn":
return X_vl + self.params["beta"]*X_vr + self.params["gamma"]*X_vi
else:
return np.concatenate((X_vl, X_vr, X_vi), axis=1)
elif self.features == "lr":
if self.model_type == "knn":
return X_vl + self.params["beta"]*X_vr
else:
return np.concatenate((X_vl, X_vr), axis=1)
elif self.features == "li":
if self.model_type == "knn":
return X_vl + self.params["gamma"]*X_vi
else:
return np.concatenate((X_vl, X_vi), axis=1)
elif self.features == "ri":
if self.model_type == "knn":
return self.params["beta"]*X_vr + self.params["gamma"]*X_vi
else:
return np.concatenate((X_vr, X_vi), axis=1)
def trainModels(self):
print(f"Training {self.model_type} models...")
for prep_train_data in os.listdir(self.train_dir):
preposition = re.findall(r"([a-z]*)\.pkl", prep_train_data)[0]
train_df = pd.read_pickle(os.path.join(self.train_dir, prep_train_data))
X = self.__getFeatures(train_df)
y = train_df["preposition_sense"]
num_senses = len(y.unique())
print("Preposition: %s \tNumber of senses: %d" % (preposition, num_senses))
if num_senses > 1:
# Train a model to disambiguate each preposition
model = self.__initializeModel()
model.fit(X, y)
pickle.dump(model, open(os.path.join(self.models_dir, preposition + ".sav"), 'wb'))
else:
self.prepositions_with_one_sense[preposition] = y[0]
print("Training completed!")
print("==================================================================")
def validateModels(self):
print("Validating models...")
y_actual_all = pd.Series([], dtype=str)
y_pred_all = np.array([])
for prep_valid_data in os.listdir(self.valid_dir):
preposition = re.findall(r"([a-z]*)\.pkl", prep_valid_data)[0]
valid_df = pd.read_pickle(os.path.join(self.valid_dir, prep_valid_data))
y_actual = valid_df["preposition_sense"]
y_actual_all = y_actual_all.append(y_actual)
if preposition in self.prepositions_with_one_sense.keys():
y_pred = pd.Series([self.prepositions_with_one_sense[preposition]]*len(valid_df))
else:
X = self.__getFeatures(valid_df)
model = pickle.load(open(os.path.join(self.models_dir, preposition + ".sav"), 'rb'))
y_pred = model.predict(X)
y_pred_all = np.append(y_pred_all, y_pred)
print("Preposition: %s \tValidation Accuracy: %.4f" % (preposition, accuracy_score(y_actual, y_pred)))
report = classification_report(y_actual_all, y_pred_all, output_dict=True)
valid_report = | pd.DataFrame(report) | pandas.DataFrame |
# coding:utf-8
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import LinearRegression
import warnings
pd.options.mode.chained_assignment = None
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Step 1: data import
DataTrain = pd.read_csv("../MS/train.csv")
DataTest = pd.read_csv("../MS/test.csv")
DataTrain.head()
DataTest.head()
DataTrain.info()
DataTest.info()
DataTrain.isnull().sum()
DataTest.isnull().sum()
# The results show that the provided datasets have no missing values
# Step 2: exploratory data visualization
# Look for outliers
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.set_size_inches(5, 5)
sns.boxplot(data=DataTrain, y="count", orient="v", ax=ax)
ax.set(ylabel="count", title="Box Plot On Count")
DataTrain.reset_index(inplace=True)
fig1,(ax1, ax2) = plt.subplots(nrows=2, ncols=1)
fig1.set_size_inches(10, 5)
sns.regplot(x="index", y="temp", data=DataTrain, ax=ax1)
sns.regplot(x="index", y="atemp", data=DataTrain, ax=ax2)
ax1.set(ylabel="temp", title="temp scatter diagram")
ax2.set(ylabel="atemp", title="atemp scatter diagram")
fig2 = plt.figure()
fig2.set_size_inches(10, 5)
ax3 = fig2.add_subplot(1, 1, 1)
sns.regplot(x="index", y="humidity", data=DataTrain, ax=ax3)
ax3.set(ylabel="humidity", title="humidity scatter diagram")
fig3 = plt.figure()
fig3.set_size_inches(10, 5)
ax4 = fig3.add_subplot(1, 1, 1)
sns.regplot(x="index", y="windspeed", data=DataTrain, ax=ax4)
ax4.set(ylabel="windspeed", title="windspeed scatter diagram")
# Outliers are present in apparent temperature (atemp), windspeed, and some count values
# Data splitting (extract datetime components)
# DataTrain.drop('index', inplace=True, axis=1)
DataTrain['datetime'] = pd.to_datetime(DataTrain['datetime'])
DataTrain['year'] = DataTrain['datetime'].dt.year
DataTrain['month'] = DataTrain['datetime'].dt.month
DataTrain['day'] = DataTrain['datetime'].dt.day
DataTrain['hour'] = DataTrain['datetime'].dt.hour
DataTest['datetime'] = | pd.to_datetime(DataTest['datetime']) | pandas.to_datetime |
# Import the required libraries
import pandas as pd  # to work with dataframes
import os  # to work with directories
import re  # to work with regular expressions
import matplotlib.pyplot as plt
import seaborn as sns
# STEP 1: Merge the files from each of the ETFs and Stocks folders
def ruta_carpeta_ETFs():
path='/Users/juanlu_navarro/Documents/Carrera Juan/programacion/Trabajo-final/ETFs'
os.chdir(path)
ruta_carpeta_ETFs()
def union_archivos_ETFs(path):
    archivos = [x for x in os.listdir() if re.search(r'\.txt', x)]
print(archivos)
df= pd.DataFrame()
for i in archivos:
archivo=pd.read_csv(i)
df= | pd.concat([df,archivo]) | pandas.concat |
# Hello streamlit
import streamlit as st
import pandas as pd
import base64
import io
# ifcopenshell
import ifcopenshell
from ifcopenshell.util import element
st.title("Hello Renholdskalkulator")
st.text("Last opp P13 kompatibel BIM og din renholdskost per m2 i menyen til venstre")
# file uploader
st.sidebar.header("BIM-basert renholdskostnader")
st.sidebar.caption("Sett inn kostnader per m2 i kr")
your_m2_price = st.sidebar.number_input("Sett inn din pris per m2",min_value=0,step = 100)
st.sidebar.caption("Last opp en renholdskompatibel ifc")
uploaded_file = st.sidebar.file_uploader("Choose a file",type=["ifc"])
## If/when we want to test agains P13 mvd it is found here:
## https://test-bimvalbygg.dibk.no/mvd/ramme_etttrinn_igangsetting.mvdxml
# Loads a data from a string representation of an IFC file
# ref: https://community.osarch.org/discussion/659/ifcopenshell-how-to-work-with-file-content-instead-of-file-path
def load_ifc(ifc_file):
ifc = ifcopenshell.file.from_string(ifc_file)
return ifc
def get_qtos(elem):
psets = element.get_psets(elem)
if "BaseQuantities" in psets:
return psets["BaseQuantities"]
elif "Qto_SpaceBaseQuantities" in psets:
return psets["Qto_SpaceBaseQuantities"]
else:
return {}
# Fetched from ifcopenshell.util.unit --> will be part of newer releases
def get_unit_assignment(ifc_file):
unit_assignments = ifc_file.by_type("IfcUnitAssignment")
if unit_assignments:
return unit_assignments[0]
# function to get net areas for spaces
def get_net_areas(elem):
qtos=get_qtos(elem)
net_areas = {}
if "NetFloorArea" in qtos:
net_areas["Netto Gulvareal m2"] = round(qtos["NetFloorArea"],2)
else:
net_areas["Netto Gulvareal m2"] = None
if "NetCeilingArea" in qtos:
net_areas["Netto Takareal m2"] = round(qtos["NetCeilingArea"],2)
else:
net_areas["Netto Takareal m2"] = None
if "NetWallArea" in qtos:
net_areas["Netto Veggareal m2"] = round(qtos["NetWallArea"],2)
else:
net_areas["Netto Veggareal m2"] = None
return net_areas
def get_room_info(space):
room = space.get_info()
info_to_return = {}
# Name
if "Name" in room:
info_to_return["Navn"] = room["Name"]
else:
info_to_return["Navn"] = None
# LongName
if "LongName" in room:
info_to_return["Langt navn"] = room["LongName"]
else:
info_to_return["Langt navn"] = None
# Type
spaceType = element.get_type(space)
if spaceType:
info_to_return["Romtype"] = spaceType.Name
else:
info_to_return["Romtype"] = None
# GlobalID
if "GlobalId" in room:
info_to_return["GlobalId"] = room["GlobalId"]
else:
info_to_return["GlobalId"] = None
# add qtos
info_to_return.update(get_net_areas(space))
return info_to_return
def space_df(spaces,options):
spacelist = []
for space in spaces:
spacelist.append(get_room_info(space))
df = | pd.DataFrame(spacelist) | pandas.DataFrame |
# coding=utf-8
# Author: <NAME>
# Date: Sept 02, 2019
#
# Description: Builds a MultiLayer network (HS, MM & DM) based on genes found by DGE with StringDB edges.
#
#
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import networkx as nx
from itertools import chain, product
from utils import ensurePathExists, open_undefined_last_column_files
import argparse
if __name__ == '__main__':
#
# Args
#
celltype = 'spermatocyte'
network = 'prp113'
#
# Init
#
G = nx.Graph()
##
# HS Network Data
##
print('Processing HS data')
df_HS = pd.read_csv("results/HS-DGE-rnf113_vs_control-SELECTED-string.csv", index_col='id_gene', usecols=['id_string', 'id_gene', 'gene', 'logFC', 'logCPM', 'FDR'])
df_HS = df_HS.rename(columns={'gene': 'label'}) # Rename
# Map id_gene -> id_string
dict_HS_id_gene_to_id_string = df_HS.explode('id_string').reset_index().set_index('id_string')['id_gene'].to_dict()
# Bag of String (some genes have >1 id_string)
set_HS_id_strings = set(np.hstack(df_HS['id_string'].dropna()))
# Identify layer
df_HS['layer'] = 'HS'
# Add Nodes with tuple(id, attrs)
idxs = df_HS.index.to_list()
    attrs = [{k: v for k, v in m.items() if pd.notnull(v)} for m in df_HS.to_dict(orient='records')]
G.add_nodes_from([(i, d) for i, d in zip(idxs, attrs)])
#
# HS Links
#
print('Adding links')
df_HS_links = | pd.read_csv("../../data/StringDB/9606/9606.protein.links.full.v11.0.txt.gz", sep=' ', usecols=['protein1', 'protein2', 'textmining', 'database', 'experiments', 'coexpression', 'neighborhood', 'fusion', 'cooccurence', 'combined_score']) | pandas.read_csv |
import numpy as np
import pickle as pkl
import scipy.sparse as sp
import torch
import torch.nn as nn
import random
from sklearn import metrics
import pandas as pd
def shuffle_nodes(items, masks):
node_num = masks.sum(axis=1)
batch_size = masks.shape[0]
node_max = masks.shape[1]
shuffle_item = items.copy()
for i in range(batch_size):
shuf_idx = np.append(np.random.permutation(node_num[i]), np.ones(node_max - node_num[i]) * (node_max - 1))
idx = shuf_idx.astype(np.int32)
shuffle_item[i] = shuffle_item[i, idx]
return shuffle_item
###############################################
# This section of code adapted from tkipf/gcn #
###############################################
def parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=np.bool)
def macro_f1(preds, labels):
labels = labels.to(torch.device("cpu")).numpy()
preds = preds.to(torch.device("cpu")).numpy()
macro = metrics.f1_score(labels, preds, average='macro')
return macro
def sparse_to_tuple(sparse_mx, insert_batch=False):
"""Convert sparse matrix to tuple representation."""
"""Set insert_batch=True if you want to insert a batch dimension."""
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
if insert_batch:
coords = np.vstack((np.zeros(mx.row.shape[0]), mx.row, mx.col)).transpose()
values = mx.data
shape = (1,) + mx.shape
else:
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
def standardize_data(f, train_mask):
"""Standardize feature matrix and convert to tuple representation"""
# standardize data
f = f.todense()
mu = f[train_mask == True, :].mean(axis=0)
sigma = f[train_mask == True, :].std(axis=0)
f = f[:, np.squeeze(np.array(sigma > 0))]
mu = f[train_mask == True, :].mean(axis=0)
sigma = f[train_mask == True, :].std(axis=0)
f = (f - mu) / sigma
return f
def preprocess_features(features):
"""Row-normalize feature matrix and convert to tuple representation"""
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return features.todense(), sparse_to_tuple(features)
def normalize_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
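# Small self-contained example (toy graph, not from the original repo) of the
# D^-1/2 * A * D^-1/2 normalization implemented above.
def _demo_normalize_adj():
    toy_adj = sp.coo_matrix(np.array([[0., 1., 1.],
                                      [1., 0., 0.],
                                      [1., 0., 0.]]))
    normalized = normalize_adj(toy_adj)
    # each nonzero entry (i, j) is rescaled to 1 / sqrt(deg(i) * deg(j)); e.g.
    # entry (0, 1) becomes 1 / sqrt(2 * 1)
    return normalized.todense()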
def preprocess_adj(adj):
"""Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))
return sparse_to_tuple(adj_normalized)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
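# Quick sanity-check style example (not from the original code) for the conversion
# above: a 2x2 scipy sparse matrix becomes a torch sparse tensor with the same
# dense values.
def _demo_sparse_to_torch():
    mx = sp.coo_matrix(np.array([[1., 0.],
                                 [0., 2.]]))
    tensor = sparse_mx_to_torch_sparse_tensor(mx)
    return tensor.to_dense()    # tensor([[1., 0.], [0., 2.]])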
###############################################
# This section of code adapted from DGRec #
###############################################
def load_latest_session(data_path):
ret = []
for line in open(data_path + '/latest_sessions.txt'):
chunks = line.strip().split(',')
ret.append(chunks)
return ret
def load_map(data_path):
id_map = {}
for line in open(data_path):
k, v = line.strip().split(',')
id_map[k] = str(v)
map_num = len(id_map)
return map_num
def load_adj(data_path, dataset="Yelp"):
if dataset == "Yelp":
adj_social = sp.load_npz(data_path + "/meta_path/adj_user.npz")
mp_city = sp.load_npz(data_path + "/meta_path/mp_city.npz")
mp_category = sp.load_npz(data_path + "/meta_path/mp_category.npz")
# return [mp_iui, mp_social_dict, mp_category, mp_city]
adj_social = adj_social.tolil()
return [adj_social, mp_category.tolil(), mp_city.tolil()]
elif dataset == "Tmall":
mp_brand = sp.load_npz(data_path + "/meta_path/mp_brand.npz")
mp_seller = sp.load_npz(data_path + "/meta_path/mp_seller.npz")
mp_cate = sp.load_npz(data_path + "/meta_path/mp_category.npz")
return [mp_seller.tolil(), mp_brand.tolil(), mp_cate.tolil()]
elif dataset == "Nowplaying":
mp_artist = sp.load_npz(data_path + "/meta_path/mp_artist.npz")
mp_hashtag = sp.load_npz(data_path + "/meta_path/mp_hashtag.npz")
mp_context = sp.load_npz(data_path + "/meta_path/mp_context.npz")
return [mp_artist.tolil(), mp_hashtag.tolil(), mp_context.tolil()]
def load_data(path="./Yelp/processed/", dataset="Yelp"):
latest_sessions = load_latest_session(path)
mp_adj_list = load_adj(path, dataset)
mp_test_adj = load_adj(path + '/test', dataset)
# mp_adj_list = mp_test_adj
if dataset == "Yelp":
business_file = path + '/business_id_map.csv'
user_file = path + '/user_id_map.csv'
city_file = path + '/city_id_map.csv'
category_file = path + '/category_id_map.csv'
business_num = load_map(data_path=business_file)
user_num = load_map(data_path=user_file)
city_num = load_map(data_path=city_file)
category_num = load_map(data_path=category_file)
num_list = [business_num, user_num, city_num, category_num]
train = pd.read_csv(path + '/train.csv', sep=',', dtype={0: str, 1: str, 2: str, 3: str, 4: str, 5: str, 6: str,
7: str})
valid = pd.read_csv(path + '/valid.csv', sep=',', dtype={0: str, 1: str, 2: str, 3: str, 4: str, 5: str,
6: str, 7: str})
test = pd.read_csv(path + '/test.csv', sep=',', dtype={0: str, 1: str, 2: str, 3: str, 4: str, 5: str, 6: str,
7: str})
# return adjs, features, labels, idx_train, idx_val, idx_test
elif dataset == "Tmall":
train = | pd.read_csv(path + '/train.csv', sep=',', dtype={0: str, 1: int, 2: int, 3: int}) | pandas.read_csv |
# Copyright (c) 2019.
# Author: <NAME>
"""This is the Date helper.
This module helps to create and transform datetime.
"""
import datetime
from dateutil import tz
import pandas as pd
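# Minimal sketch (not part of the original helper) of the UTC -> local-time
# conversion that the DateHelper class below wraps with dateutil's tz module.
def _demo_utc_to_local():
    utc_now = datetime.datetime.utcnow()
    local = utc_now.replace(tzinfo=tz.gettz('UTC')).astimezone(tz.tzlocal())
    return local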
class DateHelper(object):
@staticmethod
def create_local_utc_datetime():
return datetime.datetime.utcnow()
@staticmethod
def transform_timezone_to_local(dt):
# now = datetime.datetime.utcnow()
HERE = tz.tzlocal()
UTC = tz.gettz('UTC')
dt = dt.replace(tzinfo=UTC)
dt = dt.astimezone(HERE)
return dt
@staticmethod
def get_local_datetime():
dt = DateHelper.create_local_utc_datetime()
return DateHelper.transform_timezone_to_local(dt)
@staticmethod
def format_datetime(dt):
ts = | pd.to_datetime(dt, format="%Y-%m-%d-%H:%M:%S.%f") | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 29 08:35:09 2019
@author: user
"""
# execute primary input data building script
# import build_input_res_heating
print('####################')
print('BUILDING INPUT DATA TO INCLUDE DEMAND-SIDE RESPONSE, ENERGY EFFICIENCY AND DHW BOILERS')
print('####################')
import os
import itertools
import hashlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import grimsel.auxiliary.sqlutils.aux_sql_func as aql
import datetime
import seaborn as sns
from grimsel.auxiliary.aux_general import print_full
from grimsel.auxiliary.aux_general import translate_id
import config_local as conf
from grimsel.auxiliary.aux_general import expand_rows
base_dir = conf.BASE_DIR
data_path = conf.PATH_CSV
data_path_prv = conf.PATH_CSV + '_res_heating'
seed = 2
np.random.seed(seed)
db = conf.DATABASE
sc = conf.SCHEMA
#db = 'grimsel_1'
#sc = 'lp_input_ht_ee_dsm'
def append_new_rows(df, tb):
list_col = list(aql.get_sql_cols(tb, sc, db).keys())
aql.write_sql(df[list_col], db=db, sc=sc, tb=tb, if_exists='append')
def del_new_rows(ind, tb, df):
del_list = df[ind].drop_duplicates()
for i in ind:
del_list[i] = '%s = '%i + del_list[i].astype(str)
del_str = ' OR '.join(del_list.apply(lambda x: '(' + ' AND '.join(x) + ')', axis=1))
exec_strg = '''
DELETE FROM {sc}.{tb}
WHERE {del_str}
'''.format(tb=tb, sc=sc, del_str=del_str)
aql.exec_sql(exec_strg, db=db)
def replace_table(df, tb):
print('Replace table %s'%tb)
# list_col = list(aql.get_sql_cols(tb, sc, db).keys())
aql.write_sql(df, db=db, sc=sc, tb=tb, if_exists='replace')
def append_new_cols(df, tb):
#
list_col = list(aql.get_sql_cols(tb, sc, db).keys())
col_new = dict.fromkeys((set(df.columns.tolist()) - set(list_col)))
for key, value in col_new.items():
col_new[key] = 'DOUBLE PRECISION'
# col_new = dict.fromkeys((set(list_col[0].columns.tolist()) - set(list_col)),1)
aql.add_column(df_src=df,tb_tgt=[sc,tb],col_new=col_new,on_cols=list_col, db=db)
# %% DHW loads
dfload_arch_dhw = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dhw_dec.csv')
dfload_arch_dhw['DateTime'] = dfload_arch_dhw['DateTime'].astype('datetime64[ns]')
# dfload_arch_dhw = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dhw_dec')
dferg_arch_dhw = dfload_arch_dhw.groupby('nd_id')['erg_tot'].sum().reset_index()
dferg_arch_dhw['nd_id_new'] = dferg_arch_dhw.nd_id
dfload_arch_dhw_central = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dhw_cen.csv')
# dfload_arch_dhw_central = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dhw_cen')
dferg_arch_dhw_central = dfload_arch_dhw_central.groupby('nd_id')['erg_tot'].sum().reset_index()
dferg_arch_dhw_central['nd_id_new'] = dferg_arch_dhw_central.nd_id
# dfload_dhw_elec = pd.read_csv(os.path.join(base_dir,'../heat_dhw/dhw_el_load_night_charge.csv'),sep=';')
# dfload_dhw_elec['DateTime'] = pd.to_datetime(dfload_dhw_elec.DateTime)
# dfload_dhw_remove = pd.merge(dfload_arch_dhw,dfload_dhw_elec.drop(columns='dhw_mw'), on='DateTime' )
# dfload_dhw_remove = pd.merge(dfload_dhw_remove,dferg_arch_dhw.drop(columns='nd_id_new').rename(columns={'erg_tot':'erg_year'}),on='nd_id'
# ).assign(load_dhw_rem = lambda x: x.dhw_rel_load*x.erg_year)
# %% Central DHW loads
#Bau load
dfload_arch_dhw_central = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dhw_cen.csv')
# dfload_arch_dhw_central = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dhw_cen')
dfload_arch_dhw_central['erg_tot'] = dfload_arch_dhw_central.erg_tot/24 # MWh -> MW
dfload_arch_dhw_central['erg_tot_retr_1pc'] = dfload_arch_dhw_central.erg_tot # here already in MW previous line
dfload_arch_dhw_central['erg_tot_retr_2pc'] = dfload_arch_dhw_central.erg_tot # here already in MW previous line
dfload_arch_dhw_central = dfload_arch_dhw_central.set_index('DateTime')
dfload_arch_dhw_central.index = pd.to_datetime(dfload_arch_dhw_central.index)
#fossil load
dfload_arch_dhw_central_fossil = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dhw_cen_fossil.csv')
# dfload_arch_dhw_central_fossil = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dhw_cen_fossil')
dfload_arch_dhw_central_fossil['erg_tot_fossil'] = dfload_arch_dhw_central_fossil.erg_tot/24 # MWh -> MW
dfload_arch_dhw_central_fossil['erg_tot_retr_1pc_fossil'] = dfload_arch_dhw_central_fossil.erg_tot/24 # MWh -> MW
dfload_arch_dhw_central_fossil['erg_tot_retr_2pc_fossil'] = dfload_arch_dhw_central_fossil.erg_tot/24 # MWh -> MW
dfload_arch_dhw_central_fossil = dfload_arch_dhw_central_fossil.drop(columns='erg_tot')
dfload_arch_dhw_central_fossil = dfload_arch_dhw_central_fossil.set_index('DateTime')
dfload_arch_dhw_central_fossil.index = pd.to_datetime(dfload_arch_dhw_central_fossil.index)
dfload_arch_dhw_central = dfload_arch_dhw_central.reset_index()
dfload_arch_dhw_central = pd.merge(dfload_arch_dhw_central,dfload_arch_dhw_central_fossil,on=['index','doy','nd_id'])
dfload_arch_dhw_central = dfload_arch_dhw_central.set_index('DateTime')
dfload_arch_dhw_central.index = pd.to_datetime(dfload_arch_dhw_central.index)
# %% Separation between A/W (aw) and W/W (ww) heat pumps for central DHW
dfload_arch_dhw_central_aw = dfload_arch_dhw_central.copy()
dfload_arch_dhw_central_aw[['erg_tot', 'erg_tot_fossil',
'erg_tot_retr_1pc', 'erg_tot_retr_2pc', 'erg_tot_retr_1pc_fossil',
'erg_tot_retr_2pc_fossil']] *= 0.615
dfload_arch_dhw_central_ww = dfload_arch_dhw_central.copy()
dfload_arch_dhw_central_ww[['erg_tot', 'erg_tot_fossil',
'erg_tot_retr_1pc', 'erg_tot_retr_2pc', 'erg_tot_retr_1pc_fossil',
'erg_tot_retr_2pc_fossil']] *= 0.385
# %% DSR loads
dfload_arch_dsr_sfh_1day = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dsr_sfh_1day.csv')
dfload_arch_dsr_mfh_1day = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dsr_mfh_1day.csv')
# dfload_arch_dsr_sfh_1day = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dsr_sfh_1day')
# dfload_arch_dsr_mfh_1day = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dsr_mfh_1day')
dfload_arch_dsr_sfh_1h = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dsr_sfh_1h.csv')
dfload_arch_dsr_sfh_1h['DateTime'] = dfload_arch_dsr_sfh_1h['DateTime'].astype('datetime64[ns]')
dfload_arch_dsr_mfh_1h = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_dsr_mfh_1h.csv')
dfload_arch_dsr_mfh_1h['DateTime'] = dfload_arch_dsr_mfh_1h['DateTime'].astype('datetime64[ns]')
# dfload_arch_dsr_sfh_1h = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dsr_sfh_1h')
# dfload_arch_dsr_mfh_1h = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_dsr_mfh_1h')
dfload_arch_dsr_1day = pd.concat([dfload_arch_dsr_sfh_1day,dfload_arch_dsr_mfh_1day])
dfload_arch_dsr_1day['erg_dsr_1day_MW'] = dfload_arch_dsr_1day.erg_dsr_1day/24 # MWh -> MW
dfload_arch_dsr_1h = pd.concat([dfload_arch_dsr_sfh_1h,dfload_arch_dsr_mfh_1h])
dfload_arch_dsr_1h_2015 = dfload_arch_dsr_1h.loc[dfload_arch_dsr_1h.nd_id.str.contains('2015')]
dfload_arch_dsr_1h_2015 = dfload_arch_dsr_1h_2015.reset_index(drop=True)
dferg_arch_dsr = dfload_arch_dsr_1day.groupby('nd_id')['erg_dsr_1day'].sum().reset_index()
# dferg_arch_dsr_1h = dfload_arch_dsr_1h.groupby('nd_id')['erg_dsr_1h'].sum().reset_index()
dferg_arch_dsr_1day = dfload_arch_dsr_1day.groupby('nd_id')['erg_dsr_1day'].sum().reset_index()
dferg_arch_dsr['nd_id_new'] = dferg_arch_dsr.nd_id.str[0:13]
dferg_arch_dsr_1day['nd_id_new'] = dferg_arch_dsr.nd_id.str[0:13]
dferg_arch_dsr_1day['erg_dsr_2015'] = dferg_arch_dsr_1day.loc[dferg_arch_dsr_1day.nd_id.str.contains('DSR_2015')].erg_dsr_1day
dferg_arch_dsr_1day['erg_dsr_2035'] = dferg_arch_dsr_1day.loc[dferg_arch_dsr_1day.nd_id.str.contains('DSR_2035')].erg_dsr_1day
dferg_arch_dsr_1day['erg_dsr_2050'] = dferg_arch_dsr_1day.loc[dferg_arch_dsr_1day.nd_id.str.contains('DSR_2050')].erg_dsr_1day
dferg_arch_dsr_1day['erg_dsr_best_2035'] = dferg_arch_dsr_1day.loc[dferg_arch_dsr_1day.nd_id.str.contains('DSR_best_2035')].erg_dsr_1day
# dferg_arch_dsr_1day = dferg_arch_dsr_1day.fillna(method='ffill').loc[dferg_arch_dsr_1day.nd_id.str.contains('2050')].reset_index(drop=True)
dferg_arch_dsr_1day = dferg_arch_dsr_1day.fillna(method='ffill').loc[dferg_arch_dsr_1day.nd_id.str.contains('DSR_best_2035')].reset_index(drop=True)
# %% EE loads just others (without DSR hourly demand)
dfload_arch_ee_sfh = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_ee_sfh_diff_wo_dsr.csv')
dfload_arch_ee_sfh['DateTime'] = dfload_arch_ee_sfh['DateTime'].astype('datetime64[ns]')
dfload_arch_ee_mfh = pd.read_csv(base_dir + '/dsr_ee_dhw/demand/dmnd_archetypes_ee_mfh_diff_wo_dsr.csv')
dfload_arch_ee_mfh['DateTime'] = dfload_arch_ee_mfh['DateTime'].astype('datetime64[ns]')
# dfload_arch_ee_sfh = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_ee_sfh_diff_wo_dsr')
# dfload_arch_ee_mfh = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_ee_mfh_diff_wo_dsr')
dfload_arch_ee = pd.concat([dfload_arch_ee_sfh,dfload_arch_ee_mfh])
dferg_arch_ee = dfload_arch_ee.groupby('nd_id')['erg_diff'].sum().reset_index()
# dferg_arch_ee['nd_id_new'] = dferg_arch_dhw.nd_id
# %% COP profile
#
#dfcop_pr_35 = aql.read_sql('grimsel_1', 'profiles_raw','cop_35')
dfcop_pr_60 = pd.read_csv(base_dir + '/dsr_ee_dhw/cop/cop_60.csv')
# dfcop_pr_60 = aql.read_sql('grimsel_1', 'profiles_raw','cop_60')
#
dfcop_pr_60_dhw = dfcop_pr_60 # note: a reference, not a copy; the pp_id replacement below also changes dfcop_pr_60
dfcop_pr_60_dhw['pp_id'] = dfcop_pr_60.pp_id.str.replace('HP','DHW')
#dfcop_pr_35 = dfcop_pr_35.set_index('DateTime')
#dfcop_pr_35.index = pd.to_datetime(dfcop_pr_35.index)
# dfcop_pr_60 = dfcop_pr_60.set_index('DateTime')
# dfcop_pr_60.index = pd.to_datetime(dfcop_pr_60.index)
dfcop_pr_60_dhw = dfcop_pr_60_dhw.set_index('DateTime')
dfcop_pr_60_dhw.index = pd.to_datetime(dfcop_pr_60_dhw.index)
#
#dfcop_pr_35['hy'] = 24*dfcop_pr_35.doy - 24
dfcop_pr_60_dhw['hy'] = 24*dfcop_pr_60_dhw.doy - 24
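# 'hy' is the hour of year at which each day begins: doy = 1 -> hy = 0, doy = 2 -> hy = 24, ..., doy = 365 -> hy = 8736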
# %% ~~~~~~~~~~~~~~~~~~ DEF_NODE (we add DSR nodes)
color_nd = {'MFH_RUR_0_DSR': '#472503',
'MFH_RUR_1_DSR': '#472503',
'MFH_RUR_2_DSR': '#472503',
'MFH_RUR_3_DSR': '#472503',
'MFH_SUB_0_DSR': '#041FA3',
'MFH_SUB_1_DSR': '#041FA3',
'MFH_SUB_2_DSR': '#041FA3',
'MFH_SUB_3_DSR': '#041FA3',
'MFH_URB_0_DSR': '#484A4B',
'MFH_URB_1_DSR': '#484A4B',
'MFH_URB_2_DSR': '#484A4B',
'MFH_URB_3_DSR': '#484A4B',
'SFH_RUR_0_DSR': '#0A81EE',
'SFH_RUR_1_DSR': '#0A81EE',
'SFH_RUR_2_DSR': '#0A81EE',
'SFH_RUR_3_DSR': '#0A81EE',
'SFH_SUB_0_DSR': '#6D3904',
'SFH_SUB_1_DSR': '#6D3904',
'SFH_SUB_2_DSR': '#6D3904',
'SFH_SUB_3_DSR': '#6D3904',
'SFH_URB_0_DSR': '#818789',
'SFH_URB_1_DSR': '#818789',
'SFH_URB_2_DSR': '#818789',
'SFH_URB_3_DSR': '#818789'
}
col_nd_df = | pd.DataFrame.from_dict(color_nd, orient='index') | pandas.DataFrame.from_dict |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
col_index=['c1', 'c2', 'c3']
df=pd.DataFrame([[11,12,13],[21,22,23],[31,32,33]], columns=col_index)
ef=pd.DataFrame([[1100,1200,1300],[2100,2200,2300],[3100,3200,3300]], columns=col_index)
concat=pd.concat([df, ef], axis=1)
concat.reset_index() # returns a new DataFrame and is discarded here; assign the result (or pass inplace=True) to keep it
print(concat)
df=pd.DataFrame([[11,12,13],[21,22,23],[31,32,33]],
columns=col_index,
index=[1, 2, 3])
ef=pd.DataFrame([[1100,1200,1300],[2100,2200,2300],[3100,3200,3300]],
columns=col_index,
index=[2, 3, 4])
concat=pd.concat([df, ef])
concat.reset_index() # returns a new DataFrame and is discarded here; assign the result (or pass inplace=True) to keep it
print(concat)
datafile='earthquakes.csv'
#useful if you want the directory this script is in
if '__file__' in dir():
path, _=os.path.split(__file__)
else: path=os.getcwd()
filename=os.path.join(path, datafile)
df=pd.read_csv(filename, parse_dates=[0]) #dialect defaults to Excel
df.drop(df.columns[[5,6,7,8,9,11,12,14,15,16,17,18,20,21]], axis=1, inplace=True)
df['place']=df['place'].str.split(',').str.get(1)
df['place']=df['place'].str.strip()
df['place']=df['place'].str.replace("CA", "California")
df['year']=df['time'].dt.year
df['mon']=df['time'].dt.month
df['day']=df['time'].dt.day
#df.set_index('time', inplace=True)
pd.set_option("display.width", 100)
pt= | pd.pivot_table(df, index=['place', 'year', 'mon']) | pandas.pivot_table |
#!/usr/bin/env python
# Copyright 2016 DIANA-HEP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import random
import sys
import time
import unittest
import numpy
import histogrammar as hg
from histogrammar.defs import Factory
from histogrammar.primitives.average import Average
from histogrammar.primitives.bag import Bag
from histogrammar.primitives.bin import Bin
from histogrammar.primitives.categorize import Categorize
from histogrammar.primitives.centrallybin import CentrallyBin
from histogrammar.primitives.collection import Branch, Index, Label, UntypedLabel
from histogrammar.primitives.count import Count
from histogrammar.primitives.deviate import Deviate
from histogrammar.primitives.fraction import Fraction
from histogrammar.primitives.irregularlybin import IrregularlyBin
from histogrammar.primitives.minmax import Minimize, Maximize
from histogrammar.primitives.select import Select
from histogrammar.primitives.sparselybin import SparselyBin
from histogrammar.primitives.stack import Stack
from histogrammar.primitives.sum import Sum
from histogrammar import util
from histogrammar.util import xrange
tolerance = 1e-12
util.relativeTolerance = tolerance
util.absoluteTolerance = tolerance
class Numpy(object):
def __enter__(self):
try:
import numpy
except ImportError:
return None
self.errstate = numpy.geterr()
numpy.seterr(invalid="ignore")
return numpy
def __exit__(self, exc_type, exc_value, traceback):
try:
import numpy
numpy.seterr(**self.errstate)
except ImportError:
pass
class Pandas(object):
def __enter__(self):
try:
import pandas # noqa
except ImportError:
return None
def __exit__(self, exc_type, exc_value, traceback):
try:
import pandas # noqa
except ImportError:
pass
def makeSamples(SIZE, HOLES):
with Numpy() as numpy:
if numpy is None:
return {"empty": None, "positive": None, "boolean": None, "noholes": None, "withholes": None, "withholes2": None}
empty = numpy.array([], dtype=float)
if numpy is not None:
rand = random.Random(12345)
positive = numpy.array([abs(rand.gauss(0, 1)) + 1e-12 for i in xrange(SIZE)])
assert all(x > 0.0 for x in positive)
boolean = positive > 1.5
noholes = numpy.array([rand.gauss(0, 1) for i in xrange(SIZE)])
withholes = numpy.array([rand.gauss(0, 1) for i in xrange(SIZE)])
            for i in xrange(HOLES):
                withholes[rand.randint(0, SIZE - 1)] = float("nan")   # randint is inclusive at both ends, so cap at SIZE - 1
            for i in xrange(HOLES):
                withholes[rand.randint(0, SIZE - 1)] = float("inf")
            for i in xrange(HOLES):
                withholes[rand.randint(0, SIZE - 1)] = float("-inf")
            withholes2 = numpy.array([rand.gauss(0, 1) for i in xrange(SIZE)])
            for i in xrange(HOLES):
                withholes2[rand.randint(0, SIZE - 1)] = float("nan")
            for i in xrange(HOLES):
                withholes2[rand.randint(0, SIZE - 1)] = float("inf")
            for i in xrange(HOLES):
                withholes2[rand.randint(0, SIZE - 1)] = float("-inf")
return {"empty": empty, "positive": positive, "boolean": boolean, "noholes": noholes, "withholes": withholes, "withholes2": withholes2}
def to_ns(x):
"""convert timestamp to nanosec since 1970-1-1"""
import pandas as pd
return pd.to_datetime(x).value
def unit(x):
"""unit return function"""
return x
def get_test_histograms1():
""" Get set 1 of test histograms
"""
# dummy dataset with mixed types
# convert timestamp (col D) to nanosec since 1970-1-1
import pandas as pd
import histogrammar as hg
df = pd.util.testing.makeMixedDataFrame()
df['date'] = df['D'].apply(to_ns)
df['boolT'] = True
df['boolF'] = False
# building 1d-, 2d-, and 3d-histogram (iteratively)
hist1 = hg.Categorize(unit('C'))
hist2 = hg.Bin(5, 0, 5, unit('A'), value=hist1)
hist3 = hg.SparselyBin(origin=pd.Timestamp('2009-01-01').value, binWidth=pd.Timedelta(days=1).value,
quantity=unit('date'), value=hist2)
# fill them
hist1.fill.numpy(df)
hist2.fill.numpy(df)
hist3.fill.numpy(df)
return df, hist1, hist2, hist3
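# Usage sketch (illustrative only, mirroring the assertions in TestPandas further below):
#
# df, hist1, hist2, hist3 = get_test_histograms1()
# assert hist1.n_dim == 1 and hist2.n_dim == 2 and hist3.n_dim == 3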
def get_test_histograms2():
""" Get set 2 of test histograms
"""
# dummy dataset with mixed types
# convert timestamp (col D) to nanosec since 1970-1-1
import pandas as pd
import histogrammar as hg
df = pd.util.testing.makeMixedDataFrame()
# building 1d-, 2d-histogram (iteratively)
hist1 = hg.Categorize(unit('C'))
hist2 = hg.Bin(5, 0, 5, unit('A'), value=hist1)
hist3 = hg.Bin(5, 0, 5, unit('A'))
hist4 = hg.Categorize(unit('C'), value=hist3)
# fill them
hist1.fill.numpy(df)
hist2.fill.numpy(df)
hist3.fill.numpy(df)
hist4.fill.numpy(df)
return df, hist1, hist2, hist3, hist4
class TestNumpy(unittest.TestCase):
def runTest(self):
self.testSum()
self.testAverage()
self.testDeviate()
self.testMinimize()
self.testMaximize()
self.testBin()
self.testBinTrans()
self.testBinAverage()
self.testBinDeviate()
self.testSparselyBin()
self.testSparselyBinTrans()
self.testSparselyBinAverage()
self.testSparselyBinDeviate()
self.testCentrallyBin()
self.testCentrallyBinTrans()
self.testCentrallyBinAverage()
self.testCentrallyBinDeviate()
self.testCategorize()
self.testCategorizeTrans()
self.testFractionBin()
self.testStackBin()
self.testIrregularlyBinBin()
self.testSelectBin()
self.testLabelBin()
self.testUntypedLabelBin()
self.testIndexBin()
self.testBranchBin()
self.testBag()
SIZE = 10000
HOLES = 100
data = makeSamples(SIZE, HOLES)
empty = data["empty"]
positive = data["positive"]
boolean = data["boolean"]
noholes = data["noholes"]
withholes = data["withholes"]
withholes2 = data["withholes2"]
def twosigfigs(self, number):
return round(number, 1 - int(math.floor(math.log10(number))))
def compare(self, name, hnp, npdata, hpy, pydata):
import numpy
npdata2 = npdata.copy()
hnp2 = hnp.copy()
hnp3 = hnp.copy()
hpy2 = hpy.copy()
hpy3 = hpy.copy()
startTime = time.time()
hnp.fill.numpy(npdata)
numpyTime = time.time() - startTime
if pydata.dtype != numpy.unicode_:
for key in npdata:
diff = (npdata[key] != npdata2[key]) & numpy.bitwise_not(
numpy.isnan(npdata[key])) & numpy.bitwise_not(numpy.isnan(npdata2[key]))
if numpy.any(diff):
raise AssertionError("npdata has been modified:\n{0}\n{1}\n{2}\n{3} vs {4}".format(npdata[key], npdata2[key], numpy.nonzero(
diff), npdata[key][numpy.nonzero(diff)[0][0]], npdata2[key][numpy.nonzero(diff)[0][0]]))
hnp2.fill.numpy(npdata)
hnp3.fill.numpy(npdata)
hnp3.fill.numpy(npdata)
assert (hnp + hnp2) == hnp3
assert (hnp2 + hnp) == hnp3
assert (hnp + hnp.zero()) == hnp2
assert (hnp.zero() + hnp) == hnp2
startTime = time.time()
for d in pydata:
if isinstance(d, numpy.unicode_):
d = str(d)
else:
d = float(d)
hpy.fill(d)
pyTime = time.time() - startTime
for h in [hpy2, hpy3, hpy3]:
for d in pydata:
if isinstance(d, numpy.unicode_):
d = str(d)
else:
d = float(d)
h.fill(d)
assert (hpy + hpy) == hpy3
assert (hpy + hpy2) == hpy3
assert (hpy2 + hpy) == hpy3
assert (hpy + hpy.zero()) == hpy2
assert (hpy.zero() + hpy) == hpy2
hnpj = json.dumps(hnp.toJson())
hpyj = json.dumps(hpy.toJson())
if Factory.fromJson(hnp.toJson()) != Factory.fromJson(hpy.toJson()):
raise AssertionError("\n numpy: {0}\npython: {1}".format(hnpj, hpyj))
else:
sys.stderr.write("{0:45s} | numpy: {1:.3f}ms python: {2:.3f}ms = {3:g}X speedup\n".format(
name, numpyTime*1000, pyTime*1000, self.twosigfigs(pyTime/numpyTime)))
assert Factory.fromJson((hnp + hnp2).toJson()) == Factory.fromJson((hpy + hpy2).toJson())
assert Factory.fromJson(hnp3.toJson()) == Factory.fromJson(hpy3.toJson())
# Warmup: apparently, Numpy does some dynamic optimization that needs to warm up...
if empty is not None:
Sum(lambda x: x["empty"]).fill.numpy(data)
Sum(lambda x: x["empty"]).fill.numpy(data)
Sum(lambda x: x["empty"]).fill.numpy(data)
Sum(lambda x: x["empty"]).fill.numpy(data)
Sum(lambda x: x["empty"]).fill.numpy(data)
def testSum(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
self.compare("Sum no data", Sum(lambda x: x["empty"]), self.data, Sum(lambda x: x), self.empty)
self.compare("Sum noholes", Sum(lambda x: x["noholes"]), self.data, Sum(lambda x: x), self.noholes)
self.compare("Sum holes", Sum(lambda x: x["withholes"]), self.data, Sum(lambda x: x), self.withholes)
def testAverage(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
self.compare("Average no data", Average(lambda x: x["empty"]), self.data, Average(lambda x: x), self.empty)
self.compare("Average noholes", Average(
lambda x: x["noholes"]), self.data, Average(lambda x: x), self.noholes)
self.compare("Average holes", Average(lambda x: x["withholes"]),
self.data, Average(lambda x: x), self.withholes)
def testDeviate(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
self.compare("Deviate no data", Deviate(lambda x: x["empty"]), self.data, Deviate(lambda x: x), self.empty)
self.compare("Deviate noholes", Deviate(
lambda x: x["noholes"]), self.data, Deviate(lambda x: x), self.noholes)
self.compare("Deviate holes", Deviate(lambda x: x["withholes"]),
self.data, Deviate(lambda x: x), self.withholes)
def testMinimize(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
self.compare("Minimize no data", Minimize(
lambda x: x["empty"]), self.data, Minimize(lambda x: x), self.empty)
self.compare("Minimize noholes", Minimize(
lambda x: x["noholes"]), self.data, Minimize(lambda x: x), self.noholes)
self.compare("Minimize holes", Minimize(
lambda x: x["withholes"]), self.data, Minimize(lambda x: x), self.withholes)
def testMaximize(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
self.compare("Maximize no data", Maximize(
lambda x: x["empty"]), self.data, Maximize(lambda x: x), self.empty)
self.compare("Maximize noholes", Maximize(
lambda x: x["noholes"]), self.data, Maximize(lambda x: x), self.noholes)
self.compare("Maximize holes", Maximize(
lambda x: x["withholes"]), self.data, Maximize(lambda x: x), self.withholes)
def testBin(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
for bins in [10, 100]:
self.compare("Bin ({0} bins) no data".format(bins), Bin(bins, -3.0, 3.0,
lambda x: x["empty"]), self.data, Bin(bins, -3.0, 3.0, lambda x: x), self.empty)
self.compare("Bin ({0} bins) noholes".format(bins), Bin(bins, -3.0, 3.0,
lambda x: x["noholes"]), self.data, Bin(bins, -3.0, 3.0, lambda x: x), self.noholes)
self.compare("Bin ({0} bins) holes".format(bins), Bin(
bins, -3.0, 3.0, lambda x: x["withholes"]), self.data, Bin(bins, -3.0, 3.0, lambda x: x), self.withholes)
def testBinTrans(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
for bins in [10, 100]:
self.compare("BinTrans ({0} bins) no data".format(bins), Bin(bins, -3.0, 3.0, lambda x: x["empty"], Count(
lambda x: 0.5*x)), self.data, Bin(bins, -3.0, 3.0, lambda x: x, Count(lambda x: 0.5*x)), self.empty)
self.compare("BinTrans ({0} bins) noholes".format(bins), Bin(bins, -3.0, 3.0, lambda x: x["noholes"], Count(
lambda x: 0.5*x)), self.data, Bin(bins, -3.0, 3.0, lambda x: x, Count(lambda x: 0.5*x)), self.noholes)
self.compare("BinTrans ({0} bins) holes".format(bins), Bin(bins, -3.0, 3.0, lambda x: x["withholes"], Count(
lambda x: 0.5*x)), self.data, Bin(bins, -3.0, 3.0, lambda x: x, Count(lambda x: 0.5*x)), self.withholes)
def testBinAverage(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
for bins in [10, 100]:
self.compare("BinAverage ({0} bins) no data".format(bins), Bin(bins, -3.0, 3.0, lambda x: x["empty"], Average(
lambda x: x["empty"])), self.data, Bin(bins, -3.0, 3.0, lambda x: x, Average(lambda x: x)), self.empty)
self.compare("BinAverage ({0} bins) noholes".format(bins), Bin(bins, -3.0, 3.0, lambda x: x["noholes"], Average(
lambda x: x["noholes"])), self.data, Bin(bins, -3.0, 3.0, lambda x: x, Average(lambda x: x)), self.noholes)
self.compare("BinAverage ({0} bins) holes".format(bins), Bin(bins, -3.0, 3.0, lambda x: x["withholes"], Average(
lambda x: x["withholes"])), self.data, Bin(bins, -3.0, 3.0, lambda x: x, Average(lambda x: x)), self.withholes)
def testBinDeviate(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
for bins in [10, 100]:
self.compare("BinDeviate ({0} bins) no data".format(bins), Bin(bins, -3.0, 3.0, lambda x: x["empty"], Deviate(
lambda x: x["empty"])), self.data, Bin(bins, -3.0, 3.0, lambda x: x, Deviate(lambda x: x)), self.empty)
self.compare("BinDeviate ({0} bins) noholes".format(bins), Bin(bins, -3.0, 3.0, lambda x: x["noholes"], Deviate(
lambda x: x["noholes"])), self.data, Bin(bins, -3.0, 3.0, lambda x: x, Deviate(lambda x: x)), self.noholes)
self.compare("BinDeviate ({0} bins) holes".format(bins), Bin(bins, -3.0, 3.0, lambda x: x["withholes"], Deviate(
lambda x: x["withholes"])), self.data, Bin(bins, -3.0, 3.0, lambda x: x, Deviate(lambda x: x)), self.withholes)
def testSparselyBin(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
self.compare("SparselyBin no data", SparselyBin(
0.1, lambda x: x["empty"]), self.data, SparselyBin(0.1, lambda x: x), self.empty)
self.compare("SparselyBin noholes", SparselyBin(
0.1, lambda x: x["noholes"]), self.data, SparselyBin(0.1, lambda x: x), self.noholes)
self.compare("SparselyBin holes", SparselyBin(
0.1, lambda x: x["withholes"]), self.data, SparselyBin(0.1, lambda x: x), self.withholes)
def testSparselyBinTrans(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
self.compare("SparselyBinTrans no data", SparselyBin(0.1, lambda x: x["empty"], Count(
lambda x: 0.5*x)), self.data, SparselyBin(0.1, lambda x: x, Count(lambda x: 0.5*x)), self.empty)
self.compare("SparselyBinTrans noholes", SparselyBin(0.1, lambda x: x["noholes"], Count(
lambda x: 0.5*x)), self.data, SparselyBin(0.1, lambda x: x, Count(lambda x: 0.5*x)), self.noholes)
self.compare("SparselyBinTrans holes", SparselyBin(0.1, lambda x: x["withholes"], Count(
lambda x: 0.5*x)), self.data, SparselyBin(0.1, lambda x: x, Count(lambda x: 0.5*x)), self.withholes)
def testSparselyBinAverage(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
self.compare("SparselyBinAverage no data", SparselyBin(0.1, lambda x: x["empty"], Average(
lambda x: x["empty"])), self.data, SparselyBin(0.1, lambda x: x, Average(lambda x: x)), self.empty)
self.compare("SparselyBinAverage noholes", SparselyBin(0.1, lambda x: x["noholes"], Average(
lambda x: x["noholes"])), self.data, SparselyBin(0.1, lambda x: x, Average(lambda x: x)), self.noholes)
self.compare("SparselyBinAverage holes", SparselyBin(0.1, lambda x: x["withholes"], Average(
lambda x: x["withholes"])), self.data, SparselyBin(0.1, lambda x: x, Average(lambda x: x)), self.withholes)
def testSparselyBinDeviate(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
self.compare("SparselyBinDeviate no data", SparselyBin(0.1, lambda x: x["empty"], Deviate(
lambda x: x["empty"])), self.data, SparselyBin(0.1, lambda x: x, Deviate(lambda x: x)), self.empty)
self.compare("SparselyBinDeviate noholes", SparselyBin(0.1, lambda x: x["noholes"], Deviate(
lambda x: x["noholes"])), self.data, SparselyBin(0.1, lambda x: x, Deviate(lambda x: x)), self.noholes)
self.compare("SparselyBinDeviate holes", SparselyBin(0.1, lambda x: x["withholes"], Deviate(
lambda x: x["withholes"])), self.data, SparselyBin(0.1, lambda x: x, Deviate(lambda x: x)), self.withholes)
def testCentrallyBin(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
centers = [-3.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 3.0]
self.compare("CentrallyBin no data", CentrallyBin(
centers, lambda x: x["empty"]), self.data, CentrallyBin(centers, lambda x: x), self.empty)
self.compare("CentrallyBin noholes", CentrallyBin(
centers, lambda x: x["noholes"]), self.data, CentrallyBin(centers, lambda x: x), self.noholes)
self.compare("CentrallyBin holes", CentrallyBin(
centers, lambda x: x["withholes"]), self.data, CentrallyBin(centers, lambda x: x), self.withholes)
def testCentrallyBinTrans(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
centers = [-3.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 3.0]
self.compare("CentrallyBinTrans no data", CentrallyBin(centers, lambda x: x["empty"], Count(
lambda x: 0.5*x)), self.data, CentrallyBin(centers, lambda x: x, Count(lambda x: 0.5*x)), self.empty)
self.compare("CentrallyBinTrans noholes", CentrallyBin(centers, lambda x: x["noholes"], Count(
lambda x: 0.5*x)), self.data, CentrallyBin(centers, lambda x: x, Count(lambda x: 0.5*x)), self.noholes)
self.compare("CentrallyBinTrans holes", CentrallyBin(centers, lambda x: x["withholes"], Count(
lambda x: 0.5*x)), self.data, CentrallyBin(centers, lambda x: x, Count(lambda x: 0.5*x)), self.withholes)
def testCentrallyBinAverage(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
centers = [-3.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 3.0]
self.compare("CentrallyBinAverage no data", CentrallyBin(centers, lambda x: x["empty"], Average(
lambda x: x["empty"])), self.data, CentrallyBin(centers, lambda x: x, Average(lambda x: x)), self.empty)
self.compare("CentrallyBinAverage noholes", CentrallyBin(centers, lambda x: x["noholes"], Average(
lambda x: x["noholes"])), self.data, CentrallyBin(centers, lambda x: x, Average(lambda x: x)), self.noholes)
self.compare("CentrallyBinAverage holes", CentrallyBin(centers, lambda x: x["withholes"], Average(
lambda x: x["withholes"])), self.data, CentrallyBin(centers, lambda x: x, Average(lambda x: x)), self.withholes)
def testCentrallyBinDeviate(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
centers = [-3.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 3.0]
self.compare("CentrallyBinDeviate no data", CentrallyBin(centers, lambda x: x["empty"], Deviate(
lambda x: x["empty"])), self.data, CentrallyBin(centers, lambda x: x, Deviate(lambda x: x)), self.empty)
self.compare("CentrallyBinDeviate noholes", CentrallyBin(centers, lambda x: x["noholes"], Deviate(
lambda x: x["noholes"])), self.data, CentrallyBin(centers, lambda x: x, Deviate(lambda x: x)), self.noholes)
self.compare("CentrallyBinDeviate holes", CentrallyBin(centers, lambda x: x["withholes"], Deviate(
lambda x: x["withholes"])), self.data, CentrallyBin(centers, lambda x: x, Deviate(lambda x: x)), self.withholes)
def testCategorize(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
self.compare("Categorize no data", Categorize(lambda x: numpy.array(numpy.floor(
x["empty"]), dtype="<U5")), self.data, Categorize(lambda x: x), numpy.array(numpy.floor(self.empty), dtype="<U5"))
self.compare("Categorize noholes", Categorize(lambda x: numpy.array(numpy.floor(
x["noholes"]), dtype="<U5")), self.data, Categorize(lambda x: x), numpy.array(numpy.floor(self.noholes), dtype="<U5"))
self.compare("Categorize holes", Categorize(lambda x: numpy.array(numpy.floor(
x["withholes"]), dtype="<U5")), self.data, Categorize(lambda x: x), numpy.array(numpy.floor(self.withholes), dtype="<U5"))
def testCategorizeTrans(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
self.compare("CategorizeTrans no data", Categorize(lambda x: numpy.array(numpy.floor(x["empty"]), dtype="<U5"), Count(
lambda x: 0.5*x)), self.data, Categorize(lambda x: x, Count(lambda x: 0.5*x)), numpy.array(numpy.floor(self.empty), dtype="<U5"))
self.compare("CategorizeTrans noholes", Categorize(lambda x: numpy.array(numpy.floor(x["noholes"]), dtype="<U5"), Count(
lambda x: 0.5*x)), self.data, Categorize(lambda x: x, Count(lambda x: 0.5*x)), numpy.array(numpy.floor(self.noholes), dtype="<U5"))
self.compare("CategorizeTrans holes", Categorize(lambda x: numpy.array(numpy.floor(x["withholes"]), dtype="<U5"), Count(
lambda x: 0.5*x)), self.data, Categorize(lambda x: x, Count(lambda x: 0.5*x)), numpy.array(numpy.floor(self.withholes), dtype="<U5"))
def testFractionBin(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
self.compare("FractionBin no data", Fraction(lambda x: x["empty"], Bin(
100, -3.0, 3.0, lambda x: x["empty"])), self.data, Fraction(lambda x: x, Bin(100, -3.0, 3.0, lambda x: x)), self.empty)
self.compare("FractionBin noholes", Fraction(lambda x: x["noholes"], Bin(
100, -3.0, 3.0, lambda x: x["noholes"])), self.data, Fraction(lambda x: x, Bin(100, -3.0, 3.0, lambda x: x)), self.noholes)
self.compare("FractionBin holes", Fraction(lambda x: x["withholes"], Bin(
100, -3.0, 3.0, lambda x: x["withholes"])), self.data, Fraction(lambda x: x, Bin(100, -3.0, 3.0, lambda x: x)), self.withholes)
def testStackBin(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
cuts = [-3.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 3.0]
self.compare("StackBin no data", Stack(cuts, lambda x: x["empty"], Bin(
100, -3.0, 3.0, lambda x: x["empty"])), self.data, Stack(cuts, lambda x: x, Bin(100, -3.0, 3.0, lambda x: x)), self.empty)
self.compare("StackBin noholes", Stack(cuts, lambda x: x["noholes"], Bin(
100, -3.0, 3.0, lambda x: x["noholes"])), self.data, Stack(cuts, lambda x: x, Bin(100, -3.0, 3.0, lambda x: x)), self.noholes)
self.compare("StackBin holes", Stack(cuts, lambda x: x["withholes"], Bin(
100, -3.0, 3.0, lambda x: x["withholes"])), self.data, Stack(cuts, lambda x: x, Bin(100, -3.0, 3.0, lambda x: x)), self.withholes)
def testIrregularlyBinBin(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
cuts = [-3.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 3.0]
self.compare("IrregularlyBinBin no data", IrregularlyBin(cuts, lambda x: x["empty"], Bin(
100, -3.0, 3.0, lambda x: x["empty"])), self.data, IrregularlyBin(cuts, lambda x: x, Bin(100, -3.0, 3.0, lambda x: x)), self.empty)
self.compare("IrregularlyBinBin noholes", IrregularlyBin(cuts, lambda x: x["noholes"], Bin(
100, -3.0, 3.0, lambda x: x["noholes"])), self.data, IrregularlyBin(cuts, lambda x: x, Bin(100, -3.0, 3.0, lambda x: x)), self.noholes)
self.compare("IrregularlyBinBin holes", IrregularlyBin(cuts, lambda x: x["withholes"], Bin(
100, -3.0, 3.0, lambda x: x["withholes"])), self.data, IrregularlyBin(cuts, lambda x: x, Bin(100, -3.0, 3.0, lambda x: x)), self.withholes)
def testSelectBin(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
self.compare("SelectBin no data", Select(lambda x: x["empty"], Bin(
100, -3.0, 3.0, lambda x: x["empty"])), self.data, Select(lambda x: x, Bin(100, -3.0, 3.0, lambda x: x)), self.empty)
self.compare("SelectBin noholes", Select(lambda x: x["noholes"], Bin(
100, -3.0, 3.0, lambda x: x["noholes"])), self.data, Select(lambda x: x, Bin(100, -3.0, 3.0, lambda x: x)), self.noholes)
self.compare("SelectBin holes", Select(lambda x: x["withholes"], Bin(
100, -3.0, 3.0, lambda x: x["withholes"])), self.data, Select(lambda x: x, Bin(100, -3.0, 3.0, lambda x: x)), self.withholes)
def testLabelBin(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
self.compare("LabelBin no data", Label(
x=Bin(100, -3.0, 3.0, lambda x: x["empty"])), self.data, Label(x=Bin(100, -3.0, 3.0, lambda x: x)), self.empty)
self.compare("LabelBin noholes", Label(
x=Bin(100, -3.0, 3.0, lambda x: x["noholes"])), self.data, Label(x=Bin(100, -3.0, 3.0, lambda x: x)), self.noholes)
self.compare("LabelBin holes", Label(
x=Bin(100, -3.0, 3.0, lambda x: x["withholes"])), self.data, Label(x=Bin(100, -3.0, 3.0, lambda x: x)), self.withholes)
def testUntypedLabelBin(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
self.compare("UntypedLabelBin no data", UntypedLabel(
x=Bin(100, -3.0, 3.0, lambda x: x["empty"])), self.data, UntypedLabel(x=Bin(100, -3.0, 3.0, lambda x: x)), self.empty)
self.compare("UntypedLabelBin noholes", UntypedLabel(
x=Bin(100, -3.0, 3.0, lambda x: x["noholes"])), self.data, UntypedLabel(x=Bin(100, -3.0, 3.0, lambda x: x)), self.noholes)
self.compare("UntypedLabelBin holes", UntypedLabel(x=Bin(
100, -3.0, 3.0, lambda x: x["withholes"])), self.data, UntypedLabel(x=Bin(100, -3.0, 3.0, lambda x: x)), self.withholes)
def testIndexBin(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
self.compare("IndexBin no data", Index(
Bin(100, -3.0, 3.0, lambda x: x["empty"])), self.data, Index(Bin(100, -3.0, 3.0, lambda x: x)), self.empty)
self.compare("IndexBin noholes", Index(
Bin(100, -3.0, 3.0, lambda x: x["noholes"])), self.data, Index(Bin(100, -3.0, 3.0, lambda x: x)), self.noholes)
self.compare("IndexBin holes", Index(
Bin(100, -3.0, 3.0, lambda x: x["withholes"])), self.data, Index(Bin(100, -3.0, 3.0, lambda x: x)), self.withholes)
def testBranchBin(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
self.compare("BranchBin no data", Branch(
Bin(100, -3.0, 3.0, lambda x: x["empty"])), self.data, Branch(Bin(100, -3.0, 3.0, lambda x: x)), self.empty)
self.compare("BranchBin noholes", Branch(
Bin(100, -3.0, 3.0, lambda x: x["noholes"])), self.data, Branch(Bin(100, -3.0, 3.0, lambda x: x)), self.noholes)
self.compare("BranchBin holes", Branch(
Bin(100, -3.0, 3.0, lambda x: x["withholes"])), self.data, Branch(Bin(100, -3.0, 3.0, lambda x: x)), self.withholes)
def testBag(self):
with Numpy() as numpy:
if numpy is None:
return
sys.stderr.write("\n")
self.compare("Bag no data", Bag(lambda x: x["empty"], "N"), self.data, Bag(lambda x: x, "N"), self.empty)
self.compare("Bag noholes", Bag(lambda x: x["noholes"], "N"),
self.data, Bag(lambda x: x, "N"), self.noholes)
self.compare("Bag holes", Bag(lambda x: x["withholes"], "N"),
self.data, Bag(lambda x: x, "N"), self.withholes)
class TestPandas(unittest.TestCase):
def runTest(self):
self.test_n_dim()
self.test_n_bins()
self.test_num_bins()
self.test_most_probable_value()
self.test_bin_labels()
self.test_bin_centers()
self.test_bin_entries()
self.test_bin_edges()
self.test_bin_width()
self.test_irregular()
self.test_centrally()
def test_n_dim(self):
""" Test dimension assigned to a histogram
"""
with Pandas() as pd:
if pd is None:
return
with Numpy() as np: # noqa
if numpy is None:
return
sys.stderr.write("\n")
df, hist1, hist2, hist3 = get_test_histograms1()
hist0 = hg.Count()
assert hist0.n_dim == 0
assert hist1.n_dim == 1
assert hist2.n_dim == 2
assert hist3.n_dim == 3
def test_datatype(self):
""" Test dimension assigned to a histogram
"""
with Pandas() as pd:
if pd is None:
return
with Numpy() as np: # noqa
if numpy is None:
return
sys.stderr.write("\n")
df, hist1, hist2, hist3 = get_test_histograms1()
assert hist1.datatype == str
np.testing.assert_array_equal(hist2.datatype, [numpy.number, str])
np.testing.assert_array_equal(hist3.datatype, [numpy.datetime64, numpy.number, str])
def test_n_bins(self):
""" Test getting the number of allocated bins
"""
with Pandas() as pd:
if pd is None:
return
with Numpy() as np: # noqa
if numpy is None:
return
sys.stderr.write("\n")
df, hist1, hist2, hist3 = get_test_histograms1()
assert hist1.n_bins == 5
assert hist2.n_bins == 5
assert hist3.n_bins == 7
def test_num_bins(self):
""" Test getting the number of bins from lowest to highest bin
"""
with Pandas() as pd:
if pd is None:
return
with Numpy() as np: # noqa
if numpy is None:
return
sys.stderr.write("\n")
df1 = pd.DataFrame({'A': [0, 2, 4, 5, 7, 9, 11, 13, 13, 15]})
df2 = | pd.DataFrame({'A': [2, 4, 4, 6, 8, 7, 10, 14, 17, 19]}) | pandas.DataFrame |
"""
Test for amfe-tools module
"""
from unittest import TestCase
import numpy as np
from scipy.sparse import csr_matrix
import pandas as pd
from amfe.tools import invert_dictionary, invert_dictionary_with_iterables
from .tools import CustomDictAssertTest
class ToolsTest(TestCase):
def setUp(self):
self.custom_asserter = CustomDictAssertTest()
def tearDown(self):
pass
def test_invert_dictionary(self):
dictionary = {'a': 1,
2: 'bjk',
5: 'bjk',
9: (1, 2)}
dictionary_desired = {1: 'a',
'bjk': [2, 5],
(1, 2): 9}
dictionary_actual = invert_dictionary(dictionary)
self.custom_asserter.assert_dict_equal(dictionary_actual, dictionary_desired)
def test_invert_dictionary_with_iterables(self):
dictionary = {'a': np.array([1, 3, 8]),
7: np.array([5, 8]),
True: (5,),
('bc', 'de', 5): [2, 4],
None: np.array([4]),
'ij': ('tuple', 1),
False: ('tuple',),
3: 'de',
5: 'df'}
dictionary_desired = {1: ('a', 'ij'),
3: np.array(['a'], dtype=object),
5: np.array([7, True], dtype=object),
8: np.array(['a', 7], dtype=object),
2: [('bc', 'de', 5)],
4: [('bc', 'de', 5), None],
'tuple': ('ij', False),
'd': 5,
'e': 3,
'f': 5}
dictionary_actual = invert_dictionary_with_iterables(dictionary)
self.custom_asserter.assert_dict_equal(dictionary_actual, dictionary_desired)
dictionary = {'a': [8],
7: | pd.Series({'col1': 5, 'col2': 8}) | pandas.Series |
# Copyright <NAME> 2019.
# Copyright Center for Energy Informatics 2018.
# Distributed under the MIT License.
# See accompanying file License.txt, or online at
# https://opensource.org/licenses/MIT
from datetime import datetime, timezone
import pandas as pd
from pandas.util.testing import assert_series_equal
import pystanley
def test_parse_readings() -> None:
path = "/some/path"
readings = [
[1484006400000000000, 12],
[1491004800000000000, -34],
[1491523200000000000, 53]
]
expected = pd.Series(
[12, -34, 53],
index=pd.to_datetime([
1484006400000,
1491004800000,
1491523200000,
],
unit="ms",
utc=True
),
name=str(path)
)
actual = pystanley._parse_readings(path, readings)
assert_series_equal(actual, expected)
def test_to_nanos_timestamp() -> None:
expected = 1491004800000000000
actual = pystanley._to_nanos_timestamp(datetime(2017, 4, 1, tzinfo=timezone.utc))
assert expected == actual
def test_intertwine_series() -> None:
a = | pd.Series([1, 2, 3, 4], index=[1, 3, 4, 9]) | pandas.Series |
from pandas.testing import assert_frame_equal
import pandas as pd
from sparkmagic.utils.utils import coerce_pandas_df_to_numeric_datetime
def test_no_coercing():
records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': u'12'},
{u'buildingID': 1, u'date': u'random', u'temp_diff': u'0adsf'}]
desired_df = pd.DataFrame(records)
df = pd.DataFrame(records)
coerce_pandas_df_to_numeric_datetime(df)
assert_frame_equal(desired_df, df)
def test_date_coercing():
records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': u'12'},
{u'buildingID': 1, u'date': u'6/1/13', u'temp_diff': u'0adsf'}]
desired_df = pd.DataFrame(records)
desired_df["date"] = pd.to_datetime(desired_df["date"])
df = pd.DataFrame(records)
coerce_pandas_df_to_numeric_datetime(df)
assert_frame_equal(desired_df, df)
def test_date_coercing_none_values():
records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': u'12'},
{u'buildingID': 1, u'date': None, u'temp_diff': u'0adsf'}]
desired_df = pd.DataFrame(records)
desired_df["date"] = pd.to_datetime(desired_df["date"])
df = pd.DataFrame(records)
coerce_pandas_df_to_numeric_datetime(df)
assert_frame_equal(desired_df, df)
def test_date_none_values_and_no_coercing():
records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': u'12'},
{u'buildingID': 1, u'date': None, u'temp_diff': u'0adsf'},
{u'buildingID': 1, u'date': u'adsf', u'temp_diff': u'0adsf'}]
desired_df = pd.DataFrame(records)
df = pd.DataFrame(records)
coerce_pandas_df_to_numeric_datetime(df)
assert_frame_equal(desired_df, df)
def test_numeric_coercing():
records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': u'12'},
{u'buildingID': 1, u'date': u'adsf', u'temp_diff': u'0'}]
desired_df = pd.DataFrame(records)
desired_df["temp_diff"] = pd.to_numeric(desired_df["temp_diff"])
df = pd.DataFrame(records)
coerce_pandas_df_to_numeric_datetime(df)
assert_frame_equal(desired_df, df)
def test_numeric_coercing_none_values():
records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': u'12'},
{u'buildingID': 1, u'date': u'asdf', u'temp_diff': None}]
desired_df = | pd.DataFrame(records) | pandas.DataFrame |
"""
script is based off of riboseq_readingFrame_utr3meta
"""
## plotting
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.rcParams['pdf.fonttype'] = 42 # this keeps most text as actual text in PDFs, not outlines
## import dependencies
import sys
import math
import matplotlib.patches as mpatches
import numpy as np
import pandas as pd
import scipy.stats as stats
pd.set_option('display.max_columns', 50)
import seaborn as sns
from pylab import *
import argparse
import importlib
### import libsettings file and add to global namespace
parser= argparse.ArgumentParser()
parser.add_argument('--rootDir', help= 'the root directory containing data and scripts')
parser.add_argument('--libSetFile', help= 'riboseq libsettings file to run riboseq_main')
parser.add_argument('--threadNumb', help= 'number of threads')
args = parser.parse_args()
sys.path.append("%s/riboseq" % args.rootDir)
sys.path.append("%s/riboseq/libsettings" % args.rootDir)
rootDir = args.rootDir
libsetName = args.libSetFile
libset = importlib.import_module("%s" % libsetName)
for attr in dir(libset):
if not attr.startswith("_"):
globals()[attr] = getattr(libset, attr)
threadNumb = str(args.threadNumb)
sys.path.append('%s/riboseq' % rootDir)
### set colors
black = '#000000'
orange = '#ffb000'
cyan = '#63cfff'
red = '#eb4300'
green = '#00c48f'
pink = '#eb68c0'
yellow = '#fff71c'
blue = '#006eb9'
colorList = [black, orange, red, green, blue, yellow, pink]
def log_trans_b10(x):
try:
return math.log(x, 10)
except:
return float(-6.00)
# return float("NaN")
def log_trans_b2(x):
try:
return math.log(x, 2)
except:
# return float("NaN")
return float(-15.00) # set arbitrarily low value
def corrfunc(x, y, **kws):
r, _ = stats.pearsonr(x, y)
rho, _ = stats.spearmanr(x, y)
ax = plt.gca()
ax.annotate("r = {:.3f}".format(r),
xy=(.1, .9), xycoords=ax.transAxes)
ax.annotate(u"p = {:.3f}".format(rho),
xy=(.1, .85), xycoords=ax.transAxes)
def load_countTables():
FPassignpath = "%s/FPassignment/%s/%s" % (rootpath, genome_name, experiment)
namelist = []
dflist= []
# dflu3 = []
for samp in samplelist:
###
dftemp = | pd.read_csv('%s/%s/countTables/%s_fl_rpm_28to35_countTable_rpkm_utr3adj.csv' % (FPassignpath, samp, samp)) | pandas.read_csv |
import abc
import time, random
import pandas as pd
import os
import numpy as np
import benchutils as utils
import knowledgebases
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_selection import RFE, VarianceThreshold
from sklearn import preprocessing
class FeatureSelectorFactory():
"""Singleton class.
Python code encapsulates it in a way that is not shown in Sphinx, so have a look at the descriptions in the source code.
Creates feature selector object based on a given name.
New feature selection approaches must be registered here.
Names for feature selectors must follow to a particular scheme, with keywords separated by _:
- first keyword is the actual selector name
- if needed, second keyword is the knowledge base
- if needed, third keyword is the (traditional) approach to be combined
Examples:
- Traditional Approaches have only one keyword, e.g. InfoGain or ANOVA
- LassoPenalty_KEGG provides KEGG information to the LassoPenalty feature selection approach
- Weighted_KEGG_InfoGain --> Factory creates an instance of KBweightedSelector which uses KEGG as knowledge base and InfoGain as traditional selector.
While the focus here lies on the combination of traditional approaches with prior biological knowledge, it is theoretically possible to use ANY selector object for combination that inherits from :class:`FeatureSelector`.
:param config: configuration parameters for UMLS web service as specified in config file.
:type config: dict
"""
class __FeatureSelectorFactory():
def createFeatureSelector(self, name):
"""Create selector from a given name.
Separates creation process into (traditional) approaches (only one keyword), approaches requiring a knowledge base, and approaches requiring both a knowledge base and another selector, e.g. a traditional one.
:param name: selector name following the naming conventions: first keyword is the actual selector name, second keyword is the knowledge base, third keyword another selector to combine. Keywords must be separated by "_". Example: Weighted_KEGG_InfoGain
:type name: str
:return: instance of a feature selector implementation.
:rtype: :class:`FeatureSelector` or inheriting class
"""
parts = name.split("_")
if len(parts) == 1:
return self.createTraditionalSelector(name)
elif len(parts) == 2:
return self.createIntegrativeSelector(parts[0], parts[1])
elif len(parts) == 3:
return self.createCombinedSelector(parts[0], parts[1], parts[2])
utils.logError("ERROR: The provided selector name does not correspond to the expected format. "
"A selector name should consist of one or more keywords separated by _. "
"The first keyword is the actual approach (e.g. weighted, or a traditional approach), "
"the second keyword corresponds to a knowledge base to use (e.g. KEGG),"
"the third keyword corresponds to a traditional selector to use (e.g. when using a modifying or combining approach")
exit()
def createTraditionalSelector(self, selectorName):
"""Creates a (traditional) selector (without a knowledge base) from a given name.
Register new implementations of a (traditional) selector here.
Stops processing if the selector could not be found.
:param selectorName: selector name
:type selectorName: str
:return: instance of a feature selector implementation.
:rtype: :class:`FeatureSelector` or inheriting class
"""
if selectorName == "Random":
return RandomSelector()
if selectorName == "VB-FS":
return VarianceSelector()
if selectorName == "Variance":
return Variance2Selector()
if selectorName == "ANOVA":
return AnovaSelector()
if selectorName == "mRMR":
return MRMRSelector()
if selectorName == "SVMpRFE":
return SVMRFESelector()
# RUN WEKA FEATURE SELECTION AS SELECTED
if selectorName == "InfoGain":
return InfoGainSelector()
if selectorName == "ReliefF":
return ReliefFSelector()
#if "-RFE" in selectorName or "-SFS" in selectorName: -- SFS is currently disabled because sometimes the coef_ param is missing and error is thrown
if "-RFE" in selectorName:
return WrapperSelector(selectorName)
if selectorName == "Lasso":
return LassoSelector()
if selectorName == "RandomForest":
return RandomForestSelector()
utils.logError("ERROR: The listed selector " + selectorName + " is not available. See the documentation for available selectors. Stop execution.")
exit()
def createIntegrativeSelector(self, selectorName, kb):
"""Creates a feature selector using a knowledge base from the given selector and knowledge base names.
            Register new implementations of prior knowledge selectors that do not require a (traditional) selector here.
Stops processing if the selector could not be found.
:param selectorName: selector name
:type selectorName: str
:param kb: knowledge base name
:type kb: str
:return: instance of a feature selector implementation.
:rtype: :class:`FeatureSelector` or inheriting class
"""
kbfactory = knowledgebases.KnowledgeBaseFactory()
knowledgebase = kbfactory.createKnowledgeBase(kb)
if selectorName == "NetworkActivity":
featuremapper = PathwayActivityMapper()
return NetworkActivitySelector(knowledgebase, featuremapper)
if selectorName == "CorgsNetworkActivity":
featuremapper = CORGSActivityMapper()
return NetworkActivitySelector(knowledgebase, featuremapper)
if selectorName == "LassoPenalty":
return LassoPenalty(knowledgebase)
if selectorName == "KBonly":
return KbSelector(knowledgebase)
utils.logError("ERROR: The listed selector " + selectorName + " is not available. See the documentation for available selectors. Stop execution.")
exit()
def createCombinedSelector(self, selectorName, trad, kb):
"""Creates a feature selector that combines a knowledge base and another feature selector based on the given names.
Register new implementations of a prior knowledge selector that requires another selector here.
Stops processing if the selector could not be found.
:param selectorName: selector name
:type selectorName: str
:param trad: name of the (traditional) feature selector.
:type trad: str
:param kb: knowledge base name
:type kb: str
:return: instance of a feature selector implementation.
:rtype: :class:`FeatureSelector` or inheriting class
"""
tradSelector = self.createTraditionalSelector(trad)
kbfactory = knowledgebases.KnowledgeBaseFactory()
knowledgebase = kbfactory.createKnowledgeBase(kb)
if selectorName == "Postfilter":
return PostFilterSelector(knowledgebase, tradSelector)
if selectorName == "Prefilter":
return PreFilterSelector(knowledgebase, tradSelector)
if selectorName == "Extension":
return ExtensionSelector(knowledgebase, tradSelector)
if selectorName == "Weighted":
return KBweightedSelector(knowledgebase, tradSelector)
utils.logError("ERROR: The listed selector " + selectorName + " is not available. See the documentation for available selectors. Stop execution.")
exit()
instance = None
def __init__(self):
if not FeatureSelectorFactory.instance:
FeatureSelectorFactory.instance = FeatureSelectorFactory.__FeatureSelectorFactory()
def __getattr__(self, name):
return getattr(self.instance, name)
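# Illustrative usage sketch (hypothetical helper with placeholder paths): the factory is the
# intended entry point for obtaining a selector by name, after which parameters are set and the
# selection is run. Selector names follow the naming scheme documented in FeatureSelectorFactory.
def _example_factory_usage(inputPath="/data/dataset.csv", outputDir="/results/", loggingDir="/logs/"):
    selector = FeatureSelectorFactory().createFeatureSelector("ANOVA")
    selector.setParams(inputPath, outputDir, loggingDir)
    return selector.selectFeatures()  # returns the location of the written ranking file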
class FeatureSelector:
"""Abstract super class for feature selection functionality.
Every feature selection class has to inherit from this class and implement its :meth:`FeatureSelector.selectFeatures` method and - if necessary - its :meth:`FeatureSelector.setParams` method.
Once created, feature selection can be triggered by first setting parameters (input, output, etc) as needed with :meth:`FeatureSelector.setParams`.
The actual feature selection is triggered by invoking :meth:`FeatureSelector.selectFeatures`.
:param input: absolute path to input dataset.
:type input: str
:param output: absolute path to output directory (where the ranking will be stored).
:type output: str
:param dataset: the dataset for which to select features. Will be loaded dynamically based on self.input at first usage.
:type dataset: :class:`pandas.DataFrame`
:param dataConfig: config parameters for input data set.
:type dataConfig: dict
:param name: selector name
:type name: str
"""
def __init__(self, name):
self.input = None
self.output = None
self.dataset = None
self.loggingDir = None
self.dataConfig = utils.getConfig("Dataset")
self.setTimeLogs(utils.createTimeLog())
self.enableLogFlush()
self.name = name
super().__init__()
@abc.abstractmethod
def selectFeatures(self):
"""Abstract.
Invoke feature selection functionality in this method when implementing a new selector
:return: absolute path to the output ranking file.
:rtype: str
"""
pass
def getTimeLogs(self):
"""Gets all logs for this selector.
:return: dataframe of logged events containing start/end time, duration, and a short description.
:rtype: :class:`pandas.DataFrame`
"""
return self.timeLogs
def setTimeLogs(self, newTimeLogs):
"""Overwrites the current logs with new ones.
:param newTimeLogs: new dataframe of logged events containing start/end time, duration, and a short description.
:type newTimeLogs: :class:`pandas.DataFrame`
"""
self.timeLogs = newTimeLogs
def disableLogFlush(self):
"""Disables log flushing (i.e., writing the log to a separate file) of the selector at the end of feature selection.
This is needed when a :class:`CombiningSelector` uses a second selector and wants to avoid that its log messages are written, potentially overwriting logs from another selector of the same name.
"""
self.enableLogFlush = False
def enableLogFlush(self):
"""Enables log flushing, i.e. writing the logs to a separate file at the end of feature selection.
"""
self.enableLogFlush = True
def getName(self):
"""Gets the selector's name.
:return: selector name.
:rtype: str
"""
return self.name
def getData(self):
"""Gets the labeled dataset from which to select features.
:return: dataframe containing the dataset with class labels.
:rtype: :class:`pandas.DataFrame`
"""
if self.dataset is None:
self.dataset = pd.read_csv(self.input, index_col=0)
return self.dataset
def getUnlabeledData(self):
"""Gets the dataset without labels.
:return: dataframe containing the dataset without class labels.
:rtype: :class:`pandas.DataFrame`
"""
dataset = self.getData()
return dataset.loc[:, dataset.columns != "classLabel"]
def getFeatures(self):
"""Gets features from the dataset.
:return: list of features.
:rtype: list of str
"""
return self.getData().columns[1:]
def getUniqueLabels(self):
"""Gets the unique class labels available in the dataset.
:return: list of distinct class labels.
:rtype: list of str
"""
return list(set(self.getLabels()))
def getLabels(self):
"""Gets the labels in the data set.
:return: all labels from the dataset.
:rtype: list of str
"""
return list(self.getData()["classLabel"])
def setParams(self, inputPath, outputDir, loggingDir):
"""Sets parameters for the feature selection run: path to the input datast and path to the output directory.
:param inputPath: absolute path to the input file containing the dataset for analysis.
:type inputPath: str
:param outputDir: absolute path to the output directory (where to store the ranking)
:type outputDir: str
:param loggingDir: absolute path to the logging directory (where to store log files)
:type loggingDir: str
"""
self.input = inputPath
self.output = outputDir
self.loggingDir = loggingDir
def writeRankingToFile(self, ranking, outputFile, index = False):
"""Writes a given ranking to a specified file.
:param ranking: dataframe with the ranking.
:type ranking: :class:`pandas.DataFrame`
:param outputFile: absolute path of the file where ranking will be stored.
:type outputFile: str
:param index: whether to write the dataframe's index or not.
:type index: bool, default False
"""
if not ranking.empty:
ranking.to_csv(outputFile, index = index, sep = "\t")
else:
#make sure to write at least the header if the dataframe is empty
with open(outputFile, 'w') as outfile:
header_line = "\"attributeName\"\t\"score\"\n"
outfile.write(header_line)
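# Output format note (descriptive comment): the rankings written via writeRankingToFile are
# tab-separated files with the columns attributeName and score; PythonSelector below sorts the
# ranking by score in descending order before writing it.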
class PythonSelector(FeatureSelector):
"""Abstract.
Inherit from this class when implementing a feature selector using any of scikit-learn's functionality.
    Because selector invocation, input preprocessing, and output postprocessing are typically very similar for such implementations, this class already encapsulates them.
Instead of implementing :meth:`PythonSelector.selectFeatures`, implement :meth:`PythonSelector.runSelector`.
"""
def __init__(self, name):
super().__init__(name)
@abc.abstractmethod
def runSelector(self, data, labels):
"""Abstract - implement this method when inheriting from this class.
Runs the actual feature selector of scikit-learn.
Is invoked by :meth:`PythonSelector.selectFeatures`.
:param data: dataframe containing the unlabeled dataset.
:type data: :class:`pandas.DataFrame`
:param labels: numerically encoded class labels.
:type labels: list of int
:return: sklearn/mlxtend selector that ran the selection (containing coefficients etc.).
"""
pass
def selectFeatures(self):
"""Executes the feature selection procedure.
Prepares the input data set to match scikit-learn's expected formats and postprocesses the output to create a ranking.
:return: absolute path to the output ranking file.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
filename = self.getName() + ".csv"
outputFile = self.output + filename
data, labels = self.prepareInput()
selector = self.runSelector(data, labels)
self.prepareOutput(outputFile, data, selector)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
return outputFile
def prepareInput(self):
"""Prepares the input data set before running any of scikit-learn's selectors.
Removes the labels from the input data set and encodes the labels in numbers.
:return: dataset (without labels) and labels encoded in numbers.
:rtype: :class:`pandas.DataFrame` and list of int
"""
start = time.time()
labels = self.getLabels()
data = self.getUnlabeledData()
le = preprocessing.LabelEncoder()
numeric_labels = le.fit_transform(labels)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Input Preparation")
return data, numeric_labels
def prepareOutput(self, outputFile, data, selector):
"""Transforms the selector output to a valid ranking and stores it into the specified file.
:param outputFile: absolute path of the file to which to write the ranking.
:type outputFile: str
:param data: input dataset.
:type data: :class:`pandas.DataFrame`
:param selector: selector object from scikit-learn.
"""
start = time.time()
ranking = pd.DataFrame()
ranking["attributeName"] = data.columns
ranking["score"] = selector.scores_
ranking = ranking.sort_values(by='score', ascending=False)
self.writeRankingToFile(ranking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Output Preparation")
class RSelector(FeatureSelector,metaclass=abc.ABCMeta):
"""Selector class for invoking R code for feature selection.
Inherit from this class if you want to use R code, implement :meth:`RSelector.createParams` with what your script requires, and set self.scriptName accordingly.
:param rConfig: config parameters to execute R code.
:type rConfig: dict
"""
def __init__(self, name):
self.rConfig = utils.getConfig("R")
self.scriptName = "FS_" + name + ".R"
super().__init__(name)
@abc.abstractmethod
def createParams(self, filename):
"""Abstract.
Implement this method to set the parameters your R script requires.
:param filename: absolute path of the output file.
:type filename: str
:return: list of parameters to use for R code execution, e.g. input and output filenames.
:rtype: list of str
"""
pass
def selectFeatures(self):
"""Triggers the feature selection.
Actually a wrapper method that invokes external R code.
:return: absolute path to the result file containing the ranking.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
filename = self.getName() + ".csv"
outputFile = self.output + filename
params = self.createParams(outputFile)
utils.runRCommand(self.rConfig, self.scriptName , params)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
        return outputFile
class JavaSelector(FeatureSelector):
"""Selector class for invoking R code for feature selection.
Inherit from this class if you want to use R code, implement :meth:`RSelector.createParams` with what your script requires, and set self.scriptName accordingly.
:param javaConfig: config parameters to execute java code.
:type javaConfig: dict
"""
def __init__(self, name):
self.javaConfig = utils.getConfig("Java")
super().__init__(name)
@abc.abstractmethod
def createParams(self):
"""Abstract.
Implement this method to set the parameters your java code requires.
:return: list of parameters to use for java code execution, e.g. input and output filenames.
:rtype: list of str
"""
pass
def selectFeatures(self):
"""Triggers the feature selection.
Actually a wrapper method that invokes external java code.
:return: absolute path to the result file containing the ranking.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
filename = self.name + ".csv"
params = self.createParams()
utils.runJavaCommand(self.javaConfig, "/WEKA_FeatureSelector.jar", params)
output_filepath = self.output + filename
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
return output_filepath
############################### PRIOR KNOWLEDGE SELECTORS ###############################
class PriorKnowledgeSelector(FeatureSelector,metaclass=abc.ABCMeta):
"""Super class for all prior knowledge approaches.
If you want to implement an own prior knowledge approach that uses a knowledge base (but not a second selector and no network approaches), inherit from this class.
:param knowledgebase: instance of a knowledge base.
:type knowledgebase: :class:`knowledgebases.KnowledgeBase` or inheriting class
:param alternativeSearchTerms: list of alternative search terms to use for querying the knowledge base.
:type alternativeSearchTerms: list of str
"""
def __init__(self, name, knowledgebase):
self.knowledgebase = knowledgebase
super().__init__(name)
self.alternativeSearchTerms = self.collectAlternativeSearchTerms()
@abc.abstractmethod
def selectFeatures(self):
"""Abstract.
Implement this method when inheriting from this class.
:return: absolute path to the output ranking file.
:rtype: str
"""
pass
def collectAlternativeSearchTerms(self):
"""Gets all alternative search terms that were specified in the config file and put them into a list.
:return: list of alternative search terms to use for querying the knowledge base.
:rtype: list of str
"""
alternativeTerms = self.dataConfig["alternativeSearchTerms"].split(" ")
searchTerms = []
for term in alternativeTerms:
searchTerms.append(term.replace("_", " "))
return searchTerms
def getSearchTerms(self):
"""Gets all search terms to use for querying a knowledge base.
Search terms that will be used are a) the class labels in the dataset, and b) the alternative search terms that were specified in the config file.
:return: list of search terms to use for querying the knowledge base.
:rtype: list of str
"""
searchTerms = list(self.getUniqueLabels())
searchTerms.extend(self.alternativeSearchTerms)
return searchTerms
def getName(self):
"""Returns the full name (including applied knowledge base) of this selector.
:return: selector name.
:rtype: str
"""
return self.name + "_" + self.knowledgebase.getName()
#selector class for modifying integrative approaches
class CombiningSelector(PriorKnowledgeSelector):
"""Super class for prior knoweldge approaches that use a knowledge base AND combine it with any kind of selector, e.g. a traditional approach.
Inherit from this class if you want to implement a feature selector that requires both a knowledge base and another selector, e.g. because it combines information from both.
:param knowledgebase: instance of a knowledge base.
:type knowledgebase: :class:`knowledgebases.KnowledgeBase` or inheriting class
:param tradApproach: any feature selector implementation to use internally, e.g. a traditional approach like ANOVA
:type tradApproach: :class:`FeatureSelector`
"""
def __init__(self, name, knowledgebase, tradApproach):
self.tradSelector = tradApproach
self.tradSelector.disableLogFlush()
super().__init__(name, knowledgebase)
self.tradSelector.setTimeLogs(self.timeLogs)
@abc.abstractmethod
def selectFeatures(self):
"""Abstract.
Implement this method as desired when inheriting from this class.
:return: absolute path to the output ranking file.
:rtype: str
"""
pass
def getName(self):
"""Returns the full name (including applied knowledge base and feature selector) of this selector.
:returns: selector name.
:rtype: str
"""
return self.name + "_" + self.tradSelector.getName() + "_" + self.knowledgebase.getName()
def getExternalGenes(self):
"""Gets all genes related to the provided search terms from the knowledge base.
:returns: list of gene names.
:rtype: list of str
"""
start = time.time()
externalGenes = self.knowledgebase.getRelevantGenes(self.getSearchTerms())
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Getting External Genes")
return externalGenes
class NetworkSelector(PriorKnowledgeSelector):
"""Abstract.
    Inherit from this class if you want to implement a new network approach that actually conducts feature EXTRACTION, i.e. maps the original data set so that its features are pathways/subnetworks.
Instead of :meth:`FeatureSelector.selectFeatures` implement :meth:`NetworkSelector.selectPathways` when inheriting from this class.
Instances of :class:`NetworkSelector` and inheriting classes also require a :class:`PathwayMapper` object that transfers the dataset to the new feature space.
Custom implementations thus need to implement a) a selection strategy to select pathways and b) a mapping strategy to compute new feature values for the selected pathways.
:param featureMapper: feature mapping object that transfers the feature space.
:type featureMapper: :class:`FeatureMapper` or inheriting class
"""
def __init__(self, name, knowledgebase, featuremapper):
self.featureMapper = featuremapper
super().__init__(name, knowledgebase)
@abc.abstractmethod
def selectPathways(self, pathways):
"""Selects the pathways that will become the new features of the data set.
        Implement this method (instead of :meth:`FeatureSelector.selectFeatures`) when inheriting from this class.
:param pathways: dict of pathways (pathway names as keys) to select from.
:type pathways: dict
:returns: pathway ranking as dataframe
:rtype: :class:`pandas.DataFrame`
"""
pass
def writeMappedFile(self, mapped_data, fileprefix):
"""Writes the mapped dataset with new feature values to the same directory as the original file is located (it will be automatically processed then).
:param mapped_data: dataframe containing the dataset with mapped feature space.
:type mapped_data: :class:`pandas.DataFrame`
:param fileprefix: prefix of the file name, e.g. the directory path
:type fileprefix: str
:return: absolute path of the file name to store the mapped data set.
:rtype: str
"""
mapped_filepath = fileprefix + "_" + self.getName() + ".csv"
mapped_data.to_csv(mapped_filepath)
return mapped_filepath
def getName(self):
"""Gets the selector name (including the knowledge base).
:returns: selector name.
:rtype: str
"""
return self.name + "_" + self.knowledgebase.getName()
def filterPathways(self, pathways):
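        """Filters the given pathways to those that share at least one gene with the data set.
        Pathways without any overlap cannot be mapped to the data and are discarded with a warning.
        :param pathways: dict of pathways (pathway names as keys) to filter.
        :type pathways: dict
        :returns: dict containing only the pathways that overlap with the data set features.
        :rtype: dict
        """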
filtered_pathways = {}
for pathwayName in pathways:
genes = pathways[pathwayName].nodes_by_label.keys()
#check if there is an overlap between the pathway and data set genes
existingGenes = list(set(self.getFeatures()) & set(genes))
if len(existingGenes) > 0:
filtered_pathways[pathwayName] = pathways[pathwayName]
else:
utils.logWarning("WARNING: No genes of pathway " + pathwayName + " found in dataset. Pathway will not be considered")
return filtered_pathways
def selectFeatures(self):
"""Instead of selecting existing features, instances of :class:`NetworkSelector` select pathways or submodules as features.
For that, it first queries its knowledge base for pathways.
It then selects the top k pathways (strategy to be implemented in :meth:`NetworkSelector.selectPathways`) and subsequently maps the dataset to its new feature space.
The mapping will be conducted by an object of :class:`PathwayMapper` or inheriting classes.
If a second dataset for cross-validation is available, the feature space of this dataset will also be transformed.
:returns: absolute path to the pathway ranking.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
overallstart = time.time()
pathways = self.knowledgebase.getRelevantPathways(self.getSearchTerms())
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, overallstart, end, "Get Pathways")
#filter pathways to only those that contain at least one gene from the data set
pathways = self.filterPathways(pathways)
start = time.time()
pathwayRanking = self.selectPathways(pathways)
outputFile = self.output + self.getName() + ".csv"
self.writeRankingToFile(pathwayRanking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Pathway Selection")
pathwayNames = pathwayRanking["attributeName"]
start = time.time()
mapped_data = self.featureMapper.mapFeatures(self.getData(), pathways)
fileprefix = os.path.splitext(self.input)[0]
mapped_filepath = self.writeMappedFile(mapped_data, fileprefix)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Mapping")
#if crossvalidation is enabled, we also have to map the crossvalidation file
if (utils.getConfigBoolean("Evaluation", "enableCrossEvaluation")):
start = time.time()
#we need to get the cross validation file that had been moved into the intermediate folder
crossValidationPath = utils.getConfigValue("General", "crossVal_preprocessing") + "ready/"
crossValidationFile = utils.getConfigValue("Evaluation", "crossEvaluationData")
crossValFilename = os.path.basename(crossValidationFile)
crossValFilepath = crossValidationPath + crossValFilename
crossValData = pd.read_csv(crossValFilepath, index_col=0)
mapped_crossValData = self.featureMapper.mapFeatures(crossValData, pathways)
crossvalFileprefix = os.path.splitext(crossValFilepath)[0]
crossval_mapped_filepath = self.writeMappedFile(mapped_crossValData, crossvalFileprefix)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "CrossValidation Feature Mapping")
overallend = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, overallstart, overallend, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
return outputFile
############################### FILTER ###############################
class RandomSelector(FeatureSelector):
"""Baseline Selector: Randomly selects any features.
"""
def __init__(self):
super().__init__("Random")
def selectFeatures(self):
"""Randomly select any features from the feature space.
        Assigns a score of 0.0 to every feature.
:returns: absolute path to the ranking file.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
filename = self.getName() + ".csv"
outFilename = self.output + filename
#randomly pick any features
with open(self.input, 'r') as infile:
header = infile.readline().rstrip().split(",")
max_index = len(header)
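            #skip the first two header fields (index 0 and 1), which are not treated as features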
min_index = 2
shuffled_indices = random.sample(range(min_index, max_index), max_index - 2)
with open(outFilename, 'w') as outfile:
header_line = "\"attributeName\"\t\"score\"\n"
outfile.write(header_line)
for i in shuffled_indices:
line = "\"" + header[i] + "\"\t\"0.0000\"\n"
outfile.write(line)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished ########################")
return outFilename
class AnovaSelector(PythonSelector):
"""Runs ANOVA feature selection using scikit-learn implementation
"""
def __init__(self):
super().__init__("ANOVA")
def runSelector(self, data, labels):
"""Runs the ANOVA feature selector of scikit-learn.
Is invoked by :meth:`PythonSelector.selectFeatures`.
:param data: dataframe containing the unlabeled dataset.
:type data: :class:`pandas.DataFrame`
:param labels: numerically encoded class labels.
:type labels: list of int
:return: sklearn/mlxtend selector that ran the selection (containing coefficients etc.).
"""
start = time.time()
#setting k to "all" returns all features
selector = SelectKBest(f_classif, k="all")
selector.fit_transform(data, labels)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "ANOVA")
return selector
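#Illustrative usage sketch (commented out so it does not run on import): how a concrete selector such as
#AnovaSelector would typically be configured and executed; the paths passed to setParams are placeholders.
# selector = AnovaSelector()
# selector.setParams("/path/to/dataset.csv", "/path/to/output/", "/path/to/logs/")
# rankingFile = selector.selectFeatures()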
class Variance2Selector(PythonSelector):
"""Runs variance-based feature selection using scikit-learn.
"""
def __init__(self):
super().__init__("Variance")
def prepareOutput(self, outputFile, data, selector):
"""Transforms the selector output to a valid ranking and stores it into the specified file.
        We need to override this method because the variance selector has no scores_ attribute but variances_ instead.
:param outputFile: absolute path of the file to which to write the ranking.
:type outputFile: str
:param data: input dataset.
:type data: :class:`pandas.DataFrame`
:param selector: selector object from scikit-learn.
"""
start = time.time()
ranking = pd.DataFrame()
ranking["attributeName"] = data.columns
ranking["score"] = selector.variances_
ranking = ranking.sort_values(by='score', ascending=False)
self.writeRankingToFile(ranking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Output Preparation")
def runSelector(self, data, labels):
"""Runs the actual variance-based feature selector of scikit-learn.
Is invoked by :meth:`PythonSelector.selectFeatures`.
:param data: dataframe containing the unlabeled dataset.
:type data: :class:`pandas.DataFrame`
:param labels: numerically encoded class labels.
:type labels: list of int
:return: sklearn/mlxtend selector that ran the selection (containing coefficients etc.).
"""
start = time.time()
selector = VarianceThreshold()
selector.fit_transform(data)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Variance_p")
return selector
class MRMRSelector(RSelector):
"""Runs maximum Relevance minimum Redundancy (mRMR) feature selection using the mRMRe R implementation: https://cran.r-project.org/web/packages/mRMRe/index.html
Actually a wrapper class for invoking the R code.
:param scriptName: name of the R script to invoke.
:type scriptName: str
    :param maxFeatures: maximum number of features to select. Currently all features are ranked (a value of 0 means no limit).
:type maxFeatures: int
"""
def __init__(self):
self.maxFeatures = 0
super().__init__("mRMR")
def createParams(self, outputFile):
"""Sets the parameters the R script requires (input file, output file, maximum number of features).
:return: list of parameters to use for mRMR execution in R.
:rtype: list of str
"""
params = [self.input, outputFile, str(self.maxFeatures)]
return params
class VarianceSelector(RSelector):
"""Runs variance-based feature selection using R genefilter library.
Actually a wrapper class for invoking the R code.
:param scriptName: name of the R script to invoke.
:type scriptName: str
"""
def __init__(self):
super().__init__("VB-FS")
def createParams(self, outputFile):
"""Sets the parameters the R script requires (input file, output file).
:param outputFile: absolute path to the output file that will contain the ranking.
:type outputFile: str
        :return: list of parameters to use for the variance-based selection execution in R.
:rtype: list of str
"""
params = [self.input, outputFile]
return params
class InfoGainSelector(JavaSelector):
"""Runs InfoGain feature selection as provided by WEKA: https://www.cs.waikato.ac.nz/ml/weka/
Actually a wrapper class for invoking java code.
"""
def __init__(self):
super().__init__("InfoGain")
def createParams(self):
"""Sets the parameters the java program requires (input file, output file, selector name).
:return: list of parameters to use for InfoGain execution in java.
:rtype: list of str
"""
params = [self.input, self.output, "InfoGain"]
return params
class ReliefFSelector(JavaSelector):
"""Runs ReliefF feature selection as provided by WEKA: https://www.cs.waikato.ac.nz/ml/weka/
Actually a wrapper class for invoking java code.
"""
def __init__(self):
super().__init__("ReliefF")
def createParams(self):
"""Sets the parameters the java program requires (input file, output file, selector name).
        :return: list of parameters to use for ReliefF execution in java.
:rtype: list of str
"""
params = [self.input, self.output, "ReliefF"]
return params
############################### FILTER - COMBINED ###############################
class KbSelector(PriorKnowledgeSelector):
"""Knowledge base selector.
    Selects features exclusively based on the information retrieved from a knowledge base.
:param knowledgebase: instance of a knowledge base.
:type knowledgebase: :class:`knowledgebases.KnowledgeBase`
"""
def __init__(self, knowledgebase):
super().__init__("KBonly", knowledgebase)
def updateScores(self, entry, newGeneScores):
"""Updates a score entry with the new score retrieved from the knowledge base.
Used by apply function.
:param entry: a gene score entry consisting of the gene name and its score
:type entry: :class:`pandas.Series`
:param newGeneScores: dataframe containing gene scores retrieved from the knowledge base.
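            The first column is expected to hold the gene symbols ("gene_symbol") and a "score" column the corresponding scores.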
:type newGeneScores: :class:`pandas.DataFrame`
:returns: updated series element.
:rtype: :class:`pandas.Series`
"""
gene = entry["attributeName"]
updatedGenes = newGeneScores.iloc[:,0]
#if the gene has a new score, update the entry
if gene in updatedGenes.values:
x = newGeneScores.loc[(newGeneScores["gene_symbol"] == gene), "score"]
#necessary because we want to get the scalar value, not a series
entry["score"] = x.iloc[0]
return entry
def selectFeatures(self):
"""Does the actual feature selection.
Retrieves association scores for genes from the knowledge base based on the given search terms.
:returns: absolute path to the resulting ranking file.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
outputFile = self.output + self.getName() + ".csv"
genes = self.getFeatures()
        # assign a minimal default score (0.00001) to all genes
attributeNames = genes
scores = [0.00001] * len(genes)
ranking = pd.DataFrame({"attributeName": attributeNames, "score": scores})
kb_start = time.time()
associatedGenes = self.knowledgebase.getGeneScores(self.getSearchTerms())
kb_end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, kb_start, kb_end, "Getting External Gene Scores")
# assign association score to all genes in data
updated_ranking = ranking.apply(self.updateScores, axis = 1, newGeneScores = associatedGenes)
#sort by score, with highest on top
updated_ranking = updated_ranking.sort_values("score", ascending=False)
#save final rankings to file
self.writeRankingToFile(updated_ranking, outputFile)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished. ########################")
return outputFile
class KBweightedSelector(CombiningSelector):
"""Selects features based on association scores retrieved from the knowledge base and the relevance score retrieved by the (traditional) approach.
Computes the final score via tradScore * assocScore.
:param knowledgebase: instance of a knowledge base.
:type knowledgebase: :class:`knowledgebases.KnowledgeBase` or inheriting class
:param tradApproach: any feature selector implementation to use internally, e.g. a traditional approach like ANOVA
:type tradApproach: :class:`FeatureSelector`
"""
def __init__(self, knowledgebase, tradApproach):
super().__init__("Weighted", knowledgebase, tradApproach)
def updateScores(self, entry, newGeneScores):
"""Updates a score entry with the new score retrieved from the knowledge base.
Used by apply function.
:param entry: a gene score entry consisting of the gene name and its score
:type entry: :class:`pandas.Series`
:param newGeneScores: dataframe containing gene scores retrieved from the knowledge base.
:type newGeneScores: :class:`pandas.DataFrame`
:returns: updated series element.
:rtype: :class:`pandas.Series`
"""
gene = entry["attributeName"]
updatedGenes = newGeneScores.iloc[:,0]
#if the gene has a new score, update the entry
if gene in updatedGenes.values:
x = newGeneScores.loc[(newGeneScores["gene_symbol"] == gene), "score"]
#necessary because we want to get the scalar value, not a series
entry["score"] = x.iloc[0]
return entry
def getName(self):
"""Gets the selector name (including the knowledge base and (traditional) selector).
:returns: selector name.
:rtype: str
"""
return self.name + "_" + self.tradSelector.getName() + "_" + self.knowledgebase.getName()
def computeStatisticalRankings(self, intermediateDir):
"""Computes the statistical relevance score of all features using the (traditional) selector.
:param intermediateDir: absolute path to output directory for (traditional) selector (where to write the statistical rankings).
:type intermediateDir: str
:returns: dataframe with statistical ranking.
:rtype: :class:`pandas.DataFrame`
"""
start = time.time()
self.tradSelector.setParams(self.input, intermediateDir, self.loggingDir)
statsRankings = self.tradSelector.selectFeatures()
#load data frame from file
statisticalRankings = pd.read_csv(statsRankings, index_col = 0, sep = "\t", engine = "python")
self.timeLogs = pd.concat([self.timeLogs, self.tradSelector.getTimeLogs()])
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Statistical Ranking")
return statisticalRankings
def computeExternalRankings(self):
"""Computes the association scores for every gene using the knowledge base.
Genes for which no entry could be found receive a default score of 0.000001.
        :return: dict mapping gene names to association scores.
        :rtype: dict
"""
start = time.time()
genes = self.getFeatures()
# assign a minimal default score (0.000001) to all genes
geneScores = dict.fromkeys(genes, 0.000001)
associatedGenes = self.knowledgebase.getGeneScores(self.getSearchTerms())
        #assign association score to all genes in data
for gene in geneScores.keys():
# check if score for gene was found in knowledge base
if gene in list(associatedGenes.iloc[:, 0]):
gene_entry = associatedGenes[associatedGenes["gene_symbol"] == gene]
geneScores[gene] = gene_entry.iloc[0, 1]
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "External Ranking")
return geneScores
def combineRankings(self, externalRankings, statisticalRankings):
"""Combines score rankings from both the knowledge base and the (traditional) selector (kb_score * trad_score) to retrieve a final score for every gene.
        :param externalRankings: dict of gene association scores retrieved from the knowledge base.
        :type externalRankings: dict
:param statisticalRankings: dataframe with statistical ranking.
:type statisticalRankings: :class:`pandas.DataFrame`
:returns: dataframe with final combined ranking.
:rtype: :class:`pandas.DataFrame`
"""
start = time.time()
#just take over the statistical rankings and alter the scores accordingly
combinedRankings = statisticalRankings.copy()
features = statisticalRankings.index
#go trough every item and combine by weighting
for feature in features:
#update scores - external rankings only provide feature scores, no indices
if feature in externalRankings.keys():
externalScore = externalRankings[feature]
else:
                #if no entry exists, fall back to a minimal score so it does not zero out the whole product
                externalScore = 0.00001
            if externalScore == 0:
                #a knowledge base score of exactly 0 would likewise zero out the product, so replace it with the minimal score as well
                externalScore = 0.00001
statsScore = statisticalRankings.at[feature, "score"]
combinedRankings.at[feature, "score"] = externalScore * statsScore
#reorder genes based on new score
combinedRankings = combinedRankings.sort_values('score', ascending=False)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Ranking Combination")
return combinedRankings
def selectFeatures(self):
"""Runs the feature selection process.
Retrieves scores from knowledge base and (traditional) selector and combines these to a single score.
:returns: absolute path to final output file containing the ranking.
:rtype: str
"""
utils.logInfo("######################## " + self.getName() + "... ########################")
start = time.time()
intermediateDir = utils.getConfigValue("General",
"intermediateDir") + self.getName() + "/"
utils.createDirectory(intermediateDir)
outputFile = self.output + self.getName() + ".csv"
#compute gene rankings with traditional approaches
statisticalRankings = self.computeStatisticalRankings(intermediateDir)
#compute gene rankings/associations with external knowledge base
externalRankings = self.computeExternalRankings()
#combine ranking scores
combinedRankings = self.combineRankings(externalRankings, statisticalRankings)
#save final rankings to file
#note: here the gene ids are the index, so write it to file
self.writeRankingToFile(combinedRankings, outputFile, True)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Feature Selection")
if self.enableLogFlush:
utils.flushTimeLog(self.timeLogs, self.loggingDir + self.getName() + ".csv")
utils.logInfo("######################## " + self.getName() + " finished. ########################")
return outputFile
class LassoPenalty(PriorKnowledgeSelector, RSelector):
"""Runs feature selection by invoking xtune R package: https://cran.r-project.org/web/packages/xtune/index.html
xtune is a Lasso selector that uses feature-individual penalty scores.
These penalty scores are retrieved from the knowledge base.
"""
selectFeatures = RSelector.selectFeatures #make sure the right selectFeatures method will be invoked
getName = PriorKnowledgeSelector.getName
def __init__(self, knowledgebase):
super().__init__("LassoPenalty", knowledgebase)
self.scriptName = "FS_LassoPenalty.R"
def createParams(self, outputFile):
"""Sets the parameters the xtune R script requires (input file, output file, filename containing rankings from knowledge base).
:return: list of parameters to use for xtune execution in R.
:rtype: list of str
"""
externalScore_filename = self.computeExternalRankings()
params = [self.input, outputFile, externalScore_filename]
return params
def computeExternalRankings(self):
"""Computes the association scores for each feature based on the scores retrieved from the knowledge base.
Features that could not be found in the knowledge base receive a default score of 0.000001.
:return: absolute path to the file containing the external rankings.
:rtype: str
"""
start = time.time()
intermediateOutput = utils.getConfigValue("General",
"intermediateDir") + self.getName() + "/"
utils.createDirectory(intermediateOutput)
genes = self.getFeatures()
# assign a minimal default score (0.000001) to all genes
geneScores = dict.fromkeys(genes, 0.000001)
associatedGenes = self.knowledgebase.getGeneScores(self.getSearchTerms())
#assign association score to all genes in data
for gene in geneScores.keys():
#check if score for gene was found in knowledge base
if gene in list(associatedGenes.iloc[:,0]):
gene_entry = associatedGenes[associatedGenes["gene_symbol"] == gene]
geneScores[gene] = gene_entry.iloc[0,1]
#write gene scores to file
scores_filename = intermediateOutput + self.knowledgebase.getName() + "_scores.csv"
scores_df = pd.DataFrame.from_dict(geneScores, orient = "index", columns = ["score"])
scores_df = scores_df.sort_values('score', ascending=False)
scores_df.to_csv(scores_filename, index=True)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "External Ranking")
return scores_filename
############################### WRAPPER ###############################
class WrapperSelector(PythonSelector):
"""Selector implementation for wrapper selectors using scikit-learn.
Currently implements recursive feature eliminatin (RFE) and sequential forward selection (SFS) strategies,
which can be combined with nearly any classifier offered by scikit-learn, e.g. SVM.
:param selector: scikit-learn selector strategy (currently RFE and SFS)
:param classifier: scikit-learn classifier to use for wrapper selection.
"""
def __init__(self, name):
super().__init__(name)
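        #note: the selector name is expected to follow the pattern "<classifier>-<strategy>",
        #e.g. "SVMl-RFE" or "KNN5-SFS", so that createClassifier() and createSelector() can parse it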
self.classifier = self.createClassifier()
self.selector = self.createSelector()
def createClassifier(self):
"""Creates a classifier instance (from scikit-learn) to be used during the selection process.
To enable the framework to use a new classifier, extend this method accordingly.
:returns: scikit-learn classifier instance.
"""
classifier = None
classifierType = self.name.split("-")[0]
if "KNN" in classifierType:
#attention: assumes that KNN is followed by a number!
k = int(classifierType.replace("KNN", ""))
classifier = KNeighborsClassifier(n_neighbors=k)
elif classifierType == "SVMl":#SVM with linear kernel
classifier = LinearSVC(max_iter=10000)
#elif classifierType == "SVMp": # SVM with polynomial kernel, but it does not have coef component
# classifier = SVC(kernel="poly")
elif classifierType == "LR":
classifier = LinearRegression()
elif classifierType == "NB":
#use MultinomialNB because we cannot assume feature likelihood to be gaussian by default
classifier = MultinomialNB()
elif classifierType == "ANOVA":
classifier = f_classif
else:
raise BaseException("No suitable classifier found for " + classifierType + ". Choose between KNNx, SVMl (SVM with linear kernel), SVMp (SVM with polynomial kernel), LR, NB, ANOVA.")
return classifier
def createSelector(self):
"""Creates a selector instance that leads the selection process.
Currently, sequential forward selection (SFS) and recursive feature elimination (RFE) are implemented.
Extend this method if you want to add another selection strategy.
:returns: scikit-learn selector instance.
"""
selector = None
k = utils.getConfigValue("Gene Selection - General", "selectKgenes")
selectorType = self.name.split("-")[1]
if selectorType == "RFE":
            selector = RFE(self.classifier, n_features_to_select=int(k))
elif selectorType == "SFS":
selector = SFS(self.classifier,
k_features=int(k),
forward=True,
floating=False,
scoring='accuracy',
verbose = 2,
                           n_jobs = int(utils.getConfigValue("General", "numCores")) // 2, #use half of the available cores (n_jobs must be an integer)
cv=0)
return selector
def prepareOutput(self, outputFile, data, selector):
"""Overwrites the inherited prepareOutput method because we need to access the particular selector's coefficients.
The coefficients are extracted as feature scores and will be written to the rankings file.
        :param outputFile: absolute path of the file to which to write the ranking.
:type outputFile: str
:param data: input dataset to get the feature names.
:type data: :class:`pandas.DataFrame`
:param selector: selector instance that is used during feature selection.
"""
start = time.time()
ranking = pd.DataFrame()
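        #depending on the selector implementation, the fitted estimator (and thus its coefficients)
        #is exposed under a different attribute name (e.g. estimator_ for scikit-learn's RFE), so the known variants are tried in turn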
        try:
            x = selector.estimator_.coef_
        except AttributeError:
            try:
                x = selector.estimator.coef_
            except AttributeError:
                x = selector.est_.coef_
        #features assigned rank 1 were selected; use the ranking as a boolean mask on the data columns
        selected_features = data.columns[selector.ranking_ == 1]
ranking["attributeName"] = selected_features
ranking["score"] = x[0]
ranking = ranking.sort_values('score', ascending=False)
self.writeRankingToFile(ranking, outputFile)
end = time.time()
        self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Prepare Output")
def runSelector(self, data, labels):
"""Runs the actual feature selector of scikit-learn.
Is invoked by :meth:`PythonSelector.selectFeatures`.
:param data: dataframe containing the unlabeled dataset.
:type data: :class:`pandas.DataFrame`
:param labels: numerically encoded class labels.
:type labels: list of int
:return: sklearn/mlxtend selector that ran the selection (containing coefficients etc.).
"""
# do gene selection
start = time.time()
#adjust k to not exceed data columns
k = int(utils.getConfigValue("Gene Selection - General", "selectKgenes"))
        if k > data.columns.size:
            #cap k at the number of available features (n_features_to_select for RFE, k_features for SFS)
            self.selector.n_features_to_select = data.columns.size
            self.selector.k_features = data.columns.size
# do data scaling
scaling = StandardScaler().fit(data)
scaled_data = scaling.transform(data)
data = scaled_data
self.selector = self.selector.fit(data, labels)
end = time.time()
self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Wrapper Selector")
return self.selector
class SVMRFESelector(JavaSelector):
"""Executes SVM-RFE with poly-kernel.
Uses an efficient java implementation from WEKA and is thus just a wrapper class to invoke the corresponding jars.
"""
def __init__(self):
super().__init__("SVMpRFE")
def createParams(self):
"""Sets the parameters the java program requires (input file, output file, selector name).
        :return: list of parameters to use for SVM-RFE execution in java.
:rtype: list of str
"""
params = [self.input, self.output, "SVMpRFE"]
return params
############################### EMBEDDED ###############################
class RandomForestSelector(PythonSelector):
"""Selector class that implements RandomForest as provided by scikit-learn.
"""
def __init__(self):
super().__init__("RandomForest")
#override method because there is no scores_ attribute but instead feature_importances_
def prepareOutput(self, outputFile, data, selector):
"""Overwrites the inherited prepareOutput method because we need to access the RandomForest selector's feature importances.
These feature importances are extracted as feature scores and will be written to the rankings file.
        :param outputFile: absolute path of the file to which to write the ranking.
:type outputFile: str
:param data: input dataset to get the feature names.
:type data: :class:`pandas.DataFrame`
:param selector: RandomForest selector instance that is used during feature selection.
"""
start = time.time()
        ranking = pd.DataFrame()
        ranking["attributeName"] = data.columns
        ranking["score"] = selector.feature_importances_
        ranking = ranking.sort_values(by='score', ascending=False)
        self.writeRankingToFile(ranking, outputFile)
        end = time.time()
        self.timeLogs = utils.logRuntime(self.timeLogs, start, end, "Output Preparation")
import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas as pd
from pandas import to_numeric
from pandas.util import testing as tm
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = pd.Series([], dtype=object)
res = to_numeric(s)
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(res, expected)
# Original issue example
res = to_numeric(s, errors='coerce', downcast='integer')
expected = pd.Series([], dtype=np.int8)
tm.assert_series_equal(res, expected)
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
s = pd.Series(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
# GH 14827
df = pd.DataFrame(dict(
a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'],
b=[1.0, 2.0, 3.0, 4.0],
))
expected = pd.DataFrame(dict(
a=[1.2, 3.14, np.inf, 0.1],
b=[1.0, 2.0, 3.0, 4.0],
))
# Test to_numeric over one column
df_copy = df.copy()
df_copy['a'] = df_copy['a'].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
# Test to_numeric over multiple columns
df_copy = df.copy()
df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
def test_numeric_lists_and_arrays(self):
# Test to_numeric with embedded lists and arrays
df = pd.DataFrame(dict(
a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 1.6, 0.1],
))
tm.assert_frame_equal(df, expected)
df = pd.DataFrame(dict(
a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 0.1],
))
tm.assert_frame_equal(df, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
@pytest.mark.parametrize("errors", [None, "ignore", "raise", "coerce"])
def test_type_check(self, errors):
# see gh-11776
df = pd.DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})
kwargs = dict(errors=errors) if errors is not None else dict()
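        # to_numeric only handles 1-d inputs, so passing a DataFrame raises a TypeError regardless of the errors kwarg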
error_ctx = pytest.raises(TypeError, match="1-d array")
with error_ctx:
to_numeric(df, **kwargs)
def test_scalar(self):
assert pd.to_numeric(1) == 1
assert pd.to_numeric(1.1) == 1.1
assert pd.to_numeric('1') == 1
assert pd.to_numeric('1.1') == 1.1
with pytest.raises(ValueError):
to_numeric('XX', errors='raise')
assert to_numeric('XX', errors='ignore') == 'XX'
assert np.isnan(to_numeric('XX', errors='coerce'))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
        tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
################################################################################
# This module aggregates all the psycho-linguistic measures into one matrix by
# each 'AC_Doc_ID' (item stem or option).
# Parameters df_ac_pos: input pandas.DataFrame, it should have, at least, POS
# count columns with the 'AC_Doc_ID's as the index of
# the DataFrame
# pos_start_q: integer column number (starting from zero)
# specifying the starting point of POS count
# columns in the question DataFrame, from the point
# to the end, all the columns should be the POS count
# columns
# df_ac_loc_overlapping_lemma: pandas.DataFrame of the overlapping
# lemma location information, even no
# location information, still
# df_ac_overlapping_lemma is
# acceptable
# df_ac_loc_overlapping_syn_lemma: pandas.DataFrame of
# the overlapping lemma with synonym
# location information, even no
# location information, still
# df_ac_overlapping_syn_lemma is
# acceptable
# df_ac_overlapping_nchunk: pandas.DataFrame as a result of
# overlapping NChunk counts
# df_ac_oanc_lemma_freq_q: pandas.DataFrame reporting each
# 'AC_Doc_ID's lemma frequency stats
# stem_option_name_clm: column name of stem/option identifier
# in the aggregated DataFrame
# stem_identifier: name of the stem identifier in the aggregated
# DataFrame
# keep_specific_columns_POS = None: a list of column names to be
# included into the aggregated
# matrix as a part of the original
# columns of the df_ac_pos input
# DataFrame
# stop_words_POS = None: list of POS to specify stop words, they
# should all include in the POS question
# and passage DataFrames
# df_ac_lemma_q = None: pandas.DataFrame of questions, it should
# have, at least, lemma count columns with the 'AC_Doc_ID's
# as the index of the DataFrame
# include_specific_lemma_count = None: a list of lemmas to be
# included into the aggregated
# matrix as the lemma counts
# df_ac_pos_p = None: pandas.DataFrame of passages, it should have,
# at least, POS count columns, passage name and the section
# columns
# passage_name_clm_q = None: column name of the passage names
# in the lemma question DataFrame
# passage_sec_clm_q = None: column name of the passage sections
# in the lemma question DataFrame
# passage_name_clm_p = None: column name of the passage names
# in the passage DataFrame
# passage_sec_clm_p = None: column name of the passage sections
# in the passage DataFrame
# pos_start_p: integer column number (starting from zero)
# specifying the starting point of POS count
# columns in the passage DataFrame, from the point
# to the end, all the columns should be the POS
# count columns
# decimal_places = None: specify the decimal places to round at
# df_ac_overlapping_hypernyms = None: pandas.DataFrame as a result
# of overlapping hypernym counts
# df_ac_overlapping_hyponyms = None: pandas.DataFrame as a result
# of overlapping hyponym counts
# nchunk_suffix = '_nc': specify the suffix of NChunk variables
# which was used for the column names of
# the overlapping NChunk
# hypernym_suffix = '_hype': specify the suffix of hypernym variables
# which was used for the column names of
# the overlapping hypernyms
# hyponym_suffix = '_hypo': specify the suffix of hyponym variables
# which was used for the column names of
# the overlapping hyponyms
# df_ac_bigram_pmi_distribution = None: pandas.DataFrame as bigram
# PMI stats
# df_ac_trigram_pmi_distribution = None: pandas.DataFrame as trigram
# PMI stats
# Returns Result: pandas.DataFrame including the original columns of
# the df_ac_pos DataFrame plus aggregated result columns
################################################################################
def ac_aggregate_plim(df_ac_pos, pos_start_q, df_ac_loc_overlapping_lemma,
df_ac_loc_overlapping_syn_lemma, df_ac_overlapping_nchunk,
df_ac_oanc_lemma_freq_q, stem_option_name_clm, stem_identifier,
keep_specific_columns_POS = None, stop_words_POS = None,
df_ac_lemma_q = None, include_specific_lemma_count = None,
df_ac_pos_p = None, passage_name_clm_q = None, passage_sec_clm_q =None,
passage_name_clm_p = None, passage_sec_clm_p = None,
pos_start_p = None, decimal_places = None,
df_ac_overlapping_hypernyms = None, df_ac_overlapping_hyponyms = None,
nchunk_suffix = '_nc', hypernym_suffix = '_hype',
hyponym_suffix = '_hypo', df_ac_bigram_pmi_distribution = None,
df_ac_trigram_pmi_distribution = None):
import pandas as pd
df_ac_buf_POS = df_ac_pos.iloc[:, pos_start_q:]
all_option_count_name_clms = []
df_ac_options = df_ac_pos.drop_duplicates([stem_option_name_clm])
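    # build the per-option count column names in five equally sized blocks, one block per overlap type:
    # lemma ('Count_'), lemma with synonyms ('Count_s_'), NChunk, hypernym, and hyponym counts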
for i, x in enumerate(df_ac_options[stem_option_name_clm]):
if x != stem_identifier:
s = 'Count_' + x
all_option_count_name_clms.append(s)
for i, x in enumerate(df_ac_options[stem_option_name_clm]):
if x != stem_identifier:
s = 'Count_s_' + x
all_option_count_name_clms.append(s)
for i, x in enumerate(df_ac_options[stem_option_name_clm]):
if x != stem_identifier:
s = 'Count' + nchunk_suffix + '_' + x
all_option_count_name_clms.append(s)
for i, x in enumerate(df_ac_options[stem_option_name_clm]):
if x != stem_identifier:
s = 'Count' + hypernym_suffix + '_' + x
all_option_count_name_clms.append(s)
for i, x in enumerate(df_ac_options[stem_option_name_clm]):
if x != stem_identifier:
s = 'Count' + hyponym_suffix + '_' + x
all_option_count_name_clms.append(s)
option_len = len(all_option_count_name_clms) // 5
if stop_words_POS != None:
df_ac_buf_POS = df_ac_buf_POS.drop(stop_words_POS, axis=1)
df_ac_buf_sum = pd.DataFrame({ 'POS_sum' : df_ac_buf_POS.sum(axis=1) })
if keep_specific_columns_POS != None:
df_ac_buf_POS_head = df_ac_pos.loc[:, keep_specific_columns_POS]
else:
df_ac_buf_POS_head = df_ac_pos.copy()
df_ac_buf_POS_head['POS_sum'] = df_ac_buf_sum['POS_sum']
if df_ac_loc_overlapping_lemma is not None:
df_concat = pd.concat([df_ac_buf_POS_head, df_ac_loc_overlapping_lemma], axis=1)
else:
df_concat = df_ac_buf_POS_head.copy()
df_concat_tmp = df_concat.copy()
if df_ac_loc_overlapping_syn_lemma is not None:
df_concat = pd.concat([df_concat_tmp, df_ac_loc_overlapping_syn_lemma], axis=1)
df_concat_tmp = df_concat.copy()
if df_ac_overlapping_nchunk is not None:
df_concat = pd.concat([df_concat_tmp, df_ac_overlapping_nchunk, df_ac_oanc_lemma_freq_q], axis=1)
else:
df_concat = pd.concat([df_concat_tmp, df_ac_oanc_lemma_freq_q], axis=1)
df_concat_tmp = df_concat.copy()
if df_ac_overlapping_hypernyms is not None:
df_concat = pd.concat([df_concat_tmp, df_ac_overlapping_hypernyms], axis=1)
df_concat_tmp = df_concat.copy()
if df_ac_overlapping_hyponyms is not None:
df_concat = pd.concat([df_concat_tmp, df_ac_overlapping_hyponyms], axis=1)
df_concat_tmp = df_concat.copy()
if df_ac_bigram_pmi_distribution is not None:
df_concat = pd.concat([df_concat_tmp, df_ac_bigram_pmi_distribution], axis=1)
df_concat_tmp = df_concat.copy()
if df_ac_trigram_pmi_distribution is not None:
        df_concat = pd.concat([df_concat_tmp, df_ac_trigram_pmi_distribution], axis=1)
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: all
# notebook_metadata_filter: all,-language_info
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + trusted=true
import pandas as pd
import re
import matplotlib.pyplot as plt
import numpy as np
import ast
from lib.functions_data import *
# + trusted=true
import sys
from pathlib import Path
import os
cwd = os.getcwd()
parent = str(Path(cwd).parents[0])
sys.path.append(parent)
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# -
# To avoid pulling the full dataset down each time we re-run the notebook, a CSV of the cut-down dataset is saved for easier reloading.
# + trusted=true
#Checking for the cut of the full dataset and creating it if it doesn't exist:
try:
dec = pd.read_csv(parent + '/data/dec_euctr_extract.csv').drop('Unnamed: 0', axis=1)
except FileNotFoundError:
cols = ['eudract_number_with_country', 'date_of_competent_authority_decision',
'clinical_trial_type', 'national_competent_authority', 'eudract_number',
'date_on_which_this_record_was_first_entered_in_the_eudract_data',
'trial_status', 'date_of_the_global_end_of_the_trial', 'trial_results']
#You can use this URL if you want to download the full raw data
data_link = 'https://www.dropbox.com/s/4qt0msiipyn7crm/euctr_euctr_dump-2020-12-03-095517.csv.zip?dl=1'
dec = pd.read_csv(data_link, compression='zip', low_memory=False, usecols=cols)
dec.to_csv(parent + '/data/dec_euctr_extract.csv')
#This is additional data we collect from the results page we need for certain analyses
results_info = pd.read_csv(parent + '/data/euctr_data_quality_results_scrape_dec_2020.csv')
results_info['trial_start_date'] = pd.to_datetime(results_info.trial_start_date)
# + trusted=true
#Quick look at the spread of trial statuses on the EUCTR
dec.trial_status.value_counts(dropna=False)
# -
# The "date_of_competent_authority_decision" field has 2 nonsensical year values in which the correct value can reasonably be derived from context. We fix those below:
#
# https://www.clinicaltrialsregister.eu/ctr-search/trial/2009-016759-22/DK
#
# https://www.clinicaltrialsregister.eu/ctr-search/trial/2006-006947-30/FR
# + trusted=true
ind = dec[dec.date_of_competent_authority_decision.notnull() &
dec.date_of_competent_authority_decision.str.contains('210')].index
ind = ind.to_list()[0]
ind_2 = dec[dec.date_of_competent_authority_decision.notnull() &
dec.date_of_competent_authority_decision.str.contains('2077')].index
ind_2 = ind_2.to_list()[0]
dec.at[ind, 'date_of_competent_authority_decision'] = '2010-06-18'
dec.at[ind_2, 'date_of_competent_authority_decision'] = '2007-04-05'
# + trusted=true
#get rid of all protocols from non EU/EEA countries
dec_filt = dec[dec.clinical_trial_type != 'Outside EU/EEA'].reset_index(drop=True)
#lets see how many that is:
print(len(dec) - len(dec_filt))
# + trusted=true
dec_ctas = dec[['eudract_number', 'eudract_number_with_country']].groupby('eudract_number').count()['eudract_number_with_country']
print(f'There are {len(dec_ctas)} registered trials and {dec_ctas.sum()} CTAs including non-EU/EEA CTAs')
# + trusted=true
decf_ctas = dec_filt[['eudract_number', 'eudract_number_with_country']].groupby('eudract_number').count()['eudract_number_with_country']
print(f'There are {len(decf_ctas)} registered trials and {decf_ctas.sum()} CTAs excluding non-EU/EEA CTAs')
# + trusted=true
#Making dates into dates and adding a column of just the "Year" for relevant dates
dec_filt['date_on_which_this_record_was_first_entered_in_the_eudract_data'] = pd.to_datetime(dec_filt['date_on_which_this_record_was_first_entered_in_the_eudract_data'])
dec_filt['entered_year'] = dec_filt['date_on_which_this_record_was_first_entered_in_the_eudract_data'].dt.year
dec_filt['date_of_competent_authority_decision'] = pd.to_datetime(dec_filt['date_of_competent_authority_decision'])
dec_filt['approved_year'] = dec_filt['date_of_competent_authority_decision'].dt.year
# + trusted=true
#Creating a copy of the original dataset we can mess with and
#renaming columns to better variable names
analysis_df = dec_filt.copy()
analysis_df.columns = ['eudract_number_country',
'approved_date',
'clinical_trial_type',
'nca',
'eudract_number',
'date_entered',
'trial_status',
'completion_date',
'trial_results',
'entered_year',
'approved_year']
#And update the NCA names to the more accurate recent names
analysis_df['nca'] = analysis_df['nca'].replace(nca_name_mapping)
# + trusted=true
#Table 1
analysis_df[['nca', 'eudract_number_country']].groupby('nca').count()
# + trusted=true
#You can reproduce the data on the earliest registered protocol for each country by running this cell
#with the appropriate country abbreviation. For example, to get the date for Italy:
print(earliest_record_check(analysis_df, 'Italy - AIFA'))
#Uncomment this to get the date for all countries at once
#for abrev in country_abrevs.keys():
# print(f'Country: {abrev}\nEarliest record date: {earliest_record_check(dec_filt, abrev)}')
# + trusted=true
#lastly, it is helpful to have the country names in various orders
ordered_countries_original = list(dec_filt.national_competent_authority.value_counts().index)
ordered_countries_new = list(analysis_df.nca.value_counts().index)
# -
# # Registrations Over Time
# + trusted=true
reg_df = analysis_df[['eudract_number', 'nca', 'date_entered', 'entered_year', 'approved_date', 'approved_year']].reset_index(drop=True)
reg_df.head()
# + trusted=true
#Data for Overall Trend in Registrations
grouped_overall = reg_df[['eudract_number']].groupby([reg_df.entered_year]).count()
earliest_entered = reg_df[['eudract_number', 'date_entered']].groupby('eudract_number', as_index=False).min()
earliest_entered['year'] = earliest_entered.date_entered.dt.year
unique_trials = earliest_entered[['eudract_number', 'year']].groupby('year').count()
# + trusted=true
fig, ax = plt.subplots(figsize = (12,6), dpi=400)
grouped_overall[(grouped_overall.index > 2004) & (grouped_overall.index < 2020)].plot(ax=ax, legend=False, lw=2,
marker='.', markersize=12)
unique_trials[(unique_trials.index > 2004) & (unique_trials.index < 2020)].plot(ax=ax, legend=False, grid=True,
lw=2, marker='^', markersize=10)
ax.legend(['Total CTAs', 'Unique Trials'], bbox_to_anchor = (1, 1))
ax.set_xticks(range(2005, 2020))
ax.set_yticks(range(0,7500, 500))
plt.xlabel('CTA Entry Year', labelpad=10)
plt.ylabel('Records Entered')
plt.title('Trend in new CTA and Trial Registration on the EUCTR', pad=10)
#fig.savefig(parent + '/data/Figures/fig_s1.jpg', bbox_inches='tight', dpi=400)
fig.show()
# -
# Now we break the data down further into quarters and years for more detailed analysis, graphing only the years for which we have full EUCTR data (2005-2019).
# + trusted=true
grouped = reg_df[['eudract_number']].groupby([reg_df.nca, pd.PeriodIndex(reg_df.date_entered, freq='Q')]).count()
get_index = reg_df[['eudract_number']].groupby(pd.PeriodIndex(reg_df.date_entered, freq='Q')).count()
quarters = list(get_index.index)
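# -
# The helper zero_out_dict used in the cells below comes from lib.functions_data, which is not shown in this notebook. As an assumption, the sketch below illustrates what it is taken to do (ensure every period appears in the counts, with missing periods set to zero); the real helper may differ in detail.
# + trusted=true
#Hypothetical sketch of zero_out_dict (assumption; the actual implementation lives in lib.functions_data)
def zero_out_dict_sketch(count_dict, all_periods):
    #return a dict with an entry for every period, filling periods missing from count_dict with 0
    return {period: count_dict.get(period, 0) for period in all_periods}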
# + trusted=true
grouped_2 = reg_df[['eudract_number']].groupby([reg_df.nca, pd.PeriodIndex(reg_df.date_entered, freq='Y')]).count()
get_index = reg_df[['eudract_number']].groupby(pd.PeriodIndex(reg_df.date_entered, freq='Y')).count()
years = list(get_index.index)
# + trusted=true
grouped_year = reg_df[['eudract_number']].groupby([reg_df.nca, reg_df.entered_year]).count()
grouped_year_2 = reg_df[['eudract_number']].groupby([reg_df.nca, reg_df.approved_year]).count()
# + trusted=true
fig, axes = plt.subplots(figsize = (20, 16), nrows=7, ncols=4, dpi=400)
#fig.suptitle("Cumulative trial registrations by NCA", y=1.02, fontsize=23)
fig.tight_layout()
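#temporarily silence pandas' chained-assignment warning while the per-country slices are modified inside the loop; it is set back to 'warn' after the loop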
pd.set_option('mode.chained_assignment', None)
for x, y in enumerate(fig.axes):
country = grouped.loc[ordered_countries_new[x]]
first_reporting_quarter = country[country.eudract_number > 0].index.min()
adjusted_data = zero_out_dict(country.to_dict()['eudract_number'], quarters)
data = pd.DataFrame({'eudract_number': adjusted_data})
x_ticks = data.index
#Get rid of leading zeros
data['eudract_number'] = np.where(data.index < first_reporting_quarter, np.nan, data.eudract_number)
consolidated = data[(data.index.year > 2004) & (data.index.year < 2020) & data.eudract_number.notnull()]
leading_zero_check = True
i=0
while leading_zero_check:
if consolidated.eudract_number[i] == 0:
consolidated.at[consolidated.index[i], 'eudract_number'] = np.nan
i+=1
else:
leading_zero_check = False
consolidated = consolidated[consolidated.eudract_number.notnull()]
cumulative = consolidated.cumsum()
# Plotting the country trend
cumulative.plot(ax=y, lw=4, sharex='col',legend=False)
#Plotting the reference line
cumulative.loc[[cumulative.index[0], cumulative.index[-1]]].plot(ax=y, legend=False, lw=2, style='--')
y.set_title(ordered_countries_new[x], pad=6, fontsize=16)
y.set_axisbelow(True)
y.grid(zorder=0)
y.set_xlabel('')
y.set_xlim(x_ticks[0], x_ticks[-1])
pd.set_option('mode.chained_assignment', 'warn')
fig.text(-0.015, 0.5, 'Cumulative Trial Count', ha='center', va='center', rotation='vertical', fontsize=20)
fig.text(.5, -0.02, 'Record Entry Year', ha='center', va='center', fontsize=20)
plt.legend(['Cumulative Count of New CTA Registrations', 'Stable Trend Line'],
loc='upper center', ncol=5, bbox_to_anchor = (-1.2, -.55), fontsize=15)
plt.show()
#fig.savefig(parent + '/data/Figures/fig_1.jpg', bbox_inches='tight', dpi=400)
#fig.savefig(parent + '/data/Figures/fig_1.eps', bbox_inches='tight', dpi=400)
# + trusted=true
#Reduced Figure
fig, axes = plt.subplots(figsize = (20, 3), nrows=1, ncols=5, dpi=400)
#fig.suptitle("Cumulative trial registrations by NCA", y=1.02, fontsize=23)
fig.tight_layout()
included_countries = ['UK - MHRA', 'France - ANSM', 'Norway - NoMA', 'Romania - ANMDM', 'Italy - AIFA']
pd.set_option('mode.chained_assignment', None)
for x, y in enumerate(fig.axes):
country = grouped.loc[included_countries[x]]
first_reporting_quarter = country[country.eudract_number > 0].index.min()
adjusted_data = zero_out_dict(country.to_dict()['eudract_number'], quarters)
data = pd.DataFrame({'eudract_number': adjusted_data})
x_ticks = data.index
#Get rid of leading zeros
data['eudract_number'] = np.where(data.index < first_reporting_quarter, np.nan, data.eudract_number)
consolidated = data[(data.index.year > 2004) & (data.index.year < 2020) & data.eudract_number.notnull()]
leading_zero_check = True
i=0
while leading_zero_check:
if consolidated.eudract_number[i] == 0:
consolidated.at[consolidated.index[i], 'eudract_number'] = np.nan
i+=1
else:
leading_zero_check = False
consolidated = consolidated[consolidated.eudract_number.notnull()]
cumulative = consolidated.cumsum()
# Plotting the country trend
cumulative.plot(ax=y, lw=4, sharex='col',legend=False)
#Plotting the reference line
cumulative.loc[[cumulative.index[0], cumulative.index[-1]]].plot(ax=y, legend=False, lw=2, style='--')
y.set_title(included_countries[x], pad=6, fontsize=16)
y.set_axisbelow(True)
y.grid(zorder=0)
y.set_xlabel('')
y.set_xlim(x_ticks[0], x_ticks[-1])
pd.set_option('mode.chained_assignment', 'warn')
fig.text(-0.015, 0.5, 'Cumulative Trial Count', ha='center', va='center', rotation='vertical', fontsize=15)
fig.text(.5, -0.04, 'Record Entry Year', ha='center', va='center', fontsize=20)
plt.legend(['Cumulative Count of Protocol Registrations', 'Stable Trend Line'],
loc='upper center', ncol=5, bbox_to_anchor = (-1.75, -.3), fontsize=15)
plt.show()
#fig.savefig(parent + '/data/Figures/fig_1.jpg', bbox_inches='tight', dpi=400)
#fig.savefig(parent + '/data/Figures/fig_1.eps', bbox_inches='tight', dpi=400)
# + trusted=true
fig, axes = plt.subplots(figsize = (20, 16), nrows=7, ncols=4, dpi=400)
fig.suptitle("Trends in trial registrations by NCA by Year", y=1.02, fontsize=23)
fig.tight_layout()
pd.set_option('mode.chained_assignment', None)
for x, y in enumerate(fig.axes):
country = grouped_2.loc[ordered_countries_new[x]]
first_reporting_year = country[country.eudract_number > 0].index.min()
adjusted_data = zero_out_dict(country.to_dict()['eudract_number'], years)
data = pd.DataFrame({'eudract_number': adjusted_data})
x_ticks = data.index
#Get rid of leading zeros
data['eudract_number'] = np.where(data.index < first_reporting_year, np.nan, data.eudract_number)
consolidated = data[(data.index.year > 2004) & (data.index.year < 2020) & data.eudract_number.notnull()]
leading_zero_check = True
i=0
while leading_zero_check:
if consolidated.eudract_number[i] == 0:
consolidated.at[consolidated.index[i], 'eudract_number'] = np.nan
i+=1
else:
leading_zero_check = False
consolidated = consolidated[consolidated.eudract_number.notnull()]
consolidated.plot(ax=y, lw=2, sharex='col',legend=False)
#if ordered_countries_original[x] == 'Slovenia - JAZMP':
# y.set_yticks(range(0,16,3))
y.set_title(ordered_countries_new[x], pad=6, fontsize=16)
y.set_axisbelow(True)
y.grid(zorder=0)
y.set_xlabel('')
y.set_xlim(x_ticks[0], x_ticks[-1])
y.set_ylim(ymin=0)
pd.set_option('mode.chained_assignment', 'warn')
fig.text(-0.015, 0.5, 'Trial Count', ha='center', va='center', rotation='vertical', fontsize=20)
fig.text(.5, -0.02, 'Record Entry Year', ha='center', va='center', fontsize=20)
plt.show()
#fig.savefig(parent + '/data/Figures/fig_s2.jpg', bbox_inches='tight', dpi=400)
# + trusted=true
fig, axes = plt.subplots(figsize = (20, 3), nrows=1, ncols=5, dpi=400)
#fig.suptitle("Trends in trial registrations by NCA by Year", y=1.02, fontsize=23)
fig.tight_layout()
included_countries = ['UK - MHRA', 'France - ANSM', 'Norway - NoMA', 'Romania - ANMDM', 'Italy - AIFA']
pd.set_option('mode.chained_assignment', None)
for x, y in enumerate(fig.axes):
country = grouped_2.loc[included_countries[x]]
first_reporting_year = country[country.eudract_number > 0].index.min()
adjusted_data = zero_out_dict(country.to_dict()['eudract_number'], years)
data = pd.DataFrame({'eudract_number': adjusted_data})
x_ticks = data.index
#Get rid of leading zeros
data['eudract_number'] = np.where(data.index < first_reporting_year, np.nan, data.eudract_number)
consolidated = data[(data.index.year > 2004) & (data.index.year < 2020) & data.eudract_number.notnull()]
leading_zero_check = True
i=0
while leading_zero_check:
if consolidated.eudract_number[i] == 0:
consolidated.at[consolidated.index[i], 'eudract_number'] = np.nan
i+=1
else:
leading_zero_check = False
consolidated = consolidated[consolidated.eudract_number.notnull()]
consolidated.plot(ax=y, lw=2, sharex='col',legend=False)
#if ordered_countries_original[x] == 'Slovenia - JAZMP':
# y.set_yticks(range(0,16,3))
y.set_title(included_countries[x], pad=6, fontsize=16)
y.set_axisbelow(True)
y.grid(zorder=0)
y.set_xlabel('')
y.set_xlim(x_ticks[0], x_ticks[-1])
y.set_ylim(ymin=0)
pd.set_option('mode.chained_assignment', 'warn')
fig.text(-0.015, 0.5, 'Trial Count', ha='center', va='center', rotation='vertical', fontsize=20)
fig.text(.5, -0.02, 'Record Entry Year', ha='center', va='center', fontsize=20)
plt.show()
#fig.savefig(parent + '/data/Figures/fig_s2.jpg', bbox_inches='tight', dpi=400)
# -
# For comparison, here are the raw trends in new registrations by quarter.
# + trusted=true
fig, axes = plt.subplots(figsize = (20, 16), nrows=7, ncols=4, dpi=400)
fig.suptitle("Trends in trial registrations by NCA by Quarter", y=1.02, fontsize=23)
fig.tight_layout()
pd.set_option('mode.chained_assignment', None)
for x, y in enumerate(fig.axes):
country = grouped.loc[ordered_countries_new[x]]
first_reporting_quarter = country[country.eudract_number > 0].index.min()
adjusted_data = zero_out_dict(country.to_dict()['eudract_number'], quarters)
data = pd.DataFrame({'eudract_number': adjusted_data})
x_ticks = data.index
#Get rid of leading zeros
data['eudract_number'] = np.where(data.index < first_reporting_quarter, np.nan, data.eudract_number)
consolidated = data[(data.index.year > 2004) & (data.index.year < 2020) & data.eudract_number.notnull()]
leading_zero_check = True
i=0
while leading_zero_check:
if consolidated.eudract_number[i] == 0:
consolidated.at[consolidated.index[i], 'eudract_number'] = np.nan
i+=1
else:
leading_zero_check = False
consolidated = consolidated[consolidated.eudract_number.notnull()]
consolidated.plot(ax=y, lw=2, sharex='col',legend=False)
if ordered_countries_original[x] == 'Slovenia - JAZMP':
y.set_yticks(range(0,16,3))
y.set_title(ordered_countries_new[x], pad=6, fontsize=16)
y.set_axisbelow(True)
y.grid(zorder=0)
y.set_xlabel('')
y.set_xlim(x_ticks[0], x_ticks[-1])
pd.set_option('mode.chained_assignment', 'warn')
fig.text(-0.015, 0.5, 'Trial Count', ha='center', va='center', rotation='vertical', fontsize=20)
fig.text(.5, -0.02, 'Record Entry Year', ha='center', va='center', fontsize=20)
plt.show()
#fig.savefig(parent + '/data/Figures/fig_s2.jpg', bbox_inches='tight', dpi=400)
# -
# Lastly, we can sense-check that these dates make sense by comparing the year the CTA was entered to the year the NCA gave approval. When we graph them on top of each other, we can see that the overall trends align very well, though approvals are slightly less susceptible to large jumps.
# + trusted=true
grouped_year = reg_df[['eudract_number']].groupby([reg_df.nca, reg_df.entered_year]).count()
grouped_year_2 = reg_df[['eudract_number']].groupby([reg_df.nca, reg_df.approved_year]).count()
# -
# Here is the trend by year, not quarter, but we do not include this graph in the paper as it is duplicated in the next graph.
# + trusted=true
fig, axes = plt.subplots(figsize = (20, 16), nrows=7, ncols=4, dpi=300)
fig.suptitle("Trends in trial registrations by NCA", y=1.02, fontsize=23)
fig.tight_layout()
pd.set_option('mode.chained_assignment', None)
for x, y in enumerate(fig.axes):
country = grouped_year.loc[ordered_countries_new[x]]
first_reporting_quarter = country[country.eudract_number > 0].index.min()
adjusted_data = zero_out_dict(country.to_dict()['eudract_number'], range(2004, 2020))
data = pd.DataFrame({'eudract_number': adjusted_data})
x_ticks = data.index
#Get rid of leading zeros
data['eudract_number'] = np.where(data.index < first_reporting_quarter, np.nan, data.eudract_number)
consolidated = data[(data.index > 2004) & (data.index < 2020) & data.eudract_number.notnull()]
leading_zero_check = True
i=0
while leading_zero_check:
if consolidated.eudract_number.values[i] == 0:
consolidated.at[consolidated.index[i], 'eudract_number'] = np.nan
i+=1
else:
leading_zero_check = False
consolidated = consolidated[consolidated.eudract_number.notnull()]
consolidated.plot(ax=y, lw=2, sharex='col',legend=False)
y.set_title(ordered_countries_new[x], pad=6, fontsize=16)
y.set_axisbelow(True)
y.grid(zorder=0)
y.set_xlabel('')
y.set_xlim(x_ticks[0], x_ticks[-1])
pd.set_option('mode.chained_assignment', 'warn')
fig.text(-0.015, 0.5, 'Trial Count', ha='center', va='center', rotation='vertical', fontsize=20)
fig.text(.5, -0.02, 'Record Entry Year', ha='center', va='center', fontsize=20)
plt.legend(['First Entered Date', 'NCA Approval Date'],
loc='upper center', ncol=5, bbox_to_anchor = (-1.2, -.5), fontsize=15)
plt.show()
# + trusted=true
fig, axes = plt.subplots(figsize = (20, 16), nrows=7, ncols=4, dpi=400)
fig.suptitle("Trends in trial registrations by NCA", y=1.02, fontsize=23)
fig.tight_layout()
pd.set_option('mode.chained_assignment', None)
for x, y in enumerate(fig.axes):
country = grouped_year.loc[ordered_countries_new[x]]
country_2 = grouped_year_2.loc[ordered_countries_new[x]]
first_reporting_quarter = country[country.eudract_number > 0].index.min()
adjusted_data = zero_out_dict(country.to_dict()['eudract_number'], range(2004, 2020))
adjusted_data_2 = zero_out_dict(country_2.to_dict()['eudract_number'], range(2004, 2020))
data = pd.DataFrame({'eudract_number': adjusted_data})
data_2 = pd.DataFrame({'eudract_number': adjusted_data_2})
x_ticks = data.index
#Get rid of leading zeros
data['eudract_number'] = np.where(data.index < first_reporting_quarter, np.nan, data.eudract_number)
data_2['eudract_number'] = np.where(data_2.index < first_reporting_quarter, np.nan, data_2.eudract_number)
consolidated = data[(data.index > 2004) & (data.index < 2020) & data.eudract_number.notnull()]
consolidated_2 = data_2[(data_2.index > 2004) & (data_2.index < 2020) & data_2.eudract_number.notnull()]
leading_zero_check = True
i=0
while leading_zero_check:
if consolidated.eudract_number.values[i] == 0:
consolidated.at[consolidated.index[i], 'eudract_number'] = np.nan
i+=1
else:
leading_zero_check = False
    #The flag and counter must be reset so the same leading-zero trim also runs on the approval-date series
    leading_zero_check = True
    i=0
    while leading_zero_check:
        if consolidated_2.eudract_number.values[i] == 0:
            consolidated_2.at[consolidated_2.index[i], 'eudract_number'] = np.nan
            i+=1
        else:
            leading_zero_check = False
consolidated = consolidated[consolidated.eudract_number.notnull()]
consolidated_2 = consolidated_2[consolidated_2.eudract_number.notnull()]
consolidated.plot(ax=y, lw=2, sharex='col',legend=False)
consolidated_2.plot(ax=y, lw=2, sharex='col',legend=False)
y.set_title(ordered_countries_new[x], pad=6, fontsize=16)
y.set_axisbelow(True)
y.grid(zorder=0)
y.set_xlabel('')
y.set_xlim(x_ticks[0], x_ticks[-1])
fig.text(-0.015, 0.5, 'Trial Count', ha='center', va='center', rotation='vertical', fontsize=20)
fig.text(.5, -0.02, 'Record Entry Year', ha='center', va='center', fontsize=20)
pd.set_option('mode.chained_assignment', 'warn')
plt.legend(['First Entered Date', 'NCA Approval Date'],
loc='upper center', ncol=5, bbox_to_anchor = (-1.2, -.5), fontsize=15)
plt.show()
#fig.savefig(parent + '/data/Figures/fig_s3.jpg', bbox_inches='tight', dpi=400)
# -
# # Cross-checking countries listed in results with public CTAs
# + trusted=true
results_info_filt = results_info[results_info.recruitment_countries.notnull()].reset_index(drop=True)
# + trusted=true
protocols = results_info_filt.trial_countries.to_list()
results_countries = results_info_filt.recruitment_countries.to_list()
start_date = results_info_filt.trial_start_date.to_list()
trial_ids = results_info_filt.trial_id.to_list()
zipped_cats = zip(trial_ids, protocols, results_countries, start_date)
results_list = compare_enrollment_registration(zipped_cats)
missing_protocols = pd.DataFrame(results_list)
missing_protocols['total_missing'] = missing_protocols.unaccounted.apply(len)
# + trusted=true
acct = missing_protocols.accounted.to_list()
unacct = missing_protocols.unaccounted.to_list()
# + trusted=true
accounted_count = {}
unaccounted_count = {}
for ac, un in zip(acct, unacct):
if ac:
for a in ac:
accounted_count[a] = accounted_count.get(a, 0) + 1
if un:
for u in un:
unaccounted_count[u] = unaccounted_count.get(u, 0) + 1
# + trusted=true
accounted_series = pd.Series(accounted_count)
unaccounted_series = pd.Series(unaccounted_count)
reg_check_no_buffer = accounted_series.to_frame().join(unaccounted_series.to_frame(), how='outer', rsuffix='unac').rename({'0': 'accounted', '0unac': 'unaccounted'}, axis=1).fillna(0)
# + trusted=true
reg_check_no_buffer['total'] = reg_check_no_buffer['accounted'] + reg_check_no_buffer['unaccounted']
reg_check_no_buffer['acct_prct'] = round((reg_check_no_buffer['accounted'] / reg_check_no_buffer['total']) * 100, 2)
reg_check_no_buffer['unacct_prct'] = round((reg_check_no_buffer['unaccounted'] / reg_check_no_buffer['total']) * 100, 2)
reg_check_no_buffer.head()
# + trusted=true
(reg_check_no_buffer.total.sum() - reg_check_no_buffer.unaccounted.sum()) / reg_check_no_buffer.total.sum()
# + trusted=true
reg_check_no_buffer.loc['Romania']
# + trusted=true
reg_check_no_buffer.loc['France']
# + trusted=true
reg_check_no_buffer.loc['Poland']
# + trusted=true
fig, ax = plt.subplots(figsize = (20,10), dpi=400)
sorted_countries = reg_check_no_buffer.sort_values(by='total')
sorted_countries[['accounted', 'unaccounted']].plot.bar(stacked=True, ax=ax,
legend=False, width=.75)
ax.set_axisbelow(True)
ax.grid(axis='y', zorder=0)
rects = ax.patches[0:30]
for rect, label, y_off in zip(rects, sorted_countries.acct_prct.values, sorted_countries.total.values):
ax.text(rect.get_x() + rect.get_width() / 2, y_off + 25, str(label) + '%',
ha='center', va='bottom', fontsize=9)
for patch in ax.patches[30:]:
patch.set_hatch('/')
ax.legend(['Protocol Available', 'Protocol Unavailable'],
loc='upper left', fontsize=15)
#plt.title('CTA Availability for Reported Trials By Country', pad=10, fontsize=23)
plt.ylabel('Trial Count', fontsize=15, labelpad=6)
plt.show()
#fig.savefig(parent + '/data/Figures/fig_2.jpg', bbox_inches='tight', dpi=400)
#fig.savefig(parent + '/data/Figures/fig_2.eps', bbox_inches='tight', dpi=400)
# + trusted=true
min_start_date = analysis_df[['eudract_number', 'entered_year']].groupby('eudract_number', as_index=False).min()
by_year_df = missing_protocols.merge(min_start_date, how='left', left_on='trial_id', right_on='eudract_number').drop('eudract_number', axis=1)
# + trusted=true
#Displays the summary table computed in the next cell
prct_missing
# + trusted=true
fig, ax = plt.subplots(figsize=(24,12), dpi = 400)
to_graph = by_year_df[['entered_year', 'total_missing']].groupby('entered_year').sum()
to_graph.index = to_graph.index.astype(int)
prct_missing = grouped_overall.join(to_graph)
prct_missing['missing_cta_prct'] = (prct_missing.total_missing / prct_missing.eudract_number) * 100
labels = [str(x) for x in range(2004,2021)]
#I have no idea why but I can only get this graph to work with plt.errorbar
#l1 = plt.errorbar(prct_missing.index,prct_missing.missing_cta_prct, lw=5, color='orange')
l1 = ax.plot(prct_missing.index,prct_missing.missing_cta_prct, marker='.', markersize=25, lw=5, color='orange', label='% Missing')
plt.tick_params(axis='both', which='major', labelsize=15)
#plt.ylabel('# of Missing Trials', fontsize=25, labelpad=10)
plt.xticks(rotation=25)
plt.title("Missing CTAs by Trial Entry Year", pad = 20, fontsize = 25)
ax.set_ylim([0,10])
ax.set_ylabel('# of Missing Trials', fontsize=20, labelpad=50)
ax.set_xlabel('Record Entry Year', fontsize=20, labelpad=10)
ax2 = plt.twinx()
ax2.set_axisbelow(True)
#ax.yaxis.grid(linestyle='--', linewidth=.5, zorder=ax.get_zorder()-10)
ax2.bar(to_graph.index, to_graph.total_missing, tick_label=labels)
ax2.tick_params(axis='both', which='major', labelsize=15)
ax2.set_ylabel('% Missing', fontsize=20, labelpad=30)
ax.yaxis.tick_right()
ax2.yaxis.tick_left()
ax.set_zorder(ax2.get_zorder()+1)
ax.patch.set_visible(False)
ax.legend(fontsize=20)
plt.show()
#fig.savefig(parent + '/data/Figures/fig_s4.jpg', bbox_inches='tight', dpi=400)
# -
# As a quick sense check, we can use longer lags from the first available protocol for a country to see if it makes a difference. This can be adjusted using the offset parameter of the compare_enrollment_registration function. However, Figure 4 above also makes the point that missing CTAs are not all clustered around the years in which countries first connected to the EMA system.
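# A minimal sketch of the kind of date buffer the offset parameter is assumed to apply (the helper below is hypothetical and only illustrates the idea; it is not the project's implementation):

# + trusted=true
from dateutil.relativedelta import relativedelta

def protocol_expected(first_protocol_date, trial_start_date, offset=0):
    #Hypothetical: only expect a CTA from a recruitment country if the trial
    #started at least `offset` months after that country's first protocol appeared
    return trial_start_date >= first_protocol_date + relativedelta(months=offset)
# -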
# + trusted=true
protocols = results_info_filt.trial_countries.to_list()
results_countries = results_info_filt.recruitment_countries.to_list()
start_date = results_info_filt.trial_start_date.to_list()
trial_ids = results_info_filt.trial_id.to_list()
zipped_cats = zip(trial_ids, protocols, results_countries, start_date)
results_sens = compare_enrollment_registration(zipped_cats, offset=6)
missing_sensitivity = pd.DataFrame(results_sens)
# + trusted=true
acct_sens = missing_sensitivity.accounted.to_list()
unacct_sens = missing_sensitivity.unaccounted.to_list()
accounted_count_sens = {}
unaccounted_count_sens = {}
for ac, un in zip(acct_sens, unacct_sens):
if ac:
for a in ac:
accounted_count_sens[a] = accounted_count_sens.get(a, 0) + 1
if un:
for u in un:
unaccounted_count_sens[u] = unaccounted_count_sens.get(u, 0) + 1
accounted_series_sens = pd.Series(accounted_count_sens)
unaccounted_series_sens = pd.Series(unaccounted_count_sens)
# + trusted=true
reg_check_buffer = accounted_series_sens.to_frame().join(unaccounted_series_sens.to_frame(), how='outer', rsuffix='unac').rename({'0': 'accounted', '0unac': 'unaccounted'}, axis=1).fillna(0)
reg_check_buffer['total'] = reg_check_buffer['accounted'] + reg_check_buffer['unaccounted']
reg_check_buffer['acct_prct'] = round((reg_check_buffer['accounted'] / reg_check_buffer['total']) * 100, 2)
reg_check_buffer.head()
# -
# # Trial Status By Country Over Time
# + trusted=true
status_df = analysis_df[['eudract_number', 'nca', 'entered_year', 'trial_status']].reset_index(drop=True)
status_df['trial_status'] = status_df.trial_status.fillna('Missing')
status_group = status_df.groupby(['nca', 'entered_year', 'trial_status'], as_index=False).count()
# + trusted=true
ordered_countries = list(status_group[['nca', 'eudract_number']].groupby('nca').sum().sort_values(by='eudract_number', ascending=False).index)
#Removing these for low number of trials
ordered_countries.remove('Malta - ADM')
ordered_countries.remove('Luxembourg - Ministry of Health')
ordered_countries.remove('Cyprus - MoH-Ph.S')
# -
# Here we create our trial status categories
# + trusted=true
country_status = {}
for c in status_group.nca.unique():
country_dict = {}
country = status_group[status_group.nca == c]
completed = country[country.trial_status.isin(['Completed', 'Prematurely Ended'])][['entered_year', 'eudract_number']].groupby('entered_year').sum()
comp_dict = completed.to_dict()['eudract_number']
country_dict['completed'] = zero_out_dict(comp_dict, range(2004,2021))
ongoing = country[country.trial_status.isin(['Ongoing', 'Restarted'])][['entered_year', 'eudract_number']].groupby('entered_year').sum()
ong_dict = ongoing.to_dict()['eudract_number']
country_dict['ongoing'] = zero_out_dict(ong_dict, range(2004,2021))
missing = country[country.trial_status == 'Missing']
missing_dict = pd.Series(missing.eudract_number.values, index=missing.entered_year).to_dict()
country_dict['missing'] = zero_out_dict(missing_dict, range(2004,2021))
other = country[~country.trial_status.isin(['Completed', 'Ongoing', 'Restarted', 'Prematurely Ended', 'Missing'])][['entered_year', 'eudract_number']].groupby('entered_year').sum()
other_dict = other.to_dict()['eudract_number']
country_dict['other'] = zero_out_dict(other_dict, range(2004,2021))
country_status[c] = country_dict
# + trusted=true
#Shaping up the final data so it's easy to use
regrouped = pd.DataFrame.from_dict(country_status, orient='index').stack().to_frame()[0].apply(pd.Series).reindex(
['completed', 'ongoing', 'other', 'missing'], level=1)
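# -
# For reference, the reshape above is assumed to yield a frame indexed by (NCA, status category) with one column per year; the same idiom on a toy dictionary (illustrative data only):

# + trusted=true
toy = {'NCA_A': {'completed': {2004: 1, 2005: 2}, 'ongoing': {2004: 0, 2005: 1}}}
pd.DataFrame.from_dict(toy, orient='index').stack().to_frame()[0].apply(pd.Series)
# -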
# + trusted=true
#A glance at the overall trend
grouped_total = regrouped.droplevel(level=0).groupby(regrouped.droplevel(level=0).index).sum()
title='Trial Status of CTAs by Record Entry Date'
fig, ax = plt.subplots(figsize = (10,5), dpi=400)
grouped_total.reindex(['completed', 'ongoing', 'missing', 'other']).T.plot.bar(stacked=True, width=.9, ax=ax,
rot=45, title = title, legend=False)
ax.set_axisbelow(True)
ax.grid(axis='y', zorder=0)
ax.set_xlabel('Record Entry Year', labelpad=10)
ax.set_ylabel('CTA Count')
plt.legend(['Completed', 'Ongoing', 'Other', 'Missing'],
loc='upper center', ncol=5, bbox_to_anchor = (0.5, -0.2), fontsize=12)
plt.show()
#fig.savefig(parent + '/data/Figures/fig_s5.jpg', bbox_inches='tight', dpi=400)
# +
#A quick look at the trend in ongoing trials over time
# + trusted=true
g_total = grouped_total.T
g_total['prct'] = g_total['completed'] / (g_total['ongoing'] + g_total['completed'] + g_total['missing'] + g_total['other'])
g_total
# + trusted=true
grouped_total.sum(axis=1)
# + trusted=true
63434/97227
# + trusted=true
overall_prct_dict = {}
for x in ordered_countries:
g = regrouped.loc[[x]].droplevel(level=0).T
num = g.completed.sum()
denom = num + g.ongoing.sum() + g.missing.sum() + g.other.sum()
overall_prct_dict[x] = num / denom
rankings_completed = pd.Series(overall_prct_dict).sort_values(ascending=False)
rankings_completed
# + trusted=true
#And now a look at the trend for each NCA
fig, axes = plt.subplots(figsize = (20, 16), nrows=7, ncols=4, dpi=400)
#fig.suptitle("Trial Status of Protocols by NCA", y=1.02, fontsize=23)
fig.tight_layout()
for x, y in enumerate(fig.axes):
regrouped.loc[[rankings_completed.index[x]]].droplevel(level=0).T.plot.bar(stacked=True, ax=y, width=.85,
legend=False, sharex='col', rot=45)
y.set_title(rankings_completed.index[x], pad=6, fontsize=16)
y.set_axisbelow(True)
y.grid(axis='y', zorder=0)
y.set_xlabel('')
hatches = ['','//', 'oo', '\\\\']
counter = 0
h_counter = 0
patch_count = len(y.patches)
for p in y.patches:
p.set_hatch(hatches[h_counter])
counter += 1
if counter == ((patch_count/4) * (h_counter+1)):
h_counter += 1
fig.text(-0.015, 0.5, 'Trial Count', ha='center', va='center', rotation='vertical', fontsize=20)
fig.text(.5, -0.025, 'Record Entry Year', ha='center', va='center', fontsize=20)
plt.legend(['Completed', 'Ongoing', 'Other', 'Missing'],
loc='upper center', ncol=5, bbox_to_anchor = (-1.2, -.55), fontsize=15)
plt.show()
#fig.savefig(parent + '/data/Figures/fig_3.jpg', bbox_inches='tight', dpi=400)
#fig.savefig(parent + '/data/Figures/fig_3.eps', bbox_inches='tight', dpi=400)
# + trusted=true
#Pull out the highlighted countries
fig, axes = plt.subplots(figsize = (20, 3), nrows=1, ncols=4, dpi=400)
#fig.suptitle("Trial Status of Protocols by NCA", y=1.02, fontsize=23)
fig.tight_layout()
included_countries = ['Lithuania - VVKT', 'Belgium - FAMHP', 'Netherlands - CCMO', 'Spain - AEMPS']
for x, y in enumerate(fig.axes):
regrouped.loc[[included_countries[x]]].droplevel(level=0).T.plot.bar(stacked=True, ax=y, width=.85,
legend=False, sharex='col', rot=45)
y.set_title(included_countries[x], pad=6, fontsize=16)
y.set_axisbelow(True)
y.grid(axis='y', zorder=0)
y.set_xlabel('')
hatches = ['','//', 'oo', '\\\\']
counter = 0
h_counter = 0
patch_count = len(y.patches)
for p in y.patches:
p.set_hatch(hatches[h_counter])
counter += 1
if counter == ((patch_count/4) * (h_counter+1)):
h_counter += 1
fig.text(0, 0.5, 'Trial Count', ha='center', va='center', rotation='vertical', fontsize=20)
fig.text(.5, -0.13, 'Record Entry Year', ha='center', va='center', fontsize=20)
plt.legend(['Completed', 'Ongoing', 'Other', 'Missing'],
loc='upper center', ncol=5, bbox_to_anchor = (-1.2, -.38), fontsize=15)
plt.show()
#fig.savefig(parent + '/data/Figures/fig_3.jpg', bbox_inches='tight', dpi=400)
#fig.savefig(parent + '/data/Figures/fig_3.eps', bbox_inches='tight', dpi=400)
# -
# # Conflicting Completion Status
#
# Potentially look at the number of multi-protocol trials that have conflicting trial status.
# + trusted=true
status_by_trial = analysis_df[['eudract_number', 'nca', 'entered_year', 'trial_status']].reset_index(drop=True)
status_by_trial['trial_status'] = status_by_trial.trial_status.fillna('Missing')
# + trusted=true
status_by_trial.head()
# + trusted=true
status_by_trial['trial_status'].value_counts()
# + trusted=true
status_summary = status_by_trial[['eudract_number', 'trial_status']].groupby('eudract_number')['trial_status'].count().to_frame(name='count').join(status_by_trial[['eudract_number', 'trial_status']].groupby('eudract_number')['trial_status'].apply(list).to_frame(name='status'))
# + trusted=true
status_summary['set'] = status_summary['status'].apply(set)
# + trusted=true
multi_status = status_summary[status_summary['count'] > 1].reset_index().set_index('eudract_number')
# + trusted=true
indiv_status = multi_status['set'].to_list()
# + trusted=true
#This counts the number of trials that have an ongoing and a completed status
c = 0
indicator_var = []
for i in indiv_status:
    if ('Ongoing' in i or 'Restarted' in i) and ('Completed' in i or 'Prematurely Ended' in i):
c+=1
indicator_var.append(1)
else:
indicator_var.append(0)
print(c)
# + trusted=true
c/len(indiv_status)
# + trusted=true
group_year = status_by_trial[['eudract_number', 'entered_year']].groupby('eudract_number').max()
# + tags=[] trusted=true
multi_status['conflict'] = indicator_var
year_joined = multi_status.join(group_year, how='left')
year_joined.head()
# + trusted=true
conflict_summary = year_joined[['conflict', 'entered_year']].reset_index(drop=True).groupby('entered_year').agg(['sum', 'count'])
# + trusted=true
conflict_summary.head()
# + trusted=true
conflict_summary['prct'] = conflict_summary['conflict']['sum'] / conflict_summary['conflict']['count']
# + trusted=true
conflict_summary
# + trusted=true
fig, ax = plt.subplots(figsize = (10,5), dpi=400)
plt.plot(conflict_summary.index, conflict_summary['prct'])
plt.show()
# + trusted=true
single_status = status_summary[status_summary['count'] == 1].reset_index().set_index('eudract_number').join(status_by_trial.set_index('eudract_number')[['entered_year']], how='left')
# + trusted=true
def is_ongoing(x):
if "Ongoing" in x:
return 1
else:
return 0
# + trusted=true
single_status['ongoing'] = single_status['status'].apply(is_ongoing)
# + trusted=true
#NOTE: assumed definition, reconstructed from its use below: group single-status CTAs by entry year
single_s_grouped = single_status[['entered_year', 'ongoing']].groupby('entered_year').agg(['sum', 'count'])
single_s_grouped
# + trusted=true
single_s_grouped[single_s_grouped['ongoing']['sum'].index < 2015].sum()
# + trusted=true
6068/16552
# -
# # Missing Completion Dates
# + trusted=true
date_df = analysis_df[['eudract_number', 'nca', 'entered_year', 'trial_status', 'completion_date', 'trial_results']].reset_index(drop=True)
date_df['trial_status'] = date_df.trial_status.fillna('Missing')
date_df['has_completion_date'] = np.where(date_df.completion_date.isna(), 0, 1)
# + trusted=true
only_completed = date_df[date_df.trial_status.isin(['Completed', 'Prematurely Ended'])].reset_index(drop=True)
# + trusted=true
total_completed = only_completed[['nca',
'entered_year',
'has_completion_date']].groupby(['nca',
'entered_year']).count().rename({'has_completion_date': 'denominator'}, axis=1)
total_completed_date = only_completed[['nca', 'entered_year', 'has_completion_date']].groupby(['nca', 'entered_year']).sum().rename({'has_completion_date': 'numerator'}, axis=1)
# + trusted=true
merged_dates = total_completed.join(total_completed_date)
merged_dates['missing_dates'] = merged_dates.denominator - merged_dates.numerator
# + trusted=true
stacked_dates = merged_dates.drop('denominator', axis=1).stack().unstack(1)
# + trusted=true
overall_dates = stacked_dates.droplevel(level=0).groupby(stacked_dates.droplevel(level=0).index).sum()
title='Availability of Completion Dates for Completed CTAs'
fig, ax = plt.subplots(figsize = (10,5), dpi=400)
overall_dates.reindex(['numerator', 'missing_dates']).T.plot.bar(stacked=True, width=.9, ax=ax, legend=False,
rot=45, title=title)
ax.set_axisbelow(True)
ax.grid(axis='y', zorder=0)
ax.set_xlabel('Protocol Record Entry Year', labelpad=10)
ax.set_ylabel('CTA Count')
plt.legend(['Has Date', 'Missing Date'],
loc='upper right', fontsize=12)
plt.show()
#fig.savefig(parent + '/data/Figures/fig_s6.jpg', bbox_inches='tight', dpi=400)
# + trusted=true
dates_trans = overall_dates.T
# + trusted=true
dates_trans['prct'] = dates_trans['numerator'] / (dates_trans['numerator'] + dates_trans['missing_dates'])
# + trusted=true
dates_trans.missing_dates.sum() + dates_trans.numerator.sum()
# + trusted=true
dates_trans.numerator.sum() / (dates_trans.missing_dates.sum() + dates_trans.numerator.sum())
# + trusted=true
dates_trans
# + trusted=true
overall_comp_dict = {}
for x in ordered_countries:
d = stacked_dates.loc[x].T
num = d.numerator.sum()
denom = num + d.missing_dates.sum()
overall_comp_dict[x] = num / denom
rankings_compdate = pd.Series(overall_comp_dict).sort_values(ascending=False)
rankings_compdate
# + trusted=true
fig, axes = plt.subplots(figsize = (20, 16), nrows=7, ncols=4, dpi=400)
#fig.suptitle("Available Completion Dates for Completed CTAs by NCA", y=1.02, fontsize=23)
fig.tight_layout()
for x, y in enumerate(fig.axes):
stacked_dates.loc[[rankings_compdate.index[x]]].droplevel(level=0).T.plot.bar(stacked=True, ax=y, width=.85,
legend=False, sharex='col', rot=45)
y.set_title(rankings_compdate.index[x], pad=6, fontsize=16)
y.set_axisbelow(True)
y.grid(axis='y', zorder=0)
y.set_xlabel('')
hatches = ['','//']
counter = 0
h_counter = 0
patch_count = len(y.patches)
for p in y.patches:
p.set_hatch(hatches[h_counter])
counter += 1
if counter == ((patch_count/2) * (h_counter+1)):
h_counter += 1
fig.text(-0.015, 0.5, 'Trial Count', ha='center', va='center', rotation='vertical', fontsize=20)
fig.text(.5, -0.02, 'Record Entry Year', ha='center', va='center', fontsize=20)
plt.legend(['Has Date', 'Missing Date'],
loc='lower center', ncol=5, bbox_to_anchor = (-1.25, -.9), fontsize=15)
plt.show()
#fig.savefig(parent + '/data/Figures/fig_4.jpg', bbox_inches='tight', dpi=400)
#fig.savefig(parent + '/data/Figures/fig_4.eps', bbox_inches='tight', dpi=400)
# -
# # Combining Status and Completion Date into single graph
# + trusted=true
date_df2 = analysis_df[['eudract_number', 'nca', 'entered_year', 'trial_status', 'completion_date', 'trial_results']].reset_index(drop=True)
date_df2['trial_status'] = date_df2.trial_status.fillna('Missing')
date_df2['has_completion_date'] = np.where(date_df2.completion_date.isna(), 0, 1)
# + trusted=true
total_trials = date_df2[['nca',
'entered_year',
'has_completion_date']].groupby(['nca',
'entered_year']).count().rename({'has_completion_date': 'denominator'}, axis=1)
total_trials_date = only_completed[['nca', 'entered_year', 'has_completion_date']].groupby(['nca', 'entered_year']).sum().rename({'has_completion_date': 'numerator'}, axis=1)
# + trusted=true
merged_dates2 = total_trials.join(total_trials_date)
merged_dates2['missing_dates'] = merged_dates2.denominator - merged_dates2.numerator
# + trusted=true
stacked_dates2 = merged_dates2.drop('denominator', axis=1).stack().unstack(1)
# + trusted=true
stacked_dates2
# + trusted=true
total_comp_dict = {}
for x in ordered_countries:
d = stacked_dates2.loc[x].T
num = d.numerator.sum()
denom = num + d.missing_dates.sum()
total_comp_dict[x] = num / denom
rankings_compdate2 = | pd.Series(total_comp_dict) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 12:46:52 2020
Updated on June 3, 2021
@author: jacob
"""
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize as optim
import numpy as np
from scipy import stats
#from heatmap import heatmap_gr, heatmap_ymax
#from graph_repl import graph_repls
# How many time points are graphed
XSCALE = 97
def graph_avg(df_dict, con_data, exp_data, con_name, exp_name, data_path, plate_list, hm_flag=False, log_flag=False):
""" Plot Formatting """
# You typically want your plot to be ~1.33x wider than tall.
# Common sizes: (10, 7.5) and (12, 9)
plt.figure(figsize=(10, 7.5))
con_color = "#0466c8"
exp_color = "#d62828"
# Remove the plot frame lines. They are unnecessary
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
# Set background to white
ax.set_facecolor('white')
# Ensure that the axis ticks only show up on the bottom and left of the plot.
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Limit the range of the plot to only where the data is.
plt.ylim(0, 2)
plt.xlim(0, XSCALE)
# Make sure your axis ticks are large enough to be easily read.
plt.yticks(np.arange(0, 1.7, 0.2), [str(round(x, 1)) for x in np.arange(0, 1.7, 0.2)], fontsize=14)
plt.xticks(np.arange(0, XSCALE, 24), [str(round(x,1)) for x in np.arange(0, XSCALE, 24)], fontsize=14)
# Provide tick lines across the plot to help your viewers trace along the axis ticks.
for y in np.arange(0, 1.7, 0.2):
plt.plot(range(0, XSCALE), [y] * len(range(0, XSCALE)), "--", lw=0.5, color="black", alpha=0.3)
""" Calculations """
# Parameter fits for individual control wells
control_grs = []
control_ymaxs = []
# Storing wells to compute average of replicate
control_wells = []
con_avg = con_name + "_avg"
# Lists of parameter values for the experimental replicate
exp_grs = []
exp_ymaxs = []
# Storing wells to compute average of replicate
exp_wells = []
exp_avg = exp_name + "_avg"
avg_df = | pd.DataFrame() | pandas.DataFrame |
"""
## Twitter Celebrity Matcher
This app is a tool to match celebrities from Twitter with their respective tweets.
Author: [<NAME>](https://www.linkedin.com/in/ahmedshahriar)
Source: [Github](https://github.com/ahmedshahriar/TwitterCelebrityMatcher)
"""
import logging
import os
from typing import Mapping, Optional
import pandas as pd
from core.dataprep import TwitterDataPrep
from core.scraper import TwitterScraper
from config import CONSUMER_KEY, ACCESS_SECRET, CONSUMER_SECRET, ACCESS_KEY, MODEL_PATH, TWITTER_USER_LIST_FILE, \
TWITTER_USER_LIST_PATH
def scrape_embed_tweets(username: str) -> Optional[pd.DataFrame]:
"""
Scrape tweets and generate embedding dataframe
:param username: Twitter username
:return: embedding dataframe
"""
df_embeddings = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import openpyxl
import numpy as np
import os
import string
import glob
''' This program compiles all (individual) saved excel files to compare different models in one environment
'''
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
path_core = __location__+ "/Results/Train/"
print("OK")
# SELECT THE ENVIRONMENTS
# env_path_list = ["Env_1",
# "Env_2",
# "Env_3",
# "Env_8",
# "Env_9",
# "Env_10",
# "Env_11"]
env_path_list = ["Env_1",
"Env_2",
"Env_3",
"Env_4"]
env_path_list = ["Env_1"]
alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD', 'AE', 'AF', 'AG', 'AH', 'AI', 'AJ', 'AK', 'AL', 'AM', 'AN', 'AO', 'AP', 'AQ', 'AR', 'AS', 'AT', 'AU', 'AV', 'AW', 'AX', 'AY', 'AZ']
list_sheets = ["Run_Conf", "Score", "Percent", "Loss", "Time"]
for env_path in env_path_list:
file_path_list = []
path = path_core + env_path + "/Train_Env_1_DQN*.xlsx"
for fname in sorted(glob.glob(path)):
file_path_list.append(fname)
print("LEN(FILE_PATH_LIST):", len(file_path_list))
load_path = __location__+ "/Results/Train/Compare_Models.xlsx"
excel_data_base = pd.ExcelFile(load_path)
load_path_new = __location__+ "/Results/Train/" + env_path + "/Compare_Models_new_" + env_path + ".xlsx"
excel_writer_to_append = pd.ExcelWriter(load_path_new)
workbook = excel_writer_to_append.book
excel_data_base_col = pd.read_excel(excel_data_base, sheetname="Run_Conf")
df_Run_Conf_list = pd.DataFrame()
df_Score_list = pd.DataFrame()
df_Percent_list = pd.DataFrame()
df_Loss_list = pd.DataFrame()
df_Time_list = pd.DataFrame()
for i in range(len(file_path_list)):
print("File:", i)
excel_file = pd.ExcelFile(file_path_list[i])
# print("excel_file ", excel_file )
df_Run_Conf = pd.read_excel(excel_file, sheetname=list_sheets[0], converters={'A': str})
df_Run_Conf = df_Run_Conf.set_index(list_sheets[0])
df_Score = pd.read_excel(excel_file, sheetname=list_sheets[1], parse_cols="A:B")
df_Score = df_Score.set_index(list_sheets[1])
df_Percent = pd.read_excel(excel_file, sheetname=list_sheets[2], parse_cols="A:B")
df_Percent = df_Percent.set_index(list_sheets[2])
df_Loss = pd.read_excel(excel_file, sheetname=list_sheets[3], parse_cols="A:B")
df_Loss = df_Loss.set_index(list_sheets[3])
df_Time = pd.read_excel(excel_file, sheetname=list_sheets[4], parse_cols="A:B")
df_Time = df_Time.set_index(list_sheets[4])
df_Run_Conf_list = pd.concat([df_Run_Conf_list, df_Run_Conf], axis=1, join="outer")
df_Score_list = pd.concat([df_Score_list, df_Score], axis=1, join="outer")
df_Percent_list = pd.concat([df_Percent_list, df_Percent], axis=1, join="outer")
df_Loss_list = pd.concat([df_Loss_list, df_Loss], axis=1, join="outer")
df_Time_list = pd.concat([df_Time_list, df_Time], axis=1, join="outer")
list_of_df = [df_Run_Conf_list,df_Score_list,df_Percent_list,df_Loss_list,df_Time_list]
# print("df_Run_Conf_list\n\n", df_Run_Conf_list)
i = 0
df = pd.DataFrame()
for sheet in list_sheets:
print("Sheet:", sheet)
# if sheet == "Run_Conf":
# dict = {} # In order to parse the correct data the headers should be strings
# for n in range(len(excel_data_base_col.columns)):
# dict[n] = str
# df_data_base = pd.read_excel(excel_data_base, sheetname=sheet, converters=dict)
# else:
df_data_base = pd.read_excel(excel_data_base, sheetname=sheet)
new_df_data_base = df_data_base.set_index(sheet)
# print("df_Run_Conf_list\n\n", new_df_data_base)
df = list_of_df[i]
new_df_data_base = | pd.concat([new_df_data_base, df], axis=1, join_axes=[df.index], join="outer") | pandas.concat |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if its a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# _get_numeric_data is includes _get_bool_data, so can't test for
# non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda: obj1 and obj2)
self.assertRaises(ValueError, lambda: obj1 or obj2)
self.assertRaises(ValueError, lambda: not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
        # numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
for op in ['min', 'max', 'max', 'var', 'std', 'prod', 'sum', 'cumsum',
'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',
'cummin', 'all', 'any']:
f = getattr(np, op, None)
if f is not None:
f(o)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
        # compound dtypes should raise NotImplementedError
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y, m, None))
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
# (currently there is an a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
# Check for stability when receives seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
# Check for error when random_state argument invalid.
with tm.assertRaises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with tm.assertRaises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with tm.assertRaises(ValueError):
o.sample(n=-3)
with tm.assertRaises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with tm.assertRaises(ValueError):
o.sample(n=3.2)
# Check lengths are right
self.assertTrue(len(o.sample(n=4) == 4))
self.assertTrue(len(o.sample(frac=0.34) == 3))
self.assertTrue(len(o.sample(frac=0.36) == 4))
###
# Check weights
###
# Weight length must be right
with tm.assertRaises(ValueError):
o.sample(n=3, weights=[0, 1])
with tm.assertRaises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with tm.assertRaises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with tm.assertRaises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with tm.assertRaises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=nan_weights)
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with tm.assertRaises(ValueError):
panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with tm.assertRaises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
with tm.assertRaises(ValueError):
df.sample(n=1, axis='not_a_name')
with tm.assertRaises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with tm.assertRaises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
# Weights have empty values to be filed with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with tm.assertRaises(ValueError):
df.sample(1, weights=s4)
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
self.assertTrue(o.size == np.prod(o.shape))
self.assertTrue(o.size == 10 ** len(o.axes))
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
self.assertTrue(len(np.array_split(o, 5)) == 5)
self.assertTrue(len(np.array_split(o, 2)) == 2)
def test_unexpected_keyword(self): # GH8597
from pandas.util.testing import assertRaisesRegexp
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
class TestSeries(tm.TestCase, Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setUp(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]]))
s.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, '2', 3.])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101', periods=3))
result = o._get_numeric_data()
expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
self.assertTrue(s.bool())
s = Series([False])
self.assertFalse(s.bool())
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
self.assertRaises(ValueError, lambda: bool(s))
for s in [Series([np.nan]), Series([pd.NaT])]:
self.assertRaises(ValueError, lambda: s.bool())
# multiple bool are still an error
for s in [Series([True, True]), Series([False, False])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
# single non-bool are an error
for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3), range(3))
o.name = 'foo'
o2 = Series(range(3), range(3))
o2.name = 'bar'
result = o.T
self.check_metadata(o, result)
# resample
ts = Series(np.random.rand(1000),
index=date_range('20130101', periods=1000, freq='s'),
name='foo')
result = ts.resample('1T').mean()
self.check_metadata(ts, result)
result = ts.resample('1T').min()
self.check_metadata(ts, result)
result = ts.resample('1T').apply(lambda x: x.sum())
self.check_metadata(ts, result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ['name', 'filename']
o.filename = 'foo'
o2.filename = 'bar'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat' and name == 'filename':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
result = pd.concat([o, o2])
self.assertEqual(result.filename, 'foo+bar')
self.assertIsNone(result.name)
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_numpy_array_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_numpy_array_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interp_regression(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
self.assertRaises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
self.assertRaises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
        # These tests are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
        # These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
def test_interp_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial', order=1)
def test_interp_nonmono_raise(self):
tm._skip_if_no_scipy()
s = Series([1, np.nan, 3], index=[0, 2, 1])
with tm.assertRaises(ValueError):
s.interpolate(method='krogh')
def test_interp_datetime64(self):
tm._skip_if_no_scipy()
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
def test_describe(self):
self.series.describe()
self.ts.describe()
def test_describe_objects(self):
s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a'])
result = s.describe()
expected = Series({'count': 7, 'unique': 4,
'top': 'a', 'freq': 3}, index=result.index)
assert_series_equal(result, expected)
dt = list(self.ts.index)
dt.append(dt[0])
ser = Series(dt)
rs = ser.describe()
min_date = min(dt)
max_date = max(dt)
xp = Series({'count': len(dt),
'unique': len(self.ts.index),
'first': min_date, 'last': max_date, 'freq': 2,
'top': min_date}, index=rs.index)
assert_series_equal(rs, xp)
def test_describe_empty(self):
result = pd.Series().describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
nanSeries = Series([np.nan])
nanSeries.name = 'NaN'
result = nanSeries.describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
def test_describe_none(self):
noneSeries = Series([None])
noneSeries.name = 'None'
expected = Series([0, 0], index=['count', 'unique'], name='None')
assert_series_equal(noneSeries.describe(), expected)
class TestDataFrame(tm.TestCase, Generic):
_typ = DataFrame
_comparator = lambda self, x, y: assert_frame_equal(x, y)
def test_rename_mi(self):
df = DataFrame([
11, 21, 31
], index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]]))
df.rename(str.lower)
def test_nonzero_single_element(self):
# allow single item via bool method
df = DataFrame([[True]])
self.assertTrue(df.bool())
df = DataFrame([[False]])
self.assertFalse(df.bool())
df = DataFrame([[False, False]])
self.assertRaises(ValueError, lambda: df.bool())
self.assertRaises(ValueError, lambda: bool(df))
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = DataFrame({'A': [1, '2', 3.]})
result = o._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
self._compare(result, expected)
def test_interp_basic(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
expected = DataFrame({'A': [1., 2., 3., 4.],
'B': [1., 4., 9., 9.],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df.interpolate()
assert_frame_equal(result, expected)
result = df.set_index('C').interpolate()
expected = df.set_index('C')
expected.loc[3, 'A'] = 3
expected.loc[5, 'B'] = 9
assert_frame_equal(result, expected)
def test_interp_bad_method(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
with tm.assertRaises(ValueError):
df.interpolate(method='not_a_method')
def test_interp_combo(self):
df = DataFrame({'A': [1., 2., np.nan, 4.],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df['A'].interpolate()
expected = Series([1., 2., 3., 4.], name='A')
assert_series_equal(result, expected)
result = df['A'].interpolate(downcast='infer')
expected = Series([1, 2, 3, 4], name='A')
assert_series_equal(result, expected)
def test_interp_nan_idx(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [np.nan, 2, 3, 4]})
df = df.set_index('A')
with tm.assertRaises(NotImplementedError):
df.interpolate(method='values')
def test_interp_various(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
df = df.set_index('C')
expected = df.copy()
result = df.interpolate(method='polynomial', order=1)
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923076
assert_frame_equal(result, expected)
result = df.interpolate(method='cubic')
expected.A.loc[3] = 2.81621174
expected.A.loc[13] = 5.64146581
assert_frame_equal(result, expected)
result = df.interpolate(method='nearest')
expected.A.loc[3] = 2
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
result = df.interpolate(method='slinear')
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923077
assert_frame_equal(result, expected)
result = df.interpolate(method='zero')
expected.A.loc[3] = 2.
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
def test_interp_alt_scipy(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
result = df.interpolate(method='barycentric')
expected = df.copy()
expected.ix[2, 'A'] = 3
expected.ix[5, 'A'] = 6
assert_frame_equal(result, expected)
result = df.interpolate(method='barycentric', downcast='infer')
assert_frame_equal(result, expected.astype(np.int64))
result = df.interpolate(method='krogh')
expectedk = df.copy()
expectedk['A'] = expected['A']
assert_frame_equal(result, expectedk)
_skip_if_no_pchip()
import scipy
result = df.interpolate(method='pchip')
expected.ix[2, 'A'] = 3
if LooseVersion(scipy.__version__) >= '0.17.0':
expected.ix[5, 'A'] = 6.0
else:
expected.ix[5, 'A'] = 6.125
assert_frame_equal(result, expected)
def test_interp_rowwise(self):
df = DataFrame({0: [1, 2, np.nan, 4],
1: [2, 3, 4, np.nan],
2: [np.nan, 4, 5, 6],
3: [4, np.nan, 6, 7],
4: [1, 2, 3, 4]})
result = df.interpolate(axis=1)
expected = df.copy()
expected.loc[3, 1] = 5
expected.loc[0, 2] = 3
expected.loc[1, 3] = 3
expected[4] = expected[4].astype(np.float64)
assert_frame_equal(result, expected)
# scipy route
tm._skip_if_no_scipy()
result = df.interpolate(axis=1, method='values')
assert_frame_equal(result, expected)
result = df.interpolate(axis=0)
expected = df.interpolate()
assert_frame_equal(result, expected)
def test_rowwise_alt(self):
df = DataFrame({0: [0, .5, 1., np.nan, 4, 8, np.nan, np.nan, 64],
1: [1, 2, 3, 4, 3, 2, 1, 0, -1]})
df.interpolate(axis=0)
def test_interp_leading_nans(self):
df = DataFrame({"A": [np.nan, np.nan, .5, .25, 0],
"B": [np.nan, -3, -3.5, np.nan, -4]})
result = df.interpolate()
expected = df.copy()
expected['B'].loc[3] = -3.75
assert_frame_equal(result, expected)
tm._skip_if_no_scipy()
result = df.interpolate(method='polynomial', order=1)
assert_frame_equal(result, expected)
def test_interp_raise_on_only_mixed(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': ['a', 'b', 'c', 'd'],
'C': [np.nan, 2, 5, 7],
'D': [np.nan, np.nan, 9, 9],
'E': [1, 2, 3, 4]})
with tm.assertRaises(TypeError):
df.interpolate(axis=1)
def test_interp_inplace(self):
df = DataFrame({'a': [1., 2., np.nan, 4.]})
expected = DataFrame({'a': [1., 2., 3., 4.]})
result = df.copy()
result['a'].interpolate(inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result['a'].interpolate(inplace=True, downcast='infer')
assert_frame_equal(result, expected.astype('int64'))
def test_interp_inplace_row(self):
# GH 10395
result = DataFrame({'a': [1., 2., 3., 4.],
'b': [np.nan, 2., 3., 4.],
'c': [3, 2, 2, 2]})
expected = result.interpolate(method='linear', axis=1, inplace=False)
result.interpolate(method='linear', axis=1, inplace=True)
assert_frame_equal(result, expected)
def test_interp_ignore_all_good(self):
# GH
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 2, 3, 4],
'C': [1., 2., np.nan, 4.],
'D': [1., 2., 3., 4.]})
expected = DataFrame({'A': np.array(
[1, 2, 3, 4], dtype='float64'),
'B': np.array(
[1, 2, 3, 4], dtype='int64'),
'C': np.array(
[1., 2., 3, 4.], dtype='float64'),
'D': np.array(
[1., 2., 3., 4.], dtype='float64')})
result = df.interpolate(downcast=None)
assert_frame_equal(result, expected)
# all good
result = df[['B', 'D']].interpolate(downcast=None)
assert_frame_equal(result, df[['B', 'D']])
def test_describe(self):
tm.makeDataFrame().describe()
tm.makeMixedDataFrame().describe()
tm.makeTimeDataFrame().describe()
def test_describe_percentiles_percent_or_raw(self):
msg = 'percentiles should all be in the interval \\[0, 1\\]'
df = tm.makeDataFrame()
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[10, 50, 100])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[2])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[-2])
def test_describe_percentiles_equivalence(self):
df = tm.makeDataFrame()
d1 = df.describe()
d2 = df.describe(percentiles=[.25, .75])
assert_frame_equal(d1, d2)
def test_describe_percentiles_insert_median(self):
df = tm.makeDataFrame()
d1 = df.describe(percentiles=[.25, .75])
d2 = df.describe(percentiles=[.25, .5, .75])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('75%' in d2.index)
# none above
d1 = df.describe(percentiles=[.25, .45])
d2 = df.describe(percentiles=[.25, .45, .5])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('45%' in d2.index)
# none below
d1 = df.describe(percentiles=[.75, 1])
d2 = df.describe(percentiles=[.5, .75, 1])
assert_frame_equal(d1, d2)
self.assertTrue('75%' in d1.index)
self.assertTrue('100%' in d2.index)
# edge
d1 = df.describe(percentiles=[0, 1])
d2 = df.describe(percentiles=[0, .5, 1])
assert_frame_equal(d1, d2)
self.assertTrue('0%' in d1.index)
self.assertTrue('100%' in d2.index)
def test_describe_no_numeric(self):
df = DataFrame({'A': ['foo', 'foo', 'bar'] * 8,
'B': ['a', 'b', 'c', 'd'] * 6})
desc = df.describe()
expected = DataFrame(dict((k, v.describe())
for k, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(desc, expected)
ts = tm.makeTimeSeries()
df = DataFrame({'time': ts.index})
desc = df.describe()
self.assertEqual(desc.time['first'], min(ts.index))
def test_describe_empty_int_columns(self):
df = DataFrame([[0, 1], [1, 2]])
desc = df[df[0] < 0].describe() # works
assert_series_equal(desc.xs('count'),
Series([0, 0], dtype=float, name='count'))
self.assertTrue(isnull(desc.ix[1:]).all().all())
def test_describe_objects(self):
df = DataFrame({"C1": ['a', 'a', 'c'], "C2": ['d', 'd', 'f']})
result = df.describe()
expected = DataFrame({"C1": [3, 2, 'a', 2], "C2": [3, 2, 'd', 2]},
index=['count', 'unique', 'top', 'freq'])
assert_frame_equal(result, expected)
df = DataFrame({"C1": pd.date_range('2010-01-01', periods=4, freq='D')
})
df.loc[4] = pd.Timestamp('2010-01-04')
result = df.describe()
expected = DataFrame({"C1": [5, 4, pd.Timestamp('2010-01-04'), 2,
pd.Timestamp('2010-01-01'),
pd.Timestamp('2010-01-04')]},
index=['count', 'unique', 'top', 'freq',
'first', 'last'])
assert_frame_equal(result, expected)
# mix time and str
df['C2'] = ['a', 'a', 'b', 'c', 'a']
result = df.describe()
expected['C2'] = [5, 3, 'a', 3, np.nan, np.nan]
assert_frame_equal(result, expected)
# just str
        expected = DataFrame({'C2': [5, 3, 'a', 3]},
                             index=['count', 'unique', 'top', 'freq'])
        result = df[['C2']].describe()
        assert_frame_equal(result, expected)
# mix of time, str, numeric
df['C3'] = [2, 4, 6, 8, 2]
result = df.describe()
expected = DataFrame({"C3": [5., 4.4, 2.607681, 2., 2., 4., 6., 8.]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
assert_frame_equal(result, expected)
assert_frame_equal(df.describe(), df[['C3']].describe())
assert_frame_equal(df[['C1', 'C3']].describe(), df[['C3']].describe())
assert_frame_equal(df[['C2', 'C3']].describe(), df[['C3']].describe())
def test_describe_typefiltering(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24, dtype='int64'),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
descN = df.describe()
expected_cols = ['numC', 'numD', ]
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descN, expected)
desc = df.describe(include=['number'])
assert_frame_equal(desc, descN)
desc = df.describe(exclude=['object', 'datetime'])
assert_frame_equal(desc, descN)
desc = df.describe(include=['float'])
assert_frame_equal(desc, descN.drop('numC', 1))
descC = df.describe(include=['O'])
expected_cols = ['catA', 'catB']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descC, expected)
descD = df.describe(include=['datetime'])
assert_series_equal(descD.ts, df.ts.describe())
desc = df.describe(include=['object', 'number', 'datetime'])
assert_frame_equal(desc.loc[:, ["numC", "numD"]].dropna(), descN)
assert_frame_equal(desc.loc[:, ["catA", "catB"]].dropna(), descC)
        descDs = descD.sort_index()  # the index order changes for mixed types
assert_frame_equal(desc.loc[:, "ts":].dropna().sort_index(), descDs)
desc = df.loc[:, 'catA':'catB'].describe(include='all')
assert_frame_equal(desc, descC)
desc = df.loc[:, 'numC':'numD'].describe(include='all')
assert_frame_equal(desc, descN)
desc = df.describe(percentiles=[], include='all')
cnt = Series(data=[4, 4, 6, 6, 6],
index=['catA', 'catB', 'numC', 'numD', 'ts'])
assert_series_equal(desc.count(), cnt)
self.assertTrue('count' in desc.index)
self.assertTrue('unique' in desc.index)
self.assertTrue('50%' in desc.index)
self.assertTrue('first' in desc.index)
desc = df.drop("ts", 1).describe(percentiles=[], include='all')
assert_series_equal(desc.count(), cnt.drop("ts"))
self.assertTrue('first' not in desc.index)
desc = df.drop(["numC", "numD"], 1).describe(percentiles=[],
include='all')
assert_series_equal(desc.count(), cnt.drop(["numC", "numD"]))
self.assertTrue('50%' not in desc.index)
def test_describe_typefiltering_category_bool(self):
df = DataFrame({'A_cat': pd.Categorical(['foo', 'foo', 'bar'] * 8),
'B_str': ['a', 'b', 'c', 'd'] * 6,
'C_bool': [True] * 12 + [False] * 12,
'D_num': np.arange(24.) + .5,
'E_ts': tm.makeTimeSeries()[:24].index})
# bool is considered numeric in describe, although not an np.number
desc = df.describe()
expected_cols = ['C_bool', 'D_num']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(desc, expected)
desc = df.describe(include=["category"])
self.assertTrue(desc.columns.tolist() == ["A_cat"])
# 'all' includes numpy-dtypes + category
desc1 = df.describe(include="all")
desc2 = df.describe(include=[np.generic, "category"])
assert_frame_equal(desc1, desc2)
def test_describe_timedelta(self):
df = DataFrame({"td": pd.to_timedelta(np.arange(24) % 20, "D")})
self.assertTrue(df.describe().loc["mean"][0] == pd.to_timedelta(
"8d4h"))
def test_describe_typefiltering_dupcol(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
s = df.describe(include='all').shape[1]
df = pd.concat([df, df], axis=1)
s2 = df.describe(include='all').shape[1]
self.assertTrue(s2 == 2 * s)
def test_describe_typefiltering_groupby(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
G = df.groupby('catA')
self.assertTrue(G.describe(include=['number']).shape == (16, 2))
self.assertTrue(G.describe(include=['number', 'object']).shape == (22,
3))
self.assertTrue(G.describe(include='all').shape == (26, 4))
def test_describe_multi_index_df_column_names(self):
""" Test that column names persist after the describe operation."""
df = pd.DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
# GH 11517
# test for hierarchical index
hierarchical_index_df = df.groupby(['A', 'B']).mean().T
self.assertTrue(hierarchical_index_df.columns.names == ['A', 'B'])
self.assertTrue(hierarchical_index_df.describe().columns.names ==
['A', 'B'])
# test for non-hierarchical index
non_hierarchical_index_df = df.groupby(['A']).mean().T
self.assertTrue(non_hierarchical_index_df.columns.names == ['A'])
self.assertTrue(non_hierarchical_index_df.describe().columns.names ==
['A'])
def test_no_order(self):
tm._skip_if_no_scipy()
s = Series([0, 1, np.nan, 3])
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial')
with tm.assertRaises(ValueError):
s.interpolate(method='spline')
def test_spline(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method='spline', order=1)
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
def test_spline_extrapolate(self):
tm.skip_if_no_package(
'scipy', '0.15',
'setting ext on scipy.interpolate.UnivariateSpline')
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method='spline', order=1, ext=3)
expected3 = Series([1., 2., 3., 4., 5., 6., 6.])
assert_series_equal(result3, expected3)
result1 = s.interpolate(method='spline', order=1, ext=0)
expected1 = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result1, expected1)
def test_spline_smooth(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
self.assertNotEqual(s.interpolate(method='spline', order=3, s=0)[5],
s.interpolate(method='spline', order=3)[5])
def test_spline_interpolation(self):
tm._skip_if_no_scipy()
s = Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
result1 = s.interpolate(method='spline', order=1)
expected1 = s.interpolate(method='spline', order=1)
assert_series_equal(result1, expected1)
# GH #10633
def test_spline_error(self):
tm._skip_if_no_scipy()
s = pd.Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
with tm.assertRaises(ValueError):
s.interpolate(method='spline')
with tm.assertRaises(ValueError):
s.interpolate(method='spline', order=0)
def test_metadata_propagation_indiv(self):
# groupby
df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
result = df.groupby('A').sum()
self.check_metadata(df, result)
# resample
df = DataFrame(np.random.randn(1000, 2),
index=date_range('20130101', periods=1000, freq='s'))
result = df.resample('1T')
self.check_metadata(df, result)
# merging with override
# GH 6923
_metadata = DataFrame._metadata
_finalize = DataFrame.__finalize__
np.random.seed(10)
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['a', 'b'])
df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['c', 'd'])
DataFrame._metadata = ['filename']
df1.filename = 'fname1.csv'
df2.filename = 'fname2.csv'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'merge':
left, right = other.left, other.right
value = getattr(left, name, '') + '|' + getattr(right,
name, '')
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, ''))
return self
DataFrame.__finalize__ = finalize
result = df1.merge(df2, left_on=['a'], right_on=['c'], how='inner')
self.assertEqual(result.filename, 'fname1.csv|fname2.csv')
# concat
# GH 6927
DataFrame._metadata = ['filename']
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list('ab'))
df1.filename = 'foo'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
DataFrame.__finalize__ = finalize
result = pd.concat([df1, df1])
self.assertEqual(result.filename, 'foo+foo')
# reset
DataFrame._metadata = _metadata
DataFrame.__finalize__ = _finalize
def test_tz_convert_and_localize(self):
l0 = date_range('20140701', periods=5, freq='D')
# TODO: l1 should be a PeriodIndex for testing
# after GH2106 is addressed
with tm.assertRaises(NotImplementedError):
period_range('20140701', periods=1).tz_convert('UTC')
with tm.assertRaises(NotImplementedError):
period_range('20140701', periods=1).tz_localize('UTC')
# l1 = period_range('20140701', periods=5, freq='D')
l1 = date_range('20140701', periods=5, freq='D')
int_idx = Index(range(5))
for fn in ['tz_localize', 'tz_convert']:
if fn == 'tz_convert':
l0 = l0.tz_localize('UTC')
l1 = l1.tz_localize('UTC')
for idx in [l0, l1]:
l0_expected = getattr(idx, fn)('US/Pacific')
l1_expected = getattr(idx, fn)('US/Pacific')
df1 = DataFrame(np.ones(5), index=l0)
df1 = getattr(df1, fn)('US/Pacific')
self.assertTrue(df1.index.equals(l0_expected))
# MultiIndex
# GH7846
df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1]))
df3 = getattr(df2, fn)('US/Pacific', level=0)
self.assertFalse(df3.index.levels[0].equals(l0))
self.assertTrue(df3.index.levels[0].equals(l0_expected))
self.assertTrue(df3.index.levels[1].equals(l1))
self.assertFalse(df3.index.levels[1].equals(l1_expected))
df3 = getattr(df2, fn)('US/Pacific', level=1)
self.assertTrue(df3.index.levels[0].equals(l0))
self.assertFalse(df3.index.levels[0].equals(l0_expected))
self.assertTrue(df3.index.levels[1].equals(l1_expected))
self.assertFalse(df3.index.levels[1].equals(l1))
df4 = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
# TODO: untested
df5 = getattr(df4, fn)('US/Pacific', level=1) # noqa
self.assertTrue(df3.index.levels[0].equals(l0))
self.assertFalse(df3.index.levels[0].equals(l0_expected))
self.assertTrue(df3.index.levels[1].equals(l1_expected))
self.assertFalse(df3.index.levels[1].equals(l1))
# Bad Inputs
for fn in ['tz_localize', 'tz_convert']:
# Not DatetimeIndex / PeriodIndex
with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'):
df = DataFrame(index=int_idx)
df = getattr(df, fn)('US/Pacific')
# Not DatetimeIndex / PeriodIndex
with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'):
df = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
df = getattr(df, fn)('US/Pacific', level=0)
# Invalid level
with tm.assertRaisesRegexp(ValueError, 'not valid'):
df = DataFrame(index=l0)
df = getattr(df, fn)('US/Pacific', level=1)
def test_set_attribute(self):
# Test for consistent setattr behavior when an attribute and a column
# have the same name (Issue #8994)
df = DataFrame({'x': [1, 2, 3]})
df.y = 2
df['y'] = [2, 4, 6]
df.y = 5
assert_equal(df.y, 5)
assert_series_equal(df['y'], Series([2, 4, 6], name='y'))
def test_pct_change(self):
# GH 11150
pnl = DataFrame([np.arange(0, 40, 10), np.arange(0, 40, 10), np.arange(
0, 40, 10)]).astype(np.float64)
pnl.iat[1, 0] = np.nan
pnl.iat[1, 1] = np.nan
pnl.iat[2, 3] = 60
mask = pnl.isnull()
for axis in range(2):
expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
axis=axis) - 1
expected[mask] = np.nan
result = pnl.pct_change(axis=axis, fill_method='pad')
self.assert_frame_equal(result, expected)
class TestPanel(tm.TestCase, Generic):
_typ = Panel
_comparator = lambda self, x, y: assert_panel_equal(x, y)
class TestNDFrame(tm.TestCase):
# tests that don't fit elsewhere
def test_squeeze(self):
# noop
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries()]:
tm.assert_series_equal(s.squeeze(), s)
for df in [tm.makeTimeDataFrame()]:
tm.assert_frame_equal(df.squeeze(), df)
for p in [tm.makePanel()]:
tm.assert_panel_equal(p.squeeze(), p)
for p4d in [tm.makePanel4D()]:
tm.assert_panel4d_equal(p4d.squeeze(), p4d)
# squeezing
df = tm.makeTimeDataFrame().reindex(columns=['A'])
tm.assert_series_equal(df.squeeze(), df['A'])
p = tm.makePanel().reindex(items=['ItemA'])
tm.assert_frame_equal(p.squeeze(), p['ItemA'])
p = tm.makePanel().reindex(items=['ItemA'], minor_axis=['A'])
tm.assert_series_equal(p.squeeze(), p.ix['ItemA', :, 'A'])
p4d = tm.makePanel4D().reindex(labels=['label1'])
tm.assert_panel_equal(p4d.squeeze(), p4d['label1'])
p4d = tm.makePanel4D().reindex(labels=['label1'], items=['ItemA'])
tm.assert_frame_equal(p4d.squeeze(), p4d.ix['label1', 'ItemA'])
# don't fail with 0 length dimensions GH11229 & GH8999
empty_series = pd.Series([], name='five')
empty_frame = pd.DataFrame([empty_series])
        empty_panel = pd.Panel({'six': empty_frame})
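        # Assumed completion of the truncated test: squeezing objects with
        # zero-length dimensions should simply not raise (GH11229 / GH8999).
        for higher_dim in [empty_series, empty_frame, empty_panel]:
            higher_dim.squeeze()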
import warnings
import numpy as np
import pandas as pd
def create_initial_infections(
empirical_infections,
synthetic_data,
start,
end,
seed,
virus_shares,
reporting_delay,
population_size,
):
"""Create a DataFrame with initial infections.
.. warning::
In case a person is drawn to be newly infected more than once we only
infect her on the first date. If the probability of being infected is
large, not correcting for this will lead to a lower infection probability
than in the empirical data.
Args:
empirical_infections (pandas.Series): Newly infected Series with the index
levels ["date", "county", "age_group_rki"]. Should already be corrected
upwards to include undetected cases.
synthetic_data (pandas.DataFrame): Dataset with one row per simulated
individual. Must contain the columns age_group_rki and county.
start (str or pd.Timestamp): Start date.
end (str or pd.Timestamp): End date.
seed (int)
virus_shares (dict or None): If None, it is assumed that there is only one
strain. If dict, keys are the names of the virus strains and the values
are pandas.Series with a DatetimeIndex and the share among newly infected
individuals on each day as value.
reporting_delay (int): Number of days by which the reporting of cases is
delayed. If given, later days are used to get the infections of the
demanded time frame.
population_size (int): Population size behind the empirical_infections.
Returns:
pandas.DataFrame: DataFrame with same index as synthetic_data and one column
for each day between start and end. Dtype is boolean or categorical.
Values identify which individual gets infected with which variant.
"""
np.random.seed(seed)
assert reporting_delay >= 0, "Reporting delay must be >= 0"
reporting_delay = pd.Timedelta(days=reporting_delay)
start = pd.Timestamp(start) + reporting_delay
end = pd.Timestamp(end) + reporting_delay
index_cols = ["date", "county", "age_group_rki"]
correct_index_levels = empirical_infections.index.names == index_cols
assert correct_index_levels, f"Your data must have {index_cols} as index levels."
dates = empirical_infections.index.get_level_values("date").unique()
expected_dates = pd.date_range(start, end)
missing_dates = [str(x.date()) for x in expected_dates if x.date() not in dates]
assert len(missing_dates) == 0, f"The following dates are missing: {missing_dates}"
empirical_infections = empirical_infections.loc[
pd.Timestamp(start) : pd.Timestamp(end)
]
assert (
empirical_infections.notnull().all().all()
), "No NaN allowed in the empirical data"
duplicates_in_index = empirical_infections.index.duplicated().any()
assert not duplicates_in_index, "Your index must not have any duplicates."
cases = empirical_infections.to_frame().unstack("date")
cases.columns = [str(x.date() - reporting_delay) for x in cases.columns.droplevel()]
group_infection_probs = _calculate_group_infection_probs(
cases, population_size, synthetic_data
)
initially_infected = _draw_bools_by_group(
synthetic_data=synthetic_data,
group_by=["county", "age_group_rki"],
probabilities=group_infection_probs,
)
if virus_shares is not None:
for sr in virus_shares.values():
sr.index = sr.index - reporting_delay
initially_infected = _add_variant_info_to_infections(
initially_infected, virus_shares
)
return initially_infected
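# Example usage (hypothetical names and numbers, shown only for illustration):
#
#     initial_infections = create_initial_infections(
#         empirical_infections=upscaled_case_counts,   # Series indexed by (date, county, age_group_rki)
#         synthetic_data=synthetic_population,         # DataFrame with county / age_group_rki columns
#         start="2020-03-01",
#         end="2020-03-30",
#         seed=1234,
#         virus_shares=None,
#         reporting_delay=5,
#         population_size=83_000_000,
#     )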
def _calculate_group_infection_probs(cases, population_size, synthetic_data):
"""Calculate the infection probability for each group and date.
Args:
cases (pandas.DataFrame): columns are the dates, the index are counties
and age groups.
population_size (int): Size of the population from which the cases
originate.
synthetic_data (pandas.DataFrame): Dataset with one row per simulated
individual. Must contain the columns age_group_rki and county.
Returns:
group_infection_probs (pandas.DataFrame): columns are dates, index are
counties and age groups. The values are the probabilities to be
infected by age group on a particular date.
"""
upscale_factor = population_size / len(synthetic_data)
synthetic_group_sizes = synthetic_data.groupby(["county", "age_group_rki"]).size()
upscaled_group_sizes = upscale_factor * synthetic_group_sizes
cases = cases.reindex(upscaled_group_sizes.index).fillna(0)
group_infection_probs = pd.DataFrame(index=upscaled_group_sizes.index)
for col in cases.columns:
prob = cases[col] / upscaled_group_sizes
group_infection_probs[col] = prob
return group_infection_probs
def _draw_bools_by_group(synthetic_data, group_by, probabilities):
"""Draw boolean values for each individual in synthetic data.
Args:
synthetic_data (pd.DataFrame): Synthetic data set containing
the group_by variables.
group_by (list): List of variables according to which the data
are grouped.
probabilities (pd.DataFrame): The index levels are the
group_by variables. There can be several columns with
probabilities.
Returns:
pandas.DataFrame or pandas.Series
"""
group_indices = synthetic_data.groupby(group_by).groups
    res = pd.DataFrame(False, columns=probabilities.columns, index=synthetic_data.index)
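    # Assumed completion: draw Bernoulli outcomes column by column, applying each
    # group's probability to all of that group's members.
    for col in probabilities.columns:
        for group, indices in group_indices.items():
            prob = probabilities.loc[group, col]
            draws = np.random.uniform(size=len(indices)) <= prob
            res.loc[indices, col] = draws
    return res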
# Check the following urls for more info about Pan-STARRS:
#
# https://outerspace.stsci.edu/display/PANSTARRS/PS1+Image+Cutout+Service#PS1ImageCutoutService-ImportantFITSimageformat,WCS,andflux-scalingnotes
# https://outerspace.stsci.edu/display/PANSTARRS/PS1+Stack+images#PS1Stackimages-Photometriccalibration
#
# For DES:
#
# https://des.ncsa.illinois.edu/releases/dr1/dr1-docs/processing
#
# For SDSS:
#
# https://www.sdss.org/dr12/algorithms/fluxcal/#SDSStoAB
# https://data.sdss.org/datamodel/files/BOSS_PHOTOOBJ/frames/RERUN/RUN/CAMCOL/frame.html
#
# Some parts of this notebook are based on https://github.com/djones1040/PS1_surface_brightness/blob/master/Surface%20Brightness%20Tutorial.ipynb and codes from <NAME>
import os
import glob
import subprocess
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import sep
from astropy.io import fits
from astropy.modeling import models, fitting
from astropy import coordinates as coords, units as u, wcs
from photutils import (CircularAnnulus,
CircularAperture,
aperture_photometry)
from photutils.detection import DAOStarFinder
import reproject
from reproject.mosaicking import reproject_and_coadd
from .utils import (get_survey_filters, extract_filters,
check_survey_validity, check_filters_validity,
calc_ext, calc_sky_unc)
sep.set_sub_object_limit(1e4)
# Masking Stars
#-------------------------------
def create_circular_mask(h, w, centre, radius):
"""Creates a circular mask of an image.
Parameters
----------
h: int
Image height.
w: int
Image width.
centre: tuple-like
Centre of the circular mask.
radius: float
Radius of the circular mask.
Returns
-------
mask: 2D bool-array
Circular mask (inside the circle = `True`).
"""
Y, X = np.ogrid[:h, :w]
dist_from_center = np.sqrt((X - centre[0])**2 + (Y-centre[1])**2)
mask = dist_from_center <= radius
return mask
def inside_galaxy(star_center, gal_center, gal_r):
"""Checks whether a star is inside a galaxy.
Parameters
==========
star_center: tuple-like
Centre of the star.
gal_center: tuple-like
Centre of the galaxy.
gal_r: float
Radius to define the galaxy size.
Returns
=======
condition: bool
`True` if the star is inside the galaxy,
`False` otherwise.
"""
dist_from_center = np.sqrt((star_center[0] - gal_center[0])**2 +
(star_center[1] - gal_center[1])**2)
condition = dist_from_center <= gal_r
return condition
def fit_2dgauss(star_data, x0=None, y0=None, plot_fit=False):
"""Fits a 2D gaussian to a star.
Parameters
----------
star_data: 2D array
Image data.
x0: int, default `None`
Star's x-axis centre.
y0: int, default `None`
Star's y-axis centre.
plot_fit: bool, default `False`
If `True`, the model fit is plotted with `r_in`
and `r_out`.
Returns
-------
model_sigma: float
2D gaussian sigma parameter. The largest between
sigma_x and sigma_y.
"""
# initial guess
sigma = 0.5
amp = np.max(star_data)
if (x0 is None) | (y0 is None):
y0, x0 = np.unravel_index(np.argmax(star_data),
star_data.shape)
fitter = fitting.LevMarLSQFitter()
gaussian_model = models.Gaussian2D(amp, x0, y0, sigma, sigma)
gaussian_model.fixed['x_mean'] = True
gaussian_model.fixed['y_mean'] = True
gaussian_model.bounds['x_stddev'] = (0, 8)
gaussian_model.bounds['y_stddev'] = (0, 8)
yi, xi = np.indices(star_data.shape)
model_result = fitter(gaussian_model, xi, yi, star_data)
model_sigma = max([model_result.x_stddev.value,
model_result.y_stddev.value])
    if plot_fit:
        # `m`, `s`, `Rin_scale` and `Rout_scale` were undefined in the original;
        # we assume a mean/std grey-scale stretch and the 3/5-sigma inner/outer
        # ring scales used in `mask_image`, purely for visualisation.
        m, s = np.nanmean(star_data), np.nanstd(star_data)
        Rin_scale, Rout_scale = 3, 5
        fig, ax = plt.subplots()
        ax.imshow(star_data, interpolation='nearest', cmap='gray',
                  vmin=m-s, vmax=m+s, origin='lower')
        x_mean = model_result.x_mean.value
        y_mean = model_result.y_mean.value
        circle_in = plt.Circle((x_mean, y_mean), Rin_scale*model_sigma,
                               facecolor='none', edgecolor='red',
                               linewidth=2)
        circle_out = plt.Circle((x_mean, y_mean), Rout_scale*model_sigma,
                                facecolor='none', edgecolor='red',
                                linewidth=2)
        ax.add_patch(circle_in)
        ax.add_patch(circle_out)
        plt.show()
return model_sigma
def mask_image(data, apertures, bkg, gal_center=None,
gal_r=None, plot_output=None):
"""Creates a mask of the stars with the mean value of the background
around them.
**Note**: the galaxy coordinates are (y, x).
Parameters
----------
data: 2D array
Image data.
apertures: `photutils.aperture.circle.CircularAperture`
Circular apertures of the stars.
bkg: float
Background level used to limit the aperture size to
mask the stars. In other words, increase the aperture size
of the mask until the mask value is <= 3*bkg to properly mask
bright stars.
gal_center: tuple-like, default `None`
Centre of the galaxy (y, x) in pixels.
gal_r: float, default `None`
Radius to define the galaxy size.
plot_output: str, default `None`
If not `None`, saves the output plots with the masked stars with
the given name.
"""
h, w = data.shape[:2]
masked_data = data.copy()
ring_scales = [3, 4, 5] # how many sigmas
model_sigmas = []
skip_indeces = []
for i, aperture in enumerate(apertures):
star_y, star_x = aperture.positions
size = 10
xmin = max(int(star_x-2*size), 0)
xmax = min(int(star_x+2*size), w)
ymin = max(int(star_y-2*size), 0)
ymax = min(int(star_y+2*size), h)
# some stars close to the edges of the image fail,
# but we can skip those anyway
if (xmin<star_x<xmax) & (ymin<star_y<ymax):
star_data = masked_data[xmin:xmax, ymin:ymax]
        else:
            skip_indeces.append(i)
            # append a placeholder so `model_sigmas` stays aligned with
            # `apertures` in the plotting loop at the end of this function
            model_sigmas.append(np.nan)
            continue  # skip this star
# fit a gaussian to the star and get sigma
x0, y0 = star_x-xmin, star_y-ymin
model_sigma = fit_2dgauss(star_data)
model_sigmas.append(model_sigma)
            if model_sigma == 8:
                # 8 is the upper bound set in `fit_2dgauss`, so hitting it means the fit failed
                skip_indeces.append(i)
                continue  # skip this star
# check if the star is inside the galaxy aperture
star_center = aperture.positions
if (gal_center is None) or (gal_r is None):
star_inside_galaxy = False
else:
star_inside_galaxy = inside_galaxy(star_center,
gal_center, gal_r)
if star_inside_galaxy:
            r_in = ring_scales[0]*model_sigma
            r_mid = ring_scales[1]*model_sigma
            r_out = ring_scales[2]*model_sigma
            # calculate the mean flux in the outer ring (4-5 sigmas)
ann = CircularAnnulus(aperture.positions,
r_in=r_mid, r_out=r_out)
ann_mean = aperture_photometry(
data, ann)['aperture_sum'][0] / ann.area
mask = create_circular_mask(h, w, star_center, r_in)
# basically remove the failed fits and avoid large objects
not_big_object = r_in<(gal_r/3)
# do not mask the galaxy
dist2gal = np.sum(np.abs(apertures.positions - gal_center), axis=1)
gal_id = np.argmin(dist2gal)
if not np.isnan(ann_mean) and not_big_object and i!=gal_id:
masked_data[mask] = ann_mean
else:
skip_indeces.append(i)
if plot_output is not None:
m, s = np.nanmean(data), np.nanstd(data)
fig, ax = plt.subplots(1, 3, figsize=(15, 10))
# reference image
ax[0].imshow(data, interpolation='nearest', cmap='gray',
vmin=m-s, vmax=m+s, origin='lower')
ax[0].set_title('reference image')
# apertures image
ax[1].imshow(data, interpolation='nearest', cmap='gray',
vmin=m-s, vmax=m+s, origin='lower')
ax[1].set_title('apertures image')
if (gal_center is not None) or (gal_r is not None):
gal_circle = plt.Circle(gal_center, gal_r,
ec='b', fill=False)
ax[1].add_patch(gal_circle)
# masked image
ax[2].imshow(masked_data, interpolation='nearest', cmap='gray',
vmin=m-s, vmax=m+s, origin='lower')
ax[2].set_title('masked image')
for i, (aperture, model_sigma) in enumerate(zip(apertures,
model_sigmas)):
if i not in skip_indeces:
for scale in ring_scales:
aperture.r = scale*model_sigma
aperture.plot(ax[1], color='red', lw=1.5, alpha=0.5)
plt.tight_layout()
plt.savefig(plot_output)
plt.close(fig)
return masked_data, model_sigmas
# Coadding images
#-------------------------------
def coadd_images(sn_name, filters='riz', work_dir='', survey='PS1'):
"""Reprojects and coadds images for the choosen filters for
common-aperture photometry.
Parameters
----------
sn_name: str
SN name to be used for finding the images locally.
filters: str, default `riz`
Filters to use for the coadd image.
work_dir: str, default ''
Working directory where to find the objects'
directories with the images. Default, current directory.
survey: str, default `PS1`
Survey to use as prefix for the images.
Returns
-------
A fits file with the coadded images is created with the filters
used as the name of the file at the SN directory.
"""
init_dir = os.path.abspath('.')
sn_dir = os.path.join(work_dir, sn_name)
fits_files = [os.path.join(sn_dir,
f'{survey}_{filt}.fits') for filt in filters]
hdu_list = []
for fits_file in fits_files:
fits_image = fits.open(fits_file)
hdu_list.append(fits_image[0])
hdu_list = fits.HDUList(hdu_list)
# use the last image as reference
coadd = reproject_and_coadd(hdu_list, fits_image[0].header,
reproject_function=reproject.reproject_interp)
fits_image[0].data = coadd[0]
outfile = os.path.join(sn_dir, f'{survey}_{filters}.fits')
fits_image.writeto(outfile, overwrite=True)
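# Example (assumed layout: <work_dir>/<sn_name>/<survey>_<filter>.fits, matching the docstring):
#
#     coadd_images('SN2011fe', filters='riz', work_dir='images', survey='PS1')
#     # -> writes images/SN2011fe/PS1_riz.fits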
# Photometry
#-------------------------------
def extract_aperture_params(fits_file, host_ra, host_dec, threshold, bkg_sub=True):
"""Extracts aperture parameters of a galaxy.
**Note:** the galaxy must be ideally centred in the image.
Parameters
==========
fits_file: str
Path to the fits file.
host_ra: float
Host-galaxy Right ascension of the galaxy in degrees.
host_dec: float
Host-galaxy Declination of the galaxy in degrees.
threshold: float
Threshold used by `sep.extract()` to extract objects.
bkg_sub: bool, default `True`
If `True`, the image gets background subtracted.
Returns
=======
gal_object: numpy array
Galaxy object extracted with `sep.extract()`.
objects: numpy array
All objects extracted with `sep.extract()`.
"""
img = fits.open(fits_file)
header = img[0].header
data = img[0].data
img_wcs = wcs.WCS(header, naxis=2)
data = data.astype(np.float64)
bkg = sep.Background(data)
bkg_rms = bkg.globalrms
if bkg_sub:
data_sub = np.copy(data - bkg)
else:
data_sub = np.copy(data)
# extract objects with Source Extractor
objects = sep.extract(data_sub, threshold, err=bkg_rms)
# obtain the galaxy data (hopefully centred in the image)
gal_coords = coords.SkyCoord(ra=host_ra*u.degree,
dec=host_dec*u.degree)
gal_x, gal_y = img_wcs.world_to_pixel(gal_coords)
x_diff = np.abs(objects['x']-gal_x)
y_diff = np.abs(objects['y']-gal_y)
dist = np.sqrt(x_diff**2 + y_diff**2)
gal_id = np.argmin(dist)
gal_object = objects[gal_id:gal_id+1]
return gal_object, objects
def extract_global_photometry(fits_file, host_ra, host_dec, gal_object=None,
mask_stars=True, threshold=3, bkg_sub=True,
survey='PS1', plot_output=None):
"""Extracts PanSTARRS's global photometry of a galaxy. The use
of `gal_object` is intended for common-aperture photometry.
**Note:** the galaxy must be ideally centred in the image.
Parameters
==========
fits_file: str
Path to the fits file.
host_ra: float
Host-galaxy Right ascension of the galaxy in degrees.
host_dec: float
Host-galaxy Declination of the galaxy in degrees.
gal_object: numpy array, default `None`
Galaxy object extracted with `extract_aperture_params()`.
Use this for common-aperture photometry only.
mask_stars: bool, default `True`
If `True`, the stars identified inside the common aperture
are masked with the mean value of the background around them.
threshold: float, default `3`
Threshold used by `sep.extract()` to extract objects.
bkg_sub: bool, default `True`
If `True`, the image gets background subtracted.
survey: str, default `PS1`
Survey to use for the zero-points.
plot_output: str, default `None`
If not `None`, saves the output plots with the given name.
Returns
=======
mag: float
Aperture magnitude.
mag_err: float
Error on the aperture magnitude.
"""
check_survey_validity(survey)
img = fits.open(fits_file)
header = img[0].header
data = img[0].data
exptime = float(header['EXPTIME'])
data = data.astype(np.float64)
bkg = sep.Background(data)
bkg_rms = bkg.globalrms
if bkg_sub:
data_sub = np.copy(data - bkg)
else:
data_sub = np.copy(data)
if gal_object is None:
# no common aperture
gal_object, objects = extract_aperture_params(fits_file,
host_ra,
host_dec,
threshold,
bkg_sub)
else:
gal_object2, objects = extract_aperture_params(fits_file,
host_ra,
host_dec,
threshold,
bkg_sub)
# sometimes, one of the filter images can be flipped, so the position of the
# aperture from the coadd image might not match that of the filter image. This
# is a workaround as we only need the semi-major and semi-minor axes.
gal_object['x'] = gal_object2['x']
gal_object['y'] = gal_object2['y']
gal_object['theta'] = gal_object2['theta']
if mask_stars:
# identify bright source.....
#mean, median, std = sigma_clipped_stats(data_sub, sigma=3.0)
#daofind = DAOStarFinder(fwhm=3.0, threshold=7.*std) # avoids bogus sources
#sources = daofind(data_sub)
#positions = np.transpose((sources['xcentroid'], sources['ycentroid']))
# ... or create apertures for the sources obtained with sep
positions = np.transpose([objects['x'], objects['y']])
apertures = CircularAperture(positions, r=4) # the value of r is irrelevant
# mask image
gal_center = (gal_object['x'][0], gal_object['y'][0])
        gal_r = (6/2)*gal_object['a'][0]  # 3x the semi-major axis as the galaxy radius
if plot_output is not None:
split_name = os.path.splitext(plot_output)
plot_masking_output = f'{split_name[0]}_star_masking{split_name[1]}'
else:
plot_masking_output = None
masked_data, model_sigmas = mask_image(data_sub, apertures, bkg_rms,
gal_center, gal_r, plot_masking_output)
data_sub = masked_data.copy()
# aperture photometry
# This uses what would be the default SExtractor parameters.
# See https://sep.readthedocs.io/en/v1.1.x/apertures.html
# NaNs are converted to mean values to avoid issues with the photometry.
# The value used, might slightly affects the results (~ 0.0x mag).
masked_data = np.nan_to_num(data_sub, nan=np.nanmean(data_sub))
kronrad, krflag = sep.kron_radius(masked_data,
gal_object['x'],
gal_object['y'],
gal_object['a'],
gal_object['b'],
gal_object['theta'],
6.0)
r_min = 1.75 # minimum diameter = 3.5
if kronrad*np.sqrt(gal_object['a']*gal_object['b']) < r_min:
print(f'Warning: using circular photometry on {fits_file}')
flux, flux_err, flag = sep.sum_circle(masked_data,
gal_object['x'],
gal_object['y'],
r_min,
err=bkg.globalrms,
subpix=1)
else:
flux, flux_err, flag = sep.sum_ellipse(masked_data,
gal_object['x'],
gal_object['y'],
gal_object['a'],
gal_object['b'],
gal_object['theta'],
2.5*kronrad,
err=bkg.globalrms,
subpix=1)
zp_dict = {'PS1':25 + 2.5*np.log10(exptime),
'DES':30,
'SDSS':22.5}
zp = zp_dict[survey]
mag = -2.5*np.log10(flux) + zp
mag_err = 2.5/np.log(10)*flux_err/flux
if plot_output is not None:
fig, ax = plt.subplots()
m, s = np.nanmean(data_sub), np.nanstd(data_sub)
im = ax.imshow(data_sub, interpolation='nearest',
cmap='gray',
vmin=m-s, vmax=m+s,
origin='lower')
e = Ellipse(xy=(gal_object['x'][0], gal_object['y'][0]),
width=6*gal_object['a'][0],
height=6*gal_object['b'][0],
angle=gal_object['theta'][0]*180./np.pi)
e.set_facecolor('none')
e.set_edgecolor('red')
ax.add_artist(e)
plt.tight_layout()
plt.savefig(plot_output)
plt.close(fig)
return mag[0], mag_err[0]
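# Example (hypothetical file and coordinates; returns the aperture magnitude and its error):
#
#     mag, mag_err = extract_global_photometry('SN2011fe/PS1_r.fits',
#                                              host_ra=210.774, host_dec=54.274,
#                                              survey='PS1')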
def multi_global_photometry(name_list, host_ra_list, host_dec_list, work_dir='',
filters=None, coadd=True, coadd_filters='riz',
mask_stars=True, threshold=3, bkg_sub=True, survey="PS1",
correct_extinction=True, plot_output=False):
"""Extract global photometry for multiple SNe.
Parameters
==========
name_list: list-like
List of SN names.
host_ra_list: list-like
List of host-galaxy right ascensions in degrees.
host_dec_list: list-like
List of host-galaxy declinations in degrees.
work_dir: str, default ''
Working directory where to find the objects'
directories with the images. Default, current directory.
filters: str, defaul `None`
Filters used to extract photometry. If `None`, use all
the available filters for the given survey.
coadd: bool, default `True`
If `True`, a coadd image is created for common aperture.
coadd_filters: str, default `riz`
Filters to use for the coadd image.
mask_stars: bool, default `True`
If `True`, the stars identified inside the common aperture
are masked with the mean value of the background around them.
threshold: float, default `3`
Threshold used by `sep.extract()` to extract objects.
bkg_sub: bool, default `True`
If `True`, the image gets background subtracted.
survey: str, default `PS1`
Survey to use for the zero-points.
correct_extinction: bool, default `True`
If `True`, the magnitudes are corrected for extinction.
plot_output: bool, default `False`
If `True`, saves the output plots.
Returns
=======
global_phot_df: DataFrame
Dataframe with the photometry, errors and SN name.
"""
check_survey_validity(survey)
check_filters_validity(filters, survey)
if filters is None:
filters = get_survey_filters(survey)
# dictionary to save results
mag_dict = {filt:[] for filt in filters}
mag_err_dict = {filt+'_err':[] for filt in filters}
mag_dict.update(mag_err_dict)
results_dict = {'name':[], 'host_ra':[], 'host_dec':[]}
results_dict.update(mag_dict)
# filter funcstions for extinction correction
filters_dict = extract_filters(filters, survey)
for name, host_ra, host_dec in zip(name_list, host_ra_list,
host_dec_list):
sn_dir = os.path.join(work_dir, name)
image_files = [os.path.join(sn_dir, f'{survey}_{filt}.fits')
for filt in filters]
if coadd:
coadd_images(name, coadd_filters, work_dir, survey)
coadd_file = os.path.join(sn_dir, f'{survey}_{coadd_filters}.fits')
gal_object, _ = extract_aperture_params(coadd_file,
host_ra, host_dec,
threshold, bkg_sub)
else:
gal_object = None
for image_file, filt in zip(image_files, filters):
try:
                # use a separate variable so the boolean `plot_output` flag is
                # not overwritten with a file path on the first pass of the loop
                if plot_output:
                    plot_file = os.path.join(sn_dir, f'global_{filt}.jpg')
                else:
                    plot_file = None
                mag, mag_err = extract_global_photometry(
                    image_file, host_ra, host_dec, gal_object,
                    mask_stars, threshold, bkg_sub, survey, plot_file
                )
if correct_extinction:
wave = filters_dict[filt]['wave']
transmission = filters_dict[filt]['transmission']
A_ext = calc_ext(wave, transmission,
host_ra, host_dec)
mag -= A_ext
results_dict[filt].append(mag)
results_dict[filt+'_err'].append(mag_err)
except Exception as message:
results_dict[filt].append(np.nan)
results_dict[filt+'_err'].append(np.nan)
print(f'{name} failed with {filt} band: {message}')
results_dict['name'].append(name)
results_dict['host_ra'].append(host_ra)
results_dict['host_dec'].append(host_dec)
    global_phot_df = pd.DataFrame(results_dict)
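    return global_phot_df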
from datetime import datetime
import warnings
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, Series
import pandas._testing as tm
from pandas.core.window.common import flex_binary_moment
from pandas.tests.window.common import (
moments_consistency_cov_data,
moments_consistency_is_constant,
moments_consistency_mock_mean,
moments_consistency_series_data,
moments_consistency_std_data,
moments_consistency_var_data,
moments_consistency_var_debiasing_factors,
)
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
# binary moments
def test_rolling_cov(series):
A = series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_corr(series):
A = series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_rolling_pairwise_cov_corr(func, frame):
result = getattr(frame.rolling(window=10, min_periods=5), func)()
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = getattr(frame[1].rolling(window=10, min_periods=5), func)(frame[5])
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize("method", ["corr", "cov"])
def test_flex_binary_frame(method, frame):
series = frame[1]
res = getattr(series.rolling(window=10), method)(frame)
res2 = getattr(frame.rolling(window=10), method)(series)
exp = frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(frame.rolling(window=10), method)(frame2)
exp = DataFrame(
{k: getattr(frame[k].rolling(window=10), method)(frame2[k]) for k in frame}
)
| tm.assert_frame_equal(res3, exp) | pandas._testing.assert_frame_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Get/preprocess Google mobility data for the Netherlands.
Created on Sat Feb 6 21:31:20 2021
@author: @hk_nien
"""
import zipfile
import io
import urllib.request
import pandas as pd
import matplotlib.pyplot as plt
import scipy.signal
import tools
def download_g_mobility_data():
"""Download Google Mobility data, write to data/2020_NL_⋯.csv."""
url = 'https://www.gstatic.com/covid19/mobility/Region_Mobility_Report_CSVs.zip'
print(f'Getting Google Mobility data ...')
with urllib.request.urlopen(url) as response:
data_bytes = response.read()
zipf = zipfile.ZipFile(io.BytesIO(data_bytes))
fname = zipf.extract('2020_NL_Region_Mobility_Report.csv', path='data/')
print(f'Wrote {fname} .')
def get_g_mobility_data():
"""Return dataframe with simplified mobility data for the Netherlands.
Index is the timestamp.
Values are relative to baseline (1.0 = baseline), computed as (1 + deviation/100)
from the original percent deviations and smoothed to remove weekday effects.
Column names abbreviated: retail_recr, groc_phar, parks, transit, work, resid.
"""
df = pd.read_csv('data/2020_NL_Region_Mobility_Report.csv')
df = df.loc[df['sub_region_1'].isna()]
df['date'] = pd.to_datetime(df['date'])
df.set_index('date', inplace=True)
df.rename(columns={
'retail_and_recreation_percent_change_from_baseline': 'retail_recr',
'grocery_and_pharmacy_percent_change_from_baseline': 'groc_phar',
'parks_percent_change_from_baseline': 'parks',
'transit_stations_percent_change_from_baseline': 'transit',
'workplaces_percent_change_from_baseline': 'work',
'residential_percent_change_from_baseline': 'resid'
}, inplace=True)
df.drop(columns=['country_region_code', 'country_region', 'sub_region_1', 'sub_region_2',
'metro_area', 'iso_3166_2_code', 'census_fips_code'], inplace=True)
for c in df.columns:
smooth_data = scipy.signal.savgol_filter(df[c].values, 13, 2, mode='interp')
df[c] = 1 + 0.01 * smooth_data
# check whether it's up to date.
# Mobility data released on 2nd or 3rd of the month?
today = | pd.to_datetime('now') | pandas.to_datetime |
import os
import sys
import numpy as np
import pandas as pd
import xlwings as xw
from logzero import logger
from openpyxl import load_workbook
from openpyxl.styles import Alignment
from spareparts.lib.colors import Colors
from spareparts.lib.filters import (
trash_assemblies,
trash_description,
trash_fastener,
trash_file_name,
trash_item_number,
trash_parts_ending_P1_or_A1,
trash_prp,
trash_prp1,
trash_robot,
)
from spareparts.lib.settings import (
JDEPATH,
blue,
dict_header,
excel_headers,
headers_bg_hue,
mauve,
orange,
splname,
temp_jde,
template1,
template2,
tempo_local,
)
from yaspin import Spinner, yaspin
sp = Spinner([
"[ ]",
"[= ]",
"[== ]",
"[=== ]",
"[ ===]",
"[ ==]",
"[ =]",
"[ ]",
"[ =]",
"[ ==]",
"[ ===]",
"[====]",
"[=== ]",
"[== ]",
"[= ]"
], 80)
class Spareparts:
"""Generate spareparts list."""
JDE_TEMP = os.path.join(tempo_local, temp_jde)
def __init__(self):
self.jde = self.load_jde_data()
self.db = pd.DataFrame()
self.spl = pd.DataFrame()
self.asm = pd.DataFrame()
self.elec = pd.DataFrame()
self.garbage = pd.DataFrame()
self.nuts = pd.DataFrame()
self.plates = pd.DataFrame()
self.gearbox = pd.DataFrame()
self.drawings = {}
def generate_spl(self):
if os.path.exists("SPL.xlsx"):
raise FileExistsError(
"Remove or rename the SPL.xlsx in the current folder to able the process to run."
)
has_text_reports = os.listdir(".")
if not has_text_reports:
raise FileNotFoundError(
"No text file report has been found in the current folder."
)
files = (file for file in Spareparts.listing_txt_files())
parts = pd.concat(
[Spareparts.parse_se_report(file) for file in files], ignore_index=True
)
self.spl = Spareparts.joining_spl_jde(self.jde, parts)
self.spl.part_number = (
self.spl.part_number.str.strip()
) # strip part_number column
def load_db(self):
"""Load the item-level database"""
db_model = os.path.join(tempo_local, "levels.csv")
if not os.path.exists(db_model):
raise FileNotFoundError("No file levels.csv found in user tempo.\n")
df = pd.read_csv(db_model, dtype={"possibility": str})
df.columns = df.columns.str.strip().str.lower().str.replace(" ", "_")
df.item_number = df.item_number.astype(str)
df.item_number = df.item_number.str.strip()
df.possibility = df.possibility.astype(str)
df.possibility = df.possibility.str.strip()
self.db = df[["item_number", "possibility"]]
self.spl = self.spl.join(self.db.set_index("item_number"), on="jdelitm")
@staticmethod
def loading_spl(path):
"""load the data from spl list"""
if not os.path.exists(path):
raise FileNotFoundError("Check if spl path is correct.")
spl = pd.read_excel(path, sheet_name="Sheet1")
spl.columns = spl.columns.str.strip().str.lower().str.replace(" ", "_")
spl.item_number = spl.item_number.astype("str")
spl = spl[["item_number"]]
return spl
@staticmethod
def load_jde_data():
JDE_TEMP = Spareparts.JDE_TEMP
if os.path.exists(JDE_TEMP):
answer = input(
f"Do you want to load the temporary jde? (fast) \n Path:{JDE_TEMP}\n Proceed ([y]/n) ?:"
)
if answer.lower() in ["yes", "y"]:
jde_temp = pd.read_csv(JDE_TEMP)
return jde_temp
else:
print("Process interrupted.")
sys.exit()
else:
with yaspin(sp, side="right", text="Loading the JDE Inventory..."):
jde_data = Spareparts.extract_jde()
jde_data.to_csv(JDE_TEMP, index=False)
return jde_data
@staticmethod
def extract_jde():
""""""
# add a try - except (in case the file is not found)
# logger.info()
df = pd.read_excel(
JDEPATH,
sheet_name=0,
skiprows=[0, 1, 2, 3],
usecols="A,C,P,E,H,I,K,O,U,X,AA,AR,AT,CB",
dtype={"Business Unit": int, "Unit Cost": float},
)
df.columns = df.columns.str.strip().str.lower().str.replace(" ", "_")
df = df[df.business_unit == 101]
return df
@staticmethod
def parse_se_report(fichier):
""""""
try:
# add try and except
df = pd.read_csv(
fichier,
delimiter="\t",
skiprows=[0, 2],
header=1,
names=[
"Part Number",
"Revision",
"DSC_A",
"JDELITM",
"DIM",
"Quantity",
"File Name",
],
index_col=False,
encoding="latin3",
error_bad_lines=False,
na_values="-",
)
except pd.errors.ParserError as parse_error:
# Wrong format of text extracted from solidedge.
logger.error(f" [-][{parse_error}]")
sys.exit()
else:
# clean the columns
df.columns = df.columns.str.strip().str.lower().str.replace(" ", "_")
df["jdelitm"] = df["jdelitm"].str.strip()
df = Spareparts.replacing_C01(df)
df["quantity"] = pd.to_numeric(df["quantity"], errors="coerce")
df = df.groupby(
["part_number", "revision", "dsc_a", "dim", "jdelitm", "file_name"],
as_index=False,
)["quantity"].sum()
df = df.replace(r"^-?\s+$", np.nan, regex=True)
df = df.dropna(subset=["part_number", "jdelitm"]) #TODO: Keep the JDELITM columns in spl.
# give the module number
module_number = os.path.splitext(os.path.basename(fichier))[0]
df["module"] = module_number
logger.info(" [+][\t %s }\t]" % module_number)
return df
finally:
df = None
@staticmethod
def listing_txt_files():
""""""
return (file for file in os.listdir(".") if file.endswith(".txt"))
@staticmethod
def replacing_C01(df):
"""Replacing 123456_C01 to 123456."""
pat = r"(?P<number>\d{6})(?P<suffixe>_C\d{2})"
repl = lambda m: m.group("number")
df["part_number"] = df["part_number"].str.replace(pat, repl)
return df
@staticmethod
def joining_spl_jde(jde, parts):
"""transform the jde column to string format
join the parts documents with the jde on jdelitm column
and sort it on column:module
"""
jde.item_number = jde.item_number.astype(str)
spl = parts.join(jde.set_index("item_number"), on="jdelitm").sort_values(
"module"
)
return spl
def part_type(self):
"""create a column type --> .par .psm .asm"""
self.spl["type"] = self.spl.file_name.str.split(".").str[-1].str.strip()
self.spl.type = self.spl.type.str.lower()
def lines_numbers(self):
logger.info(
"\n\n"
"Qty/Groups :\n"
"-------------------------\n"
f"spl :\t{self.spl.shape[0]}\n"
f"garbage :\t{self.garbage.shape[0]}\n"
f"plates :\t{self.plates.shape[0]}\n"
f"elec :\t{self.elec.shape[0]}\n"
f"asm :\t{self.asm.shape[0]}\n"
f"nuts :\t{self.nuts.shape[0]}\n"
"-------------------------\n\n"
)
@yaspin(sp, side="right", text="Creating excel file, do not close the window ")
def create_excel(self, given_name_xlsx):
"""fill the tabs in excel file with the dataframes"""
tabs = {
"nuts": self.nuts,
"asm": self.asm,
"plates": self.plates,
"elec": self.elec,
"gearbox": self.gearbox,
"garbage": self.garbage,
"spl": self.spl,
}
wb = xw.Book() # this will create a new workbook
for tab in tabs.keys():
sht = wb.sheets.add(tab)
for tab, df in tabs.items():
sht = wb.sheets[
tab
] # skip the Sheet1 and create spl within a loop for all tab
sht.range("A1").value = excel_headers # insert headers (horizontal)
sht.range("A1:R1").api.Font.Bold = True # bold headers (horizontal)
for rang, color in headers_bg_hue.items():
xw.Range(rang).color = color
for colum, data in dict_header.items():
sht.range(colum).options(index=False, header=False).value = df[data]
sht.autofit()
wb.sheets[-1].delete()
wb.save(given_name_xlsx)
wb.close()
logger.info(f"{template1}: created")
@staticmethod
@yaspin(sp, side="right", text="Editing excel file, do not close the window ")
def edit_excel(file_name, new_name):
wb = load_workbook(file_name)
for s in wb.sheetnames:
ws = wb[s]
MAX_ = ws.max_row
field = f"A1:X{MAX_}"
ws.auto_filter.ref = field
for sheet in wb.sheetnames:
ws = wb[sheet]
significance_column = ws["F"]
for cell in significance_column:
cell.alignment = Alignment(horizontal="center")
wb.save(new_name)
wb.close()
logger.info(f"{template2}: created")
def refine(self):
ambiguous = self.spl[
~(
(self.spl.possibility == "1")
| (self.spl.possibility == "2")
| (self.spl.possibility == "3")
)
]
ambiguous_items = (
ambiguous.part_number.str.strip().tolist()
) # Whitespaces stripped here
for itm in ambiguous_items:
mdl = self.spl.loc[itm, "module"] # module => mdl
self.spl.loc[
self.spl.part_number == itm, "possibility"
] = self.db.loc[itm, mdl]
@Colors.obsolete(mauve)
@Colors.meter_foot(blue)
@Colors.electric(["Electric Component"], orange)
def extraction(splname, workbook, sht_name):
df = pd.read_excel(splname, sheet_name=sht_name)
sht = workbook.sheets[sht_name]
return (df, sht)
extraction = staticmethod(extraction)
@staticmethod
def add_colors(selected_file, sheet_spl):
wb = xw.Book(selected_file)
Spareparts.extraction(selected_file, wb, sheet_spl)
return wb
@yaspin(sp, side="right", text="Editing excel file, do not close the window ")
def colors_excel(self, selected_file, new_file):
args = {
"nuts": self.nuts,
"asm": self.asm,
"plates": self.plates,
"elec": self.elec,
"garbage": self.garbage,
"spl": self.spl,
}
for tab in args:
wb = Spareparts.add_colors(selected_file, tab)
wb.save(new_file)
wb.close()
logger.info(f"{splname}: created")
@staticmethod
def log_report(_df, df_name):
if _df.shape[0] == 0:
pass
else:
_df["groupe"] = df_name
logger.info("\n"+ _df[["groupe", "part_number", "description_1"]].to_string())
def strain(self):
"""Filters of unwanted parts here."""
# --------------------------------------------------------------------#
# #
# START FILTERS HERE #
# #
# --------------------------------------------------------------------#
# === plates ===
plates_prp1 = ["Aluminium", "Stainless Steel", "Steel"]
self.spl, self.garbage, _plates = trash_prp1(
self.spl, self.garbage, prp1=plates_prp1
)
self.garbage = pd.concat([self.garbage, _plates]).drop_duplicates(keep=False)
Spareparts.log_report(_plates, "_plates")
# === fasteners ===
self.spl, self.garbage, _nuts = trash_fastener(self.spl, self.garbage)
self.garbage = pd.concat([self.garbage, _nuts]).drop_duplicates(keep=False)
Spareparts.log_report(_nuts, "_nuts")
# === assemblies ===
self.spl, self.garbage, _asm = trash_assemblies(
self.spl, self.garbage
) # _ASM SEEMS TO BE THE EXCEPTION
self.garbage = pd.concat([self.garbage, _asm]).drop_duplicates(keep=False)
Spareparts.log_report(_asm, "_asm")
# === uncategorized ===
self.garbage_prp1 = [
"Sign & Label",
"Plumbing Hardware",
"Pièce Manufacturée Magasin",
]
self.spl, self.garbage, _uncatego = trash_prp1(
self.spl, self.garbage, prp1=self.garbage_prp1
)
self.garbage = pd.concat([self.garbage, _uncatego]).drop_duplicates(keep=False)
Spareparts.log_report(_uncatego, "_uncatego")
# === robot ===
self.spl, self.garbage, _robot = trash_robot(self.spl, self.garbage)
self.garbage = pd.concat([self.garbage, _robot]).drop_duplicates(keep=False)
Spareparts.log_report(_robot, "_robot")
# === gripper ===
contents_of_gripper = [
"PT1124830",
"PT0078604",
"PT0078603",
"24104091",
"24101598",
"24101597",
"171257",
"171259",
"171256",
"171255",
"24100056",
"PT0078602",
"PT0078601",
"EEG58C7007P-1",
"24100360",
"EEG58C6002P-6",
"54010220",
"24300030",
"24104854",
"24104591",
"24104548",
"162925_EEG58C",
"171228",
]
self.spl, self.garbage, _inside_gripper = trash_item_number(
self.spl, self.garbage, list_parts=contents_of_gripper
)
self.garbage = pd.concat([self.garbage, _inside_gripper]).drop_duplicates(
keep=False
)
Spareparts.log_report(_inside_gripper, "_inside_gripper")
# === industrial ===
self.spl, self.garbage, _industrial = trash_prp(
self.spl, self.garbage, prp1=["Industrial Engine"], prp2=["Engine Parts"]
)
self.garbage = pd.concat([self.garbage, _industrial]).drop_duplicates(
keep=False
)
Spareparts.log_report(_industrial, "_industrial")
# === _furniture ===
self.spl, self.garbage, _furniture = trash_prp(
self.spl, self.garbage, prp1=["Factory Furniture"], prp2=["Tape"]
)
self.garbage = pd.concat([self.garbage, _furniture]).drop_duplicates(keep=False)
Spareparts.log_report(_furniture, "_furniture")
# === _gearbox ===
self.spl, self.garbage, _gearbox = trash_prp(
self.spl,
self.garbage,
prp1=["Mechanical Component"],
prp2=["Gearbox, Gear, Rack & Pinion", "Gear Motor & Motor"],
)
self.garbage = pd.concat([self.garbage, _gearbox]).drop_duplicates(keep=False)
Spareparts.log_report(_gearbox, "_gearbox")
# === _grommet ===
self.spl, self.garbage, _grommet = trash_description(
self.spl, self.garbage, keyword="GROMMET;RUBBER"
)
self.garbage = pd.concat([self.garbage, _grommet]).drop_duplicates(keep=False)
Spareparts.log_report(_grommet, "_grommet")
# === _pneu_frl ===
self.spl, self.garbage, _pneu_frl = trash_description(
self.spl,
self.garbage,
keyword=r"PNEU\.F\.R\.L",
description="description_1",
)
self.garbage = pd.concat([self.garbage, _pneu_frl]).drop_duplicates(keep=False)
Spareparts.log_report(_pneu_frl, "_pneu_frl")
# === _clamp ===
self.spl, self.garbage, _clamp = trash_description(
self.spl,
self.garbage,
keyword="CLAMP;TRANSPORT UNIT",
description="description_2",
)
self.garbage = pd.concat([self.garbage, _clamp]).drop_duplicates(keep=False)
Spareparts.log_report(_clamp, "_clamp")
# === electric ===
self.spl, self.garbage, _elec = trash_prp(
self.spl,
self.garbage,
prp1=["Electric Component"],
prp2=[
"Cable Tray & Cable Carrier",
"Conduits & fittings",
"Enclosures",
"Sensors",
"Lights & bulbs",
"Switches",
"General hardware",
"Stickers",
"Buttons & pilot lights",
"Connectors & crimps",
],
)
self.garbage = pd.concat([self.garbage, _elec]).drop_duplicates(keep=False)
# logger.info(f"***_elec: {_elec}")
Spareparts.log_report(_elec, "_elec")
# === _par ===
self.spl, self.garbage, _par = trash_file_name(
self.spl, self.garbage, keyword=r"^par\s*$"
)
self.garbage = pd.concat([self.garbage, _par]).drop_duplicates(keep=False)
Spareparts.log_report(_par, "_par")
# === _P1_A1 ===
self.spl, self.garbage, _P1_A1 = trash_parts_ending_P1_or_A1(
self.spl, self.garbage
)
self.garbage = pd.concat([self.garbage, _P1_A1]).drop_duplicates(keep=False)
Spareparts.log_report(_P1_A1, "_P1_A1")
# === _collar ===
collar = r"COLLAR"
self.spl, self.garbage, _collar = trash_description(
self.spl, self.garbage, keyword=collar
)
self.garbage = | pd.concat([self.garbage, _collar]) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 26 18:17:30 2015
@author: <NAME>
"""
import pandas
import numpy
import scipy.stats
import seaborn
import matplotlib.pyplot as plt
data = pandas.read_csv('gapminder.csv', low_memory=False)
# new code setting variables you will be working with to numeric
data['Alcoholuse'] = | pandas.to_numeric(data['Alcoholuse'], errors='coerce') | pandas.to_numeric |
"""Lexical mapping of ontology classes
The core data structure used here is a Mapping Graph. This is a
networkx Graph object (i.e. singly labeled, non-directional) that
connects lexically mapped nodes between two ontologies.
Edge Properties
---------------
idpair: (string,string)
the pair of identifiers mapped
score: number
Number between 0 and 100 indicating strength of match based on multiple criteria
synonyms: (Synonym,Synonym)
pair of Synonym objects (including primary labels) used to create mapping
simscores: (number, number)
Semantic similarity A to B and B to A respectively.
Note that false positives or negatives in the ancestors or descendants in the xref graph will lead to bias in these scores.
reciprocal_score: int
A number between 0 and 4 that indicates whether this was a reciprocal best match (RBM), with additional gradation based on whether
ties are included. We distinguish between a true BM and a tied BM. 4 indicates true RBM. 1 indicates reciprocal tied BM (ie both are tied BMs). 2 indicates a combo of
a true BM and a tied BM.
Note that ties are less likely if semantic similarity is considered in the match.
"""
import networkx as nx
from networkx.algorithms import strongly_connected_components
import logging
import re
from ontobio.ontol import Synonym, Ontology
from collections import defaultdict
import pandas as pd
import numpy as np
import math
from marshmallow import Schema, fields, pprint, post_load
LABEL_OR_EXACT = 'label_or_exact'
logger = logging.getLogger(__name__)
def logit(p):
return math.log2(p/(1-p))
def inv_logit(w):
return 1/(1+2**(-w))
def default_wsmap():
"""
Default word to normalized synonym list
"""
return {
'a':'',
'of':'',
'the':'',
'i':'1',
'ii':'2',
'iii':'3',
'iv':'4',
'v':'5',
'vi':'6',
'vii':'7',
'viii':'8',
'ix':'9',
'x':'10',
'xi':'11',
'xii':'12',
'xiii':'13',
'xiv':'14',
'xv':'15',
'xvi':'16',
'xvii':'17',
'xviii':'18',
'xix':'19',
'xx':'20',
'':''
}
class LexicalMapEngine():
"""
generates lexical matches between pairs of ontology classes
"""
SCORE='score'
LEXSCORE='lexscore'
SIMSCORES='simscores'
CONDITIONAL_PR='cpr'
def __init__(self, wsmap=default_wsmap(), config=None):
"""
Arguments
---------
wdmap: dict
maps words to normalized synonyms.
config: dict
A configuration conforming to LexicalMapConfigSchema
"""
# maps label or syn value to Synonym object
self.lmap = {}
# maps node id to synonym objects
self.smap = {}
self.wsmap = wsmap
self.npattern = re.compile('[\W_]+')
self.exclude_obsolete = True
self.ontology_pairs = None
self.id_to_ontology_map = defaultdict(list)
self.merged_ontology = Ontology()
self.config = config if config is not None else {}
self.stats = {}
def index_ontologies(self, onts):
logger.info('Indexing: {}'.format(onts))
for ont in onts:
self.index_ontology(ont)
def index_ontology(self, ont):
"""
Adds an ontology to the index
This iterates through all labels and synonyms in the ontology, creating an index
"""
self.merged_ontology.merge([ont])
syns = ont.all_synonyms(include_label=True)
include_id = self._is_meaningful_ids()
logger.info("Include IDs as synonyms: {}".format(include_id))
if include_id:
for n in ont.nodes():
v = n
# Get fragment
if v.startswith('http'):
v = re.sub('.*/','',v)
v = re.sub('.*#','',v)
syns.append(Synonym(n, val=v, pred='label'))
logger.info("Indexing {} syns in {}".format(len(syns),ont))
logger.info("Distinct lexical values: {}".format(len(self.lmap.keys())))
for syn in syns:
self.index_synonym(syn, ont)
for nid in ont.nodes():
self.id_to_ontology_map[nid].append(ont)
def label(self, nid):
return self.merged_ontology.label(nid)
def index_synonym(self, syn, ont):
"""
Index a synonym
Typically not called from outside this object; called by `index_ontology`
"""
if not syn.val:
if syn.pred == 'label':
if not self._is_meaningful_ids():
if not ont.is_obsolete(syn.class_id):
pass
#logger.error('Use meaningful ids if label not present: {}'.format(syn))
else:
logger.warning("Incomplete syn: {}".format(syn))
return
if self.exclude_obsolete and ont.is_obsolete(syn.class_id):
return
syn.ontology = ont
prefix,_ = ont.prefix_fragment(syn.class_id)
v = syn.val
caps_match = re.match('[A-Z]+',v)
if caps_match:
# if > 75% of length is caps, assume abbreviation
if caps_match.span()[1] >= len(v)/3:
syn.is_abbreviation(True)
# chebi 'synonyms' are often not real synonyms
# https://github.com/ebi-chebi/ChEBI/issues/3294
if not re.match('.*[a-zA-Z]',v):
if prefix != 'CHEBI':
logger.warning('Ignoring suspicious synonym: {}'.format(syn))
return
v = self._standardize_label(v)
# TODO: do this once ahead of time
wsmap = {}
for w,s in self.wsmap.items():
wsmap[w] = s
for ss in self._get_config_val(prefix,'synsets',[]):
# TODO: weights
wsmap[ss['synonym']] = ss['word']
nv = self._normalize_label(v, wsmap)
self._index_synonym_val(syn, v)
nweight = self._get_config_val(prefix, 'normalized_form_confidence', 0.8)
if nweight > 0 and not syn.is_abbreviation():
if nv != v:
nsyn = Synonym(syn.class_id,
val=syn.val,
pred=syn.pred,
lextype=syn.lextype,
ontology=ont,
confidence=syn.confidence * nweight)
self._index_synonym_val(nsyn, nv)
def _index_synonym_val(self, syn, v):
lmap = self.lmap
smap = self.smap
cid = syn.class_id
if v not in lmap:
lmap[v] = []
lmap[v].append(syn)
if cid not in smap:
smap[cid] = []
smap[cid].append(syn)
def _standardize_label(self, v):
# Add spaces separating camelcased strings
v = re.sub('([a-z])([A-Z])',r'\1 \2',v)
# always use lowercase when comparing
# we may want to make this configurable in future
v = v.lower()
return v
def _normalize_label(self, s, wsmap):
"""
normalized form of a synonym
"""
toks = []
for tok in list(set(self.npattern.sub(' ', s).split(' '))):
if tok in wsmap:
tok=wsmap[tok]
if tok != "":
toks.append(tok)
toks.sort()
return " ".join(toks)
def _get_config_val(self, prefix, k, default=None):
v = None
for oc in self.config.get('ontology_configurations', []):
if prefix == oc.get('prefix', ''):
v = oc.get(k, None)
if v is None:
v = self.config.get(k, None)
if v is None:
v = default
return v
def _is_meaningful_ids(self):
return self.config.get('meaningful_ids', False)
def find_equiv_sets(self):
return self.lmap
def get_xref_graph(self):
"""
Generate mappings based on lexical properties and return as nx graph.
Algorithm
~~~~~~~~~
- A dictionary is stored between ref:`Synonym` values and synonyms. See ref:`index_synonym`.
Note that Synonyms include the primary label
- Each key in the dictionary is examined to determine if there exist two Synonyms from
different ontology classes
This avoids N^2 pairwise comparisons: instead the time taken is linear
After initial mapping is made, additional scoring is performed on each mapping
Edge properties
~~~~~~~~~~~~~~~
The return object is a nx graph, connecting pairs of ontology classes.
Edges are annotated with metadata about how the match was found:
syns: pair
pair of `Synonym` objects, corresponding to the synonyms for the two nodes
score: int
score indicating strength of mapping, between 0 and 100
Returns
-------
Graph
nx graph (bidirectional)
"""
# initial graph; all matches
g = nx.MultiDiGraph()
# lmap collects all syns by token
items = self.lmap.items()
logger.info("collecting initial xref graph, items={}".format(len(items)))
i = 0
sum_nsyns = 0
n_skipped = 0
has_self_comparison = False
if self.ontology_pairs:
for (o1id,o2id) in self.ontology_pairs:
if o1id == o2id:
has_self_comparison = True
for (v,syns) in items:
sum_nsyns += len(syns)
i += 1
if i % 1000 == 1:
logger.info('{}/{} lexical items avgSyns={}, skipped={}'.format(i,len(items), sum_nsyns/len(items), n_skipped))
if len(syns) < 2:
n_skipped += 1
next
if len(syns) > 10:
logger.info('Syns for {} = {}'.format(v,len(syns)))
for s1 in syns:
s1oid = s1.ontology.id
s1cid = s1.class_id
for s2 in syns:
# optimization step: although this is redundant with _is_comparable,
# we avoid inefficient additional calls
if s1oid == s2.ontology.id and not has_self_comparison:
next
if s1cid != s2.class_id:
if self._is_comparable(s1,s2):
g.add_edge(s1.class_id, s2.class_id, syns=(s1,s2))
logger.info("getting best supporting synonym pair for each match")
# graph of best matches
xg = nx.Graph()
for i in g.nodes():
for j in g.neighbors(i):
best = 0
bestm = None
for m in g.get_edge_data(i,j).values():
(s1,s2) = m['syns']
score = self._combine_syns(s1,s2)
if score > best:
best = score
bestm = m
syns = bestm['syns']
xg.add_edge(i, j,
score=best,
lexscore=best,
syns=syns,
idpair=(i,j))
self.score_xrefs_by_semsim(xg)
self.assign_best_matches(xg)
if self.merged_ontology.xref_graph is not None:
self.compare_to_xrefs(xg, self.merged_ontology.xref_graph)
else:
logger.error("No xref graph for merged ontology")
logger.info("finished xref graph")
return xg
# true if syns s1 and s2 should be compared.
# - if ontology_pairs is set, then only consider (s1,s2) if their respective source ontologies are in the list of pairs
# - otherwise compare all classes, but only in one direction
def _is_comparable(self, s1, s2):
if s1.class_id == s2.class_id:
return False
if self.ontology_pairs is not None:
#logger.debug('TEST: {}{} in {}'.format(s1.ontology.id, s2.ontology.id, self.ontology_pairs))
return (s1.ontology.id, s2.ontology.id) in self.ontology_pairs
else:
return s1.class_id < s2.class_id
def _blanket(self, nid):
nodes = set()
for ont in self.id_to_ontology_map[nid]:
nodes.update(ont.ancestors(nid))
nodes.update(ont.descendants(nid))
return list(nodes)
def score_xrefs_by_semsim(self, xg, ont=None):
"""
Given an xref graph (see ref:`get_xref_graph`), this will adjust scores based on
the semantic similarity of matches.
"""
logger.info("scoring xrefs by semantic similarity for {} nodes in {}".format(len(xg.nodes()), ont))
for (i,j,d) in xg.edges(data=True):
pfx1 = self._id_to_ontology(i)
pfx2 = self._id_to_ontology(j)
ancs1 = self._blanket(i)
ancs2 = self._blanket(j)
s1,_,_ = self._sim(xg, ancs1, ancs2, pfx1, pfx2)
s2,_,_ = self._sim(xg, ancs2, ancs1, pfx2, pfx1)
s = 1 - ((1-s1) * (1-s2))
logger.debug("Score {} x {} = {} x {} = {} // {}".format(i,j,s1,s2,s, d))
xg[i][j][self.SIMSCORES] = (s1,s2)
xg[i][j][self.SCORE] *= s
def _sim(self, xg, ancs1, ancs2, pfx1, pfx2):
"""
Compare two lineages
"""
xancs1 = set()
for a in ancs1:
if a in xg:
# TODO: restrict this to neighbors in single ontology
for n in xg.neighbors(a):
pfx = self._id_to_ontology(n)
if pfx == pfx2:
xancs1.add(n)
logger.debug('SIM={}/{} ## {}'.format(len(xancs1.intersection(ancs2)), len(xancs1), xancs1.intersection(ancs2), xancs1))
n_shared = len(xancs1.intersection(ancs2))
n_total = len(xancs1)
return (1+n_shared) / (1+n_total), n_shared, n_total
# given an ontology class id,
# return map keyed by ontology id, value is a list of (score, ext_class_id) pairs
def _neighborscores_by_ontology(self, xg, nid):
xrefmap = defaultdict(list)
for x in xg.neighbors(nid):
score = xg[nid][x][self.SCORE]
for ont in self.id_to_ontology_map[x]:
xrefmap[ont.id].append( (score,x) )
return xrefmap
# normalize direction
def _dirn(self, edge, i, j):
if edge['idpair'] == (i,j):
return 'fwd'
elif edge['idpair'] == (j,i):
return 'rev'
else:
return None
def _id_to_ontology(self, id):
return self.merged_ontology.prefix(id)
#onts = self.id_to_ontology_map[id]
#if len(onts) > 1:
# logger.warning(">1 ontology for {}".format(id))
def compare_to_xrefs(self, xg1, xg2):
"""
Compares a base xref graph with another one
"""
ont = self.merged_ontology
for (i,j,d) in xg1.edges(data=True):
ont_left = self._id_to_ontology(i)
ont_right = self._id_to_ontology(j)
unique_lr = True
num_xrefs_left = 0
same_left = False
if i in xg2:
for j2 in xg2.neighbors(i):
ont_right2 = self._id_to_ontology(j2)
if ont_right2 == ont_right:
unique_lr = False
num_xrefs_left += 1
if j2 == j:
same_left = True
unique_rl = True
num_xrefs_right = 0
same_right = False
if j in xg2:
for i2 in xg2.neighbors(j):
ont_left2 = self._id_to_ontology(i2)
if ont_left2 == ont_left:
unique_rl = False
num_xrefs_right += 1
if i2 == i:
same_right = True
(x,y) = d['idpair']
xg1[x][y]['left_novel'] = num_xrefs_left==0
xg1[x][y]['right_novel'] = num_xrefs_right==0
xg1[x][y]['left_consistent'] = same_left
xg1[x][y]['right_consistent'] = same_right
def assign_best_matches(self, xg):
"""
For each node in the xref graph, tag best match edges
"""
logger.info("assigning best matches for {} nodes".format(len(xg.nodes())))
for i in xg.nodes():
xrefmap = self._neighborscores_by_ontology(xg, i)
for (ontid,score_node_pairs) in xrefmap.items():
score_node_pairs.sort(reverse=True)
(best_score,best_node) = score_node_pairs[0]
logger.info("BEST for {}: {} in {} from {}".format(i, best_node, ontid, score_node_pairs))
edge = xg[i][best_node]
dirn = self._dirn(edge, i, best_node)
best_kwd = 'best_' + dirn
if len(score_node_pairs) == 1 or score_node_pairs[0] > score_node_pairs[1]:
edge[best_kwd] = 2
else:
edge[best_kwd] = 1
for (score,j) in score_node_pairs:
edge_ij = xg[i][j]
dirn_ij = self._dirn(edge_ij, i, j)
edge_ij['cpr_'+dirn_ij] = score / sum([s for s,_ in score_node_pairs])
for (i,j,edge) in xg.edges(data=True):
# reciprocal score is set if (A) i is best for j, and (B) j is best for i
rs = 0
if 'best_fwd' in edge and 'best_rev' in edge:
rs = edge['best_fwd'] * edge['best_rev']
edge['reciprocal_score'] = rs
edge['cpr'] = edge['cpr_fwd'] * edge['cpr_rev']
def _best_match_syn(self, sx, sys, scope_map):
"""
The best match is determined by the highest magnitude weight
"""
SUBSTRING_WEIGHT = 0.2
WBEST = None
sbest = None
sxv = self._standardize_label(sx.val)
sxp = self._id_to_ontology(sx.class_id)
for sy in sys:
syv = self._standardize_label(sy.val)
syp = self._id_to_ontology(sy.class_id)
W = None
if sxv == syv:
confidence = sx.confidence * sy.confidence
if sx.is_abbreviation() or sy.is_abbreviation():
confidence *= self._get_config_val(sxp, 'abbreviation_confidence', 0.5)
confidence *= self._get_config_val(syp, 'abbreviation_confidence', 0.5)
W = scope_map[sx.scope()][sy.scope()] + logit(confidence/2)
elif sxv in syv:
W = np.array((-SUBSTRING_WEIGHT, SUBSTRING_WEIGHT, 0, 0))
elif syv in sxv:
W = np.array((SUBSTRING_WEIGHT, -SUBSTRING_WEIGHT, 0, 0))
if W is not None:
# The best match is determined by the highest magnitude weight
if WBEST is None or max(abs(W)) > max(abs(WBEST)):
WBEST = W
sbest = sy
return WBEST, sbest
def weighted_axioms(self, x, y, xg):
"""
return a tuple (sub,sup,equiv,other) indicating estimated prior probabilities for an interpretation of a mapping
between x and y.
See kboom paper
"""
# TODO: allow additional weighting
# weights are log odds w=log(p/(1-p))
# (Sub,Sup,Eq,Other)
scope_pairs = [
('label', 'label', 0.0, 0.0, 3.0,-0.8),
('label', 'exact', 0.0, 0.0, 2.5,-0.5),
('label', 'broad', -1.0, 1.0, 0.0, 0.0),
('label', 'narrow', 1.0,-1.0, 0.0, 0.0),
('label', 'related', 0.0, 0.0, 0.0, 0.0),
('exact', 'exact', 0.0, 0.0, 2.5,-0.5),
('exact', 'broad', -1.0, 1.0, 0.0, 0.0),
('exact', 'narrow', 1.0,-1.0, 0.0, 0.0),
('exact', 'related', 0.0, 0.0, 0.0, 0.0),
('related', 'broad', -0.5, 0.5, 0.0, 0.0),
('related', 'narrow', 0.5,-0.5, 0.0, 0.0),
('related', 'related', 0.0, 0.0, 0.0, 0.0),
('broad', 'broad', 0.0, 0.0, 0.0, 1.0),
('broad', 'narrow', -0.5, 0.5, 0.0, 0.2),
('narrow', 'narrow', 0.0, 0.0, 0.0, 0.0)
]
# populate symmetric lookup matrix
scope_map = defaultdict(dict)
for (l,r,w1,w2,w3,w4) in scope_pairs:
l = l.upper()
r = r.upper()
scope_map[l][r] = np.array((w1,w2,w3,w4))
scope_map[r][l] = np.array((w2,w1,w3,w4))
# TODO: get prior based on ontology pair
# cumulative sum of weights
WS = None
pfx1 = self._id_to_ontology(x)
pfx2 = self._id_to_ontology(y)
for mw in self.config.get('match_weights', []):
mpfx1 = mw.get('prefix1','')
mpfx2 = mw.get('prefix2','')
X = np.array(mw['weights'])
if mpfx1 == pfx1 and mpfx2 == pfx2:
WS = X
elif mpfx2 == pfx1 and mpfx1 == pfx2:
WS = self._flipweights(X)
elif mpfx1 == pfx1 and mpfx2 == '' and WS is None:
WS = X
elif mpfx2 == pfx1 and mpfx1 == '' and WS is None:
WS = self._flipweights(X)
if WS is None:
WS = np.array((0.0, 0.0, 0.0, 0.0))
# defaults
WS += np.array(self.config.get('default_weights', [0.0, 0.0, 1.5, -0.1]))
logger.info('WS defaults={}'.format(WS))
for xw in self.config.get('xref_weights', []):
left = xw.get('left','')
right = xw.get('right','')
X = np.array(xw['weights'])
if x == left and y == right:
WS += X
logger.info('MATCH: {} for {}-{}'.format(X, x, y))
elif y == left and x == right:
WS += self._flipweights(X)
logger.info('IMATCH: {}'.format(X))
smap = self.smap
# TODO: symmetrical
WT = np.array((0.0, 0.0, 0.0, 0.0))
WBESTMAX = np.array((0.0, 0.0, 0.0, 0.0))
n = 0
for sx in smap[x]:
WBEST, _ = self._best_match_syn(sx, smap[y], scope_map)
if WBEST is not None:
WT += WBEST
n += 1
if max(abs(WBEST)) > max(abs(WBESTMAX)):
WBESTMAX = WBEST
for sy in smap[y]:
WBEST, _ = self._best_match_syn(sy, smap[x], scope_map)
if WBEST is not None:
WT += WBEST
n += 1
# average best match
if n > 0:
logger.info('Adding BESTMAX={}'.format(WBESTMAX))
WS += WBESTMAX
# TODO: xref, many to many
WS += self._graph_weights(x, y, xg)
# TODO: include additional defined weights, eg ORDO
logger.info('Adding WS, gw={}'.format(WS))
# jaccard similarity
(ss1,ss2) = xg[x][y][self.SIMSCORES]
WS[3] += ((1-ss1) + (1-ss2)) / 2
# reciprocal best hits are higher confidence of equiv
rs = xg[x][y]['reciprocal_score']
if rs == 4:
WS[2] += 0.5
if rs == 0:
WS[2] -= 0.2
#P = np.expit(WS)
P = 1/(1+np.exp(-WS))
logger.info('Final WS={}, init P={}'.format(WS, P))
# probs should sum to 1.0
P = P / np.sum(P)
return P
def _graph_weights(self, x, y, xg):
ont = self.merged_ontology
xancs = ont.ancestors(x)
yancs = ont.ancestors(y)
pfx = self._id_to_ontology(x)
pfy = self._id_to_ontology(y)
xns = [n for n in xg.neighbors(y) if n != x and pfx == self._id_to_ontology(n)]
yns = [n for n in xg.neighbors(x) if n != y and pfy == self._id_to_ontology(n)]
pweight = 1.0
W = np.array((0,0,0,0))
card = '11'
if len(xns) > 0:
card = 'm1'
for x2 in xns:
if x2 in xancs:
W[0] += pweight
if x in ont.ancestors(x2):
W[1] += pweight
if len(yns) > 0:
if card == '11':
card = '1m'
else:
card = 'mm'
for y2 in yns:
if y2 in yancs:
W[1] += pweight
if y in ont.ancestors(y2):
W[0] += pweight
logger.debug('CARD: {}/{} <-> {}/{} = {} // X={} Y={} // W={}'.format(x,pfx, y,pfy, card, xns, yns, W))
invcard = card
if card == '1m':
invcard = 'm1'
elif card == 'm1':
invcard = '1m'
CW = None
DEFAULT_CW = None
for cw in self.config.get('cardinality_weights', []):
if 'prefix1' not in cw and 'prefix2' not in cw:
if card == cw['cardinality']:
DEFAULT_CW = np.array(cw['weights'])
if invcard == cw['cardinality']:
DEFAULT_CW = self._flipweights(np.array(cw['weights']))
if 'prefix1' in cw and 'prefix2' in cw:
if pfx == cw['prefix1'] and pfy == cw['prefix2'] and card == cw['cardinality']:
CW = np.array(cw['weights'])
if pfx == cw['prefix2'] and pfy == cw['prefix1'] and invcard == cw['cardinality']:
CW = self._flipweights(np.array(cw['weights']))
if CW is None:
if DEFAULT_CW is not None:
CW = DEFAULT_CW
else:
if card == '11':
CW = np.array((0.0, 0.0, 1.0, 0.0))
elif card == '1m':
CW = np.array((0.6, 0.4, 0.0, 0.0))
elif card == 'm1':
CW = np.array((0.4, 0.6, 0.0, 0.0))
elif card == 'mm':
CW = np.array((0.2, 0.2, 0.0, 0.5))
return W + CW
def _flipweights(self, W):
return np.array((W[1],W[0],W[2],W[3]))
def grouped_mappings(self,id):
"""
return all mappings for a node, grouped by ID prefix
"""
g = self.get_xref_graph()
m = {}
for n in g.neighbors(id):
[prefix, local] = n.split(':')
if prefix not in m:
m[prefix] = []
m[prefix].append(n)
return m
def unmapped_nodes(self, xg, rs_threshold=0):
unmapped_set = set()
for nid in self.merged_ontology.nodes():
if nid in xg:
for (j,edge) in xg[nid].items():
rs = edge.get('reciprocal_score',0)
if rs < rs_threshold:
unmapped_set.add(nid)
else:
unmapped_set.add(nid)
return unmapped_set
def unmapped_dataframe(self, xg, **args):
unodes = self.unmapped_nodes(xg, **args)
ont = self.merged_ontology
eg = ont.equiv_graph()
items = []
for n in unodes:
mapped_equivs = ''
if n in eg:
equivs = set(eg.neighbors(n))
mapped_equivs = list(equivs - unodes)
items.append(dict(id=n,label=ont.label(n),mapped_equivs=mapped_equivs))
df = | pd.DataFrame(items, columns=['id','label', 'mapped_equivs']) | pandas.DataFrame |
import numpy as np
from numpy.core.numeric import _rollaxis_dispatcher
import pandas as pd
from pymbar import BAR as BAR_
from pymbar import MBAR as MBAR_
from alchemlyb.estimators import MBAR
from sklearn.base import BaseEstimator
import copy
import re
import itertools
import logging
logger = logging.getLogger(__name__)
class Estimators():
"""
Return the estimated binding free energy (dG).
Returns the dG between state A and state B using three different energy estimators:
Zwanzig, Thermodynamic Integration TI, or Bennett Acceptance Ratio (BAR).
"""
def Zwanzig(dEs,steps):
"""
Return the estimated binding free energy using Zwanzig estimator.
Computes the binding free energy (dG) from a molecular dynamics simulation
between state A and state B using the Zwanzig estimator.
Parameters
----------
dEs : Pandas Dataframe
contains the reduced potential (dE) between the states.
steps : integer
the number of the steps to be included in the calculation, set to "None" if all steps are needed.
Returns
---------
Zwanzig_df : Pandas Dataframe
contains the binding free energy (dG) between the states.
Examples
--------
>>> Zwanzig(dEs,None)
>>> Zwanzig(dEs,1000)
"""
dEs_df=pd.DataFrame(-0.592*np.log(np.mean(np.exp(-dEs.iloc[:steps]/0.592))))
Lambdas=[]
dGF=[]
dGF_sum=[]
dGR=[]
dGR_sum=[]
dG_Average=[]
dGR.append(0.0)
dG_Average.append(0.0)
for i in range(1,len(dEs_df.index),2):
Lambdas.append(re.split('_|-',dEs_df.index[i-1])[1])
dGF.append(dEs_df.iloc[i,0])
dGR.append(dEs_df.iloc[i-1,0])
Lambdas.append(re.split('_|-',dEs_df.index[-1])[1])
dGF.append(0.0)
dGF=dGF[::-1]
for i in range(len(dGF)):
dGF_sum.append(sum(dGF[:i+1]))
dGR_sum.append(sum(dGR[:i+1]))
dG_average_raw=((pd.DataFrame(dGF[1:]))-pd.DataFrame(dGR[1:][::-1]))/2
for i in range(len(list(dG_average_raw.values))):
dG_Average.append(np.sum(dG_average_raw.values[:i+1]))
Zwanzig_df=pd.DataFrame.from_dict({"Lambda":Lambdas,"dG_Forward":dGF,"SUM_dG_Forward":dGF_sum,"dG_Reverse":dGR[::-1],"SUM_dG_Reverse":dGR_sum[::-1],"dG_Average":dG_Average})
Zwanzig_Final_dG = Zwanzig_df['dG_Average'].iloc[-1]
logger.info('Final DG computed from Zwanzig estimator: ' +str(Zwanzig_Final_dG))
return Zwanzig_df, Zwanzig_Final_dG
def Create_df_TI(State_A_df, State_B_df):
"""
create the input dataframe needed for the Thermodynamic Integration (TI) function.
Parameters
----------
State_A_df : Pandas DataFrame for state A energies
State_B_df : Pandas DataFrame for state B energies
----------
Returns
----------
dU_dH_df : Pandas DataFrame
"""
dU_dH_df=(pd.DataFrame({"lambda":State_A_df["Lambda"],"fep":State_B_df["Q_sum"] - State_A_df["Q_sum"] })).sort_values('lambda')
dU_dH_df.reset_index(drop=True,inplace=True)
dU_dH_df.index.names = ['time']
dU_dH_df.set_index(['lambda'], append=True,inplace=True)
return dU_dH_df
def TI(State_A_df,State_B_df,steps):
"""
Return the estimated binding free energy using Thermodynamic integration (TI) estimator.
Compute free energy differences between each state by integrating
dHdl across lambda values.
Parameters
----------
dHdl : Pandas DataFrame
----------
Returns
----------
delta_f_ : DataFrame
The estimated dimensionless free energy difference between each state.
d_delta_f_ : DataFrame
The estimated statistical uncertainty (one standard deviation) in
dimensionless free energy differences.
states_ : list
Lambda states for which free energy differences were obtained.
TI : float
The free energy difference between state 0 and state 1.
"""
if steps != None:
Energies_df=(pd.DataFrame({"lambda":State_A_df["Lambda"],"fep":State_B_df["Q_sum"] - State_A_df["Q_sum"] })).sort_values('lambda')
Energies_df=pd.DataFrame.from_dict(dict(Energies_df.groupby('lambda',sort=False)['fep'].apply(list)),orient='index')
Energies_df=Energies_df.transpose()
Energies_df=Energies_df.iloc[:steps]
dfl=pd.DataFrame(columns=['lambda','fep'])
dU_dH_df=pd.DataFrame(columns=['lambda','fep'])
for state in range (len(Energies_df.columns)):
dfl=pd.DataFrame(columns=['lambda','fep'])
dfl['fep']=Energies_df.iloc[:,state]
dfl['lambda']=Energies_df.columns.values[state]
dU_dH_df=dU_dH_df.append(dfl)
else:
dU_dH_df=(pd.DataFrame({"lambda":State_A_df["Lambda"],"fep":State_B_df["Q_sum"] - State_A_df["Q_sum"] })).sort_values('lambda')
dU_dH_df.reset_index(drop=True,inplace=True)
dU_dH_df.index.names = ['time']
dU_dH_df.set_index(['lambda'], append=True,inplace=True)
# dU_dH_df=(pd.DataFrame({"lambda":State_A_df["Lambda"][:steps],"fep":State_B_df["Q_sum"][:steps] - State_A_df["Q_sum"][:steps] })).sort_values('lambda')
# dU_dH_df.reset_index(drop=True,inplace=True)
# dU_dH_df.index.names = ['time']
# dU_dH_df.set_index(['lambda'], append=True,inplace=True)
dHdl=dU_dH_df
# sort by state so that rows from same state are in contiguous blocks,
# and adjacent states are next to each other
dHdl = dHdl.sort_index(level=dHdl.index.names[1:])
# obtain the mean and variance of the mean for each state
# variance calculation assumes no correlation between points
# used to calculate mean
means = dHdl.mean(level=dHdl.index.names[1:])
variances = np.square(dHdl.sem(level=dHdl.index.names[1:]))
# get the lambda names
l_types = dHdl.index.names[1:]
# obtain vector of delta lambdas between each state
dl = means.reset_index()[means.index.names[:]].diff().iloc[1:].values
# apply trapezoid rule to obtain DF between each adjacent state
deltas = (dl * (means.iloc[:-1].values + means.iloc[1:].values)/2).sum(axis=1)
# build matrix of deltas between each state
adelta = np.zeros((len(deltas)+1, len(deltas)+1))
ad_delta = np.zeros_like(adelta)
for j in range(len(deltas)):
out = []
dout = []
for i in range(len(deltas) - j):
out.append(deltas[i] + deltas[i+1:i+j+1].sum())
# Define additional zero lambda
a = [0.0] * len(l_types)
# Define dl series' with additional zero lambda on the left and right
dll = np.insert(dl[i:i + j + 1], 0, [a], axis=0)
dlr = np.append(dl[i:i + j + 1], [a], axis=0)
# Get a series of the form: x1, x1 + x2, ..., x(n-1) + x(n), x(n)
dllr = dll + dlr
# Append deviation of free energy difference between state i and i+j+1
dout.append((dllr ** 2 * variances.iloc[i:i + j + 2].values / 4).sum(axis=1).sum())
adelta += np.diagflat(np.array(out), k=j+1)
ad_delta += np.diagflat(np.array(dout), k=j+1)
# yield standard delta_f_ free energies between each state
delta_f_ = pd.DataFrame(adelta - adelta.T,
columns=means.index.values,
index=means.index.values)
# yield standard deviation d_delta_f_ between each state
d_delta_f_ = pd.DataFrame(np.sqrt(ad_delta + ad_delta.T),
columns=variances.index.values,
index=variances.index.values)
states_ = means.index.values.tolist()
TI=( delta_f_.loc[0.00, 1.00])
return delta_f_ , TI
def Create_df_BAR_MBAR(State_A_df, State_B_df):
"""
Create the input dataframe needed for the Bennett Acceptance Ratio (BAR) and multistate Bennett Acceptance Ratio (MBAR) estimators.
Parameters
----------
State_A_df : Pandas DataFrame for state A energies
State_B_df : Pandas DataFrame for state B energies
----------
Returns
----------
u_nk_df : Pandas DataFrame
"""
Energies_df=(pd.DataFrame({"State_A_Lambda":State_A_df["Lambda"],"State_A_G":State_A_df["Q_sum"] ,"State_B_Lambda":State_B_df["Lambda"],"State_B_G":State_B_df["Q_sum"],"E":State_B_df["Q_sum"] - State_A_df["Q_sum"] })).sort_values('State_A_Lambda')
State_A_Energies_df=pd.DataFrame.from_dict(dict(Energies_df.groupby('State_A_Lambda',sort=False)['State_A_G'].apply(list)),orient='index')
State_A_Energies_df=State_A_Energies_df.transpose()
State_B_Energies_df=pd.DataFrame.from_dict(dict(Energies_df.groupby('State_B_Lambda',sort=False)['State_B_G'].apply(list)),orient="index")
State_B_Energies_df=State_B_Energies_df.transpose()
lambdas_list_A=list(State_A_Energies_df.columns)
lambdas_list_B=list(State_B_Energies_df.columns)
time= [i for i in range(len(State_A_Energies_df))]
lambdas_df=[i for i in State_A_Energies_df.columns]
States={i:[] for i in range(len(lambdas_list_A))}
States_dicts={i:[] for i in range(len(lambdas_list_A))}
for i in range(len(State_A_Energies_df.columns)):
State_A_Energies=State_A_Energies_df.iloc[:,[i]]
State_A_Energies.columns=["0"]
State_A_Lambda_float=State_A_Energies_df.columns[i]
State_B_Energies=State_B_Energies_df.iloc[:,[i]]
State_B_Energies.columns=["0"]
State_B_Lambda_float=State_B_Energies_df.columns[i]
E0=State_A_Energies*State_A_Lambda_float+State_B_Energies*State_B_Lambda_float
for x in range(len(lambdas_list_A)):
E1=State_A_Energies*lambdas_list_A[x]+State_B_Energies*lambdas_list_B[x]
dE=E1-E0
dE=dE.values.tolist()
dE=list(itertools.chain(*dE))
States_dicts[i].append(dE)
for i in range(len(States_dicts)):
States[i]=list(itertools.chain(*States_dicts[i]))
u_nk_df=pd.DataFrame.from_dict(States)
u_nk_df.columns=lambdas_list_A
lambdas_df=lambdas_df*len(State_A_Energies_df)
lambdas_df.sort()
u_nk_df['time']=time*len(State_A_Energies_df.columns)
u_nk_df['fep-lambda']=lambdas_df
u_nk_df=u_nk_df.astype('float')
u_nk_df.set_index(['time'] ,append=False,inplace=True)
u_nk_df.set_index(['fep-lambda'], append=True,inplace=True)
u_nk_df.columns= u_nk_df.columns.astype('float')
u_nk_df.dropna(axis=0,inplace=True)
return u_nk_df,States_dicts,State_A_Energies_df
def Create_df_dG_BAR(States_dicts,State_A_Energies_df,steps):
"""
Create the input dataframe needed for the Bennett Acceptance Ratio (BAR) estimator and calculate the free energy.
Parameters
----------
States_dicts : Pandas DataFrame for state A energies
State_A_Energies_df : Pandas DataFrame for state A energies
steps : Integer
maximum number of steps to use
----------
Returns
----------
BAR_dG : float
"""
States_dicts2=copy.deepcopy(States_dicts)
States_dicts3={}
lambdas_list_A=list(State_A_Energies_df.columns)
time = [i for i in range(len(State_A_Energies_df))]
lambdas_df=lambdas_list_A
for x in States_dicts.keys():
for i in range(len(States_dicts[x])):
States_dicts2[x][i]=States_dicts[x][i][:steps]
for i in range(len(States_dicts2)):
States_dicts3[i]=list(itertools.chain(*States_dicts2[i]))
u_nk_df=pd.DataFrame.from_dict(States_dicts3)
u_nk_df.columns=lambdas_list_A
lambdas_df=lambdas_df*len(State_A_Energies_df.iloc[:steps])
lambdas_df.sort()
u_nk_df['time']=time[:steps]*len(State_A_Energies_df.columns)
u_nk_df['fep-lambda']=lambdas_df
u_nk_df=u_nk_df.astype('float')
u_nk_df.set_index(['time'] ,append=False,inplace=True)
u_nk_df.set_index(['fep-lambda'], append=True,inplace=True)
u_nk_df.columns= u_nk_df.columns.astype('float')
u_nk_df.dropna(axis=0,inplace=True)
BAR_df=BAR().fit(u_nk_df)
BAR_dG = BAR_df.delta_f_.loc[0.00, 1.00]
return BAR_dG
def Create_df_dG_MBAR(States_dicts,State_A_Energies_df,steps):
"""
Create the input dataframe needed for the multistate Bennett Acceptance Ratio (MBAR) estimator and calculate the free energy.
Parameters
----------
States_dicts : Pandas DataFrame for state A energies
State_A_Energies_df : Pandas DataFrame for state A energies
steps : Integer
maximum number of steps to use
----------
Returns
----------
MBAR_dG : float
"""
States_length=[]
for x in States_dicts.keys():
for i in range(len(States_dicts[x])):
States_length.append(len(States_dicts[x][i]))
if min(States_length)==max(States_length):
States_dicts2=copy.deepcopy(States_dicts)
else:
print("energy files dosen't have the same length",'min',min(States_length),'max',max(States_length))
for x in States_dicts.keys():
for i in range(len(States_dicts[x])):
States_dicts[x][i]=States_dicts[x][i][:min(States_length)]
States_dicts2=copy.deepcopy(States_dicts)
States_dicts3={}
lambdas_list_A=list(State_A_Energies_df.columns)
time = [i for i in range(len(State_A_Energies_df))]
lambdas_df=lambdas_list_A
for x in States_dicts.keys():
for i in range(len(States_dicts[x])):
States_dicts2[x][i]=States_dicts[x][i][:steps]
for i in range(len(States_dicts2)):
States_dicts3[i]=list(itertools.chain(*States_dicts2[i]))
u_nk_df=pd.DataFrame.from_dict(States_dicts3)
u_nk_df.columns=lambdas_list_A
lambdas_df=lambdas_df*len(State_A_Energies_df.iloc[:steps])
lambdas_df.sort()
u_nk_df['time']=time[:steps]*len(State_A_Energies_df.columns)
u_nk_df['fep-lambda']=lambdas_df
u_nk_df=u_nk_df.astype('float')
u_nk_df.set_index(['time'] ,append=False,inplace=True)
u_nk_df.set_index(['fep-lambda'], append=True,inplace=True)
u_nk_df.columns= u_nk_df.columns.astype('float')
u_nk_df.dropna(axis=0,inplace=True)
MBAR_df= MBAR().fit(u_nk_df)
MBAR_dG = MBAR_df.delta_f_.loc[0.00, 1.00]
return MBAR_dG
def Convergence(df1,df2,Estimator,StepsChunk_Int,ReplicatiesCount_Int,EnergyOutputInterval_Int):
# the last and first steps are not included in the reading
"""
Convergence analysis
Return a dataframe containing the computed free energy dG at different step intervals, using one of three energy estimators:
Zwanzig, Thermodynamic Integration TI, or Bennett Acceptance Ratio (BAR).
Parameters
----------
df : Pandas DataFrame
Contains the dEs between the states
Estimator : funcation
The Free energy estimating method (Zwanzig or TI or BAR)
StepsChunk_Int: integer
The Number of Steps(fs) to be used.
ReplicatiesCount_Int: integer
The Number of used replicates.
EnergyOutputInterval_Int: integer
The interval at which the molecular dynamics simulation software
writes out the energies.
----------
Returns
----------
Convergence_df : Pandas DataFrame
Contains the computed dG at each interval.
Examples
--------
>>> Convergence(dEs,Zwanzig,1000,1,10)
>>> Convergence(dEs,TI,10000,3,10)
"""
if isinstance(df1, pd.DataFrame) and isinstance(df2, pd.DataFrame) :
dGs_Lst=[Estimator(df1,df2,steps_limit)[1] for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,int((len(df1)/len(df1['Lambda'].unique())))+1,StepsChunk_Int*ReplicatiesCount_Int)]
StepsChunk_Lst=[EnergyOutputInterval_Int*steps_limit/ReplicatiesCount_Int for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,int((len(df1)/len(df1['Lambda'].unique())))+1,StepsChunk_Int*ReplicatiesCount_Int)]
elif isinstance(df2, pd.DataFrame) and not isinstance(df1, pd.DataFrame):
dGs_Lst=[Estimator(df1,df2,steps_limit) for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,len(df2)+1,StepsChunk_Int*ReplicatiesCount_Int)]
StepsChunk_Lst=[EnergyOutputInterval_Int*steps_limit/ReplicatiesCount_Int for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,len(df2)+1,StepsChunk_Int*ReplicatiesCount_Int)]
else:
dGs_Lst=[Estimator(df1,steps_limit)[1] for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,len(df1)+1,StepsChunk_Int*ReplicatiesCount_Int)]
StepsChunk_Lst=[EnergyOutputInterval_Int*steps_limit/ReplicatiesCount_Int for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,len(df1)+1,StepsChunk_Int*ReplicatiesCount_Int)]
#StepsChunk_Lst=[EnergyOutputInterval_Int*steps_limit/ReplicatiesCount_Int for steps_limit in range((StepsChunk_Int-2)*ReplicatiesCount_Int,len(df1)+1,StepsChunk_Int*ReplicatiesCount_Int)]
Convergence_df= | pd.DataFrame({'Number of Steps':StepsChunk_Lst, 'dG':dGs_Lst }) | pandas.DataFrame |
"""
Helper functions for detecting missing values
"""
import numpy as np
import pandas as pd
__all__ = (
"missing",
"missing_count",
"single_missing",
)
def single_missing(
points, single_missing_value: (int, float, str, None.__class__)
):
"""Function that realize check on missing of given value.
Parameters
----------
points : numpy array, pandas Series, pandas DataFrame
An numobservations by numdimensions array of observations.
single_missing_value : int, float, str
A single value that we accept as indicator of missing value.
Returns
-------
mask : A numobservations-length boolean array.
Raises
------
AttributeError
If passed invalid type of `points` value, e.g. all types
except pandas Series/DataFrame or numpy ndarray.
If passed invalid type of `missing_value` value.
"""
if isinstance(points, (pd.DataFrame, pd.Series, np.ndarray)):
if single_missing_value is np.NAN or single_missing_value is None:
return | pd.isnull(points) | pandas.isnull |
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
concat,
)
import pandas._testing as tm
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_ewm_pairwise_cov_corr(func, frame):
result = getattr(frame.ewm(span=10, min_periods=5), func)()
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = getattr(frame[1].ewm(span=10, min_periods=5), func)(frame[5])
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_ewm_corr_cov(name):
A = Series(np.random.randn(50), index=np.arange(50))
B = A[2:] + np.random.randn(48)
A[:10] = np.NaN
B[-10:] = np.NaN
result = getattr(A.ewm(com=20, min_periods=5), name)(B)
assert np.isnan(result.values[:14]).all()
assert not np.isnan(result.values[14:]).any()
@pytest.mark.parametrize("min_periods", [0, 1, 2])
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_ewm_corr_cov_min_periods(name, min_periods):
# GH 7898
A = Series(np.random.randn(50), index=np.arange(50))
B = A[2:] + np.random.randn(48)
A[:10] = np.NaN
B[-10:] = np.NaN
result = getattr(A.ewm(com=20, min_periods=min_periods), name)(B)
# binary functions (ewmcov, ewmcorr) with bias=False require at
# least two values
assert np.isnan(result.values[:11]).all()
assert not np.isnan(result.values[11:]).any()
# check series of length 0
empty = Series([], dtype=np.float64)
result = getattr(empty.ewm(com=50, min_periods=min_periods), name)(empty)
tm.assert_series_equal(result, empty)
# check series of length 1
result = getattr(Series([1.0]).ewm(com=50, min_periods=min_periods), name)(
Series([1.0])
)
tm.assert_series_equal(result, Series([np.NaN]))
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_different_input_array_raise_exception(name):
A = Series(np.random.randn(50), index=np.arange(50))
A[:10] = np.NaN
msg = "other must be a DataFrame or Series"
# exception raised is Exception
with pytest.raises(ValueError, match=msg):
getattr(A.ewm(com=20, min_periods=5), name)(np.random.randn(50))
def create_mock_weights(obj, com, adjust, ignore_na):
if isinstance(obj, DataFrame):
if not len(obj.columns):
return DataFrame(index=obj.index, columns=obj.columns)
w = concat(
[
create_mock_series_weights(
obj.iloc[:, i], com=com, adjust=adjust, ignore_na=ignore_na
)
for i, _ in enumerate(obj.columns)
],
axis=1,
)
w.index = obj.index
w.columns = obj.columns
return w
else:
return create_mock_series_weights(obj, com, adjust, ignore_na)
def create_mock_series_weights(s, com, adjust, ignore_na):
w = | Series(np.nan, index=s.index) | pandas.Series |
# Path for saving the Qunar popular-attractions Excel file
import os
import random
import pandas as pd
import requests
import time
"""
For detailed tutorials, code help, or to offer suggestions,
follow the WeChat public account 「裸睡的猪」 to get in touch with 猪哥.
@Author : 猪哥
"""
# Path for saving the Qunar popular-attractions Excel file
PLACE_EXCEL_PATH = '../edition_sample/NationalDayTravel/qunar_place.xlsx'
def spider_place(keyword, page):
"""
Scrape scenic spots.
:param keyword: search keyword
:param page: page-number parameter
:return:
"""
url = f'http://piao.qunar.com/ticket/list.json?keyword={keyword}®ion=&from=mpl_search_suggest&page={page}'
headers = {
# 'Accept': 'application / json, text / javascript, * / *; q = 0.01',
# 'Accept-Encoding': 'gzip, deflate',
# 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
# 'Connection': 'keep-alive',
# 'Host': 'piao.qunar.com',
# 'Referer': 'http://piao.qunar.com/ticket/list.htm?keyword=%E5%9B%BD%E5%BA%86%E6%97%85%E6%B8%B8%E6%99%AF%E7%82%B9®ion=&from=mpl_search_suggest',
# 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36',
# 'X-Requested-With': 'XMLHttpRequest'
'Accept': 'text / html, application / xhtml + xml, application / xml;q = 0.9, image / webp, image / apng, * / *;q = 0.8, application / signed - exchange;v = b3',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN, zh;q = 0.9, en;q = 0.8',
'Cache-Control': 'max-age = 0',
'Connection': 'keep - alive',
# 'Cookie': 'QN1 = 000014802eb41a41e438fe4b;QN300 = organic;_i = RBTKSLgC8tExmy6x6oQ_wxhBqZix;QN269 = 9DF13A54E35811E9AD86FA163E29CDD5;QN57 = 15698305360030.1948380886333314;fid = 6cd00ebd - 9d1b - 4f24 - bae9 - 32e38f050de6;csrfToken = <PASSWORD>;JSESSIONID = 17E996574DAC1D6250C71078E28E7F22;QN58 = 1571887859354 % 7C1571887859354 % 7C1;_vi = MJBvjAa53MHETW57G_goUAm_GQSaoi - GBUneWO30QGcu4yEn5ZAm6bNC50W1loLtNtRh4tkTX8cF9TXHyRdj7D347E_7hLBm1L1xteyFPHtXP8QdGL7mhoIPjRrxTjQK5xJC13Ui8OY05_wUtrP_v45onPaubNvLYfxiu1wy70GW;QN271 = 6f9f5622 - ea0e - 4543 - 9b12 - 99758fea69eb;QN267 = 1607978286d15e2317',
'Host': 'piao.qunar.com',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla / 5.0(Linux;Android6.0;Nexus5Build / MRA58N) AppleWebKit / 537.36(KHTML, likeGecko) Chrome / 77.0.3865.90MobileSafari / 537.36'
}
# Grab proxies yourself if you need them; 猪哥 did not use a proxy here
# Zdaye proxy list: http://ip.zdaye.com/dayProxy.html
proxies = {'http': '172.16.58.3:1080',
'https': '172.16.31.10:3128'}
response = requests.get(url, headers=headers)
# Extract attraction information
place_list = get_place_info(response.json())
print(place_list)
if len(place_list) != 0:
# Save attraction information
save_excel(place_list)
def get_place_info(response_json):
"""
解析json,获取想要字段
:param response_json:
:return:
"""
sight_list = response_json['data']['sightList']
place_list = []
for sight in sight_list:
goods = {'id': sight['sightId'],  # attraction id
'name': sight['sightName'],  # attraction name
'star': sight.get('star', '无'),  # star rating; use get() to avoid a KeyError
'score': sight.get('score', 0),  # review score
'price': sight.get('qunarPrice', 0),  # ticket price
'sale': sight.get('saleCount', 0),  # sales volume
'districts': sight['districts'],  # province / city / county
'point': sight['point'],  # coordinates
'intro': sight.get('intro', ''),  # short description
}
place_list.append(goods)
return place_list
def save_excel(place_list):
"""
将json数据生成excel文件
:param place_list: 景点数据
:return:
"""
# pandas has no append mode for Excel, so read the existing file first and then rewrite it
if os.path.exists(PLACE_EXCEL_PATH):
df = pd.read_excel(PLACE_EXCEL_PATH)
df = df.append(place_list)
else:
df = | pd.DataFrame(place_list) | pandas.DataFrame |
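# Hypothetical driver (not in the original source): the unused `random`/`time`
# imports above suggest a throttled paging loop like the sketch below; the page
# range is an assumption, and the keyword is the one URL-encoded in the Referer.
if __name__ == '__main__':
    for page in range(1, 15):
        # fetch and persist one page of attractions
        spider_place('国庆旅游景点', page)
        # sleep a random interval so requests are not fired too aggressively
        time.sleep(random.uniform(1, 3))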
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud import storage
from pandas.io import gbq
import pandas as pd
import pickle
import re
import os
class PatentLandscapeExpander:
"""Class for L1&L2 expansion as 'Automated Patent Landscaping' describes.
This object takes a seed set and a Google Cloud BigQuery project name and
exposes methods for doing expansion of the project. The logical entry-point
to the class is load_from_disk_or_do_expansion, which checks for cached
expansions for the given self.seed_name, and if a previous run is available
it will load it from disk and return it; otherwise, it does L1 and L2
expansions, persists it in a cached 'data/[self.seed_name]/' directory,
and returns the data to the caller.
"""
seed_file = None
# BigQuery must be enabled for this project
bq_project = 'patent-landscape-165715'
patent_dataset = 'patents-public-data:patents.publications_latest'
#tmp_table = 'patents._tmp'
l1_tmp_table = 'patents._l1_tmp'
l2_tmp_table = 'patents._l2_tmp'
antiseed_tmp_table = 'patents.antiseed_tmp'
country_codes = set(['US'])
num_anti_seed_patents = 15000
us_only = True
# ratios and multipler for finding uniquely common CPC codes from seed set
min_ratio_of_code_to_seed = 0.04
min_seed_multiplier = 50.0
# persisted expansion information
training_data_full_df = None
seed_patents_df = None
l1_patents_df = None
l2_patents_df = None
anti_seed_patents = None
seed_data_path = None
def __init__(self, seed_file, seed_name, bq_project=None, patent_dataset=None, num_antiseed=None, us_only=None, prepare_training=True):
self.seed_file = seed_file
self.seed_data_path = os.path.join('data', seed_name)
if bq_project is not None:
self.bq_project = bq_project
if patent_dataset is not None:
self.patent_dataset = patent_dataset
#if tmp_table is not None:
# self.tmp_table = tmp_table
if num_antiseed is not None:
self.num_anti_seed_patents = num_antiseed
if us_only is not None:
self.us_only = us_only
self.prepare_training = prepare_training
def load_seeds_from_bq(self, seed_df):
where_clause = ",".join("'" + seed_df.PubNum + "'")
if self.us_only:
seed_patents_query = '''
SELECT
b.publication_number,
'Seed' as ExpansionLevel,
STRING_AGG(citations.publication_number) AS refs,
STRING_AGG(cpcs.code) AS cpc_codes
FROM
`patents-public-data.patents.publications` AS b,
UNNEST(citation) AS citations,
UNNEST(cpc) AS cpcs
WHERE
REGEXP_EXTRACT(b.publication_number, r'\w+-(\w+)-\w+') IN
(
{}
)
AND b.country_code = 'US'
AND citations.publication_number != ''
AND cpcs.code != ''
GROUP BY b.publication_number
;
'''.format(where_clause)
else:
seed_patents_query = '''
SELECT
b.publication_number,
'Seed' as ExpansionLevel,
STRING_AGG(citations.publication_number) AS refs,
STRING_AGG(cpcs.code) AS cpc_codes
FROM
`patents-public-data.patents.publications` AS b,
UNNEST(citation) AS citations,
UNNEST(cpc) AS cpcs
WHERE
b.publication_number IN
(
{}
)
AND citations.publication_number != ''
AND cpcs.code != ''
GROUP BY b.publication_number
;
'''.format(where_clause)
seed_patents_df = gbq.read_gbq(
query=seed_patents_query,
project_id=self.bq_project,
verbose=False,
dialect='standard')
return seed_patents_df
def load_seed_pubs(self, seed_file=None):
if seed_file is None:
seed_file = self.seed_file
#if self.us_only:
seed_df = | pd.read_csv(seed_file, header=None, names=['PubNum'], dtype={'PubNum': 'str'}) | pandas.read_csv |
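# Hypothetical usage sketch (editorial addition): the class docstring names
# load_from_disk_or_do_expansion as the logical entry point; the seed path,
# seed name, project id, and the no-argument call below are assumptions.
#
# expander = PatentLandscapeExpander(
#     seed_file='seed/my_seed.csv',
#     seed_name='my_seed',
#     bq_project='my-gcp-project',
#     num_antiseed=10000)
# training_data_df = expander.load_from_disk_or_do_expansion()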
# generator.py
# load libraries
import numpy as np
import pandas as pd
'''
var= np.round(np.random.normal(distribution mean,
standard deviation, number of samples),
number of digits after the decimal point)
# .5 is prob dist of 0 and 1
label = np.random.choice([0, 1], size=7000, p=[.5, .5])
'''
class Generator():
def __init__(self):
self.name = "gemerata"
def get_dataframe(self, datag):
'''
Build a DataFrame of synthetic features from the config dict `datag`:
normally distributed numerical columns plus categorical columns, using
the mean/stdev/sample-count/rounding settings read below.
'''
mean = datag['mean']
stdev = datag['stdev']
samples = datag['samples']
decimals = datag['decimals']
nn = datag['number_of_numerical_features']
nc = datag['number_of_categorical_features']
ncats = datag['number_of_categories']
df = | pd.DataFrame() | pandas.DataFrame |
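# Minimal standalone sketch (an assumption, not the original get_dataframe
# implementation): it only shows how the np.random.normal / np.random.choice
# patterns from the module docstring could be combined with the config keys above.
def _example_synthetic_frame(mean=0.0, stdev=1.0, samples=100, decimals=2, nn=3, nc=1, ncats=4):
    frame = pd.DataFrame()
    for i in range(nn):
        # numerical features: rounded draws from a normal distribution
        frame[f'num_{i}'] = np.round(np.random.normal(mean, stdev, samples), decimals)
    for j in range(nc):
        # categorical features: integer category codes in [0, ncats)
        frame[f'cat_{j}'] = np.random.choice(range(ncats), size=samples)
    # balanced binary label, as in the docstring example
    frame['label'] = np.random.choice([0, 1], size=samples, p=[.5, .5])
    return frame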
from io import StringIO
import pandas as pd
import numpy as np
import pytest
import bioframe
import bioframe.core.checks as checks
# import pyranges as pr
# def bioframe_to_pyranges(df):
# pydf = df.copy()
# pydf.rename(
# {"chrom": "Chromosome", "start": "Start", "end": "End"},
# axis="columns",
# inplace=True,
# )
# return pr.PyRanges(pydf)
# def pyranges_to_bioframe(pydf):
# df = pydf.df
# df.rename(
# {"Chromosome": "chrom", "Start": "start", "End": "end", "Count": "n_intervals"},
# axis="columns",
# inplace=True,
# )
# return df
# def pyranges_overlap_to_bioframe(pydf):
# ## convert the df output by pyranges join into a bioframe-compatible format
# df = pydf.df.copy()
# df.rename(
# {
# "Chromosome": "chrom_1",
# "Start": "start_1",
# "End": "end_1",
# "Start_b": "start_2",
# "End_b": "end_2",
# },
# axis="columns",
# inplace=True,
# )
# df["chrom_1"] = df["chrom_1"].values.astype("object") # to remove categories
# df["chrom_2"] = df["chrom_1"].values
# return df
chroms = ["chr12", "chrX"]
def mock_bioframe(num_entries=100):
pos = np.random.randint(1, 1e7, size=(num_entries, 2))
df = pd.DataFrame()
df["chrom"] = np.random.choice(chroms, num_entries)
df["start"] = np.min(pos, axis=1)
df["end"] = np.max(pos, axis=1)
df.sort_values(["chrom", "start"], inplace=True)
return df
############# tests #####################
def test_select():
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
region1 = "chr1:4-10"
df_result = pd.DataFrame([["chr1", 4, 5]], columns=["chrom", "start", "end"])
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX:4-6"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
### select with non-standard column names
region1 = "chrX:4-6"
new_names = ["chr", "chrstart", "chrend"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=new_names,
)
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]],
columns=new_names,
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
region1 = "chrX"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
### select from a DataFrame with NaNs
colnames = ["chrom", "start", "end", "view_region"]
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_result = pd.DataFrame(
[["chr1", -6, 12, "chr1p"]],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
region1 = "chr1:0-1"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df, region1).reset_index(drop=True)
)
def test_trim():
### trim with view_df
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 32, 36, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 26, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
with pytest.raises(ValueError):
bioframe.trim(df, view_df=view_df)
# df_view_col already exists, so need to specify it:
pd.testing.assert_frame_equal(
df_trimmed, bioframe.trim(df, view_df=view_df, df_view_col="view_region")
)
### trim with view_df interpreted from dictionary for chromsizes
chromsizes = {"chr1": 20, "chrX_0": 5}
df = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX_0", 1, 8],
],
columns=["chrom", "startFunky", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 20],
["chrX_0", 1, 5],
],
columns=["chrom", "startFunky", "end"],
).astype({"startFunky": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(
df,
view_df=chromsizes,
cols=["chrom", "startFunky", "end"],
return_view_columns=False,
),
)
### trim with default limits=None and negative values
df = pd.DataFrame(
[
["chr1", -4, 12],
["chr1", 13, 26],
["chrX", -5, -1],
],
columns=["chrom", "start", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX", 0, 0],
],
columns=["chrom", "start", "end"],
)
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim when there are NaN intervals
df = pd.DataFrame(
[
["chr1", -4, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", -5, -1, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 0, 0, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim with view_df and NA intervals
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12],
["chr1", 0, 12],
[pd.NA, pd.NA, pd.NA],
["chrX", 1, 20],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, pd.NA],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
# infer df_view_col with assign_view and ignore NAs
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(df, view_df=view_df, df_view_col=None, return_view_columns=True)[
["chrom", "start", "end", "view_region"]
],
)
def test_expand():
d = """chrom start end
0 chr1 1 5
1 chr1 50 55
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+")
expand_bp = 10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 -9 15
1 chr1 40 65
2 chr2 90 210"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with negative pad
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 110 190"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp, side="left")
d = """chrom start end
0 chr1 3 5
1 chr1 52 55
2 chr2 110 200"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with multiplicative pad
mult = 0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 150 150"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
mult = 2.0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 -1 7
1 chr1 48 58
2 chr2 50 250"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with NA and non-integer multiplicative pad
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
mult = 1.10
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 95 205"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df, fake_expanded)
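# Worked check (editorial note, not an original test): scale resizes an interval
# symmetrically around its midpoint, so chr2 100-200 (midpoint 150, half-width 50)
# becomes 50-250 for scale=2.0 and 95-205 for scale=1.10, while pad-based
# expansion simply shifts start by -pad and end by +pad.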
def test_overlap():
### test consistency of overlap(how='inner') with pyranges.join ###
### note does not test overlap_start or overlap_end columns of bioframe.overlap
df1 = mock_bioframe()
df2 = mock_bioframe()
assert df1.equals(df2) == False
# p1 = bioframe_to_pyranges(df1)
# p2 = bioframe_to_pyranges(df2)
# pp = pyranges_overlap_to_bioframe(p1.join(p2, how=None))[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# bb = bioframe.overlap(df1, df2, how="inner")[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# pp = pp.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# bb = bb.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# pd.testing.assert_frame_equal(bb, pp, check_dtype=False, check_exact=False)
# print("overlap elements agree")
### test overlap on= [] ###
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[["chr1", 6, 10, "+", "dog"], ["chrX", 7, 10, "-", "dog"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 3
b = bioframe.overlap(
df1,
df2,
on=["strand"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 2
b = bioframe.overlap(
df1,
df2,
on=None,
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 0
### test overlap 'left', 'outer', and 'right'
b = bioframe.overlap(
df1,
df2,
on=None,
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 5
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="inner",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 0
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="right",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 2
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
### test keep_order and NA handling
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+"],
[pd.NA, pd.NA, pd.NA, "-"],
["chrX", 1, 8, "+"],
],
columns=["chrom", "start", "end", "strand"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, "+"], [pd.NA, pd.NA, pd.NA, "-"], ["chrX", 7, 10, "-"]],
columns=["chrom2", "start2", "end2", "strand"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=True, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
assert ~df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=False, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chrX", 1, 8, pd.NA, pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, pd.NA, "tiger"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert (
bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
keep_order=False,
).shape
== (3, 12)
)
### result of overlap should still have bedframe-like properties
overlap_df = bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
overlap_df = bioframe.overlap(
df1,
df2,
how="innter",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
# test keep_order incompatible if how!= 'left'
with pytest.raises(ValueError):
bioframe.overlap(
df1,
df2,
how="outer",
on=["animal"],
cols2=["chrom2", "start2", "end2"],
keep_order=True,
)
def test_cluster():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 1])
).all() # the last interval does not overlap the first three
df_annotated = bioframe.cluster(df1, min_dist=2)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 0])
).all() # all intervals part of the same cluster
df_annotated = bioframe.cluster(df1, min_dist=None)
assert (
df_annotated["cluster"].values == np.array([0, 0, 1, 2])
).all() # adjacent intervals not clustered
df1.iloc[0, 0] = "chrX"
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([2, 0, 0, 1])
).all() # do not cluster intervals across chromosomes
# test consistency with pyranges (which automatically sorts df upon creation and uses 1-based indexing for clusters)
# assert (
# (bioframe_to_pyranges(df1).cluster(count=True).df["Cluster"].values - 1)
# == bioframe.cluster(df1.sort_values(["chrom", "start"]))["cluster"].values
# ).all()
# test on=[] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert (
bioframe.cluster(df1, on=["animal"])["cluster"].values == np.array([0, 1, 0, 2])
).all()
assert (
bioframe.cluster(df1, on=["strand"])["cluster"].values == np.array([0, 1, 1, 2])
).all()
assert (
bioframe.cluster(df1, on=["location", "animal"])["cluster"].values
== np.array([0, 2, 1, 3])
).all()
### test cluster with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.cluster(df1)["cluster"].max() == 3
assert bioframe.cluster(df1, on=["strand"])["cluster"].max() == 4
pd.testing.assert_frame_equal(df1, bioframe.cluster(df1)[df1.columns])
assert checks.is_bedframe(
bioframe.cluster(df1, on=["strand"]),
cols=["chrom", "cluster_start", "cluster_end"],
)
assert checks.is_bedframe(
bioframe.cluster(df1), cols=["chrom", "cluster_start", "cluster_end"]
)
assert checks.is_bedframe(bioframe.cluster(df1))
def test_merge():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
# the last interval does not overlap the first three with default min_dist=0
assert (bioframe.merge(df1)["n_intervals"].values == np.array([3, 1])).all()
# adjacent intervals are not clustered with min_dist=none
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values == np.array([2, 1, 1])
).all()
# all intervals part of one cluster
assert (
bioframe.merge(df1, min_dist=2)["n_intervals"].values == np.array([4])
).all()
df1.iloc[0, 0] = "chrX"
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values
== np.array([1, 1, 1, 1])
).all()
assert (
bioframe.merge(df1, min_dist=0)["n_intervals"].values == np.array([2, 1, 1])
).all()
# total number of intervals should equal length of original dataframe
mock_df = mock_bioframe()
assert np.sum(bioframe.merge(mock_df, min_dist=0)["n_intervals"].values) == len(
mock_df
)
# # test consistency with pyranges
# pd.testing.assert_frame_equal(
# pyranges_to_bioframe(bioframe_to_pyranges(df1).merge(count=True)),
# bioframe.merge(df1),
# check_dtype=False,
# check_exact=False,
# )
# test on=['chrom',...] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert len(bioframe.merge(df1, on=None)) == 2
assert len(bioframe.merge(df1, on=["strand"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location", "animal"])) == 4
d = """ chrom start end animal n_intervals
0 chr1 3 10 cat 2
1 chr1 3 8 dog 1
2 chrX 6 10 cat 1"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.merge(df1, on=["animal"]),
check_dtype=False,
)
# merge with repeated indices
df = pd.DataFrame(
{"chrom": ["chr1", "chr2"], "start": [100, 400], "end": [110, 410]}
)
df.index = [0, 0]
pd.testing.assert_frame_equal(
df.reset_index(drop=True), bioframe.merge(df)[["chrom", "start", "end"]]
)
# test merge with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.merge(df1).shape[0] == 4
assert bioframe.merge(df1)["start"].iloc[0] == 1
assert bioframe.merge(df1)["end"].iloc[0] == 12
assert bioframe.merge(df1, on=["strand"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[1] == df1.shape[1] + 1
assert checks.is_bedframe(bioframe.merge(df1, on=["strand", "animal"]))
def test_complement():
### complementing a df with no intervals in chrX by a view with chrX should return entire chrX region
df1 = pd.DataFrame(
[["chr1", 1, 5], ["chr1", 3, 8], ["chr1", 8, 10], ["chr1", 12, 14]],
columns=["chrom", "start", "end"],
)
df1_chromsizes = {"chr1": 100, "chrX": 100}
df1_complement = pd.DataFrame(
[
["chr1", 0, 1, "chr1:0-100"],
["chr1", 10, 12, "chr1:0-100"],
["chr1", 14, 100, "chr1:0-100"],
["chrX", 0, 100, "chrX:0-100"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
### test complement with two chromosomes ###
df1.iloc[0, 0] = "chrX"
df1_complement = pd.DataFrame(
[
["chr1", 0, 3, "chr1:0-100"],
["chr1", 10, 12, "chr1:0-100"],
["chr1", 14, 100, "chr1:0-100"],
["chrX", 0, 1, "chrX:0-100"],
["chrX", 5, 100, "chrX:0-100"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
### test complement with no view_df and a negative interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1:0-9223372036854775807"],
["chr1", 20, np.iinfo(np.int64).max, "chr1:0-9223372036854775807"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1), df1_complement)
### test complement with an overhanging interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
chromsizes = {"chr1": 15}
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1:0-15"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=chromsizes, view_name_col="VR"), df1_complement
)
### test complement where an interval from df overlaps two different regions from view
### test complement with no view_df and a negative interval
df1 = pd.DataFrame([["chr1", 5, 15]], columns=["chrom", "start", "end"])
chromsizes = [("chr1", 0, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
df1_complement = pd.DataFrame(
[["chr1", 0, 5, "chr1p"], ["chr1", 15, 20, "chr1q"]],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
### test complement with NAs
df1 = pd.DataFrame(
[[pd.NA, pd.NA, pd.NA], ["chr1", 5, 15], [pd.NA, pd.NA, pd.NA]],
columns=["chrom", "start", "end"],
).astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
with pytest.raises(ValueError): # no NAs allowed in chromsizes
bioframe.complement(
df1, [("chr1", pd.NA, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
)
assert checks.is_bedframe(bioframe.complement(df1, chromsizes))
def test_closest():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[["chr1", 4, 8], ["chr1", 10, 11]], columns=["chrom", "start", "end"]
)
### closest(df1,df2,k=1) ###
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 4 8 0"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### closest(df1,df2, ignore_overlaps=True)) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True)
)
### closest(df1,df2,k=2) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 4 8 0
1 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), k=2)
)
### closest(df2,df1) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 4 8 chr1 1 5 0
1 chr1 10 11 chr1 1 5 5 """
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df2, df1, suffixes=("_1", "_2")))
### change first interval to new chrom ###
df2.iloc[0, 0] = "chrA"
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### test other return arguments ###
df2.iloc[0, 0] = "chr1"
d = """
index index_ have_overlap overlap_start overlap_end distance
0 0 0 True 4 5 0
1 0 1 False <NA> <NA> 5
"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.closest(
df1,
df2,
k=2,
return_overlap=True,
return_index=True,
return_input=False,
return_distance=True,
),
check_dtype=False,
)
# closest should ignore empty groups (e.g. from categorical chrom)
df = pd.DataFrame(
[
["chrX", 1, 8],
["chrX", 2, 10],
],
columns=["chrom", "start", "end"],
)
d = """ chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chrX 1 8 chrX 2 10 0
1 chrX 2 10 chrX 1 8 0"""
df_closest = pd.read_csv(StringIO(d), sep=r"\s+")
df_cat = pd.CategoricalDtype(categories=["chrX", "chr1"], ordered=True)
df = df.astype({"chrom": df_cat})
pd.testing.assert_frame_equal(
df_closest,
bioframe.closest(df, suffixes=("_1", "_2")),
check_dtype=False,
check_categorical=False,
)
# closest should ignore null rows: code will need to be modified
# as for overlap if an on=[] option is added
df1 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 4, 8],
[pd.NA, pd.NA, pd.NA],
["chr1", 10, 11],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_1": pd.Int64Dtype(),
"end_1": pd.Int64Dtype(),
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True, k=5)
)
with pytest.raises(ValueError): # inputs must be valid bedFrames
df1.iloc[0, 0] = "chr10"
bioframe.closest(df1, df2)
def test_coverage():
#### coverage does not exceed length of original interval
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame([["chr1", 2, 10]], columns=["chrom", "start", "end"])
d = """chrom start end coverage
0 chr1 3 8 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### coverage of interval on different chrom returns zero for coverage and n_overlaps
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame([["chrX", 3, 8]], columns=["chrom", "start", "end"])
d = """chrom start end coverage
0 chr1 3 8 0 """
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### when a second overlap starts within the first
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame(
[["chr1", 3, 6], ["chr1", 5, 8]], columns=["chrom", "start", "end"]
)
d = """chrom start end coverage
0 chr1 3 8 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### coverage of NA interval returns zero for coverage
df1 = pd.DataFrame(
[
["chr1", 10, 20],
[pd.NA, pd.NA, pd.NA],
["chr1", 3, 8],
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[["chr1", 3, 6], ["chr1", 5, 8], [pd.NA, pd.NA, pd.NA]],
columns=["chrom", "start", "end"],
)
df1 = bioframe.sanitize_bedframe(df1)
df2 = bioframe.sanitize_bedframe(df2)
df_coverage = pd.DataFrame(
[
["chr1", 10, 20, 0],
[pd.NA, pd.NA, pd.NA, 0],
["chr1", 3, 8, 5],
[pd.NA, pd.NA, pd.NA, 0],
],
columns=["chrom", "start", "end", "coverage"],
).astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype(), "coverage": | pd.Int64Dtype() | pandas.Int64Dtype |
"""
.. Copyright (c) 2014 <NAME>
license http://opensource.org/licenses/MIT
Data - building features (:mod:`pynance.data.feat`)
==============================================================
.. currentmodule:: pynance.data.feat
These functions are intended to be used in conjunction
with `functools.partial` and other function decorators
to pass to `data.labeledfeatures()`.
For example,
>>> from functools import partial
>>> featfunc = pn.decorate(partial(pn.data.feat.fromfuncs, [fn1, fn2, fn3], skipatstart=averaging_window),
averaging_window + n_feature_sessions - 1)
>>> features, labels = pn.data.labeledfeatures(eqdata, featfunc, labelfunc)
"""
from functools import partial
import numpy as np
import pandas as pd
def add_const(features):
"""
Prepend the constant feature 1 as first feature and return the modified
feature set.
Parameters
----------
features : ndarray or DataFrame
"""
content = np.empty((features.shape[0], features.shape[1] + 1), dtype='float64')
content[:, 0] = 1.
if isinstance(features, np.ndarray):
content[:, 1:] = features
return content
content[:, 1:] = features.iloc[:, :].values
cols = ['Constant'] + features.columns.tolist()
return | pd.DataFrame(data=content, index=features.index, columns=cols, dtype='float64') | pandas.DataFrame |
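# Illustrative usage (editorial addition; toy data assumed):
#
# >>> feats = pd.DataFrame({'x': [2., 3.]}, index=['a', 'b'])
# >>> add_const(feats).columns.tolist()
# ['Constant', 'x']
# >>> add_const(feats)['Constant'].tolist()
# [1.0, 1.0]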