import unittest
from nose.tools import assert_equal, assert_list_equal, nottest, raises
from py_stringmatching.tokenizer.delimiter_tokenizer import DelimiterTokenizer
from py_stringmatching.tokenizer.qgram_tokenizer import QgramTokenizer
import numpy as np
import pandas as pd
from py_stringsimjoin.filter.overlap_filter import OverlapFilter
from py_stringsimjoin.utils.converter import dataframe_column_to_str
from py_stringsimjoin.utils.generic_helper import remove_redundant_attrs
# test OverlapFilter.filter_pair method
class FilterPairTestCases(unittest.TestCase):
def setUp(self):
self.dlm = DelimiterTokenizer(delim_set=[' '], return_set=True)
def test_overlap_dlm_1_prune(self):
self.test_filter_pair('aa bb cc', 'xx yy',
self.dlm, 1, '>=', False, True)
def test_overlap_dlm_1_pass(self):
self.test_filter_pair('aa bb cc', 'xx yy aa',
self.dlm, 1, '>=', False, False)
def test_overlap_dlm_1_gt_prune(self):
self.test_filter_pair('aa bb cc', 'xx yy aa',
self.dlm, 1, '>', False, True)
def test_overlap_dlm_1_eq_pass(self):
self.test_filter_pair('aa bb cc', 'xx yy aa',
self.dlm, 1, '=', False, False)
def test_overlap_pass_missing_left(self):
self.test_filter_pair(None, 'fg ty',
self.dlm, 1, '>=', True, False)
def test_overlap_pass_missing_right(self):
self.test_filter_pair('fg ty', np.NaN,
self.dlm, 1, '>=', True, False)
def test_overlap_pass_missing_both(self):
self.test_filter_pair(None, np.NaN,
self.dlm, 1, '>=', True, False)
# tests for empty string input
def test_empty_lstring(self):
self.test_filter_pair('ab', '', self.dlm, 1, '>=', False, True)
def test_empty_rstring(self):
self.test_filter_pair('', 'ab', self.dlm, 1, '>=', False, True)
def test_empty_strings(self):
self.test_filter_pair('', '', self.dlm, 1, '>=', False, True)
@nottest
def test_filter_pair(self, lstring, rstring, tokenizer,
overlap_size, comp_op, allow_missing, expected_output):
overlap_filter = OverlapFilter(tokenizer, overlap_size,
comp_op, allow_missing)
actual_output = overlap_filter.filter_pair(lstring, rstring)
assert_equal(actual_output, expected_output)
# test OverlapFilter.filter_tables method
class FilterTablesTestCases(unittest.TestCase):
def setUp(self):
self.dlm = DelimiterTokenizer(delim_set=[' '], return_set=True)
self.A = pd.DataFrame([{'id': 1, 'attr':'ab cd ef aa bb'},
{'id': 2, 'attr':''},
{'id': 3, 'attr':'ab'},
{'id': 4, 'attr':'ll oo pp'},
{'id': 5, 'attr':'xy xx zz fg'},
{'id': 6, 'attr':None}])
self.B = pd.DataFrame([{'id': 1, 'attr':'mn'},
{'id': 2, 'attr':'he ll'},
{'id': 3, 'attr':'xy pl ou'},
{'id': 4, 'attr':'aa'},
{'id': 5, 'attr':'fg cd aa ef'},
{'id': 6, 'attr':np.NaN}])
self.empty_table = pd.DataFrame(columns=['id', 'attr'])
self.default_l_out_prefix = 'l_'
self.default_r_out_prefix = 'r_'
def test_overlap_dlm_1(self):
expected_pairs = set(['1,4', '1,5', '4,2', '5,3', '5,5'])
self.test_filter_tables(self.dlm, 1, '>=', False,
(self.A, self.B,
'id', 'id', 'attr', 'attr'),
expected_pairs)
def test_overlap_dlm_1_eq(self):
expected_pairs = set(['1,4', '4,2', '5,3', '5,5'])
self.test_filter_tables(self.dlm, 1, '=', False,
(self.A, self.B,
'id', 'id', 'attr', 'attr'),
expected_pairs)
def test_overlap_dlm_1_gt(self):
expected_pairs = set(['1,5'])
self.test_filter_tables(self.dlm, 1, '>', False,
(self.A, self.B,
'id', 'id', 'attr', 'attr'),
expected_pairs)
def test_overlap_dlm_3(self):
expected_pairs = set(['1,5'])
self.test_filter_tables(self.dlm, 3, '>=', False,
(self.A, self.B,
'id', 'id', 'attr', 'attr'),
expected_pairs)
def test_overlap_dlm_1_with_allow_missing(self):
expected_pairs = set(['1,4', '1,5', '4,2', '5,3', '5,5',
'6,1', '6,2', '6,3', '6,4', '6,5',
'6,6', '1,6', '2,6', '3,6', '4,6', '5,6'])
self.test_filter_tables(self.dlm, 1, '>=', True,
(self.A, self.B,
'id', 'id', 'attr', 'attr'),
expected_pairs)
# test with n_jobs above 1
def test_overlap_dlm_1_njobs_above_1(self):
expected_pairs = set(['1,4', '1,5', '4,2', '5,3', '5,5'])
self.test_filter_tables(self.dlm, 1, '>=', False,
(self.A, self.B,
'id', 'id', 'attr', 'attr',
['attr'], ['attr'],
'ltable.', 'rtable.', False, 2),
expected_pairs)
def test_overlap_dlm_1_with_out_attrs(self):
expected_pairs = set(['1,4', '1,5', '4,2', '5,3', '5,5'])
self.test_filter_tables(self.dlm, 1, '>=', False,
(self.A, self.B,
'id', 'id', 'attr', 'attr',
['id', 'attr'], ['id', 'attr']),
expected_pairs)
def test_overlap_dlm_1_with_out_prefix(self):
expected_pairs = set(['1,4', '1,5', '4,2', '5,3', '5,5'])
self.test_filter_tables(self.dlm, 1, '>=', False,
(self.A, self.B,
'id', 'id', 'attr', 'attr',
['attr'], ['attr'],
'ltable.', 'rtable.'),
expected_pairs)
# tests for empty table input
def test_empty_ltable(self):
expected_pairs = set()
self.test_filter_tables(self.dlm, 1, '>=', False,
(self.empty_table, self.B,
'id', 'id', 'attr', 'attr'),
expected_pairs)
def test_empty_rtable(self):
expected_pairs = set()
self.test_filter_tables(self.dlm, 1, '>=', False,
(self.A, self.empty_table,
'id', 'id', 'attr', 'attr'),
expected_pairs)
def test_empty_tables(self):
expected_pairs = set()
self.test_filter_tables(self.dlm, 1, '>=', False,
(self.empty_table, self.empty_table,
'id', 'id', 'attr', 'attr'),
expected_pairs)
@nottest
def test_filter_tables(self, tokenizer, overlap_size, comp_op,
allow_missing, args, expected_pairs):
overlap_filter = OverlapFilter(tokenizer, overlap_size,
comp_op, allow_missing)
actual_candset = overlap_filter.filter_tables(*args)
expected_output_attrs = ['_id']
l_out_prefix = self.default_l_out_prefix
r_out_prefix = self.default_r_out_prefix
# Check for l_out_prefix in args.
if len(args) > 8:
l_out_prefix = args[8]
expected_output_attrs.append(l_out_prefix + args[2])
# Check for r_out_prefix in args.
if len(args) > 9:
r_out_prefix = args[9]
expected_output_attrs.append(r_out_prefix + args[3])
# Check for l_out_attrs in args.
if len(args) > 6:
if args[6]:
l_out_attrs = remove_redundant_attrs(args[6], args[2])
for attr in l_out_attrs:
expected_output_attrs.append(l_out_prefix + attr)
# Check for r_out_attrs in args.
if len(args) > 7:
if args[7]:
r_out_attrs = remove_redundant_attrs(args[7], args[3])
for attr in r_out_attrs:
expected_output_attrs.append(r_out_prefix + attr)
# verify whether the output table has the necessary attributes.
assert_list_equal(list(actual_candset.columns.values),
expected_output_attrs)
actual_pairs = set()
for idx, row in actual_candset.iterrows():
actual_pairs.add(','.join((str(row[l_out_prefix + args[2]]),
str(row[r_out_prefix + args[3]]))))
# verify whether the actual pairs and the expected pairs match.
assert_equal(len(expected_pairs), len(actual_pairs))
common_pairs = actual_pairs.intersection(expected_pairs)
assert_equal(len(common_pairs), len(expected_pairs))
# test OverlapFilter.filter_candset method
class FilterCandsetTestCases(unittest.TestCase):
def setUp(self):
self.dlm = DelimiterTokenizer(delim_set=[' '], return_set=True)
self.A = pd.DataFrame([{'l_id': 1, 'l_attr':'ab cd ef aa bb'},
{'l_id': 2, 'l_attr':''},
{'l_id': 3, 'l_attr':'ab'},
{'l_id': 4, 'l_attr':'ll oo pp'},
{'l_id': 5, 'l_attr':'xy xx zz fg'},
{'l_id': 6, 'l_attr':np.NaN}])
self.B = pd.DataFrame([{'r_id': 1, 'r_attr':'mn'},
{'r_id': 2, 'r_attr':'he ll'},
{'r_id': 3, 'r_attr':'xy pl ou'},
{'r_id': 4, 'r_attr':'aa'},
{'r_id': 5, 'r_attr':'fg cd aa ef'},
{'r_id': 6, 'r_attr':None}])
# generate cartesian product A x B to be used as candset
self.A['tmp_join_key'] = 1
self.B['tmp_join_key'] = 1
self.C = pd.merge(self.A[['l_id', 'tmp_join_key']],
self.B[['r_id', 'tmp_join_key']],
on='tmp_join_key').drop('tmp_join_key', 1)
self.empty_A = pd.DataFrame(columns=['l_id', 'l_attr'])
self.empty_B = pd.DataFrame(columns=['r_id', 'r_attr'])
self.empty_candset = pd.DataFrame(columns=['l_id', 'r_id'])
def test_overlap_dlm_1(self):
expected_pairs = set(['1,4', '1,5', '4,2', '5,3', '5,5'])
self.test_filter_candset(self.dlm, 1, '>=', False,
(self.C, 'l_id', 'r_id',
self.A, self.B,
'l_id', 'r_id', 'l_attr', 'r_attr'),
expected_pairs)
def test_overlap_dlm_1_allow_missing(self):
expected_pairs = set(['1,4', '1,5', '4,2', '5,3', '5,5',
'6,1', '6,2', '6,3', '6,4', '6,5',
'6,6', '1,6', '2,6', '3,6', '4,6', '5,6'])
self.test_filter_candset(self.dlm, 1, '>=', True,
(self.C, 'l_id', 'r_id',
self.A, self.B,
'l_id', 'r_id', 'l_attr', 'r_attr'),
expected_pairs)
def test_njobs_above_1(self):
expected_pairs = set(['1,4', '1,5', '4,2', '5,3', '5,5'])
self.test_filter_candset(self.dlm, 1, '>=', False,
(self.C, 'l_id', 'r_id',
self.A, self.B,
'l_id', 'r_id', 'l_attr', 'r_attr', 2),
expected_pairs)
def test_candset_with_join_attr_of_type_int(self):
A = pd.DataFrame([{'l_id': 1, 'l_attr':1990},
{'l_id': 2, 'l_attr':2000},
{'l_id': 3, 'l_attr':0},
{'l_id': 4, 'l_attr':-1},
{'l_id': 5, 'l_attr':1986}])
B = pd.DataFrame([{'r_id': 1, 'r_attr':2001},
{'r_id': 2, 'r_attr':1992},
{'r_id': 3, 'r_attr':1886},
{'r_id': 4, 'r_attr':2007},
{'r_id': 5, 'r_attr':2012}])
dataframe_column_to_str(A, 'l_attr', inplace=True)
dataframe_column_to_str(B, 'r_attr', inplace=True)
A['tmp_join_key'] = 1
B['tmp_join_key'] = 1
C = pd.merge(A[['l_id', 'tmp_join_key']],
B[['r_id', 'tmp_join_key']],
on='tmp_join_key').drop('tmp_join_key', 1)
qg2_tok = QgramTokenizer(2, return_set=True)
expected_pairs = set(['1,2', '1,3', '2,1', '2,4', '2,5',
'4,1', '5,2', '5,3'])
self.test_filter_candset(qg2_tok, 1, '>=', False,
(C, 'l_id', 'r_id',
A, B, 'l_id', 'r_id',
'l_attr', 'r_attr'),
expected_pairs)
# tests for empty candset input
def test_empty_candset(self):
expected_pairs = set()
self.test_filter_candset(self.dlm, 1, '>=', False,
(self.empty_candset, 'l_id', 'r_id',
self.empty_A, self.empty_B,
'l_id', 'r_id', 'l_attr', 'r_attr'),
expected_pairs)
@raises(TypeError)
def test_invalid_candset(self):
overlap_filter = OverlapFilter(self.dlm)
overlap_filter.filter_candset([], 'l_id', 'r_id',
self.A, self.B,
'l_id', 'r_id',
'l_attr', 'r_attr')
@raises(TypeError)
def test_invalid_ltable(self):
overlap_filter = OverlapFilter(self.dlm)
overlap_filter.filter_candset(self.C, 'l_id', 'r_id',
[], self.B,
'l_id', 'r_id',
'l_attr', 'r_attr')
@raises(TypeError)
def test_invalid_rtable(self):
overlap_filter = OverlapFilter(self.dlm)
overlap_filter.filter_candset(self.C, 'l_id', 'r_id',
self.A, [],
'l_id', 'r_id',
'l_attr', 'r_attr')
@raises(AssertionError)
def test_invalid_candset_l_key_attr(self):
overlap_filter = OverlapFilter(self.dlm)
overlap_filter.filter_candset(self.C, 'invalid_attr', 'r_id',
self.A, self.B,
'l_id', 'r_id',
'l_attr', 'r_attr')
@raises(AssertionError)
def test_invalid_candset_r_key_attr(self):
overlap_filter = OverlapFilter(self.dlm)
overlap_filter.filter_candset(self.C, 'l_id', 'invalid_attr',
self.A, self.B,
'l_id', 'r_id',
'l_attr', 'r_attr')
@raises(AssertionError)
def test_invalid_ltable_l_key_attr(self):
overlap_filter = OverlapFilter(self.dlm)
overlap_filter.filter_candset(self.C, 'l_id', 'r_id',
self.A, self.B,
'invalid_attr', 'r_id',
'l_attr', 'r_attr')
@raises(AssertionError)
def test_invalid_ltable_l_filter_attr(self):
overlap_filter = OverlapFilter(self.dlm)
overlap_filter.filter_candset(self.C, 'l_id', 'r_id',
self.A, self.B,
'l_id', 'r_id',
'invalid_attr', 'r_attr')
@raises(AssertionError)
def test_invalid_rtable_r_key_attr(self):
overlap_filter = OverlapFilter(self.dlm)
overlap_filter.filter_candset(self.C, 'l_id', 'r_id',
self.A, self.B,
'l_id', 'invalid_attr',
'l_attr', 'r_attr')
@raises(AssertionError)
def test_invalid_rtable_r_filter_attr(self):
overlap_filter = OverlapFilter(self.dlm)
overlap_filter.filter_candset(self.C, 'l_id', 'r_id',
self.A, self.B,
'l_id', 'r_id',
'l_attr', 'invalid_attr')
@raises(AssertionError)
def test_ltable_l_key_attr_with_missing_value(self):
overlap_filter = OverlapFilter(self.dlm)
overlap_filter.filter_candset(self.C, 'l_id', 'r_id',
self.A, self.B,
'l_attr', 'r_id',
'l_attr', 'r_attr')
@raises(AssertionError)
def test_rtable_r_key_attr_with_missing_value(self):
overlap_filter = OverlapFilter(self.dlm)
overlap_filter.filter_candset(self.C, 'l_id', 'r_id',
self.A, self.B,
'l_id', 'r_attr',
'l_attr', 'r_attr')
@raises(AssertionError)
def test_candset_with_numeric_l_filter_attr(self):
A = pd.DataFrame([{'l_id': 1, 'l_attr':1990}])
B = pd.DataFrame([{'r_id': 1, 'r_attr':'2001'}])
A['tmp_join_key'] = 1
B['tmp_join_key'] = 1
C = pd.merge(A[['l_id', 'tmp_join_key']],
B[['r_id', 'tmp_join_key']],
on='tmp_join_key').drop('tmp_join_key', 1)
qg2_tok = QgramTokenizer(2, return_set=True)
overlap_filter = OverlapFilter(qg2_tok)
overlap_filter.filter_candset(C, 'l_id', 'r_id',
A, B, 'l_id', 'r_id',
'l_attr', 'r_attr')
@raises(AssertionError)
def test_candset_with_numeric_r_filter_attr(self):
A = pd.DataFrame([{'l_id': 1, 'l_attr':'1990'}])
B = pd.DataFrame([{'r_id': 1, 'r_attr':2001}])
A['tmp_join_key'] = 1
B['tmp_join_key'] = 1
C = pd.merge(A[['l_id', 'tmp_join_key']],
B[['r_id', 'tmp_join_key']],
on='tmp_join_key').drop('tmp_join_key', 1)
qg2_tok = QgramTokenizer(2, return_set=True)
overlap_filter = OverlapFilter(qg2_tok)
overlap_filter.filter_candset(C, 'l_id', 'r_id',
A, B, 'l_id', 'r_id',
'l_attr', 'r_attr')
@nottest
def test_filter_candset(self, tokenizer, overlap_size, comp_op,
allow_missing, args, expected_pairs):
overlap_filter = OverlapFilter(tokenizer, overlap_size,
comp_op, allow_missing)
actual_output_candset = overlap_filter.filter_candset(*args)
# verify whether the output table has the necessary attributes.
assert_list_equal(list(actual_output_candset.columns.values),
list(args[0].columns.values))
actual_pairs = set()
for idx, row in actual_output_candset.iterrows():
actual_pairs.add(','.join((str(row[args[1]]), str(row[args[2]]))))
# verify whether the actual pairs and the expected pairs match.
assert_equal(len(expected_pairs), len(actual_pairs))
common_pairs = actual_pairs.intersection(expected_pairs)
assert_equal(len(common_pairs), len(expected_pairs))
class OverlapFilterInvalidTestCases(unittest.TestCase):
def setUp(self):
self.A = pd.DataFrame([{'A.id':1, 'A.attr':'hello', 'A.int_attr':5}])
        self.B = pd.DataFrame([{'B.id':1, 'B.attr':'world', 'B.int_attr':6}])
# coding: utf-8
"""Mapping of production and consumption mixes in Europe and their effect on
the carbon footprint of electric vehicles
This code performs the following:
- Imports data from ENTSO-E (production quantities, trade relationships)
- Calculates the production and consumption electricity mixes for European countries
- Calculates the carbon footprint (CF) for the above electricity mixes
- Calculates the production, use-phase and end-of-life emissions for battery electric vehicles (BEVs) under
  the following assumptions:
    - Production in Korea (with electricity intensity 684 g CO2-eq/kWh)
    - Use phase uses the country-specific production and consumption mixes
    - End-of-life emissions are static for all countries
Requires the following files as input:
- ENTSO_production_volumes.csv (from hybridized_impact_factors.py)
- final_emission_factors.csv (from hybridized_impact_factors.py)
- trades.csv (from hybridized_impact_factors.py)
- trade_ef_hv.csv (from hybridized_impact_factors.py)
- API_EG.ELC.LOSS.ZS_DS2_en_csv_v2_673578.csv (transmission losses, from the World Bank)
- car_specifications.xlsx
"""
import os
from datetime import datetime
import numpy as np
import pandas as pd
import country_converter as coco
import logging
#%% Main function
def run_calcs(run_id, year, no_ef_countries, export_data=True, include_TD_losses=True, BEV_lifetime=180000, ICEV_lifetime=180000, flowtrace_el=True, allocation=True, production_el_intensity=679, incl_ei=False, energy_sens=False):
"""Run all electricity mix and vehicle calculations and exports results."""
# Korean el-mix 679 g CO2/kWh, from ecoinvent
fp = os.path.curdir
production, trades, trade_ef, country_total_prod_disagg, country_total_cons_disagg, g_raw, C = load_prep_el_data(fp, year)
codecheck_file, elmixes, trade_only, country_el, CFEL, CFCI = el_calcs(flowtrace_el, run_id, fp, C, production, country_total_prod_disagg, country_total_cons_disagg, g_raw, trades, trade_ef, include_TD_losses, incl_ei, export_data) # Leontief electricity calculations
results_toSI, ICEV_total_impacts, ICEV_prodEOL_impacts, ICEV_op_int = BEV_calcs(fp, country_el, production, elmixes, BEV_lifetime, ICEV_lifetime, production_el_intensity, CFCI, allocation, energy_sens)
SI_fp = export_SI(run_id, results_toSI, production, trades, C, CFEL, no_ef_countries)
pickle_results(run_id, results_toSI, CFEL, ICEV_total_impacts, codecheck_file, export_data)
return results_toSI['BEV footprint'].xs('Consumption mix', level=1, axis=1), ICEV_prodEOL_impacts, ICEV_op_int, SI_fp
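# A minimal, hypothetical invocation of run_calcs; the argument values below are
# illustrative assumptions, not necessarily the settings used for the published results:
#
#   BEV_fp, ICEV_prodEOL_impacts, ICEV_op_int, SI_fp = run_calcs(
#       run_id='baseline_', year=2020, no_ef_countries={},
#       export_data=True, include_TD_losses=True,
#       BEV_lifetime=180000, ICEV_lifetime=180000,
#       flowtrace_el=True, allocation=True,
#       production_el_intensity=679, incl_ei=False, energy_sens=False)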
#%% Load and format data for calculations
def load_prep_el_data(fp, year):
"""Load electricity data and emissions factors."""
fp_output = os.path.join(fp, 'output')
# Output from bentso.py
filepath_production = os.path.join(fp_output, 'entsoe', 'ENTSO_production_volumes_'+ str(year) +'.csv')
filepath_intensities = os.path.join(fp_output, 'final_emission_factors_'+ str(year) +'.csv')
filepath_trades = os.path.join(fp_output, 'entsoe', 'trades_'+ str(year) +'.csv')
filepath_tradeonly_ef = os.path.join(fp_output, 'ecoinvent_ef_hv.csv')
# read in production mixes (annual average)
production = pd.read_csv(filepath_production, index_col=0)
production.rename_axis(index='', inplace=True)
# matrix of total imports/exports of electricity between regions; aka Z matrix
trades = pd.read_csv(filepath_trades, index_col=0)
trades.fillna(0, inplace=True) # replace np.nan with 0 for matrix math, below
# manually remove Cyprus for now
production.drop(index='CY', inplace=True)
trades = trades.drop(columns='CY').drop(index='CY')
imports = trades.sum(axis=0)
exports = trades.sum(axis=1)
""" Make into sum of production and production + import - export"""
country_total_prod_disagg = production.sum(axis=1)
country_total_cons_disagg = country_total_prod_disagg + imports - exports
waste = (production['Waste'] / production.sum(axis=1))
waste_min = waste[waste > 0].min()
waste_max = waste.max()
g_raw = production.sum(axis=1) # Vector of total electricity production (regionalized)
""" Read power plant CO2 intensities [tech averages] """
# average technology CO2 intensities (i.e., non-regionalized)
all_C = pd.read_csv(filepath_intensities, index_col=0)
all_C.drop(index='CY', inplace=True)
# use ecoinvent factors for these countries as a proxy to calculate consumption mixes for receiving countries
trade_ef = pd.read_csv(filepath_tradeonly_ef, index_col=[0, 1, 2, 3], header=[0])
trade_ef.index = trade_ef.index.droplevel([0, 1, 3]) # remove DSID, activityName and productName (leaving geography)
trade_ef.index.rename('geo', inplace=True)
trade_ef.columns = ['emission factor']
# Generate regionalized tech generation matrix
C = all_C.T
C.sort_index(axis=1, inplace=True)
C.sort_index(axis=0, inplace=True)
return production, trades, trade_ef, country_total_prod_disagg, country_total_cons_disagg, g_raw, C
#%% el_calcs
def el_calcs(flowtrace_el, run_id, fp, C, production, country_total_prod_disagg, country_total_cons_disagg, g_raw, trades, trade_ef, include_TD_losses, incl_ei, export_data):
fp_data = os.path.join(fp, 'data')
# Make list of full-country resolution
original_countries = list(production.index)
# Make list of aggregated countries (affects Nordic countries + GB (UK+NI))
# read 3-letter ISO codes
countries = list(trades.index)
""" Calculates national production mixes and consumption mixes using Leontief assumption """
# Start electricity calculations (ELFP.m)
# Calculate production and consumption mixes
# Carbon intensity of production mix
CFPI_no_TD = pd.DataFrame(production.multiply(C.T).sum(axis=1) / production.sum(axis=1), columns=['Production mix intensity']) # production mix intensity without losses
CFPI_no_TD.fillna(0, inplace=True)
# List of countries that have trade relationships, but no production data
trade_only = list(set(trades.index) - set(production.loc[production.sum(axis=1) > 0].index))
# Add ecoinvent proxy emission factors for trade-only countries
logging.info('Replacing missing production mix intensities with values from ecoinvent:')
for country in trade_only:
if CFPI_no_TD.loc[country, 'Production mix intensity'] == 0:
logging.info(country)
CFPI_no_TD.loc[country] = trade_ef.loc[country].values
i = country_total_cons_disagg.size # Number of European regions
g = g_raw
g = g.sort_index() # total generation vector (local production for each country)
total_imported = trades.sum(axis=0) # sum rows for total imports
total_exported = trades.sum(axis=1) # sum columns for total exports
y = total_imported + g - total_exported # total final demand (consumption) of electricity
q = g + total_imported # vector of total consumption
q.replace(np.nan, 0, inplace=True)
if flowtrace_el:
        # For the flow-tracing approach: make Leontief production functions (normalize columns of A);
        # a toy numerical sketch is given in _flowtrace_toy_example() after this function
# normalized trade matrix quadrant
Atmx = pd.DataFrame(np.matmul(trades, np.linalg.pinv(np.diag(q))))
# normalized production matrix quadrant
Agen = pd.DataFrame(np.diag(g) * np.linalg.pinv(np.diag(q)), index=countries, columns=countries) # coefficient matrix, generation
# "Trade" Leontief inverse
# Total imports from region i to j per unit demand on j
Ltmx = pd.DataFrame(np.linalg.pinv(np.identity(i) - Atmx), trades.columns, trades.index)
# Production in country i for trade to country j
# Total generation in i (rows) per unit demand j
Lgen = pd.DataFrame(np.matmul(Agen, Ltmx), index=Agen.index, columns=Ltmx.columns)
y_diag = pd.DataFrame(np.diag(y), index=countries, columns=countries)
# total imports for given demand
Xtmx = pd.DataFrame(np.matmul(np.linalg.pinv(np.identity(i) - Atmx), y_diag))
# Total generation to satisfy demand (consumption)
Xgen = np.matmul(np.matmul(Agen, Ltmx), y_diag)
Xgen.sum(axis=0)
Xgen_df = pd.DataFrame(Xgen, index=Agen.index, columns=y_diag.columns)
# ### Check electricity generated matches demand
totgen = Xgen.sum(axis=0)
r_gendem = totgen / y # All countries should be 1
        #%% Generation technology matrix
# TC is a country-by-generation technology matrix - normalized to share of total domestic generation, i.e., normalized generation/production mix
# technology generation, kWh/ kWh domestic generated electricity
TC = pd.DataFrame(np.matmul(np.linalg.pinv(np.diag(g)), production), index=g.index, columns=production.columns)
TCsum = TC.sum(axis=1) # Quality assurance - each country should sum to 1
# Calculate technology generation mix in GWh based on production in each region
TGP = pd.DataFrame(np.matmul(TC.transpose(), np.diag(g)), index=TC.columns, columns=g.index) #.== production
# Carbon intensity of consumption mix
CFCI_no_TD = pd.DataFrame(np.matmul(CFPI_no_TD.T.values, Lgen), columns=CFPI_no_TD.index).T
else:
# Use grid-average assumption for trade
prod_emiss = production.multiply(C.T).sum(axis=1)
trade_emiss = (pd.DataFrame(np.diag(CFPI_no_TD.iloc(axis=1)[0]), index=CFPI_no_TD.index, columns=CFPI_no_TD.index)).dot(trades)
CFCI_no_TD = pd.DataFrame((prod_emiss + trade_emiss.sum(axis=0) - trade_emiss.sum(axis=1)) / y)
CFCI_no_TD.columns = ['Consumption mix intensity']
# use ecoinvent for missing countries
if incl_ei:
CFCI_no_TD.update(trade_ef.rename(columns={'emission factor':'Consumption mix intensity'}))
#%% Calculate losses
# Transpose added after removing country aggregation as data pre-treatment
if include_TD_losses:
# Calculate technology characterization factors including transmission and distribution losses
# First, read transmission and distribution losses, downloaded from World Bank economic indicators (most recent values from 2014)
if isinstance(include_TD_losses, float):
TD_losses = include_TD_losses # apply constant transmission and distribution losses to all countries
elif isinstance(include_TD_losses, bool):
losses_fp = os.path.join(fp_data, 'API_EG.ELC.LOSS.ZS_DS2_en_csv_v2_673578.csv')
try:
TD_losses = pd.read_csv(losses_fp, skiprows=[0,1,2,3], usecols=[1, 58], index_col=0)
TD_losses = TD_losses.iloc[:, -7:].dropna(how='all', axis=1)
TD_losses = TD_losses.apply(lambda x: x / 100 + 1) # convert losses to a multiplicative factor
# ## Calculate total national carbon emissions from el - production and consumption mixes
TD_losses.index = coco.convert(names=TD_losses.index.tolist(), to='ISO2', not_found=None)
TD_losses = TD_losses.loc[countries]
TD_losses = pd.Series(TD_losses.iloc[:, 0])
except:
print("Warning! Transmission and distribution losses input files not found!")
TD_losses = pd.Series(np.zeros(len(production.index)), index=production.index)
else:
print('invalid entry for losses')
        # Calculate carbon intensity of production and consumption mixes including losses
CFPI_TD_losses = CFPI_no_TD.multiply(TD_losses, axis=0).dropna(how='any', axis=0) # apply transmission and distribution losses to production mix intensity
CFCI_TD_losses = CFCI_no_TD.multiply(TD_losses, axis=0).dropna(how='any', axis=0)
if len(CFCI_TD_losses) < len(CFPI_TD_losses):
CFCI_TD_losses = CFCI_no_TD.multiply(TD_losses, axis=0)
CFPI = CFPI_TD_losses
CFCI = CFCI_TD_losses
else:
CFPI = CFPI_no_TD
CFCI = CFCI_no_TD
elmixes = (CFPI.copy()).join(CFCI.copy()).T
#%%
# Aggregate multi-nodes to single countries using weighted average of production/consumption as appropriate
country_total_prod_disagg.columns = ["Total production (TWh)"]
country_total_prod_disagg.index = original_countries
country_total_cons_disagg.columns = ["Total consumption (TWh)"]
country_total_cons_disagg.index = original_countries
country_el = pd.concat([country_total_prod_disagg, country_total_cons_disagg], axis=1)
country_el.columns = ['Total production (TWh)', 'Total consumption (TWh)']
CFEL_mixes = elmixes.T
CFEL = pd.concat([country_el, CFEL_mixes], axis=1)
imports = trades.sum(axis=0)
exports = trades.sum(axis=1)
CFEL['Trade percentage, gross'] = (imports + exports) / CFEL['Total production (TWh)']
CFEL['Import percentage'] = imports / CFEL['Total production (TWh)']
CFEL['Export percentage'] = exports / CFEL['Total production (TWh)']
CFEL['imports'] = imports
CFEL['exports'] = exports
#Calculate total carbon footprint intensity ratio production vs consumption
rCP = CFCI['Consumption mix intensity'].divide(CFPI['Production mix intensity'])
rCP.columns = ["ratio consumption:production mix"]
# Export intermediate variables from calculations for troubleshooting
if export_data:
keeper = run_id + "{:%d-%m-%y, %H_%M}".format(datetime.now())
fp_results = os.path.join(fp, 'results')
codecheck_file = os.path.join(os.path.abspath(fp_results), 'code_check_' + keeper + '.xlsx')
writer = pd.ExcelWriter(codecheck_file)
g.to_excel(writer, "g")
q.to_excel(writer, "q")
y.to_excel(writer, 'y')
if flowtrace_el:
Atmx.to_excel(writer, "Atmx")
Agen.to_excel(writer, "Agen")
Ltmx.to_excel(writer, "LTmx")
Lgen.to_excel(writer, "Lgen")
Xtmx.to_excel(writer, "Xtmx")
TGP.to_excel(writer, "TGP")
CFPI.T.to_excel(writer, "CFPI")
CFCI.T.to_excel(writer, "CFCI")
rCP.to_excel(writer, "rCP")
C.T.to_excel(writer, "C")
writer.save()
return codecheck_file, elmixes, trade_only, country_el, CFEL, CFCI
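#%% Toy illustration of the flow-tracing consumption-mix calculation
# A minimal two-country sketch of the Leontief-based consumption-mix intensity
# computed in el_calcs above; country codes, volumes and intensities are made-up
# assumptions used purely for illustration.
def _flowtrace_toy_example():
    countries = ['AA', 'BB']
    g = pd.Series([100.0, 50.0], index=countries)  # domestic generation, TWh
    toy_trades = pd.DataFrame([[0.0, 10.0],        # AA exports 10 TWh to BB
                               [5.0, 0.0]],        # BB exports 5 TWh to AA
                              index=countries, columns=countries)
    ef = pd.Series([300.0, 600.0], index=countries)  # production mix intensity, g CO2-eq/kWh
    q = g + toy_trades.sum(axis=0)  # total supply = generation + imports
    Atmx = toy_trades @ np.linalg.pinv(np.diag(q))  # normalized trade quadrant
    Agen = np.diag(g) @ np.linalg.pinv(np.diag(q))  # normalized generation quadrant
    Ltmx = np.linalg.pinv(np.identity(len(countries)) - Atmx)  # "trade" Leontief inverse
    Lgen = Agen @ Ltmx  # generation in i per unit of final demand in j
    # consumption-mix intensity: production intensities propagated through trade
    return pd.Series(ef.values @ Lgen, index=countries)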
#%%
def BEV_calcs(fp, country_el, production, elmixes, BEV_lifetime, ICEV_lifetime, production_el_intensity, CFCI, allocation=True, energy_sens=False):
"""Calculate BEV lifecycle emissions."""
# First, setup calculations
# read in data
fp_data = os.path.join(fp, 'data')
vehicle_fp = os.path.join(fp_data, 'car_specifications.xlsx')
cars = pd.read_excel(vehicle_fp, sheet_name='veh_emiss', index_col=[0, 1, 2], usecols='A:G')
cars = cars.sort_index()
vehicle_CO2 = ["BEV", "ICEV"]
if energy_sens:
# if performing the experiment for battery energy demand in manufacturing,
# update with new energy values
alt_energy = pd.read_excel(vehicle_fp, sheet_name='alt_energy', index_col=[0,1,2], usecols='A:H') # column A is scenario name
if isinstance(energy_sens, str):
cars.update(alt_energy.loc[energy_sens])
# Impacts from electricity demand in cell production
battery_prod_el = production_el_intensity / 1e6 * cars.loc["BEV", "Production el, battery"] # in t CO2/vehicle
batt_prod_impacts = cars.loc["BEV", "Production, RObattery"].add(battery_prod_el, fill_value=0).sum(axis=0)
if allocation:
alloc_share = BEV_lifetime / ((cars.loc["BEV", "Max EFC", "cycles"] * (cars.loc["BEV", "Batt size", "kWh"]*.9) * 1000) / cars.loc["BEV", "Use phase", "Wh/km"])
else:
alloc_share = 1
alloc_batt_prod_impacts = alloc_share * batt_prod_impacts
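    # Worked example of the allocation share above (numbers are illustrative
    # assumptions, not the values in car_specifications.xlsx): with a maximum of
    # 2000 equivalent full cycles, a 60 kWh battery (90% usable) and a use-phase
    # demand of 180 Wh/km, the battery could deliver
    # 2000 * (60 * 0.9) * 1000 / 180 = 600 000 km, so a 180 000 km vehicle lifetime
    # is allocated alloc_share = 180000 / 600000 = 0.3 of battery production emissions.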
# Total vehicle production impacts - sum of battery emissions + rest of vehicle
BEV_prod_impacts = cars.loc["BEV", "Production, ROV"] + alloc_batt_prod_impacts
# Modify for battery production in Europe
# batt_prod_EU = pd.DataFrame(np.matmul(CFCI.values / 1e6, cars.loc["BEV", "Production el, battery"].values), index=CFCI.index, columns=cars.columns)
batt_prod_EU = pd.DataFrame(np.matmul((elmixes.T['Consumption mix intensity'].values / 1e6).reshape(-1, 1),
cars.loc["BEV", "Production el, battery"].values),
index=elmixes.columns, columns=cars.columns)
# Total battery production impacts in Europe
batt_prod_EU = batt_prod_EU + cars.loc["BEV", "Production, RObattery", "t CO2"]
alloc_batt_prod_EU = alloc_share * batt_prod_EU
BEV_prod_EU = pd.DataFrame(index=elmixes.columns, columns=["A", "C", "JC", "JE"])
BEV_prod_EU = alloc_batt_prod_EU + cars.loc["BEV", "Production, ROV", "t CO2"]
BEV_prod_EU.columns = pd.MultiIndex.from_product([["EUR production impacts BEV"], BEV_prod_EU.columns, ["Consumption mix"]], names=["", "Segment", "Elmix"])
# Calculate use phase emissions
segs = cars.loc['BEV', 'Use phase', 'Wh/km']
mi = pd.MultiIndex.from_product([list(elmixes.index), list(segs.index)])
segs = segs.reindex(mi, level=1)
segs = pd.DataFrame(segs)
segs.columns = ['a']
segs = segs.reindex(elmixes.columns, axis=1, method='bfill')
elmixes_for_calc = elmixes.reindex(mi, level=0, axis=0)
BEV_use = (segs.multiply(elmixes_for_calc / 1000)).T
# Add production and EOL intensity for BEVs
BEV_other = BEV_prod_impacts + cars.loc["BEV", "EOL", "t CO2"].values
    BEV_other_intensity = BEV_other / BEV_lifetime * 1e6  # in g CO2-eq/km
BEV_other_intensity.index = ["g CO2/km"]
# Calculate full lifecycle intensity using production and consumption mixes
BEVp = pd.DataFrame(BEV_use["Production mix intensity"] + BEV_other_intensity.loc["g CO2/km"])
BEVc = pd.DataFrame(BEV_use["Consumption mix intensity"] + BEV_other_intensity.loc["g CO2/km"])
# BEV impacts with production and consumption mixes
# Check which technology lifetime to use as baseline for use phase
# (use shortest lifetime between the two for comparison to avoid vehicle replacement)
# if BEV_lifetime <= ICEV_lifetime:
# lifetime = BEV_lifetime
# elif BEV_lifetime > ICEV_lifetime:
# # in the case for BEV lifetimes longer than ICEV lifetimes
# lifetime = ICEV_lifetime
# Calculate total absolute lifecycle emissions in t CO2e
BEV_impactsp = (BEV_use['Production mix intensity'].T * BEV_lifetime / 1e6).add(BEV_other.loc['t CO2'], axis=0)
BEV_impactsc = (BEV_use['Consumption mix intensity'].T * BEV_lifetime / 1e6).add(BEV_other.loc['t CO2'], axis=0)
BEV_impacts = pd.concat([BEV_impactsp.T, BEV_impactsc.T], axis=1, keys=['Production mix', 'Consumption mix'])
BEV_impacts = BEV_impacts.swaplevel(axis=1, i=0, j=1)
BEV_impacts.sort_index(level=0, axis=1, inplace=True)
# Calculate share of production phase in total BEV lifecycle emissions
BEV_prod_sharesp = BEV_prod_impacts.values / (BEV_impactsp.T)
BEV_prod_sharesc = BEV_prod_impacts.values / (BEV_impactsc.T)
BEV_prod_sharesc.columns = pd.MultiIndex.from_product([BEV_prod_sharesc.columns, ['Consumption mix']])
# Calculate share of use phase in total BEV lifecycle emissions
BEV_use_sharesp = (BEV_use['Production mix intensity'] * BEV_lifetime / 1e6) / (BEV_impactsp.T)
BEV_use_sharesc = (BEV_use['Consumption mix intensity'] * BEV_lifetime / 1e6) / (BEV_impactsc.T)
# Calculate BEV footprints with EUR (domestic) battery production
BEV_fp_EU = (BEV_prod_EU.add(cars.loc["BEV", "EOL", "t CO2"], level=1, axis=1) / BEV_lifetime * 1e6)
# Currently, EU production assumes consumption mix, so only examine using consumption mix for both manufacturing and use phase for consistency
EU_BEVc = BEV_fp_EU.add(BEV_use['Consumption mix intensity'].reindex(BEV_fp_EU.columns, axis=1, level=1), axis=1)
EU_BEVc.rename(columns={"EUR production impacts BEV": "BEV footprint, EUR production"}, inplace=True)
# Calculate EU production:Asian production footprint ratios
fp_ratio = EU_BEVc.divide(BEVc, level=1)
# Calculate total lifecycle emissions for ICEVs
ICEV_prodEOL_impacts = cars.loc['ICEV', 'Production', 't CO2'].add(cars.loc['ICEV', 'EOL', 't CO2'], axis=0)
ICEV_total_impacts = ICEV_prodEOL_impacts.add(cars.loc['ICEV', 'Use phase', 'g CO2/km'] * ICEV_lifetime / 1e6, axis=0)
ICEV_prod_EOL_fp = ICEV_prodEOL_impacts * 1e6 / ICEV_lifetime
ICEV_lc_footprint = ICEV_prod_EOL_fp + cars.loc['ICEV', 'Use phase', 'g CO2/km']
ICEV_total_impacts = pd.DataFrame(ICEV_total_impacts).T # force to dataframe
ICEV_lc_shares = cars.loc['ICEV'] / cars.loc['ICEV'].sum(axis=0)
#%% # Calculate BEV:ICEV ratios
ratio_use_prod = BEV_use["Production mix intensity"] / cars.loc["ICEV", "Use phase", "t CO2"]
# Ratios comparing use phase only
ratio_use_prod = pd.DataFrame(BEV_use["Production mix intensity"] / cars.loc["ICEV", "Use phase", "t CO2"])
ratio_use_cons = pd.DataFrame(BEV_use["Consumption mix intensity"] / cars.loc["ICEV", "Use phase", "t CO2"])
ratio_use_cons = ratio_use_cons.rename_axis("Segment", axis=1)
ratio_use_cons = pd.concat([ratio_use_cons], keys=["RATIO: use phase"], axis=1)
ratio_use_cons = pd.concat([ratio_use_cons], keys=["Consumption mix"], names=["Elmix"], axis=1)
ratio_use_cons = ratio_use_cons.reorder_levels([1,2,0], axis=1)
# Ratios with lifecycle impacts
ratiop = pd.DataFrame(BEVp / (ICEV_lc_footprint))
ratioc = pd.DataFrame(BEVc / (ICEV_lc_footprint))
# Ratios with EU production
ratioc_EU_prod = (EU_BEVc["BEV footprint, EUR production"].stack() / (ICEV_lc_footprint)).unstack()
ratioc_EU_prod = pd.concat([ratioc_EU_prod], keys=["Ratio BEV:ICEV, European BEV production"], axis=1)
# Extra calculations
BEV_total_use = BEV_use * BEV_lifetime / 1e6 # absolute lifetime operation emissions
# Assemble total results table
# CFEL - the CO2 footprint of electricity in different European countries based on either i) production or ii) consumption perspective.
# BEV – the gCO2/km for electric vehicles for i) all EU countries ii) all segments iii) production and consumption el mix.
# RATIO - the ratio of the gCO2/km for BEVs vs ICEs for i) all EU countries ii) all segments iii) production and consumption el mix.
fp = pd.concat({"Production mix": BEVp}, axis=1, names=["Elmix", "Segment"])
fp = pd.concat([fp, pd.concat({"Consumption mix": BEVc}, axis=1, names=["Elmix", "Segment"])], axis=1)
fp = fp.swaplevel(axis=1).sort_index(axis=1, level="Segment", sort_remaining=False)
fp = pd.concat({"BEV footprint": fp}, axis=1)
RATIOS = pd.concat({"Production mix": ratiop}, axis=1, names=["Elmix", "Segment"])
RATIOS = pd.concat([RATIOS, pd.concat({"Consumption mix": ratioc}, axis=1, names=["Elmix", "Segment"])], axis=1)
RATIOS = RATIOS.swaplevel(axis=1).sort_index(axis=1, level="Segment", sort_remaining=False)
RATIOS = pd.concat({"RATIO BEV:ICEV": RATIOS}, axis=1)
results = fp.join(pd.concat({'BEV impacts': BEV_impacts}, axis=1, names=['', 'Elmix', 'Segment']))
results = results.join(RATIOS)
results = results.join(ratio_use_cons)
results = results.join(BEV_prod_EU)
results = results.join(EU_BEVc)
results = results.join(ratioc_EU_prod)
results = results.join(pd.concat({"Production as share of total footprint":BEV_prod_sharesc}, axis=1))
results_toSI = results.copy()
return results_toSI, ICEV_total_impacts, ICEV_prodEOL_impacts, cars.loc['ICEV', 'Use phase', 'g CO2/km']
#%% Export functions
def export_SI(run_id, results_toSI, production, trades, C, CFEL, no_ef_countries):
"""Format dataframes for export to SI tables."""
drop_countries = production.loc[production.sum(axis=1)==0].index
CFEL_toSI = CFEL[['Production mix intensity', 'Consumption mix intensity']]
CFEL_toSI = CFEL_toSI.round(0)
CFEL_toSI.loc[drop_countries, 'Consumption mix intensity'] = '-' # remove ecoinvent-based countries
CFEL_toSI.sort_index(inplace=True)
country_intensities = C.round(0).fillna(value='-').T
country_intensities.drop(index=drop_countries, inplace=True)
production.drop(index=drop_countries, inplace=True) # drop countries with no production data
production = production.round(2).replace(0, np.nan).fillna(value='-')
trades_forSI = trades.round(2).replace(0, np.nan).fillna(value='-')
trades_forSI = pd.concat([trades_forSI], keys=['Exporting countries'])
trades_forSI = pd.concat([trades_forSI], keys=['Importing countries'], axis=1)
trades_forSI.index = trades_forSI.index.rename([None, None])
trade_pct = CFEL['Trade percentage, gross']
trade_pcti = CFEL['Import percentage']
trade_pcte = CFEL['Export percentage']
trade_pct_toSI = pd.concat([trade_pct, trade_pcti, trade_pcte], axis=1)
trade_pct_toSI.replace(np.inf, np.nan, inplace=True)
trade_pct_toSI.dropna(how='all', inplace=True)
# Export results for SI tables
keeper = run_id + " {:%d-%m-%y, %H_%M}".format(datetime.now())
fp_results = os.path.join(os.path.curdir, 'results')
excel_dict = {'Table S1 - El footprints': 'Data from Figure 2 in manuscript. Calculated national production and consumption mix hybridized lifecycle carbon intensities in g CO2-eq/kWh. Shaded values indicate countries with no production data; consumption mixes are therefore not calculated, and production intensity is obtained from ecoinvent 3.5.',
'Table S2 - intensity matrix':'Regionalized lifecycle carbon intensities of electricity generation technologies in g CO2-eq/kWh. Shaded cells indicate proxy data used (see Methods)',
'Table S3 - Production mix': ' Electricity generation mixes (2020), in TWh',
'Table S4 - trades': 'Trades between studied countries (2020), in TWh/year. Countries denoted in red italics are trade-only and do not have production data from ENTSO-E; ecoinvent values for production mix used for these.',
'Table S5 - trade shares':'Total (gross) electricity traded, total imports of electricity and total exports of electricity. Relative to domestic production. Used in colorbar, Figure 2 and Figure 1, Supplementary Information',
'Table S6 - BEV fp':'Data from Figure 3 in manuscript. BEV carbon intensities in (g CO2-eq/km) for consumption electricity mix',
'Table S7 - Prod share of fp':'Data from Figure 4 in manuscript. Contribution of vehicle production emissions to total carbon footprint',
'Table S8 - Abs BEV impacts':'Data used in Figure 5 in manuscript. Regionalized total lifecycle emissions from BEV in t CO2-eq, with 180 000 km lifetime, using consumption mixes',
'Table S9 - Ratio':'Ratio of BEV:ICEV carbon footprints using consumption electricity mix',
'Table S10 - EUR prod imp':'Production impacts of BEVs with domestic battery production using consumption electricity mix in t CO2-eq',
'Table S11 - EUR prod fp':'Data from Figure 7 in manuscript. Lifecycle BEV footprint with domestic battery production using consumption electricity mix, in g CO2-eq/km, and % change in lifecycle BEV impact from batteries with Asian production.',
}
results_filepath = os.path.join(fp_results, 'SI_results ' + keeper + '.xlsx')
results_toSI.drop(index=drop_countries, inplace=True) # remove ecoinvent-based countries
# select parts of results_toSI DataFrame for each table
table6 = results_toSI.loc(axis=1)['BEV footprint', :, 'Consumption mix'].round(0)
table7 = results_toSI.loc(axis=1)['Production as share of total footprint', :,'Consumption mix']
table8 = results_toSI.loc(axis=1)['BEV impacts', :, 'Consumption mix'].round(1)
table9 = results_toSI.loc(axis=1)['RATIO BEV:ICEV', :, 'Consumption mix'].round(2)
table10 = results_toSI.loc(axis=1)['EUR production impacts BEV', :, 'Consumption mix'].round(1)
table11 = results_toSI.loc(axis=1)['BEV footprint, EUR production', :, 'Consumption mix'].round(0)
    # append data for building Figure 7 in manuscript to Table 11
A_diff = (results_toSI['BEV footprint, EUR production', 'A', 'Consumption mix'] -
results_toSI['BEV footprint', 'A', 'Consumption mix']) / results_toSI['BEV footprint', 'A', 'Consumption mix']
F_diff = (results_toSI['BEV footprint, EUR production', 'JE', 'Consumption mix'] -
results_toSI['BEV footprint', 'JE', 'Consumption mix']) / results_toSI['BEV footprint', 'JE', 'Consumption mix']
diff_cols = pd.MultiIndex.from_product([['% change from Asian production'], ['A', 'JE'],['']])
df = pd.DataFrame([A_diff, F_diff], index=diff_cols).T
table11 = pd.concat([table11, df], axis=1)
fig_data = pd.DataFrame([A_diff, F_diff], index=['A segment', 'F segment'])
# reorder technologies in Tables S2 and S3 to place "other" categories at end
tec_order = ['Biomass',
'Fossil Brown coal/Lignite',
'Fossil Coal-derived gas',
'Fossil Gas',
'Fossil Hard coal',
'Fossil Oil',
'Fossil Oil shale',
'Fossil Peat',
'Geothermal',
'Hydro Pumped Storage',
'Hydro Run-of-river and poundage',
'Hydro Water Reservoir',
'Marine',
'Nuclear',
'Solar',
'Waste',
'Wind Offshore',
'Wind Onshore',
'Other',
'Other renewable'
]
country_intensities = country_intensities.reindex(labels=tec_order, axis=1)
country_intensities.index = country_intensities.index.rename(None)
production = production.reindex(labels=tec_order, axis=1)
# Build dictionary of cells to be shaded for Table S2. Keys are columns,
# items are countries (may be a list)
shade_dict = {key: val for key, val in no_ef_countries.items() if len(val)}
# Write to Excel
    writer = pd.ExcelWriter(results_filepath)
#%%
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from textblob import TextBlob
import twitterscraper as ts
import os
import re
import json
import datetime as dt
import yfinance as yf
import plotly
import plotly.express as px
import plotly.graph_objs as go
#%%
# ------------------
# Got this method of pulling tweets from here:
# ------------------
# https://medium.com/@kevin.a.crystal/scraping-twitter-with-tweetscraper-and-python-ea783b40443b
# https://github.com/jenrhill/Power_Outage_Identification/blob/master/code/1_Data_Collection_and_EDA.ipynb
# https://www.youtube.com/watch?v=zF_Q2v_9zKY
user = 'elonmusk'
limit = 10000
tweets = ts.query_tweets_from_user(user=user, limit=limit)
#%%
class TweetAnalyzer():
"""
Functionality for analyzing and categorizing content from tweets.
"""
#clean tweets
def clean_tweet(self, tweet):
return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
    # Create a sentiment score using TextBlob
def analyze_sentiment_score(self, tweet):
analysis_score = TextBlob(self.clean_tweet(tweet))
analysis_score = analysis_score.sentiment.polarity
return analysis_score
#Determining positive vs negative tweets
def analyze_sentiment_result(self, tweet):
analysis_result = TextBlob(self.clean_tweet(tweet))
if analysis_result.sentiment.polarity >= 0.3:
return 'Positive'
elif analysis_result.sentiment.polarity <= -0.3:
return 'Negative'
else:
return '0'
def tweets_to_data_frame(self, tweets):
df2 = pd.DataFrame(
data=[tweet.timestamp for tweet in tweets], columns=['Date'])
df2['Tweet'] = np.array([tweet.text for tweet in tweets])
df2['Replied_Tweet'] = np.array([tweet.is_replied for tweet in tweets])
df2['Likes'] = np.array([tweet.likes for tweet in tweets])
df2['Reply_Count'] = np.array([tweet.replies for tweet in tweets])
df2['Retweets'] = np.array([tweet.retweets for tweet in tweets])
return df2
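# Illustrative, hypothetical use of TweetAnalyzer on a single string; the
# example text is an assumption chosen only to show the call pattern:
#
#   analyzer = TweetAnalyzer()
#   analyzer.analyze_sentiment_score("Deliveries beat expectations this quarter")   # polarity in [-1, 1]
#   analyzer.analyze_sentiment_result("Deliveries beat expectations this quarter")  # 'Positive' if polarity >= 0.3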
#%%
if __name__ == '__main__':
tweet_analyzer = TweetAnalyzer()
df2 = tweet_analyzer.tweets_to_data_frame(tweets)
df2['Sentiment_Score'] = np.array(
[tweet_analyzer.analyze_sentiment_score(tweet) for tweet in df2['Tweet']])
df2['Sentiment_Result'] = np.array(
[tweet_analyzer.analyze_sentiment_result(tweet) for tweet in df2['Tweet']])
mainData = df2.copy()
mainData.head()
#%%
neg = mainData[mainData['Sentiment_Result'] == "Negative"]
# .where('Sentiment Result'=='Positive')
neg = neg.drop(columns = ['Replied_Tweet','Likes','Reply_Count','Retweets'])
neg.sort_values('Sentiment_Score').to_csv('neg.csv')
# %%
# Determine which day the tweet will affect:
# tweets posted after 4 pm EST affect the next trading day,
# while tweets posted during the trading day affect the current day
def checkDates(d):
if d.date().weekday() == 4 and d.time().hour >= 16:
        return d + pd.Timedelta(days=3)
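    # Assumed completion of checkDates (not part of the original snippet), based on
    # the comments above: weekday tweets posted after 16:00 EST shift to the next
    # day, and all other tweets keep their original date.
    elif d.time().hour >= 16:
        return d + pd.Timedelta(days=1)
    else:
        return d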
# Celligner
from re import sub
from celligner.params import *
from celligner import limma
from genepy.utils import helper as h
from genepy.utils import plot
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.linear_model import LinearRegression
from scipy.spatial import cKDTree
import umap.umap_ as umap
from scanpy.tl import louvain
from scanpy.pp import neighbors
from anndata import AnnData
# import louvain
# import pynndescent
from sklearn.cluster import KMeans
from sklearn import metrics
from collections import Counter
import os
import pickle
import pandas as pd
import numpy as np
from contrastive import CPCA
import mnnpy
def runDiffExprOnCluster(
expression, clustered, clust_covariates=None,
):
"""
Runs DESEQ2 on the clustered data.
Args:
expression (pd.Dataframe): expression data
clustered (list): the clusters
clust_covariates (pd.Dataframe, optional): covariates for the clustering. Defaults to None.
Returns:
(pd.Dataframe): limmapy results
Raises:
ValueError: if the number of genes in the expression matrix and the gene file do not match
"""
n_clusts = len(set(clustered))
print("running differential expression on " + str(n_clusts) + " clusters")
clusts = set(clustered) - set([-1])
# TODO: add covariates
if clust_covariates:
if len(clust_covariates) != n_clusts:
raise ValueError("number of covariates does not match number of clusters")
design_matrix = clust_covariates
# make a design matrix
design_matrix = pd.DataFrame(
index=expression.index,
data=np.array([clustered == i for i in clusts]).T,
columns=["C" + str(i) + "C" for i in clusts],
)
design_matrix.index = design_matrix.index.astype(str).str.replace("-", ".")
design_matrix = design_matrix[design_matrix.sum(1) > 0]
# creating the matrix
data = expression.T
data = data[data.columns[clustered != -1].tolist()]
# running limmapy
print("running limmapy on the samples")
res = (
limma.limmapy()
.lmFit(data, design_matrix)
.eBayes(trend=False)
.topTable(number=len(data))
.iloc[:, len(clusts) :]
)
return res.sort_values(by="F", ascending=False)
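# Illustrative, hypothetical call to runDiffExprOnCluster; the expression frame
# and cluster labels below are placeholders, not real data:
#
#   expr = pd.DataFrame(...)        # samples x genes expression matrix
#   clusters = [0, 0, 1, 1, 2, 2]   # one louvain cluster label per sample
#   diff_genes = runDiffExprOnCluster(expr, clusters)
#   top_genes = diff_genes.head(1000).index  # most cluster-discriminating genes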
class Celligner(object):
def __init__(
self,
gene_file=None,
onlyGenes=GENE_TYPE,
ensemble_server="http://nov2020.archive.ensembl.org/biomart",
umap_kwargs=UMAP_PARAMS,
pca_kwargs=PCA_PARAMS,
neightbors_kwargs=SC_NEIGH_PARAMS,
topKGenes=TOP_K_GENES,
cpca_kwargs=CPCA_PARAMS,
cpca_ncomp=CPCA_NCOMP,
mnn_kwargs=MNN_PARAMS,
make_plots=False,
low_mem=False,
louvain_kwargs=LOUVAIN_PARAMS,
method="mnn_marioni",
priotize_fit=False,
):
"""initialize Celligner object
Args:
onlyGenes (str, optional): one of 'usefull', 'all', 'protein_coding'. Defaults to "usefull".
            gene_file (pd.Dataframe, optional): needs to contain at least 15,000 genes
                and an "ensembl_gene_id" column. Defaults to None.
ensemble_server (str, optional): the ensembl biomart server to map genes to.
Defaults to "http://nov2020.archive.ensembl.org/biomart".
umap_kwargs (dict, optional): see params.py . Defaults to {}.
pca_kwargs (dict, optional): see see params.py . Defaults to {}.
topKGenes (int, optional): see params.py. Defaults to 1000.
cpca_kwargs (dict, optional): see see params.py . Defaults to {}.
cpca_ncomp (int, optional): see params.py. Defaults to 10.
mnn_kwargs (dict, optional): see params.py . Defaults to {}.
make_plots (bool, optional): whether to log multiple plots along the way. Defaults to False.
            low_mem (bool, optional): advised if you have less than 32 GB of RAM. Defaults to False.
louvain_kwargs (dict, optional): see params.py . Defaults to {}.
neightbors_kwargs (dict, optional): see params.py . Defaults to {}.
method (str, optional): either "mnn_marioni" or "mnn". Defaults to "mnn_marioni".
"""
if gene_file:
self.gene_file = gene_file
else:
self.gene_file = h.generateGeneNames(
ensemble_server=ensemble_server, useCache=True
)
if onlyGenes == "protein_coding":
print("using only protein coding genes")
            self.gene_file = self.gene_file[self.gene_file.gene_biotype == "protein_coding"]
elif onlyGenes == "usefull":
print("using only usefull genes")
            self.gene_file = self.gene_file[self.gene_file.gene_biotype.isin(USEFUL_GENE_BIOTYPES)]
else:
print("using all genes")
self.gene_file.ensembl_gene_id.drop_duplicates(keep="first", inplace=True)
self.umap_kwargs = umap_kwargs
self.pca_kwargs = pca_kwargs
self.topKGenes = topKGenes
self.cpca_kwargs = cpca_kwargs
self.cpca_ncomp = cpca_ncomp
self.mnn_kwargs = mnn_kwargs
self.number_of_datasets = 0
self.make_plots = make_plots
self.low_mem = low_mem
self.louvain_kwargs = louvain_kwargs
self.neightbors_kwargs = neightbors_kwargs
self.method = method
self.priotize_fit = priotize_fit
self.fit_input = None
self.fit_clusters = None
self.differential_genes_input = None
self.differential_genes_names = None
self.fit_annotations = None
self.transform_annotations = None
self.transform_input = None
self.transform_clusters = None
self.corrected = None
self.pca_fit = None
self.pca_transform = None
self.common_genes = None
self.cpca_loadings = None
def _check_Xpression(self, X_pression, gene_file):
"""
Args:
X_pression (pd.Dataframe): expression data
gene_file (pd.Dataframe): gene file with an ensembl_gene_id column
Raises:
ValueError: if the number of genes in the expression matrix and the gene file do not match
ValueError: if the expression matrix contains nan values
Returns:
(pd.Dataframe): the expression matrix
"""
common_genes = set(X_pression.columns) & set(gene_file.ensembl_gene_id)
if len(common_genes) < MIN_GENES:
raise ValueError(
"X_pression columns do not match gene_file enough only "
+ str(len(common_genes))
+ " common genes"
)
if self.fit_input is not None:
common_genes = set(self.common_genes) & set(common_genes)
if len(common_genes) < MIN_GENES:
raise ValueError(
"X_pression columns do not match gene_file enough only "
+ str(len(common_genes))
+ " common genes"
)
if self.cpca_loadings is not None:
self.cpca_loadings = self.cpca_loadings[
:, self.fit_input.columns.isin(common_genes)
]
if self.transform_input is not None:
self.transform_input = self.transform_input.loc[:, common_genes]
self.fit_input = self.fit_input.loc[:, common_genes]
print("found " + str(len(common_genes)) + " common genes")
# drop genes not in gene_file
X_pression = X_pression.loc[:, common_genes].astype(float)
# raise issue if there are any NaNs
if X_pression.isnull().values.any():
raise ValueError("X_pression contains NaNs")
return X_pression
def addToFit(
self, X_pression, annotations=None, do_fit=True, do_add=True,
):
"""adds expression data to the fit dataframe
Args:
X_pression (pd.Dataframe): expression data
annotations (pd.Dataframe, optional): sample annotations. Defaults to None.
Raises:
ValueError: if the expression matrix and annotations matrix do not have the same index
ValueError: if the new expression matrix has different gene names than the current one
"""
count = X_pression.shape[0] + (
self.fit_input.shape[0] if self.fit_input is not None else 0
)
print("looking at " + str(count) + " samples.")
fit_input = self._check_Xpression(X_pression, self.gene_file)
if annotations is not None:
if len(annotations) != len(fit_input) or list(fit_input.index) != list(
annotations.index
):
raise ValueError("annotations do not match X_pression")
else:
# create fake annotations
annotations = pd.DataFrame(
index=X_pression.index,
columns=["cell_type", "disease_type", "tissue_type"],
data=np.zeros((len(X_pression), 3)) + self.number_of_datasets,
)
if self.fit_input is None or not do_add:
# it is the first time we run it.
print("creating a fit dataset..")
self.common_genes = fit_input.columns
self.fit_input = fit_input
self.fit_annotations = annotations
else:
print("adding to fit dataset..")
# add annotations together
self.fit_annotations = self.fit_annotations.append(annotations)
# add fit_input together
self.fit_input = self.fit_input.append(fit_input)
self.number_of_datasets += 1
if do_fit:
return self.fit()
elif do_add:
return self.fit(_rerun=False)
def fit(self, X_pression=None, annotations=None, _rerun=True):
"""fit the model using X_pression
Args:
X_pression (pd.Dataframe): contains the expression data as RSEM expected counts with
ensembl_gene_id as columns and samplenames as index.
annotations (pd.Dataframe, optional): sample annotations, for each sample,
needs to contain ['cell_type', 'disease_type', 'tissue_type'].
Defaults to None (will create an empty dataframe).
_rerun (bool, optional): whether to rerun the function entirely or not. Defaults to True.
Raises:
ValueError: if the expression matrix and annotations matrix do not have the same index
ValueError: if the new expression matrix has different gene names than the current one
"""
# check if X_pression is compatible with the model
if X_pression is not None:
self.addToFit(X_pression, annotations, do_fit=False, do_add=False)
elif self.fit_input is None:
raise ValueError("no input provided")
# mean center the dataframe
# TODO? a bit different from R's version as it was using an approximate fast centering method
self.fit_input = self.fit_input.sub(self.fit_input.mean(0), 1)
# clustering: doing SNN on the reduced data
print("clustering...")
# anndata from df
# TODO? a bit different from R's version. ScanPy and Seurat differ in their implementation.
adata = AnnData(self.fit_input)
neighbors(adata, **self.neightbors_kwargs)
louvain(adata, **self.louvain_kwargs)
self.fit_clusters = adata.obs["louvain"].values.astype(int)
del adata
# do differential expression between clusters and getting the top K most expressed genes
if self.make_plots:
# dimensionality reduction
print("reducing dimensionality...")
if _rerun:
self.pca_fit = (
PCA(**self.pca_kwargs)
if not self.low_mem
else IncrementalPCA(**self.pca_kwargs)
)
fit_reduced = self.pca_fit.fit_transform(self.fit_input)
else:
fit_reduced = self.pca_fit.transform(self.fit_input)
# plotting
plot.scatter(
umap.UMAP(**self.umap_kwargs).fit_transform(fit_reduced),
xname="UMAP1",
yname="UMAP2",
colors=self.fit_clusters,
labels=["C" + str(i) for i in self.fit_clusters],
title="SNN clusters",
radi=0.1,
)
if len(set(self.fit_clusters)) < 2:
raise ValueError(
"only one cluster found, no differential expression possible\
try to change your parameters..."
)
if _rerun:
print("doing differential expression analysis on the clusters")
self.differential_genes_input = runDiffExprOnCluster(
self.fit_input, self.fit_clusters
)
# need enough genes to be significant
if (
len(self.differential_genes_input[self.differential_genes_input.F > 10])
< self.topKGenes
):
raise ValueError("not enough differentially expressed genes found..")
print("done")
return self
def putAllToFit(self, redo_diff=False):
"""puts all the data to the fit dataframe"""
self.fit_annotations = self.fit_annotations.append(self.transform_annotations)
self.fit_input = self.fit_input.append(self.corrected)
# clustering
print("clustering...")
adata = AnnData(self.fit_input)
neighbors(adata, **self.neightbors_kwargs)
louvain(adata, **self.louvain_kwargs)
self.fit_clusters = adata.obs["louvain"].values.astype(int)
del adata
if redo_diff:
print("doing differential expression analysis on the clusters")
self.differential_genes_input = runDiffExprOnCluster(
self.fit_input, self.fit_clusters
)
# need enough genes to be significant
if (
len(self.differential_genes_input[self.differential_genes_input.F > 10])
< self.topKGenes
):
raise ValueError("not enough differentially expressed genes found..")
# cleaning up transform
self.transform_annotations = None
self.corrected = None
self.transform_input = None
self.pca_transform = None
print("done")
def addToTransform(
self, X_pression, annotations=None, do_transform=True, do_add=True, **kwargs
):
"""adds expression data to the transform dataframe
Args:
X_pression (pd.Dataframe): the expression data as RSEM expected counts
with ensembl_gene_id as columns and samplenames as index.
annotations (pd.Dataframe, optional): sample annotations, for each sample,
do_transform (bool, optional): if True, will transform the data. Defaults to True.
do_add (bool, optional): if True, will add the data to the transform dataframe.
Returns:
(, optional): transform()'s output
Raises:
ValueError: if the expression matrix and annotations matrix do not have the same index
ValueError: if the new expression matrix has different gene names than the current one
ValueError: if the model has not been fitted yet
"""
count = X_pression.shape[0] + (
self.transform_input.shape[0]
if self.transform_input is not None and do_add
else 0
)
print("looking at " + str(count) + " samples.")
if self.fit_input is None:
raise ValueError("no fit data available, need to run fit or addToFit first")
transform_input = self._check_Xpression(X_pression, self.gene_file)
if annotations is not None:
if len(annotations) != len(transform_input) or list(
transform_input.index
) != list(annotations.index):
raise ValueError("annotations do not match X_pression")
else:
# create fake annotations
annotations = pd.DataFrame(
index=X_pression.index,
columns=["cell_type", "disease_type", "tissue_type"],
data=np.zeros((len(X_pression), 3)) + self.number_of_datasets,
)
if self.transform_input is None or not do_add:
# this is the first time we run it.
print("creating a transform input..")
self.common_genes = transform_input.columns
self.transform_input = transform_input
self.transform_annotations = annotations
else:
print("adding to transform..")
# add annotations together
self.transform_annotations = self.transform_annotations.append(annotations)
# add transform_input together
self.transform_input = self.transform_input.append(transform_input)
self.number_of_datasets += 1
if do_transform:
return self.transform(only_transform=True, **kwargs)
elif do_add:
return self.transform(**kwargs)
def transform(
self,
X_pression=None,
annotations=None,
only_transform=False,
_rerun=True,
recompute_contamination=True,
):
"""transform the cell type for each sample in X_pression
Args:
X_pression (pd.Dataframe, optional): expression dataframe. Defaults to None.
annotations (pd.Dataframe, optional): annotations dataframe. Defaults to None.
only_transform (bool, optional): if True, will only transform the dataframe.
_rerun (bool, optional): if True, will rerun the PCA and SNN. Defaults to True.
Raises:
ValueError: if the model has not been fitted yet
ValueError: if the expression matrix and annotations matrix do not have the same index
ValueError: if the new expression matrix has different gene names than the current one
"""
if X_pression is not None:
self.addToTransform(
X_pression, annotations, do_transform=False, do_add=False
)
elif self.transform_input is None:
raise ValueError("no transform Expression data provided")
# mean center the dataframe
self.transform_input = self.transform_input.sub(self.transform_input.mean(0), 1)
if _rerun:
# clustering: doing SNN on the reduced data
print("clustering..")
# anndata from df
adata = AnnData(self.transform_input)
neighbors(adata, **self.neightbors_kwargs)
louvain(adata, **self.louvain_kwargs)
self.transform_clusters = adata.obs["louvain"].values.astype(int)
del adata
if self.make_plots:
# dimensionality reduction
print("reducing dimensionality...")
if _rerun:
self.pca_transform = (
PCA(**self.pca_kwargs)
if not self.low_mem
else IncrementalPCA(**self.pca_kwargs)
)
reduced = self.pca_transform.fit_transform(
                    pd.concat([self.transform_input, self.fit_input])
                )
from __future__ import division
from builtins import str
from builtins import range
from builtins import object
__copyright__ = "Copyright 2015 Contributing Entities"
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
import os
import sys
import numpy as np
import pandas as pd
from .Error import DemandInputError
from .Logger import FastTripsLogger
from .Route import Route
from .TAZ import TAZ
from .Trip import Trip
from .Util import Util
class Passenger(object):
"""
Passenger class.
One instance represents all of the households and persons that could potentially make transit trips.
Stores household information in :py:attr:`Passenger.households_df` and person information in
:py:attr:`Passenger.persons_df`, which are both :py:class:`pandas.DataFrame` instances.
"""
#: File with households
INPUT_HOUSEHOLDS_FILE = "household.txt"
#: Households column: Household ID
HOUSEHOLDS_COLUMN_HOUSEHOLD_ID = 'hh_id'
#: File with persons
INPUT_PERSONS_FILE = "person.txt"
#: Persons column: Household ID
PERSONS_COLUMN_HOUSEHOLD_ID = HOUSEHOLDS_COLUMN_HOUSEHOLD_ID
#: Persons column: Person ID (string)
PERSONS_COLUMN_PERSON_ID = 'person_id'
# ========== Added by fasttrips =======================================================
#: Persons column: Person ID number
PERSONS_COLUMN_PERSON_ID_NUM = 'person_id_num'
#: File with trip list
INPUT_TRIP_LIST_FILE = "trip_list.txt"
#: Trip list column: Person ID
TRIP_LIST_COLUMN_PERSON_ID = PERSONS_COLUMN_PERSON_ID
#: Trip list column: Person Trip ID
TRIP_LIST_COLUMN_PERSON_TRIP_ID = "person_trip_id"
#: Trip list column: Origin TAZ ID
TRIP_LIST_COLUMN_ORIGIN_TAZ_ID = "o_taz"
#: Trip list column: Destination TAZ ID
TRIP_LIST_COLUMN_DESTINATION_TAZ_ID = "d_taz"
#: Trip list column: Mode
TRIP_LIST_COLUMN_MODE = "mode"
#: Trip list column: Departure Time. DateTime.
TRIP_LIST_COLUMN_DEPARTURE_TIME = 'departure_time'
#: Trip list column: Arrival Time. DateTime.
TRIP_LIST_COLUMN_ARRIVAL_TIME = 'arrival_time'
#: Trip list column: Time Target (either 'arrival' or 'departure')
TRIP_LIST_COLUMN_TIME_TARGET = 'time_target'
# ========== Added by fasttrips =======================================================
#: Trip list column: Unique numeric ID for this passenger/trip
TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM = "trip_list_id_num"
#: Trip list column: Origin TAZ Numeric ID
TRIP_LIST_COLUMN_ORIGIN_TAZ_ID_NUM = "o_taz_num"
#: Trip list column: Destination Numeric TAZ ID
TRIP_LIST_COLUMN_DESTINATION_TAZ_ID_NUM = "d_taz_num"
#: Trip list column: Departure Time. Float, minutes after midnight.
TRIP_LIST_COLUMN_DEPARTURE_TIME_MIN = 'departure_time_min'
    #: Trip list column: Arrival Time. Float, minutes after midnight.
TRIP_LIST_COLUMN_ARRIVAL_TIME_MIN = 'arrival_time_min'
#: Trip list column: Transit Mode
TRIP_LIST_COLUMN_TRANSIT_MODE = "transit_mode"
#: Trip list column: Access Mode
TRIP_LIST_COLUMN_ACCESS_MODE = "access_mode"
#: Trip list column: Egress Mode
TRIP_LIST_COLUMN_EGRESS_MODE = "egress_mode"
#: Trip list column: Outbound (bool), true iff time target is arrival
TRIP_LIST_COLUMN_OUTBOUND = "outbound"
#: Option for :py:attr:`Passenger.TRIP_LIST_COLUMN_TIME_TARGET` (arrival time)
TIME_TARGET_ARRIVAL = "arrival"
#: Option for :py:attr:`Passenger.TRIP_LIST_COLUMN_TIME_TARGET` (departure time)
TIME_TARGET_DEPARTURE = "departure"
    #: Generic transit. Specify this for mode when you mean walk to transit, any transit mode(s), then walk from transit
#: TODO: get rid of this? Maybe user should always specify.
MODE_GENERIC_TRANSIT = "transit"
#: Generic transit - Numeric mode number
MODE_GENERIC_TRANSIT_NUM = 1000
    #: Minimum Value of Time: 1 dollar shouldn't be worth 180 minutes
MIN_VALUE_OF_TIME = 60.0/180.0
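    # (60.0/180.0 is roughly 0.33; assuming value of time is expressed in dollars per hour,
    #  this floor means one dollar buys at most 180 minutes of travel time)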
#: Trip list column: User class. String.
TRIP_LIST_COLUMN_USER_CLASS = "user_class"
#: Trip list column: Purpose. String.
TRIP_LIST_COLUMN_PURPOSE = "purpose"
#: Trip list column: Value of time. Float.
TRIP_LIST_COLUMN_VOT = "vot"
#: Trip list column: Trace. Boolean.
TRIP_LIST_COLUMN_TRACE = "trace"
#: Column names from pathfinding
PF_COL_PF_ITERATION = 'pf_iteration' #: 0.01*pathfinding_iteration + iteration during which this path was found
PF_COL_PAX_A_TIME = 'pf_A_time' #: time path-finder thinks passenger arrived at A
PF_COL_PAX_B_TIME = 'pf_B_time' #: time path-finder thinks passenger arrived at B
PF_COL_LINK_TIME = 'pf_linktime' #: time path-finder thinks passenger spent on link
PF_COL_LINK_FARE = 'pf_linkfare' #: fare path-finder thinks passenger spent on link
PF_COL_LINK_COST = 'pf_linkcost' #: cost (generalized) path-finder thinks passenger spent on link
PF_COL_LINK_DIST = 'pf_linkdist' #: dist path-finder thinks passenger spent on link
PF_COL_WAIT_TIME = 'pf_waittime' #: time path-finder thinks passenger waited for vehicle on trip links
PF_COL_PATH_NUM = 'pathnum' #: path number, starting from 0
PF_COL_LINK_NUM = 'linknum' #: link number, starting from access
PF_COL_LINK_MODE = 'linkmode' #: link mode (Access, Trip, Egress, etc)
PF_COL_MODE = TRIP_LIST_COLUMN_MODE #: supply mode
PF_COL_ROUTE_ID = Trip.TRIPS_COLUMN_ROUTE_ID #: link route ID
PF_COL_TRIP_ID = Trip.TRIPS_COLUMN_TRIP_ID #: link trip ID
PF_COL_DESCRIPTION = 'description' #: path text description
#: todo replace/rename ??
PF_COL_PAX_A_TIME_MIN = 'pf_A_time_min'
#: pathfinding results
PF_PATHS_CSV = r"enumerated_paths.csv"
PF_LINKS_CSV = r"enumerated_links.csv"
#: results - PathSets
PATHSET_PATHS_CSV = r"pathset_paths.csv"
PATHSET_LINKS_CSV = r"pathset_links.csv"
def __init__(self, input_dir, output_dir, today, stops, routes, capacity_constraint):
"""
Constructor from dictionary mapping attribute to value.
"""
# if no demand dir, nothing to do
if input_dir == None:
self.trip_list_df = pd.DataFrame()
return
FastTripsLogger.info("-------- Reading demand --------")
FastTripsLogger.info("Capacity constraint? %x" % capacity_constraint )
self.trip_list_df = pd.read_csv(os.path.join(input_dir, Passenger.INPUT_TRIP_LIST_FILE),
skipinitialspace=True, ##LMZ
dtype={Passenger.TRIP_LIST_COLUMN_PERSON_ID :'S',
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID :'S',
Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID :'S',
Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID:'S',
Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME :'S',
Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME :'S',
Passenger.TRIP_LIST_COLUMN_PURPOSE :'S'})
trip_list_cols = list(self.trip_list_df.columns.values)
assert(Passenger.TRIP_LIST_COLUMN_PERSON_ID in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_TIME_TARGET in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_VOT in trip_list_cols)
FastTripsLogger.debug("=========== TRIP LIST ===========\n" + str(self.trip_list_df.head()))
FastTripsLogger.debug("\n"+str(self.trip_list_df.index.dtype)+"\n"+str(self.trip_list_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.trip_list_df), "person trips", Passenger.INPUT_TRIP_LIST_FILE))
# Error on missing person ids or person_trip_ids
missing_person_ids = self.trip_list_df[pd.isnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_PERSON_ID])|
pd.isnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID])]
if len(missing_person_ids)>0:
error_msg = "Missing person_id or person_trip_id fields:\n%s\n" % str(missing_person_ids)
error_msg += "Use 0 for person_id for trips without corresponding person."
FastTripsLogger.fatal(error_msg)
raise DemandInputError(Passenger.INPUT_TRIP_LIST_FILE, error_msg)
# Drop (warn) on missing origins or destinations
missing_ods = self.trip_list_df[ pd.isnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID])|
pd.isnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID]) ]
if len(missing_ods)>0:
FastTripsLogger.warn("Missing origin or destination for the following trips. Dropping.\n%s" % str(missing_ods))
self.trip_list_df = self.trip_list_df.loc[ pd.notnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID ])&
pd.notnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID]) ].reset_index(drop=True)
FastTripsLogger.warn("=> Have %d person trips" % len(self.trip_list_df))
non_zero_person_ids = len(self.trip_list_df.loc[self.trip_list_df[Passenger.TRIP_LIST_COLUMN_PERSON_ID]!="0"])
if non_zero_person_ids > 0 and os.path.exists(os.path.join(input_dir, Passenger.INPUT_PERSONS_FILE)):
self.persons_df = pd.read_csv(os.path.join(input_dir, Passenger.INPUT_PERSONS_FILE),
skipinitialspace=True,
dtype={Passenger.PERSONS_COLUMN_PERSON_ID:'S'})
self.persons_id_df = Util.add_numeric_column(self.persons_df[[Passenger.PERSONS_COLUMN_PERSON_ID]],
id_colname=Passenger.PERSONS_COLUMN_PERSON_ID,
numeric_newcolname=Passenger.PERSONS_COLUMN_PERSON_ID_NUM)
self.persons_df = pd.merge(left=self.persons_df, right=self.persons_id_df,
how="left")
persons_cols = list(self.persons_df.columns.values)
FastTripsLogger.debug("=========== PERSONS ===========\n" + str(self.persons_df.head()))
FastTripsLogger.debug("\n"+str(self.persons_df.index.dtype)+"\n"+str(self.persons_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.persons_df), "persons", Passenger.INPUT_PERSONS_FILE))
self.households_df = pd.read_csv(os.path.join(input_dir, Passenger.INPUT_HOUSEHOLDS_FILE), skipinitialspace=True)
household_cols = list(self.households_df.columns.values)
FastTripsLogger.debug("=========== HOUSEHOLDS ===========\n" + str(self.households_df.head()))
FastTripsLogger.debug("\n"+str(self.households_df.index.dtype)+"\n"+str(self.households_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.households_df), "households", Passenger.INPUT_HOUSEHOLDS_FILE))
else:
self.persons_df = pd.DataFrame()
self.households_df = pd.DataFrame()
# make sure that each tuple TRIP_LIST_COLUMN_PERSON_ID, TRIP_LIST_COLUMN_PERSON_TRIP_ID is unique
self.trip_list_df["ID_dupes"] = self.trip_list_df.duplicated(subset=[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID],
keep=False)
if self.trip_list_df["ID_dupes"].sum() > 0:
error_msg = "Duplicate IDs (%s, %s) found:\n%s" % \
(Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
self.trip_list_df.loc[self.trip_list_df["ID_dupes"]==True].to_string())
FastTripsLogger.fatal(error_msg)
raise DemandInputError(Passenger.INPUT_TRIP_LIST_FILE, error_msg)
# Create unique numeric index
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM] = self.trip_list_df.index + 1
# datetime version
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME] = \
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME].map(lambda x: Util.read_time(x))
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME] = \
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME].map(lambda x: Util.read_time(x))
# float version
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME_MIN] = \
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME].map(lambda x: \
60*x.time().hour + x.time().minute + (x.time().second/60.0) )
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME_MIN] = \
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME].map(lambda x: \
60*x.time().hour + x.time().minute + (x.time().second/60.0) )
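        # e.g. a departure time of 17:30:30 becomes 60*17 + 30 + 30/60.0 = 1050.5 minutes after midnight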
# TODO: validate fields?
        # value of time must be greater than a threshold or any fare becomes prohibitively expensive
low_vot = self.trip_list_df.loc[ self.trip_list_df[Passenger.TRIP_LIST_COLUMN_VOT] < Passenger.MIN_VALUE_OF_TIME ]
if len(low_vot) > 0:
FastTripsLogger.warn("These trips have value of time lower than the minimum threshhhold (%f): raising to minimum.\n%s" %
(Passenger.MIN_VALUE_OF_TIME, str(low_vot) ))
self.trip_list_df.loc[ self.trip_list_df[Passenger.TRIP_LIST_COLUMN_VOT] < Passenger.MIN_VALUE_OF_TIME,
Passenger.TRIP_LIST_COLUMN_VOT] = Passenger.MIN_VALUE_OF_TIME
if len(self.persons_df) > 0:
# Join trips to persons
self.trip_list_df = pd.merge(left=self.trip_list_df, right=self.persons_df,
how='left',
on=Passenger.TRIP_LIST_COLUMN_PERSON_ID)
# are any null?
no_person_ids = self.trip_list_df.loc[ pd.isnull(self.trip_list_df[Passenger.PERSONS_COLUMN_PERSON_ID_NUM])&
(self.trip_list_df[Passenger.PERSONS_COLUMN_PERSON_ID]!="0")]
if len(no_person_ids) > 0:
error_msg = "Even though a person list is given, failed to find person information for %d trips" % len(no_person_ids)
FastTripsLogger.fatal(error_msg)
FastTripsLogger.fatal("\n%s\n" % no_person_ids.to_string())
raise DemandInputError(Passenger.INPUT_TRIP_LIST_FILE, error_msg)
# And then to households
self.trip_list_df = pd.merge(left=self.trip_list_df, right=self.households_df,
how='left',
on=Passenger.PERSONS_COLUMN_HOUSEHOLD_ID)
else:
# Give each passenger a unique person ID num
self.trip_list_df[Passenger.PERSONS_COLUMN_PERSON_ID_NUM] = self.trip_list_df.index + 1
# add TAZ numeric ids (stored in the stop mapping)
self.trip_list_df = stops.add_numeric_stop_id(self.trip_list_df,
id_colname =Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID,
numeric_newcolname=Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID_NUM,
warn =True,
warn_msg ="TAZ numbers configured as origins in demand file are not found in the network")
self.trip_list_df = stops.add_numeric_stop_id(self.trip_list_df,
id_colname =Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID,
numeric_newcolname=Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID_NUM,
warn =True,
warn_msg ="TAZ numbers configured as destinations in demand file are not found in the network")
# trips with invalid TAZs have been dropped
FastTripsLogger.debug("Have %d person trips" % len(self.trip_list_df))
# figure out modes:
if Passenger.TRIP_LIST_COLUMN_MODE not in trip_list_cols:
# default to generic walk-transit-walk
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_MODE] = Passenger.MODE_GENERIC_TRANSIT
self.trip_list_df['mode_dash_count'] = 0
else:
# count the dashes in the mode
self.trip_list_df['mode_dash_count'] = self.trip_list_df[Passenger.TRIP_LIST_COLUMN_MODE]\
.map(lambda x: x.count('-'))
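            # e.g. a hypothetical mode string "walk-local_bus-walk" has two dashes and is split
            # below into access="walk", transit="local_bus", egress="walk"; a bare
            # MODE_GENERIC_TRANSIT ("transit") has zero dashes and gets the default access/egress modes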
# The only modes allowed are access-transit-egress or MODE_GENERIC_TRANSIT
bad_mode_df = self.trip_list_df.loc[((self.trip_list_df['mode_dash_count']!=2)&
((self.trip_list_df['mode_dash_count']!=0)|
(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_MODE]!=Passenger.MODE_GENERIC_TRANSIT)))]
if len(bad_mode_df) > 0:
FastTripsLogger.fatal("Could not understand column '%s' in the following: \n%s" %
(Passenger.TRIP_LIST_COLUMN_MODE,
bad_mode_df[[Passenger.TRIP_LIST_COLUMN_MODE,'mode_dash_count']].to_string()))
sys.exit(2)
# Take care of the transit generic
self.trip_list_df.loc[self.trip_list_df['mode_dash_count']==0,
Passenger.TRIP_LIST_COLUMN_TRANSIT_MODE] = Passenger.MODE_GENERIC_TRANSIT
self.trip_list_df.loc[self.trip_list_df['mode_dash_count']==0,
Passenger.TRIP_LIST_COLUMN_ACCESS_MODE ] = "%s" % TAZ.ACCESS_EGRESS_MODES[0]
self.trip_list_df.loc[self.trip_list_df['mode_dash_count']==0,
Passenger.TRIP_LIST_COLUMN_EGRESS_MODE ] = "%s" % TAZ.ACCESS_EGRESS_MODES[0]
# Take care of the access-transit-egress
self.trip_list_df.loc[self.trip_list_df['mode_dash_count']==2,
Passenger.TRIP_LIST_COLUMN_ACCESS_MODE] = self.trip_list_df[Passenger.TRIP_LIST_COLUMN_MODE]\
.map(lambda x: "%s" % x[:x.find('-')])
self.trip_list_df.loc[self.trip_list_df['mode_dash_count']==2,
Passenger.TRIP_LIST_COLUMN_TRANSIT_MODE] = self.trip_list_df[Passenger.TRIP_LIST_COLUMN_MODE]\
.map(lambda x: x[x.find('-')+1:x.rfind('-')])
self.trip_list_df.loc[self.trip_list_df['mode_dash_count']==2,
Passenger.TRIP_LIST_COLUMN_EGRESS_MODE] = self.trip_list_df[Passenger.TRIP_LIST_COLUMN_MODE]\
.map(lambda x: "%s" % x[x.rfind('-')+1:])
# We're done with mode_dash_count, thanks for your service
self.trip_list_df.drop('mode_dash_count', axis=1, inplace=True) # replace with cumsum
# validate time_target
invalid_time_target = self.trip_list_df.loc[ self.trip_list_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET].isin(
[Passenger.TIME_TARGET_ARRIVAL, Passenger.TIME_TARGET_DEPARTURE])==False ]
if len(invalid_time_target) > 0:
error_msg = "Invalid value in column %s:\n%s" % (Passenger.TRIP_LIST_COLUMN_TIME_TARGET, str(invalid_time_target))
FastTripsLogger.fatal(error_msg)
raise DemandInputError(Passenger.INPUT_TRIP_LIST_FILE, error_msg)
# set outbound
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_OUTBOUND] = (self.trip_list_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == Passenger.TIME_TARGET_ARRIVAL)
# Set the user class for each trip
from .PathSet import PathSet
PathSet.set_user_class(self.trip_list_df, Passenger.TRIP_LIST_COLUMN_USER_CLASS)
# Verify that PathSet has all the configuration for these user classes + transit modes + access modes + egress modes
# => Figure out unique user class + mode combinations
self.modes_df = self.trip_list_df[[Passenger.TRIP_LIST_COLUMN_USER_CLASS,
Passenger.TRIP_LIST_COLUMN_PURPOSE,
Passenger.TRIP_LIST_COLUMN_TRANSIT_MODE,
Passenger.TRIP_LIST_COLUMN_ACCESS_MODE,
Passenger.TRIP_LIST_COLUMN_EGRESS_MODE]].set_index([Passenger.TRIP_LIST_COLUMN_USER_CLASS, Passenger.TRIP_LIST_COLUMN_PURPOSE])
# stack - so before we have three columns: transit_mode, access_mode, egress_mode
# after, we have two columns: demand_mode_type and the value, demand_mode
self.modes_df = self.modes_df.stack().to_frame()
self.modes_df.index.names = [Passenger.TRIP_LIST_COLUMN_USER_CLASS, Passenger.TRIP_LIST_COLUMN_PURPOSE, PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE]
self.modes_df.columns = [PathSet.WEIGHTS_COLUMN_DEMAND_MODE]
self.modes_df.reset_index(inplace=True)
self.modes_df.drop_duplicates(inplace=True)
        # demand_mode_type currently holds the stacked column names (transit_mode/access_mode/egress_mode);
        # strip the trailing '_mode' so the values read transit, access, egress
self.modes_df[PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE] = self.modes_df[PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE].apply(lambda x: x[:-5])
FastTripsLogger.debug("Demand mode types by class & purpose: \n%s" % str(self.modes_df))
# Make sure we have all the weights required for these user_class/mode combinations
self.trip_list_df = PathSet.verify_weight_config(self.modes_df, output_dir, routes, capacity_constraint, self.trip_list_df)
# add column trace
from .Assignment import Assignment
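        # Assignment.TRACE_IDS is expected to hold (person_id, person_trip_id) tuples
        # (matching the from_records() columns below), e.g. a hypothetical [("1000", "trip_1")]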
if len(Assignment.TRACE_IDS) > 0:
trace_df = pd.DataFrame.from_records(data=Assignment.TRACE_IDS,
columns=[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID]).astype(object)
trace_df[Passenger.TRIP_LIST_COLUMN_TRACE] = True
# combine
self.trip_list_df = pd.merge(left=self.trip_list_df,
right=trace_df,
how="left",
on=[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID])
# make nulls into False
self.trip_list_df.loc[pd.isnull(
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_TRACE]), Passenger.TRIP_LIST_COLUMN_TRACE] = False
else:
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_TRACE] = False
FastTripsLogger.info("Have %d person trips" % len(self.trip_list_df))
FastTripsLogger.debug("Final trip_list_df\n"+str(self.trip_list_df.index.dtype)+"\n"+str(self.trip_list_df.dtypes))
FastTripsLogger.debug("\n"+self.trip_list_df.head().to_string())
#: Maps trip_list_id to :py:class:`PathSet` instance. Use trip_list_id instead of (person_id, person_trip_id) for simplicity and to iterate sequentially
#: in setup_passenger_pathsets()
self.id_to_pathset = collections.OrderedDict()
def add_pathset(self, trip_list_id, pathset):
"""
Stores this path set for the trip_list_id.
"""
self.id_to_pathset[trip_list_id] = pathset
def get_pathset(self, trip_list_id):
"""
Retrieves a stored path set for the given trip_list_id
"""
return self.id_to_pathset[trip_list_id]
def get_person_id(self, trip_list_id):
to_ret = self.trip_list_df.loc[self.trip_list_df[Passenger.TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM]==trip_list_id,
[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID]]
return(to_ret.iloc[0,0], to_ret.iloc[0,1])
def read_passenger_pathsets(self, pathset_dir, stops, modes_df, include_asgn=True):
"""
Reads the dataframes described in :py:meth:`Passenger.setup_passenger_pathsets` and returns them.
:param pathset_dir: Location of csv files to read
:type pathset_dir: string
:param include_asgn: If true, read from files called :py:attr:`Passenger.PF_PATHS_CSV` and :py:attr:`Passenger.PF_LINKS_CSV`.
Otherwise read from files called :py:attr:`Passenger.PATHSET_PATHS_CSV` and :py:attr:`Passenger.PATHSET_LINKS_CSV` which include assignment results.
:return: See :py:meth:`Assignment.setup_passengers`
for documentation on the passenger paths :py:class:`pandas.DataFrame`
:rtype: a tuple of (:py:class:`pandas.DataFrame`, :py:class:`pandas.DataFrame`)
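        Example (illustrative sketch; ``passengers``, ``stops`` and ``modes_df`` are assumed to
        come from an already set-up run, and the directory name is hypothetical)::

            pathset_paths_df, pathset_links_df = passengers.read_passenger_pathsets(
                "output/pathfind_iter1", stops, modes_df, include_asgn=False)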
"""
# read existing paths
paths_file = os.path.join(pathset_dir, Passenger.PATHSET_PATHS_CSV if include_asgn else Passenger.PF_PATHS_CSV)
pathset_paths_df = pd.read_csv(paths_file,
skipinitialspace=True,
dtype={Passenger.TRIP_LIST_COLUMN_PERSON_ID :'S',
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID:'S'})
FastTripsLogger.info("Read %s" % paths_file)
FastTripsLogger.debug("pathset_paths_df.dtypes=\n%s" % str(pathset_paths_df.dtypes))
from .Assignment import Assignment
date_cols = [Passenger.PF_COL_PAX_A_TIME, Passenger.PF_COL_PAX_B_TIME]
if include_asgn:
date_cols.extend([Assignment.SIM_COL_PAX_BOARD_TIME,
Assignment.SIM_COL_PAX_ALIGHT_TIME,
Assignment.SIM_COL_PAX_A_TIME,
Assignment.SIM_COL_PAX_B_TIME])
links_dtypes = {Passenger.TRIP_LIST_COLUMN_PERSON_ID :'S',
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID:'S',
Trip.TRIPS_COLUMN_TRIP_ID :'S',
"A_id" :'S',
"B_id" :'S',
Passenger.PF_COL_ROUTE_ID :'S',
Passenger.PF_COL_TRIP_ID :'S'}
# read datetimes as string initially
for date_col in date_cols:
links_dtypes[date_col] = 'S'
links_file = os.path.join(pathset_dir, Passenger.PATHSET_LINKS_CSV if include_asgn else Passenger.PF_LINKS_CSV)
pathset_links_df = pd.read_csv(links_file, skipinitialspace=True, dtype=links_dtypes)
# convert time strings to datetimes
for date_col in date_cols:
if date_col in pathset_links_df.columns.values:
pathset_links_df[date_col] = pathset_links_df[date_col].map(lambda x: Util.read_time(x))
# convert time duration columns to time durations
link_cols = list(pathset_links_df.columns.values)
if Passenger.PF_COL_LINK_TIME in link_cols:
pathset_links_df[Passenger.PF_COL_LINK_TIME] = pd.to_timedelta(pathset_links_df[Passenger.PF_COL_LINK_TIME])
elif "%s min" % Passenger.PF_COL_LINK_TIME in link_cols:
pathset_links_df[Passenger.PF_COL_LINK_TIME] = pd.to_timedelta(pathset_links_df["%s min" % Passenger.PF_COL_LINK_TIME], unit='m')
if Passenger.PF_COL_WAIT_TIME in link_cols:
pathset_links_df[Passenger.PF_COL_WAIT_TIME] = pd.to_timedelta(pathset_links_df[Passenger.PF_COL_WAIT_TIME])
elif "%s min" % Passenger.PF_COL_WAIT_TIME in link_cols:
pathset_links_df[Passenger.PF_COL_WAIT_TIME] = pd.to_timedelta(pathset_links_df["%s min" % Passenger.PF_COL_WAIT_TIME], unit='m')
# if simulation results are available
if Assignment.SIM_COL_PAX_LINK_TIME in link_cols:
pathset_links_df[Assignment.SIM_COL_PAX_LINK_TIME] = pd.to_timedelta(pathset_links_df[Assignment.SIM_COL_PAX_LINK_TIME])
elif "%s min" % Assignment.SIM_COL_PAX_WAIT_TIME in link_cols:
pathset_links_df[Assignment.SIM_COL_PAX_LINK_TIME] = pd.to_timedelta(pathset_links_df["%s min" % Assignment.SIM_COL_PAX_LINK_TIME], unit='m')
if Assignment.SIM_COL_PAX_WAIT_TIME in link_cols:
pathset_links_df[Assignment.SIM_COL_PAX_WAIT_TIME] = pd.to_timedelta(pathset_links_df[Assignment.SIM_COL_PAX_WAIT_TIME])
elif "%s min" % Assignment.SIM_COL_PAX_WAIT_TIME in link_cols:
pathset_links_df[Assignment.SIM_COL_PAX_WAIT_TIME] = pd.to_timedelta(pathset_links_df["%s min" % Assignment.SIM_COL_PAX_WAIT_TIME], unit='m')
# and drop the numeric version
if "%s min" % Passenger.PF_COL_LINK_TIME in link_cols:
pathset_links_df.drop(["%s min" % Passenger.PF_COL_LINK_TIME,
"%s min" % Passenger.PF_COL_WAIT_TIME], axis=1, inplace=True)
if "%s min" % Assignment.SIM_COL_PAX_LINK_TIME in link_cols:
pathset_links_df.drop(["%s min" % Assignment.SIM_COL_PAX_LINK_TIME,
"%s min" % Assignment.SIM_COL_PAX_WAIT_TIME], axis=1, inplace=True)
# if A_id_num isn't there, add it
if "A_id_num" not in pathset_links_df.columns.values:
pathset_links_df = stops.add_numeric_stop_id(pathset_links_df, id_colname="A_id", numeric_newcolname="A_id_num",
warn=True, warn_msg="read_passenger_pathsets: invalid stop ID", drop_failures=False)
if "B_id_num" not in pathset_links_df.columns.values:
pathset_links_df = stops.add_numeric_stop_id(pathset_links_df, id_colname="B_id", numeric_newcolname="B_id_num",
warn=True, warn_msg="read_passenger_pathsets: invalid stop ID", drop_failures=False)
# if trip_list_id_num is in trip list and not in these, add it
if Passenger.TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM in self.trip_list_df.columns.values:
if Passenger.TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM not in pathset_paths_df.columns.values:
pathset_paths_df = pd.merge(left =pathset_paths_df,
right =self.trip_list_df[[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM]],
how ="left")
if Passenger.TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM not in pathset_links_df.columns.values:
pathset_links_df = pd.merge(left =pathset_links_df,
right =self.trip_list_df[[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM]],
how ="left")
# add mode_num if it's not there
if Route.ROUTES_COLUMN_MODE_NUM not in pathset_links_df.columns.values:
pathset_links_df = pd.merge(left=pathset_links_df, right=modes_df[[Route.ROUTES_COLUMN_MODE_NUM, Route.ROUTES_COLUMN_MODE]], how="left")
FastTripsLogger.info("Read %s" % links_file)
FastTripsLogger.debug("pathset_links_df head=\n%s" % str(pathset_links_df.head()))
FastTripsLogger.debug("pathset_links_df.dtypes=\n%s" % str(pathset_links_df.dtypes))
return (pathset_paths_df, pathset_links_df)
@staticmethod
def process_path(pathnum, pathset, pathset_id, pf_iteration, is_skimming=False):
from .PathSet import PathSet
from .Assignment import Assignment
if is_skimming:
origin = pathset_id
pathlist_entry = [
origin,
pathset.direction,
# pathset.mode, # where does this come from?
pathnum,
]
else:
trip_list_id = pathset_id
pathlist_entry = [
pathset.person_id,
pathset.person_trip_id,
trip_list_id,
(pathset.person_id, pathset.person_trip_id) in Assignment.TRACE_IDS,
pathset.direction,
pathset.mode,
pf_iteration,
pathnum,
]
pathlist_entry.extend([
pathset.pathdict[pathnum][PathSet.PATH_KEY_COST],
pathset.pathdict[pathnum][PathSet.PATH_KEY_FARE],
pathset.pathdict[pathnum][PathSet.PATH_KEY_PROBABILITY],
pathset.pathdict[pathnum][PathSet.PATH_KEY_INIT_COST],
pathset.pathdict[pathnum][PathSet.PATH_KEY_INIT_FARE]
])
return pathlist_entry
@staticmethod
def process_path_state(
pathnum, pathset, pathset_id, state_id, state, pf_iteration,
linkmode, link_num, prev_linkmode, prev_state_id, state_list_for_logging, is_skimming=False,
):
# OUTBOUND passengers have states like this:
# stop: label departure dep_mode successor linktime
# orig_taz Access b stop1
# b stop1 trip1 a stop2
# a stop2 Transfer b stop3
# b stop3 trip2 a stop4
# a stop4 Egress dest_taz
#
# stop: label dep_time dep_mode successor seq suc linktime cost arr_time
# 460: 0:20:49.4000 17:41:10 Access 3514 -1 -1 0:03:08.4000 0:03:08.4000 17:44:18
# 3514: 0:17:41.0000 17:44:18 5131292 4313 30 40 0:06:40.0000 0:12:21.8000 17:50:59
# 4313: 0:05:19.2000 17:50:59 Transfer 5728 -1 -1 0:00:19.2000 0:00:19.2000 17:51:18
# 5728: 0:04:60.0000 17:57:00 5154302 5726 16 17 0:07:33.8000 0:03:02.4000 17:58:51
# 5726: 0:01:57.6000 17:58:51 Egress 231 -1 -1 0:01:57.6000 0:01:57.6000 18:00:49
# INBOUND passengers have states like this
# stop: label arrival arr_mode predecessor linktime
# dest_taz Egress a stop4
# a stop4 trip2 b stop3
# b stop3 Transfer a stop2
# a stop2 trip1 b stop1
# b stop1 Access orig_taz
#
# stop: label arr_time arr_mode predecessor seq pred linktime cost dep_time
# 15: 0:36:38.4000 17:30:38 Egress 3772 -1 -1 0:02:38.4000 0:02:38.4000 17:28:00
# 3772: 0:34:00.0000 17:28:00 5123368 6516 22 14 0:24:17.2000 0:24:17.2000 17:05:50
# 6516: 0:09:42.8000 17:03:42 Transfer 4766 -1 -1 0:00:16.8000 0:00:16.8000 17:03:25
# 4766: 0:09:26.0000 17:03:25 5138749 5671 7 3 0:05:30.0000 0:05:33.2000 16:57:55
# 5671: 0:03:52.8000 16:57:55 Access 943 -1 -1 0:03:52.8000 0:03:52.8000 16:54:03
from .PathSet import PathSet
from .Assignment import Assignment
prev_linkmode = None
prev_state_id = None
mode_num = None
trip_id = None
waittime = None
if linkmode in [PathSet.STATE_MODE_ACCESS, PathSet.STATE_MODE_TRANSFER, PathSet.STATE_MODE_EGRESS]:
mode_num = state[PathSet.STATE_IDX_TRIP]
else:
# trip mode_num will need to be joined
trip_id = state[PathSet.STATE_IDX_TRIP]
linkmode = PathSet.STATE_MODE_TRIP
if (not is_skimming) and pathset.outbound:
a_id_num = state_id
b_id_num = state[PathSet.STATE_IDX_SUCCPRED]
a_seq = state[PathSet.STATE_IDX_SEQ]
b_seq = state[PathSet.STATE_IDX_SEQ_SUCCPRED]
b_time = state[PathSet.STATE_IDX_ARRDEP]
a_time = b_time - state[PathSet.STATE_IDX_LINKTIME]
trip_time = state[PathSet.STATE_IDX_ARRDEP] - state[PathSet.STATE_IDX_DEPARR]
else: # skimming always inbound
a_id_num = state[PathSet.STATE_IDX_SUCCPRED]
b_id_num = state_id
a_seq = state[PathSet.STATE_IDX_SEQ_SUCCPRED]
b_seq = state[PathSet.STATE_IDX_SEQ]
b_time = state[PathSet.STATE_IDX_DEPARR]
a_time = b_time - state[PathSet.STATE_IDX_LINKTIME]
trip_time = state[PathSet.STATE_IDX_DEPARR] - state[PathSet.STATE_IDX_ARRDEP]
# trips: linktime includes wait
if linkmode == PathSet.STATE_MODE_TRIP:
waittime = state[PathSet.STATE_IDX_LINKTIME] - trip_time
# two trips in a row -- this shouldn't happen
if linkmode == PathSet.STATE_MODE_TRIP and prev_linkmode == PathSet.STATE_MODE_TRIP:
if not is_skimming:
warn_msg =("Two trip links in a row... this shouldn't happen. person_id is %s trip is %s\npathnum is %d\nstatelist (%d): %s\n" % (
pathset.person_id, pathset.person_trip_id, pathnum, len(state_list_for_logging), str(state_list_for_logging)))
else:
warn_msg = "Two trip links in a row... this shouldn't happen."
FastTripsLogger.warn(warn_msg)
sys.exit()
if is_skimming:
linklist_entry = [pathset_id] # origin
else:
linklist_entry = [
pathset.person_id,
pathset.person_trip_id,
pathset_id, # trip list id
(pathset.person_id, pathset.person_trip_id) in Assignment.TRACE_IDS,
pf_iteration,
# 0.01 * pathfinding_iteration + iteration,
]
linklist_entry.extend(
[
pathnum,
linkmode,
mode_num,
trip_id,
a_id_num,
b_id_num,
a_seq,
b_seq,
a_time,
b_time,
state[PathSet.STATE_IDX_LINKTIME],
state[PathSet.STATE_IDX_LINKFARE],
state[PathSet.STATE_IDX_LINKCOST],
state[PathSet.STATE_IDX_LINKDIST],
waittime,
link_num
]
)
return linklist_entry
def setup_passenger_pathsets(self, iteration, pathfinding_iteration, stops, trip_id_df, trips_df, modes_df,
transfers, tazs, prepend_route_id_to_trip_id,
):
"""
Converts pathfinding results (which is stored in each Passenger :py:class:`PathSet`) into two
:py:class:`pandas.DataFrame` instances.
Returns two :py:class:`pandas.DataFrame` instances: pathset_paths_df and pathset_links_df.
These only include pathsets for person trips which have just been sought (e.g. those in
:py:attr:`Passenger.pathfind_trip_list_df`)
pathset_paths_df has path set information, where each row represents a passenger's path:
================== =============== =====================================================================================================
column name column type description
================== =============== =====================================================================================================
`person_id` object person ID
`person_trip_id` object person trip ID
`trip_list_id_num` int64 trip list numerical ID
`trace` bool Are we tracing this person trip?
`pathdir` int64 the :py:attr:`PathSet.direction`
`pathmode` object the :py:attr:`PathSet.mode`
`pf_iteration` float64 iteration + 0.01*pathfinding_iteration in which these paths were found
`pathnum` int64 the path number for the path within the pathset
`pf_cost` float64 the cost of the entire path
`pf_fare` float64 the fare of the entire path
`pf_probability` float64 the probability of the path
`pf_initcost` float64 the initial cost of the entire path
`pf_initfare` float64 the initial fare of the entire path
`description` object string representation of the path
================== =============== =====================================================================================================
pathset_links_df has path link information, where each row represents a link in a passenger's path:
================== =============== =====================================================================================================
column name column type description
================== =============== =====================================================================================================
`person_id` object person ID
`person_trip_id` object person trip ID
`trip_list_id_num` int64 trip list numerical ID
`trace` bool Are we tracing this person trip?
`pf_iteration` float64 iteration + 0.01*pathfinding_iteration in which these paths were found
`pathnum` int64 the path number for the path within the pathset
`linkmode` object the mode of the link, one of :py:attr:`PathSet.STATE_MODE_ACCESS`, :py:attr:`PathSet.STATE_MODE_EGRESS`,
:py:attr:`PathSet.STATE_MODE_TRANSFER` or :py:attr:`PathSet.STATE_MODE_TRIP`. PathSets will always start with
access, followed by trips with transfers in between, and ending in an egress following the last trip.
`mode_num` int64 the mode number for the link
`mode` object the supply mode for the link
`route_id` object the route ID for trip links. Set to :py:attr:`numpy.nan` for non-trip links.
`trip_id` object the trip ID for trip links. Set to :py:attr:`numpy.nan` for non-trip links.
`trip_id_num` float64 the numerical trip ID for trip links. Set to :py:attr:`numpy.nan` for non-trip links.
`A_id` object the stop ID at the start of the link, or TAZ ID for access links
`A_id_num` int64 the numerical stop ID at the start of the link, or a numerical TAZ ID for access links
`B_id` object the stop ID at the end of the link, or a TAZ ID for access links
`B_id_num` int64 the numerical stop ID at the end of the link, or a numerical TAZ ID for access links
`A_seq` int64 the sequence number for the stop at the start of the link, or -1 for access links
        `B_seq`            int64           the sequence number for the stop at the end of the link, or -1 for access links
`pf_A_time` datetime64[ns] the time the passenger arrives at `A_id`
`pf_B_time` datetime64[ns] the time the passenger arrives at `B_id`
`pf_linktime` timedelta64[ns] the time spent on the link
`pf_linkfare` float64 the fare of the link
`pf_linkcost` float64 the generalized cost of the link
`pf_linkdist` float64 the distance for the link
`A_lat` float64 the latitude of A (if it's a stop)
`A_lon` float64 the longitude of A (if it's a stop)
`B_lat` float64 the latitude of B (if it's a stop)
`B_lon` float64 the longitude of B (if it's a stop)
================== =============== =====================================================================================================
"""
from .PathSet import PathSet
trip_list_id_nums = self.pathfind_trip_list_df[Passenger.TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM].tolist()
# only process if we just did pathfinding for this person trip
pathset_dict = {
trip_list_id: pathset for trip_list_id, pathset in self.id_to_pathset.items() if
trip_list_id in trip_list_id_nums
}
pf_iteration = 0.01 * pathfinding_iteration + iteration
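        # e.g. iteration=1, pathfinding_iteration=2 gives pf_iteration = 0.01*2 + 1 = 1.02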
pathlist, linklist = self.setup_pathsets_generic(pathset_dict, pf_iteration, is_skimming=False)
FastTripsLogger.debug("setup_passenger_pathsets(): pathlist and linklist constructed")
pathset_paths_df = pd.DataFrame(pathlist, columns=[ \
Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM,
Passenger.TRIP_LIST_COLUMN_TRACE,
'pathdir', # for debugging
'pathmode', # for output
Passenger.PF_COL_PF_ITERATION,
Passenger.PF_COL_PATH_NUM,
PathSet.PATH_KEY_COST,
PathSet.PATH_KEY_FARE,
PathSet.PATH_KEY_PROBABILITY,
PathSet.PATH_KEY_INIT_COST,
PathSet.PATH_KEY_INIT_FARE])
pathset_links_df = pd.DataFrame(linklist, columns=[ \
Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM,
Passenger.TRIP_LIST_COLUMN_TRACE,
Passenger.PF_COL_PF_ITERATION,
Passenger.PF_COL_PATH_NUM,
Passenger.PF_COL_LINK_MODE,
Route.ROUTES_COLUMN_MODE_NUM,
Trip.TRIPS_COLUMN_TRIP_ID_NUM,
'A_id_num', 'B_id_num',
'A_seq', 'B_seq',
Passenger.PF_COL_PAX_A_TIME,
Passenger.PF_COL_PAX_B_TIME,
Passenger.PF_COL_LINK_TIME,
Passenger.PF_COL_LINK_FARE,
Passenger.PF_COL_LINK_COST,
Passenger.PF_COL_LINK_DIST,
Passenger.PF_COL_WAIT_TIME,
Passenger.PF_COL_LINK_NUM])
FastTripsLogger.debug(
"setup_passenger_pathsets(): pathset_paths_df(%d) and pathset_links_df(%d) dataframes constructed" % (
len(pathset_paths_df), len(pathset_links_df)))
return self.clean_pathset_dfs(
pathset_paths_df, pathset_links_df, stops, trip_id_df, trips_df, modes_df,
prepend_route_id_to_trip_id=prepend_route_id_to_trip_id, is_skimming=False
)
@staticmethod
def clean_pathset_dfs(pathset_paths_df, pathset_links_df, stops, trip_id_df, trips_df, modes_df, *,
prepend_route_id_to_trip_id, is_skimming):
from .Skimming import Skimming
from .PathSet import PathSet
if prepend_route_id_to_trip_id and is_skimming:
raise NotImplementedError("prepend_route_id_to_trip_id not implemented for skimming")
# get A_id and B_id and trip_id
pathset_links_df = stops.add_stop_id_for_numeric_id(pathset_links_df, 'A_id_num', 'A_id')
pathset_links_df = stops.add_stop_id_for_numeric_id(pathset_links_df, 'B_id_num', 'B_id')
# get A_lat, A_lon, B_lat, B_lon
pathset_links_df = stops.add_stop_lat_lon(pathset_links_df, id_colname="A_id", new_lat_colname="A_lat",
new_lon_colname="A_lon")
pathset_links_df = stops.add_stop_lat_lon(pathset_links_df, id_colname="B_id", new_lat_colname="B_lat",
new_lon_colname="B_lon")
# get trip_id
pathset_links_df = Util.add_new_id(input_df=pathset_links_df, id_colname=Trip.TRIPS_COLUMN_TRIP_ID_NUM,
newid_colname=Trip.TRIPS_COLUMN_TRIP_ID,
mapping_df=trip_id_df, mapping_id_colname=Trip.TRIPS_COLUMN_TRIP_ID_NUM,
mapping_newid_colname=Trip.TRIPS_COLUMN_TRIP_ID)
# get route id
# mode_num will appear in left (for non-transit links) and right (for transit link) both, so we need to consolidate
pathset_links_df = pd.merge(left=pathset_links_df, right=trips_df[
[Trip.TRIPS_COLUMN_TRIP_ID, Trip.TRIPS_COLUMN_ROUTE_ID, Route.ROUTES_COLUMN_MODE_NUM]],
how="left", on=Trip.TRIPS_COLUMN_TRIP_ID)
pathset_links_df[Route.ROUTES_COLUMN_MODE_NUM] = pathset_links_df["%s_x" % Route.ROUTES_COLUMN_MODE_NUM]
pathset_links_df.loc[
pd.notnull(pathset_links_df["%s_y" % Route.ROUTES_COLUMN_MODE_NUM]), Route.ROUTES_COLUMN_MODE_NUM] = \
pathset_links_df["%s_y" % Route.ROUTES_COLUMN_MODE_NUM]
pathset_links_df.drop(["%s_x" % Route.ROUTES_COLUMN_MODE_NUM,
"%s_y" % Route.ROUTES_COLUMN_MODE_NUM], axis=1, inplace=True)
if not is_skimming:
            # TODO: should this apply in both cases?
# verify it's always set
FastTripsLogger.debug("Have %d links with no mode number set" % len(
                pathset_links_df.loc[pd.isnull(pathset_links_df[Route.ROUTES_COLUMN_MODE_NUM])]))
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import operator
import warnings
from functools import wraps, partial
from numbers import Number, Integral
from operator import getitem
from pprint import pformat
import numpy as np
import pandas as pd
from pandas.util import cache_readonly, hash_pandas_object
from pandas.api.types import is_bool_dtype, is_timedelta64_dtype, \
is_numeric_dtype, is_datetime64_any_dtype
from toolz import merge, first, unique, partition_all, remove
try:
from chest import Chest as Cache
except ImportError:
Cache = dict
from .. import array as da
from .. import core
from ..utils import partial_by_order, Dispatch, IndexCallable
from .. import threaded
from ..compatibility import (apply, operator_div, bind_method, string_types,
isidentifier,
Iterator, Sequence)
from ..context import globalmethod
from ..utils import (random_state_data, pseudorandom, derived_from, funcname,
memory_repr, put_lines, M, key_split, OperatorMethodMixin,
is_arraylike, typename, skip_doctest)
from ..array.core import Array, normalize_arg
from ..array.utils import empty_like_safe
from ..blockwise import blockwise, Blockwise
from ..base import DaskMethodsMixin, tokenize, dont_optimize, is_dask_collection
from ..delayed import delayed, Delayed, unpack_collections
from ..highlevelgraph import HighLevelGraph
from . import methods
from .accessor import DatetimeAccessor, StringAccessor
from .categorical import CategoricalAccessor, categorize
from .optimize import optimize
from .utils import (meta_nonempty, make_meta, insert_meta_param_description,
raise_on_meta_error, clear_known_categories,
is_categorical_dtype, has_known_categories, PANDAS_VERSION,
index_summary, is_dataframe_like, is_series_like,
is_index_like, valid_divisions)
no_default = '__no_default__'
pd.set_option('compute.use_numexpr', False)
def _concat(args):
if not args:
return args
if isinstance(first(core.flatten(args)), np.ndarray):
return da.core.concatenate3(args)
if not has_parallel_type(args[0]):
try:
return pd.Series(args)
except Exception:
return args
# We filter out empty partitions here because pandas frequently has
# inconsistent dtypes in results between empty and non-empty frames.
# Ideally this would be handled locally for each operation, but in practice
# this seems easier. TODO: don't do this.
args2 = [i for i in args if len(i)]
return args[0] if not args2 else methods.concat(args2, uniform=True)
def finalize(results):
return _concat(results)
class Scalar(DaskMethodsMixin, OperatorMethodMixin):
""" A Dask object to represent a pandas scalar"""
def __init__(self, dsk, name, meta, divisions=None):
# divisions is ignored, only present to be compatible with other
# objects.
if not isinstance(dsk, HighLevelGraph):
dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])
self.dask = dsk
self._name = name
meta = make_meta(meta)
if is_dataframe_like(meta) or is_series_like(meta) or is_index_like(meta):
raise TypeError("Expected meta to specify scalar, got "
"{0}".format(typename(type(meta))))
self._meta = meta
def __dask_graph__(self):
return self.dask
def __dask_keys__(self):
return [self.key]
def __dask_tokenize__(self):
return self._name
def __dask_layers__(self):
return (self.key,)
__dask_optimize__ = globalmethod(optimize, key='dataframe_optimize',
falsey=dont_optimize)
__dask_scheduler__ = staticmethod(threaded.get)
def __dask_postcompute__(self):
return first, ()
def __dask_postpersist__(self):
return Scalar, (self._name, self._meta, self.divisions)
@property
def _meta_nonempty(self):
return self._meta
@property
def dtype(self):
return self._meta.dtype
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
if not hasattr(self._meta, 'dtype'):
o.remove('dtype') # dtype only in `dir` if available
return list(o)
@property
def divisions(self):
"""Dummy divisions to be compat with Series and DataFrame"""
return [None, None]
def __repr__(self):
name = self._name if len(self._name) < 10 else self._name[:7] + '...'
if hasattr(self._meta, 'dtype'):
extra = ', dtype=%s' % self._meta.dtype
else:
extra = ', type=%s' % type(self._meta).__name__
return "dd.Scalar<%s%s>" % (name, extra)
def __array__(self):
# array interface is required to support pandas instance + Scalar
# Otherwise, above op results in pd.Series of Scalar (object dtype)
return np.asarray(self.compute())
@property
def _args(self):
return (self.dask, self._name, self._meta)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self._name, self._meta = state
@property
def key(self):
return (self._name, 0)
@classmethod
def _get_unary_operator(cls, op):
def f(self):
name = funcname(op) + '-' + tokenize(self)
dsk = {(name, 0): (op, (self._name, 0))}
meta = op(self._meta_nonempty)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return Scalar(graph, name, meta)
return f
@classmethod
def _get_binary_operator(cls, op, inv=False):
return lambda self, other: _scalar_binary(op, self, other, inv=inv)
def to_delayed(self, optimize_graph=True):
"""Convert into a ``dask.delayed`` object.
Parameters
----------
optimize_graph : bool, optional
If True [default], the graph is optimized before converting into
``dask.delayed`` objects.
"""
dsk = self.__dask_graph__()
if optimize_graph:
dsk = self.__dask_optimize__(dsk, self.__dask_keys__())
name = 'delayed-' + self._name
dsk = HighLevelGraph.from_collections(name, dsk, dependencies=())
return Delayed(self.key, dsk)
def _scalar_binary(op, self, other, inv=False):
name = '{0}-{1}'.format(funcname(op), tokenize(self, other))
dependencies = [self]
dsk = {}
return_type = get_parallel_type(other)
if isinstance(other, Scalar):
dependencies.append(other)
other_key = (other._name, 0)
elif is_dask_collection(other):
return NotImplemented
else:
other_key = other
if inv:
dsk.update({(name, 0): (op, other_key, (self._name, 0))})
else:
dsk.update({(name, 0): (op, (self._name, 0), other_key)})
other_meta = make_meta(other)
other_meta_nonempty = meta_nonempty(other_meta)
if inv:
meta = op(other_meta_nonempty, self._meta_nonempty)
else:
meta = op(self._meta_nonempty, other_meta_nonempty)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
if return_type is not Scalar:
return return_type(graph, name, meta,
[other.index.min(), other.index.max()])
else:
return Scalar(graph, name, meta)
class _Frame(DaskMethodsMixin, OperatorMethodMixin):
""" Superclass for DataFrame and Series
Parameters
----------
dsk: dict
The dask graph to compute this DataFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame / Series
meta: pandas.DataFrame, pandas.Series, or pandas.Index
An empty pandas object with names, dtypes, and indices matching the
expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
def __init__(self, dsk, name, meta, divisions):
if not isinstance(dsk, HighLevelGraph):
dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])
self.dask = dsk
self._name = name
meta = make_meta(meta)
if not self._is_partition_type(meta):
raise TypeError("Expected meta to specify type {0}, got type "
"{1}".format(type(self).__name__,
typename(type(meta))))
self._meta = meta
self.divisions = tuple(divisions)
def __dask_graph__(self):
return self.dask
def __dask_keys__(self):
return [(self._name, i) for i in range(self.npartitions)]
def __dask_layers__(self):
return (self._name,)
def __dask_tokenize__(self):
return self._name
__dask_optimize__ = globalmethod(optimize, key='dataframe_optimize',
falsey=dont_optimize)
__dask_scheduler__ = staticmethod(threaded.get)
def __dask_postcompute__(self):
return finalize, ()
def __dask_postpersist__(self):
return type(self), (self._name, self._meta, self.divisions)
@property
def _constructor(self):
return new_dd_object
@property
def npartitions(self):
"""Return number of partitions"""
return len(self.divisions) - 1
@property
def size(self):
"""Size of the Series or DataFrame as a Delayed object.
Examples
--------
>>> series.size # doctest: +SKIP
dd.Scalar<size-ag..., dtype=int64>
"""
return self.reduction(methods.size, np.sum, token='size', meta=int,
split_every=False)
@property
def _meta_nonempty(self):
""" A non-empty version of `_meta` with fake data."""
return meta_nonempty(self._meta)
@property
def _args(self):
return (self.dask, self._name, self._meta, self.divisions)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self._name, self._meta, self.divisions = state
def copy(self):
""" Make a copy of the dataframe
This is strictly a shallow copy of the underlying computational graph.
It does not affect the underlying data
"""
return new_dd_object(self.dask, self._name,
self._meta, self.divisions)
def __array__(self, dtype=None, **kwargs):
self._computed = self.compute()
x = np.array(self._computed)
return x
def __array_wrap__(self, array, context=None):
raise NotImplementedError
def __array_ufunc__(self, numpy_ufunc, method, *inputs, **kwargs):
out = kwargs.get('out', ())
for x in inputs + out:
# ufuncs work with 0-dimensional NumPy ndarrays
# so we don't want to raise NotImplemented
if isinstance(x, np.ndarray) and x.shape == ():
continue
elif not isinstance(x, (Number, Scalar, _Frame, Array,
pd.DataFrame, pd.Series, pd.Index)):
return NotImplemented
if method == '__call__':
if numpy_ufunc.signature is not None:
return NotImplemented
if numpy_ufunc.nout > 1:
# ufuncs with multiple output values
# are not yet supported for frames
return NotImplemented
else:
return elemwise(numpy_ufunc, *inputs, **kwargs)
else:
# ufunc methods are not yet supported for frames
return NotImplemented
@property
def _elemwise(self):
return elemwise
def _repr_data(self):
raise NotImplementedError
@property
def _repr_divisions(self):
name = "npartitions={0}".format(self.npartitions)
if self.known_divisions:
divisions = pd.Index(self.divisions, name=name)
else:
            # avoid being converted to NaN
divisions = pd.Index([''] * (self.npartitions + 1), name=name)
return divisions
def __repr__(self):
data = self._repr_data().to_string(max_rows=5, show_dimensions=False)
return """Dask {klass} Structure:
{data}
Dask Name: {name}, {task} tasks""".format(klass=self.__class__.__name__,
data=data, name=key_split(self._name),
task=len(self.dask))
@property
def index(self):
"""Return dask Index instance"""
return self.map_partitions(getattr, 'index', token=self._name + '-index',
meta=self._meta.index)
@index.setter
def index(self, value):
self.divisions = value.divisions
result = map_partitions(methods.assign_index, self, value)
self.dask = result.dask
self._name = result._name
self._meta = result._meta
def reset_index(self, drop=False):
"""Reset the index to the default index.
Note that unlike in ``pandas``, the reset ``dask.dataframe`` index will
not be monotonically increasing from 0. Instead, it will restart at 0
for each partition (e.g. ``index1 = [0, ..., 10], index2 = [0, ...]``).
This is due to the inability to statically know the full length of the
index.
For DataFrame with multi-level index, returns a new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
drop : boolean, default False
Do not try to insert index into dataframe columns.
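        Examples
        --------
        A minimal sketch (assumes ``ddf`` is an existing dask DataFrame; as noted
        above, the resulting index restarts at 0 within each partition):

        >>> ddf.reset_index(drop=True)  # doctest: +SKIP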
"""
return self.map_partitions(M.reset_index, drop=drop).clear_divisions()
@property
def known_divisions(self):
"""Whether divisions are already known"""
return len(self.divisions) > 0 and self.divisions[0] is not None
def clear_divisions(self):
""" Forget division information """
divisions = (None,) * (self.npartitions + 1)
return type(self)(self.dask, self._name, self._meta, divisions)
def get_partition(self, n):
"""Get a dask DataFrame/Series representing the `nth` partition."""
if 0 <= n < self.npartitions:
name = 'get-partition-%s-%s' % (str(n), self._name)
divisions = self.divisions[n:n + 2]
layer = {(name, 0): (self._name, n)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])
return new_dd_object(graph, name, self._meta, divisions)
else:
msg = "n must be 0 <= n < {0}".format(self.npartitions)
raise ValueError(msg)
@derived_from(pd.DataFrame)
def drop_duplicates(self, split_every=None, split_out=1, **kwargs):
# Let pandas error on bad inputs
self._meta_nonempty.drop_duplicates(**kwargs)
if 'subset' in kwargs and kwargs['subset'] is not None:
split_out_setup = split_out_on_cols
split_out_setup_kwargs = {'cols': kwargs['subset']}
else:
split_out_setup = split_out_setup_kwargs = None
if kwargs.get('keep', True) is False:
raise NotImplementedError("drop_duplicates with keep=False")
chunk = M.drop_duplicates
return aca(self, chunk=chunk, aggregate=chunk, meta=self._meta,
token='drop-duplicates', split_every=split_every,
split_out=split_out, split_out_setup=split_out_setup,
split_out_setup_kwargs=split_out_setup_kwargs, **kwargs)
def __len__(self):
return self.reduction(len, np.sum, token='len', meta=int,
split_every=False).compute()
def __bool__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.any() or a.all()."
.format(self.__class__.__name__))
__nonzero__ = __bool__ # python 2
def _scalarfunc(self, cast_type):
def wrapper():
raise TypeError("cannot convert the series to "
"{0}".format(str(cast_type)))
return wrapper
def __float__(self):
return self._scalarfunc(float)
def __int__(self):
return self._scalarfunc(int)
__long__ = __int__ # python 2
def __complex__(self):
return self._scalarfunc(complex)
@insert_meta_param_description(pad=12)
def map_partitions(self, func, *args, **kwargs):
""" Apply Python function on each DataFrame partition.
Note that the index and divisions are assumed to remain unchanged.
Parameters
----------
func : function
Function applied to each partition.
args, kwargs :
Arguments and keywords to pass to the function. The partition will
be the first argument, and these will be passed *after*. Arguments
and keywords may contain ``Scalar``, ``Delayed`` or regular
python objects. DataFrame-like args (both dask and pandas) will be
repartitioned to align (if necessary) before applying the function.
$META
Examples
--------
Given a DataFrame, Series, or Index, such as:
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
One can use ``map_partitions`` to apply a function on each partition.
Extra arguments and keywords can optionally be provided, and will be
passed to the function after the partition.
Here we apply a function with arguments and keywords to a DataFrame,
resulting in a Series:
>>> def myadd(df, a, b=1):
... return df.x + df.y + a + b
>>> res = ddf.map_partitions(myadd, 1, b=2)
>>> res.dtype
dtype('float64')
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with no name, and dtype
``float64``:
>>> res = ddf.map_partitions(myadd, 1, b=2, meta=(None, 'f8'))
Here we map a function that takes in a DataFrame, and returns a
DataFrame with a new column:
>>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y))
>>> res.dtypes
x int64
y float64
z float64
dtype: object
As before, the output metadata can also be specified manually. This
time we pass in a ``dict``, as the output is a DataFrame:
>>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y),
... meta={'x': 'i8', 'y': 'f8', 'z': 'f8'})
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.map_partitions(lambda df: df.head(), meta=df)
Also note that the index and divisions are assumed to remain unchanged.
If the function you're mapping changes the index/divisions, you'll need
to clear them afterwards:
>>> ddf.map_partitions(func).clear_divisions() # doctest: +SKIP
"""
return map_partitions(func, self, *args, **kwargs)
@insert_meta_param_description(pad=12)
def map_overlap(self, func, before, after, *args, **kwargs):
"""Apply a function to each partition, sharing rows with adjacent partitions.
This can be useful for implementing windowing functions such as
``df.rolling(...).mean()`` or ``df.diff()``.
Parameters
----------
func : function
Function applied to each partition.
before : int
The number of rows to prepend to partition ``i`` from the end of
partition ``i - 1``.
after : int
The number of rows to append to partition ``i`` from the beginning
of partition ``i + 1``.
args, kwargs :
Arguments and keywords to pass to the function. The partition will
be the first argument, and these will be passed *after*.
$META
Notes
-----
Given positive integers ``before`` and ``after``, and a function
``func``, ``map_overlap`` does the following:
1. Prepend ``before`` rows to each partition ``i`` from the end of
partition ``i - 1``. The first partition has no rows prepended.
2. Append ``after`` rows to each partition ``i`` from the beginning of
partition ``i + 1``. The last partition has no rows appended.
3. Apply ``func`` to each partition, passing in any extra ``args`` and
``kwargs`` if provided.
4. Trim ``before`` rows from the beginning of all but the first
partition.
5. Trim ``after`` rows from the end of all but the last partition.
Note that the index and divisions are assumed to remain unchanged.
Examples
--------
Given a DataFrame, Series, or Index, such as:
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 4, 7, 11],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
A rolling sum with a trailing moving window of size 2 can be computed by
overlapping 2 rows before each partition, and then mapping calls to
``df.rolling(2).sum()``:
>>> ddf.compute()
x y
0 1 1.0
1 2 2.0
2 4 3.0
3 7 4.0
4 11 5.0
>>> ddf.map_overlap(lambda df: df.rolling(2).sum(), 2, 0).compute()
x y
0 NaN NaN
1 3.0 3.0
2 6.0 5.0
3 11.0 7.0
4 18.0 9.0
The pandas ``diff`` method computes a discrete difference shifted by a
number of periods (can be positive or negative). This can be
implemented by mapping calls to ``df.diff`` to each partition after
prepending/appending that many rows, depending on sign:
>>> def diff(df, periods=1):
... before, after = (periods, 0) if periods > 0 else (0, -periods)
... return df.map_overlap(lambda df, periods=1: df.diff(periods),
...                           before, after, periods=periods)
>>> diff(ddf, 1).compute()
x y
0 NaN NaN
1 1.0 1.0
2 2.0 1.0
3 3.0 1.0
4 4.0 1.0
If you have a ``DatetimeIndex``, you can use a ``pd.Timedelta`` for time-
based windows.
>>> ts = pd.Series(range(10), index=pd.date_range('2017', periods=10))
>>> dts = dd.from_pandas(ts, npartitions=2)
>>> dts.map_overlap(lambda df: df.rolling('2D').sum(),
... pd.Timedelta('2D'), 0).compute()
2017-01-01 0.0
2017-01-02 1.0
2017-01-03 3.0
2017-01-04 5.0
2017-01-05 7.0
2017-01-06 9.0
2017-01-07 11.0
2017-01-08 13.0
2017-01-09 15.0
2017-01-10 17.0
dtype: float64
"""
from .rolling import map_overlap
return map_overlap(func, self, before, after, *args, **kwargs)
@insert_meta_param_description(pad=12)
def reduction(self, chunk, aggregate=None, combine=None, meta=no_default,
token=None, split_every=None, chunk_kwargs=None,
aggregate_kwargs=None, combine_kwargs=None, **kwargs):
"""Generic row-wise reductions.
Parameters
----------
chunk : callable
Function to operate on each partition. Should return a
``pandas.DataFrame``, ``pandas.Series``, or a scalar.
aggregate : callable, optional
Function to operate on the concatenated result of ``chunk``. If not
specified, defaults to ``chunk``. Used to do the final aggregation
in a tree reduction.
The input to ``aggregate`` depends on the output of ``chunk``.
If the output of ``chunk`` is a:
- scalar: Input is a Series, with one row per partition.
- Series: Input is a DataFrame, with one row per partition. Columns
are the rows in the output series.
- DataFrame: Input is a DataFrame, with one row per partition.
Columns are the columns in the output dataframes.
Should return a ``pandas.DataFrame``, ``pandas.Series``, or a
scalar.
combine : callable, optional
Function to operate on intermediate concatenated results of
``chunk`` in a tree-reduction. If not provided, defaults to
``aggregate``. The input/output requirements should match that of
``aggregate`` described above.
$META
token : str, optional
The name to use for the output keys.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used,
and all intermediates will be concatenated and passed to
``aggregate``. Default is 8.
chunk_kwargs : dict, optional
Keyword arguments to pass on to ``chunk`` only.
aggregate_kwargs : dict, optional
Keyword arguments to pass on to ``aggregate`` only.
combine_kwargs : dict, optional
Keyword arguments to pass on to ``combine`` only.
kwargs :
All remaining keywords will be passed to ``chunk``, ``combine``,
and ``aggregate``.
Examples
--------
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
>>> ddf = dd.from_pandas(df, npartitions=4)
Count the number of rows in a DataFrame. To do this, count the number
of rows in each partition, then sum the results:
>>> res = ddf.reduction(lambda x: x.count(),
... aggregate=lambda x: x.sum())
>>> res.compute()
x 50
y 50
dtype: int64
Count the number of rows in a Series with elements greater than or
equal to a value (provided via a keyword).
>>> def count_greater(x, value=0):
... return (x >= value).sum()
>>> res = ddf.x.reduction(count_greater, aggregate=lambda x: x.sum(),
... chunk_kwargs={'value': 25})
>>> res.compute()
25
Aggregate both the sum and count of a Series at the same time:
>>> def sum_and_count(x):
... return pd.Series({'count': x.count(), 'sum': x.sum()},
... index=['count', 'sum'])
>>> res = ddf.x.reduction(sum_and_count, aggregate=lambda x: x.sum())
>>> res.compute()
count 50
sum 1225
dtype: int64
Doing the same, but for a DataFrame. Here ``chunk`` returns a
DataFrame, meaning the input to ``aggregate`` is a DataFrame with an
index with non-unique entries for both 'x' and 'y'. We groupby the
index, and sum each group to get the final result.
>>> def sum_and_count(x):
... return pd.DataFrame({'count': x.count(), 'sum': x.sum()},
... columns=['count', 'sum'])
>>> res = ddf.reduction(sum_and_count,
... aggregate=lambda x: x.groupby(level=0).sum())
>>> res.compute()
count sum
x 50 1225
y 50 3725
"""
if aggregate is None:
aggregate = chunk
if combine is None:
if combine_kwargs:
raise ValueError("`combine_kwargs` provided with no `combine`")
combine = aggregate
combine_kwargs = aggregate_kwargs
chunk_kwargs = chunk_kwargs.copy() if chunk_kwargs else {}
chunk_kwargs['aca_chunk'] = chunk
combine_kwargs = combine_kwargs.copy() if combine_kwargs else {}
combine_kwargs['aca_combine'] = combine
aggregate_kwargs = aggregate_kwargs.copy() if aggregate_kwargs else {}
aggregate_kwargs['aca_aggregate'] = aggregate
return aca(self, chunk=_reduction_chunk, aggregate=_reduction_aggregate,
combine=_reduction_combine, meta=meta, token=token,
split_every=split_every, chunk_kwargs=chunk_kwargs,
aggregate_kwargs=aggregate_kwargs,
combine_kwargs=combine_kwargs, **kwargs)
@derived_from(pd.DataFrame)
def pipe(self, func, *args, **kwargs):
# Taken from pandas:
# https://github.com/pydata/pandas/blob/master/pandas/core/generic.py#L2698-L2707
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError('%s is both the pipe target and a keyword '
'argument' % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def random_split(self, frac, random_state=None):
""" Pseudorandomly split dataframe into different pieces row-wise
Parameters
----------
frac : list
List of floats that should sum to one.
random_state : int or np.random.RandomState, optional
If an int, create a new RandomState with this as the seed.
Otherwise draw from the passed RandomState.
Examples
--------
50/50 split
>>> a, b = df.random_split([0.5, 0.5]) # doctest: +SKIP
80/10/10 split, consistent random_state
>>> a, b, c = df.random_split([0.8, 0.1, 0.1], random_state=123) # doctest: +SKIP
See Also
--------
dask.DataFrame.sample
"""
if not np.allclose(sum(frac), 1):
raise ValueError("frac should sum to 1")
state_data = random_state_data(self.npartitions, random_state)
token = tokenize(self, frac, random_state)
name = 'split-' + token
layer = {(name, i): (pd_split, (self._name, i), frac, state)
for i, state in enumerate(state_data)}
out = []
for i in range(len(frac)):
name2 = 'split-%d-%s' % (i, token)
dsk2 = {(name2, j): (getitem, (name, j), i)
for j in range(self.npartitions)}
graph = HighLevelGraph.from_collections(name2, merge(dsk2, layer), dependencies=[self])
out_df = type(self)(graph, name2, self._meta, self.divisions)
out.append(out_df)
return out
def head(self, n=5, npartitions=1, compute=True):
""" First n rows of the dataset
Parameters
----------
n : int, optional
The number of rows to return. Default is 5.
npartitions : int, optional
Elements are only taken from the first ``npartitions``, with a
default of 1. If there are fewer than ``n`` rows in the first
``npartitions`` a warning will be raised and any found rows
returned. Pass -1 to use all partitions.
compute : bool, optional
Whether to compute the result, default is True.
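Examples
--------
Illustrative sketch only; ``ddf`` is assumed to be an existing dask
DataFrame (it is not defined here):
>>> ddf.head(3)  # doctest: +SKIP
>>> ddf.head(10, npartitions=-1)  # doctest: +SKIP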
"""
return self._head(n=n, npartitions=npartitions, compute=compute, safe=True)
def _head(self, n, npartitions, compute, safe):
if npartitions <= -1:
npartitions = self.npartitions
if npartitions > self.npartitions:
msg = "only {} partitions, head received {}"
raise ValueError(msg.format(self.npartitions, npartitions))
name = 'head-%d-%d-%s' % (npartitions, n, self._name)
if safe:
head = safe_head
else:
head = M.head
if npartitions > 1:
name_p = 'head-partial-%d-%s' % (n, self._name)
dsk = {}
for i in range(npartitions):
dsk[(name_p, i)] = (M.head, (self._name, i), n)
concat = (_concat, [(name_p, i) for i in range(npartitions)])
dsk[(name, 0)] = (head, concat, n)
else:
dsk = {(name, 0): (head, (self._name, 0), n)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
result = new_dd_object(graph, name, self._meta,
[self.divisions[0], self.divisions[npartitions]])
if compute:
result = result.compute()
return result
def tail(self, n=5, compute=True):
""" Last n rows of the dataset
Caveat: this only checks the last n rows of the last partition.
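Examples
--------
Illustrative sketch only; ``ddf`` is assumed to be an existing dask
DataFrame (it is not defined here):
>>> ddf.tail(3)  # doctest: +SKIP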
"""
name = 'tail-%d-%s' % (n, self._name)
dsk = {(name, 0): (M.tail, (self._name, self.npartitions - 1), n)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
result = new_dd_object(graph, name, self._meta, self.divisions[-2:])
if compute:
result = result.compute()
return result
@property
def loc(self):
""" Purely label-location based indexer for selection by label.
>>> df.loc["b"] # doctest: +SKIP
>>> df.loc["b":"d"] # doctest: +SKIP
"""
from .indexing import _LocIndexer
return _LocIndexer(self)
def _partitions(self, index):
if not isinstance(index, tuple):
index = (index,)
from ..array.slicing import normalize_index
index = normalize_index(index, (self.npartitions,))
index = tuple(slice(k, k + 1) if isinstance(k, Number) else k
for k in index)
name = 'blocks-' + tokenize(self, index)
new_keys = np.array(self.__dask_keys__(), dtype=object)[index].tolist()
divisions = [self.divisions[i] for _, i in new_keys] + [self.divisions[new_keys[-1][1] + 1]]
dsk = {(name, i): tuple(key) for i, key in enumerate(new_keys)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return new_dd_object(graph, name, self._meta, divisions)
@property
def partitions(self):
""" Slice dataframe by partitions
This allows partitionwise slicing of a Dask Dataframe. You can perform normal
Numpy-style slicing but now rather than slice elements of the array you
slice along partitions so, for example, ``df.partitions[:5]`` produces a new
Dask Dataframe of the first five partitions.
Examples
--------
>>> df.partitions[0] # doctest: +SKIP
>>> df.partitions[:3] # doctest: +SKIP
>>> df.partitions[::10] # doctest: +SKIP
Returns
-------
A Dask DataFrame
"""
return IndexCallable(self._partitions)
# Note: iloc is implemented only on DataFrame
def repartition(self, divisions=None, npartitions=None, freq=None, force=False):
""" Repartition dataframe along new divisions
Parameters
----------
divisions : list, optional
List of partitions to be used. If specified npartitions will be
ignored.
npartitions : int, optional
Number of partitions of output. Only used if divisions isn't
specified.
freq : str, pd.Timedelta
A period on which to partition timeseries data like ``'7D'`` or
``'12h'`` or ``pd.Timedelta(hours=12)``. Assumes a datetime index.
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> df = df.repartition(npartitions=10) # doctest: +SKIP
>>> df = df.repartition(divisions=[0, 5, 10, 20]) # doctest: +SKIP
>>> df = df.repartition(freq='7d') # doctest: +SKIP
"""
if npartitions is not None and divisions is not None:
warnings.warn("When providing both npartitions and divisions to "
"repartition only npartitions is used.")
if npartitions is not None:
return repartition_npartitions(self, npartitions)
elif divisions is not None:
return repartition(self, divisions, force=force)
elif freq is not None:
return repartition_freq(self, freq=freq)
else:
raise ValueError(
"Provide either divisions= or npartitions= to repartition")
@derived_from(pd.DataFrame)
def fillna(self, value=None, method=None, limit=None, axis=None):
axis = self._validate_axis(axis)
if method is None and limit is not None:
raise NotImplementedError("fillna with set limit and method=None")
if isinstance(value, _Frame):
test_value = value._meta_nonempty.values[0]
else:
test_value = value
meta = self._meta_nonempty.fillna(value=test_value, method=method,
limit=limit, axis=axis)
if axis == 1 or method is None:
# Control whether or not dask's partition alignment happens.
# We don't want for a pandas Series.
# We do want it for a dask Series
if is_series_like(value) and not is_dask_collection(value):
args = ()
kwargs = {'value': value}
else:
args = (value,)
kwargs = {}
return self.map_partitions(M.fillna, *args, method=method,
limit=limit, axis=axis, meta=meta,
**kwargs)
if method in ('pad', 'ffill'):
method = 'ffill'
skip_check = 0
before, after = 1 if limit is None else limit, 0
else:
method = 'bfill'
skip_check = self.npartitions - 1
before, after = 0, 1 if limit is None else limit
if limit is None:
name = 'fillna-chunk-' + tokenize(self, method)
dsk = {(name, i): (methods.fillna_check, (self._name, i),
method, i != skip_check)
for i in range(self.npartitions)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
parts = new_dd_object(graph, name, meta, self.divisions)
else:
parts = self
return parts.map_overlap(M.fillna, before, after, method=method,
limit=limit, meta=meta)
@derived_from(pd.DataFrame)
def ffill(self, axis=None, limit=None):
return self.fillna(method='ffill', limit=limit, axis=axis)
@derived_from(pd.DataFrame)
def bfill(self, axis=None, limit=None):
return self.fillna(method='bfill', limit=limit, axis=axis)
def sample(self, n=None, frac=None, replace=False, random_state=None):
""" Random sample of items
Parameters
----------
n : int, optional
Number of items to return. Not supported by dask; use ``frac``
instead.
frac : float, optional
Fraction of axis items to return.
replace : boolean, optional
Sample with or without replacement. Default = False.
random_state : int or ``np.random.RandomState``, optional
If an int, we create a new RandomState with this as the seed.
Otherwise we draw from the passed RandomState.
See Also
--------
DataFrame.random_split
pandas.DataFrame.sample
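Examples
--------
Illustrative sketch only, assuming ``ddf`` is an existing dask
DataFrame (not defined here):
>>> ddf.sample(frac=0.5)  # doctest: +SKIP
>>> ddf.sample(frac=0.5, replace=True, random_state=1234)  # doctest: +SKIP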
"""
if n is not None:
msg = ("sample does not support the number of sampled items "
"parameter, 'n'. Please use the 'frac' parameter instead.")
if isinstance(n, Number) and 0 <= n <= 1:
warnings.warn(msg)
frac = n
else:
raise ValueError(msg)
if frac is None:
raise ValueError("frac must not be None")
if random_state is None:
random_state = np.random.RandomState()
name = 'sample-' + tokenize(self, frac, replace, random_state)
state_data = random_state_data(self.npartitions, random_state)
dsk = {(name, i): (methods.sample, (self._name, i), state, frac, replace)
for i, state in enumerate(state_data)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return new_dd_object(graph, name, self._meta, self.divisions)
@derived_from(pd.DataFrame)
def replace(self, to_replace=None, value=None, regex=False):
return self.map_partitions(M.replace, to_replace=to_replace,
value=value, regex=regex)
def to_dask_array(self, lengths=None):
"""Convert a dask DataFrame to a dask array.
Parameters
----------
lengths : bool or Sequence of ints, optional
How to determine the chunks sizes for the output array.
By default, the output array will have unknown chunk lengths
along the first axis, which can cause some later operations
to fail.
* True : immediately compute the length of each partition
* Sequence : a sequence of integers to use for the chunk sizes
on the first axis. These values are *not* validated for
correctness, beyond ensuring that the number of items
matches the number of partitions.
Returns
-------
A ``dask.array.Array`` representing the values of this DataFrame or
Series.
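Examples
--------
Illustrative sketch only, assuming ``ddf`` is an existing dask
DataFrame with numeric columns (not defined here):
>>> ddf.to_dask_array()  # doctest: +SKIP
>>> ddf.to_dask_array(lengths=True)  # doctest: +SKIP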
"""
if lengths is True:
lengths = tuple(self.map_partitions(len).compute())
arr = self.values
chunks = self._validate_chunks(arr, lengths)
arr._chunks = chunks
return arr
def to_hdf(self, path_or_buf, key, mode='a', append=False, **kwargs):
""" See dd.to_hdf docstring for more information """
from .io import to_hdf
return to_hdf(self, path_or_buf, key, mode, append, **kwargs)
def to_csv(self, filename, **kwargs):
""" See dd.to_csv docstring for more information """
from .io import to_csv
return to_csv(self, filename, **kwargs)
def to_json(self, filename, *args, **kwargs):
""" See dd.to_json docstring for more information """
from .io import to_json
return to_json(self, filename, *args, **kwargs)
def to_delayed(self, optimize_graph=True):
"""Convert into a list of ``dask.delayed`` objects, one per partition.
Parameters
----------
optimize_graph : bool, optional
If True [default], the graph is optimized before converting into
``dask.delayed`` objects.
Examples
--------
>>> partitions = df.to_delayed() # doctest: +SKIP
See Also
--------
dask.dataframe.from_delayed
"""
keys = self.__dask_keys__()
graph = self.__dask_graph__()
if optimize_graph:
graph = self.__dask_optimize__(graph, self.__dask_keys__())
name = 'delayed-' + self._name
graph = HighLevelGraph.from_collections(name, graph, dependencies=())
return [Delayed(k, graph) for k in keys]
@classmethod
def _get_unary_operator(cls, op):
return lambda self: elemwise(op, self)
@classmethod
def _get_binary_operator(cls, op, inv=False):
if inv:
return lambda self, other: elemwise(op, other, self)
else:
return lambda self, other: elemwise(op, self, other)
def rolling(self, window, min_periods=None, freq=None, center=False,
win_type=None, axis=0):
"""Provides rolling transformations.
Parameters
----------
window : int, str, offset
Size of the moving window. This is the number of observations used
for calculating the statistic. When not using a ``DatetimeIndex``,
the window size must not be so large as to span more than one
adjacent partition. If using an offset or offset alias like '5D',
the data must have a ``DatetimeIndex``
.. versionchanged:: 0.15.0
Now accepts offsets and string offset aliases
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
Provide a window type. The recognized window types are identical
to pandas.
axis : int, default 0
Returns
-------
a Rolling object on which to call a method to compute a statistic
Notes
-----
The `freq` argument is not supported.
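Examples
--------
Illustrative sketch only; ``ddf`` is assumed to be an existing dask
DataFrame, and the offset form additionally assumes a ``DatetimeIndex``:
>>> ddf.rolling(3).mean()  # doctest: +SKIP
>>> ddf.rolling('5D', min_periods=2).sum()  # doctest: +SKIP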
"""
from dask.dataframe.rolling import Rolling
if isinstance(window, Integral):
if window < 0:
raise ValueError('window must be >= 0')
if min_periods is not None:
if not isinstance(min_periods, Integral):
raise ValueError('min_periods must be an integer')
if min_periods < 0:
raise ValueError('min_periods must be >= 0')
return Rolling(self, window=window, min_periods=min_periods,
freq=freq, center=center, win_type=win_type, axis=axis)
@derived_from(pd.DataFrame)
def diff(self, periods=1, axis=0):
"""
.. note::
Pandas currently uses an ``object``-dtype column to represent
boolean data with missing values. This can cause issues for
boolean-specific operations, like ``|``. To enable boolean-
specific operations, at the cost of metadata that doesn't match
pandas, use ``.astype(bool)`` after the ``diff``.
"""
axis = self._validate_axis(axis)
if not isinstance(periods, Integral):
raise TypeError("periods must be an integer")
if axis == 1:
return self.map_partitions(M.diff, token='diff', periods=periods,
axis=1)
before, after = (periods, 0) if periods > 0 else (0, -periods)
return self.map_overlap(M.diff, before, after, token='diff',
periods=periods)
@derived_from(pd.DataFrame)
def shift(self, periods=1, freq=None, axis=0):
axis = self._validate_axis(axis)
if not isinstance(periods, Integral):
raise TypeError("periods must be an integer")
if axis == 1:
return self.map_partitions(M.shift, token='shift', periods=periods,
freq=freq, axis=1)
if freq is None:
before, after = (periods, 0) if periods > 0 else (0, -periods)
return self.map_overlap(M.shift, before, after, token='shift',
periods=periods)
# Let pandas error on invalid arguments
meta = self._meta_nonempty.shift(periods, freq=freq)
out = self.map_partitions(M.shift, token='shift', periods=periods,
freq=freq, meta=meta,
transform_divisions=False)
return maybe_shift_divisions(out, periods, freq=freq)
def _reduction_agg(self, name, axis=None, skipna=True,
split_every=False, out=None):
axis = self._validate_axis(axis)
meta = getattr(self._meta_nonempty, name)(axis=axis, skipna=skipna)
token = self._token_prefix + name
method = getattr(M, name)
if axis == 1:
result = self.map_partitions(method, meta=meta,
token=token, skipna=skipna, axis=axis)
return handle_out(out, result)
else:
result = self.reduction(method, meta=meta, token=token,
skipna=skipna, axis=axis,
split_every=split_every)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return handle_out(out, result)
@derived_from(pd.DataFrame)
def abs(self):
_raise_if_object_series(self, "abs")
meta = self._meta_nonempty.abs()
return self.map_partitions(M.abs, meta=meta)
@derived_from(pd.DataFrame)
def all(self, axis=None, skipna=True, split_every=False, out=None):
return self._reduction_agg('all', axis=axis, skipna=skipna,
split_every=split_every, out=out)
@derived_from(pd.DataFrame)
def any(self, axis=None, skipna=True, split_every=False, out=None):
return self._reduction_agg('any', axis=axis, skipna=skipna,
split_every=split_every, out=out)
@derived_from(pd.DataFrame)
def sum(self, axis=None, skipna=True, split_every=False, dtype=None,
out=None, min_count=None):
result = self._reduction_agg('sum', axis=axis, skipna=skipna,
split_every=split_every, out=out)
if min_count:
return result.where(self.notnull().sum(axis=axis) >= min_count,
other=np.NaN)
else:
return result
@derived_from(pd.DataFrame)
def prod(self, axis=None, skipna=True, split_every=False, dtype=None,
out=None, min_count=None):
result = self._reduction_agg('prod', axis=axis, skipna=skipna,
split_every=split_every, out=out)
if min_count:
return result.where(self.notnull().sum(axis=axis) >= min_count,
other=np.NaN)
else:
return result
@derived_from(pd.DataFrame)
def max(self, axis=None, skipna=True, split_every=False, out=None):
return self._reduction_agg('max', axis=axis, skipna=skipna,
split_every=split_every, out=out)
@derived_from(pd.DataFrame)
def min(self, axis=None, skipna=True, split_every=False, out=None):
return self._reduction_agg('min', axis=axis, skipna=skipna,
split_every=split_every, out=out)
@derived_from(pd.DataFrame)
def idxmax(self, axis=None, skipna=True, split_every=False):
fn = 'idxmax'
axis = self._validate_axis(axis)
meta = self._meta_nonempty.idxmax(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.idxmax, self, meta=meta,
token=self._token_prefix + fn,
skipna=skipna, axis=axis)
else:
scalar = not is_series_like(meta)
result = aca([self], chunk=idxmaxmin_chunk, aggregate=idxmaxmin_agg,
combine=idxmaxmin_combine, meta=meta,
aggregate_kwargs={'scalar': scalar},
token=self._token_prefix + fn, split_every=split_every,
skipna=skipna, fn=fn)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
@derived_from(pd.DataFrame)
def idxmin(self, axis=None, skipna=True, split_every=False):
fn = 'idxmin'
axis = self._validate_axis(axis)
meta = self._meta_nonempty.idxmin(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.idxmin, self, meta=meta,
token=self._token_prefix + fn,
skipna=skipna, axis=axis)
else:
scalar = not is_series_like(meta)
result = aca([self], chunk=idxmaxmin_chunk, aggregate=idxmaxmin_agg,
combine=idxmaxmin_combine, meta=meta,
aggregate_kwargs={'scalar': scalar},
token=self._token_prefix + fn, split_every=split_every,
skipna=skipna, fn=fn)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
@derived_from(pd.DataFrame)
def count(self, axis=None, split_every=False):
axis = self._validate_axis(axis)
token = self._token_prefix + 'count'
if axis == 1:
meta = self._meta_nonempty.count(axis=axis)
return self.map_partitions(M.count, meta=meta, token=token,
axis=axis)
else:
meta = self._meta_nonempty.count()
result = self.reduction(M.count, aggregate=M.sum, meta=meta,
token=token, split_every=split_every)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
@derived_from(pd.DataFrame)
def mean(self, axis=None, skipna=True, split_every=False, dtype=None, out=None):
axis = self._validate_axis(axis)
_raise_if_object_series(self, "mean")
meta = self._meta_nonempty.mean(axis=axis, skipna=skipna)
if axis == 1:
result = map_partitions(M.mean, self, meta=meta,
token=self._token_prefix + 'mean',
axis=axis, skipna=skipna)
return handle_out(out, result)
else:
num = self._get_numeric_data()
s = num.sum(skipna=skipna, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'mean-%s' % tokenize(self, axis, skipna)
result = map_partitions(methods.mean_aggregate, s, n,
token=name, meta=meta)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return handle_out(out, result)
@derived_from(pd.DataFrame)
def var(self, axis=None, skipna=True, ddof=1, split_every=False, dtype=None, out=None):
axis = self._validate_axis(axis)
_raise_if_object_series(self, "var")
meta = self._meta_nonempty.var(axis=axis, skipna=skipna)
if axis == 1:
result = map_partitions(M.var, self, meta=meta,
token=self._token_prefix + 'var',
axis=axis, skipna=skipna, ddof=ddof)
return handle_out(out, result)
else:
if self.ndim == 1:
result = self._var_1d(self, skipna, ddof, split_every)
return handle_out(out, result)
count_timedeltas = len(self._meta_nonempty.select_dtypes(include=[np.timedelta64]).columns)
if count_timedeltas == len(self._meta.columns):
result = self._var_timedeltas(skipna, ddof, split_every)
elif count_timedeltas > 0:
result = self._var_mixed(skipna, ddof, split_every)
else:
result = self._var_numeric(skipna, ddof, split_every)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return handle_out(out, result)
def _var_numeric(self, skipna=True, ddof=1, split_every=False):
num = self.select_dtypes(include=['number', 'bool'], exclude=[np.timedelta64])
values_dtype = num.values.dtype
array_values = num.values
if not np.issubdtype(values_dtype, np.number):
array_values = num.values.astype('f8')
var = da.nanvar if skipna or skipna is None else da.var
array_var = var(array_values, axis=0, ddof=ddof, split_every=split_every)
name = self._token_prefix + 'var-numeric' + tokenize(num, split_every)
cols = num._meta.columns if is_dataframe_like(num) else None
var_shape = num._meta_nonempty.values.var(axis=0).shape
array_var_name = (array_var._name,) + (0,) * len(var_shape)
layer = {(name, 0): (methods.wrap_var_reduction, array_var_name, cols)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_var])
return new_dd_object(graph, name, num._meta_nonempty.var(), divisions=[None, None])
def _var_timedeltas(self, skipna=True, ddof=1, split_every=False):
timedeltas = self.select_dtypes(include=[np.timedelta64])
var_timedeltas = [self._var_1d(timedeltas[col_idx], skipna, ddof, split_every)
for col_idx in timedeltas._meta.columns]
var_timedelta_names = [(v._name, 0) for v in var_timedeltas]
name = self._token_prefix + 'var-timedeltas-' + tokenize(timedeltas, split_every)
layer = {(name, 0): (methods.wrap_var_reduction, var_timedelta_names, timedeltas._meta.columns)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=var_timedeltas)
return new_dd_object(graph, name, timedeltas._meta_nonempty.var(), divisions=[None, None])
def _var_mixed(self, skipna=True, ddof=1, split_every=False):
data = self.select_dtypes(include=['number', 'bool', np.timedelta64])
timedelta_vars = self._var_timedeltas(skipna, ddof, split_every)
numeric_vars = self._var_numeric(skipna, ddof, split_every)
name = self._token_prefix + 'var-mixed-' + tokenize(data, split_every)
layer = {(name, 0): (methods.var_mixed_concat,
(numeric_vars._name, 0),
(timedelta_vars._name, 0),
data._meta.columns)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=[numeric_vars, timedelta_vars])
return new_dd_object(graph, name, self._meta_nonempty.var(), divisions=[None, None])
def _var_1d(self, column, skipna=True, ddof=1, split_every=False):
is_timedelta = is_timedelta64_dtype(column._meta)
if is_timedelta:
if not skipna:
is_nan = column.isna()
column = column.astype('i8')
column = column.mask(is_nan)
else:
column = column.dropna().astype('i8')
if PANDAS_VERSION >= '0.24.0':
if pd.Int64Dtype.is_dtype(column._meta_nonempty):
column = column.astype('f8')
if not np.issubdtype(column.dtype, np.number):
column = column.astype('f8')
name = self._token_prefix + 'var-1d-' + tokenize(column, split_every)
var = da.nanvar if skipna or skipna is None else da.var
array_var = var(column.values, axis=0, ddof=ddof, split_every=split_every)
layer = {(name, 0): (methods.wrap_var_reduction, (array_var._name,), None)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_var])
return new_dd_object(graph, name, column._meta_nonempty.var(), divisions=[None, None])
@derived_from(pd.DataFrame)
def std(self, axis=None, skipna=True, ddof=1, split_every=False, dtype=None, out=None):
axis = self._validate_axis(axis)
_raise_if_object_series(self, "std")
meta = self._meta_nonempty.std(axis=axis, skipna=skipna)
if axis == 1:
result = map_partitions(M.std, self, meta=meta,
token=self._token_prefix + 'std',
axis=axis, skipna=skipna, ddof=ddof)
return handle_out(out, result)
else:
v = self.var(skipna=skipna, ddof=ddof, split_every=split_every)
name = self._token_prefix + 'std'
result = map_partitions(np.sqrt, v, meta=meta, token=name)
return handle_out(out, result)
@derived_from(pd.DataFrame)
def sem(self, axis=None, skipna=None, ddof=1, split_every=False):
axis = self._validate_axis(axis)
_raise_if_object_series(self, "sem")
meta = self._meta_nonempty.sem(axis=axis, skipna=skipna, ddof=ddof)
if axis == 1:
return map_partitions(M.sem, self, meta=meta,
token=self._token_prefix + 'sem',
axis=axis, skipna=skipna, ddof=ddof)
else:
num = self._get_numeric_data()
v = num.var(skipna=skipna, ddof=ddof, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'sem'
result = map_partitions(np.sqrt, v / n, meta=meta, token=name)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return result
def quantile(self, q=0.5, axis=0, method='default'):
""" Approximate row-wise and precise column-wise quantiles of DataFrame
Parameters
----------
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
method : {'default', 'tdigest', 'dask'}, optional
What method to use. By default will use dask's internal custom
algorithm (``'dask'``). If set to ``'tdigest'``, it will use tdigest
for floats and ints and fall back to ``'dask'`` otherwise.
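Examples
--------
Illustrative sketch only, assuming ``ddf`` is an existing dask
DataFrame with numeric columns (not defined here):
>>> ddf.quantile(0.25)  # doctest: +SKIP
>>> ddf.quantile([0.25, 0.75], method='tdigest')  # doctest: +SKIP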
"""
axis = self._validate_axis(axis)
keyname = 'quantiles-concat--' + tokenize(self, q, axis)
if axis == 1:
if isinstance(q, list):
# Not supported, the result will have current index as columns
raise ValueError("'q' must be scalar when axis=1 is specified")
return map_partitions(M.quantile, self, q, axis,
token=keyname, meta=(q, 'f8'))
else:
_raise_if_object_series(self, "quantile")
meta = self._meta.quantile(q, axis=axis)
num = self._get_numeric_data()
quantiles = tuple(quantile(self[c], q, method) for c in num.columns)
qnames = [(_q._name, 0) for _q in quantiles]
if isinstance(quantiles[0], Scalar):
layer = {(keyname, 0): (pd.Series, qnames, num.columns, None, meta.name)}
graph = HighLevelGraph.from_collections(keyname, layer, dependencies=quantiles)
divisions = (min(num.columns), max(num.columns))
return Series(graph, keyname, meta, divisions)
else:
layer = {(keyname, 0): (methods.concat, qnames, 1)}
graph = HighLevelGraph.from_collections(keyname, layer, dependencies=quantiles)
return DataFrame(graph, keyname, meta, quantiles[0].divisions)
@derived_from(pd.DataFrame)
def describe(self,
split_every=False,
percentiles=None,
percentiles_method='default',
include=None,
exclude=None):
if self._meta.ndim == 1:
return self._describe_1d(self, split_every, percentiles, percentiles_method)
elif (include is None) and (exclude is None):
data = self._meta.select_dtypes(include=[np.number, np.timedelta64])
# when some numerics/timedeltas are found, by default keep them
if len(data.columns) == 0:
chosen_columns = self._meta.columns
else:
# check if there are timedelta or boolean columns
bools_and_timedeltas = self._meta.select_dtypes(include=[np.timedelta64, 'bool'])
if len(bools_and_timedeltas.columns) == 0:
return self._describe_numeric(self, split_every, percentiles, percentiles_method)
else:
chosen_columns = data.columns
elif include == 'all':
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
chosen_columns = self._meta.columns
else:
chosen_columns = self._meta.select_dtypes(include=include, exclude=exclude)
stats = [self._describe_1d(self[col_idx], split_every,
percentiles, percentiles_method) for col_idx in chosen_columns]
stats_names = [(s._name, 0) for s in stats]
name = 'describe--' + tokenize(self, split_every)
layer = {(name, 0): (methods.describe_aggregate, stats_names)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)
meta = self._meta_nonempty.describe(include=include, exclude=exclude)
return new_dd_object(graph, name, meta, divisions=[None, None])
def _describe_1d(self, data, split_every=False,
percentiles=None, percentiles_method='default'):
if is_bool_dtype(data._meta):
return self._describe_nonnumeric_1d(data, split_every=split_every)
elif is_numeric_dtype(data._meta):
return self._describe_numeric(
data,
split_every=split_every,
percentiles=percentiles,
percentiles_method=percentiles_method)
elif is_timedelta64_dtype(data._meta):
return self._describe_numeric(
data.dropna().astype('i8'),
split_every=split_every,
percentiles=percentiles,
percentiles_method=percentiles_method,
is_timedelta_column=True)
else:
return self._describe_nonnumeric_1d(data, split_every=split_every)
def _describe_numeric(self, data, split_every=False, percentiles=None,
percentiles_method='default', is_timedelta_column=False):
num = data._get_numeric_data()
if data.ndim == 2 and len(num.columns) == 0:
raise ValueError("DataFrame contains only non-numeric data.")
elif data.ndim == 1 and data.dtype == 'object':
raise ValueError("Cannot compute ``describe`` on object dtype.")
if percentiles is None:
percentiles = [0.25, 0.5, 0.75]
else:
# always include the 50th percentile to calculate the median
# unique removes duplicates and sorts quantiles
percentiles = np.array(percentiles)
percentiles = np.append(percentiles, 0.5)
percentiles = np.unique(percentiles)
percentiles = list(percentiles)
stats = [num.count(split_every=split_every),
num.mean(split_every=split_every),
num.std(split_every=split_every),
num.min(split_every=split_every),
num.quantile(percentiles, method=percentiles_method),
num.max(split_every=split_every)]
stats_names = [(s._name, 0) for s in stats]
colname = data._meta.name if isinstance(data._meta, pd.Series) else None
name = 'describe-numeric--' + tokenize(num, split_every)
layer = {(name, 0): (methods.describe_numeric_aggregate, stats_names, colname, is_timedelta_column)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)
meta = num._meta_nonempty.describe()
return new_dd_object(graph, name, meta, divisions=[None, None])
def _describe_nonnumeric_1d(self, data, split_every=False):
vcounts = data.value_counts(split_every)
count_nonzero = vcounts[vcounts != 0]
count_unique = count_nonzero.size
stats = [
# nunique
count_unique,
# count
data.count(split_every=split_every),
# most common value
vcounts._head(1, npartitions=1, compute=False, safe=False)
]
if is_datetime64_any_dtype(data._meta):
min_ts = data.dropna().astype('i8').min(split_every=split_every)
max_ts = data.dropna().astype('i8').max(split_every=split_every)
stats += [min_ts, max_ts]
stats_names = [(s._name, 0) for s in stats]
colname = data._meta.name
name = 'describe-nonnumeric-1d--' + tokenize(data, split_every)
layer = {(name, 0): (methods.describe_nonnumeric_aggregate, stats_names, colname)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)
meta = data._meta_nonempty.describe()
return new_dd_object(graph, name, meta, divisions=[None, None])
def _cum_agg(self, op_name, chunk, aggregate, axis, skipna=True,
chunk_kwargs=None, out=None):
""" Wrapper for cumulative operation """
axis = self._validate_axis(axis)
if axis == 1:
name = '{0}{1}(axis=1)'.format(self._token_prefix, op_name)
result = self.map_partitions(chunk, token=name, **chunk_kwargs)
return handle_out(out, result)
else:
# cumulate each partition
name1 = '{0}{1}-map'.format(self._token_prefix, op_name)
cumpart = map_partitions(chunk, self, token=name1, meta=self,
**chunk_kwargs)
name2 = '{0}{1}-take-last'.format(self._token_prefix, op_name)
cumlast = map_partitions(_take_last, cumpart, skipna,
meta=pd.Series([]), token=name2)
suffix = tokenize(self)
name = '{0}{1}-{2}'.format(self._token_prefix, op_name, suffix)
cname = '{0}{1}-cum-last-{2}'.format(self._token_prefix, op_name,
suffix)
# aggregate cumulated partitions with their previous last element
layer = {}
layer[(name, 0)] = (cumpart._name, 0)
for i in range(1, self.npartitions):
# store each cumulative step to graph to reduce computation
if i == 1:
layer[(cname, i)] = (cumlast._name, i - 1)
else:
# aggregate with previous cumulation results
layer[(cname, i)] = (aggregate, (cname, i - 1), (cumlast._name, i - 1))
layer[(name, i)] = (aggregate, (cumpart._name, i), (cname, i))
graph = HighLevelGraph.from_collections(cname, layer, dependencies=[cumpart, cumlast])
result = new_dd_object(graph, name, chunk(self._meta), self.divisions)
return handle_out(out, result)
@derived_from(pd.DataFrame)
def cumsum(self, axis=None, skipna=True, dtype=None, out=None):
return self._cum_agg('cumsum',
chunk=M.cumsum,
aggregate=operator.add,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna),
out=out)
@derived_from(pd.DataFrame)
def cumprod(self, axis=None, skipna=True, dtype=None, out=None):
return self._cum_agg('cumprod',
chunk=M.cumprod,
aggregate=operator.mul,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna),
out=out)
@derived_from(pd.DataFrame)
def cummax(self, axis=None, skipna=True, out=None):
return self._cum_agg('cummax',
chunk=M.cummax,
aggregate=methods.cummax_aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna),
out=out)
@derived_from(pd.DataFrame)
def cummin(self, axis=None, skipna=True, out=None):
return self._cum_agg('cummin',
chunk=M.cummin,
aggregate=methods.cummin_aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna),
out=out)
@derived_from(pd.DataFrame)
def where(self, cond, other=np.nan):
# cond and other may be dask instances; arguments passed to
# map_partitions via keyword will not be aligned, so pass positionally
return map_partitions(M.where, self, cond, other)
@derived_from(pd.DataFrame)
def mask(self, cond, other=np.nan):
return map_partitions(M.mask, self, cond, other)
@derived_from(pd.DataFrame)
def notnull(self):
return self.map_partitions(M.notnull)
@derived_from(pd.DataFrame)
def isnull(self):
return self.map_partitions(M.isnull)
@derived_from(pd.DataFrame)
def isna(self):
if hasattr(pd, 'isna'):
return self.map_partitions(M.isna)
else:
raise NotImplementedError("Need more recent version of Pandas "
"to support isna. "
"Please use isnull instead.")
@derived_from(pd.DataFrame)
def isin(self, values):
if is_dataframe_like(self._meta):
# DataFrame.isin does weird alignment stuff
bad_types = (_Frame, pd.Series, pd.DataFrame)
else:
bad_types = (_Frame,)
if isinstance(values, bad_types):
raise NotImplementedError(
"Passing a %r to `isin`" % typename(type(values))
)
meta = self._meta_nonempty.isin(values)
# We wrap values in a delayed for two reasons:
# - avoid serializing data in every task
# - avoid cost of traversal of large list in optimizations
return self.map_partitions(M.isin, delayed(values), meta=meta)
@derived_from(pd.DataFrame)
def astype(self, dtype):
# XXX: Pandas will segfault for empty dataframes when setting
# categorical dtypes. This operation isn't allowed currently anyway. We
# get the metadata with a non-empty frame to throw the error instead of
# segfaulting.
if is_dataframe_like(self._meta) and is_categorical_dtype(dtype):
meta = self._meta_nonempty.astype(dtype)
else:
meta = self._meta.astype(dtype)
if hasattr(dtype, 'items'):
set_unknown = [
k for k, v in dtype.items()
if is_categorical_dtype(v) and getattr(v, 'categories', None) is None
]
meta = clear_known_categories(meta, cols=set_unknown)
elif (is_categorical_dtype(dtype) and
getattr(dtype, 'categories', None) is None):
meta = clear_known_categories(meta)
return self.map_partitions(M.astype, dtype=dtype, meta=meta)
@derived_from(pd.Series)
def append(self, other, interleave_partitions=False):
# because DataFrame.append will override the method,
# wrap with the pd.Series.append docstring
from .multi import concat
if isinstance(other, (list, dict)):
msg = "append doesn't support list or dict input"
raise NotImplementedError(msg)
return concat([self, other], join='outer',
interleave_partitions=interleave_partitions)
@derived_from(pd.DataFrame)
def align(self, other, join='outer', axis=None, fill_value=None):
meta1, meta2 = _emulate(M.align, self, other, join, axis=axis,
fill_value=fill_value)
aligned = self.map_partitions(M.align, other, join=join, axis=axis,
fill_value=fill_value)
token = tokenize(self, other, join, axis, fill_value)
name1 = 'align1-' + token
dsk1 = {(name1, i): (getitem, key, 0)
for i, key in enumerate(aligned.__dask_keys__())}
dsk1.update(aligned.dask)
result1 = new_dd_object(dsk1, name1, meta1, aligned.divisions)
name2 = 'align2-' + token
dsk2 = {(name2, i): (getitem, key, 1)
for i, key in enumerate(aligned.__dask_keys__())}
dsk2.update(aligned.dask)
result2 = new_dd_object(dsk2, name2, meta2, aligned.divisions)
return result1, result2
@derived_from(pd.DataFrame)
def combine(self, other, func, fill_value=None, overwrite=True):
return self.map_partitions(M.combine, other, func,
fill_value=fill_value, overwrite=overwrite)
@derived_from(pd.DataFrame)
def combine_first(self, other):
return self.map_partitions(M.combine_first, other)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
raise NotImplementedError
@derived_from(pd.DataFrame)
def resample(self, rule, closed=None, label=None):
from .tseries.resample import Resampler
return Resampler(self, rule, closed=closed, label=label)
@derived_from(pd.DataFrame)
def first(self, offset):
# Let pandas error on bad args
self._meta_nonempty.first(offset)
if not self.known_divisions:
raise ValueError("`first` is not implemented for unknown divisions")
offset = pd.tseries.frequencies.to_offset(offset)
date = self.divisions[0] + offset
end = self.loc._get_partitions(date)
include_right = offset.isAnchored() or not hasattr(offset, '_inc')
if end == self.npartitions - 1:
divs = self.divisions
else:
divs = self.divisions[:end + 1] + (date,)
name = 'first-' + tokenize(self, offset)
dsk = {(name, i): (self._name, i) for i in range(end)}
dsk[(name, end)] = (methods.boundary_slice, (self._name, end),
None, date, include_right, True, 'loc')
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return new_dd_object(graph, name, self, divs)
@derived_from(pd.DataFrame)
def last(self, offset):
# Let pandas error on bad args
self._meta_nonempty.last(offset)
if not self.known_divisions:
raise ValueError("`last` is not implemented for unknown divisions")
offset = pd.tseries.frequencies.to_offset(offset)
date = self.divisions[-1] - offset
start = self.loc._get_partitions(date)
if start == 0:
divs = self.divisions
else:
divs = (date,) + self.divisions[start + 1:]
name = 'last-' + tokenize(self, offset)
dsk = {(name, i + 1): (self._name, j + 1)
for i, j in enumerate(range(start, self.npartitions))}
dsk[(name, 0)] = (methods.boundary_slice, (self._name, start),
date, None, True, False, 'loc')
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return new_dd_object(graph, name, self, divs)
def nunique_approx(self, split_every=None):
"""Approximate number of unique rows.
This method uses the HyperLogLog algorithm for cardinality
estimation to compute the approximate number of unique rows.
The approximate error is 0.406%.
Parameters
----------
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used.
Default is 8.
Returns
-------
a float representing the approximate number of elements
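Examples
--------
Illustrative sketch only; ``ddf`` is assumed to be an existing dask
DataFrame (not defined here):
>>> ddf.nunique_approx().compute()  # doctest: +SKIP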
"""
from . import hyperloglog # here to avoid circular import issues
return aca([self], chunk=hyperloglog.compute_hll_array,
combine=hyperloglog.reduce_state,
aggregate=hyperloglog.estimate_count,
split_every=split_every, b=16, meta=float)
@property
def values(self):
""" Return a dask.array of the values of this dataframe
Warning: This creates a dask.array without precise shape information.
Operations that depend on shape information, like slicing or reshaping,
will not work.
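Examples
--------
Illustrative sketch only; ``ddf`` is assumed to be an existing dask
DataFrame (not defined here):
>>> ddf.values  # doctest: +SKIP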
"""
return self.map_partitions(methods.values)
def _validate_chunks(self, arr, lengths):
from dask.array.core import normalize_chunks
if isinstance(lengths, Sequence):
lengths = tuple(lengths)
if len(lengths) != self.npartitions:
raise ValueError(
"The number of items in 'lengths' does not match "
"the number of partitions. "
"{} != {}".format(len(lengths), self.npartitions)
)
if self.ndim == 1:
chunks = normalize_chunks((lengths,))
else:
chunks = normalize_chunks((lengths, (len(self.columns),)))
return chunks
elif lengths is not None:
raise ValueError("Unexpected value for 'lengths': '{}'".format(lengths))
return arr._chunks
def _is_index_level_reference(self, key):
"""
Test whether a key is an index level reference
To be considered an index level reference, `key` must match the index name
and must NOT match the name of any column (if a dataframe).
"""
return (self.index.name is not None and
not is_dask_collection(key) and
(np.isscalar(key) or isinstance(key, tuple)) and
key == self.index.name and
key not in getattr(self, 'columns', ()))
def _contains_index_name(self, columns_or_index):
"""
Test whether the input contains a reference to the index of the DataFrame/Series
"""
if isinstance(columns_or_index, list):
return any(self._is_index_level_reference(n) for n in columns_or_index)
else:
return self._is_index_level_reference(columns_or_index)
def _raise_if_object_series(x, funcname):
"""
Utility function to raise an error if an object column does not support
a certain operation like `mean`.
"""
if isinstance(x, Series) and hasattr(x, "dtype") and x.dtype == object:
raise ValueError("`%s` not supported with object series" % funcname)
class Series(_Frame):
""" Parallel Pandas Series
Do not use this class directly. Instead use functions like
``dd.read_csv``, ``dd.read_parquet``, or ``dd.from_pandas``.
Parameters
----------
dsk: dict
The dask graph to compute this Series
_name: str
The key prefix that specifies which keys in the dask comprise this
particular Series
meta: pandas.Series
An empty ``pandas.Series`` with names, dtypes, and index matching the
expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
See Also
--------
dask.dataframe.DataFrame
"""
_partition_type = pd.Series
_is_partition_type = staticmethod(is_series_like)
_token_prefix = 'series-'
_accessors = set()
def __array_wrap__(self, array, context=None):
if isinstance(context, tuple) and len(context) > 0:
if isinstance(context[1][0], np.ndarray) and context[1][0].shape == ():
index = None
else:
index = context[1][0].index
return pd.Series(array, index=index, name=self.name)
@property
def name(self):
return self._meta.name
@name.setter
def name(self, name):
self._meta.name = name
renamed = _rename_dask(self, name)
# update myself
self.dask = renamed.dask
self._name = renamed._name
@property
def ndim(self):
""" Return dimensionality """
return 1
@property
def shape(self):
"""
Return a tuple representing the dimensionality of a Series.
The single element of the tuple is a Delayed result.
Examples
--------
>>> series.shape # doctest: +SKIP
# (dd.Scalar<size-ag..., dtype=int64>,)
"""
return (self.size,)
@property
def dtype(self):
""" Return data type """
return self._meta.dtype
@cache_readonly
def dt(self):
""" Namespace of datetime methods """
return DatetimeAccessor(self)
@cache_readonly
def cat(self):
return CategoricalAccessor(self)
@cache_readonly
def str(self):
""" Namespace for string methods """
return StringAccessor(self)
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
# Remove the `cat` and `str` accessors if not available. We can't
# decide this statically for the `dt` accessor, as it works on
# datetime-like things as well.
for accessor in ['cat', 'str']:
if not hasattr(self._meta, accessor):
o.remove(accessor)
return list(o)
@property
def nbytes(self):
""" Number of bytes """
return self.reduction(methods.nbytes, np.sum, token='nbytes',
meta=int, split_every=False)
def _repr_data(self):
return _repr_data_series(self._meta, self._repr_divisions)
def __repr__(self):
""" have to overwrite footer """
if self.name is not None:
footer = "Name: {name}, dtype: {dtype}".format(name=self.name,
dtype=self.dtype)
else:
footer = "dtype: {dtype}".format(dtype=self.dtype)
return """Dask {klass} Structure:
{data}
{footer}
Dask Name: {name}, {task} tasks""".format(klass=self.__class__.__name__,
data=self.to_string(),
footer=footer,
name=key_split(self._name),
task=len(self.dask))
def rename(self, index=None, inplace=False, sorted_index=False):
"""Alter Series index labels or name
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
Alternatively, change ``Series.name`` with a scalar value.
Parameters
----------
index : scalar, hashable sequence, dict-like or callable, optional
If dict-like or callable, the transformation is applied to the
index. Scalar or hashable sequence-like will alter the
``Series.name`` attribute.
inplace : boolean, default False
Whether to return a new Series or modify this one inplace.
sorted_index : bool, default False
If true, the output ``Series`` will have known divisions inferred
from the input series and the transformation. Ignored for
non-callable/dict-like ``index`` or when the input series has
unknown divisions. Note that this may only be set to ``True`` if
you know that the transformed index is monotonically increasing. Dask
will check that transformed divisions are monotonic, but cannot
check all the values between divisions, so incorrectly setting this
can result in bugs.
Returns
-------
renamed : Series
See Also
--------
pandas.Series.rename
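Examples
--------
Illustrative sketch only, assuming ``s`` is an existing dask Series
with a numeric index (not defined here):
>>> s.rename('new_name')  # doctest: +SKIP
>>> s.rename({1: 10, 2: 20})  # doctest: +SKIP
>>> s.rename(lambda x: x + 1, sorted_index=True)  # doctest: +SKIP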
"""
from pandas.api.types import is_scalar, is_list_like, is_dict_like
if is_scalar(index) or (is_list_like(index) and not is_dict_like(index)):
res = self if inplace else self.copy()
res.name = index
else:
res = self.map_partitions(M.rename, index)
if self.known_divisions:
if sorted_index and (callable(index) or is_dict_like(index)):
old = pd.Series(range(self.npartitions + 1),
index=self.divisions)
new = old.rename(index).index
if not new.is_monotonic_increasing:
msg = ("sorted_index=True, but the transformed index "
"isn't monotonic_increasing")
raise ValueError(msg)
res.divisions = tuple(new.tolist())
else:
res = res.clear_divisions()
if inplace:
self.dask = res.dask
self._name = res._name
self.divisions = res.divisions
self._meta = res._meta
res = self
return res
@derived_from(pd.Series)
def round(self, decimals=0):
return elemwise(M.round, self, decimals)
@derived_from(pd.DataFrame)
def to_timestamp(self, freq=None, how='start', axis=0):
df = elemwise(M.to_timestamp, self, freq, how, axis)
df.divisions = tuple(pd.Index(self.divisions).to_timestamp())
return df
def quantile(self, q=0.5, method='default'):
""" Approximate quantiles of Series
Parameters
----------
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
method : {'default', 'tdigest', 'dask'}, optional
What method to use. By default will use dask's internal custom
algorithm (``'dask'``). If set to ``'tdigest'``, it will use tdigest
for floats and ints and fall back to ``'dask'`` otherwise.
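Examples
--------
Illustrative sketch only, assuming ``s`` is an existing numeric dask
Series (not defined here):
>>> s.quantile(0.5)  # doctest: +SKIP
>>> s.quantile([0.25, 0.5, 0.75])  # doctest: +SKIP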
"""
return quantile(self, q, method=method)
def _repartition_quantiles(self, npartitions, upsample=1.0):
""" Approximate quantiles of Series used for repartitioning
"""
from .partitionquantiles import partition_quantiles
return partition_quantiles(self, npartitions, upsample=upsample)
def __getitem__(self, key):
if isinstance(key, Series) and self.divisions == key.divisions:
name = 'index-%s' % tokenize(self, key)
dsk = partitionwise_graph(operator.getitem, name, self, key)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, key])
return Series(graph, name, self._meta, self.divisions)
raise NotImplementedError(
"Series getitem in only supported for other series objects "
"with matching partition structure"
)
@derived_from(pd.DataFrame)
def _get_numeric_data(self, how='any', subset=None):
return self
@derived_from(pd.Series)
def iteritems(self):
for i in range(self.npartitions):
s = self.get_partition(i).compute()
for item in s.iteritems():
yield item
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 'index', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0}.get(axis, axis)
@derived_from(pd.Series)
def groupby(self, by=None, **kwargs):
from dask.dataframe.groupby import SeriesGroupBy
return SeriesGroupBy(self, by=by, **kwargs)
@derived_from(pd.Series)
def count(self, split_every=False):
return super(Series, self).count(split_every=split_every)
def unique(self, split_every=None, split_out=1):
"""
Return Series of unique values in the object. Includes NA values.
Returns
-------
uniques : Series
"""
return aca(self, chunk=methods.unique, aggregate=methods.unique,
meta=self._meta, token='unique', split_every=split_every,
series_name=self.name, split_out=split_out)
@derived_from(pd.Series)
def nunique(self, split_every=None):
return self.drop_duplicates(split_every=split_every).count()
@derived_from(pd.Series)
def value_counts(self, split_every=None, split_out=1):
return aca(self, chunk=M.value_counts,
aggregate=methods.value_counts_aggregate,
combine=methods.value_counts_combine,
meta=self._meta.value_counts(), token='value-counts',
split_every=split_every, split_out=split_out,
split_out_setup=split_out_on_index)
@derived_from(pd.Series)
def nlargest(self, n=5, split_every=None):
return aca(self, chunk=M.nlargest, aggregate=M.nlargest,
meta=self._meta, token='series-nlargest',
split_every=split_every, n=n)
@derived_from(pd.Series)
def nsmallest(self, n=5, split_every=None):
return aca(self, chunk=M.nsmallest, aggregate=M.nsmallest,
meta=self._meta, token='series-nsmallest',
split_every=split_every, n=n)
@derived_from(pd.Series)
def isin(self, values):
# Added just to get the different docstring for Series
return super(Series, self).isin(values)
@insert_meta_param_description(pad=12)
@derived_from(pd.Series)
def map(self, arg, na_action=None, meta=no_default):
if is_series_like(arg) and is_dask_collection(arg):
return series_map(self, arg)
if not (isinstance(arg, dict) or
callable(arg) or
is_series_like(arg) and not is_dask_collection(arg)):
raise TypeError("arg must be pandas.Series, dict or callable."
" Got {0}".format(type(arg)))
name = 'map-' + tokenize(self, arg, na_action)
dsk = {(name, i): (M.map, k, arg, na_action) for i, k in
enumerate(self.__dask_keys__())}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
if meta is no_default:
meta = _emulate(M.map, self, arg, na_action=na_action, udf=True)
else:
meta = make_meta(meta, index=getattr(make_meta(self), 'index', None))
return Series(graph, name, meta, self.divisions)
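    # Hedged usage sketch (assumes `s` is a dask Series of strings); passing `meta`
    # explicitly skips the metadata-emulation step:
    #   s.map({'a': 1, 'b': 2}, meta=('s', 'i8'))
    #   s.map(str.strip)   # a plain callable is applied via pandas.Series.map per partition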
@derived_from(pd.Series)
def dropna(self):
return self.map_partitions(M.dropna)
@derived_from(pd.Series)
def between(self, left, right, inclusive=True):
return self.map_partitions(M.between, left=left,
right=right, inclusive=inclusive)
@derived_from(pd.Series)
def clip(self, lower=None, upper=None, out=None):
if out is not None:
raise ValueError("'out' must be None")
# np.clip may pass out
return self.map_partitions(M.clip, lower=lower, upper=upper)
@derived_from(pd.Series)
def clip_lower(self, threshold):
return self.map_partitions(M.clip_lower, threshold=threshold)
@derived_from(pd.Series)
def clip_upper(self, threshold):
return self.map_partitions(M.clip_upper, threshold=threshold)
@derived_from(pd.Series)
def align(self, other, join='outer', axis=None, fill_value=None):
return super(Series, self).align(other, join=join, axis=axis,
fill_value=fill_value)
@derived_from(pd.Series)
def combine(self, other, func, fill_value=None):
return self.map_partitions(M.combine, other, func,
fill_value=fill_value)
@derived_from(pd.Series)
def squeeze(self):
return self
@derived_from(pd.Series)
def combine_first(self, other):
return self.map_partitions(M.combine_first, other)
def to_bag(self, index=False):
""" Create a Dask Bag from a Series """
from .io import to_bag
return to_bag(self, index)
@derived_from(pd.Series)
def to_frame(self, name=None):
return self.map_partitions(M.to_frame, name,
meta=self._meta.to_frame(name))
@derived_from(pd.Series)
def to_string(self, max_rows=5):
        # pandas.option_context doesn't affect the rendered output here, so pass max_rows directly
return self._repr_data().to_string(max_rows=max_rows)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like Series.add to this class """
def meth(self, other, level=None, fill_value=None, axis=0):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
return map_partitions(op, self, other, meta=meta,
axis=axis, fill_value=fill_value)
meth.__doc__ = skip_doctest(op.__doc__)
bind_method(cls, name, meth)
@classmethod
def _bind_comparison_method(cls, name, comparison):
""" bind comparison method like Series.eq to this class """
def meth(self, other, level=None, fill_value=None, axis=0):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
if fill_value is None:
return elemwise(comparison, self, other, axis=axis)
else:
op = partial(comparison, fill_value=fill_value)
return elemwise(op, self, other, axis=axis)
meth.__doc__ = skip_doctest(comparison.__doc__)
bind_method(cls, name, meth)
@insert_meta_param_description(pad=12)
def apply(self, func, convert_dtype=True, meta=no_default, args=(), **kwds):
""" Parallel version of pandas.Series.apply
Parameters
----------
func : function
Function to apply
convert_dtype : boolean, default True
Try to find better dtype for elementwise function results.
If False, leave as dtype=object.
$META
args : tuple
Positional arguments to pass to function in addition to the value.
Additional keyword arguments will be passed as keywords to the function.
Returns
-------
applied : Series or DataFrame if func returns a Series.
Examples
--------
>>> import dask.dataframe as dd
>>> s = pd.Series(range(5), name='x')
>>> ds = dd.from_pandas(s, npartitions=2)
Apply a function elementwise across the Series, passing in extra
arguments in ``args`` and ``kwargs``:
>>> def myadd(x, a, b=1):
... return x + a + b
>>> res = ds.apply(myadd, args=(2,), b=1.5) # doctest: +SKIP
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with name ``'x'``, and dtype
``float64``:
>>> res = ds.apply(myadd, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ds.apply(lambda x: x + 1, meta=ds)
See Also
--------
dask.Series.map_partitions
"""
if meta is no_default:
meta = _emulate(M.apply, self._meta_nonempty, func,
convert_dtype=convert_dtype,
args=args, udf=True, **kwds)
warnings.warn(meta_warning(meta))
return map_partitions(M.apply, self, func,
convert_dtype, args, meta=meta, **kwds)
@derived_from(pd.Series)
def cov(self, other, min_periods=None, split_every=False):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
df = concat([self, other], axis=1)
return cov_corr(df, min_periods, scalar=True, split_every=split_every)
@derived_from(pd.Series)
def corr(self, other, method='pearson', min_periods=None,
split_every=False):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
if method != 'pearson':
raise NotImplementedError("Only Pearson correlation has been "
"implemented")
df = concat([self, other], axis=1)
return cov_corr(df, min_periods, corr=True, scalar=True,
split_every=split_every)
@derived_from(pd.Series)
def autocorr(self, lag=1, split_every=False):
if not isinstance(lag, Integral):
raise TypeError("lag must be an integer")
return self.corr(self if lag == 0 else self.shift(lag),
split_every=split_every)
@derived_from(pd.Series)
def memory_usage(self, index=True, deep=False):
result = self.map_partitions(M.memory_usage, index=index, deep=deep)
return delayed(sum)(result.to_delayed())
def __divmod__(self, other):
res1 = self // other
res2 = self % other
return res1, res2
def __rdivmod__(self, other):
res1 = other // self
res2 = other % self
return res1, res2
class Index(Series):
_partition_type = pd.Index
_is_partition_type = staticmethod(is_index_like)
_token_prefix = 'index-'
_accessors = set()
_dt_attributes = {'nanosecond', 'microsecond', 'millisecond', 'dayofyear',
'minute', 'hour', 'day', 'dayofweek', 'second', 'week',
'weekday', 'weekofyear', 'month', 'quarter', 'year'}
_cat_attributes = {'known', 'as_known', 'as_unknown', 'add_categories',
'categories', 'remove_categories', 'reorder_categories',
'as_ordered', 'codes', 'remove_unused_categories',
'set_categories', 'as_unordered', 'ordered',
'rename_categories'}
def __getattr__(self, key):
if is_categorical_dtype(self.dtype) and key in self._cat_attributes:
return getattr(self.cat, key)
elif key in self._dt_attributes:
return getattr(self.dt, key)
raise AttributeError("'Index' object has no attribute %r" % key)
def __dir__(self):
out = super(Index, self).__dir__()
out.extend(self._dt_attributes)
if is_categorical_dtype(self.dtype):
out.extend(self._cat_attributes)
return out
@property
def index(self):
msg = "'{0}' object has no attribute 'index'"
raise AttributeError(msg.format(self.__class__.__name__))
def __array_wrap__(self, array, context=None):
return pd.Index(array, name=self.name)
def head(self, n=5, compute=True):
""" First n items of the Index.
        Caveat: this only checks the first partition.
"""
name = 'head-%d-%s' % (n, self._name)
dsk = {(name, 0): (operator.getitem, (self._name, 0), slice(0, n))}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
result = new_dd_object(graph, name, self._meta, self.divisions[:2])
if compute:
result = result.compute()
return result
@derived_from(pd.Index)
def max(self, split_every=False):
return self.reduction(M.max, meta=self._meta_nonempty.max(),
token=self._token_prefix + 'max',
split_every=split_every)
@derived_from(pd.Index)
def min(self, split_every=False):
return self.reduction(M.min, meta=self._meta_nonempty.min(),
token=self._token_prefix + 'min',
split_every=split_every)
def count(self, split_every=False):
return self.reduction(methods.index_count, np.sum,
token='index-count', meta=int,
split_every=split_every)
@derived_from(pd.Index)
def shift(self, periods=1, freq=None):
if isinstance(self._meta, pd.PeriodIndex):
if freq is not None:
raise ValueError("PeriodIndex doesn't accept `freq` argument")
meta = self._meta_nonempty.shift(periods)
out = self.map_partitions(M.shift, periods, meta=meta,
token='shift',
transform_divisions=False)
else:
# Pandas will raise for other index types that don't implement shift
meta = self._meta_nonempty.shift(periods, freq=freq)
out = self.map_partitions(M.shift, periods, token='shift',
meta=meta, freq=freq,
transform_divisions=False)
if freq is None:
freq = meta.freq
return maybe_shift_divisions(out, periods, freq=freq)
@derived_from(pd.Index)
def to_series(self):
return self.map_partitions(M.to_series,
meta=self._meta.to_series())
@derived_from(pd.Index, ua_args=['index'])
def to_frame(self, index=True, name=None):
if not index:
raise NotImplementedError()
if PANDAS_VERSION >= '0.24.0':
return self.map_partitions(M.to_frame, index, name,
meta=self._meta.to_frame(index, name))
else:
if name is not None:
raise ValueError("The 'name' keyword was added in pandas 0.24.0. "
"Your version of pandas is '{}'.".format(PANDAS_VERSION))
else:
return self.map_partitions(M.to_frame,
meta=self._meta.to_frame())
class DataFrame(_Frame):
"""
Parallel Pandas DataFrame
Do not use this class directly. Instead use functions like
``dd.read_csv``, ``dd.read_parquet``, or ``dd.from_pandas``.
Parameters
----------
dsk: dict
The dask graph to compute this DataFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame
meta: pandas.DataFrame
An empty ``pandas.DataFrame`` with names, dtypes, and index matching
the expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_partition_type = pd.DataFrame
_is_partition_type = staticmethod(is_dataframe_like)
_token_prefix = 'dataframe-'
_accessors = set()
def __array_wrap__(self, array, context=None):
if isinstance(context, tuple) and len(context) > 0:
if isinstance(context[1][0], np.ndarray) and context[1][0].shape == ():
index = None
else:
index = context[1][0].index
        return pd.DataFrame(array, index=index, columns=self.columns)
import re
from copy import copy
from typing import Iterable, Optional, Union
import pandas as pd
import requests
from bs4 import BeautifulSoup
from pvoutput.consts import (
MAP_URL,
PV_OUTPUT_COUNTRY_CODES,
PV_OUTPUT_MAP_COLUMN_NAMES,
REGIONS_URL,
)
_MAX_NUM_PAGES = 1024
def get_pv_systems_for_country(
country: Union[str, int],
ascending: Optional[bool] = None,
sort_by: Optional[str] = None,
max_pages: int = _MAX_NUM_PAGES,
region: Optional[str] = None,
) -> pd.DataFrame:
"""
Args:
country: either a string such as 'United Kingdom'
(see consts.PV_OUTPUT_COUNTRY_CODES for all recognised strings),
or a PVOutput.org country code, in the range [1, 257].
ascending: if True, ask PVOutput.org to sort results by ascending.
If False, sort by descending. If None, use PVOutput.org's default
sort order.
sort_by: The column to ask PVOutput.org to sort by. One of:
timeseries_duration,
average_generation_per_day,
efficiency,
power_generation,
capacity,
address,
name
max_pages: The maximum number of search pages to scrape.
Returns: pd.DataFrame with index system_id (int) and these columns:
name, system_DC_capacity_W, panel, inverter, address, orientation,
array_tilt_degrees, shade, timeseries_duration,
total_energy_gen_Wh, average_daily_energy_gen_Wh
average_efficiency_kWh_per_kW
"""
country_code = _convert_to_country_code(country)
regions = [region] if region else get_regions_for_country(country_code)
all_metadata = []
for region in regions:
for page_number in range(max_pages):
print(
"\rReading page {:2d} for region: {}".format(page_number, region),
end="",
flush=True,
)
url = _create_map_url(
country_code=country_code,
page_number=page_number,
ascending=ascending,
sort_by=sort_by,
region=region,
)
soup = get_soup(url)
if _page_is_blank(soup):
break
metadata = _process_metadata(soup)
metadata["region"] = region
all_metadata.append(metadata)
if not _page_has_next_link(soup):
break
return pd.concat(all_metadata)
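# Hedged usage sketch (not part of the original module): pull the first results page
# for one country; needs network access and a current PVOutput.org page layout.
def _example_get_uk_systems() -> pd.DataFrame:
    metadata = get_pv_systems_for_country(
        "United Kingdom", sort_by="capacity", ascending=False, max_pages=1
    )
    return metadata.head()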
############ LOAD HTML ###################
def _create_map_url(
country_code: Optional[int] = None,
page_number: Optional[int] = None,
ascending: Optional[bool] = None,
sort_by: Optional[str] = None,
region: Optional[str] = None,
) -> str:
"""
Args:
page_number: Get this page number of the search results. Zero-indexed.
The first page is page 0, the second page is page 1, etc.
"""
_check_country_code(country_code)
if ascending is None:
sort_order = None
else:
sort_order = "asc" if ascending else "desc"
if sort_by is None:
sort_by_pv_output_col_name = None
else:
try:
sort_by_pv_output_col_name = PV_OUTPUT_MAP_COLUMN_NAMES[sort_by]
except KeyError:
raise ValueError("sort_by must be one of {}".format(PV_OUTPUT_MAP_COLUMN_NAMES.keys()))
url_params = {
"country": country_code,
"p": page_number,
"d": sort_order,
"o": sort_by_pv_output_col_name,
"region": region,
}
url_params_list = [
"{}={}".format(key, value) for key, value in url_params.items() if value is not None
]
query_string = "&".join(url_params_list)
url = copy(MAP_URL)
if query_string:
url += "?" + query_string
return url
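# Hedged illustration: with an arbitrary in-range country code, the builder returns
# MAP_URL plus a query string such as '?country=1&p=0&d=asc&o=<mapped column name>'.
def _example_map_url() -> str:
    return _create_map_url(country_code=1, page_number=0, ascending=True, sort_by="capacity")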
def _raise_country_error(country, msg=""):
country_codes = PV_OUTPUT_COUNTRY_CODES.values()
raise ValueError(
"Wrong value country='{}'. {}country must be an integer country"
" code in the range [{}, {}], or one of {}.".format(
country,
msg,
min(country_codes),
max(country_codes),
", ".join(PV_OUTPUT_COUNTRY_CODES.keys()),
)
)
def _check_country_code(country_code: Union[None, int]):
if country_code is None:
return
country_codes = PV_OUTPUT_COUNTRY_CODES.values()
if not min(country_codes) <= country_code <= max(country_codes):
_raise_country_error(country_code, "country outside of valid range! ")
def _convert_to_country_code(country: Union[str, int]) -> int:
if isinstance(country, str):
try:
return PV_OUTPUT_COUNTRY_CODES[country]
except KeyError:
_raise_country_error(country)
elif isinstance(country, int):
_check_country_code(country)
return country
def _page_has_next_link(soup: BeautifulSoup):
return bool(soup.find_all("a", text="Next"))
############# PROCESS HTML #########################
def _process_metadata(soup: BeautifulSoup, return_constituents=False) -> pd.DataFrame:
pv_system_size_metadata = _process_system_size_col(soup)
index = pv_system_size_metadata.index
pv_systems_metadata = [
pv_system_size_metadata,
_process_output_col(soup, index),
_process_generation_and_average_cols(soup, index),
_process_efficiency_col(soup, index),
]
df = pd.concat(pv_systems_metadata, axis="columns")
df = _convert_metadata_cols_to_numeric(df)
df["system_DC_capacity_W"] = df["capacity_kW"] * 1e3
del df["capacity_kW"]
if return_constituents:
pv_systems_metadata.append(df)
return tuple(pv_systems_metadata)
return df
def _process_system_size_col(soup: BeautifulSoup) -> pd.DataFrame:
    pv_system_size_col = soup.find_all("a", href=re.compile(r"display\.jsp\?sid="))
metadata = []
for row in pv_system_size_col:
metadata_for_row = {}
# Get system ID
href = row.attrs["href"]
        p = re.compile(r"^display\.jsp\?sid=(\d+)$")
href_match = p.match(href)
metadata_for_row["system_id"] = href_match.group(1)
# Process title (lots of metadata in here!)
title, title_meta = row.attrs["title"].split("|")
# Name and capacity
        p = re.compile(r"(.*) (\d+\.\d+kW)")
title_match = p.match(title)
metadata_for_row["name"] = title_match.group(1)
metadata_for_row["capacity"] = title_match.group(2)
# Other key-value pairs:
key_value = title_meta.split("<br/>")
key_value_dict = {}
for line in key_value:
key_value_split = line.split(":")
key = key_value_split[0].strip()
# Some values have a colon(!)
value = ":".join(key_value_split[1:]).strip()
key_value_dict[key] = value
metadata_for_row.update(key_value_dict)
# Some cleaning
# Remove <img ...> from Location
location = metadata_for_row["Location"]
        p = re.compile(r"(<img .*\>)?(.*)")
img_groups = p.search(location).groups()
if img_groups[0] is not None:
metadata_for_row["Location"] = img_groups[1].strip()
metadata.append(metadata_for_row)
df = pd.DataFrame(metadata)
df["system_id"] = pd.to_numeric(df["system_id"])
df = df.set_index("system_id")
df.columns = [col_name.lower() for col_name in df.columns]
df.rename(
{
"location": "address",
"panels": "panel",
"array tilt": "array_tilt_degrees",
"capacity": "capacity_kW",
},
axis="columns",
inplace=True,
)
return df
def _remove_str_and_convert_to_numeric(series: pd.Series, string_to_remove: str) -> pd.Series:
series = series.str.replace(string_to_remove, "")
return pd.to_numeric(series)
def _convert_metadata_cols_to_numeric(df: pd.DataFrame) -> pd.DataFrame:
for col_name, string_to_remove in [
# ('array_tilt_degrees', '°'),
("capacity_kW", "kW"),
("average_efficiency_kWh_per_kW", "kWh/kW"),
]:
df[col_name] = _remove_str_and_convert_to_numeric(df[col_name], string_to_remove)
return df
def _process_output_col(soup: BeautifulSoup, index: Optional[Iterable] = None) -> pd.Series:
    outputs_col = soup.find_all(text=re.compile(r"\d Days"))
    duration = pd.Series(outputs_col, name="timeseries_duration", index=index)
    return duration
import json
import os
import copy
import numpy as np
import pandas as pd
import pytest
from ..utils import sanitize_dataframe, nested_update, prepare_spec
PANDAS_DATA = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
JSON_DATA = {
"values": [
{"x": 1, "y": 4},
{"x": 2, "y": 5},
{"x": 3, "y": 6}
]
}
VEGALITE_SPEC = {
"mark": "circle",
"encoding": {
"x": {"field": "x", "type": "quantitative"},
"y": {"field": "y", "type": "quantitative"}
}
}
def test_nested_update():
D = {'A': {'a': 4, 'b': 5}}
U = {'A': {'a': 40, 'c': 6}, 'B': {'foo': 'bar'}}
output = nested_update(D, U)
assert output is D
assert D == {'A': {'a': 40, 'b': 5, 'c': 6}, 'B': {'foo': 'bar'}}
def test_sanitize_dataframe():
# create a dataframe with various types
df = pd.DataFrame({'s': list('abcde'),
'f': np.arange(5, dtype=float),
'i': np.arange(5, dtype=int),
                       'd': pd.date_range('2012-01-01', periods=5, freq='H')})
def read_table(filename, datadir='./out', levels=None):
import pandas as pd
import os
file = os.path.join(datadir, filename)
if levels is None:
levels = 0
with open(file, 'r') as fd:
for i in fd.readline().split(','):
if i: break
else: levels += 1
df = pd.read_csv(file, index_col=list(range(levels)))
return df
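# Hedged usage sketch ('results.csv' and the 2-level index are illustrative only):
#   df = read_table('results.csv', datadir='./out', levels=2)
#   df.head()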
def import_datadict(datadir='./dat', filename='orb_datadict.txt'):
'''Imports the data dictionary for raw survey data.
See questions and field names in /doc/orb_questionnaire.pdf
Args:
datadir (:obj:`str`): name of directory where dictionary is located
filename (:obj:`str`): name of dictionary file (.txt)
Returns:
A :obj:`dict` containing keys:
        * 'dict' which maps to a :obj:`dict` of field names (:obj:`str`) mapped to a :obj:`dict` of values (:obj:`str`) mapped to recoded-values (:obj:`int`)
        * 'desc' which maps to a :obj:`dict` of field names (:obj:`str`) mapped to their descriptions (:obj:`str`)
'''
import os
filepath = os.path.join(datadir, filename)
with open(filepath, encoding='utf-8') as fd: data = fd.readlines()
data = [d for d in data if d.strip()]
for i in range(len(data)):
if data[i][0]!='\t': data[i] = [x.strip() for x in data[i].split(':')]
else: data[i] = [data[i].strip()]
dmap = {}
text = {}
curr = ''
multi = ''
for d in data:
if len(d)==1:
tmp = d[0].split('\t')
if len(tmp)==2:
if tmp[0][0]=='[':
curr = tmp[0][1:-1]
desc = tmp[1].strip()
text[curr] = desc
dmap[curr] = dmap[multi].copy()
else:
dmap[curr][tmp[1]] = int(tmp[0])
elif len(d)>1:
if d[0][0]=='[':
curr = d[0][1:-1]
desc = d[1].strip()
text[curr] = desc
dmap[curr] = {}
elif d[0]!='Values':
curr = d[0]
desc = d[1].strip()
text[curr] = desc
dmap[curr] = {}
multi = curr
#rectify some encoding issues
errata = {'DREL':{'Other Christian:':'Other Christian:\xa0'},
'DPOLUK':{'Other:':'Other:\xa0'}
}
#recoding extra variables of age categories and treatment (ANTI) or control (PRO) group
extras = {'imageseen':{'ANTI US':1, 'PRO US':0, 'ANTI UK':1, 'PRO UK':0},
'agerecode':{ '18-24':1, '25-34':2, '35-44':3, '45-54':4, '55-64':5, '65+':6}}
for key in errata:
if key in dmap:
for label in errata[key]:
if label in dmap[key]:
dmap[key][errata[key][label]] = dmap[key][label]
del dmap[key][label]
for key in extras: dmap[key] = extras[key]
return {'dict':dmap,'desc':text}
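# Hedged usage sketch (assumes the default ./dat/orb_datadict.txt is present):
def _example_inspect_datadict():
    meta = import_datadict()
    field = next(iter(meta['desc']))   # first field name in the dictionary
    # 'desc' maps a field to its question text; 'dict' maps its value labels to codes
    return field, meta['desc'][field], meta['dict'].get(field, {})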
def import_data(datadir='./dat', filename='orb_200918.sav'):
'''Reads a survey SPSS file and returns a :obj:`pd.DataFrame`.
See questions and field names in /doc/orb_questionnaire.pdf
Args:
datadir (:obj:`str`): name of directory where SPSS file is located
filename (:obj:`str`): name of the SPSS file (.sav)
Returns:
A :obj:`pd.DataFrame` containing field names as columns and record as rows, with values recoded
'''
import os
import pandas as pd
filepath = os.path.join(datadir, filename)
if filepath[-4:] == '.sav': df = pd.read_spss(filepath)
else: df = pd.read_csv(filepath)
for att in list(df):
try: df[att].str.strip('\xa0') #funny encoding issue
except: pass
return df
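# Hedged usage sketch: .sav files are read with pandas.read_spss (needs pyreadstat),
# anything else with pandas.read_csv.
#   raw = import_data(datadir='./dat', filename='orb_200918.sav')
#   raw.shape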
def transform_data(df, dd, country='UK', group=None, minimal=True, save=''):
'''Cleans, recodes and transforms raw survey data.
See questions and field names in /doc/orb_questionnaire.pdf
Args:
df (:obj:`pd.DataFrame`): contains raw survey data (see return value of ``import_data()``)
dd (:obj:`dict`): contains data dictionary for the raw survey data (see return value of ``import_datadict()``)
country (:obj:`str`=`{'UK', 'US'}`: name of country of interest; default=`UK`
group (:obj:`int`=`{0, 1}`): name of the experiment group, where `0` is for control and `1` is for treatment; default=`None` (imports all samples)
        minimal (:obj:`bool`): if `True`, keep only a core subset of variables (drops region, knowledge/attitude, trust and social-media detail fields); default=`True`
        save (:obj:`str`): filepath to save the processed data (as a .csv) and data dictionary (as a .pkl); default='' (does not save anything)
Returns:
A size-2 :obj:`tuple` containing
* A :obj:`pd.DataFrame` containing field names as columns and record as rows of the transformed data
* A :obj:`dict` of field names (:obj:`str`) mapped to a :obj:`dict` of recoded-values (:obj:`int`) mapped to value-names (:obj:`str`)
'''
#define all socio-demographic variables of interest
if minimal:
demo = {'UK': {'agerecode':'Age', 'DGEN':'Gender', 'DEDUUK':'Education_UK', 'DEMP':'Employment', 'DREL':'Religion',
'DPOLUK':'Political_UK', 'DETHUK':'Ethnicity_UK', 'DINCUK':'Income_UK'},
'USA': {'agerecode':'Age', 'DGEN':'Gender', 'DEDUUS':'Education_US', 'DEMP':'Employment', 'DREL':'Religion',
'DPOLUS':'Political_US', 'DETHUS':'Ethnicity_US', 'DINCUS':'Income_US'}}
else:
demo = {'UK': {'agerecode':'Age', 'DGEN':'Gender', 'DEDUUK':'Education_UK', 'DEMP':'Employment', 'DREL':'Religion',
'DPOLUK':'Political_UK', 'DETHUK':'Ethnicity_UK', 'DINCUK':'Income_UK', 'DGEOUK':'Region'},
'USA': {'agerecode':'Age', 'DGEN':'Gender', 'DEDUUS':'Education_US', 'DEMP':'Employment', 'DREL':'Religion',
'DPOLUS':'Political_US', 'DETHUS':'Ethnicity_US', 'DINCUS':'Income_US', 'DGEOUS':'Region'}}
#define recoding of socio-demographics
var_encoding = {'Gender':{(1,):'Male', (2,):'Female', (3, 4):'Other'},
'Education_US':{(1, 2):'Level-0', (3,):'Level-1', (4,):'Level-2', (5,):'Level-3', (6,):'Level-4', (7, 8):'Other'},
'Education_UK':{(1,):'Level-0', (2, 3):'Level-1', (5,):'Level-2', (6,):'Level-3', (7,):'Level-4', (4, 8, 9):'Other'},
'Employment':{(1, 2):'Employed', (3,):'Unemployed', (4,):'Student', (6,):'Retired', (5, 7, 8):'Other'},
'Religion':{(1, 2, 3):'Christian', (4,):'Jewish', (6,):'Muslim', (9,):'Atheist', (5, 7, 8, 10):'Other'},
'Political_US':{(1,):'Republican', (2,):'Democrat', (3, 4, 5):'Other'},
'Political_US_ind':{(1,):'Republican', (2,):'Democrat', (3, 4):'Other'},
'Political_UK':{(1,):'Conservative', (2,):'Labour', (3,):'Liberal-Democrat', (4,):'SNP', (5,6,7):'Other'},
'Ethnicity_US':{(1,):'White', (2,):'Hispanic', (3,):'Black', (5,):'Asian', (4, 6, 7, 8):'Other'},
'Ethnicity_UK':{(1, 2, 3):'White', (4, 11):'Black', (5, 6, 7, 8, 9, 10):'Asian', (12, 13):'Other'},
'Income_US':{(1,):'Level-0', (2, 3):'Level-1', (4, 5): 'Level-2', (6, 7, 8, 9):'Level-3', (10,):'Level-4', (11,):'Other'},
'Income_UK':{(1,):'Level-0', (2,):'Level-1', (3,):'Level-2', (4, 5,):'Level-3', (6, 7, 8, 9, 10):'Level-4', (11,):'Other'}
}
#rename other survey variables of interest to make them human-comprehendable
metrics_any = {'QINFr1': 'Nobody', 'QINFr2': 'Myself', 'QINFr3': 'Family inside HH', 'QINFr4': 'Family outside HH',
'QINFr5': 'Close friend', 'QINFr6': 'Colleague'}
metrics_knl = {'QKNLr1': 'Washing hands', 'QKNLr2': 'Staying indoors for Self', 'QKNLr3': 'Staying indoors for Others',
'QKNLr4': 'Spread before symptoms', 'QKNLr5': 'R-Number', 'QKNLr6': 'Treatments already exist', 'QKNLr7': 'Wearing masks'}
metrics_cov = {'QCOVVCIr3': 'COVID-19 Vax Importance', 'QCOVVCIr1': 'COVID-19 Vax Safety', 'QCOVVCIr2': 'COVID-19 Vax Efficacy',
'QCOVVCIr4': 'COVID-19 Vax Compatibility', 'QCOVVCIr5': 'Contract via COVID-19 Vax', 'QCOVVCIr6': 'COVID-19 Vax benefits outweigh risks'}
metrics_vci = {'QVCIr1': 'Vax Importance', 'QVCIr2': 'Vax Safety', 'QVCIr3': 'Vax Efficacy', 'QVCIr4': 'Vax Compatibility'}
metrics_aff = {'QCOVAFFr1': 'Mental health', 'QCOVAFFr2': 'Financial stability', 'QCOVAFFr3': 'Daily disruption', 'QCOVAFFr4': 'Social disruption'}
trust = {'UK': {'QSRCUKr1': 'Television', 'QSRCUKr2': 'Radio', 'QSRCUKr3': 'Newspapers', 'QSRCUKr4': 'Govt. Briefings',
'QSRCUKr5': 'National Health Authorities', 'QSRCUKr6': 'International Health Authorities', 'QSRCUKr7': 'Healthcare Workers',
'QSRCUKr8': 'Scientists', 'QSRCUKr9': 'Govt. Websites', 'QSRCUKr10': 'Social Media', 'QSRCUKr11': 'Celebrities', 'QSRCUKr12': 'Search Engines',
'QSRCUKr13': 'Family and friends', 'QSRCUKr14': 'Work Guidelines', 'QSRCUKr15': 'Other', 'QSRCUKr16': 'None of these'},
'USA': {'QSRCUSr1': 'Television', 'QSRCUSr2': 'Radio', 'QSRCUSr3': 'Newspapers', 'QSRCUSr4': 'White House Briefings', 'QSRCUSr5':'State Govt. Briefings',
'QSRCUSr6': 'National Health Authorities', 'QSRCUSr7': 'International Health Authorities', 'QSRCUSr8':'Healthcare Workers',
'QSRCUSr9': 'Scientists', 'QSRCUSr10': 'Govt. Websites', 'QSRCUSr11': 'Social Media', 'QSRCUSr12': 'Celebrities', 'QSRCUSr13': 'Search Engines',
'QSRCUSr14': 'Family and friends', 'QSRCUSr15': 'Work Guidelines', 'QSRCUSr16': 'Other', 'QSRCUSr17': 'None of these'}}
reasons = {'QCOVSELFWHYr1': 'Unsure if safe', 'QCOVSELFWHYr2': 'Unsure if effective', 'QCOVSELFWHYr3': 'Not at risk', 'QCOVSELFWHYr4': 'Wait until others',
'QCOVSELFWHYr5': "Won't be ill", 'QCOVSELFWHYr6': 'Other effective treatments', 'QCOVSELFWHYr7': 'Already acquired immunity',
'QCOVSELFWHYr8': 'Approval may be rushed', 'QCOVSELFWHYr9': 'Other', 'QCOVSELFWHYr10': 'Do not know'}
metrics_img = {'QPOSTVACX_Lr': 'Vaccine Intent', 'QPOSTBELIEFX_Lr': 'Agreement', 'QPOSTTRUSTX_Lr': 'Trust',
'QPOSTCHECKX_Lr': 'Fact-check', 'QPOSTSHARE_Lr': 'Share'}
social_atts = {'QSOCTYPr': 'used', 'QSOCINFr': 'to receive info', 'QCIRSHRr': 'to share info'}
if minimal:
other_atts = {'QSOCUSE':'Social media usage',
'QPOSTSIM':'Seen such online content',
'QCOVSELF':'Vaccine Intent for self (Pre)',
'QPOSTCOVSELF':'Vaccine Intent for self (Post)',
'QCOVOTH':'Vaccine Intent for others (Pre)',
'QPOSTCOVOTH':'Vaccine Intent for others (Post)',
'imageseen':'Group'}
else:
other_atts = {'QSHD':'Shielding',
'QSOCUSE':'Social media usage',
'QCOVWHEN':'Expected vax availability',
'QPOSTSIM':'Seen such online content',
'QPOSTFRQ':'Frequency of such online content',
'Q31b':'Engaged with such online content',
'QCOVSELF':'Vaccine Intent for self (Pre)',
'QPOSTCOVSELF':'Vaccine Intent for self (Post)',
'QCOVOTH':'Vaccine Intent for others (Pre)',
'QPOSTCOVOTH':'Vaccine Intent for others (Post)',
'imageseen':'Group'}
def expand_socc(code):
names = ['Facebook', 'Twitter', 'YouTube', 'WhatsApp', 'Instagram', 'Pinterest', 'LinkedIN', 'Other', 'None of these']
out = {}
for k in code:
for i in range(len(names)): out['%s%i'%(k, i+1)] = '%s %s'%(names[i], code[k])
return out
def demo_map(code):
fwd, bwd = {}, {}
for key in code:
fwd[key] = dict(zip(code[key].values(), range(1, len(code[key])+1)))
bwd[key] = dict(zip(range(1, len(code[key])+1), code[key].values()))
return fwd, bwd
def expand_imgc(code, num=5):
out = {}
for i in range(num):
for c in code:
out['%s%i'%(c, i+1)] = 'Image %i:%s'%(i+1, code[c])
return out
def expand_code(code):
new = {}
for key in code:
new[key] = {}
for k, v in code[key].items():
for i in k: new[key][i] = v
return new
metrics_img = expand_imgc(metrics_img)
social_atts = expand_socc(social_atts)
var_fwd, var_bwd = demo_map(var_encoding)
var_encoding = expand_code(var_encoding)
if minimal: atts = []
else: atts = list(metrics_any.keys())+list(metrics_knl.keys())+list(metrics_cov.keys())+list(metrics_vci.keys())+list(metrics_aff.keys())+list(social_atts.keys())
atts += list(trust[country].keys())+list(reasons.keys())+list(metrics_img.keys())
atts += list(other_atts.keys())+list(demo[country].keys())
def recode_treatment(x): return int('ANTI' in x)
def recode_bools(x): return int('NO TO:' not in x)
def recode_likert(x, inverse=False):
if inverse: m = {'Strongly agree': -2, 'Tend to agree': -1, 'Tend to disagree': 1, 'Strongly disagree': 2, 'Do not know': 0}
else: m = {'Strongly agree': 2, 'Tend to agree': 1, 'Tend to disagree': -1, 'Strongly disagree': -2, 'Do not know': 0}
return m[x]
def recode_likert_num(x, inverse=False):
if inverse: m = [-2,-1,0,1,2,0]
else: m = [2,1,0,-1,-2,0]
return m[x-1]
def recode_age(x):
if x>118: x = 118
return (x-18)/100
if group is None:
idx = df['country']==country
if country=='UK': idx = idx & ((df['imageseen']=='PRO UK')|(df['imageseen']=='ANTI UK')) #Country field is unreliable, has a bug
elif country=='USA': idx = idx & ((df['imageseen']=='PRO US')|(df['imageseen']=='ANTI US'))
else:
if country=='UK': idx = df['imageseen']==group+' UK'
elif country=='USA': idx = df['imageseen']==group+' US'
df_new = df.loc[idx,atts]
dd_new = {}
if not minimal:
for key in metrics_any:
df_new[key] = df_new[key].apply(recode_bools)
df_new.rename(columns={key:'Know anyone:%s'%metrics_any[key]}, inplace=True)
dd_new['Know anyone:%s'%metrics_any[key]] = {1:'Checked', 0:'Unchecked'}
for key in metrics_knl:
df_new[key] = df_new[key].apply(recode_likert)
df_new.rename(columns={key:'COVID-19 Knowledge:%s'%metrics_knl[key]}, inplace=True)
dd_new['COVID-19 Knowledge:%s'%metrics_knl[key]] = {2:'Strongly agree',1:'Tend to agree',0:'Do not know',-1:'Tend to disagree',-2:'Strongly disagree'}
for key in metrics_cov:
df_new[key] = df_new[key].apply(recode_likert)
df_new.rename(columns={key:'COVID-19 VCI:%s'%metrics_cov[key]}, inplace=True)
dd_new['COVID-19 VCI:%s'%metrics_cov[key]] = {2:'Strongly agree',1:'Tend to agree',0:'Do not know',-1:'Tend to disagree',-2:'Strongly disagree'}
for key in metrics_vci:
df_new[key] = df_new[key].apply(recode_likert)
df_new.rename(columns={key:'General VCI:%s'%metrics_vci[key]}, inplace=True)
dd_new['General VCI:%s'%metrics_vci[key]] = {2:'Strongly agree',1:'Tend to agree',0:'Do not know',-1:'Tend to disagree',-2:'Strongly disagree'}
for key in metrics_aff:
df_new[key] = df_new[key].apply(recode_likert)
df_new.rename(columns={key:'COVID-19 Impact:%s'%metrics_aff[key]}, inplace=True)
dd_new['COVID-19 Impact:%s'%metrics_aff[key]] = {2:'Strongly agree',1:'Tend to agree',0:'Do not know',-1:'Tend to disagree',-2:'Strongly disagree'}
for key in social_atts:
df_new[key] = df_new[key].apply(recode_bools)
df_new.rename(columns={key:'Social:%s'%social_atts[key]}, inplace=True)
dd_new['Social:%s'%social_atts[key]] = {1:'Checked', 0:'Unchecked'}
for key in trust[country]:
df_new[key] = df_new[key].apply(recode_bools)
df_new.rename(columns={key:'Trust:%s'%trust[country][key]}, inplace=True)
dd_new['Trust:%s'%trust[country][key]] = {1:'Checked', 0:'Unchecked'}
for key in reasons:
df_new[key] = df_new[key].apply(recode_bools)
df_new.rename(columns={key:'Reason:%s'%reasons[key]}, inplace=True)
dd_new['Reason:%s'%reasons[key]] = {1:'Checked', 0:'Unchecked'}
for key in metrics_img:
df_new.replace({key: dd['dict'][key]}, inplace=True)
df_new[key] = df_new[key].apply(recode_likert_num)
df_new.rename(columns={key:metrics_img[key]}, inplace=True)
dd_new[metrics_img[key]] = {2:'Strongly agree',1:'Tend to agree',0:'Do not know',-1:'Tend to disagree',-2:'Strongly disagree'}
df_new.replace({att: dd['dict'][att] for att in other_atts if att!='imageseen'}, inplace=True)
for att in other_atts:
df_new.rename(columns={att:other_atts[att]}, inplace=True)
if att!='imageseen': dd_new[other_atts[att]] = dict(zip(dd['dict'][att].values(), dd['dict'][att].keys()))
df_new.replace({key: dd['dict'][key] for key in demo[country] if key not in ['agerecode', 'DGEOUK', 'DGEOUS']}, inplace=True)
df_new.rename(columns=demo[country], inplace=True)
df_new.replace(var_encoding, inplace=True)
df_new.replace(var_fwd, inplace=True)
for att in demo[country]:
if demo[country][att] in var_fwd: dd_new[demo[country][att].split('_')[0]] = var_bwd[demo[country][att]]
else:
df_new.replace({demo[country][att]: dd['dict'][att]}, inplace=True)
dd_new[demo[country][att]] = {b: a for (a, b) in dd['dict'][att].items()}
df_new['Treatment'] = df_new['Group'].apply(recode_treatment)
del df_new['Group']
dd_new['Treatment'] = {0: 'Control', 1:'Treatment'}
df_new.rename(columns={i:i.split('_')[0] for i in list(df_new)}, inplace=True)
if save:
df_new.to_csv('%s.csv'%save)
import pickle, json
with open('%s.pkl'%save, 'wb') as fp: pickle.dump(dd_new, fp)
with open('%s.json'%save, 'w') as fp: json.dump(dd_new, fp)
return df_new, dd_new
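# Hedged end-to-end sketch chaining the import and transform steps
# ('orb_uk' is an illustrative output prefix, not a file shipped with the project):
def _example_prepare_uk_data():
    meta = import_datadict()
    raw = import_data()
    df_uk, dd_uk = transform_data(raw, meta, country='UK', save='orb_uk')
    return df_uk, dd_uk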
def import_transformed_data(filepath=''):
'''Reads the transformed survey data.
See questions and field names in /doc/orb_questionnaire.pdf, and refer to recoding in ``transform_data()``
Args:
filepath (:obj:`str`): filepath to read the processed data (without the .csv/.pkl suffix)
Returns:
A size-2 :obj:`tuple` containing
* A :obj:`pd.DataFrame` containing field names as columns and record as rows of the transformed data
* A :obj:`dict` of field names (:obj:`str`) mapped to a :obj:`dict` of recoded-values (:obj:`int`) mapped to value-names (:obj:`str`)
'''
import pandas as pd
import pickle
df = pd.read_csv('%s.csv'%filepath, index_col=0)
with open('%s.pkl'%filepath, 'rb') as fp: dd = pickle.load(fp)
return df, dd
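# Hedged usage sketch (assumes transform_data(..., save='orb_uk') was run beforehand):
#   df, dd = import_transformed_data('orb_uk')
#   dd['Treatment']    # {0: 'Control', 1: 'Treatment'}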
def get_socdem_counts(df, dd, by='Treatment'):
'''Returns counts of different socio-demographics broken down by a variable of interest.
Args:
df (:obj:`pd.DataFrame`): contains transformed data (see return value of ``transform_data()``, ``import_transformed_data()``)
dd (:obj:`dict`): contains data dictionary for transformed data (see return value of ``transform_data()``, ``import_transformed_data()``)
by (:obj:`str`): variable of interest; default='Treatment' (returns distribution of demographics across the 2 experiment groups)
Returns:
A :obj:`pd.DataFrame` with 2-level index whose outer index corresponds to soc-demo name, inner index to soc-demo value, and columns correspond to % and counts across categories of variable of interest
'''
import pandas as pd
atts = ['Age', 'Gender', 'Education', 'Employment', 'Religion', 'Political', 'Ethnicity', 'Income', 'Social media usage']
out = []
for idx, d in df.groupby(by):
out.append({})
for att in atts:
tmp = d[att].value_counts().loc[list(dd[att].keys())]
tmp.index = dd[att].values()
tmp.name = '%s (N)'%dd[by][idx]
tmp_perc = (100*tmp/tmp.sum()).round(1)
tmp_perc.name = '%s (%%)'%dd[by][idx]
out[-1][att] = pd.concat([tmp, tmp_perc], axis=1)
out[-1] = pd.concat(out[-1], axis=0)
out = pd.concat(out, axis=1)
return out
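# Hedged usage sketch: socio-demographic breakdown by experiment arm.
#   df, dd = import_transformed_data('orb_uk')
#   breakdown = get_socdem_counts(df, dd, by='Treatment')
#   breakdown.loc['Gender']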
def count_attribute(df, att, by_att=None, norm=False, where=None, dd={}, plot=False, att_lab='', by_att_lab='', title='', dpi=90):
'''Returns counts of any variable of interest, possibly conditioned on a second variable.
Args:
df (:obj:`pd.DataFrame`): contains transformed data (see return value of ``transform_data()``, ``import_transformed_data()``)
att (:obj:`str`): primary variable of interest
by_att (:obj:`str`): secondary variable of interest to condition counts of the first one on; default=`None`
norm (:obj:`bool`): whether to normalise the counts to indicate Pr(att); if by_att is not `None` then counts are normalized such that summing Pr(att|by_att) over by_att gives 1
        where (:obj:`list` of size-2 :obj:`tuple` of (:obj:`str`, :obj:`int`)): extra variables to subset the samples on where the tuple encodes a (variable-name, value) pair; default=`None`
dd (:obj:`dict`): contains data dictionary for transformed data (see return value of ``transform_data()``, ``import_transformed_data()``) for sorting counts by given variable-ordering; default={}
plot (:obj: `bool`): whether to plot the counts; default=`False`
att_lab (:obj:`str`): if plotting, label for y-axis (primary variable); default=`''`
by_att_lab (:obj:`str`): if plotting, label for legend (secondary variable); default=`''`
title (:obj:`str`): if plotting, plot title; default=`''`
dpi (:obj:`int`): if plotting, dpi for figure; default=90
Returns:
A :obj:`pd.DataFrame`/:obj:`pd.Series` whose index corresponds to att and columns to by_att
'''
if where is not None:
if not isinstance(where, list): where = [where]
for w in where:
if w[1] is None: df = df[df[w[0]].isnull()]
else: df = df[df[w[0]]==w[1]]
if by_att is None: counts = df[att].value_counts()
else:
from pandas import concat
groups = df[[att, by_att]].groupby(by_att)
names = list()
counts = list()
for name, group in groups:
names.append(name)
counts.append(group[att].value_counts())
counts = concat(counts, axis=1, keys=names, sort=True)
if dd:
if by_att in dd:
counts = counts[dd[by_att].keys()]
counts.rename(columns=dd[by_att], inplace=True)
counts.fillna(0, inplace=True)
if norm: counts = counts/counts.values.sum(0)
if dd:
if att in dd:
counts = counts.loc[dd[att].keys()]
counts.rename(index=dd[att], inplace=True)
if plot:
import matplotlib.pyplot as plt
from seaborn import countplot
plt.figure(dpi=dpi)
order, hue_order = None, None
if dd:
if by_att is not None and by_att in dd and att in dd:
df = df[[att,by_att]]
df = df.replace({att: dd[att], by_att: dd[by_att]})
hue_order = dd[by_att].values()
order = dd[att].values()
else:
if att in dd:
df = df[[att]]
df = df.replace({att: dd[att]})
order = dd[att].values()
if by_att is None: countplot(y=att, data=df, order=order)
else: countplot(y=att, hue=by_att, data=df, order=order, hue_order=hue_order)
plt.gca().set_xlabel('Count')
if att_lab: plt.gca().set_ylabel(att_lab)
if by_att_lab: plt.gca().get_legend().set_title(by_att_lab)
if not title and where is not None: title = ', '.join([str(w[0])+' = '+str(w[1]) for w in where])
plt.title(title)
plt.show()
return counts
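# Hedged usage sketch: normalised pre-exposure vaccine intent by experiment arm,
# restricted to respondents who use social media (the value code 1 is an assumption
# about the transformed data dictionary):
#   counts = count_attribute(df, 'Vaccine Intent for self (Pre)', by_att='Treatment',
#                            norm=True, where=[('Social media usage', 1)], dd=dd)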
def stats(fit, statistics=['mean', '2.5%', '97.5%', 'n_eff', 'Rhat'], digits=2, exclude_lp=True, save=''):
import pandas as pd
sumobj = fit.summary()
params = list(sumobj['summary_rownames'])
stats = list(sumobj['summary_colnames'])
out = pd.DataFrame(sumobj['summary'], index=params, columns=stats)
if exclude_lp: out.drop(index='lp__', inplace=True)
if statistics: out = out[statistics]
roundmap = dict([(key, 0) if key=='n_eff' else (key, digits) for key in out])
out = out.round(roundmap)
out = out.rename(columns={'mean':'Mean', 'n_eff':'ESS'})
if 'n_eff' in statistics: out = out.astype({'ESS':int})
if save: out.to_csv('%s.csv'%save)
return out
def stats_impact(fit, save=''):
import numpy as np
from .bayesoc import Outcome, Model
import pandas as pd
m = 2
k = 4
def foo(x): return np.diff(np.hstack([0, np.exp(x)/(1+np.exp(x)), 1]))
df = Model(Outcome()).get_posterior_samples(fit=fit)
prob = []
names = ['Yes, definitely', 'Unsure, lean yes', 'Unsure, lean no', 'No, definitely not']
for i in range(1, m+1):
alpha = df[['alpha[%i,%i]'%(i, j) for j in range(1, k)]].values
p = []
for a in alpha: p.append(foo(a))
p = np.vstack(p)
prob.append(pd.DataFrame(p, columns=names))
prob.append(prob[1]-prob[0])
groups = ['Pre Exposure', 'Post Exposure', 'Post-Pre']
out = pd.concat({groups[i]: prob[i].describe(percentiles=[0.025, 0.975]).T[['mean', '2.5%', '97.5%']] for i in range(len(groups))})
if save: out.to_csv('%s.csv'%save)
return out
def stats_impact_causal(fit, save=''):
import numpy as np
from .bayesoc import Outcome, Model
import pandas as pd
m = 2
k = 4
def foo(x): return np.diff(np.hstack([0, np.exp(x)/(1+np.exp(x)), 1]))
df = Model(Outcome()).get_posterior_samples(fit=fit)
prob_full, prob = [], []
dfs_full, dfs = [], [[], [], []]
names = ['Yes, definitely', 'Unsure, lean yes', 'Unsure, lean no', 'No, definitely not']
p_pre = [foo(x) for x in df[['alpha_pre[%i]'%j for j in range(1, k)]].values]
for i in range(1, m+1):
alpha_pre = df[['alpha_pre[%i]'%j for j in range(1, k)]].values
beta = np.hstack([np.zeros((df.shape[0],1)), df[['beta[%i]'%i]].values*df[['delta[%i,%i]'%(i, j) for j in range(1, k)]].values.cumsum(axis=1)])
alpha = df[['alpha[%i,%i]'%(i, j) for j in range(1, k)]].values
p_full, p = [], []
for (p_p, a, b) in zip(p_pre, alpha, beta):
p.append(np.array([foo(a-b_) for b_ in b]))
p_full.append((p_p[:,np.newaxis]*p[-1]).sum(axis=0))
prob_full.append(np.vstack(p_full))
prob.append(np.dstack(p))
dfs_full.append(pd.DataFrame(prob_full[-1], columns=names).describe(percentiles=[0.025, 0.975]).T[['mean', '2.5%', '97.5%']])
for j in range(k):
dfs[i-1].append(pd.DataFrame(prob[-1][j].T, columns=names).describe(percentiles=[0.025, 0.975]).T[['mean', '2.5%', '97.5%']])
diff_full = prob_full[1] - prob_full[0]
diff = prob[1]-prob[0]
dfs_full.append(pd.DataFrame(diff_full, columns=names).describe(percentiles=[0.025, 0.975]).T[['mean', '2.5%', '97.5%']])
dfs_full.append(pd.DataFrame(p_pre, columns=names).describe(percentiles=[0.025, 0.975]).T[['mean', '2.5%', '97.5%']])
for i in range(k):
dfs[-1].append(pd.DataFrame(diff[i].T, columns=names).describe(percentiles=[0.025, 0.975]).T[['mean', '2.5%', '97.5%']])
groups = ['Control', 'Treatment', 'Treatment-Control', 'Baseline']
out = pd.concat({groups[i]: pd.concat({names[j]: dfs[i][j] for j in range(len(names))}) for i in range(len(groups)-1)})
out_full = pd.concat({groups[i]: dfs_full[i] for i in range(len(groups))})
if save:
out.to_csv('%s_CATE.csv'%save)
out_full.to_csv('%s_ATE.csv'%save)
return {'ATE':out_full, 'CATE':out}
def multi2index(index, suffix=''):
att_cat = {}
for att in index:
if ':' in att[0]:
key, val = tuple(att[0].split(':'))
if key in att_cat:
att_cat[key]['idx'].append(att)
att_cat[key]['val'].append(val+suffix)
else: att_cat[key] = {'idx': [att], 'val': [val+suffix]}
else:
if att[0] in att_cat:
att_cat[att[0]]['idx'].append(att)
att_cat[att[0]]['val'].append(att[1]+suffix)
else: att_cat[att[0]] = {'idx': [att], 'val': [att[1]+suffix]}
return att_cat
def plot_stats(df, demos=False, oddsratio=True, fig=None, ax=None, ax_outer=None, fignum=1, figidx=0, figsize=2, stack_h=True,
title='', subtitle=[], xlabel='', subxlabel=[], tick_suffix='', label_suffix='', label_text='', ylabel=True, bars=False, factor=0.4,
signsize=10, ticksize=10, labelsize=10, titlesize=12, subtitlesize=12, hspace=0.3, wspace=0.05, widespace=1, align_labels=False,
title_loc=0.0, label_loc=0.0, highlight=False, show=True, capitalize=False, identical_counts=False, save='', fmt='pdf'):
if not isinstance(df, list): df = [df]
if isinstance(subtitle, str): subtitle = [subtitle]*len(df)
if isinstance(subxlabel, str): subxlabel = [subxlabel]*len(df)
import matplotlib.pyplot as plt
import numpy as np
cols = len(df)
dem = ['Age', 'Gender', 'Education', 'Employment', 'Religion', 'Political', 'Ethnicity', 'Income']
for i in range(cols):
atts = list(df[i].index)
if not isinstance(atts[0], tuple):
from pandas import concat
df[i] = concat({'tmp': df[i]})
atts = list(df[i].index)
ylabel = False
if not demos: atts = [i for i in atts if i[0] not in dem]
att_cat = multi2index(atts, tick_suffix)
rows = len(att_cat)
rows_per = [len(att_cat[k]['idx']) for k in att_cat]
if fig is None:
if fignum>1:
if stack_h:
fig = plt.figure(dpi=180, figsize=(figsize*(cols*fignum+(fignum-1)*widespace), factor*sum(rows_per)))
grid = fig.add_gridspec(nrows=1, ncols=fignum, wspace=widespace/cols)
ax = np.empty((rows, cols*fignum), dtype=object)
ax_outer = np.empty(fignum, dtype=object)
for i in range(fignum):
ax_outer[i] = fig.add_subplot(grid[i], frame_on=False, xticks=[], yticks=[])
inner = grid[i].subgridspec(nrows=rows, ncols=cols, hspace=hspace/sum(rows_per), wspace=wspace, height_ratios=rows_per)
for j in range(rows):
for k in range(cols): ax[j,i*cols+k] = fig.add_subplot(inner[j, k])
else:
fig = plt.figure(dpi=180, figsize=(figsize*cols, factor*(sum(rows_per)*fignum+(fignum-1)*widespace)))
grid = fig.add_gridspec(nrows=fignum, ncols=1, hspace=widespace/sum(rows_per))
ax = np.empty((rows*fignum, cols), dtype=object)
ax_outer = np.empty(fignum, dtype=object)
for i in range(fignum):
ax_outer[i] = fig.add_subplot(grid[i], frame_on=False, xticks=[], yticks=[])
inner = grid[i].subgridspec(nrows=rows, ncols=cols, hspace=hspace/sum(rows_per), wspace=wspace, height_ratios=rows_per)
for j in range(rows):
for k in range(cols): ax[i*rows+j,k] = fig.add_subplot(inner[j, k])
else:
fig, ax = plt.subplots(nrows=rows, ncols=cols, dpi=180, figsize=(figsize*cols, factor*sum(rows_per)), gridspec_kw={'hspace':hspace, 'wspace':wspace, 'height_ratios': rows_per}, squeeze=False)
ax_outer = [fig.add_subplot(111, frame_on=False, xticks=[], yticks=[])]
names = list(att_cat.keys())
def plot_bars(ax, tmp, ticks=[], right=False, base=False):
num = tmp.shape[0]
if highlight: colors = ['k' if tmp['2.5%'][tmp.index[i]]<oddsratio<tmp['97.5%'][tmp.index[i]] else 'r' for i in range(num)]
else: colors = 'k'
if base:
if bars: ax.barh(y=list(range(num+1, num+2-base, -1))+list(range(num+1-base, 0, -1)), width=tmp['mean'].values, xerr=np.vstack([(tmp['mean']-tmp['2.5%']).values, (tmp['97.5%']-tmp['mean']).values]), color='salmon')
else: ax.errorbar(x=tmp['mean'].values, y=list(range(num+1, num+2-base, -1))+list(range(num+1-base, 0, -1)), xerr=np.vstack([(tmp['mean']-tmp['2.5%']).values, (tmp['97.5%']-tmp['mean']).values]), ecolor=colors, marker='o', color='k', ls='')
ax.text(0, num+2-base, 'REFERENCE', size=ticksize)
else:
if bars: ax.barh(y=range(num, 0, -1), width=tmp['mean'].values, xerr=np.vstack([(tmp['mean']-tmp['2.5%']).values, (tmp['97.5%']-tmp['mean']).values]), color='salmon')
else: ax.errorbar(x=tmp['mean'].values, y=range(num, 0, -1), xerr=np.vstack([(tmp['mean']-tmp['2.5%']).values, (tmp['97.5%']-tmp['mean']).values]), ecolor=colors, marker='o', color='k', ls='')
if bars and highlight:
for i in range(num):
lb, ub = tmp['2.5%'][tmp.index[i]], tmp['97.5%'][tmp.index[i]]
if not (lb<oddsratio<ub):
if lb<0: ax.text(lb, num-i, '*', size=signsize)
else: ax.text(ub, num-i, '*', size=signsize)
else: ax.set_ylim(1-0.5, num+0.5)
if ticks:
t = range(1, num+1)
if capitalize: ticks = [x.capitalize() if not x.isupper() else x for x in ticks]
else: t = []
ax.axvline(oddsratio, ls=':', color='gray')
ax.yaxis.set_ticklabels(reversed(ticks))
ax.yaxis.set_ticks(t)
if right: ax.yaxis.tick_right()
for i in range(rows):
for j in range(cols):
if stack_h: u, v = i, cols*figidx+j
else: u, v = rows*figidx+i, j
if j==0:
plot_bars(ax[u,v], df[j].loc[att_cat[names[i]]['idx']], att_cat[names[i]]['val'])
if ylabel: ax[u,v].set_ylabel(names[i]+label_suffix, fontweight='bold', fontsize=labelsize)
elif j==cols-1:
try:
if identical_counts: c = list(map(lambda y: str(int(y)), df[0]['counts'][att_cat[names[i]]['idx']].values))
else: c = [', '.join(list(map(lambda y: str(int(y)), x))) for x in np.array([df[k]['counts'][att_cat[names[i]]['idx']].values for k in range(cols)]).T]
except: c = []
plot_bars(ax[u,v], df[j].loc[att_cat[names[i]]['idx']], c, right=True)
else: plot_bars(ax[u,v], df[j].loc[att_cat[names[i]]['idx']])
if i==0 and subtitle and (stack_h or not(figidx)): ax[u,v].set_title(subtitle[j], fontsize=subtitlesize)
if i==rows-1 and subxlabel: ax[u,v].set_xlabel(subxlabel[j], fontsize=labelsize)
if align_labels: fig.align_ylabels()
if title: ax_outer[figidx].set_title(title, fontweight='bold', fontsize=titlesize, y=1+title_loc)
if label_text and (stack_h or not(figidx)): ax_outer[figidx].text(1+label_loc, 1.01, label_text, size=subtitlesize, transform=ax_outer[figidx].transAxes)
if xlabel and (stack_h or figidx==fignum-1): ax_outer[figidx].set_xlabel(xlabel)
#plt.subplots_adjust(hspace=hspace, wspace=wspace)
#fig.tight_layout()
if save: plt.savefig('%s.%s'%(save, fmt), dpi=180, bbox_inches='tight')
if show: plt.show()
return fig, ax, ax_outer
def plot_causal_flow(df, title='', save='', fmt='pdf'):
def plot_sankey(group):
import plotly.graph_objects as go
src, tgt, val = [], [], []
labs = ['Yes, definitely', 'Unsure, lean yes', 'Unsure, lean no', 'No, definitely not']*2
for i in range(4):
for j in range(4, 8):
src.append(i)
tgt.append(j)
val.append(df['CATE'].loc[(group,labs[i],labs[j]), 'mean']*df['ATE'].loc[('Baseline',labs[i]), 'mean'])
fig = go.Figure(data=[go.Sankey(
node = dict(pad=15, thickness=40, line=dict(color='salmon', width=0.5), color='salmon',
label=['[%i] %s'%(round(100*y), x) for x, y in zip(labs[:4], df['ATE'].loc['Baseline', 'mean'])]+['%s [%i]'%(x, round(100*y)) for x, y in zip(labs[4:], df['ATE'].loc[group, 'mean'])]),
link = dict(source=src, target=tgt, value=val))])
fig.update_layout(title_text='%s %s'%(title, group), font_size=20)
fig.show()
if save: fig.write_image('%s_%s.%s'%(save, group, fmt), scale=4)
plot_sankey('Treatment')
plot_sankey('Control')
def stats_socdem(fit, dd, df, atts=[], causal=True, group=None, oddsratio=True, save=''):
import numpy as np
import pandas as pd
from .bayesoc import Dim, Outcome, Model
import matplotlib.pyplot as plt
cats = ['Age', 'Gender', 'Education', 'Employment', 'Religion', 'Political', 'Ethnicity', 'Income']
if isinstance(atts, str): atts = [atts]
for att in atts: cats += [x for x in list(df) if x[:len(att)]==att]
outs = ['Vaccine Intent for self (Pre)', 'Vaccine Intent for self (Post)', 'Treatment']
bases = [1]*len(cats) #default reference category for all socio-demographics
bases[2] = 5 #reference category for education
bases[7] = 5 #reference category for income
tmp = Model(Outcome())
if causal:
stats = {'Control':{}, 'Treatment':{}, 'Treatment-Control':{}}
counts = {'Control':{}, 'Treatment':{}, 'Treatment-Control':{}}
else:
stats = {}
counts = {}
def foo(x): return np.exp(x)
def getcounts(cat, base, group=None):
vals = np.sort(list(dd[cat].keys()))
if group is None: counts = df[cat].value_counts().loc[vals]
else: counts = df[df['Treatment']==group][cat].value_counts().loc[vals]
counts.index = [dd[cat][k] for k in vals]
return counts.iloc[list(range(base-1))+list(range(base, len(vals)))]
def summarize(stats, counts):
if oddsratio: stats = stats.apply(foo)
stats = stats.describe(percentiles=[0.025, 0.975]).T[['mean', '2.5%', '97.5%']]
stats.drop('chain', inplace=True)
stats.index = counts.index
return stats
def mergecats(stats, counts):
stats = pd.concat(stats)
counts = pd.concat(counts)
counts.name = 'counts'
return stats.merge(counts.to_frame(), left_index=True, right_index=True)
for cat, base in zip(cats, bases):
dim = Dim(name=cat)
if causal:
for idx, key in zip([1, 2], ['Control', 'Treatment']):
counts[key][cat] = getcounts(cat, base, idx-1)
stats[key][cat] = tmp.get_posterior_samples(pars=['beta_%s[%i,%i]'%(dim.name, idx, i+1) for i in range(len(dd[cat]))], contrast='beta_%s[%i,%i]'%(dim.name, idx, base), fit=fit)
stats[key][cat].columns = ['beta_%s[%i]'%(dim.name, i+1) for i in range(len(dd[cat])) if i!=base-1]+['chain']
stats['Treatment-Control'][cat] = stats['Treatment'][cat] - stats['Control'][cat]
counts['Treatment-Control'][cat] = counts['Treatment'][cat] + counts['Control'][cat]
for key in stats: stats[key][cat] = summarize(stats[key][cat], counts[key][cat])
else:
counts[cat] = getcounts(cat, base, group)
stats[cat] = tmp.get_posterior_samples(pars=['beta_%s[%i]'%(dim.name, i+1) for i in range(len(dd[cat]))], contrast='beta_%s[%i]'%(dim.name, base), fit=fit)
stats[cat] = summarize(stats[cat], counts[cat])
if causal: out = pd.concat({key: mergecats(stats[key], counts[key]) for key in stats})
else: out = mergecats(stats, counts)
if save: out.to_csv('%s.csv'%save)
return out
def mean_image_perceptions(df, melt=True, save=''):
import pandas as pd
metrics = ['Vaccine Intent', 'Agreement', 'Trust', 'Fact-check', 'Share']
gmap = {0: 'Control', 1:'Treatment'}
vmap = {i-2: 'p[%i]'%(i+1) for i in range(5)}
out = {}
for group, d in df.groupby('Treatment'):
scores_all = {}
for i in range(5):
scores = {}
for m in metrics:
tmp = d['Image %i:%s'%(i+1, m)].value_counts().sort_index()
tmp = tmp/tmp.sum()
scores[m] = tmp.rename(vmap)
if melt: scores_all[i+1] = pd.concat(scores).to_frame('mean')
else: scores_all[i+1] = pd.DataFrame(scores)
out[gmap[group]] = pd.concat(scores_all)
    out = pd.concat(out)
    if save: out.to_csv('%s.csv'%save)
    return out
import pandas as pd
import sqlite3
from sqlite3 import Error as SQLError
from datetime import datetime
import re
import csv
import os
import json
from fuzzywuzzy import fuzz
import sys
sys.path.insert(1, "../")
from settings import DB_FP, CORPUS_META
sql_get_members = """
SELECT c.PimsId, m.name, c.constituency
FROM members as m
INNER JOIN member_constituency as c
ON c.PimsId = m.PimsId
WHERE (((? BETWEEN c.start AND c.end) AND NOT (c.end IS NULL))
OR ((? >= c.start) AND (c.end IS NULL)));""".strip()
sql_get_most_recent_constituency = """
SELECT c.PimsId, m.name, c.constituency, max(c.start)
FROM members as m
INNER JOIN member_constituency as c
ON c.PimsId = m.PimsId
GROUP BY c.PimsId;""".strip()
def create_connection(filename):
""" create a database connection to a database that resides
in the memory
"""
connection = None
try:
connection = sqlite3.connect(filename)
print(sqlite3.version)
except SQLError as e:
print(e)
if connection:
connection.close()
print("Error occurred creating connection")
connection = None
return connection
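# Hedged usage sketch (assumes DB_FP points at the members database the queries
# above were written for):
def _example_members_on_date(date_str="2019-06-01"):
    conn = create_connection(DB_FP)
    if conn is None:
        return None
    # sql_get_members takes the same date twice, once per '?' placeholder
    members = pd.read_sql_query(sql_get_members, conn, params=(date_str, date_str))
    conn.close()
    return members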
# This is the old function I was using for processing ref stances of mps
# It attempts to match the names of the mps.
# I have since realised it's probably a better idea to simple do it on consituency.
def process_ref_using_names(mp_ref_fp, conn):
members = pd.read_sql_query("SELECT * FROM members", conn)
mp_ref = pd.read_csv(mp_ref_fp, header=0)
mp_ref = mp_ref.loc[:, ["Title", "First_Name", "Surname", "Constituency", "Stance"]]
pimsIds = []
for i, mp in mp_ref.iterrows():
loose_reg_name = ".* {0}".format(mp.Surname)
tight_reg_name = "(({0}|Mr|Ms|Dr|Lord|Mrs|Sir|Baroness) )?{1} {2}".format(mp.Title, mp.First_Name, mp.Surname)
loose_found = members[members['name'].str.match(loose_reg_name)]
if len(loose_found) == 1:
print("{0} - {1}".format(loose_reg_name, loose_found.iloc[0]['name']))
curr_pims_id = loose_found.iloc[0]['PimsId']
else:
tight_found = members[members['name'].str.match(tight_reg_name)]
if len(tight_found) == 1:
print("{0} - {1}".format(tight_reg_name, tight_found.iloc[0]['name']))
curr_pims_id = tight_found.iloc[0]['PimsId']
else:
for ci, curr in loose_found.iterrows():
found_constituency = loose_found[loose_found["curr_constituency"]==mp['Constituency']]
if len(found_constituency) == 1:
print("{0} - {1}".format(tight_reg_name, found_constituency['name'].iloc[0]))
curr_pims_id = found_constituency.iloc[0]['PimsId']
else:
print("{0} - {1}".format(tight_reg_name, loose_found))
curr_pims_id = None
pimsIds.append(curr_pims_id)
mp_ref['PimsId'] = pimsIds
mp_ref.to_csv("MP_Stance_Thing.csv")
def process_ref(fp, conn):
stances = | pd.read_csv(fp, header=0) | pandas.read_csv |
import requests
from bs4 import BeautifulSoup
from time import sleep
import time
from datetime import datetime
import itertools
import inspect
import pandas as pd
import numpy as np
import re
from classes import LRTlinks
startTime = time.time()
# To do
# rename the Task class
# create a class for the scraping of links
# how to get the phone number and agent etc? use JSON? https://stackoverflow.com/questions/67515161/beautiful-soup-returns-an-empty-string-when-website-has-text
# error handling, add some try, except clauses
# when running for full list, script hung after 1800++ files
# how to add Task class to classes.py? problem with detail_dict
# make sure works for other train stations too etc gombak. alter the code for general use cases
class Task():
""" This is a Task class to carry out all the scraping functions """
def rent_id(self): # START: Standard data on each property page
if property_url != '':
rent_id.append(property_url.split('/')[-2])
print('Rent ID : ' + 'Success')
else:
print('Rent ID : '+'NaN')
rent_id.append('NaN')
def prop_url(self):
if property_url != '':
prop_url.append(property_url)
print('Property URL : ' + 'Success')
else:
print('Property URL : '+'NaN')
prop_url.append('NaN')
def title(self):
if soup.find_all('title')[0].text != '':
title.append(soup.find_all('title')[0].text)
print('Title : ' + 'Success')
else:
print('Title : '+'NaN')
title.append('NaN')
def property_price(self):
if soup.find_all('div', class_='ListingPrice__Price-cYBbuG cspQqH property-price')[
0].text != '':
str_price = soup.find_all('div', class_='ListingPrice__Price-cYBbuG cspQqH property-price')[
0].text.split(' ')[2].replace(',', '')
property_price.append(
int(''.join(itertools.takewhile(str.isdigit, str_price))))
print('Property Price : ' + 'Success')
else:
print('Property Price : '+'NaN')
            property_price.append('NaN')
def property_summary(self):
if soup.find_all('h1', class_='PropertySummarystyle__ProjectTitleWrapper-kAhflS PNQmp')[0].text != '':
property_summary.append(soup.find_all(
'h1', class_='PropertySummarystyle__ProjectTitleWrapper-kAhflS PNQmp')[0].text)
print('Property Summary : ' + 'Success')
else:
print('Property Summary : '+'NaN')
property_summary.append('NaN')
def property_address(self):
if soup.find_all(
'span', class_='property-address rent-default')[0].text != '':
property_address.append(soup.find_all(
'span', class_='property-address rent-default')[0].text)
print('Property Address : ' + 'Success')
else:
print('Property Address : '+'NaN')
property_address.append('NaN')
def built_up(self):
if soup.find_all(
'li', class_='PropertySummarystyle__AreaInfoItem-NjZCY dUovgc')[0].text != '':
built_up.append(soup.find_all(
'li', class_='PropertySummarystyle__AreaInfoItem-NjZCY dUovgc')[0].text.split(': ')[1])
print('Built Up : ' + 'Success')
else:
print('Built Up : '+'NaN')
built_up.append('NaN')
def land_area_sq_ft(self):
if soup.find_all(
'li', class_='PropertySummarystyle__AreaInfoItem-NjZCY dUovgc')[1].text != '':
land_area_sq_ft.append(soup.find_all(
'li', class_='PropertySummarystyle__AreaInfoItem-NjZCY dUovgc')[1].text.split(': ')[1])
print('Land Area_sq_ft : ' + 'Success')
else:
print('Land Area_sq_ft : '+'NaN')
land_area_sq_ft.append('NaN')
def property_details(self):
if str(soup.find_all('pre')) != '':
property_details.append(str(soup.find_all('pre')))
print('Property Details : ' + 'Success')
else:
print('Property Details : '+'NaN')
property_details.append('NaN')
def property_features(self): # END: Standard data on each property page
if [i.text for i in soup.find_all('div', class_='attribute-title-container')] != []:
property_features.append([i.text for i in soup.find_all(
'div', class_='attribute-title-container')])
print('Property Features : ' + 'Success')
else:
print('Property Features : '+'NaN')
property_features.append('NaN')
def property_type(self): # START: Data in property details container (can vary)
if 'Property Type' in details_dict:
temp = details_dict['Property Type']
property_type.append(soup.find_all(
'div', class_='PropertyDetailsListstyle__AttributeItemData-jpQfWB HUTFZ')[temp].text)
print('Property Type : ' + 'Success')
else:
print('Property Type : '+'NaN')
property_type.append('NaN')
def land_title(self):
if 'Land Title' in details_dict:
temp = details_dict['Land Title']
land_title.append(soup.find_all(
'div', class_='PropertyDetailsListstyle__AttributeItemData-jpQfWB HUTFZ')[temp].text)
print('Land Title : ' + 'Success')
else:
print('Land Title : '+'NaN')
land_title.append('NaN')
def property_title_type(self):
if 'Property Title Type' in details_dict:
temp = details_dict['Property Title Type']
property_title_type.append(soup.find_all(
'div', class_='PropertyDetailsListstyle__AttributeItemData-jpQfWB HUTFZ')[temp].text)
print('Property Title Type : '+'Success')
else:
print('Property Title Type : '+'NaN')
property_title_type.append('NaN')
def tenure(self):
if 'Tenure' in details_dict:
temp = details_dict['Tenure']
tenure.append(soup.find_all(
'div', class_='PropertyDetailsListstyle__AttributeItemData-jpQfWB HUTFZ')[temp].text)
print('Tenure : ' + 'Success')
else:
print('Tenure : ' + 'NaN')
tenure.append('NaN')
def built_up_size_sq_ft(self):
if 'Built-up Size' in details_dict:
temp = details_dict['Built-up Size']
built_up_size_sq_ft.append(soup.find_all(
'div', class_='PropertyDetailsListstyle__AttributeItemData-jpQfWB HUTFZ')[temp].text)
print('Built-up Size : ' + 'Success')
else:
print('Built-up Size : ' + 'NaN')
built_up_size_sq_ft.append('NaN')
def built_up_price_per_sq_ft(self):
if 'Built-up Price' in details_dict:
temp = details_dict['Built-up Price']
built_up_price_per_sq_ft.append(soup.find_all(
'div', class_='PropertyDetailsListstyle__AttributeItemData-jpQfWB HUTFZ')[temp].text)
print('Built-up Price : ' + 'Success')
else:
print('Built-up Price : ' + 'NaN')
built_up_price_per_sq_ft.append('NaN')
def furnishing(self):
if 'Furnishing' in details_dict:
temp = details_dict['Furnishing']
furnishing.append(soup.find_all(
'div', class_='PropertyDetailsListstyle__AttributeItemData-jpQfWB HUTFZ')[temp].text)
print('Furnishing : ' + 'Success')
else:
print('Furnishing : ' + 'NaN')
furnishing.append('NaN')
def occupancy(self):
if 'Occupancy' in details_dict:
temp = details_dict['Occupancy']
occupancy.append(soup.find_all(
'div', class_='PropertyDetailsListstyle__AttributeItemData-jpQfWB HUTFZ')[temp].text)
print('Occupancy : ' + 'Success')
else:
print('Occupancy : ' + 'NaN')
occupancy.append('NaN')
def unit_type(self):
if 'Unit Type' in details_dict:
temp = details_dict['Unit Type']
unit_type.append(soup.find_all(
'div', class_='PropertyDetailsListstyle__AttributeItemData-jpQfWB HUTFZ')[temp].text)
print('Unit Type : ' + 'Success')
else:
print('Unit Type : ' + 'NaN')
unit_type.append('NaN')
def facing_direction(self):
if 'Facing Direction' in details_dict:
temp = details_dict['Facing Direction']
facing_direction.append(soup.find_all(
'div', class_='PropertyDetailsListstyle__AttributeItemData-jpQfWB HUTFZ')[temp].text)
print('Facing Direction : '+'Success')
else:
print('Facing Direction : '+'NaN')
facing_direction.append('NaN')
def reference(self):
if 'Reference No.' in details_dict:
temp = details_dict['Reference No.']
reference.append(soup.find_all(
'div', class_='PropertyDetailsListstyle__AttributeItemData-jpQfWB HUTFZ')[temp].text)
print('Reference : ' + 'Success')
else:
print('Reference : ' + 'NaN')
reference.append('NaN')
def available_date(self):
if 'Available Date' in details_dict:
temp = details_dict['Available Date']
available_date.append(soup.find_all(
'div', class_='PropertyDetailsListstyle__AttributeItemData-jpQfWB HUTFZ')[temp].text)
print('Available Date : ' + 'Success')
else:
print('Available Date : ' + 'NaN')
available_date.append('NaN')
def posted_date(self): # END: Data in property details container (can vary)
if 'Posted Date' in details_dict:
temp = details_dict['Posted Date']
posted_date.append(soup.find_all(
'div', class_='PropertyDetailsListstyle__AttributeItemData-jpQfWB HUTFZ')[temp].text)
print('Posted Date : ' + 'Success')
else:
print('Posted Date : ' + 'NaN')
posted_date.append('NaN')
# def get_method(self, method_name): # method1 = callbyname.get_method(method_name)
# method = getattr(self, method_name)
# return method()
# ampang-park-89
# kl-sentral-438
# USER INPUT REQUIRED
location_of_interest = 'ss-15-316' # 'usj-21-531'
num_pages_to_scrape = 50 # 20 results per page
print('\n| iProperty.com.my Scraper |')
# Use the LRTlinks method
# choose which Train Station ID here
l = LRTlinks(location_of_interest)
# choose how many pages to scrape (limit = 100)
l.get_links(num_pages_to_scrape)
file_to_be_read = 'rent-' + location_of_interest + '-property-links.csv'
data_links = | pd.read_csv(file_to_be_read) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
import pandas as pd
from sklearn import preprocessing
from pandas.plotting import scatter_matrix
from matplotlib import pyplot
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier
import joblib
# import numpy as np
# In[2]:
df=pd.read_csv("data/data500.csv")
df.head(30)
# In[3]:
df.columns=["Name","gender","marks_1","caste","marks_2","Disabled","Attendance","marks_3","marks_4","marks_5","marks_6","Dropout"]
# In[4]:
df
# In[5]:
df_final=df[["Name","gender","caste","Disabled","Attendance","marks_1","marks_2","marks_3","marks_4","marks_5","marks_6","Dropout"]]
# In[6]:
df_final
# In[7]:
class MultiColumnLabelEncoder:
def __init__(self, columns=None):
self.columns = columns # array of column names to encode
def fit(self, X, y=None):
        return self  # nothing to fit here; returning self keeps the sklearn interface
def transform(self, X):
'''
Transforms columns of X specified in self.columns using
LabelEncoder(). If no columns specified, transforms all
columns in X.
'''
output = X.copy()
if self.columns is not None:
for col in self.columns:
output[col] = LabelEncoder().fit_transform(output[col])
else:
for colname, col in output.iteritems():
output[colname] = LabelEncoder().fit_transform(col)
return output
def fit_transform(self, X, y=None):
return self.fit(X, y).transform(X)
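# Quick illustration with made-up data: each listed column is label-encoded
# independently, with integer codes assigned in sorted order of the category values.
# MultiColumnLabelEncoder(columns=['gender']).fit_transform(pd.DataFrame({'gender': ['M', 'F', 'M']}))
# # -> the gender column becomes [1, 0, 1] because 'F' sorts before 'M'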
# In[57]:
def generateModel(file, _id):
dataset = MultiColumnLabelEncoder(columns=['gender', 'caste']).fit_transform(file)
array = dataset.values
X = array[:, 1:10]
Y = array[:, 11]
# print(Y)
Y = Y.astype('int')
model = DecisionTreeClassifier()
model.fit(X, Y)
filename = 'schoolModels/' + _id + '.pkl'
joblib.dump(model, filename)
return True
# In[20]:
def get_prediction(data, _id):
filename = 'schoolModels/' + _id + '.pkl'
predict_from_joblib = joblib.load(filename)
X_predict = {}
for key, value in data.items():
X_predict[key] = [value]
X_predict = pd.DataFrame(data, index=[0])
le = preprocessing.LabelEncoder()
X_predict['gender'] = le.fit_transform(X_predict['gender'])
X_predict['caste'] = le.fit_transform(X_predict['caste'])
X_predict.to_numpy()
prediction = predict_from_joblib.predict(X_predict)
return prediction
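# Hypothetical call (keys are illustrative; they must line up with the nine
# feature columns used by the slicing in generateModel, i.e. gender through marks_5):
# get_prediction({'gender': 'M', 'caste': 'OBC', 'Disabled': 0, 'Attendance': 85,
# 'marks_1': 55, 'marks_2': 60, 'marks_3': 58, 'marks_4': 62,
# 'marks_5': 59}, '<school_id>')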
# In[58]:
df_final = MultiColumnLabelEncoder(columns=['gender', 'caste']).fit_transform(df_final)
array = df_final.values
X = array[:, 1:10]
Y = array[:, 11]
# print(Y)
Y = Y.astype('int')
# In[59]:
X
# In[60]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=0)
# In[61]:
X_train
# In[62]:
y_test,len(y_test)
# In[63]:
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
# In[64]:
preds=model.predict(X_test)
preds
# In[65]:
y_test
# In[66]:
df_pred= | pd.DataFrame({"original":y_test,"predictions":preds}) | pandas.DataFrame |
import os
import pytz
import logging
import pymongo
import multiprocessing
import pandas as pd
from datetime import datetime
from collections import Counter, defaultdict
from typing import List, Set, Tuple
# For non-docker use, change to your url (e.g., localhost:27017)
MONGO_URL = "mongodb://localhost:27017"
CACHE_DIR = "cache/"
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
def get_data() -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""
Returns (projects, libraries, migrations, rules, dep_changes).
This function should be used get the required data for analysis,
to avoid data scope inconsistencies in different analysis modules.
"""
projects = select_projects_from_libraries_io()
libraries = select_libraries()
migrations = select_migrations()
lib_names = set(libraries["name"])
rules = select_rules(lib_names)
dep_changes = select_dependency_changes_all(lib_names)
migrations.startCommitTime = | pd.to_datetime(migrations.startCommitTime, utc=True) | pandas.to_datetime |
import json
import pandas as pd
from pprint import pprint
def reader(reader_csv="reader_results.csv"):
model_rename_map = {
"deepset/roberta-base-squad2": "RoBERTa",
"deepset/minilm-uncased-squad2": "MiniLM",
"deepset/bert-base-cased-squad2": "BERT base",
"deepset/bert-large-uncased-whole-word-masking-squad2": "BERT large",
"deepset/xlm-roberta-large-squad2": "XLM-RoBERTa",
}
column_name_map = {"f1": "F1", "passages_per_second": "Speed", "reader": "Model"}
df = pd.read_csv(reader_csv)
df = df[["f1", "passages_per_second", "reader"]]
df["reader"] = df["reader"].map(model_rename_map)
df = df[list(column_name_map)]
df = df.rename(columns=column_name_map)
ret = [dict(row) for i, row in df.iterrows()]
print("Reader overview")
print(json.dumps(ret, indent=4))
return ret
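# Note: reader_results.csv is expected to provide at least the columns 'f1',
# 'passages_per_second' and 'reader' (full model names), which are mapped to
# 'F1', 'Speed' and 'Model' in the overview printed above.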
def retriever(index_csv="retriever_index_results.csv", query_csv="retriever_query_results.csv"):
column_name_map = {
"model": "model",
"n_docs": "n_docs",
"docs_per_second": "index_speed",
"queries_per_second": "query_speed",
"map": "map",
}
name_cleaning = {
"dpr": "DPR",
"elastic": "BM25",
"elasticsearch": "Elasticsearch",
"faiss": "FAISS",
"faiss_flat": "FAISS (flat)",
"faiss_hnsw": "FAISS (HNSW)",
"milvus_flat": "Milvus (flat)",
"milvus_hnsw": "Milvus (HNSW)",
"sentence_transformers": "Sentence Transformers",
"opensearch_flat": "OpenSearch (flat)",
"opensearch_hnsw": "OpenSearch (HNSW)",
}
index = | pd.read_csv(index_csv) | pandas.read_csv |
# -*- coding: utf-8 -*-
# This code is initially based on the Kaggle kernel from <NAME>, which can be found in the following link
# https://www.kaggle.com/neviadomski/how-to-get-to-top-25-with-simple-model-sklearn/notebook
# and the Kaggle kernel from <NAME>, which can be found in the link below
# https://www.kaggle.com/pmarcelino/comprehensive-data-exploration-with-python/notebook
# Also, part of the preprocessing has been inspired by this kernel from Serigne
# https://www.kaggle.com/serigne/stacked-regressions-top-4-on-leaderboard
# Adding needed libraries and reading data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import ensemble, tree, linear_model, preprocessing
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.utils import shuffle
from scipy import stats
from scipy.stats import norm, skew, boxcox
from scipy.special import boxcox1p
import xgboost as xgb
import warnings
warnings.filterwarnings('ignore')
train = pd.read_csv("../../train.csv")
test = | pd.read_csv("../../test.csv") | pandas.read_csv |
import pandas as pd
import json
import io
from datetime import datetime, timedelta
| pd.set_option('display.max_rows', None) | pandas.set_option |
# authors: <NAME>, <NAME>, <NAME>, <NAME>
# date: 2020-11-25
"""Fits a SVR model on the preprocessed data from the IMDB review data set.
Saves the model with optimized hyper-parameters, as well as the search result.
Usage:
imdb_rating_predict_model.py <train> <out>
imdb_rating_predict_model.py (-h | --help)
Options:
<train> Path to the training data file
<out> Path to the directory where output should be written to
-h, --help Display help
"""
import os
from pathlib import Path
import joblib
import numpy as np
import pandas as pd
from docopt import docopt
from sklearn.compose import ColumnTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import (
OrdinalEncoder,
StandardScaler,
)
from sklearn.linear_model import Ridge
numeric_features = ['n_words']
text_feature = 'Text'
ordinal_features = ['sentiment']
drop_features = ['Id', 'Author']
target = 'Rating'
def main(train, out):
# Load data set
train_df = pd.read_csv(train)
X_train, y_train = train_df.drop(columns=[target] + drop_features), train_df[target]
# Create ML pipeline
preprocessor = ColumnTransformer(
transformers=[
('text', CountVectorizer(max_features=20_000, stop_words='english'), text_feature),
('num', StandardScaler(), numeric_features),
('ord', OrdinalEncoder(categories=[['neg', 'compound', 'neu', 'pos']]), ordinal_features)
]
)
ml_pipe = Pipeline(
steps=[
('prepro', preprocessor),
('ridge', Ridge())
]
)
# Tune hyper-parameters
print('Searching for hyper-parameters')
param_grid = {
'ridge__alpha': np.arange(500, 1000, 50)
}
hyper_parameters_search = GridSearchCV(ml_pipe,
param_grid=param_grid,
n_jobs=-1,
scoring='r2',
return_train_score=True,
verbose=1)
hyper_parameters_search.fit(X_train, y_train)
print(f'R2 score for best model: {hyper_parameters_search.best_score_}')
# Write hyper-parameter search result to csv
hyper_parameters_search_result = | pd.DataFrame(hyper_parameters_search.cv_results_) | pandas.DataFrame |
'''
This code will clean the OB datasets and combine all the cleaned data into one
Dataset name: O-27-Da Yan
semi-automated code, needs some manual work. LOL But God is so good to me.
1. 9 different buildings in this dataset, and each building has different rooms
2. each room has different window, door, ac, indoor, outdoor info
3. I processed building A to F by hand, then figured out that I can rename the files first, then use code to process
4. rename the files by type and number, such as window1, indoor1, ac1, door1, etc.
5. the code automated buildings G, H, I
6. the folder has multiple types of data, csv and xlsx; figure out the file type, then read into pandas
7. concat the outdoor datetime and temperature with the ac data, then judge if the ac is on or off
'''
import os
import glob
import string
import datetime
import pandas as pd
import matplotlib.pyplot as plt
# specify the path
data_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-27-Da Yan/_yapan_processing/processed/'
template_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/OB Database Consolidation/Templates/'
save_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-27-Da Yan/_yapan_processing/_sql/'
# generate the name of different building folders
alphabet_string = string.ascii_uppercase
alphabet_list = list(alphabet_string)
building_names = alphabet_list[:9]
''' 1. process data by folders '''
begin_time = datetime.datetime.now()
# create dataframe to store the data
combined_window = pd.DataFrame()
combined_door = pd.DataFrame()
combined_hvac = pd.DataFrame()
combined_indoor = pd.DataFrame()
combined_outdoor = pd.DataFrame()
''' process outdoor data '''
print(f'Process outdoor data')
os.chdir(data_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
outdoor_files = list(filter(lambda name: 'outdoor_building' in name, root_files)) # filter out the door status files
combined_outdoor = pd.concat([pd.read_csv(f) for f in outdoor_files])
''' manual processed data '''
print(f'Process manually processed data')
building_names_1 = building_names[:6]
# unit test
# i = 0
# folder_name = building_names_1[i]
for index, bld_name in enumerate(building_names_1):
print(f'Reading the data under building folder {bld_name}')
building_path = data_path + bld_name + '/'
os.chdir(building_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
# combine
indoor_files = list(filter(lambda name: 'indoor' in name, root_files)) # filter out the indoor files
window_files = list(filter(lambda name: 'window' in name, root_files)) # filter out the window files
hvac_files = list(filter(lambda name: 'hvac' in name, root_files)) # filter out the ac files
door_files = list(filter(lambda name: 'door_status' in name, root_files)) # filter out the door status files
# read anc combine the files under this folder
if indoor_files: # make sure it is not empty
indoor_temp_df = pd.concat([pd.read_csv(f) for f in indoor_files])
combined_indoor = pd.concat([combined_indoor, indoor_temp_df], ignore_index=True) # concat the data
else:
pass
if window_files:
window_temp_df = pd.concat([pd.read_csv(f) for f in window_files])
combined_window = pd.concat([combined_window, window_temp_df], ignore_index=True) # concat the data
else:
pass
if hvac_files:
hvac_temp_df = pd.concat([pd.read_csv(f) for f in hvac_files])
combined_hvac = pd.concat([combined_hvac, hvac_temp_df], ignore_index=True) # concat the data
# print(combined_hvac.isnull().sum())
# print(index)
else:
pass
if door_files:
door_temp_df = pd.concat([pd.read_csv(f) for f in door_files])
combined_door = pd.concat([combined_door, door_temp_df], ignore_index=True) # concat the data
# print(combined_door.isnull().sum())
# print(index)
else:
pass
''' auto mated process by building level '''
building_names = ['G', 'H', 'I']
building_ids = [7, 8, 9]
for index, bld_name in enumerate(building_names):
print(f'Dealing with data under building folder {bld_name}')
building_path = data_path + bld_name + '/'
os.chdir(building_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
'''' room level '''
for room_id in sub_folders:
print(f'Dealing with data under room folder {room_id}')
room_path = building_path + room_id + '/'
os.chdir(room_path) # pwd
file_names = os.listdir() # get all the file names
window_files = list(filter(lambda name: 'window' in name, file_names)) # filter out the window files
hvac_files = list(filter(lambda name: 'ac' in name, file_names)) # filter out the ac files
door_files = list(filter(lambda name: 'door' in name, file_names)) # filter out the door files
# read and combine files
if window_files:
for window_name in window_files:
name, extension = os.path.splitext(window_name) # get the path and extension of a file
if extension == '.CSV': # if the file is csv file
temp_df = pd.read_csv(window_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Window_Status'] # rename the columns
else:
temp_df = pd.read_excel(window_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Window_Status']
temp_df['Window_ID'] = int(name.split('_')[0][6:])
temp_df['Room_ID'] = int(room_id) # assign Room_ID
temp_df['Building_ID'] = building_ids[index] # assign Building_ID
combined_window = pd.concat([combined_window, temp_df], ignore_index=True) # concat the data
else:
pass
if door_files:
for door_name in door_files:
name, extension = os.path.splitext(door_name) # get the path and extension of a file
if extension == '.CSV': # if the file is csv file
temp_df = pd.read_csv(door_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Door_Status'] # rename the columns
else:
temp_df = | pd.read_excel(door_name, usecols=[0, 1]) | pandas.read_excel |
"""
*** <NAME> ***
_________Shubbair__________
TODO Naive Bayes
"""
from sklearn.naive_bayes import GaussianNB, MultinomialNB
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_wine
wine = load_wine()
print(dir(wine))
data_frame = | pd.DataFrame(wine.data, columns=wine.feature_names) | pandas.DataFrame |
import pandas as pd
import STRING
import numpy as np
import datetime
from sklearn.cluster import AgglomerativeClustering
from models.cluster_model import cluster_analysis
pd.options.display.max_columns = 500
# SOURCE FILE
offer_df = pd.read_csv(STRING.path_db + STRING.file_offer, sep=',', encoding='utf-8', quotechar='"')
print(len(offer_df.index))
# FILTER RESULTS
offer_df = offer_df[offer_df['oferta_sim_resultado_sinco'].isin(['00'])]
offer_df = offer_df[offer_df['oferta_nivel_sinco'] != '?']
offer_df = offer_df[offer_df['oferta_bonus_simulacion'] != '?']
# offer_df = offer_df[offer_df['oferta_sim_anios_asegurado'] != '?']
# offer_df = offer_df[offer_df['oferta_sim_antiguedad_cia_actual'] != '?']
print(len(offer_df.index))
# DROP DUPLICATES
'''
offer_df = offer_df.sort_values(by=['oferta_poliza', 'oferta_id'], ascending=[True, True]).reset_index(drop=True)
print(offer_df)
offer_df = offer_df.drop_duplicates(subset=['oferta_veh_marca', 'oferta_veh_modelo', 'oferta_veh_version',
'oferta_veh_valor', 'oferta_tomador_cp', 'oferta_conductor_fecha_nac',
'oferta_conductor_fecha_carne'], keep='last')
'''
# INTERMEDIARY FILTER
offer_df = offer_df[offer_df['oferta_cod_intermediario'] != '?']
offer_df['oferta_cod_intermediario'] = offer_df['oferta_cod_intermediario'].map(int)
offer_df = offer_df[offer_df['oferta_cod_intermediario'] != 81083] # ZURICH PRUEBAS OFERTA WEB
print(len(offer_df.index))
# Filter
offer_df = offer_df[offer_df['oferta_prod_tec'] == 721]
del offer_df['oferta_prod_tec']
offer_df = offer_df[offer_df['oferta_tomador_tipo_pers'] == 'F']
del offer_df['oferta_tomador_tipo_pers']
print(len(offer_df.index))
# SEX VALIDATE
offer_df['oferta_tomador_sexo'] = offer_df['oferta_tomador_sexo'].replace('V', 1)
offer_df['oferta_tomador_sexo'] = offer_df['oferta_tomador_sexo'].replace('M', 0)
offer_df['oferta_tomador_sexo'] = offer_df['oferta_tomador_sexo'].replace('O', -1)
offer_df['oferta_tomador_sexo'] = offer_df['oferta_tomador_sexo'].replace('?', -1)
offer_df = offer_df[offer_df['oferta_tomador_sexo'].isin([0, 1])]
print(len(offer_df.index))
# CP VALIDATE
print(len(offer_df.index))
offer_df = offer_df[offer_df['oferta_tomador_cp'] != '?']
offer_df = offer_df[offer_df.oferta_tomador_cp.apply(lambda x: x.isnumeric())]
# offer_df['oferta_tomador_cp'] = offer_df['oferta_tomador_cp'].replace('?', -1)
offer_df = offer_df[offer_df.oferta_tomador_cp != 0]
print(len(offer_df.index))
# STATE POLICY/OFFER
offer_df = offer_df[offer_df['oferta_estado'].isin(['1', '2', '3', 'V', 'P'])]
print(len(offer_df.index))
# 1: FORMALIZADA, 2: VIGOR OFERTA, 3: PENDIENTE OFERTA
# CALCULATE AGE
def calculate_age(birthdate, sep=''):
birthdate = datetime.datetime.strptime(birthdate, '%Y' + sep + '%m' + sep + '%d')
today = datetime.date.today()
return today.year - birthdate.year - ((today.month, today.day) < (birthdate.month, birthdate.day))
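# For example, calculate_age('19800615') gives the whole years elapsed since
# 15 June 1980 as of today, and calculate_age('1980-06-15', sep='-') parses the
# same (illustrative) date when it is stored with dashes.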
# REPLACE BIRTH
offer_df.loc[offer_df['oferta_tomador_fecha_nac'] == 0, 'oferta_tomador_fecha_nac'] = offer_df[
'oferta_propietario_fecha_nac']
offer_df.loc[offer_df['oferta_tomador_fecha_nac'] == 0, 'oferta_tomador_fecha_nac'] = offer_df[
'oferta_conductor_fecha_nac']
offer_df.loc[offer_df['oferta_conductor_fecha_nac'] == 0, 'oferta_conductor_fecha_nac'] = offer_df[
'oferta_tomador_fecha_nac']
offer_df.loc[offer_df['oferta_propietario_fecha_nac'] == 0, 'oferta_propietario_fecha_nac'] = offer_df[
'oferta_tomador_fecha_nac']
print(len(offer_df.index))
offer_df = offer_df[offer_df['oferta_tomador_fecha_nac'] != 0]
offer_df = offer_df[offer_df['oferta_conductor_fecha_nac'] != 0]
print(len(offer_df.index))
offer_df['oferta_conductor_fecha_nac'] = offer_df['oferta_conductor_fecha_nac'].map(str)
offer_df = offer_df[~offer_df['oferta_conductor_fecha_nac'].str.endswith('99')]
offer_df['cliente_edad'] = offer_df.apply(lambda y: calculate_age(y['oferta_conductor_fecha_nac']), axis=1)
offer_df = offer_df[offer_df['cliente_edad'].between(18, 99, inclusive=True)]
offer_df['cliente_edad'] = offer_df['cliente_edad'].map(int)
offer_df['cliente_edad_18_30'] = np.where(offer_df['cliente_edad'] <= 30, 1, 0)
offer_df['cliente_edad_30_65'] = np.where(offer_df['cliente_edad'].between(31, 65), 1, 0)
offer_df['cliente_edad_65'] = np.where(offer_df['cliente_edad'] > 65, 1, 0)
print(len(offer_df.index))
# LICENSE YEARS FIRST DRIVER
offer_df['oferta_conductor_fecha_carne'] = offer_df['oferta_conductor_fecha_carne'].map(str)
offer_df = offer_df[offer_df['oferta_conductor_fecha_carne'] != '0']
offer_df['antiguedad_permiso'] = offer_df.apply(lambda y: calculate_age(y['oferta_conductor_fecha_carne']), axis=1)
offer_df['antiguedad_permiso_riesgo'] = np.where(offer_df['antiguedad_permiso'] <= 1, 1, 0)
offer_df = offer_df[offer_df['cliente_edad'] - offer_df['antiguedad_permiso'] >= 17]
offer_df['cliente_edad'] = pd.cut(offer_df['cliente_edad'], range(18, offer_df['cliente_edad'].max(), 5), right=True)
offer_df['cliente_edad'] = offer_df['cliente_edad'].fillna(offer_df['cliente_edad'].max())
offer_df.loc[offer_df['antiguedad_permiso'].between(0, 5, inclusive=True), 'antiguedad_permiso_range'] = '[0-5]'
offer_df.loc[offer_df['antiguedad_permiso'].between(6, 10, inclusive=True), 'antiguedad_permiso_range'] = '[6-10]'
offer_df.loc[offer_df['antiguedad_permiso'].between(11, 20, inclusive=True), 'antiguedad_permiso_range'] = '[11-20]'
offer_df.loc[offer_df['antiguedad_permiso'].between(21, 30, inclusive=True), 'antiguedad_permiso_range'] = '[21-30]'
offer_df.loc[offer_df['antiguedad_permiso'] >= 31, 'antiguedad_permiso_range'] = '[31-inf]'
print(len(offer_df.index))
# SECOND DRIVER
offer_df.loc[
offer_df['oferta_adicional_fecha_nac'].isin(['?', '0', 0]), 'oferta_adicional_fecha_nac'] = \
datetime.date.today().strftime('%Y%m%d')
offer_df['oferta_adicional_fecha_nac'] = offer_df['oferta_adicional_fecha_nac'].map(str)
offer_df['edad_segundo_conductor'] = offer_df.apply(lambda y: calculate_age(y['oferta_adicional_fecha_nac']),
axis=1)
offer_df['edad_segundo_conductor_riesgo'] = np.where(offer_df['edad_segundo_conductor'].between(18, 25), 1, 0)
del offer_df['edad_segundo_conductor']
print(len(offer_df.index))
# LICENSE YEARS SECOND DRIVER
offer_df.loc[offer_df['oferta_adicional_fecha_carne'].isin(['?', '0', 0]), 'oferta_adicional_fecha_carne'] = '19000101'
offer_df['oferta_adicional_fecha_carne'] = offer_df['oferta_adicional_fecha_carne'].map(str)
offer_df['antiguedad_permiso_segundo'] = offer_df.apply(lambda y: calculate_age(y['oferta_adicional_fecha_carne']),
axis=1)
offer_df['antiguedad_permiso_segundo_riesgo'] = np.where(offer_df['antiguedad_permiso_segundo'] <= 1, 1, 0)
print(len(offer_df.index))
# WHO IS WHO
offer_df = offer_df[offer_df['oferta_tom_cond'].isin(['S', 'N'])]
offer_df = offer_df[offer_df['oferta_propietario_tom'].isin(['S', 'N'])]
offer_df = offer_df[offer_df['oferta_propietario_cond'].isin(['S', 'N'])]
offer_df['oferta_tom_cond'] = np.where(offer_df['oferta_tom_cond'] == 'S', 1, 0)
offer_df['oferta_propietario_tom'] = np.where(offer_df['oferta_propietario_tom'] == 'S', 1, 0)
offer_df['oferta_propietario_cond'] = np.where(offer_df['oferta_propietario_cond'] == 'S', 1, 0)
print(len(offer_df.index))
# FILTER DRIVING COUNTRY
offer_df = offer_df[offer_df['oferta_conductor_pais_circu'] == 'ESP']
print(len(offer_df.index))
# GROUPED NATIONALITY
country_file = pd.read_csv(STRING.path_db_aux + STRING.file_country, sep=';', encoding='latin1')
offer_df = pd.merge(offer_df, country_file[['REGION', 'ISO']], left_on='oferta_conductor_pais_exped_carne',
right_on='ISO', how='left')
dummy_region = pd.get_dummies(offer_df['REGION'], prefix='cliente_region', dummy_na=True)
offer_df = pd.concat([offer_df, dummy_region], axis=1)
offer_df['cliente_extranjero'] = np.where(offer_df['oferta_conductor_pais_exped_carne'] != 'ESP', 1, 0)
print(len(offer_df.index))
# VEHICLE TYPE
for i in ['oferta_veh_marca', 'oferta_veh_modelo', 'oferta_veh_version']:
offer_df[i] = offer_df[i].map(str)
offer_df = offer_df[offer_df['oferta_veh_valor'] != '?']
offer_df['oferta_veh_valor'] = offer_df['oferta_veh_valor'].map(float)
offer_df = offer_df[offer_df['oferta_veh_valor'] >= 300]
offer_df['veh_tipo_agrupacion'] = offer_df['oferta_veh_marca'].map(str) + '-' + offer_df['oferta_veh_modelo'].map(
str) + '-' + offer_df['oferta_veh_version'].map(str) + '-' + offer_df['oferta_veh_accesorio'].map(str)
car_ranking = offer_df[['veh_tipo_agrupacion',
'oferta_veh_plazas', 'oferta_veh_potencia', 'oferta_veh_cilindrada', 'oferta_veh_tara',
'oferta_veh_valor']]
car_ranking = car_ranking.groupby(['veh_tipo_agrupacion',
'oferta_veh_plazas', 'oferta_veh_potencia', 'oferta_veh_cilindrada',
'oferta_veh_tara']).agg({'oferta_veh_valor': 'median'})
car_ranking = car_ranking.reset_index(drop=False)
ward = AgglomerativeClustering(n_clusters=10, linkage='ward',
connectivity=None)
ward.fit(car_ranking.drop('veh_tipo_agrupacion', axis=1))
labels = ward.labels_
df = pd.DataFrame(labels, columns=['car_ranking'], index=car_ranking.index)
car_ranking = pd.concat([car_ranking, df], axis=1)
car_ranking.to_csv(STRING.path_db_aux + '\\cluster_ward_car.csv', index=False, sep=';')
del car_ranking['oferta_veh_valor']
offer_df = pd.merge(offer_df, car_ranking, how='left', on=['veh_tipo_agrupacion',
'oferta_veh_plazas', 'oferta_veh_potencia',
'oferta_veh_cilindrada',
'oferta_veh_tara'])
offer_df['oferta_veh_valor'] = offer_df['oferta_veh_valor'].round()
offer_df['oferta_veh_valor'] = offer_df['oferta_veh_valor'].map(int)
offer_df['oferta_veh_valor_unitary'] = offer_df['oferta_veh_valor'].copy()
offer_df.loc[offer_df['oferta_veh_valor'] > 71000, 'oferta_veh_valor'] = 71000
offer_df['oferta_veh_valor'] = pd.cut(offer_df['oferta_veh_valor'], range(0, offer_df['oferta_veh_valor'].max(), 1000),
right=True)
offer_df['oferta_veh_valor'] = offer_df['oferta_veh_valor'].fillna(offer_df['oferta_veh_valor'].max())
print(len(offer_df.index))
# VEHICLE USE CODE
offer_df['oferta_veh_uso'] = offer_df['oferta_veh_uso'].map(int)
veh_use = pd.read_csv(STRING.path_db_aux + STRING.file_veh_use, sep=';', encoding='latin1',
dtype={'oferta_veh_uso': int})
offer_df = pd.merge(offer_df, veh_use, how='left', on='oferta_veh_uso')
offer_df['d_uso_particular'] = np.where(offer_df['vehiculo_uso_desc'].str.contains('PARTICULAR'), 1, 0)
offer_df['d_uso_alquiler'] = np.where(offer_df['vehiculo_uso_desc'].str.contains('ALQUILER'), 1, 0)
offer_df['veh_uso'] = pd.Series('OTRO', index=offer_df.index)
offer_df.loc[offer_df['d_uso_particular'] == 1, 'veh_uso'] = 'PARTICULAR'
offer_df.loc[offer_df['d_uso_alquiler'] == 1, 'veh_uso'] = 'ALQUILER'
print(len(offer_df.index))
# VEHICLE TYPE
tipo_dict = {'ciclomotor': 'PARTICULAR', 'furgoneta': 'FURGONETA', 'camion': 'CAMION', 'autocar': 'AUTOCAR',
'remolque': 'REMOLQUE', 'agricola': 'AGRICO', 'industrial': 'INDUSTRIAL', 'triciclo': 'TRICICLO'}
for k, v in tipo_dict.items():
offer_df['d_tipo_' + k] = np.where(offer_df['vehiculo_uso_desc'].str.contains(v), 1, 0)
del tipo_dict
offer_df['veh_tipo'] = pd.Series('OTRO', index=offer_df.index)
offer_df.loc[offer_df['d_tipo_ciclomotor'] == 1, 'veh_tipo'] = 'CICLOMOTOR'
offer_df.loc[offer_df['d_tipo_furgoneta'] == 1, 'veh_tipo'] = 'FURGONETA'
offer_df.loc[offer_df['d_tipo_camion'] == 1, 'veh_tipo'] = 'CAMION'
offer_df.loc[offer_df['d_tipo_autocar'] == 1, 'veh_tipo'] = 'AUTOCAR'
offer_df.loc[offer_df['d_tipo_remolque'] == 1, 'veh_tipo'] = 'REMOLQUE'
offer_df.loc[offer_df['d_tipo_agricola'] == 1, 'veh_tipo'] = 'AGRICOLA'
offer_df.loc[offer_df['d_tipo_industrial'] == 1, 'veh_tipo'] = 'INDUSTRIAL'
offer_df.loc[offer_df['d_tipo_triciclo'] == 1, 'veh_tipo'] = 'TRICICLO'
print(len(offer_df.index))
# VEHICLE CLASE
offer_df['oferta_veh_tipo'] = offer_df['oferta_veh_tipo'].map(int)
veh_use = pd.read_csv(STRING.path_db_aux + STRING.file_veh_clase, sep=';', encoding='latin1',
dtype={'oferta_veh_tipo': int})
offer_df = pd.merge(offer_df, veh_use, how='left', on='oferta_veh_tipo')
print(len(offer_df.index))
# VEHICLE HEAVY
offer_df['vehiculo_heavy'] = np.where(offer_df['vehiculo_clase_agrupacion_descripcion'].str.contains('>'),
1, 0)
offer_df['oferta_veh_tara'] = offer_df['oferta_veh_tara'].replace('?', 0)
offer_df['oferta_veh_tara'] = offer_df['oferta_veh_tara'].map(int)
offer_df['oferta_veh_tara'] = np.where(offer_df['oferta_veh_tara'] >= 3500,
1, 0)
offer_df['oferta_veh_puertos'] = np.where(offer_df['oferta_veh_puertos'] == 'S', 1, 0)
print(len(offer_df.index))
# PLATE LICENSE
offer_df['oferta_fecha_matricula'] = offer_df['oferta_fecha_matricula'].map(int)
offer_df = offer_df[offer_df['oferta_fecha_matricula'].between(1900, 2018, inclusive=True)]
offer_df['antiguedad_vehiculo'] = pd.Series(2018 - offer_df['oferta_fecha_matricula'], index=offer_df.index)
del offer_df['oferta_fecha_matricula']
print(len(offer_df.index))
# MATCH BONUS
bonus_df = | pd.read_csv(STRING.path_db_aux + STRING.file_bonus, sep=';', encoding='latin1') | pandas.read_csv |
# Globals #
import re
import numpy as np
import pandas as pd
import dateutil.parser as dp
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import *
from itertools import islice
from scipy.stats import boxcox
from scipy.integrate import simps
from realtime_talib import Indicator
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
from pprint import pprint
from selenium import webdriver
RANDOM_STATE = 42
# Sentiment Preprocessing
def remove_special_chars(headline_list):
"""
    Returns a list of headlines with all characters other than letters and spaces removed.
"""
rm_spec_chars = [re.sub('[^ A-Za-z]+', "", headline) for headline in headline_list]
return rm_spec_chars
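# For instance (made-up headline): re.sub('[^ A-Za-z]+', "", "Bitcoin hits $10,000!")
# returns "Bitcoin hits " -- digits and punctuation are dropped, letters and spaces kept.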
def tokenize(headline_list):
"""
Takes list of headlines as input and returns a list of lists of tokens.
"""
tokenized = []
for headline in headline_list:
tokens = word_tokenize(headline)
tokenized.append(tokens)
# print("tokenize")
# pprint(tokenized)
return tokenized
def remove_stop_words(tokenized_headline_list):
"""
Takes list of lists of tokens as input and removes all stop words.
"""
filtered_tokens = []
for token_list in tokenized_headline_list:
filtered_tokens.append([token for token in token_list if token not in set(stopwords.words('english'))])
# print("stop words")
# pprint(filtered_tokens)
return filtered_tokens
def stem(token_list_of_lists):
"""
Takes list of lists of tokens as input and stems every token.
Returns a list of lists of stems.
"""
stemmer = PorterStemmer()
stemmed = []
for token_list in token_list_of_lists:
# print(token_list)
stemmed.append([stemmer.stem(token) for token in token_list])
# print("stem")
# pprint(stemmed)
return stemmed
def make_bag_of_words(df, stemmed):
"""
Create bag of words model.
"""
print("\tCreating Bag of Words Model...")
very_pos = set()
slightly_pos = set()
neutral = set()
slightly_neg = set()
very_neg = set()
    # Create sets that hold the words from headlines in each sentiment class ("very_neg", "slightly_neg", etc.)
for stems, sentiment in zip(stemmed, df["Sentiment"].tolist()):
if sentiment == -2:
very_neg.update(stems)
elif sentiment == -1:
slightly_neg.update(stems)
elif sentiment == 0:
neutral.update(stems)
elif sentiment == 1:
slightly_pos.update(stems)
elif sentiment == 2:
very_pos.update(stems)
# Count number of words in each headline in each of the sets and encode it as a list of counts for each headline.
bag_count = []
for x in stemmed:
x = set(x)
bag_count.append(list((len(x & very_neg), len(x & slightly_neg), len(x & neutral), len(x & slightly_pos), len(x & very_pos))))
df["sentiment_class_count"] = bag_count
return df
def sentiment_preprocessing(df):
"""
Takes a dataframe, removes special characters, tokenizes
the headlines, removes stop-tokens, and stems the remaining tokens.
"""
specials_removed = remove_special_chars(df["Headline"].tolist())
tokenized = tokenize(specials_removed)
tokenized_filtered = remove_stop_words(tokenized)
stemmed = stem(tokenized_filtered)
return df, stemmed
def headlines_balanced_split(dataset, test_size):
"""
Randomly splits dataset into balanced training and test sets.
"""
print("\nSplitting headlines into *balanced* training and test sets...")
# pprint(list(dataset.values))
# pprint(dataset)
# Use sklearn.train_test_split to split all features into x_train and x_test,
# and all expected values into y_train and y_test numpy arrays
x_train, x_test, y_train, y_test = train_test_split(dataset.drop(["Sentiment", "Headline"], axis=1).values,
dataset["Sentiment"].values, test_size=test_size,
random_state=RANDOM_STATE)
x_train = [x[0] for x in x_train]
x_test = [x[0] for x in x_test]
# Combine x_train and y_train (numpy arrays) into a single dataframe, with column labels
train = pd.DataFrame(data=x_train, columns=["very_neg", "slightly_neg", "neutral", "slightly_pos", "very_pos"])
train["Sentiment"] = pd.Series(y_train)
# Do the same for x_test and y_test
test = pd.DataFrame(data=x_test, columns=["very_neg", "slightly_neg", "neutral", "slightly_pos", "very_pos"])
test["Sentiment"] = pd.Series(y_test)
train_prediction = train["Sentiment"].values
test_prediction = test["Sentiment"].values
train_trimmed = train.drop(["Sentiment"], axis=1).values
test_trimmed = test.drop(["Sentiment"], axis=1).values
return train_trimmed, test_trimmed, train_prediction, test_prediction
def split(dataset, test_size, balanced=True):
if balanced:
return headlines_balanced_split(dataset, test_size)
else:
# TODO: write imbalanced split function
return None
# Helpers #
def sliding_window(seq, n=2):
"""
Returns a sliding window (of width n) over data from the iterable. https://stackoverflow.com/a/6822773/8740440
"""
"s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ..."
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
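# Small illustration: list(sliding_window([1, 2, 3, 4], n=3)) == [(1, 2, 3), (2, 3, 4)]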
def integrate(avg_daily_sentiment, interval):
"""
Takes a list of average daily sentiment scores and returns a list of definite integral estimations calculated
with Simpson's method. Each integral interval is determined by the `interval` variable. Shows accumulated sentiment.
"""
# Split into sliding window list of lists
sentiment_windows = sliding_window(avg_daily_sentiment, interval)
integral_simpson_est = []
# https://stackoverflow.com/a/13323861/8740440
for x in sentiment_windows:
# Estimate area using composite Simpson's rule. dx indicates the spacing of the data on the x-axis.
integral_simpson_est.append(simps(x, dx=1))
dead_values = list([None] * interval)
dead_values.extend(integral_simpson_est)
dead_values.reverse()
return dead_values
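# Toy example: integrate([1, 2, 3, 4, 5], 3) applies simps() to the windows
# (1,2,3), (2,3,4), (3,4,5) -> 4.0, 6.0, 8.0, pads with `interval` Nones and
# reverses, returning [8.0, 6.0, 4.0, None, None, None].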
def random_undersampling(dataset):
"""
Randomly deleting rows that contain the majority class until the number
    in the majority class is equal to the number in the minority class.
"""
minority_set = dataset[dataset.Trend == -1.0]
majority_set = dataset[dataset.Trend == 1.0]
# print(dataset.Trend.value_counts())
# If minority set larger than majority set, swap
if len(minority_set) > len(majority_set):
minority_set, majority_set = majority_set, minority_set
# Downsample majority class
majority_downsampled = resample(majority_set,
replace=False, # sample without replacement
n_samples=len(minority_set), # to match minority class
random_state=123) # reproducible results
# Combine minority class with downsampled majority class
return pd.concat([majority_downsampled, minority_set])
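# e.g. a frame with 80 rows of Trend == 1.0 and 20 rows of Trend == -1.0 comes
# back with 40 rows, 20 per class (majority rows are dropped at random).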
def get_popularity(headlines):
# TODO: Randomize user-agents OR figure out how to handle popups
if "Tweets" not in headlines.columns:
counts = []
driver = webdriver.Chrome()
for index, row in headlines.iterrows():
try:
driver.get(row["URL"])
time.sleep(3)
twitter_containers = driver.find_elements_by_xpath("//li[@class='twitter']")
count = twitter_containers[0].find_elements_by_xpath("//span[@class='count']")
if count[0].text == "":
counts.append(1)
else:
counts.append(int(count[0].text))
except:
counts.append(1) # QUESTION: Should it be None?
headlines["Tweets"] = (pd.Series(counts)).values
print(counts)
return headlines
def balanced_split(dataset, test_size):
"""
Randomly splits dataset into balanced training and test sets.
"""
print("\tSplitting data into *balanced* training and test sets")
# Use sklearn.train_test_split to split original dataset into x_train, y_train, x_test, y_test numpy arrays
x_train, x_test, y_train, y_test = train_test_split(dataset.drop(["Date", "Trend"], axis=1).values, dataset["Trend"].values, test_size=test_size, random_state=RANDOM_STATE)
# Combine x_train and y_train (numpy arrays) into a single dataframe, with column labels
train = pd.DataFrame(data=x_train, columns=dataset.columns[1:-1])
train["Trend"] = pd.Series(y_train)
    # Do the same for x_test and y_test
test = pd.DataFrame(data=x_test, columns=dataset.columns[1:-1])
test["Trend"] = pd.Series(y_test)
# Apply random undersampling to both data frames
train_downsampled = random_undersampling(train)
test_downsampled = random_undersampling(test)
train_trend = train_downsampled["Trend"].values
test_trend = test_downsampled["Trend"].values
train_trimmed = train_downsampled.drop(["Trend"], axis=1).values
test_trimmed = test_downsampled.drop(["Trend"], axis=1).values
return train_trimmed, test_trimmed, train_trend, test_trend
def unbalanced_split(dataset, test_size):
"""
Randomly splits dataset into unbalanced training and test sets.
"""
print("\tSplitting data into *unbalanced* training and test sets")
dataset = dataset.drop("Date", axis=1)
output = train_test_split(dataset.drop("Trend", axis=1).values, dataset["Trend"].values, test_size=test_size, random_state=RANDOM_STATE)
return output
# Main #
def calculate_indicators(ohlcv):
"""
Extracts technical indicators from OHLCV data.
"""
print("\tCalculating technical indicators")
ohlcv = ohlcv.drop(["Volume (BTC)", "Weighted Price"], axis=1)
ohlcv.columns = ["Date", "Open", "High", "Low", "Close", "Volume"]
temp_ohlcv = ohlcv.copy()
# Converts ISO 8601 timestamps to UNIX
unix_times = [int((dp.parse(temp_ohlcv.iloc[index]["Date"])).strftime("%s")) for index in range(temp_ohlcv.shape[0])]
temp_ohlcv["Date"] = (pd.Series(unix_times)).values
# Converts column headers to lowercase and sorts rows in chronological order
temp_ohlcv.columns = ["date", "open", "high", "low", "close", "volume"]
temp_ohlcv = temp_ohlcv.iloc[::-1]
# Rate of Change Ratio
rocr3 = ((Indicator(temp_ohlcv, "ROCR", 3)).getHistorical())[::-1]
rocr6 = ((Indicator(temp_ohlcv, "ROCR", 6)).getHistorical())[::-1]
# Average True Range
atr = ((Indicator(temp_ohlcv, "ATR", 14)).getHistorical())[::-1]
# On-Balance Volume
obv = ((Indicator(temp_ohlcv, "OBV")).getHistorical())[::-1]
# Triple Exponential Moving Average
trix = ((Indicator(temp_ohlcv, "TRIX", 20)).getHistorical())[::-1]
# Momentum
mom1 = ((Indicator(temp_ohlcv, "MOM", 1)).getHistorical())[::-1]
mom3 = ((Indicator(temp_ohlcv, "MOM", 3)).getHistorical())[::-1]
# Average Directional Index
adx14 = ((Indicator(temp_ohlcv, "ADX", 14)).getHistorical())[::-1]
adx20 = ((Indicator(temp_ohlcv, "ADX", 20)).getHistorical())[::-1]
# Williams %R
willr = ((Indicator(temp_ohlcv, "WILLR", 14)).getHistorical())[::-1]
# Relative Strength Index
rsi6 = ((Indicator(temp_ohlcv, "RSI", 6)).getHistorical())[::-1]
rsi12 = ((Indicator(temp_ohlcv, "RSI", 12)).getHistorical())[::-1]
# Moving Average Convergence Divergence
macd, macd_signal, macd_hist = (Indicator(temp_ohlcv, "MACD", 12, 26, 9)).getHistorical()
macd, macd_signal, macd_hist = macd[::-1], macd_signal[::-1], macd_hist[::-1]
# Exponential Moving Average
ema6 = ((Indicator(temp_ohlcv, "MA", 6, 1)).getHistorical())[::-1]
ema12 = ((Indicator(temp_ohlcv, "MA", 12, 1)).getHistorical())[::-1]
# Append indicators to the input datasets
min_length = min(len(mom1), len(mom3), len(adx14), len(adx20), len(willr), len(rsi6), len(rsi12), len(macd), len(macd_signal), len(macd_hist), len(ema6), len(ema12), len(rocr3), len(rocr6), len(atr), len(obv), len(trix))
ohlcv = ohlcv[:min_length].drop(["Open", "High", "Low"], axis=1)
ohlcv["MOM (1)"], ohlcv["MOM (3)"], ohlcv["ADX (14)"] = (pd.Series(mom1[:min_length])).values, (pd.Series(mom3[:min_length])).values, (pd.Series(adx14[:min_length])).values
ohlcv["ADX (20)"], ohlcv["WILLR"], ohlcv["RSI (6)"] = (pd.Series(adx20[:min_length])).values, (pd.Series(willr[:min_length])).values, (pd.Series(rsi6[:min_length])).values
ohlcv["RSI (12)"], ohlcv["MACD"], ohlcv["MACD (Signal)"] = (pd.Series(rsi12[:min_length])).values, (pd.Series(macd[:min_length])).values, (pd.Series(macd_signal[:min_length])).values
ohlcv["MACD (Historical)"], ohlcv["EMA (6)"], ohlcv["EMA (12)"] = (pd.Series(macd_hist[:min_length])).values, (pd.Series(ema6[:min_length])).values, ( | pd.Series(ema12[:min_length]) | pandas.Series |
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
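        # building a Categorical from a CategoricalIndex, either directly or
        # from its object-dtype values plus explicit categories, round-trips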
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
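        # boolean masks produced by comparing the factor should select the
        # same elements as comparisons on the underlying ndarray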
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
        # Only categoricals with the same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
        # Only categoricals with the same ordering information can be compared
        cat_unordered = cat.set_ordered(False)
        self.assertFalse((cat > cat).any())
        def f():
            cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
        # comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
        # Make sure that unequal comparisons take the categories order into
        # account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
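        # repr shows the values and, for an ordered factor, the categories
        # joined with '<'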
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
        # unicode option should not affect Categorical, as it doesn't care
        # about the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
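        # Categorical.from_array on a PeriodIndex should use the periods as
        # categories and encode the values against them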
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
    def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
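        # as_ordered/as_unordered/set_ordered toggle the ordered flag, either
        # returning a copy or operating in place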
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
        # deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
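        # set_categories can reorder categories without changing the values;
        # categories missing from the new list turn the affected codes into -1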
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
        # all "pointers" to '4' must be changed from 3 to 0, ...
        c = c.set_categories([4, 3, 2, 1])
        # positions are changed
        self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3]))
        # categories are now in new order
        self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1]))
        # output is the same
        self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1]))
        self.assertEqual(c.min(), 4)
        self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
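        # rename_categories maps the old categories to new ones positionally,
        # as a copy by default and in place when requested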
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
        # nan is kept once in the values but removed from the categories
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
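        # scalar indexing returns the element itself, slicing returns a
        # Categorical that keeps the original categories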
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
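        # nbytes is the size of the codes array plus the categories values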
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
        # Categorical returns np.array like pd.Series, but different from
# np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
        # eggs after donuts, after switching milk and donuts
        exp = np.array([3, 5])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
        # whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
        # whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name
) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
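        # astype('category') on a Series or DataFrame column should be
        # equivalent to constructing a Categorical from the same values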
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
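        # the .cat accessor exposes categories/ordered and the category
        # modification methods of the underlying Categorical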
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
            str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
            lambda: Series([1, 2, 3]).cat)
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
        # nothing to iterate over, so nothing is defined; values should remain
        # unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
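        # str_cat joins a single array into one string (honoring sep/na_rep)
        # and concatenates multiple arrays elementwise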
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
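        # startswith propagates NA by default; the na keyword fills missing
        # values before the check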
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags cannot be set when pat is a compiled regex;
# doing so raises a ValueError
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'dave@google.com',
'tdhock5@gmail.com',
'maudelaperriere@gmail.com',
'rob@gmail.com some text steve@gmail.com',
'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# an Index input should give the same result as a Series with the default
# index; the index name does not affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has a MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: ３ Em 3
values = ['A', '3', u'¼', u'★', u'፸', u'３', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'３', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'３', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
# If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
import datetime
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from pointcloudset import Dataset, PointCloud
@pytest.fixture()
def testdata_path() -> Path:
return Path(__file__).parent.absolute() / "testdata"
@pytest.fixture()
def testbag1():
return Path(__file__).parent.absolute() / "testdata/test.bag"
@pytest.fixture()
def testlas1():
return Path(__file__).parent.absolute() / "testdata/las_files/diamond.las"
@pytest.fixture()
def testset(testbag1):
return Dataset.from_file(testbag1, topic="/os1_cloud_node/points", keep_zeros=False)
@pytest.fixture()
def testset_withzero(testbag1):
return Dataset.from_file(testbag1, topic="/os1_cloud_node/points", keep_zeros=True)
@pytest.fixture()
def testpointcloud(testset):
return testset[1]
@pytest.fixture()
def testframe0(testset):
return testset[0]
@pytest.fixture()
def testpointcloud_withzero(testset_withzero):
return testset_withzero[1]
@pytest.fixture()
def testpointcloud_mini_df():
columns = [
"x",
"y",
"z",
"intensity",
"t",
"reflectivity",
"ring",
"noise",
"range",
]
np.random.seed(5)
df1 = pd.DataFrame(np.zeros(shape=(1, len(columns))), columns=columns)
df2 = pd.DataFrame(np.ones(shape=(1, len(columns))), columns=columns)
df3 = pd.DataFrame(-1.0 * np.ones(shape=(1, len(columns))), columns=columns)
df4 = pd.DataFrame(
np.random.randint(0, 1000, size=(5, len(columns))) * np.random.random(),
columns=columns,
)
return pd.concat([df1, df2, df3, df4]).reset_index(drop=True)
@pytest.fixture()
def reference_data_with_zero_dataframe():
filename = (
Path(__file__).parent.absolute()
/ "testdata/testpointcloud_withzero_dataframe.pkl"
)
return pd.read_pickle(filename)
@pytest.fixture()
def reference_pointcloud_withzero_dataframe():
filename = (
Path(__file__).parent.absolute()
/ "testdata/testpointcloud_withzero_pointcloud.pkl"
)
return | pd.read_pickle(filename) | pandas.read_pickle |
import pymanda
import pandas as pd
import numpy as np
import warnings
"""
ChoiceData
----------
A container for a DataFrame that maintains relevant columns for mergers and
acquisitions analyses
"""
class ChoiceData():
"""
A container wrapping a two-dimensional, size-mutable, potentially
heterogeneous pandas DataFrame with labeled axes (rows and columns).
Additionally, has parameters to identify variables of interest for
calculating competition metrics using customer level data
Parameters
----------
data : Non-Empty pandas.core.frame.DataFrame object
choice_var : String of Column Name
Column name that identifies the "choice" of each customer
corp_var : String of Column Name, default None
Column Name that identifies a higher level classification of Choice
geog_var : String of Column Name, default None
Column name that identifies a geography for Customer
wght_var : String of Column Name, default None
Column name that identifies weight for customer level
Examples
--------
Constructing ChoiceData from a DataFrame.
>>> choices = ['a' for x in range(100)]
>>> data = pandas.DataFrame({'choice': choices})
>>> cd = pymanda.ChoiceData(data, 'choice')
"""
def __init__(
self,
data,
choice_var,
corp_var= None,
geog_var= None,
wght_var= None):
if corp_var is None:
corp_var = choice_var
self.params = {'choice_var' : choice_var,
'corp_var' : corp_var,
'geog_var' : geog_var,
'wght_var' : wght_var}
self.data = data
self.choice_var = choice_var
self.corp_var = corp_var
self.geog_var = geog_var
self.wght_var = wght_var
if type(data) != pd.core.frame.DataFrame:
raise TypeError ('''Expected type pandas.core.frame.DataFrame. Got {}'''.format(type(data)))
if data.empty:
raise ValueError ('''Dataframe is Empty''')
defined_params = [x for x in [choice_var, corp_var, geog_var, wght_var] if x is not None]
for param in defined_params:
if param not in data.columns:
raise KeyError ('''{} is not a column in Dataframe'''.format(param))
for nonull in [choice_var, corp_var]:
if data[nonull].isnull().any() or (data[nonull] == '').any():
raise ValueError ('''{} has missing values'''.format(nonull))
def corp_map(self):
"""
Utility function mapping corporations to their choices in self.data
Returns
-------
corp_map : pandas.core.frame.DataFrame
2 column data frame with corporation name and choice names.
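Examples
--------
A minimal illustrative sketch; the 'corp' and 'choice' column names and
values below are invented for this example only:
>>> data = pandas.DataFrame({'corp': ['x', 'x', 'y'],
...                          'choice': ['a', 'b', 'c']})
>>> cd = pymanda.ChoiceData(data, 'choice', corp_var='corp')
>>> cd.corp_map().values.tolist()
[['x', 'a'], ['x', 'b'], ['y', 'c']]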
"""
if self.corp_var == self.choice_var:
raise RuntimeError('''corp_map should only be called when self.corp_var is defined and different than self.choice_var''')
corp_map = self.data.groupby([self.corp_var, self.choice_var]).count()
corp_map = corp_map.reset_index()
corp_map = corp_map[[self.corp_var, self.choice_var]]
return corp_map
def estimate_psa(self, centers, threshold=[.75, .9]):
"""
Return a dictionary identifying geographies in a choice's Primary
Service Area (PSA) at a given threshold.
Dictionary keys are labeled as "{center}_{threshold}" and values are
a list of geographies falling within the threshold
Default will use self.corp_var as choice and self.wght_var for count
Parameters
----------
centers: List of choices (values of self.corp_var) to find PSAs for
threshold : float or list of floats,
Threshold or levels of thresholds to find the PSA for each choice
in centers. Default calculates 75% and 90% PSAs.
Returns
-------
Dictionary
Dictionary keys are labeled as "{center}_{threshold}" and
values are a list of geographies falling within the threshold
Examples
--------
>>> choices = ['a' for x in range(100)]
>>> zips = [1 for x in range(20)]
>>> zips += [2 for x in range(20)]
>>> zips += [3 for x in range(20)]
>>> zips += [4 for x in range(20)]
>>> zips += [5 for x in range(20)]
>>> data = pd.DataFrame({'choice': choices, 'geography': zips})
>>> cd = ChoiceData(data, 'choice', geog_var = 'geography')
>>> cd.estimate_psa(['a'])
{'a_0.75': [1, 2, 3, 4], 'a_0.9': [1, 2, 3, 4, 5]}
"""
if self.geog_var is None:
raise KeyError ("geog_var is not defined")
if type(threshold) != list:
threshold = [threshold]
if type(centers) != list:
centers = [centers]
for center in centers:
if not self.data[self.corp_var].isin([center]).any():
raise ValueError ("{cen} is not in {corp}".format(cen=center, corp=self.corp_var))
for alpha in threshold:
if type(alpha) != float:
raise TypeError ('''Expected threshold to be type float. Got {}'''.format(type(alpha)))
if not 0 < alpha <= 1:
raise ValueError ('''Threshold value of {} is not between 0 and 1'''.format(alpha))
df = self.data.copy(deep=True)
if self.wght_var is None:
df['count'] = 1
weight = 'count'
else:
weight = self.wght_var
df = df[[self.corp_var, self.geog_var, weight]]
df = df.groupby([self.corp_var, self.geog_var]).sum().reset_index() #calculate counts by geography
df['group_total'] = df[weight].groupby(df[self.corp_var]).transform('sum') #get group totals
df['share'] = df[weight] / df['group_total'] # calculate share
df = df.groupby([self.corp_var, self.geog_var]).sum()
df = df.sort_values([self.corp_var, weight], ascending=False)
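# running total of weight/share within each corporation, largest geographies first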
df_start = df.groupby(level=0).cumsum().reset_index()
output_dict = {}
for alpha in threshold:
df = df_start
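# keep a geography while the cumulative share *before* it is still under the
# threshold; the shift/fillna/replace trick resets the running total at each
# corporation boundary so every corporation's largest geography is kept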
df['keep'] = np.where(df['share'].shift().fillna(1).replace(1, 0) < alpha, 1, 0)
df = df[(df[self.corp_var].isin(centers)) & (df['keep']==1)]
for center in centers:
in_psa = list(df[self.geog_var][df[self.corp_var]==center])
in_psa.sort()
output_dict.update({"{cen}_{a}".format(cen=center, a=alpha) : in_psa})
return output_dict
def restriction_checks(self, restriction):
"""
Checks for custom restrictions
"""
if type(restriction) != pd.core.series.Series:
raise TypeError ("Expected type pandas.core.series.Series. Got {}".format(type(restriction)))
if restriction.dtype != np.dtype('bool'):
raise TypeError ("Expected dtype('bool'). Got {}".format(restriction.dtype))
def restrict_data(self, restriction):
"""
Restrict the data in self.data using a boolean series
Parameters
----------
restriction: pandas.core.series.Series with dtype=bool
Boolean series identifying which rows to keep given a restriction
Examples
--------
>>> choices = ['a' for x in range(10)]
>>> zips = [1 for x in range(2)]
>>> zips += [2 for x in range(2)]
>>> zips += [3 for x in range(2)]
>>> zips += [4 for x in range(2)]
>>> zips += [5 for x in range(2)]
>>> data = pd.DataFrame({'choice': choices, 'geography': zips})
>>> cd = ChoiceData(data, 'choice')
>>> cd.data
choice geography
0 a 1
1 a 1
2 a 2
3 a 2
4 a 3
5 a 3
6 a 4
7 a 4
8 a 5
9 a 5
>>> cd.restrict_data(data['geography'] != 5)
>>> cd.data
choice geography
0 a 1
1 a 1
2 a 2
3 a 2
4 a 3
5 a 3
6 a 4
7 a 4
"""
self.restriction_checks(restriction)
self.data = self.data[restriction]
def calculate_shares(self, psa_dict=None, weight_var=None, restriction=None):
"""
Create Share Table of values in self.wght_var by self.choice_var
Parameters
----------
psa_dict: dictionary
dictionary of lists that contain values to be kept in self.geog_var
If None, calculates shares for full data.
weight_var : str, Optional
Column Name in self.data to use as weight. Default is None,
weighting every observation equally
restriction: pandas.core.series.Series with dtype=bool
Optional restriction to be applied to data before calculating
shares
Returns
-------
Dictionary
keys are the same as psa_dict if given or "Base Shares"
values are corresponding pandas dataframes of the shares
Examples
--------
>>> choices = ['a' for x in range(30)]
>>> choices += ['b' for x in range(20)]
>>> choices += ['c' for x in range(20)]
>>> choices += ['d' for x in range(5)]
>>> choices += ['e' for x in range(25)]
>>> df = pd.DataFrame({"choice" : choices})
>>> cd = ChoiceData(df, "choice")
>>> cd.calculate_shares()
{'Base Shares': choice share
0 a 0.30
1 b 0.20
2 c 0.20
3 d 0.05
4 e 0.25}
"""
if type(psa_dict) != dict and psa_dict is not None:
raise TypeError ("Expected type dict. Got {}".format(type(psa_dict)))
if restriction is not None:
self.restriction_checks(restriction)
if weight_var is None:
weight_var= self.wght_var
if weight_var not in self.data.columns and weight_var is not None:
raise KeyError("{} is not a Column in ChoiceData".format(weight_var))
if psa_dict is None:
psa_dict= {'Base Shares': []}
base_shares = True
else:
if self.geog_var is None:
raise ValueError ("geog_var is not defined in ChoiceData")
elif self.geog_var not in self.data.columns:
raise KeyError("geog_var is not in ChoiceData")
base_shares = False
if self.corp_var == self.choice_var:
group = [self.choice_var]
else:
group = [self.corp_var, self.choice_var]
output_dict = {}
for key in psa_dict.keys():
df = self.data.copy(deep=True)
redefine_weight=False
if weight_var is None:
df['count'] = 1
weight_var = 'count'
redefine_weight=True
if restriction is not None:
df = df[restriction]
if not base_shares:
for geo in psa_dict[key]:
if not df[self.geog_var].isin([geo]).any():
raise ValueError ("{g} is not in {col}".format(g=geo, col=self.geog_var))
df = df[df[self.geog_var].isin(psa_dict[key])]
df = df[group + [weight_var]]
df_shares = (df.groupby(group).sum() / df[weight_var].sum()).reset_index()
df_shares = df_shares.rename(columns = {weight_var: 'share'})
output_dict.update({key: df_shares})
if redefine_weight:
weight_var = None
return output_dict
def shares_checks(self, df, share_col, data="Data"):
"""
Checks for columns that are supposed to contain shares
Parameters
----------
df : pandas.core.frame.DataFrame()
Dataframe containing share_col
share_col : str
Name of column in the data frame to check.
data : str, optional
Name of parameter being checked for Error Message.
The default is "Data".
"""
if share_col not in df.columns:
raise KeyError("Column '{}' not in ChoiceData".format(share_col))
if (df[share_col] < 0).any():
raise ValueError ("Values of '{col}' in {d} contain negative values".format(col=share_col, d=data))
if df[share_col].sum() != 1:
raise ValueError ("Values of '{col}' in {d} do not sum to 1".format(col=share_col, d=data))
def calculate_hhi(self, shares_dict, share_col="share", group_col=None):
"""
Calculates HHIs from precalculated shares at the corporation level
Parameters
----------
shares_dict: dictionary of share tables
share_col : Column name in dataframe, Optional
column that holds float of shares. Default is 'share'
group_col: Column name in dataframe, Optional
column of names to calculate HHI on. Default is self.corp_var
Returns
-------
Dictionary
keys are the same as shares_dict
values are hhis
Examples
--------
>>> corps = ['x' for x in range(50)]
>>> corps += ['y' for x in range(25)]
>>> corps += ['z' for x in range(25)]
>>> choices = ['a' for x in range(30)]
>>> choices += ['b' for x in range(20)]
>>> choices += ['c' for x in range(20)]
>>> choices += ['d' for x in range(5)]
>>> choices += ['e' for x in range(25)]
>>> df = pd.DataFrame({"corporation": corps,
"choice" : choices})
>>> cd = ChoiceData(df, "choice", corp_var="corporation")
>>> shares = cd.calculate_shares()
>>> cd.calculate_hhi(shares)
{'Base Shares': 3750.0}
"""
if type(shares_dict) != dict:
raise TypeError ("Expected type dict. Got {}".format(type(shares_dict)))
if group_col is None:
group_col = self.corp_var
elif group_col not in self.data.columns:
raise KeyError ('''"{}" is not a column in ChoiceData'''.format(group_col))
output_dict = {}
for key in shares_dict.keys():
df = shares_dict[key]
if type(df) != pd.core.frame.DataFrame:
raise TypeError ('''Expected type pandas.core.frame.DataFrame Got {}'''.format(type(df)))
self.shares_checks(df, share_col, data=key)
df = df.groupby(group_col).sum()
hhi = (df[share_col] * df[share_col]).sum()*10000
output_dict.update({key: hhi})
return output_dict
def hhi_change(self, trans_list, shares, trans_var=None, share_col="share"):
"""
Calculates change in Herfindahl-Hirschman Index (HHI) from combining
a set of choices.
Parameters
----------
trans_list : list
list of choices to combine when calculating the post-merger HHI.
shares : dict
dictionary of dataframes of shares to calculate HHIs on.
trans_var : str, optional
Column name containing objects in trans_list. The default (None)
uses self.corp_var.
share_col : str, optional
Column name containing values of share. The default is "share".
Returns
-------
dictionary
key(s) will match shares parameter
values will be a list of [pre-merge HHI, post-merge HHI, HHI change].
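Examples
--------
Illustrative sketch, reusing `cd` and the toy data from the
`calculate_hhi` example above and assuming corporations 'y' and 'z' merge:
>>> shares = cd.calculate_shares()
>>> cd.hhi_change(['y', 'z'], shares)
{'Base Shares': [3750.0, 5000.0, 1250.0]}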
"""
if type(trans_list) != list:
raise TypeError ('''trans_list expected list. got {}'''.format(type(trans_list)))
if len(trans_list) < 2:
raise ValueError ('''trans_list needs at least 2 elements to compare HHI change''')
if trans_var is None:
trans_var = self.corp_var
for elm in trans_list:
if not self.data[trans_var].isin([elm]).any():
raise ValueError ('''{element} is not an element in column {col}'''.format(element=elm, col=trans_var))
output_dict = {}
for key in shares.keys():
df = shares[key]
self.shares_checks(df, share_col, data=key)
if trans_var not in df.columns:
raise KeyError ('''{var} is not column name in {data}'''.format(var=trans_var, data=key))
pre_hhi = self.calculate_hhi({"x" : df}, share_col, group_col=trans_var)['x']
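# relabel every choice in trans_list to a single 'combined' entity and
# recompute HHI to get the post-merger concentration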
post_df = df.copy() # avoid mutating the caller's shares table
post_df[trans_var] = post_df[trans_var].where(~post_df[trans_var].isin(trans_list), 'combined')
post_hhi = self.calculate_hhi({"x": post_df}, share_col, group_col=trans_var)['x']
hhi_change = post_hhi - pre_hhi
output_dict.update({key : [pre_hhi, post_hhi, hhi_change]})
return output_dict
class DiscreteChoice():
"""
DiscreteChoice
---------
A solver for estimating discrete choice models and post-estimation
analysis.
Parameters
----------
solver: str of solver to use
copy_x: Bool, Optional
whether to create copies of data in calculations. Default is True.
coef_order: list, Optional
coefficient order used for solver 'semiparametric'
verbose: Boolean, Optional
Verbosity in solvers. Default is False
min_bin: int or float, Optional
Minimum bin size used for solver 'semiparametric'
Examples
--------
DiscreteChoice(solver='semiparametric', coef_order = ['x1', 'x2', 'x3'])
"""
def __init__(
self,
solver='semiparametric',
copy_x=True,
coef_order= None,
verbose= False,
min_bin= 25):
self.params = {'solver' : solver,
'copy_x' : copy_x,
'coef_order' : coef_order,
'verbose': verbose,
'min_bin': min_bin}
self.solver = solver
self.copy_x = copy_x
self.coef_order = coef_order
self.verbose = verbose
self.min_bin = min_bin
current_solvers = ['semiparametric']
if solver not in current_solvers:
raise ValueError ('''{a} is not supported solver. Solvers currently supported are {b}'''.format(a=solver, b=current_solvers))
if type(copy_x) is not bool:
raise ValueError ('''{} is not bool type.'''.format(copy_x))
if type(coef_order) != list:
raise ValueError ('''coef_order expected to be list. got {}'''.format(type(coef_order)))
if len(coef_order) ==0:
raise ValueError ('''coef_order must be a non-empty list''')
if type(verbose) is not bool:
raise ValueError ('''{} is not bool type.'''.format(verbose))
if type(min_bin) != float and type(min_bin) != int:
raise ValueError('''min_bin must be a numeric value greater than 0''')
if min_bin <= 0:
raise ValueError('''min_bin must be greater than 0''')
def check_is_fitted(self):
"""
Check that an Instance has been fitted
"""
try:
self.coef_
except AttributeError:
raise RuntimeError('''Instance of DiscreteChoice is not fitted''')
def fit(self, cd, use_corp=False):
"""
Fit Estimator using ChoiceData and specified solver
Parameters
----------
cd : pymanda.ChoiceData
Contains data to be fitted using DiscreteChoice
use_corp: Boolean
Whether to fit using corp_var as choice. Default is False.
"""
# if type(cd) != pymanda.ChoiceData:
# raise TypeError ('''Expected type pymanda.choices.ChoiceData Got {}'''.format(type(cd)))
for coef in self.coef_order:
if coef not in cd.data.columns:
raise KeyError ('''{} is not a column in ChoiceData'''.format(coef))
if use_corp:
choice= cd.corp_var
else:
choice= cd.choice_var
# currently only supports 'semiparametric' solver. Added solvers should use elif statement
if self.solver=='semiparametric':
X = cd.data[self.coef_order + [choice]].copy()
if cd.wght_var is not None:
X['wght'] = cd.data[cd.wght_var]
else:
X['wght'] = 1
## group observations
X['grouped'] = False
X['group'] = ""
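# iteratively bin observations: first group on the full coef_order
# interaction, keep bins whose total weight meets min_bin, then drop the
# trailing coefficient and re-bin the still-ungrouped observations, etc.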
for i in range(4, len(self.coef_order)+4):
bin_by_cols = X.columns[0:-i].to_list()
if self.verbose:
print(bin_by_cols)
screen = X[~X['grouped']].groupby(bin_by_cols).agg({'wght':['sum']})
screen = (screen >= self.min_bin)
screen.columns = screen.columns.droplevel(0)
X = pd.merge(X, screen, how='left', left_on=bin_by_cols,right_index=True)
X['sum'] = X['sum'].fillna(True)
# update grouped and group
X['group'] = np.where((~X['grouped']) & (X['sum']),
X[bin_by_cols].astype(str).agg('\b'.join,axis=1), X['group'])
X['grouped'] = X['grouped'] | X['sum']
X = X.drop('sum', axis=1)
# group ungroupables
X.loc[X['group']=="",'group'] = "ungrouped"
# converts from observations to group descriptions
X = X[[choice] + ['group', 'wght']].pivot_table(index='group', columns=choice, aggfunc='sum', fill_value=0)
#convert from counts to shares
X['rowsum'] = X.sum(axis=1)
for x in X.columns:
X[x] = X[x] / X['rowsum']
X = X.drop('rowsum', axis=1)
X.columns = [col[1] for col in X.columns]
X= X.reset_index()
self.coef_ = X
def predict(self, cd):
"""
Use Estimated model to predict individual choice
Parameters
----------
cd : pymanda.ChoiceData
ChoiceData to be predicted on.
Returns
-------
choice_probs : pandas.core.frame.DataFrame
Dataframe of predictions for each choice.
When solver ='semiparametric', each row contains probabilities of
going to any of the choices.
"""
# if type(cd) != pymanda.ChoiceData:
# raise TypeError ('''Expected type pymanda.choices.ChoiceData Got {}'''.format(type(cd)))
self.check_is_fitted()
if self.solver == 'semiparametric':
#group based on groups
X = cd.data[self.coef_order].copy()
X['group'] = ""
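# match each observation to the finest-grained fitted bin: try the full
# coef_order key first, then progressively drop trailing coefficients;
# observations that never match a fitted group fall back to 'ungrouped'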
for n in range(len(self.coef_order)):
X['g'] = X[self.coef_order[:len(self.coef_order) - n]].astype(str).agg('\b'.join,axis=1)
X['group'] = np.where((X['g'].isin(self.coef_['group'])) & (X['group'] == ""),
X['g'],
X['group'])
X.loc[X['group']=="",'group'] = "ungrouped"
X = X['group']
choice_probs = pd.merge(X, self.coef_, how='left', on='group')
choice_probs = choice_probs.drop(columns=['group'])
return choice_probs
def diversion(self, cd, choice_probs, div_choices, div_choices_var=None):
'''
Calculate diversions given a DataFrame of observations with diversion
probabilities
Parameters
----------
cd: pymanda.ChoiceData
ChoiceData to calculate diversions on.
choice_probs : pandas.core.frame.DataFrame
DataFrame of observations with diversion probabilities.
div_choices : list
list of choices to calculate diversions for.
Returns
-------
div_shares : pandas.core.frame.DataFrame
Columns are name of choice being diverted,
rows are shares of diversion.
'''
# if type(cd) != pymanda.ChoiceData:
# raise TypeError ('''Expected type pymanda.choices.ChoiceData Got {}'''.format(type(cd)))
if type(div_choices) != list and div_choices is not None:
raise TypeError('''choices is expected to be list. Got {}'''.format(type(div_choices)))
if len(div_choices) == 0:
raise ValueError ('''choices must have at least a length of 1''')
if type(choice_probs) != pd.core.frame.DataFrame:
raise TypeError ('''Expected Type pandas.core.frame.DataFrame. Got {}'''.format(type(choice_probs)))
if div_choices_var is None:
choice = cd.choice_var
if cd.corp_var != cd.choice_var:
corp_map = cd.corp_map()
elif div_choices_var not in cd.data.columns:
raise KeyError("""div_choices_var not in cd.data""")
else:
choice = div_choices_var
if len(choice_probs) != len(cd.data):
raise ValueError('''length of choice_probs and cd.data should be the same''')
choice_probs['choice'] = cd.data[choice]
all_choices = list(choice_probs['choice'].unique())
for c in all_choices:
if c not in choice_probs.columns:
raise KeyError ('''{} is not a column in choice_probs'''.format(c))
if cd.wght_var is not None:
choice_probs['wght'] = cd.data[cd.wght_var]
else:
choice_probs['wght'] = 1
div_shares = | pd.DataFrame(index=all_choices) | pandas.DataFrame |
"""
Created by adam on 11/8/16
"""
__author__ = 'adam'
import pandas as pd
import environment as env
import Models.TweetORM as TweetORM
pd.options.display.max_rows = 999 # let pandas dataframe listings go long
def isRetweet(text):
"""
Classifies whether a tweet is a retweet based on how it starts
"""
if text[:4] == "RT @":
return True
if text[:4] == "MT @":
return True
return False
def isReply(tweetObject):
if tweetObject.in_reply_to_screen_name != None:
return True
if tweetObject.text[0] == '@':
return True
return False
search_terms = ['Spoonie',
'CRPS',
'Migraine',
'RSD',
'Fibro',
'Fibromyalgia',
'Vulvodynia',
'ChronicPain',
'pain',
'endometriosis',
'neuropathy',
'arthritis',
'neuralgia']
class Condition(object):
"""
Data holder
"""
def __init__(self, name, filepath=env.DATA_FOLDER):
self.name = name
self.datafile = "%s/%s.csv" % (filepath, self.name)
self.indexer_ids = set([])
self.userids = set([])
self.users = ''
def get_total(self):
"""
Returns the total number of users
"""
return len(self.userids)
def get_maxid(self):
"""
Returns the highest indexerid
"""
return max(list(self.indexer_ids))
def add_userid(self, userid):
"""
Add userid to list
"""
self.userids.update([userid])
def add_indexer_id(self, indexer_id):
"""
Add indexerid
"""
self.indexer_ids.update([indexer_id])
class UserGetter(object):
"""
Retrieves user info and builds dataframe of users when given list of ids
"""
def __init__(self, sql_alchemy_engine):
self.engine = sql_alchemy_engine
def _get_user(self, searchterm, userid):
query = "SELECT userID, indexer, '%s' AS term, description AS profile FROM users WHERE userid = %s" % (
searchterm, userid)
return pd.read_sql_query(query, self.engine)
def get_from_list(self, searchterm, userids):
"""
Args:
searchterm: String that was searched for
userids: List of userids to retrieve
Returns:
Dataframe with labels userID, indexer, term, profile
"""
frames = []
for uid in userids:
frames.append(self._get_user(searchterm, uid))
return | pd.concat(frames) | pandas.concat |
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default False
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
bool_frame_with_na : DataFrame
DataFrame with boolean columns and some NA values
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_excludeds_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
result = mixed.min()
expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
result = getattr(df, method)()
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [0, 0],
"b": [0, np.nan],
"c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index,
columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
# ensure this works, bug report
bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self, float_frame, float_string_frame):
# unit test when have object data
the_mean = float_string_frame.mean(axis=0)
the_sum = float_string_frame.sum(axis=0, numeric_only=True)
| tm.assert_index_equal(the_sum.index, the_mean.index) | pandas.util.testing.assert_index_equal |
"""Get data into JVM for prediction and out again as Spark Dataframe"""
import logging
logger = logging.getLogger('nlu')
import pyspark
from pyspark.sql.functions import monotonically_increasing_id
import numpy as np
import pandas as pd
from pyspark.sql.types import StringType, StructType, StructField
class DataConversionUtils():
# Modin as well, but optional, so we don't import the type yet
supported_types = [pyspark.sql.DataFrame, pd.DataFrame, pd.Series, np.ndarray]
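# Typical dispatch (illustrative sketch; `spark` is an assumed active SparkSession):
# sdf, stranger_features, output_datatype = DataConversionUtils.pdf_to_sdf(pandas_df, spark)
# each converter returns the Spark DataFrame, any extra ("stranger") columns, and a tag
# recording the original datatype so results can be cast back to that format later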
@staticmethod
def except_text_col_not_found(cols):
print(
f'Could not find a column named "text" in the input Pandas DataFrame. Please ensure a column with that name exists. Columns in the DF are: {cols}')
@staticmethod
def sdf_to_sdf(data, spark_sess, raw_text_column='text'):
"""No casting, Spark to Spark. Just add index col"""
logger.info(f"Casting Spark DF to Spark DF")
output_datatype = 'spark'
data = data.withColumn('origin_index', monotonically_increasing_id().alias('origin_index'))
stranger_features = []
if raw_text_column in data.columns:
# store all stranger features
if len(data.columns) > 1:
stranger_features = list(set(data.columns) - set([raw_text_column]))
else:
DataConversionUtils.except_text_col_not_found(data.columns)
return data, stranger_features, output_datatype
@staticmethod
def pdf_to_sdf(data, spark_sess, raw_text_column='text'):
"""Casting pandas to spark and add index col"""
logger.info(f"Casting Pandas DF to Spark DF")
output_datatype = 'pandas'
stranger_features = []
sdf = None
# set first col as text column if there is none
if raw_text_column not in data.columns: data.rename(columns={data.columns[0]: 'text'}, inplace=True)
data['origin_index'] = data.index
if raw_text_column in data.columns:
if len(data.columns) > 1:
# make Nans to None, or spark will crash
data = data.where(pd.notnull(data), None)
data = data.dropna(axis=1, how='all')
stranger_features = list(set(data.columns) - set([raw_text_column]))
sdf = spark_sess.createDataFrame(data)
else:
DataConversionUtils.except_text_col_not_found(data.columns)
return sdf, stranger_features, output_datatype
@staticmethod
def pds_to_sdf(data, spark_sess, raw_text_column='text'):
"""Casting pandas series to spark and add index col. # for df['text'] colum/series passing casting follows pseries->pdf->spark->pd """
logger.info(f"Casting Pandas Series to Spark DF")
output_datatype = 'pandas_series'
sdf = None
schema = StructType([StructField(raw_text_column, StringType(), True)])
data = pd.DataFrame(data).dropna(axis=1, how='all')
# If series from a column is passed, its column name will be reused.
if raw_text_column not in data.columns and len(data.columns) == 1:
data[raw_text_column] = data[data.columns[0]]
else:
            logger.info(
                f'INFO: NLU will assume {data.columns[0]} is the text column, since the default text column could not be found')
data[raw_text_column] = data[data.columns[0]]
data['origin_index'] = data.index
if raw_text_column in data.columns:
sdf = spark_sess.createDataFrame(pd.DataFrame(data[raw_text_column]), schema=schema)
else:
DataConversionUtils.except_text_col_not_found(data.columns)
if 'origin_index' not in sdf.columns:
sdf = sdf.withColumn('origin_index', monotonically_increasing_id().alias('origin_index'))
return sdf, [], output_datatype
@staticmethod
def np_to_sdf(data, spark_sess, raw_text_column='text'):
"""Casting numpy array to spark and add index col. This is a bit inefficient. Casting follow np->pd->spark->pd. We could cut out the first pd step """
logger.info(f"Casting Numpy Array to Spark DF")
output_datatype = 'numpy_array'
        if len(data.shape) != 1:
            raise ValueError(
                f"Exception: Input numpy array must be 1 dimensional for prediction. Input data shape is {data.shape}")
sdf = spark_sess.createDataFrame(pd.DataFrame({raw_text_column: data, 'origin_index': list(range(len(data)))}))
return sdf, [], output_datatype
@staticmethod
def str_to_sdf(data, spark_sess, raw_text_column='text'):
"""Casting str to spark and add index col. This is a bit inefficient. Casting follow # inefficient, str->pd->spark->pd , we can could first pd"""
logger.info(f"Casting String to Spark DF")
output_datatype = 'string'
sdf = spark_sess.createDataFrame(pd.DataFrame({raw_text_column: data, 'origin_index': [0]}, index=[0]))
return sdf, [], output_datatype
@staticmethod
def str_list_to_sdf(data, spark_sess, raw_text_column='text'):
"""Casting str list to spark and add index col. This is a bit inefficient. Casting follow # # inefficient, list->pd->spark->pd , we can could first pd"""
logger.info(f"Casting String List to Spark DF")
output_datatype = 'string_list'
if all(type(elem) == str for elem in data):
sdf = spark_sess.createDataFrame(
pd.DataFrame({raw_text_column: pd.Series(data), 'origin_index': list(range(len(data)))}))
        else:
            raise ValueError("Exception: Not all elements in input list are of type string.")
return sdf, [], output_datatype
@staticmethod
def fallback_modin_to_sdf(data, spark_sess, raw_text_column='text'):
"""Casting potential Modin data to spark and add index col. # Modin tests, This could crash if Modin not installed """
logger.info(f"Casting Modin DF to Spark DF")
sdf = None
output_datatype = ''
try:
import modin.pandas as mpd
if isinstance(data, mpd.DataFrame):
data = pd.DataFrame(data.to_dict()) # create pandas to support type inference
output_datatype = 'modin'
data['origin_index'] = data.index
if raw_text_column in data.columns:
if len(data.columns) > 1:
data = data.where(pd.notnull(data), None) # make Nans to None, or spark will crash
data = data.dropna(axis=1, how='all')
                        stranger_features = list(set(data.columns) - {raw_text_column})
sdf = spark_sess.createDataFrame(data)
else:
DataConversionUtils.except_text_col_not_found(data.columns)
if isinstance(data, mpd.Series):
output_datatype = 'modin_series'
data = pd.Series(data.to_dict()) # create pandas to support type inference
data = pd.DataFrame(data).dropna(axis=1, how='all')
data['origin_index'] = data.index
index_provided = True
if raw_text_column in data.columns:
sdf = spark_sess.createDataFrame(data[['text']])
else:
DataConversionUtils.except_text_col_not_found(data.columns)
except:
print(
"If you use Modin, make sure you have installed 'pip install modin[ray]' or 'pip install modin[dask]' backend for Modin ")
return sdf, [], output_datatype
@staticmethod
def to_spark_df(data, spark_sess, raw_text_column='text'):
"""Convert supported datatypes to SparkDF and extract extra data for prediction later on."""
try:
if isinstance(data, pyspark.sql.dataframe.DataFrame):
return DataConversionUtils.sdf_to_sdf(data, spark_sess, raw_text_column)
elif isinstance(data, pd.DataFrame):
return DataConversionUtils.pdf_to_sdf(data, spark_sess, raw_text_column)
elif isinstance(data, pd.Series):
return DataConversionUtils.pds_to_sdf(data, spark_sess, raw_text_column)
elif isinstance(data, np.ndarray):
return DataConversionUtils.np_to_sdf(data, spark_sess, raw_text_column)
elif isinstance(data, str):
return DataConversionUtils.str_to_sdf(data, spark_sess, raw_text_column)
elif isinstance(data, list):
return DataConversionUtils.str_list_to_sdf(data, spark_sess, raw_text_column)
else:
return DataConversionUtils.fallback_modin_to_sdf(data, spark_sess, raw_text_column)
        except Exception as err:
            raise ValueError("Data could not be converted to a Spark DataFrame for internal conversion.") from err
@staticmethod
def str_to_pdf(data,raw_text_column):
logger.info(f"Casting String to Pandas DF")
return pd.DataFrame({raw_text_column:[data]}).reset_index().rename(columns = {'index' : 'origin_index'} ), [], 'string'
@staticmethod
def str_list_to_pdf(data,raw_text_column):
logger.info(f"Casting String List to Pandas DF")
return pd.DataFrame({raw_text_column:data}).reset_index().rename(columns = {'index' : 'origin_index'} ), [], 'string_list'
@staticmethod
def np_to_pdf(data,raw_text_column):
logger.info(f"Casting Numpy Array to Pandas DF")
        return pd.DataFrame({raw_text_column:data}).reset_index().rename(columns = {'index' : 'origin_index'} ), [], 'numpy_array'
@staticmethod
def pds_to_pdf(data,raw_text_column):
return | pd.DataFrame({raw_text_column:data}) | pandas.DataFrame |
import datetime
import os
import sys
import geopandas as gpd
import numpy as np
import pandas as pd
from bokeh.io import output_file, save
from bokeh.layouts import column
from bokeh.models.widgets import Panel, Tabs
from .plotting import PLOT_HEIGHT, PLOT_WIDTH, plot_map, plot_time_series
from .utils import Data, get_data, logger, markdown_html
today = datetime.date.today()
def is_updated(filename):
"""
    check whether the file exists and,
    if so, whether it was modified today
"""
if os.path.isfile(filename):
creation_date = os.path.getmtime(filename)
creation_date = datetime.datetime.fromtimestamp(creation_date)
return creation_date.date() == today
else:
return False
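# Illustrative sketch (added, not original code); file names are hypothetical.
# is_updated() keys off the file's modification time, so a file written earlier
# today counts as fresh:
#   is_updated('ts_data.csv')   # True if 'ts_data.csv' was modified today
#   is_updated('missing.csv')   # False, the file does not exist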
def ts_plots(ts_data_file):
# read time series data and plot
ts_data = (
pd.read_csv(ts_data_file)
.assign(Date=lambda d: | pd.to_datetime(d.Date) | pandas.to_datetime |
"""
Monte Carlo-type tests for the BM model
Note that the actual tests that run are just regression tests against
previously estimated values with small sample sizes that can be run quickly
for continuous integration. However, this file can be used to re-run (slow)
large-sample Monte Carlo tests.
"""
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_allclose
from scipy.signal import lfilter
from statsmodels.tsa.statespace import (
dynamic_factor_mq, sarimax, varmax, dynamic_factor)
def simulate_k_factor1(nobs=1000):
mod_sim = dynamic_factor.DynamicFactor(np.zeros((1, 4)), k_factors=1,
factor_order=1, error_order=1)
loadings = [1.0, -0.75, 0.25, -0.3, 0.5]
p = np.r_[loadings[:mod_sim.k_endog],
[10] * mod_sim.k_endog,
0.5,
[0.] * mod_sim.k_endog]
ix = pd.period_range(start='1935-01', periods=nobs, freq='M')
endog = pd.DataFrame(mod_sim.simulate(p, nobs), index=ix)
true = pd.Series(p, index=mod_sim.param_names)
# Compute levels series (M and Q)
ix = pd.period_range(start=endog.index[0] - 1, end=endog.index[-1],
freq=endog.index.freq)
levels_M = 1 + endog.reindex(ix) / 100
levels_M.iloc[0] = 100
levels_M = levels_M.cumprod()
log_levels_M = np.log(levels_M) * 100
log_levels_Q = (np.log(levels_M).resample('Q', convention='e')
.sum().iloc[:-1] * 100)
# This is an alternative way to compute the quarterly levels
# endog_M = endog.iloc[:, :3]
# x = endog.iloc[:, 3:]
# endog_Q = (x + 2 * x.shift(1) + 3 * x.shift(2) + 2 * x.shift(3) +
# x.shift(4)).resample('Q', convention='e').last().iloc[:-1] / 3
# levels_Q = 1 + endog.iloc[:, 3:] / 100
# levels_Q.iloc[0] = 100
# Here is another alternative way to compute the quarterly levels
# weights = np.array([1, 2, 3, 2, 1])
# def func(x, weights):
# return np.sum(weights * x)
# r = endog_M.rolling(5)
# (r.apply(func, args=(weights,), raw=False).resample('Q', convention='e')
# .last().iloc[:-1].tail())
# Compute the growth rate series that we'll actually run the model on
endog_M = log_levels_M.iloc[:, :3].diff()
endog_Q = log_levels_Q.iloc[:, 3:].diff()
return endog_M, endog_Q, log_levels_M, log_levels_Q, true
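# Illustrative sketch (added, not part of the original tests): the monthly and
# quarterly growth-rate frames returned above are shaped for the
# mixed-frequency dynamic factor model imported at the top of this file,
# roughly along the lines of:
#   endog_M, endog_Q, _, _, true = simulate_k_factor1(nobs=200)
#   mod = dynamic_factor_mq.DynamicFactorMQ(endog_M, endog_quarterly=endog_Q,
#                                           factors=1, factor_orders=1)
#   res = mod.fit()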
def simulate_k_factors3_blocks2(nobs=1000, idiosyncratic_ar1=False):
# Simulate the first two factors
ix = pd.period_range(start='2000-01', periods=1, freq='M')
endog = pd.DataFrame(np.zeros((1, 2)), columns=['f1', 'f2'], index=ix)
mod_f_12 = varmax.VARMAX(endog, order=(1, 0), trend='n')
params = [0.5, 0.1, -0.2, 0.9, 1.0, 0, 1.0]
f_12 = mod_f_12.simulate(params, nobs)
# Simulate the third factor
endog = pd.Series([0], name='f3', index=ix)
mod_f_3 = sarimax.SARIMAX(endog, order=(2, 0, 0))
params = [0.7, 0.1, 1.]
f_3 = mod_f_3.simulate(params, nobs)
# Combine the factors
f = pd.concat([f_12, f_3], axis=1)
# Observed variables
k_endog = 8
design = np.zeros((k_endog, 3))
design[0] = [1.0, 1.0, 1.0]
design[1] = [0.5, -0.8, 0.0]
design[2] = [1.0, 0.0, 0.0]
design[3] = [0.2, 0.0, -0.1]
design[4] = [0.5, 0.0, 0.0]
design[5] = [-0.2, 0.0, 0.0]
design[6] = [1.0, 1.0, 1.0]
design[7] = [-1.0, 0.0, 0.0]
rho = np.array([0.5, 0.2, -0.1, 0.0, 0.4, 0.9, 0.05, 0.05])
if not idiosyncratic_ar1:
rho *= 0.0
eps = [lfilter([1], [1, -rho[i]], np.random.normal(size=nobs))
for i in range(k_endog)]
endog = (design @ f.T).T + eps
endog.columns = [f'y{i + 1}' for i in range(k_endog)]
# True parameters
tmp1 = design.ravel()
tmp2 = np.linalg.cholesky(mod_f_12['state_cov'])
tmp3 = rho if idiosyncratic_ar1 else []
true = np.r_[
tmp1[tmp1 != 0],
mod_f_12['transition', :2, :].ravel(),
mod_f_3['transition', :, 0],
tmp2[np.tril_indices_from(tmp2)],
mod_f_3['state_cov', 0, 0],
tmp3,
[1] * k_endog
]
# Compute levels series (M and Q)
ix = | pd.period_range(endog.index[0] - 1, endog.index[-1], freq='M') | pandas.period_range |
import pytest
import numpy as np
import pandas as pd
from pandas import Categorical, Series, CategoricalIndex
from pandas.core.dtypes.concat import union_categoricals
from pandas.util import testing as tm
class TestUnionCategoricals(object):
def test_union_categorical(self):
# GH 13361
data = [
(list('abc'), list('abd'), list('abcabd')),
([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
(['b', 'b', np.nan, 'a'], ['a', np.nan, 'c'],
['b', 'b', np.nan, 'a', 'a', np.nan, 'c']),
(pd.date_range('2014-01-01', '2014-01-05'),
pd.date_range('2014-01-06', '2014-01-07'),
pd.date_range('2014-01-01', '2014-01-07')),
(pd.date_range('2014-01-01', '2014-01-05', tz='US/Central'),
pd.date_range('2014-01-06', '2014-01-07', tz='US/Central'),
pd.date_range('2014-01-01', '2014-01-07', tz='US/Central')),
(pd.period_range('2014-01-01', '2014-01-05'),
pd.period_range('2014-01-06', '2014-01-07'),
pd.period_range('2014-01-01', '2014-01-07')),
]
for a, b, combined in data:
for box in [Categorical, CategoricalIndex, Series]:
result = union_categoricals([box(Categorical(a)),
box(Categorical(b))])
expected = Categorical(combined)
tm.assert_categorical_equal(result, expected,
check_category_order=True)
# new categories ordered by appearance
s = Categorical(['x', 'y', 'z'])
s2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([s, s2])
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
s = Categorical([0, 1.2, 2], ordered=True)
s2 = Categorical([0, 1.2, 2], ordered=True)
result = union_categoricals([s, s2])
expected = Categorical([0, 1.2, 2, 0, 1.2, 2], ordered=True)
tm.assert_categorical_equal(result, expected)
# must exactly match types
s = Categorical([0, 1.2, 2])
s2 = Categorical([2, 3, 4])
msg = 'dtype of categories must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([s, s2])
msg = 'No Categoricals to union'
with tm.assert_raises_regex(ValueError, msg):
union_categoricals([])
def test_union_categoricals_nan(self):
# GH 13759
res = union_categoricals([pd.Categorical([1, 2, np.nan]),
pd.Categorical([3, 2, np.nan])])
exp = Categorical([1, 2, np.nan, 3, 2, np.nan])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical(['A', 'B']),
pd.Categorical(['B', 'B', np.nan])])
exp = Categorical(['A', 'B', 'B', 'B', np.nan])
tm.assert_categorical_equal(res, exp)
val1 = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-03-01'),
pd.NaT]
val2 = [pd.NaT, pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-02-01')]
res = union_categoricals([pd.Categorical(val1), pd.Categorical(val2)])
exp = Categorical(val1 + val2,
categories=[pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-03-01'),
pd.Timestamp('2011-02-01')])
tm.assert_categorical_equal(res, exp)
# all NaN
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical(['X'])])
exp = Categorical([np.nan, np.nan, 'X'])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical([np.nan, np.nan])])
exp = Categorical([np.nan, np.nan, np.nan, np.nan])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_empty(self):
# GH 13759
res = union_categoricals([pd.Categorical([]),
pd.Categorical([])])
exp = Categorical([])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([]),
pd.Categorical([1.0])])
exp = Categorical([1.0])
tm.assert_categorical_equal(res, exp)
# to make dtype equal
nanc = pd.Categorical(np.array([np.nan], dtype=np.float64))
res = union_categoricals([nanc,
pd.Categorical([])])
tm.assert_categorical_equal(res, nanc)
def test_union_categorical_same_category(self):
# check fastpath
c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
c2 = Categorical([3, 2, 1, np.nan], categories=[1, 2, 3, 4])
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, 4, 3, 2, 1, np.nan],
categories=[1, 2, 3, 4])
tm.assert_categorical_equal(res, exp)
c1 = Categorical(['z', 'z', 'z'], categories=['x', 'y', 'z'])
c2 = Categorical(['x', 'x', 'x'], categories=['x', 'y', 'z'])
res = union_categoricals([c1, c2])
exp = Categorical(['z', 'z', 'z', 'x', 'x', 'x'],
categories=['x', 'y', 'z'])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_ordered(self):
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
msg = 'Categorical.ordered must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
res = union_categoricals([c1, c1])
exp = Categorical([1, 2, 3, 1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, np.nan, 3, 2], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
msg = "to union ordered Categoricals, all categories must be the same"
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
def test_union_categoricals_ignore_order(self):
# GH 15219
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
msg = 'Categorical.ordered must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2], ignore_order=False)
res = union_categoricals([c1, c1], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c1, c1], ignore_order=False)
exp = Categorical([1, 2, 3, 1, 2, 3],
categories=[1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, np.nan, 3, 2])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c2, c1], ignore_order=True,
sort_categories=True)
exp = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([4, 5, 6], ordered=True)
result = union_categoricals([c1, c2], ignore_order=True)
expected = Categorical([1, 2, 3, 4, 5, 6])
tm.assert_categorical_equal(result, expected)
msg = "to union ordered Categoricals, all categories must be the same"
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2], ignore_order=False)
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
def test_union_categoricals_sort(self):
# GH 13846
c1 = Categorical(['x', 'y', 'z'])
c2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['a', 'b', 'c', 'x', 'y', 'z'])
tm.assert_categorical_equal(result, expected)
# fastpath
c1 = Categorical(['a', 'b'], categories=['b', 'a', 'c'])
c2 = Categorical(['b', 'c'], categories=['b', 'a', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['a', 'b'], categories=['c', 'a', 'b'])
c2 = Categorical(['b', 'c'], categories=['c', 'a', 'b'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath - skip resort
c1 = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
c2 = Categorical(['b', 'c'], categories=['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['x', np.nan])
c2 = Categorical([np.nan, 'b'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['x', np.nan, np.nan, 'b'],
categories=['b', 'x'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([np.nan])
c2 = Categorical([np.nan])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical([np.nan, np.nan], categories=[])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([])
c2 = Categorical([])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical([])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['b', 'a'], categories=['b', 'a', 'c'], ordered=True)
c2 = Categorical(['a', 'c'], categories=['b', 'a', 'c'], ordered=True)
with pytest.raises(TypeError):
union_categoricals([c1, c2], sort_categories=True)
def test_union_categoricals_sort_false(self):
# GH 13846
c1 = Categorical(['x', 'y', 'z'])
c2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath
c1 = Categorical(['a', 'b'], categories=['b', 'a', 'c'])
c2 = Categorical(['b', 'c'], categories=['b', 'a', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['b', 'a', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath - skip resort
c1 = | Categorical(['a', 'b'], categories=['a', 'b', 'c']) | pandas.Categorical |
import pandas as pd
import numpy as np
import json
import io
import random
def prepareSalesData(csvfile):
#Read store 20 sales
store20_sales = pd.read_csv(csvfile, index_col=None)
# Create Year column for grouping data
store20_sales['Date'] = pd.to_datetime(store20_sales['Date'])
store20_sales['Year'] = store20_sales['Date'].dt.year
#Sort weekly sales by department
store20_sales = store20_sales.sort_values(['Date', 'Dept'], ascending=True).reset_index(drop=True)
#Select columns of interest
store20_mod_sales = store20_sales[['Year', 'Date', 'Weekly_Sales', 'Dept', 'IsHoliday']]
#Select departments with 143 weekly sales
store20_mod_sales = store20_mod_sales.groupby(store20_mod_sales.Dept, as_index=True).filter(lambda x: len(x['Weekly_Sales']) > 142)
#Map department numbers to categorical variables
dept_list = store20_mod_sales['Dept'].unique()
cat_values = [i for i in range(0, len(dept_list))]
df_dept = | pd.DataFrame(dept_list, index=cat_values, columns=['Dept']) | pandas.DataFrame |
"""
"""
import os
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from src.utils.constants import REGIONS, LANDCOVER_PERIODS, DICTIONARY
if __name__ == "__main__":
# Project's root
os.chdir("../..")
fig, axs = plt.subplots(2, 2, figsize=(11.69, 4.14))
correlations = | pd.read_csv("results/csv/burned_area_landcover_change_corr.csv") | pandas.read_csv |
#%%
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import networkx as nx
import psycopg2
import datatable as dt
import pickle
import plotly.express as px
from plotly.subplots import make_subplots  # used by the *_tseries_viz methods below
from collections import namedtuple, defaultdict
from datetime import datetime
import torch
from torch.utils.data import TensorDataset, DataLoader
from torch_geometric.data import Dataset, Data
class DBconnector():
"""
Connection to the database
"""
#host="172.18.0.1", port = 5555, database="base-ina", user="postgres", password="<PASSWORD>"
def __init__(self, url: str, port: int, database: str, user: str, password: str) -> None:
self.pg_conn = psycopg2.connect(host=url, port=port, database=database, user=user, password=password)
def query(self, query:str):
cur0 = self.pg_conn.cursor()
cur0.execute(query)
query_result = cur0.fetchall()
return query_result
class GraphEngine():
"""
    Initializes the graph of the whole model by doing the corresponding queries to the database.
"""
def get_time_steps(self):
time_range_query = f"""
SELECT DISTINCT
elapsed_time
FROM
events_nodes en
WHERE
event_id = '{self.event}'
"""
cur0 = self.conn.cursor()
cur0.execute(time_range_query)
time_range_query_result = cur0.fetchall()
time_range_query_result = sorted([time for time in time_range_query_result])
time_range_query_result_formatted = sorted([time[0].strftime("%Y-%m-%d %H:%M:%S") for time in time_range_query_result])
return time_range_query_result, time_range_query_result_formatted
def which_time_step(self):
time_range = self.get_time_steps()[0]
first_two = time_range[:2]
time_step = (first_two[1][0] - first_two[0][0]).seconds // 60
return time_step, f'{time_step} minutes'
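    # Illustrative sketch (added, not original code): for an event reported on a
    # 5-minute grid, which_time_step() returns (5, '5 minutes'); the numeric
    # first element is what the time-series plotting methods below use when
    # resampling rainfall.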
def get_nodes(self):
nodes_query = f"""
SELECT
node_id
FROM
nodes_coordinates nc
WHERE
nc.model_id = '{self.model}'
"""
cur1 = self.conn.cursor()
cur1.execute(nodes_query)
nodes_query_result = cur1.fetchall()
nodes = sorted([node for nodes in nodes_query_result for node in nodes])
return nodes
def nodal_linkage_query(self, elapsed_time:str, attrs:dict, persist:bool=True):
link_attrs = ', '.join(['edge', 'link_id', 'from_node', 'to_node', 'elapsed_time']) + ', ' + ', '.join(attrs['edges'])
nodal_linkage_query = f"""
WITH links_event AS
(
SELECT
*
FROM
events_links el
WHERE
el.event_id = '{self.event}'
AND el.elapsed_time = '{elapsed_time}'
AND el.flow_rate != 0
)
,
links_conduits_model AS
(
SELECT
*
FROM
links_conduits AS lc
WHERE
lc.model_id = '{self.model}'
)
,
links_orifices_model AS
(
SELECT
*
FROM
links_orifices AS lo
WHERE
lo.model_id = '{self.model}'
)
,
links_weirs_model AS
(
SELECT
*
FROM
links_weirs AS lw
WHERE
lw.model_id = '{self.model}'
)
,
links_types_event AS
(
SELECT
(
CASE
WHEN
links.flow_rate > 0
THEN
concat(conduits.from_node, '->', conduits.to_node)
ELSE
concat(conduits.to_node, '->', conduits.from_node)
END
) AS edge, links.link_id,
(
CASE
WHEN
links.flow_rate > 0
THEN
conduits.from_node
ELSE
conduits.to_node
END
)
AS from_node,
(
CASE
WHEN
links.flow_rate < 0
THEN
conduits.from_node
ELSE
conduits.to_node
END
)
AS to_node, elapsed_time, ABS(flow_rate) AS flow_rate , flow_depth, ABS(flow_velocity ) AS flow_velocity, froude_number, capacity, conduits.length, conduits.roughness
FROM
links_event AS links
LEFT JOIN
links_conduits_model AS conduits
ON conduits.conduit_id = links.link_id
WHERE
from_node NOTNULL
UNION
SELECT
(
CASE
WHEN
links.flow_rate > 0
THEN
concat(orifices.from_node, '->', orifices.to_node)
ELSE
concat(orifices.to_node, '->', orifices.from_node)
END
) AS edge, links.link_id,
(
CASE
WHEN
links.flow_rate > 0
THEN
orifices.from_node
ELSE
orifices.to_node
END
)
AS from_node,
(
CASE
WHEN
links.flow_rate < 0
THEN
orifices.from_node
ELSE
orifices.to_node
END
)
AS to_node, elapsed_time, ABS(flow_rate) AS flow_rate , flow_depth, ABS(flow_velocity ) AS flow_velocity, froude_number, capacity, 0 AS length, 0 AS roughness
FROM
links_event AS links
LEFT JOIN
links_orifices_model AS orifices
ON orifices.orifice_id = links.link_id
WHERE
from_node NOTNULL
UNION
SELECT
(
CASE
WHEN
links.flow_rate > 0
THEN
concat(weirs.from_node, '->', weirs.to_node)
ELSE
concat(weirs.to_node, '->', weirs.from_node)
END
) AS edge, links.link_id,
(
CASE
WHEN
links.flow_rate > 0
THEN
weirs.from_node
ELSE
weirs.to_node
END
)
AS from_node,
(
CASE
WHEN
links.flow_rate < 0
THEN
weirs.from_node
ELSE
weirs.to_node
END
)
AS to_node, elapsed_time, ABS(flow_rate) AS flow_rate , flow_depth, ABS(flow_velocity ) AS flow_velocity, froude_number, capacity, 0 AS length, 0 AS roughness
FROM
links_event AS links
LEFT JOIN
links_weirs_model AS weirs
ON weirs.weir_id = links.link_id
WHERE
from_node NOTNULL
)
-- , rain_mdata AS
-- (
-- SELECT
-- *
-- FROM
-- raingages_metadata rm
-- WHERE
-- rm.precipitation_id = '{self.precip}'
-- )
-- ,
-- rain_tseries AS
-- (
-- SELECT
-- *
-- FROM
-- raingages_timeseries rt
-- WHERE
-- rt.precipitation_id = '{self.precip}'
-- AND rt.elapsed_time = '{elapsed_time}'
-- )
-- ,
-- rain AS
-- (
-- SELECT
-- rt.raingage_id,
-- rt.elapsed_time,
-- rt.VALUE,
-- rm.format,
-- rm.unit
-- FROM
-- raingages_timeseries rt
-- JOIN
-- raingages_metadata AS rm
-- ON rt.precipitation_id = rm.precipitation_id
-- )
,
subc AS
(
SELECT
*
FROM
subcatchments s2
WHERE
s2.model_id = '{self.model}'
)
,
event_subc AS
(
SELECT
*
FROM
events_subcatchments es
WHERE
es.event_id = '{self.event}'
AND es.elapsed_time = '{elapsed_time}'
)
,
event_subc_outlet AS
(
SELECT DISTINCT
subc.subcatchment_id,
subc.outlet,
subc.raingage_id,
elapsed_time,
event_subc.rainfall
FROM
subc
INNER JOIN
event_subc
ON subc.subcatchment_id = event_subc.subcatchment_id
)
-- ,
-- event_subc_rainfall AS
-- (
-- SELECT DISTINCT
-- eso.*,
-- rain.VALUE,
-- rain.format,
-- rain.unit
-- FROM
-- event_subc_outlet eso
-- INNER JOIN
-- rain
-- ON rain.raingage_id = eso.raingage_id
-- AND rain.elapsed_time = eso.elapsed_time
-- )
,
final AS
(
SELECT DISTINCT
lte.*,
COALESCE (esr.rainfall, 0) AS rainfall--,
-- COALESCE (esr.VALUE, 0) AS rainfall_acc
FROM
links_types_event lte
LEFT JOIN
event_subc_outlet esr
ON lte.from_node = esr.outlet
AND lte.elapsed_time = esr.elapsed_time
)
SELECT {link_attrs}
FROM final
"""
cur1 = self.conn.cursor()
cur1.execute(nodal_linkage_query)
nodal_linkage_query_result = cur1.fetchall()
if persist:
self.nodal_linkage_query_results[f'{self.event}_{elapsed_time}'] = nodal_linkage_query_result
else:
return nodal_linkage_query_result
def get_nodal_linkage(self, elapsed_time:str, attrs:dict, persist:bool=True):
link_attrs = ','.join(['edge', 'link_id', 'from_node', 'to_node', 'elapsed_time']) + ',' + ','.join(attrs['edges'])
if persist:
try:
self.nodal_linkage_dict[f'{self.event}_{elapsed_time}']
except:
self.nodal_linkage_query(elapsed_time, attrs)
nodal_linkage = {i[0]:
dict(zip(link_attrs.split(','), i))
for i in self.nodal_linkage_query_results[f'{self.event}_{elapsed_time}']
}
self.nodal_linkage_dict[f'{self.event}_{elapsed_time}'] = nodal_linkage
else:
query = self.nodal_linkage_query(elapsed_time, attrs, persist=False)
nodal_linkage = {i[0]:
dict(zip(link_attrs.split(','), i))
for i in query
}
return nodal_linkage
def nodal_data_query(self, elapsed_time:str, attrs:dict, persist:bool=True):
node_attrs = ','.join(['node_id', 'subcatchment_id', 'elapsed_time', 'depth_above_invert']) + ',' + ','.join(attrs['nodes'])
nodal_data_query = f"""
WITH model_node_coordinates AS
(
SELECT
*
FROM
nodes_coordinates AS nc
WHERE
nc.model_id = '{self.model}'
)
,
junctions AS
(
SELECT
*
FROM
nodes_junctions nj
WHERE
nj.model_id = '{self.model}'
)
,
storages AS
(
SELECT
*
FROM
nodes_storage ns
WHERE
ns.model_id = '{self.model}'
)
,
outfalls AS
(
SELECT
*
FROM
nodes_outfalls AS no2
WHERE
no2.model_id = '{self.model}'
)
,
nodes AS
(
SELECT
mnc.node_id,
mnc.lat,
mnc.lon,
j.elevation,
j.init_depth,
j.max_depth
FROM
model_node_coordinates mnc
JOIN
junctions j
ON mnc.node_id = j.junction_id
WHERE
elevation NOTNULL
UNION ALL
SELECT
mnc.node_id,
mnc.lat,
mnc.lon,
s.elevation,
s.init_depth,
s.max_depth
FROM
model_node_coordinates mnc
JOIN
storages s
ON mnc.node_id = s.storage_id
WHERE
elevation NOTNULL
UNION ALL
SELECT
mnc.node_id,
mnc.lat,
mnc.lon,
o.elevation,
0 AS init_depth,
0 AS max_depth
FROM
model_node_coordinates mnc
JOIN
outfalls o
ON mnc.node_id = o.outfall_id
WHERE
elevation NOTNULL
)
,
subcatch AS
(
SELECT
*
FROM
subcatchments s
WHERE
s.model_id = '{self.model}'
)
,
event_nodes AS
(
SELECT
*
FROM
events_nodes en
WHERE
event_id = '{self.event}'
AND en.elapsed_time = '{elapsed_time}'
)
,
event_subc AS
(
SELECT
*
FROM
events_subcatchments es
WHERE
es.event_id = '{self.event}'
AND es.elapsed_time = '{elapsed_time}'
)
,
event_subc_outlet AS
(
SELECT
event_subc.*,
subcatch.outlet,
subcatch.raingage_id
FROM
subcatch
LEFT JOIN
event_subc
ON subcatch.subcatchment_id = event_subc.subcatchment_id
)
,
nodal_out_data AS
(
SELECT
en.node_id,
COALESCE (subcatchment_id, 'SIN CUENCA DE APORTE') AS subcatchment_id,
en.elapsed_time,
en.depth_above_invert,
en.flow_lost_flooding,
en.hydraulic_head,
en.lateral_inflow,
en.total_inflow,
en.volume_stored_ponded,
COALESCE (eso.rainfall, 0) AS rainfall,
COALESCE (eso.evaporation_loss, 0) AS evaporation_loss,
COALESCE (eso.runoff_rate, 0) AS runoff_rate,
COALESCE (eso.infiltration_loss, 0) AS infiltration_loss
FROM
event_nodes AS en
LEFT JOIN
event_subc_outlet AS eso
ON eso.elapsed_time = en.elapsed_time
AND eso.outlet = en.node_id
)
,
nodal_inp_data AS
(
SELECT
nodes.*,
COALESCE (s.area, 0) AS area,
COALESCE (s.imperv, 0) AS imperv,
COALESCE (s.slope, 0) AS slope,
COALESCE (s.width, 0) AS width,
COALESCE (s.curb_len, 0) AS curb_len,
COALESCE (s.raingage_id, '') AS raingage_id
FROM
nodes
LEFT JOIN
subcatch s
ON s.outlet = nodes.node_id
)
,
nodal_data AS
(
SELECT
nod.*,
nid.lon,
nid.lat,
nid.elevation,
nid.init_depth,
nid.max_depth,
nid.area,
nid.imperv,
nid.slope,
nid.width,
nid.curb_len
-- nid.raingage_id
FROM
nodal_out_data AS nod
LEFT JOIN
nodal_inp_data AS nid
ON nod.node_id = nid.node_id
)
-- ,
-- rain_mdata AS
-- (
-- SELECT
-- *
-- FROM
-- raingages_metadata rm
-- WHERE
-- rm.precipitation_id = '{self.precip}'
-- )
-- ,
-- rain_tseries AS
-- (
-- SELECT
-- *
-- FROM
-- raingages_timeseries rt
-- WHERE
-- rt.precipitation_id = '{self.precip}'
-- )
-- ,
-- rain AS
-- (
-- SELECT
-- rt.raingage_id,
-- rt.elapsed_time,
-- COALESCE (rt.VALUE, 0) AS rainfall_acc,
-- rm.format,
-- rm.unit
-- FROM
-- raingages_timeseries rt
-- JOIN
-- raingages_metadata AS rm
-- ON rt.precipitation_id = rm.precipitation_id
-- )
-- ,
-- final AS
-- (
-- SELECT DISTINCT
-- nd.*,
-- COALESCE (r.rainfall_acc, 0) AS rainfall_acc,
-- COALESCE (r.format, '') AS format,
-- COALESCE (r.unit, '') AS unit
-- FROM
-- nodal_data nd
-- LEFT JOIN
-- rain r
-- ON nd.raingage_id = r.raingage_id
-- AND nd.elapsed_time = r.elapsed_time
-- )
SELECT {node_attrs}
FROM nodal_data
"""
cur2 = self.conn.cursor()
cur2.execute(nodal_data_query)
nodal_data_query_result = cur2.fetchall()
if persist:
self.nodal_data_query_results[f'{self.event}_{elapsed_time}'] = nodal_data_query_result
else:
return nodal_data_query_result
def get_nodal_data(self, elapsed_time:str, attrs:dict, persist:bool=True):
node_attrs = ','.join(['node_id', 'subcatchment_id', 'elapsed_time', 'depth_above_invert']) + ',' + ','.join(attrs['nodes'])
if persist:
try:
self.nodal_data_dict[f'{self.event}_{elapsed_time}']
except:
self.nodal_data_query(elapsed_time, attrs)
nodal_data = {
i[0]: dict(zip(node_attrs.split(','), i))
for i in self.nodal_data_query_results[f'{self.event}_{elapsed_time}']
}
self.nodal_data_dict[f'{self.event}_{elapsed_time}'] = nodal_data
else:
query = self.nodal_data_query(elapsed_time, attrs, persist=False)
nodal_data = {i[0]: dict(zip(node_attrs.split(','), i))
for i in query
}
return nodal_data
# graph creation
def build_digraph(self, elapsed_time:str, attrs:dict, persist:bool=True):
if persist:
self.get_nodal_data(elapsed_time, attrs, persist=True)
self.get_nodal_linkage(elapsed_time, attrs, persist=True)
#target definition
def risk_classes(level):
high_risk_level = 0.25
mid_risk_level = 0.15
if level < mid_risk_level:
return 0
elif (level >= mid_risk_level) & (level < high_risk_level):
return 1
else:
return 2
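            # Worked example of the thresholds above (added for clarity): a
            # depth of 0.10 m maps to class 0 (low), 0.20 m to class 1 (medium)
            # and 0.30 m to class 2 (high risk).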
try:
self.digraphs[f'{self.event}_{elapsed_time}']
except:
DG = nx.DiGraph(elapsed_time = elapsed_time, model=self.model, event=self.event)
[DG.add_edge(i[1]['from_node'], i[1]['to_node'], **i[1]) for i in self.nodal_linkage_dict[f'{self.event}_{elapsed_time}'].items()]
[DG.add_node(i[0], **i[1]) for i in self.nodal_data_dict[f'{self.event}_{elapsed_time}'].items()]
#target definition
[DG.add_node(i[0], **{'target': risk_classes(i[1]['depth_above_invert'])}) for i in self.nodal_data_dict[f'{self.event}_{elapsed_time}'].items()]
if persist:
self.digraphs[f'{self.event}_{elapsed_time}'] = DG
self.num_nodes[f'{self.event}_{elapsed_time}'] = len(DG.nodes())
self.num_edges[f'{self.event}_{elapsed_time}'] = len(DG.edges())
else:
nodal_data = self.get_nodal_data(elapsed_time, attrs, persist=False)
nodal_linkage = self.get_nodal_linkage(elapsed_time, attrs, persist=False)
#target definition
def risk_classes(level):
high_risk_level = 0.25
mid_risk_level = 0.15
if level < mid_risk_level:
return 0
elif (level >= mid_risk_level) & (level < high_risk_level):
return 1
else:
return 2
DG = nx.DiGraph(elapsed_time = elapsed_time, model=self.model, event=self.event)
[DG.add_edge(i[1]['from_node'], i[1]['to_node'], **i[1]) for i in nodal_linkage.items()]
[DG.add_node(i[0], **i[1]) for i in nodal_data.items()]
#target definition
[DG.add_node(i[0], **{'target': risk_classes(i[1]['depth_above_invert'])}) for i in nodal_data.items()]
self.num_nodes[f'{self.event}_{elapsed_time}'] = len(DG.nodes())
self.num_edges[f'{self.event}_{elapsed_time}'] = len(DG.edges())
return DG
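    # Illustrative usage sketch (added, not original code); model/event/node ids
    # are hypothetical and the attrs dict mirrors the one hardcoded further below:
    #   attrs = {'nodes': ['area', 'imperv', 'infiltration_loss', 'elevation', 'rainfall'],
    #            'edges': ['flow_rate', 'length', 'roughness']}
    #   ge = GraphEngine('model_A', 'event_1', 'precip_1', conn)
    #   DG = ge.build_digraph(ge.time_range[1][0], attrs, persist=False)
    #   DG.nodes['NODO123']['target']   # flooding risk class 0, 1 or 2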
def build_coordinates_dict(self, elevation:bool=False):
nodes_coordinates_query = f"""
WITH node_coordinates_model AS
(
SELECT
*
FROM
nodes_coordinates AS nc
WHERE
nc.model_id = '{self.model}'
)
SELECT
nc.node_id,
nc.lat,
nc.lon,
nj.elevation,
nj.init_depth,
nj.max_depth
FROM
node_coordinates_model nc
JOIN
nodes_junctions nj
ON nc.node_id = nj.junction_id
WHERE
nj.model_id = '{self.model}'
UNION ALL
SELECT
nc.node_id,
nc.lat,
nc.lon,
ns.elevation,
ns.init_depth,
ns.max_depth
FROM
node_coordinates_model nc
JOIN
nodes_storage ns
ON nc.node_id = ns.storage_id
WHERE
ns.model_id = '{self.model}'
UNION ALL
SELECT
nc.node_id,
nc.lat,
nc.lon,
no2.elevation,
0 AS init_depth,
0 AS max_depth
FROM
node_coordinates_model nc
JOIN
nodes_outfalls no2
ON nc.node_id = no2.outfall_id
WHERE
no2.model_id = '{self.model}'
"""
cur3 = self.conn.cursor()
cur3.execute(nodes_coordinates_query)
coordinates_query_result = cur3.fetchall()
if elevation:
coordinates = {i[0]: {'lat':i[1], 'lon':i[2], 'elevation':i[3]} for i in coordinates_query_result}
return coordinates
else:
coordinates = {i[0]: {'lat':i[1], 'lon':i[2]} for i in coordinates_query_result}
return coordinates
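    # Illustrative sketch (added, not original code): the returned mapping is
    # keyed by node id, e.g. {'NODO123': {'lat': -34.6, 'lon': -58.4}}
    # (hypothetical values), with an extra 'elevation' entry when
    # elevation=True. __init__ stores the plain version as self.pos_dict.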
def __init__(self, model:str, event:str, precip:str, conn) -> None:
self.conn = conn.pg_conn
# self.elapsed_time = elapsed_time
self.model = model
self.event = event
self.precip = precip
self.time_range = self.get_time_steps()
self.nodal_linkage_query_results = {}
self.nodal_linkage_dict = {}
self.nodal_data_query_results = {}
self.nodal_data_dict = {}
self.digraphs = {}
self.sub_digraphs = {}
self.pos_dict = self.build_coordinates_dict()
self.num_nodes = defaultdict(int)
self.num_edges = defaultdict(int)
self.torch_data = {}
def build_subgraph(self, node:str, elapsed_time:str, attrs:dict, acc_data:bool, persist:bool=True):
try:
self.digraphs[f'{self.event}_{elapsed_time}']
except:
self.build_digraph(elapsed_time, attrs)
if persist:
try:
self.sub_digraphs[f'{self.event}_{node}_{elapsed_time}']
except:
preds_list = [(i[0],i[1]) for i in nx.edge_dfs(self.digraphs[f'{self.event}_{elapsed_time}'], node, 'reverse')]
if len(preds_list) == 0:
preds_list = [node]
graph_preds = nx.DiGraph(elapsed_time = elapsed_time, model= self.model, outlet_node = node)
# own node data, for the cases without preds
graph_preds.add_node(node, **self.nodal_data_dict[f'{self.event}_{elapsed_time}'][node])
#target definition
def risk_classes(level):
high_risk_level = 0.25
mid_risk_level = 0.15
                    # handle missing levels first: None cannot be compared to a float
                    if level is None:
                        return 0
                    elif level < mid_risk_level:
                        return 0
                    elif (level >= mid_risk_level) & (level < high_risk_level):
                        return 1
else:
return 2
if isinstance(preds_list[0], tuple):
[graph_preds.add_edge(edge[0], edge[1], **self.nodal_linkage_dict[f'{self.event}_{elapsed_time}'][edge[0] + '->' + edge[1]]) for edge in preds_list]
[graph_preds.add_node(i, **self.nodal_data_dict[f'{self.event}_{elapsed_time}'][i]) for i in set([i[0] for i in preds_list] + [i[1] for i in preds_list])]
[graph_preds.add_node(
i, **{'target': risk_classes(self.nodal_data_dict[f'{self.event}_{elapsed_time}'][i]['depth_above_invert'])}
) for i in set([i[0] for i in preds_list] + [i[1] for i in preds_list])]
else:
[graph_preds.add_node(i, **self.nodal_data_dict[f'{self.event}_{elapsed_time}'][i]) for i in preds_list]
[graph_preds.add_node(
i, **{'target': risk_classes(self.nodal_data_dict[f'{self.event}_{elapsed_time}'][i]['depth_above_invert'])}
) for i in preds_list]
def division_exception(a, b, default_value):
try:
return a / b
except:
return default_value
if acc_data:
vars_acc = {
'area_aporte_ha': round(sum([graph_preds.nodes()[i]['area'] for i in graph_preds.nodes()]),2),
'perm_media_%':
round(
division_exception(sum(
[graph_preds.nodes()[i]['area'] * graph_preds.nodes()[i]['imperv']
for i in graph_preds.nodes()]
)
, sum([graph_preds.nodes()[i]['area'] for i in graph_preds.nodes()]),0),
4),
'manning_medio_flow_s/m^1/3':
round(
division_exception(sum(
[
graph_preds.edges()[edge[0], edge[1]]['flow_rate']
* graph_preds.edges()[edge[0], edge[1]]['length']
* graph_preds.edges()[edge[0], edge[1]]['roughness']
for edge in graph_preds.edges()
])
, sum([graph_preds.edges()[edge[0], edge[1]]['flow_rate']
* graph_preds.edges()[edge[0], edge[1]]['length']
for edge in graph_preds.edges()
]),0),
3),
'manning_medio_s/m^1/3':
round(
division_exception(sum(
[
graph_preds.edges()[edge[0], edge[1]]['length']
* graph_preds.edges()[edge[0], edge[1]]['roughness']
for edge in graph_preds.edges()
])
, sum([graph_preds.edges()[edge[0], edge[1]]['length']
for edge in graph_preds.edges()
]),0),
3),
# 'precip_media_mm/ha':
# division_exception(
# round(max([graph_preds.edges()[edge[0], edge[1]]['rainfall_acc'] for edge in graph_preds.edges])
# , sum([graph_preds.nodes()[i]['area'] for i in graph_preds.nodes()]),2),
# 0),
'infilt_media_mm/hs': round(np.average([graph_preds.nodes()[i]['infiltration_loss'] for i in graph_preds.nodes()]),2),
# 'vol_almacenado_mm': round(max([graph_preds.edges()[edge[0], edge[1]]['rainfall_acc'] for edge in graph_preds.edges])
# - sum([graph_preds.nodes()[i]['infiltration_loss'] for i in graph_preds.nodes])
# - sum([graph_preds.nodes()[i]['evaporation_loss'] for i in graph_preds.nodes])
# - sum([graph_preds.nodes()[i]['runoff_rate'] * graph_preds.nodes()[i]['area'] for i in graph_preds.nodes()]),2),
# 'vol_precipitado_mm_acc': round(max([graph_preds.edges()[edge[0], edge[1]]['rainfall_acc'] for edge in graph_preds.edges()]),2),
# 'vol_precipitado_mm': round(sum([graph_preds.edges()[edge[0], edge[1]]['rainfall'] for edge in graph_preds.edges()]),2),
'delta_h_medio_m/m':
round(
division_exception(
(
max([graph_preds.nodes()[i]['elevation'] for i in graph_preds.nodes()])
- min([graph_preds.nodes()[i]['elevation'] for i in graph_preds.nodes()])
) , np.sqrt(10000 * sum([graph_preds.nodes()[i]['area'] for i in graph_preds.nodes()])),0),
2),
'pendiente_media_m/m':
division_exception(
(
max([graph_preds.nodes()[i]['elevation'] for i in graph_preds.nodes()])
- min([graph_preds.nodes()[i]['elevation'] for i in graph_preds.nodes()])
) , sum([graph_preds.edges()[edge[0], edge[1]]['length'] for edge in graph_preds.edges()]),
0)
}
graph_preds.add_node(node, **vars_acc)
self.sub_digraphs[self.event + '_' + node + '_' + elapsed_time + '_acc'] = graph_preds
else:
try:
graph = self.digraphs[f'{self.event}_{elapsed_time}']
            except KeyError:
                # build_digraph(persist=True) caches the graph but returns None,
                # so fetch the cached graph after building it
                self.build_digraph(elapsed_time, attrs, persist=True)
                graph = self.digraphs[f'{self.event}_{elapsed_time}']
nodal_data_dict = self.get_nodal_data(elapsed_time, attrs, persist=False)
nodal_linkage_dict = self.get_nodal_linkage(elapsed_time, attrs, persist=False)
preds_list = [(i[0],i[1]) for i in nx.edge_dfs(graph, node, 'reverse')]
if len(preds_list) == 0:
preds_list = [node]
graph_preds = nx.DiGraph(elapsed_time = elapsed_time, model= self.model, outlet_node = node)
            # own node data, for the cases without preds
graph_preds.add_node(node, **nodal_data_dict[node])
#target definition
def risk_classes(level):
high_risk_level = 0.25
mid_risk_level = 0.15
if level < mid_risk_level:
return 0
elif (level >= mid_risk_level) & (level < high_risk_level):
return 1
else:
return 2
if isinstance(preds_list[0], tuple):
[graph_preds.add_edge(edge[0], edge[1], **nodal_linkage_dict[edge[0] + '->' + edge[1]]) for edge in preds_list]
[graph_preds.add_node(i, **nodal_data_dict[i]) for i in set([i[0] for i in preds_list] + [i[1] for i in preds_list])]
[graph_preds.add_node(
i, **{'target': risk_classes(nodal_data_dict[i]['depth_above_invert'])}
) for i in set([i[0] for i in preds_list] + [i[1] for i in preds_list])]
else:
[graph_preds.add_node(i, **nodal_data_dict[i]) for i in preds_list]
[graph_preds.add_node(
i, **{'target': risk_classes(nodal_data_dict[i]['depth_above_invert'])}
) for i in preds_list]
def division_exception(a, b, default_value):
try:
return a / b
except:
return default_value
if acc_data:
vars_acc = {
'area_aporte_ha': round(sum([graph_preds.nodes()[i]['area'] for i in graph_preds.nodes()]),2),
'perm_media_%':
round(
division_exception(sum(
[graph_preds.nodes()[i]['area'] * graph_preds.nodes()[i]['imperv']
for i in graph_preds.nodes()]
)
, sum([graph_preds.nodes()[i]['area'] for i in graph_preds.nodes()]),0),
4),
'manning_medio_flow_s/m^1/3':
round(
division_exception(sum(
[
graph_preds.edges()[edge[0], edge[1]]['flow_rate']
* graph_preds.edges()[edge[0], edge[1]]['length']
* graph_preds.edges()[edge[0], edge[1]]['roughness']
for edge in graph_preds.edges()
])
, sum([graph_preds.edges()[edge[0], edge[1]]['flow_rate']
* graph_preds.edges()[edge[0], edge[1]]['length']
for edge in graph_preds.edges()
]),0),
3),
'manning_medio_s/m^1/3':
round(
division_exception(sum(
[
graph_preds.edges()[edge[0], edge[1]]['length']
* graph_preds.edges()[edge[0], edge[1]]['roughness']
for edge in graph_preds.edges()
])
, sum([graph_preds.edges()[edge[0], edge[1]]['length']
for edge in graph_preds.edges()
]),0),
3),
# 'precip_media_mm/ha':
# division_exception(
# round(max([graph_preds.edges()[edge[0], edge[1]]['rainfall_acc'] for edge in graph_preds.edges])
# , sum([graph_preds.nodes()[i]['area'] for i in graph_preds.nodes()]),2),
# 0),
# ' infilt_media_mm/hs': round(np.average([graph_preds.nodes()[i]['infiltration_loss'] for i in graph_preds.nodes()]),2),
# 'vol_almacenado_mm': round(max([graph_preds.edges()[edge[0], edge[1]]['rainfall_acc'] for edge in graph_preds.edges])
# - sum([graph_preds.nodes()[i]['infiltration_loss'] for i in graph_preds.nodes])
# - sum([graph_preds.nodes()[i]['evaporation_loss'] for i in graph_preds.nodes])
# - sum([graph_preds.nodes()[i]['runoff_rate'] * graph_preds.nodes()[i]['area'] for i in graph_preds.nodes()]),2),
# 'vol_precipitado_mm_acc': round(max([graph_preds.edges()[edge[0], edge[1]]['rainfall_acc'] for edge in graph_preds.edges()]),2),
# 'vol_precipitado_mm': round(sum([graph_preds.edges()[edge[0], edge[1]]['rainfall'] for edge in graph_preds.edges()]),2),
'delta_h_medio_m/m':
round(
division_exception(
(
max([graph_preds.nodes()[i]['elevation'] for i in graph_preds.nodes()])
- min([graph_preds.nodes()[i]['elevation'] for i in graph_preds.nodes()])
) , np.sqrt(10000 * sum([graph_preds.nodes()[i]['area'] for i in graph_preds.nodes()])),0),
2),
'pendiente_media_m/m':
division_exception(
(
max([graph_preds.nodes()[i]['elevation'] for i in graph_preds.nodes()])
- min([graph_preds.nodes()[i]['elevation'] for i in graph_preds.nodes()])
) , sum([graph_preds.edges()[edge[0], edge[1]]['length'] for edge in graph_preds.edges()]),
0)
}
graph_preds.add_node(node, **vars_acc)
return graph_preds
else:
return graph_preds
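    # Illustrative usage sketch (added, not original code); 'NODO123' is a
    # hypothetical outlet id and `ge`/`attrs` are as in the build_digraph sketch.
    # With acc_data=True the aggregated drainage-area attributes are attached to
    # the outlet node itself:
    #   sub = ge.build_subgraph('NODO123', ge.time_range[1][0], attrs,
    #                           acc_data=True, persist=False)
    #   sub.nodes['NODO123']['area_aporte_ha']       # contributing area [ha]
    #   sub.nodes['NODO123']['pendiente_media_m/m']  # mean slope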
def graph_to_torch_tensor(self, elapsed_time:str, attrs_dict:dict, raw_data_folder:str, detailed:bool=False, to_pickle:bool=True,):
"""Converts a :obj:`networkx.Graph` or :obj:`networkx.DiGraph` to a
:class:`torch_geometric.data.Data` instance.
Took it from torch_geometric.data.Data
Args:
G (networkx.Graph or networkx.DiGraph): A networkx graph.
"""
# node_attrs = attrs_dict['nodes']
# edge_attrs = attrs_dict['edges']
# graphtensors = {}
# train_DataLoaders = {}
# test_DataLoaders = {}
DG = self.build_digraph(elapsed_time=elapsed_time, attrs=attrs_dict, persist=False)
graph_ = DG.copy()
graph_ = nx.convert_node_labels_to_integers(graph_)
edge_index = torch.tensor(list(graph_.edges)).t().contiguous()
torch_data = defaultdict(int)
# torch_data['y'] = DG.nodes()[node]['target']
# graph_target = torch_data['y']
if detailed:
for i, (_, feat_dict) in enumerate(graph_.nodes(data=True)):
for key, value in feat_dict.items():
torch_data['node_' + str(key)] = [value] if i == 0 else torch_data['node_' + str(key)] + [value]
for i, (_, _, feat_dict) in enumerate(graph_.edges(data=True)):
for key, value in feat_dict.items():
torch_data['edge_' + str(key)] = [value] if i == 0 else torch_data['edge_' + str(key)] + [value]
        # select node features and targets by name from attrs_dict (the last
        # entry in attrs_dict['nodes'] is treated as the target column)
        torch_data['x'] = [[v[1][i] for i in attrs_dict['nodes'][:-1]] for v in graph_.nodes(data=True)]
        torch_data['y'] = [v[1][attrs_dict['nodes'][-1]] for v in graph_.nodes(data=True)]
# torch_data['edge_attrs'] = [list(v[2].values())[5:] for v in graph_.edges(data=True)]
# torch_data['edge_attrs'] = [[v[1][i] for i in attrs_dict['edges'][-1]] for v in graph_.edges(data=True)]
torch_data['edge_index'] = edge_index.view(2, -1)
for key, data in torch_data.items():
try:
if (key == 'x'):# | (key == 'edge_attrs'):
# torch_data[key] = torch.tensor(item)
torch_data[key] = torch.tensor(data)
elif (key == 'y'):# | (key == 'edge_attrs'):
# torch_data[key] = torch.tensor(item)
torch_data[key] = torch.tensor(data, dtype=torch.long)
elif (key == 'edge_index') | (key == 'edge_attrs'):
torch_data[key] = torch.tensor(data, dtype=torch.long)
# elif (key == 'y'):
# torch_data[key] = torch.tensor(data, dtype=torch.long)
except ValueError:
print(data)
pass
# torch_data = Data.from_dict(torch_data)
# torch_data.num_nodes = graph.number_of_nodes()
if to_pickle:
            # open the file where we want to store the data
file = open(f'{raw_data_folder}/{self.event}_{elapsed_time}.gpickle', 'wb')
# dump information to that file
pickle.dump(torch_data, file, pickle.HIGHEST_PROTOCOL)
# close the file
file.close()
else:
return torch_data
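    # Illustrative sketch (added, not original code); 'data/raw' is a
    # hypothetical folder. With to_pickle=True the tensors are written to
    # '<raw_data_folder>/<event>_<elapsed_time>.gpickle'; with to_pickle=False
    # the returned dict can feed torch_geometric directly:
    #   t0 = ge.time_range[1][0]
    #   td = ge.graph_to_torch_tensor(t0, attrs, 'data/raw', to_pickle=False)
    #   data = Data(x=td['x'], edge_index=td['edge_index'], y=td['y'])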
def subgraphs_to_torch_tensors(self, elapsed_time:str, node:str, attrs_dict:dict, \
raw_data_folder:str, detailed:bool=False, to_pickle:bool=True,):
"""Converts a :obj:`networkx.Graph` or :obj:`networkx.DiGraph` to a
:class:`torch_geometric.data.Data` instance.
Took it from torch_geometric.data.Data
Args:
G (networkx.Graph or networkx.DiGraph): A networkx graph.
"""
node_attrs = attrs_dict['nodes']
edge_attrs = attrs_dict['edges']
graphtensors = {}
train_DataLoaders = {}
test_DataLoaders = {}
DG = self.build_subgraph(node=node, elapsed_time=elapsed_time, attrs=attrs_dict, acc_data=False, persist=False)
graph_ = DG.copy()
graph_ = nx.convert_node_labels_to_integers(graph_)
graph_ = graph_.to_directed() if not nx.is_directed(graph_) else graph_
edge_index = torch.tensor(list(graph_.edges)).t().contiguous()
torch_data = defaultdict(int)
torch_data['y'] = DG.nodes()[node]['target']
graph_target = torch_data['y']
if detailed:
for i, (_, feat_dict) in enumerate(graph_.nodes(data=True)):
for key, value in feat_dict.items():
torch_data['node_' + str(key)] = [value] if i == 0 else torch_data['node_' + str(key)] + [value]
for i, (_, _, feat_dict) in enumerate(graph_.edges(data=True)):
for key, value in feat_dict.items():
torch_data['edge_' + str(key)] = [value] if i == 0 else torch_data['edge_' + str(key)] + [value]
torch_data['x'] = [list(v[1].values())[4:-1] for i,v in enumerate(graph_.nodes(data=True))]
torch_data['edge_attrs'] = [list(v[2].values())[5:] for i,v in enumerate(graph_.edges(data=True))]
torch_data['edge_index'] = edge_index.view(2, -1)
for key, data in torch_data.items():
try:
if (key == 'x'):# | (key == 'edge_attrs'):
# torch_data[key] = torch.tensor(item)
torch_data[key] = torch.tensor(data)
elif (key == 'edge_index') | (key == 'edge_attrs'):
torch_data[key] = torch.tensor(data, dtype=torch.long)
elif (key == 'y'):
torch_data[key] = torch.tensor(data, dtype=torch.long)
except ValueError:
print(data)
pass
# torch_data = Data.from_dict(torch_data)
# torch_data.num_nodes = graph.number_of_nodes()
if to_pickle:
            # open the file where we want to store the data
file = open(f'{raw_data_folder}/{self.event}_{elapsed_time}_{node}_{graph_target}.gpickle', 'wb')
# dump information to that file
pickle.dump(torch_data, file, pickle.HIGHEST_PROTOCOL)
# close the file
file.close()
else:
return torch_data
def sub_digraphs_timeseries(self, node:str, var:str, time_step:int = 4):
# hardcoded
attrs_dict = {
'nodes': ['area', 'imperv', 'infiltration_loss', 'elevation', 'rainfall' ],
'edges':['flow_rate', 'length', 'roughness', ],
}
[self.build_subgraph(
node, elapsed_time=time, attrs=attrs_dict, acc_data=True,
) for time in (sorted(self.time_range[1])[::time_step])
]
df = pd.DataFrame(
[(datetime.strptime(time, '%Y-%m-%d %H:%M:%S'), self.sub_digraphs[f'{self.event}_{node}_{time}_acc'].nodes()[node][var]
) for time in (sorted(self.time_range[1])[::time_step])])\
.rename({0:'elapsed_time', 1: var}, axis=1).set_index('elapsed_time')
return df
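    # Illustrative sketch (added, not original code): the returned frame is
    # indexed by elapsed_time with one column named after `var`, e.g.:
    #   df = ge.sub_digraphs_timeseries('NODO123', 'area_aporte_ha', time_step=4)
    #   df.plot()   # evolution of the accumulated variable at that outlet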
def multi_subgraph_tseries_viz(self, nodes:list, var:str, time_step:int):
rainfall_step = max(self.which_time_step()[0], int((self.which_time_step()[0]) * time_step))
subfig = make_subplots(specs=[[{"secondary_y": True}]])
for i, node in enumerate(nodes):
df_plot_0 = self.sub_digraphs_timeseries(node, 'rainfall', time_step=time_step,)
df_plot_0 = df_plot_0.resample(f'{rainfall_step}min').mean()
plot_rainfall_max = 1.5 * df_plot_0['rainfall'].max()
df_plot_1 = self.sub_digraphs_timeseries(node, var,time_step=time_step)
plot_var_max = 1.5 * df_plot_1[var].max()
plot_var_min = 1.1 * df_plot_1[var].min()
splitted_var = var.split('_')
plot_var_legend = ' '.join([word.capitalize() for word in splitted_var][:-1]) + f' [{splitted_var[-1]}]'
# create two independent figures with px.line each containing data from multiple columns
fig = px.bar(df_plot_0, y='rainfall')#, render_mode="webgl",)
fig2 = px.line(df_plot_1, y=var)
fig2.update_traces(line={'width':5, 'color':'#125AEF'})
fig2.update_traces(yaxis="y2")
# subfig.add_trace(fig, row=1, col=i+1)
subfig.add_trace(fig2.data, row=1, col=i+1)# + fig3.data)
subfig['layout']['yaxis1'].update(title='Precipitation intensity (mm/h)',range=[0, plot_rainfall_max], autorange='reversed')
subfig['layout']['yaxis2'].update(title= plot_var_legend, range=[plot_var_min, plot_var_max], autorange=False)
# subfig.for_each_trace(lambda t: t.update(marker=dict(color=['black'])))
subfig['layout']['xaxis'].update(title='', tickformat='%d-%b %Hh')
subfig['layout'].update(plot_bgcolor='white', font={'size':25})#, template='plotly_white')
subfig.update_xaxes(showline=True, linewidth=3, linecolor='black', mirror=True)
subfig.update_yaxes(showline=True, linewidth=3, linecolor='black', mirror=True)
subfig.update_xaxes(ticks="inside", tickwidth=2, tickcolor='black', ticklen=10)
subfig.update_yaxes(ticks="inside", tickwidth=2, tickcolor='black', ticklen=10)
subfig['layout'].update(height=600, width=1200)
subfig.update_layout(showlegend=False)
return subfig
def subgraph_tseries_viz(self, node:str, var:list, time_step:int):
rainfall_step = max(self.which_time_step()[0], int((self.which_time_step()[0]) * time_step))
df_plot_0 = self.sub_digraphs_timeseries(node, 'rainfall', time_step=time_step,)
df_plot_0 = df_plot_0.resample(f'{rainfall_step}min').mean()
plot_rainfall_max = 1.5 * df_plot_0['rainfall'].max()
df_plot_1 = self.sub_digraphs_timeseries(node, var,time_step=time_step)
plot_var_max = 1.5 * df_plot_1[var].max()
plot_var_min = 1.1 * df_plot_1[var].min()
splitted_var = var.split('_')
plot_var_legend = ' '.join([word.capitalize() for word in splitted_var][:-1]) + f' [{splitted_var[-1]}]'
subfig = make_subplots(specs=[[{"secondary_y": True}]])
# create two independent figures with px.line each containing data from multiple columns
fig = px.bar(df_plot_0, y='rainfall')#, render_mode="webgl",)
fig2 = px.line(df_plot_1, y=var)
fig2.update_traces(line={'width':5, 'color':'#125AEF'})
fig2.update_traces(yaxis="y2")
subfig.add_traces(fig.data + fig2.data)# + fig3.data)
subfig['layout']['yaxis1'].update(title='Precipitation intensity (mm/h)',range=[0, plot_rainfall_max], autorange='reversed')
subfig['layout']['yaxis2'].update(title= plot_var_legend, range=[plot_var_min, plot_var_max], autorange=False)
subfig.for_each_trace(lambda t: t.update(marker=dict(color=['black'])))
subfig['layout']['xaxis'].update(title='', tickformat='%d-%b %Hh')
subfig['layout'].update(plot_bgcolor='white', font={'size':25})#, template='plotly_white')
subfig.update_xaxes(showline=True, linewidth=3, linecolor='black', mirror=True)
subfig.update_yaxes(showline=True, linewidth=3, linecolor='black', mirror=True)
subfig.update_xaxes(ticks="inside", tickwidth=2, tickcolor='black', ticklen=10)
subfig.update_yaxes(ticks="inside", tickwidth=2, tickcolor='black', ticklen=10)
subfig['layout'].update(height=600, width=1200)
subfig.update_layout(showlegend=False)
return subfig
def timeseries(self, item: str, var:list):
"""
Generates the timeseries of any variable of any element.
"""
if item.startswith('NODO'):
nodal_data_vars_query = f"""
WITH model_node_coordinates AS
(
SELECT
*
FROM
nodes_coordinates AS nc
WHERE
nc.model_id = '{self.model}'
                    AND nc.node_id = '{item}'
)
,
junctions AS
(
SELECT
*
FROM
nodes_junctions nj
WHERE
nj.model_id = '{self.model}'
AND nj.junction_id = '{item}'
)
,
storages AS
(
SELECT
*
FROM
nodes_storage ns
WHERE
ns.model_id = '{self.model}'
AND ns.storage_id = '{item}'
)
,
outfalls AS
(
SELECT
*
FROM
nodes_outfalls AS no2
WHERE
no2.model_id = '{self.model}'
AND no2.outfall_id = '{item}'
)
,
nodes AS
(
SELECT
mnc.node_id,
mnc.lat,
mnc.lon,
j.elevation,
j.init_depth,
j.max_depth
FROM
model_node_coordinates mnc
JOIN
junctions j
ON mnc.node_id = j.junction_id
WHERE
elevation NOTNULL
UNION ALL
SELECT
mnc.node_id,
mnc.lat,
mnc.lon,
s.elevation,
s.init_depth,
s.max_depth
FROM
model_node_coordinates mnc
JOIN
storages s
ON mnc.node_id = s.storage_id
WHERE
elevation NOTNULL
UNION ALL
SELECT
mnc.node_id,
mnc.lat,
mnc.lon,
o.elevation,
0 AS init_depth,
0 AS max_depth
FROM
model_node_coordinates mnc
JOIN
outfalls o
ON mnc.node_id = o.outfall_id
WHERE
elevation NOTNULL
)
,
subcatch AS
(
SELECT
*
FROM
subcatchments s
WHERE
s.model_id = '{self.model}'
AND s.outlet = '{item}'
)
,
event_nodes AS
(
SELECT
*
FROM
events_nodes en
WHERE
event_id = '{self.event}'
AND en.node_id = '{item}'
)
,
event_subc AS
(
SELECT
*
FROM
events_subcatchments es
WHERE
es.event_id = '{self.event}'
)
,
event_subc_outlet AS
(
SELECT
event_subc.*,
subcatch.outlet,
subcatch.raingage_id
FROM
subcatch
LEFT JOIN
event_subc
ON subcatch.subcatchment_id = event_subc.subcatchment_id
)
,
nodal_out_data AS
(
SELECT
en.node_id,
COALESCE (subcatchment_id, 'SIN CUENCA DE APORTE') AS subcatchment_id,
en.elapsed_time,
en.depth_above_invert,
en.flow_lost_flooding,
en.hydraulic_head,
en.lateral_inflow,
en.total_inflow,
en.volume_stored_ponded,
COALESCE (eso.evaporation_loss, 0) AS evaporation_loss,
COALESCE (eso.runoff_rate, 0) AS runoff_rate,
COALESCE (eso.infiltration_loss, 0) AS infiltration_loss,
COALESCE (eso.rainfall, 0) AS rainfall
FROM
event_nodes AS en
LEFT JOIN
event_subc_outlet AS eso
ON eso.elapsed_time = en.elapsed_time
AND eso.outlet = en.node_id
)
,
nodal_inp_data AS
(
SELECT
nodes.*,
COALESCE (s.area, 0) AS area,
COALESCE (s.imperv, 0) AS imperv,
COALESCE (s.slope, 0) AS slope,
COALESCE (s.width, 0) AS width,
COALESCE (s.curb_len, 0) AS curb_len,
COALESCE (s.raingage_id, '') AS raingage_id
FROM
nodes
LEFT JOIN
subcatch s
ON s.outlet = nodes.node_id
)
,
nodal_data AS
(
SELECT
nod.*,
nid.elevation,
nid.init_depth,
nid.max_depth,
nid.area,
nid.imperv,
nid.slope,
nid.width,
nid.curb_len,
nid.raingage_id
FROM
nodal_out_data AS nod
LEFT JOIN
nodal_inp_data AS nid
ON nod.node_id = nid.node_id
)
,
rain_mdata AS
(
SELECT
*
FROM
raingages_metadata rm
WHERE
rm.precipitation_id = '{self.precip}'
)
,
rain_tseries AS
(
SELECT
*
FROM
raingages_timeseries rt
WHERE
rt.precipitation_id = '{self.precip}'
)
,
rain AS
(
SELECT
rt.raingage_id,
rt.elapsed_time,
COALESCE (rt.VALUE, 0) AS rainfall_acc,
rm.format,
rm.unit
FROM
raingages_timeseries rt
LEFT JOIN
raingages_metadata AS rm
ON rt.precipitation_id = rm.precipitation_id
)
SELECT DISTINCT
nd.*,
r.rainfall_acc,
r.format,
r.unit
FROM
nodal_data nd
LEFT JOIN
rain r
ON nd.raingage_id = r.raingage_id
AND nd.elapsed_time = r.elapsed_time
ORDER by nd.elapsed_time
"""
cur5 = self.conn.cursor()
cur5.execute(nodal_data_vars_query)
nodal_data_result = cur5.fetchall()
nodal_data_cols = [
'node_id',
'subcatchment_id',
'elapsed_time',
'depth_above_invert',
'flow_lost_flooding',
'hydraulic_head',
'lateral_inflow',
'total_inflow',
'volume_stored_ponded',
'evaporation_loss',
'runoff_rate',
'infiltration_loss',
'rainfall',
'elevation',
'init_depth',
'max_depth',
'area',
'imperv',
'slope',
'width',
'curb_len',
'raingage_id',
'rainfall_acc',
'format',
'unit'
]
NodeVars = namedtuple('NodeVars', nodal_data_cols)
dt_nodes = dt.Frame(list(map(NodeVars._make, nodal_data_result)), names=nodal_data_cols)
if len(var) == 0:
df = dt_nodes.to_pandas()
df.loc[:,'elapsed_time'] = pd.to_datetime(df.loc[:,'elapsed_time'])
df = df.set_index('elapsed_time')
return df
else:
df = dt_nodes[:, ['node_id','elapsed_time'] + var].to_pandas()
df.loc[:,'elapsed_time'] = pd.to_datetime(df.loc[:,'elapsed_time'])
if 'depth_above_invert' in var:
    # peak depth and its timestamp only make sense when depth was requested
    df['time_to_peak'] = df.iloc[df['depth_above_invert'].idxmax()]['elapsed_time']
    df['peak'] = df['depth_above_invert'].max()
df = df.set_index('elapsed_time')
return df
else:
nodal_linkage_query_link = f"""
WITH links_event AS
(
SELECT
*
FROM
events_links el
WHERE
el.event_id = '{self.event}'
AND el.link_id = '{item}'
)
,
links_conduits_model AS
(
SELECT
*
FROM
links_conduits AS lc
WHERE
lc.model_id = '{self.model}'
AND lc.conduit_id = '{item}'
)
,
links_orifices_model AS
(
SELECT
*
FROM
links_orifices AS lo
WHERE
lo.model_id = '{self.model}'
AND lo.orifice_id = '{item}'
)
,
links_weirs_model AS
(
SELECT
*
FROM
links_weirs AS lw
WHERE
lw.model_id = '{self.model}'
AND lw.weir_id = '{item}'
)
,
links_types_event AS
(
SELECT
links.link_id,
from_node,
to_node,
elapsed_time,
flow_rate AS flow_rate,
flow_depth,
flow_velocity AS flow_velocity,
froude_number,
capacity,
conduits.length,
conduits.roughness
FROM
links_event AS links
LEFT JOIN
links_conduits_model AS conduits
ON conduits.conduit_id = links.link_id
WHERE
from_node NOTNULL
UNION
SELECT
links.link_id,
from_node,
to_node,
elapsed_time,
flow_rate AS flow_rate,
flow_depth,
flow_velocity AS flow_velocity,
froude_number,
capacity,
0 AS length,
0 AS roughness
FROM
links_event AS links
LEFT JOIN
links_orifices_model AS orifices
ON orifices.orifice_id = links.link_id
WHERE
from_node NOTNULL
UNION
SELECT
links.link_id,
from_node,
to_node,
elapsed_time,
flow_rate AS flow_rate,
flow_depth,
flow_velocity AS flow_velocity,
froude_number,
capacity,
0 AS length,
0 AS roughness
FROM
links_event AS links
LEFT JOIN
links_weirs_model AS weirs
ON weirs.weir_id = links.link_id
WHERE
from_node NOTNULL
)
,
rain_mdata AS
(
SELECT
*
FROM
raingages_metadata rm
WHERE
rm.precipitation_id = 'precipitation_{self.event}'
)
,
rain_tseries AS
(
SELECT
*
FROM
raingages_timeseries rt
WHERE
rt.precipitation_id = 'precipitation_{self.event}'
)
,
rain AS
(
SELECT
rt.raingage_id,
rt.elapsed_time,
rt.VALUE,
rm.format,
rm.unit
FROM
raingages_timeseries rt
LEFT JOIN
raingages_metadata AS rm
ON rt.raingage_id = rm.raingage_id
AND rt.precipitation_id = rm.precipitation_id
)
,
subc AS
(
SELECT
*
FROM
subcatchments s2
WHERE
s2.model_id = '{self.model}'
AND s2.outlet =
(
SELECT DISTINCT
from_node
FROM
links_types_event
)
)
,
event_subc AS
(
SELECT
*
FROM
events_subcatchments es
WHERE
es.event_id = '{self.event}'
)
,
event_subc_outlet AS
(
SELECT DISTINCT
subc.subcatchment_id,
subc.outlet,
subc.raingage_id,
elapsed_time,
event_subc.rainfall,
subc.area
FROM
subc
INNER JOIN
event_subc
ON subc.subcatchment_id = event_subc.subcatchment_id
)
,
event_subc_rainfall AS
(
SELECT DISTINCT
eso.*,
rain.VALUE,
rain.format,
rain.unit
FROM
event_subc_outlet eso
INNER JOIN
rain
ON rain.raingage_id = eso.raingage_id
AND rain.elapsed_time = eso.elapsed_time
)
SELECT
lte.*,
esr.rainfall --, coalesce (esr.value, 0) as rainfall_acc, esr.format, esr.unit
FROM
links_types_event lte
LEFT JOIN
event_subc_outlet esr
ON lte.from_node = esr.outlet
AND lte.elapsed_time = esr.elapsed_time
ORDER BY
outlet,
elapsed_time
"""
cur4 = self.conn.cursor()
cur4.execute(nodal_linkage_query_link)
nodal_linkage_result_link = cur4.fetchall()
nodal_linkage_cols = [
'link_id',
'from_node',
'to_node',
'elapsed_time',
'flow_rate',
'flow_depth',
'flow_velocity',
'froude_number',
'capacity',
'length',
'roughness',
'rainfall',
# 'rainfall_acc',
# 'format',
# 'unit'
]
LinkVars = namedtuple('LinkVars', nodal_linkage_cols)
dt_links = dt.Frame(list(map(LinkVars._make, nodal_linkage_result_link)), names=nodal_linkage_cols)
if len(var) == 0:
df = dt_links.to_pandas()
df.loc[:,'elapsed_time'] = pd.to_datetime(df.loc[:,'elapsed_time'])
df = df.set_index('elapsed_time')
return df
else:
df = dt_links[:, ['link_id','elapsed_time'] + var].to_pandas()
df.loc[:,'elapsed_time'] = | pd.to_datetime(df.loc[:,'elapsed_time']) | pandas.to_datetime |
#!/usr/bin/env python3
import os
from datetime import date
from pathlib import Path
import pandas as pd
import sys
def load(path: Path, d: date, sex: str) -> pd.DataFrame:
print(f"Loading input file {path}")
df = pd.read_excel(
path,
header=2
)
# rename the columns to the NUTS code taken from the first line of each header cell
column_mapping = {}
for col in df.columns:
parts = col.split('\n')
nuts, name = parts[0], ' '.join(parts[1:])
column_mapping[col] = nuts
df = df.rename(column_mapping, axis='columns').rename({'Věk': 'age'}, axis='columns')
# drop rows with missing values (stray rows below the header)
df = df.dropna()
# drop the first row with total sum
df = df.iloc[1:]
# drop the last row with average
df = df.drop(df[df.age.str.contains('Average')].index)
# add extra columns to make it clear what type of data is there
df['sex'] = sex
df['date'] = d.isoformat()
# set index
df = df.set_index(['date', 'sex', 'age'])
# and convert values to int
df = df.astype({
k: int
for k in column_mapping.values() if k in df.columns
})
print(f"File: {path}; Date: {d}; Sex: {sex}")
print(df.head())
print(df.tail())
return df
def save(path_prefix: Path, input_df: pd.DataFrame) -> None:
path_table = str(path_prefix) + '_table.csv'
print(f"Saving as table into {path_table}")
input_df.to_csv(path_table)
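# Flatten the wide (date, sex, age) x NUTS-column table into long
# (date, sex, age, NUTS, population) tuples, one row per region.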
data = []
i = 0
for col_name in input_df.columns:
for index, value in input_df[col_name].items():
data.append(index + (col_name, value))
index_columns = ['date', 'sex', 'age', 'NUTS']
tuples = pd.DataFrame(data=data, columns=index_columns + ['population']).set_index(index_columns)
path_tuples = str(path_prefix) + '_tuples.csv'
print(f"Saving as tuples into {path_tuples}")
tuples.to_csv(path_tuples)
DIR_ACT = Path(__file__).parent.absolute()
DIR_ORIGINAL = DIR_ACT / 'original'
DIR_CONVERTED = DIR_ACT / 'converted'
if __name__ == '__main__':
# convert 2019
dir_original_2019 = DIR_ORIGINAL / '2019'
dir_converted_2019 = DIR_CONVERTED / '2019'
dir_converted_2019.mkdir(parents=True, exist_ok=True)
# 2019-01-01
df_2019_01_01_B = load(dir_original_2019 / '1300642001.xlsx', date(2019, 1, 1), 'B')
save(dir_converted_2019 / '1300642001', df_2019_01_01_B)
df_2019_01_01_M = load(dir_original_2019 / '1300642002.xlsx', date(2019, 1, 1), 'M')
save(dir_converted_2019 / '1300642002', df_2019_01_01_M)
df_2019_01_01_F = load(dir_original_2019 / '1300642003.xlsx', date(2019, 1, 1), 'F')
save(dir_converted_2019 / '1300642003', df_2019_01_01_F)
df_2019_01_01 = pd.concat([df_2019_01_01_B, df_2019_01_01_M, df_2019_01_01_F])
save(dir_converted_2019 / '2019_01_01', df_2019_01_01)
del df_2019_01_01_B
del df_2019_01_01_M
del df_2019_01_01_F
del df_2019_01_01
# 2019-07-01
df_2019_07_01_B = load(dir_original_2019 / '1300642004.xlsx', date(2019, 7, 1), 'B')
save(dir_converted_2019 / '1300642004', df_2019_07_01_B)
df_2019_07_01_M = load(dir_original_2019 / '1300642005.xlsx', date(2019, 7, 1), 'M')
save(dir_converted_2019 / '1300642005', df_2019_07_01_M)
df_2019_07_01_F = load(dir_original_2019 / '1300642006.xlsx', date(2019, 7, 1), 'F')
save(dir_converted_2019 / '1300642006', df_2019_07_01_F)
df_2019_07_01 = pd.concat([df_2019_07_01_B, df_2019_07_01_M, df_2019_07_01_F])
save(dir_converted_2019 / '2019_07_01', df_2019_07_01)
del df_2019_07_01_B
del df_2019_07_01_M
del df_2019_07_01_F
del df_2019_07_01
# 2019-12-31
df_2019_12_31_B = load(dir_original_2019 / '1300642007.xlsx', date(2019, 12, 31), 'B')
save(dir_converted_2019 / '1300642007', df_2019_12_31_B)
df_2019_12_31_M = load(dir_original_2019 / '1300642008.xlsx', date(2019, 12, 31), 'M')
save(dir_converted_2019 / '1300642008', df_2019_12_31_M)
df_2019_12_31_F = load(dir_original_2019 / '1300642009.xlsx', date(2019, 12, 31), 'F')
save(dir_converted_2019 / '1300642009', df_2019_12_31_F)
df_2019_12_31 = | pd.concat([df_2019_12_31_B, df_2019_12_31_M, df_2019_12_31_F]) | pandas.concat |
from datetime import datetime
import os
import re
import numpy as np
import pandas as pd
from fetcher.extras.common import MaRawData, zipContextManager
from fetcher.utils import Fields, extract_arcgis_attributes
NULL_DATE = datetime(2020, 1, 1)
DATE = Fields.DATE.name
TS = Fields.TIMESTAMP.name
DATE_USED = Fields.DATE_USED.name
def add_query_constants(df, query):
for k, v in query.constants.items():
df[k] = v
return df
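# build_leveled_mapping splits flat "tab:field" keys into one {field: value} dict per tab,
# e.g. {"Sheet1:Cases": c} -> {"Sheet1": {"Cases": c}} (the example keys are hypothetical).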
def build_leveled_mapping(mapping):
tab_mapping = {x.split(":")[0]: {} for x in mapping.keys() if x.find(':') > 0}
for k, v in mapping.items():
if k.find(':') < 0:
continue
tab, field = k.split(":")
tab_mapping[tab][field] = v
return tab_mapping
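# prep_df builds a datetime-indexed frame from raw records, renaming columns via
# `mapping` and coercing every non-status column to numeric.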
def prep_df(values, mapping):
df = pd.DataFrame(values).rename(columns=mapping).set_index(DATE)
for c in df.columns:
if c.find('status') >= 0:
continue
# convert to numeric
df[c] = pd.to_numeric(df[c])
df.index = pd.to_datetime(df.index, errors='coerce')
return df
def make_cumsum_df(data, timestamp_field=Fields.TIMESTAMP.name):
df = pd.DataFrame(data)
df.set_index(timestamp_field, inplace=True)
df.sort_index(inplace=True)
df = df.select_dtypes(exclude=['string', 'object'])
# .groupby(level=0).last() # can do it here, but not mandatory
cumsum_df = df.cumsum()
cumsum_df[Fields.TIMESTAMP.name] = cumsum_df.index
return cumsum_df
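# Illustrative use of make_cumsum_df (hypothetical rows, not real fetcher output):
# make_cumsum_df([{'TIMESTAMP': t1, 'positive': 5}, {'TIMESTAMP': t2, 'positive': 3}])
# returns cumulative 'positive' values 5 and 8, sorted and indexed by timestamp, with
# the TIMESTAMP column re-added so downstream code can keep referring to it.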
def handle_ak(res, mapping):
tests = res[0]
collected = [x['attributes'] for x in tests['features']]
df = | pd.DataFrame(collected) | pandas.DataFrame |
import numpy as np
import copy
import logging
from IPython.display import display, clear_output
from collections import defaultdict
import pailab.analysis.plot as paiplot
import pailab.analysis.plot_helper as plt_helper
import ipywidgets as widgets
from pailab import MLObjectType, RepoInfoKey, FIRST_VERSION, LAST_VERSION
from pailab.ml_repo.repo import NamingConventions
import pailab.tools.checker as checker
import pailab.tools.tools as tools
import pailab.tools.interpretation as interpretation
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
logger = logging.getLogger(__name__)
# show full, untruncated column contents when DataFrames are displayed
pd.set_option('display.max_colwidth', -1)
# set widget use to True so that plotly's FigureWidget is used
paiplot.use_within_widget = True
if paiplot.has_plotly:
import plotly.graph_objs as go
beakerX = False
if beakerX:
from beakerx import TableDisplay
# from beakerx.object import beakerx
else:
def TableDisplay(dt):
display(dt)
class _MLRepoModel:
class _DataModel:
def __init__(self, ml_repo):
self._training_data = {}
self._test_data = {}
for k in ml_repo.get_names(MLObjectType.TRAINING_DATA):
tmp = ml_repo.get(k)
self._training_data[k] = tmp.n_data
self._x_coord_names = tmp.x_coord_names
self._y_coord_names = tmp.y_coord_names
for k in ml_repo.get_names(MLObjectType.TEST_DATA):
tmp = ml_repo.get(k)
self._test_data[k] = tmp.n_data
def get_data_names(self):
result = [k for k in self._test_data.keys()]
result.extend([k for k in self._training_data.keys()])
return result
def get_num_data(self, data):
result = []
for d in data:
if d in self._test_data.keys():
result.append(self._test_data[d])
elif d in self._training_data.keys():
result.append(self._training_data[d])
else:
raise Exception('Cannot find data ' + d)
return result
class _ModelModel:
def __init__(self, ml_repo):
self.labels = {} # dictionary label->model and version
# dictionary (model,version)->labelname or None
self.model_to_label = defaultdict(lambda: None)
self._setup_labels(ml_repo)
self._model_info_table = self._setup_model_info_table(ml_repo)
self._model_names = ml_repo.get_names(
MLObjectType.CALIBRATED_MODEL)
def _setup_labels(self, ml_repo):
label_names = ml_repo.get_names(MLObjectType.LABEL)
if label_names is None:
return
if isinstance(label_names, str):
label_names = [label_names]
for l in label_names:
label = ml_repo.get(l)
self.labels[l] = {'model': label.name,
'version': label.version}
self.model_to_label[(label.name, label.version,)] = l
def _setup_model_info_table(self, ml_repo):
model_rows = []
model_names = ml_repo.get_names(MLObjectType.CALIBRATED_MODEL)
for model_name in model_names:
models = ml_repo.get(model_name, version=(
FIRST_VERSION, LAST_VERSION), full_object=False)
if not isinstance(models, list):
models = [models]
for model in models:
tmp = copy.deepcopy(model.repo_info.get_dictionary())
tmp['model'] = tmp['name']
del tmp['big_objects']
del tmp['modifiers']
del tmp['modification_info']
tmp['label'] = self.model_to_label[(
tmp['model'], tmp['version'],)]
tmp['widget_key'] = tmp['commit_date'][0:16] + ' | ' + \
tmp['author'] + ' | ' + \
str(tmp['label']) + ' | ' + tmp['version']
model_rows.append(tmp)
model_info_table = | pd.DataFrame(model_rows) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import sys
sys.path.append('..')
# In[3]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import timedelta, datetime, date
import os
from utils import data_paths, load_config
from pathlib import Path
from nltk.metrics import edit_distance #(Levenshtein)
import pycountry
import math
# # Estimating The Infected Population From Deaths
# > Estimating the number of infected people by country based on the number of deaths and case fatality rate.
#
# - comments: true
# - author: <NAME>
# - categories: [growth, compare, interactive, estimation]
# - hide: false
# - image: images/covid-estimate-infections.png
# - permalink: /covid-infected/
# - toc: true
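# A minimal sketch of the estimation idea with made-up numbers (an added illustration,
# not the notebook's own code): assuming a constant case fatality rate (CFR) and a
# typical infection-to-death delay, infections on day t can be back-calculated from
# the deaths observed `delay_days` later.
# In[ ]:
example_deaths = pd.Series(
    [0, 1, 2, 5, 9, 14],
    index=pd.date_range('2020-03-01', periods=6, freq='D'),
    name='daily_deaths')
assumed_cfr = 0.01   # hypothetical CFR of 1%
delay_days = 17      # hypothetical mean delay from infection to death
estimated_infections = (example_deaths / assumed_cfr).shift(-delay_days, freq='D')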
# In[4]:
LOCAL_FILES=True
#jupyter or script
IS_SCRIPT = False
# In[5]:
os.getcwd()
# In[6]:
if IS_SCRIPT:
RUN_PATH = Path(os.path.realpath(__file__))
DATA_PARENT = RUN_PATH.parent.parent
else:
#for jupyter
cw = get_ipython().getoutput('pwd')
RUN_PATH = Path(cw[0])
DATA_PARENT = RUN_PATH.parent
# In[7]:
if IS_SCRIPT:
csse_data = data_paths('tools/csse_data_paths.yml')
else:
csse_data = data_paths('csse_data_paths.yml')
# In[8]:
if LOCAL_FILES:
confirmed_url=csse_data.get("csse_ts_local", {}).get('confirmed', {})
deaths_url=csse_data.get("csse_ts_local", {}).get('deaths', {})
recovered_url=csse_data.get("csse_ts_local", {}).get('recovered', {})
confirmed_url = str(DATA_PARENT/confirmed_url)
deaths_url = str(DATA_PARENT/deaths_url)
recovered_url = str(DATA_PARENT/recovered_url)
else:
confirmed_url=csse_data.get("csse_ts_global", {}).get('confirmed', {})
deaths_url=csse_data.get("csse_ts_global", {}).get('deaths', {})
recovered_url=csse_data.get("csse_ts_global", {}).get('recovered', {})
# In[9]:
### UN stats
# In[10]:
df_un_pop_density_info=pd.read_csv(DATA_PARENT/'data/un/df_un_pop_density_info.csv')
df_un_urban_growth_info=pd.read_csv(DATA_PARENT/'data/un/urban_growth_info.csv')
df_un_health_info=pd.read_csv(DATA_PARENT/'data/un/df_un_health_info.csv')
df_un_tourism_info=pd.read_csv(DATA_PARENT/'data/un/df_un_tourism_info.csv')
df_un_gdp_info=pd.read_csv(DATA_PARENT/'data/un/df_un_gdp_info.csv')
df_un_edu_info=pd.read_csv(DATA_PARENT/'data/un/df_un_edu_info.csv')
df_un_pop_growth_info=pd.read_csv(DATA_PARENT/'data/un/df_un_pop_growth_info.csv')
df_un_gdrp_rnd_info=pd.read_csv(DATA_PARENT/'data/un/df_un_gdrp_rnd_info.csv')
df_un_education_info=pd.read_csv(DATA_PARENT/'data/un/df_un_education_info.csv')
df_un_sanitation_info=pd.read_csv(DATA_PARENT/'data/un/df_un_sanitation_info.csv')
df_un_health_expenditure_info=pd.read_csv(DATA_PARENT/'data/un/df_un_health_expenditure_info.csv')
df_un_immigration_info=pd.read_csv(DATA_PARENT/'data/un/df_un_immigration_info.csv')
df_un_trading_info=pd.read_csv(DATA_PARENT/'data/un/df_un_trading_info.csv')
df_un_land_info=pd.read_csv(DATA_PARENT/'data/un/df_un_land_info.csv')
# In[11]:
df_un_health_info.head()
#Health personnel: Pharmacists (per 1000 population)
# In[12]:
df_un_trading_info.tail(n=20)
#columns of interest:
#Major trading partner 1 (% of exports)
#Major trading partner 2 (% of exports)
#Major trading partner 3 (% of exports)
# In[13]:
df_population_density=df_un_pop_density_info.loc[df_un_pop_density_info['Series'] == 'Population density']
# In[14]:
df_population_density.tail(n=50)
#Population aged 60+ years old (percentage)
#Population density
#Population mid-year estimates (millions)
# In[15]:
df_population_density.loc[df_population_density.groupby('Country')['Year'].idxmax()]
# In[16]:
df_population_density
# In[17]:
### Freedom House stats
# In[18]:
#Freedon House stats
def country_freedom():
global_freedom = str(DATA_PARENT/'data/freedom_house/Global_Freedom.csv')
df_global_free = pd.read_csv(global_freedom)
internet_freedom = str(DATA_PARENT/'data/freedom_house/Internet_Freedom.csv')
df_internet_free = pd.read_csv(internet_freedom)
return df_global_free, df_internet_free
df_global_freedom, df_internet_freedom = country_freedom()
# In[19]:
#csse countries
df_deaths = pd.read_csv(deaths_url, error_bad_lines=False)
df_confirmed = pd.read_csv(confirmed_url, error_bad_lines=False)
df_recovered = pd.read_csv(recovered_url, error_bad_lines=False)
csse_countries = []
for df in [df_deaths, df_confirmed, df_recovered]:
c = set(df["Country/Region"].unique())
csse_countries.append(c)
csse_countries = [item for sublist in csse_countries for item in sublist]
csse_countries = list(set(csse_countries))
# ## CSSE
# In[20]:
# Get data on deaths D_t
df_deaths = | pd.read_csv(deaths_url, error_bad_lines=False) | pandas.read_csv |
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm as cm
import seaborn as sns
sns.set_style("whitegrid")
import sys
import os
from pathlib import Path
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold, GridSearchCV, StratifiedKFold,RepeatedKFold, learning_curve
from xgboost.sklearn import XGBClassifier
from utils import data_handler
from utils import bayesiantests as bt
root_dir = str(Path(os.getcwd())) #.parent
to_dir = root_dir + '/results/'
import warnings
warnings.filterwarnings('ignore')
#res= None
##------------------------------ font, fig size setup------------------------------
plt.rc('font', family='serif')
def set_fig_fonts(SMALL_SIZE=22, MEDIUM_SIZE=24,BIGGER_SIZE = 26):
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
set_fig_fonts()
##------------------------------functions----------------------------------------
def save_fig(fig, title):
to_path = data_handler.format_title(to_dir,title,'.png')
fig.savefig(to_path ,dpi=1000,bbox_inches="tight",pad_inches=0)#, bbox_inches='tight', pad_inches=10
print("Successfully saved to: ",to_path)
return to_path
def plot_correlation_matrix(X,title, col_list, toSaveFig=True):
set_fig_fonts(12,14,16)
# standardization
scaler = StandardScaler()
df_transf = scaler.fit_transform(X)
df = pd.DataFrame(df_transf,columns = col_list)
fig = plt.figure()
ax1 = fig.add_subplot(111)
cmap = cm.get_cmap('coolwarm', 30)
#cax = ax1.pcolor(df.corr(), cmap=cmap, vmin=-1, vmax=1)
mat = df.corr()
flip_mat = mat.iloc[::-1]
cax = ax1.imshow(flip_mat , interpolation="nearest", cmap=cmap,vmin=-1, vmax=1)
ax1.grid(True)
#plt.suptitle('Features\' Correlation', y =0)
labels=df.columns.tolist()
x_labels = labels.copy()
labels.reverse()
#ax1.xaxis.set_ticks_position('top')
ax1.set_xticks(np.arange(len(labels)))#np.arange(len(labels))
ax1.set_yticks(np.arange(len(labels)))
# want a more natural, table-like display
#ax1.xaxis.tick_top()
ax1.set_xticklabels(x_labels, rotation = -45, ha="left") #, , rotation = 45,horizontalalignment="left"
ax1.set_yticklabels(labels, ha="right")
#plt.xticks(rotation=90)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
fig.colorbar(cax, boundaries=np.linspace(-1,1,21),ticks=np.linspace(-1,1,5))
plt.show()
if(toSaveFig):
save_fig(fig,title+'_confusion_matrix')
set_fig_fonts()
def plot_ROC_curve(pipe, tuned_parameters, title = 'roc_curve', save_csv = True,task=0):
# cross validation setup
Ntrials = 1
outter_nsplit = 10
inner_nsplit = 10
# Results store
Y_true = pd.Series(name='Y_true')
pred_results = pd.Series(name='pred_prob')
# load data
assert (task ==0 or task ==2),'Error: invalid task spec!'
X_df, Y_df = data_handler.load_XY(task)
X = X_df.values
Y = Y_df.values
for i in range(Ntrials):
train_index = []
test_index = []
outer_cv = StratifiedKFold(n_splits=outter_nsplit, shuffle=True, random_state=i)
for train_ind,test_ind in outer_cv.split(X,Y):
train_index.append(train_ind.tolist())
test_index.append(test_ind.tolist())
for j in range(outter_nsplit):#outter_nsplit
print("progress >> ",j,' / ',outter_nsplit)
X_train = X[train_index[j]]
Y_train = Y[train_index[j]]
X_test = X[test_index[j]]
Y_test = Y[test_index[j]]
inner_cv = StratifiedKFold(n_splits=inner_nsplit, shuffle=False, random_state=j)
clf = GridSearchCV(pipe,tuned_parameters, cv=inner_cv,scoring='roc_auc')
clf.fit(X_train, Y_train)
pred = pd.Series(clf.predict_proba(X_test)[:,1])
pred_results = pd.concat([pred_results, pred], axis=0,ignore_index=True)
Y_test_df = pd.Series(Y_test,name='Y_test')
Y_true = pd.concat([Y_true,Y_test_df], axis=0,ignore_index=True)
# plotting
fpr, tpr, thresholds = metrics.roc_curve(Y_true,pred_results)
roc_auc = metrics.auc(fpr, tpr)
auc_value = metrics.roc_auc_score(Y_true, pred_results)
fig = plt.figure(figsize=(12,12/1.618))
ax1 = fig.add_subplot(111)
labl = np.linspace(0,1,6)
labels = [float("{0:.2f}".format(x)) for x in labl]
ax1.set_xticks(labels)
ax1.set_xticklabels(labels)
labels[0] = ''
ax1.set_yticklabels(labels)
plt.grid(False)
ax1.plot(fpr, tpr, lw=2, label='ROC curve (area = {:.2f})'.format(auc_value),marker='.', linestyle='-', color='b')
ax1.plot([0,1],[0,1], linestyle='--', color='k')
ax1.set_xlabel('False Positive Rate')
ax1.set_ylabel('True Positive Rate')
ax1.set_xlim(0, 1)
ax1.set_ylim(0,1)
ax1.legend(loc='lower right')
color = 'black'
plt.setp(ax1.spines.values(), color=color)
ax1.yaxis.set_visible(True)
ax1.xaxis.set_visible(True)
ax1.yaxis.set_ticks_position('left')
ax1.xaxis.set_ticks_position('bottom')
ax1.get_yaxis().set_tick_params(direction='out', width=2)
plt.show()
fig.savefig(data_handler.format_title(to_dir,title+'_ROC_curve','.png'),dpi=1000,bbox_inches="tight",pad_inches=0)
# save results to csv if true
if save_csv:
data_mat = np.array([fpr,tpr]).T
ret = pd.DataFrame(data_mat,columns=['fpr','tpr'])
data_handler.save_csv(ret,title+'_ROC_curve')
return True;
def plot_learning_curve_versus_tr_epoch(title='',ntrials=1, nfolds=10, save_csv=False,verbose=True, save_fig=False):
X_df,Y_df = data_handler.load_XY()
X = X_df.values
Y = Y_df.values
_ylabel = 'Mean AUROC'
n_jobs=4
# cross validation setup
Ntrials = ntrials
outter_nsplit = nfolds
tot_count = Ntrials * outter_nsplit
# Results store
train_mat = np.zeros((tot_count,500))
test_mat = np.zeros((tot_count,500))
for i in range(Ntrials):
init_time = time.time()
print("trial = ",i)
train_index = []
test_index = []
outer_cv = StratifiedKFold(n_splits=outter_nsplit, shuffle=True, random_state=i)
for train_ind,test_ind in outer_cv.split(X,Y):
train_index.append(train_ind.tolist())
test_index.append(test_ind.tolist())
for j in range(outter_nsplit):#outter_nsplit
count = i * outter_nsplit + j
print(str(count), " / ",str(tot_count))
X_train = X[train_index[j]]
Y_train = Y[train_index[j]]
X_test = X[test_index[j]]
Y_test = Y[test_index[j]]
eval_sets = [(X_train, Y_train), (X_test,Y_test)]
clf = XGBClassifier(objective="binary:logistic",min_child_weight=1,**{'tree_method':'exact'},silent=True,
n_jobs=4,random_state=3,seed=3,
learning_rate=0.01,
colsample_bylevel=0.9,
colsample_bytree=0.9,
n_estimators=500,
gamma=0.8,
max_depth =11,
reg_lambda = 0.8,
subsample=0.4)
clf.fit(X_train,Y_train, eval_metric=['auc'], eval_set = eval_sets, verbose=False)
results = clf.evals_result()
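# evals_result() maps each eval set to its per-epoch metrics, e.g.
# {'validation_0': {'auc': [...]}, 'validation_1': {'auc': [...]}};
# validation_0 is the train split and validation_1 the test split here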
epochs = len(results['validation_0']['auc'])
# record results
train_mat[count] = results['validation_0']['auc']
test_mat[count] = results['validation_1']['auc']
if(verbose):
print('Iter: %d, epochs: %d'%(count, epochs))
print('training result: %.4f, testing result: %.4f'%(train_mat[count][499], test_mat[count][499]))
print('total time: %.4f mins'% ((time.time()-init_time)/60))
# Results store
epoch_lists=list(range(1,epochs+1))
train_results = pd.DataFrame(data=train_mat,columns=['epoch_'+str(i) for i in epoch_lists])
test_results = pd.DataFrame(data=test_mat,columns=['epoch_'+str(i) for i in epoch_lists])
if(save_csv):
data_handler.save_csv(train_results,title='mos2_learning_curve_train_raw')
data_handler.save_csv(test_results,title='mos2_learning_curve_test_raw')
print('end')
_ylim=(0.5, 1.01)
n_jobs=4
# create learning curve values
train_scores_mean = np.mean(train_mat, axis=0)
train_scores_std = np.std(train_mat, axis=0)
test_scores_mean = np.mean(test_mat, axis=0)
test_scores_std = np.std(test_mat, axis=0)
tr_size_df = pd.Series(epoch_lists, name='training_epoch')
tr_sc_m_df = pd.Series(train_scores_mean, name='training_score_mean')
val_sc_m_df = pd.Series(test_scores_mean, name='val_score_mean')
tr_sc_std_df = pd.Series(train_scores_std, name='training_score_std')
val_sc_std_df = pd.Series(test_scores_std, name='val_score_std')
if(save_csv):
res = pd.concat([tr_size_df, tr_sc_m_df,val_sc_m_df,tr_sc_std_df,val_sc_std_df], axis=1)
data_handler.save_csv(data=res,title=title+'_learning_curve')
# plotting
_ylim=(0.5, 1.01)
fig = plt.figure(figsize=(12,12/1.618))
ax1 = fig.add_subplot(111)
ax1.set_ylim(_ylim)
ax1.set_xlabel("Number of Training Epochs")
ax1.set_ylabel(_ylabel)
plt.grid(False)
ax1.plot(tr_size_df, tr_sc_m_df, color="r", label="Training") #'o-',
ax1.plot(tr_size_df, val_sc_m_df, color="b", label="Validation") #'^--',
# plot error bars
#ax1.errorbar(tr_size_df, tr_sc_m_df, yerr=tr_sc_std_df,color="r", )
#ax1.errorbar(tr_size_df, val_sc_m_df, yerr=val_sc_std_df)
plt.setp(ax1.spines.values(), color='black')
plt.legend(loc="lower right")
plt.show()
to_path = None
if save_fig:
to_path = data_handler.format_title(to_dir,title+'_learning_curve','.png')
fig.savefig(to_path,dpi=1000,bbox_inches="tight",pad_inches=0.1)
return to_path
def plot_learning_curve_versus_tr_set_size(title='',save_csv=True,scoring = 'roc_auc'):
# Learning curve over a 5-fold stratified outer CV, with training-set sizes from 20%
# to 100% of the data; an inner 5-fold CV drives the hyperparameter grid search
X, Y = data_handler.load_XY()
_ylabel = 'Mean AUROC'
outer_cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=6)
inner_cv = StratifiedKFold(n_splits=5, shuffle=False, random_state=3)
xgb_clf = XGBClassifier(objective="binary:logistic",min_child_weight=1,**{'tree_method':'exact'},
silent=True,n_jobs=1,random_state=3,seed=3);
tuned_parameters = dict(learning_rate=[0.01,0.1],
n_estimators=[100, 300, 500],
colsample_bylevel = [0.5,0.7,0.9],
gamma=[0,0.2,0.4],
max_depth =[3,5,7],
reg_lambda = [0.1,1,10],
subsample=[0.4,0.7,1])
xgb_cv = GridSearchCV(xgb_clf,tuned_parameters, cv=inner_cv,scoring='roc_auc',verbose=0,n_jobs=1)
_ylim=(0.5, 1.01)
n_jobs=4
train_sizes=np.linspace(.2, 1.0, 5)
# create learning curve values
train_sizes, train_scores, test_scores = learning_curve(
xgb_cv, X, Y, cv=outer_cv, n_jobs=4, train_sizes=train_sizes,scoring=scoring)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
tr_size_df = pd.Series(train_sizes, name='training_set_size')
tr_sc_m_df = pd.Series(train_scores_mean, name='training_score_mean')
cv_sc_m_df = pd.Series(test_scores_mean, name='cv_score_mean')
tr_sc_std_df = | pd.Series(train_scores_std, name='training_score_std') | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# # Exploratory Data Analysis
# The purpose of this section of the notebook is to provide some key highlights of the baseline data being used. This showcases the various attributes, any specific transformations, and key relationships.
# In[50]:
import pandas as pd
import matplotlib.pyplot as plt
# Load the primary dataset
df = pd.read_csv('./../datasets/original/WA_Fn-UseC_-Telco-Customer-Churn.csv')
# As can be seen below, the dataset contains 21 columns and 7,043 rows
# In[51]:
df.shape
# In[52]:
df.info()
# ## Attribute Definitions
# - **customerID** is the unique identifier per customer.
# - **MonthlyCharges** and **TotalCharges** identify the monthly and total spending to date for the population. Both should be float attributes.
# - **tenure** is how long the customer has been with the service, measured in months.
# - All other 16 attributes are categorical and highlight customer attributes (e.g. Senior Citizen) or usage of various features or (e.g. Phone Service).
# In[53]:
df.sample(n=5)
# ## Basic Transformations
# Converting **Total Charges** to a float attribute.
# In[54]:
df['TotalCharges'] = df['TotalCharges'].str.replace(r' ','0').astype(float)
df['TotalCharges'].dtypes
# - Changing **Churn** to better represent a binary attribute with 1s and 0s vs. "No" or "Yes". This aids further computation.
# - Changing **Senior Citizen** to a categorical attribute (No/Yes) from 1s and 0s.
# - Changing a number of other columns to showcase visualizations better.
# In[55]:
df['Churn'] = df['Churn'].apply(lambda x: 0 if x == "No" else 1)
df['SeniorCitizen'] = df['SeniorCitizen'].apply(lambda x: "No" if x == 0 else "Yes")
# Cosmetic edits to showcase visualizations better
df['OnlineSecurity'] = df['OnlineSecurity'].apply(lambda x: "Other" if x == "No internet service" else x)
df['OnlineBackup'] = df['OnlineBackup'].apply(lambda x: "Other" if x == "No internet service" else x)
df['DeviceProtection'] = df['DeviceProtection'].apply(lambda x: "Other" if x == "No internet service" else x)
df['TechSupport'] = df['TechSupport'].apply(lambda x: "Other" if x == "No internet service" else x)
df['StreamingTV'] = df['StreamingTV'].apply(lambda x: "Other" if x == "No internet service" else x)
df['StreamingMovies'] = df['StreamingMovies'].apply(lambda x: "Other" if x == "No internet service" else x)
df['MultipleLines'] = df['MultipleLines'].apply(lambda x: "Other" if x == "No phone service" else x)
df['PaymentMethod'] = df['PaymentMethod'].map({'Bank transfer (automatic)':'Bank','Credit card (automatic)':'Credit','Mailed check':'MCheck','Electronic check':'ECheck',})
df.head()
# Bin the **tenure** attribute into 10 buckets.
# In[56]:
df['tenure_bins'] = pd.cut(df['tenure'], bins=10, include_lowest=True)
# Bin the **MonthlyCharges** attribute into 10 buckets.
# In[57]:
df['monthlyCharges_bins'] = pd.cut(df['MonthlyCharges'], bins=10, include_lowest=True)
# ## Basic Analyses
# ### Customer counts by categorical attributes
# In[58]:
fig, axes = plt.subplots(nrows=4, ncols=4, figsize=(16, 15))
column_list=['OnlineSecurity', 'PaymentMethod', 'Partner', 'Dependents','OnlineBackup',
'gender', 'SeniorCitizen', 'PhoneService', 'MultipleLines','InternetService',
'DeviceProtection', 'TechSupport','StreamingTV','StreamingMovies',
'Contract','PaperlessBilling']
# Iterate through all combinations
for i, var in enumerate(column_list):
# Adjusting for the multi-line assignments
if i <=3:
pos1=0
pos2=i
elif i <=7:
pos1=1
pos2=i-4
elif i <=11:
pos1=2
pos2=i-8
elif i <=15:
pos1=3
pos2=i-12
tt = df.groupby(var).size().to_frame()
tt.index.rename('', inplace=True) # To remove extra index labels for visualizations
tdf = tt.plot(kind='bar', ax=axes[pos1,pos2], title=str(var), legend=None)
for container in tdf.containers:
tdf.bar_label(container)
plt.tight_layout()
# ### Histogram of Customer Tenure (in months)
# In[59]:
ax = df['tenure'].plot(
kind='hist',
title="'tenure' histogram for all customers",
figsize=(12,6),
xticks=[1,5,12,20,24,30,36,40,48,50,60,70,72]
)
# This is an interesting histogram: a large share of customers are fairly new (less than a year old), while long-tenured customers (greater than 5 years) make up another sizeable share.
# In[60]:
churned = df[ df['Churn']==1]
ax = churned['tenure'].plot(
kind='hist',
title="'tenure' histogram for customers that churned",
figsize=(12,6),
xticks=[1,5,12,20,24,30,36,40,48,50,60,70,72])
# As one would expect, most of the churned customers are less than a year old. Many organizations find this to be the biggest segment of churn, i.e. customers fairly new to the service or product.
#
# (Note: A good lesson on survivorship bias.)
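# A quick numeric cross-check of the claim above (an added illustration, not part of the original notebook): churn rate per tenure bin, which should be highest for the youngest cohorts.
# In[ ]:
churn_rate_by_tenure_bin = df.groupby('tenure_bins')['Churn'].mean().round(2)
churn_rate_by_tenure_bin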
# ### Churned customer population
# *(Note: 0 = Not churned, 1 = Churned)*
# In[61]:
churn_pp = df.groupby('Churn').size().to_frame()
ax = churn_pp.plot(kind='bar', legend=None)
# ax.bar_label(ax.containers[0])
for container in ax.containers:
ax.bar_label(container)
# ### Churn ratio by categorical attributes
# In[62]:
fig, axes = plt.subplots(nrows=4, ncols=4, figsize=(15, 15))
column_list=['OnlineSecurity', 'PaymentMethod', 'Partner', 'Dependents','OnlineBackup',
'gender', 'SeniorCitizen', 'PhoneService', 'MultipleLines','InternetService',
'DeviceProtection', 'TechSupport','StreamingTV','StreamingMovies',
'Contract','PaperlessBilling']
# Iterate through all combinations
for i, var in enumerate(column_list):
# Adjusting for the multi-line assignments
if i <=3:
pos1=0
pos2=i
elif i <=7:
pos1=1
pos2=i-4
elif i <=11:
pos1=2
pos2=i-8
elif i <=15:
pos1=3
pos2=i-12
c_df = df.groupby(var).agg({'Churn': ['sum','count']})
c_df.columns = ['sum', 'count']
c_df['percent'] = (c_df['sum']/c_df['count']).round(2)
c_df = c_df[['percent']]
c_df.index.rename('', inplace=True) # To remove extra index labels for visualizations
tt = c_df.plot(kind='bar', ax=axes[pos1,pos2], title=str(var), legend=None)
for container in tt.containers:
tt.bar_label(container)
plt.tight_layout()
# ### Exploring the relationship between tenure and monthly and total charges
# In[63]:
ax = df.plot(x='tenure', y='MonthlyCharges', kind='scatter', title="Comparing 'tenure' and monthly charges")
# No particular relationship seems to be apparent.
# In[64]:
ax = df.plot(x='tenure', y='TotalCharges', kind='scatter', title="Comparing 'tenure' and total charges")
# As one would expect, there is a linear relationship between tenure and total charges, i.e. with time, one's total charges grow.
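# A quick check of the linearity claim above (an added illustration, not part of the original notebook): pairwise correlations between tenure and the charge attributes.
# In[ ]:
df[['tenure', 'MonthlyCharges', 'TotalCharges']].corr().round(2)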
# ### Monthly Charges by Uniform Spending Cohorts
# In[65]:
mc_bin = df.groupby(by='monthlyCharges_bins').size().to_frame()
mc_bin.columns = ['Customer Count']
ax = mc_bin.plot(kind='barh')
# By dividing the customer base into 10 uniform cohorts, one can see that a large proportion spend less than \$28 per month. A non-trivial proportion also spends above \$100 per month.
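# A quick sanity check of the \$28 statement above (an added illustration, not part of the original notebook).
# In[ ]:
share_under_28 = (df['MonthlyCharges'] < 28).mean()
print(f"{share_under_28:.1%} of customers pay less than $28 per month")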
# ### Feature Importance
# By focusing on most of the categorical attributes, and grouping tenure into specific bins, we can use a classifier to identify the attributes that have a stronger bearing on churn. The top-ranked attributes, such as being on a month-to-month contract, being early in the use of the service (tenure bin < 7 months), and paying by electronic check, corroborate the churn-rate patterns seen above.
# In[66]:
feature_df = df.copy()
# Drop un-needed columns, and convert categorical column to object
feature_df = feature_df.drop(['customerID','TotalCharges', 'MonthlyCharges', 'monthlyCharges_bins', 'tenure'], axis=1)
feature_df['tenure_bins'] = feature_df['tenure_bins'].astype(object)
# One hot encode categorical columns
feature_df = pd.get_dummies(feature_df)
# feature_df.info()
# Train the dataset with a Random Forest Classifier
X,y = feature_df.drop('Churn', axis=1), feature_df.Churn
from sklearn.ensemble import RandomForestClassifier
params={'random_state':42, 'n_estimators':500}
clf=RandomForestClassifier(**params)
clf.fit(X,y)
fv = | pd.Series(data=clf.feature_importances_,index=X.columns) | pandas.Series |
import os
import time
import re
import requests
import pandas as pd
from datetime import datetime, timedelta
from dateutil import parser
from concha.environment import FileHandler
class NOAA:
"""Handles NOAA weather operations for finding stations, getting historical weather, and forecasts.
The only setting for this class is the NOAA api key which the user needs to set, and which is saved
in ".../concha_planners/importers/[name from __init__].json
"""
def __init__(self, name="noaa"):
"""
Initializes the NOAA service.
Args:
name (str): Name of the weather profile.
"""
# Assign a filehandler
self.filehandler = FileHandler()
importers_path = self.filehandler.check_importer_path()
self.settings_path = os.path.join(importers_path, f"{name}.json")
# Get the settings (i.e. the api if it is set)
if os.path.exists(self.settings_path):
self.settings = self.filehandler.dict_from_file(self.settings_path)
else:
# If not, make a [name].json file
self.settings = {"type": "noaa", "name": name, "api_key": None}
self.filehandler.dict_to_file(self.settings, self.settings_path)
return
def set_api_key(self, api_key):
"""Setter for the NOAA api key."""
self.settings["api_key"] = api_key
self.filehandler.dict_to_file(self.settings, self.settings_path)
print("NOAA API key saved.")
def get_weather_history(self, start_date, end_date, station_id):
"""Looks up the weather within a date range at the station.
Args:
start_date (str): Start of the date range in '%Y-%m-%d' format, e.g. '2020-07-15'.
end_date (str): End date of range in which to find weather history.
station_id (str): The NOAA GHCND station name. (https://gis.ncdc.noaa.gov/maps/ncei/summaries/daily)
Return:
weather_history (pd.DataFrame): fields: ['date', 'tmin', 'tmax'] and possibly ['prcp', 'snow']
"""
# Assemble actual request to NOAA. "GHCND" is the Global Historical Climate Network Database
try:
api_key = self.settings["api_key"]
except KeyError:
print("An api_key not set up.")
print(self.settings)
headers = {"Accept": "application/json", "token": api_key}
url = "https://www.ncdc.noaa.gov/cdo-web/api/v2/data"
params = {
"stationid": station_id,
"startdate": start_date,
"enddate": end_date,
"datasetid": "GHCND",
"units": "standard",
"datatypeid": ["TMIN", "TMAX", "PRCP", "SNOW"],
"sortorder": "DESC",
"offset": 0,
"limit": 1000,
}
# Loop through requests to the API if more than 1000 results are required.
records = []
for i in range(10):
req = requests.get(url, headers=headers, params=params)
res = req.json()
recs = pd.DataFrame(res["results"])
records.append(recs)
count = res["metadata"]["resultset"]["count"]
if count < params["limit"]:
break
else:
params["offset"] += params["limit"]
records = pd.concat(records)
# Format the results and turn precipitation and snow levels into just yes/no booleans
records["datatype"] = records["datatype"].str.lower()
records = records.pivot(
index="date", columns="datatype", values="value"
).reset_index()
records["date"] = pd.to_datetime(records["date"])
use_fields = ["date", "tmin", "tmax"]
for field in ["prcp", "snow"]:
if field in records.columns:
records[field] = records[field].apply(lambda x: x > 0)
use_fields.append(field)
return records[use_fields]
def get_weather_forecast(self, forecast_url):
"""Finds the weather forecast at the grid covering the station location.
The forecast API returns separate daytime and overnight periods. The minimum
temperature is taken from the overnight period leading into each date (the
morning of) rather than the following night, because the morning temperature
has potentially more effect on demand than the minimum after the store closes.
Args:
forecast_url (str): The api.weather.gov forecast url for the given location.
Returns:
by_date (pd.DataFrame): The high/low temp by date with the precipitation
and snow as booleans.
"""
# The forecast api is weird and sometimes won't respond for the first minute or so.
headers = {"User-Agent": "project concha python application"}
# Try to get the forecast up to 5 times.
for i in range(5):
req = requests.get(forecast_url, headers=headers)
try:
req.raise_for_status()
except requests.exceptions.HTTPError:
# Just ignore and try again later
time.sleep(5)
# If the first tries don't work - just tell the user to try again later.
# The API is weird and sometimes just doesn't work.
try:
req.raise_for_status()
except requests.exceptions.HTTPError as err:
print(
f"""The NOAA forecast site is weird and sometimes doesn't respond. Try the url in your browser,
Then you get a respose, you should be able to run this again and get the forecast. URL:
{forecast_url}"""
)
print(err)
res = req.json()
forecast = pd.DataFrame(res["properties"]["periods"])
# Many fields are returned, this limits them to the ones needed.
forecast = forecast[
["startTime", "endTime", "isDaytime", "temperature", "shortForecast"]
]
# Date is chosen such that the previous overnight temp, and day temp are assigned to the date
forecast["date"] = forecast["endTime"].apply(
lambda x: parser.parse(x).strftime("%Y-%m-%d")
)
# String search used to figure out of rain is in the forecast
forecast["prcp"] = forecast["shortForecast"].str.contains(
"showers|rain|thunderstorms", flags=re.IGNORECASE, regex=True
)
forecast["snow"] = forecast["shortForecast"].str.contains(
"snow|blizzard|flurries", flags=re.IGNORECASE, regex=True
)
# Because two values exist for each date, the they are aggregated to find one value for each date.
by_date = forecast.groupby("date").agg(
tmin=pd.NamedAgg(column="temperature", aggfunc="min"),
tmax=pd.NamedAgg(column="temperature", aggfunc="max"),
count=pd.NamedAgg(column="temperature", aggfunc="count"),
prcp=pd.NamedAgg(column="prcp", aggfunc="any"),
snow= | pd.NamedAgg(column="snow", aggfunc="any") | pandas.NamedAgg |
"""
Testing interaction between the different managers (BlockManager, ArrayManager)
"""
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
import pandas._testing as tm
from pandas.core.internals import (
ArrayManager,
BlockManager,
SingleArrayManager,
SingleBlockManager,
)
def test_dataframe_creation():
with pd.option_context("mode.data_manager", "block"):
df_block = pd.DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]})
assert isinstance(df_block._mgr, BlockManager)
with pd.option_context("mode.data_manager", "array"):
df_array = pd.DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]})
assert isinstance(df_array._mgr, ArrayManager)
# also ensure both are seen as equal
tm.assert_frame_equal(df_block, df_array)
# conversion from one manager to the other
result = df_block._as_manager("block")
assert isinstance(result._mgr, BlockManager)
result = df_block._as_manager("array")
assert isinstance(result._mgr, ArrayManager)
tm.assert_frame_equal(result, df_block)
assert all(
array_equivalent(left, right)
for left, right in zip(result._mgr.arrays, df_array._mgr.arrays)
)
result = df_array._as_manager("array")
assert isinstance(result._mgr, ArrayManager)
result = df_array._as_manager("block")
assert isinstance(result._mgr, BlockManager)
tm.assert_frame_equal(result, df_array)
assert len(result._mgr.blocks) == 2
def test_series_creation():
with pd.option_context("mode.data_manager", "block"):
s_block = pd.Series([1, 2, 3], name="A", index=["a", "b", "c"])
assert isinstance(s_block._mgr, SingleBlockManager)
with | pd.option_context("mode.data_manager", "array") | pandas.option_context |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
""" test get/set & misc """
import pytest
from datetime import timedelta
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_scalar
from pandas import (Series, DataFrame, MultiIndex,
Timestamp, Timedelta, Categorical)
from pandas.tseries.offsets import BDay
from pandas.compat import lrange, range
from pandas.util.testing import (assert_series_equal)
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestMisc(TestData):
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
assert (result == 5).all()
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
assert result == 4
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
assert self.series[idx1] == self.series.get(idx1)
assert self.objSeries[idx2] == self.objSeries.get(idx2)
assert self.series[idx1] == self.series[5]
assert self.objSeries[idx2] == self.objSeries[5]
assert self.series.get(-1) == self.series.get(self.series.index[-1])
assert self.series[5] == self.series.get(self.series.index[5])
# missing
d = self.ts.index[0] - BDay()
pytest.raises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
assert result is None
def test_getitem_int64(self):
idx = np.int64(5)
assert self.ts[idx] == self.ts[5]
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
assert self.series.index[2] == slice1.index[1]
assert self.objSeries.index[2] == slice2.index[1]
assert self.series[2] == slice1[1]
assert self.objSeries[2] == slice2[1]
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
@pytest.mark.parametrize(
'result_1, duplicate_item, expected_1',
[
[
pd.Series({1: 12, 2: [1, 2, 2, 3]}), pd.Series({1: 313}),
pd.Series({1: 12, }, dtype=object),
],
[
pd.Series({1: [1, 2, 3], 2: [1, 2, 2, 3]}),
pd.Series({1: [1, 2, 3]}), pd.Series({1: [1, 2, 3], }),
],
])
def test_getitem_with_duplicates_indices(
self, result_1, duplicate_item, expected_1):
# GH 17610
result = result_1.append(duplicate_item)
expected = expected_1.append(duplicate_item)
assert_series_equal(result[1], expected)
assert result[2] == result_1[2]
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
pytest.raises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
pytest.raises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
assert s.iloc[0] == s['a']
s.iloc[0] = 5
tm.assert_almost_equal(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
assert isinstance(value, np.float64)
def test_series_box_timestamp(self):
rng = pd.date_range('20090415', '20090519', freq='B')
ser = Series(rng)
assert isinstance(ser[5], pd.Timestamp)
rng = pd.date_range('20090415', '20090519', freq='B')
ser = Series(rng, index=rng)
assert isinstance(ser[5], pd.Timestamp)
assert isinstance(ser.iat[5], pd.Timestamp)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
pytest.raises(KeyError, s.__getitem__, 1)
pytest.raises(KeyError, s.loc.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
assert is_scalar(obj['c'])
assert obj['c'] == 0
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .loc internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
expected = s.loc[['foo', 'bar', 'bah', 'bam']]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
pytest.raises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
assert result == s.loc['A']
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.loc[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
assert self.series.index[9] not in numSlice.index
assert self.objSeries.index[9] not in objSlice.index
assert len(numSlice) == len(numSlice.index)
assert self.series[numSlice.index[0]] == numSlice[numSlice.index[0]]
assert numSlice.index[1] == self.series.index[11]
assert tm.equalContents(numSliceEnd, np.array(self.series)[-10:])
# Test return view.
sl = self.series[10:20]
sl[:] = 0
assert (self.series[10:20] == 0).all()
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
assert np.isnan(self.ts[6])
assert np.isnan(self.ts[2])
self.ts[np.isnan(self.ts)] = 5
assert not np.isnan(self.ts[2])
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
assert (series[::2] == 0).all()
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res = self.ts.set_value(idx, 0)
assert res is self.ts
assert self.ts[idx] == 0
# equiv
s = self.series.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res = s.set_value('foobar', 0)
assert res is s
assert res.index[-1] == 'foobar'
assert res['foobar'] == 0
s = self.series.copy()
s.loc['foobar'] = 0
assert s.index[-1] == 'foobar'
assert s['foobar'] == 0
def test_setslice(self):
sl = self.ts[5:20]
assert len(sl) == len(sl.index)
assert sl.index.is_unique
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
pytest.raises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
pytest.raises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.loc[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
assert result == expected
result = s.iloc[0]
assert result == expected
result = s['a']
assert result == expected
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
assert orig.dtype == 'datetime64[ns, {0}]'.format(tz)
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
assert vals.dtype == 'datetime64[ns, {0}]'.format(tz)
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
assert orig.dtype == 'datetime64[ns, {0}]'.format(tz)
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00-04:00', tz=tz),
pd.Timestamp('2011-01-01 00:00-05:00', tz=tz),
pd.Timestamp('2016-11-06 01:00-05:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
assert vals.dtype == 'datetime64[ns, {0}]'.format(tz)
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_categorial_assigning_ops(self):
orig = Series(Categorical(["b", "b"], categories=["a", "b"]))
s = orig.copy()
s[:] = "a"
exp = Series(Categorical(["a", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[1] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[s.index > 0] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[[False, True]] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s.index = ["x", "y"]
s["y"] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]),
index=["x", "y"])
tm.assert_series_equal(s, exp)
# ensure that one can set something to np.nan
s = Series(Categorical([1, 2, 3]))
exp = Series(Categorical([1, np.nan, 3], categories=[1, 2, 3]))
s[1] = np.nan
tm.assert_series_equal(s, exp)
def test_take(self):
s = Series([-1, 5, 6, 2, 4])
actual = s.take([1, 3, 4])
expected = Series([5, 2, 4], index=[1, 3, 4])
tm.assert_series_equal(actual, expected)
actual = s.take([-1, 3, 4])
expected = Series([4, 2, 4], index=[4, 3, 4])
tm.assert_series_equal(actual, expected)
pytest.raises(IndexError, s.take, [1, 10])
pytest.raises(IndexError, s.take, [2, 5])
with tm.assert_produces_warning(FutureWarning):
s.take([-1, 3, 4], convert=False)
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.loc[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.iloc[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.loc[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.loc[d1] = 4
self.series.loc[d2] = 6
assert self.series[d1] == 4
assert self.series[d2] == 6
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
        # assigning NaN coerces the bool Series to float
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
pytest.raises(IndexError, s.__getitem__, 5)
pytest.raises(IndexError, s.__setitem__, 5, 0)
pytest.raises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
pytest.raises(IndexError, s.__getitem__, 5)
pytest.raises(IndexError, s.__setitem__, 5, 0)
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame({c: [1, 2, 3] for c in ['a', 'b', 'c']})
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
assert df['bb'].iloc[0] == 0.15
| pd.set_option('chained_assignment', 'raise') | pandas.set_option |
import nose
import unittest
from numpy import nan
import numpy as np
from pandas import Series, DataFrame
from pandas.util.compat import product
from pandas.util.testing import (assert_frame_equal,
assert_series_equal,
assert_almost_equal)
class TestRank(unittest.TestCase):
_multiprocess_can_split_ = True
s = Series([1, 3, 4, 2, nan, 2, 1, 5, nan, 3])
df = DataFrame({'A': s, 'B': s})
results = {
'average': np.array([1.5, 5.5, 7.0, 3.5, nan,
3.5, 1.5, 8.0, nan, 5.5]),
'min': np.array([1, 5, 7, 3, nan, 3, 1, 8, nan, 5]),
'max': np.array([2, 6, 7, 4, nan, 4, 2, 8, nan, 6]),
'first': np.array([1, 5, 7, 3, nan, 4, 2, 8, nan, 6])
}
def test_rank_tie_methods(self):
s = self.s
def _check(s, expected, method='average'):
result = s.rank(method=method)
assert_almost_equal(result, expected)
dtypes = [None, object]
disabled = set([(object, 'first')])
results = self.results
for method, dtype in product(results, dtypes):
if (dtype, method) in disabled:
continue
series = s if dtype is None else s.astype(dtype)
_check(series, results[method], method=method)
def test_rank_descending(self):
dtypes = ['O', 'f8', 'i8']
for dtype, method in product(dtypes, self.results):
if 'i' in dtype:
s = self.s.dropna()
df = self.df.dropna()
else:
s = self.s.astype(dtype)
df = self.df.astype(dtype)
res = s.rank(ascending=False)
expected = (s.max() - s).rank()
assert_series_equal(res, expected)
res = df.rank(ascending=False)
expected = (df.max() - df).rank()
assert_frame_equal(res, expected)
if method == 'first' and dtype == 'O':
continue
expected = (s.max() - s).rank(method=method)
res2 = s.rank(method=method, ascending=False)
| assert_series_equal(res2, expected) | pandas.util.testing.assert_series_equal |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/12 15:47
Desc: Eastmoney - Shanghai/Shenzhen boards - concept boards
http://quote.eastmoney.com/center/boardlist.html#concept_board
"""
import requests
import pandas as pd
def stock_board_concept_name_em() -> pd.DataFrame:
"""
    Eastmoney - Shanghai/Shenzhen boards - concept boards - board names
    http://quote.eastmoney.com/center/boardlist.html#concept_board
    :return: concept board names
:rtype: pandas.DataFrame
"""
url = "http://79.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:90 t:3 f:!50",
"fields": "f2,f3,f4,f8,f12,f14,f15,f16,f17,f18,f20,f21,f24,f25,f22,f33,f11,f62,f128,f124,f107,f104,f105,f136",
"_": "1626075887768",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.columns = [
"排名",
"最新价",
"涨跌幅",
"涨跌额",
"换手率",
"_",
"板块代码",
"板块名称",
"_",
"_",
"_",
"_",
"总市值",
"_",
"_",
"_",
"_",
"_",
"_",
"上涨家数",
"下跌家数",
"_",
"_",
"领涨股票",
"_",
"_",
"领涨股票-涨跌幅",
]
temp_df = temp_df[
[
"排名",
"板块名称",
"板块代码",
"最新价",
"涨跌额",
"涨跌幅",
"总市值",
"换手率",
"上涨家数",
"下跌家数",
"领涨股票",
"领涨股票-涨跌幅",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["总市值"] = pd.to_ | numeric(temp_df["总市值"]) | pandas.to_numeric |
#!/usr/bin/env python
# coding: utf-8
from install import *
from solvers import *
from params import *
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import rayleigh, norm, kstest
def plot_maxwell(vel, label=None, draw=True):
speed = (vel*vel).sum(1)**0.5
loc, scale = rayleigh.fit(speed, floc=0)
dist = rayleigh(scale=scale)
if draw:
plt.hist(speed, 20, normed=True)
x = np.linspace(dist.ppf(0.01), dist.ppf(0.99), 1000)
plt.plot(x, dist.pdf(x), label=label)
if label:
plt.legend()
return kstest(speed, dist.cdf)[0]
def plot_maxwell_x(vel, label=None, draw=True):
loc, scale = norm.fit(vel[:, 0], floc=0)
dist = norm(scale=scale)
if draw:
plt.hist(vel[:, 0], 20, normed=True)
x = np.linspace(dist.ppf(0.01), dist.ppf(0.99), 1000)
plt.plot(x, dist.pdf(x), label=label)
if label:
plt.legend()
return kstest(vel[:, 0], dist.cdf)[0]
def plot_particles(pos, vel):
plt.xlabel("X")
plt.ylabel("Y")
plt.quiver(pos[:, 0], pos[:, 1], vel[:, 0], vel[:, 1])
def multi_particles(f, title=None, n=None, width=4.5, height=4):
if n is None:
n = min(5, len(f.data))
fig = plt.figure(figsize=(width*n, height))
if title:
fig.suptitle(title)
for i in range(n):
plt.subplot(1, n, i+1)
j = math.floor(i*(len(f.data)-1)/(n-1))
plt.title(f"t = {f.time[j]:.1f}")
r = f.data[j]
plot_particles(r.pos, r.vel)
def multi_maxwell(f, title=None, n=None, draw=True, width=20, height=2):
if n is None:
n = len(f.data)
if draw:
fig = plt.figure(figsize=(width, height*n))
if title:
fig.suptitle(title)
max_vel = max((r.vel*r.vel).sum(1).max() for r in f.data)**0.5
max_x = max((np.abs(r.vel[:,0])).max() for r in f.data)
fits = []
for i in range(n):
        j = math.floor(i*(len(f.data)-1)/(n-1))
r = f.data[j]
if draw:
plt.subplot(n, 2, 2*i+1)
plt.xlim(0, max_vel)
f1 = plot_maxwell(r.vel, f"t = {f.time[j]:.1f}", draw)
if draw:
plt.subplot(n, 2, 2*i+2)
plt.xlim(-max_x, max_x)
f2 = plot_maxwell_x(r.vel, f"t = {f.time[j]:.1f}", draw)
fits.append({"t": f.time[j], "speed_stat":f1, "xvel_stat":f2})
return pd.DataFrame.from_records(fits, index='t')
def run_simulation(generator, solver, compound_step=True, scale=16,
time_steps=500, save_step=100):
dt, n, size, data = generator.gen_on_scale(scale)
frame = pd.DataFrame(solver.simulate(dt, n, size, time_steps, save_step, compound_step, data), columns=["t", "time", "perf", "data"])
return frame
def plot_and_fit(frame, title="", draw_particles=True, draw_maxwell=True,
particles_width=2.5, particles_height=2,
maxwell_width=10, maxwell_height=1):
if draw_particles:
multi_particles(frame, None, width=particles_width, height=particles_height)
fits = multi_maxwell(frame, title, draw=draw_maxwell, width=maxwell_width, height=maxwell_height)
return fits
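# Hedged usage sketch (not part of the original notebook): how run_simulation
# and plot_and_fit are typically chained. The generator/solver instances are
# assumed to come from the star-imported modules above, and the keyword values
# below are illustrative assumptions, not values used elsewhere in this file.
def _example_pipeline(generator, solver):
    """Illustrative only: run a small simulation and fit the velocity distributions."""
    frame = run_simulation(generator, solver, compound_step=True, scale=4,
                           time_steps=100, save_step=20)
    return plot_and_fit(frame, title="demo run")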
# In[4]:
import sys
def check(generator, solver, compound, **sim_kwargs):
result = run_simulation(generator, solver, compound, **sim_kwargs)
fits = plot_and_fit(result, draw_maxwell=False, draw_particles=False)
fits = pd.DataFrame( {"initial":fits.iloc[0], "final":fits.iloc[-1]}, columns = ("initial","final"))
fits["improvement"] = fits.initial/fits.final
return fits, result
def check_fits(SolverClass, tpbs=one_tpbs, types=list(gen_sane_types()), params=list(gen_sane_params()), Generator=TaskGenerator, init_kwargs={}, **sim_kwargs):
for t in types:
for p in params:
for tpb in (1,) if SolverClass.no_blocks else tpbs:
solver = SolverClass(types=t, coord_format=p['form'], threadsperblock=tpb, scalar=p['scalar'], **init_kwargs)
result = {k: v.__name__ for k, v in t.items()}
result.update(p)
result["tpb"] = tpb
result["form"] = p["form"].name
sys.stdout.write("\r" + str(result))
fits, data = check(Generator(scalar=p['scalar'], **t), solver, p['compound'], **sim_kwargs)
result["fit_speed"] = fits.final.speed_stat
result["fit_xvel"] = fits.final.xvel_stat
result["data"] = data
yield result
def pick_outliers(checks):
outliers = checks[np.logical_or(checks.fit_speed==checks.fit_speed.max(), checks.fit_xvel==checks.fit_xvel.max())]
for d in outliers.data:
multi_particles(d)
return outliers
def test_fits(SolverClass, gen_types=gen_sane_types, **sim_kwargs):
checks = pd.DataFrame.from_records(check_fits(SolverClass, **sim_kwargs))
for k, c in checks[ | pd.isnull(checks.fit_speed) | pandas.isnull |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""create csv file to train."""
import os
import argparse
import numpy as np
import pandas as pd
parser = argparse.ArgumentParser(description='create csv file to train')
parser.add_argument('--out_path', type=str, default="./", help="Path of csv files, default is ./")
parser.add_argument('--random', type=int, default=0, help='random to create csv files, 0 -- not random, 1 -- random')
def split_train_eval(out_path, max_len=50, index_len=10, random=False, seed=0):
"""split train and eval sets"""
np.random.seed(seed)
number_list = np.arange(max_len)
eval_list = np.random.choice(number_list, size=index_len, replace=False)
if not random:
eval_list = np.array([11, 15, 19, 30, 42, 43, 44, 46, 48, 49])
train_name_list = ['Case' + str(k).zfill(2) + '.mhd' for k in number_list if k not in eval_list]
eval_name_list = ['Case' + str(k).zfill(2) + '.mhd' for k in eval_list]
train_name_p = pd.DataFrame(train_name_list)
train_name_p.to_csv(os.path.join(out_path, 'train.csv'), header=None, index=False)
eval_name_p = | pd.DataFrame(eval_name_list) | pandas.DataFrame |
"""
omg: Omics Mock Generator
Generates a mock dataset of omics data (importable in EDD):
transcriptomics, proteomics, and metabolomics
Requirements: Python 3.7.2, cobra, numpy, pandas.
"""
__author__ = 'LBL-QMM'
__copyright__ = 'Copyright (C) 2019 Berkeley Lab'
__license__ = ''
__status__ = 'Alpha'
__date__ = 'Dec 2019'
__version__ = '0.1.1'
import argparse
import collections as col
import os
import random
import re
import statistics
import sys
import urllib.parse
import urllib.request
import warnings
from shutil import copyfile
from enum import Enum
from typing import NewType, Dict, List, Any, OrderedDict, Counter
import cobra
from cobra.util.array import create_stoichiometric_matrix
import numpy as np
import pandas as pd
from cobra.exceptions import OptimizationError, Infeasible
# Type annotations
Filename = NewType('Filename', str)
# Enumerations
class Omics(Enum):
"""Enumeration with supported omics data types."""
PROTEOMICS = 0
TRANSCRIPTOMICS = 1
METABOLOMICS = 2
def __str__(self):
return f'{str(self.name).lower()}'
# Constants
UNIPROT_URL = '''https://www.uniprot.org/uploadlists/'''
CTS_URL = '''https://cts.fiehnlab.ucdavis.edu/rest/convert/'''
# HOST NAME
HOST_NAME: str = 'ropacus'
# TODO: Move some constants to variables by program arguments
DATA_FILE_PATH: Filename = Filename('data')
# Output file path
OUTPUT_FILE_PATH: Filename = Filename('data/output')
# INCHIKEY_TO_CID_MAP_FILE_PATH: mapping file path to map inchikey to cids
INCHIKEY_TO_CID_MAP_FILE_PATH: Filename = Filename('mapping')
# MODEL_FILENAME: Filename = Filename('iECIAI39_1322.xml') # E. coli
MODEL_FILENAME: Filename = Filename('reannotated_base_v3.sbml') # R. opacus
MODEL_FILEPATH: Filename = Filename('')
# Training file name
TRAINING_FILE_NAME: Filename = Filename('')
TRAINING_FILE_PATH: Filename = Filename('')
# Start time and stop time
TIMESTART: float = 0.0
TIMESTOP: float = 8.0
NUMPOINTS: int = 9
# Initial OD value
INITIAL_OD = 0.01
# number of reactions and instances
NUM_REACTIONS: int = None
NUM_INSTANCES: int = None
# NOTE: user input to the program
REACTION_ID_ECOLI: str = 'BIOMASS_Ec_iJO1366_core_53p95M' # E. coli
REACTION_ID: str = 'biomass_target' # R. opacus
# REACTION_ID: str = 'SRC_C00185_e' # R. opacus
GENE_IDS_DBS: List[str] = ['kegg.genes'] # R. opacus
# GENE_IDS_DBS: List[str] = ['uniprot', 'goa', 'ncbigi'] # E. coli
UNITS: Dict[Omics, str] = {
Omics.PROTEOMICS: 'proteins/cell',
Omics.TRANSCRIPTOMICS: "FPKM",
Omics.METABOLOMICS: "mM"
}
# Fix the flux value to -15 as we have data for this constraint
LOWER_BOUND: int = -15
UPPER_BOUND: int = -15
# Internals
_EPS = np.finfo(np.double).eps
def ansi(num: int):
"""Return function that escapes text with ANSI color n."""
return lambda txt: f'\033[{num}m{txt}\033[0m'
# pylint: disable=invalid-name
gray, red, green, yellow, blue, magenta, cyan, white = map(ansi,
range(90, 98))
# pylint: enable=invalid-name
#=============================================================================
def get_flux_time_series(model, ext_metabolites, grid, user_params):
'''
Generate fluxes and OD
'''
## First unpack the time steps for the grid provided
tspan, delt = grid
## Create a panda series containing the cell concentation for each time point
cell = pd.Series(index=tspan)
cell0 = user_params['initial_OD'] # in gDW/L
t0 = user_params['timestart']
cell[t0] = cell0
## Create a dataframe that constains external metabolite names and their concentrations
# First organize external metabolites and their initial concentrations
met_names = []
initial_concentrations = []
for met, init_conc in ext_metabolites.items():
met_names.append(met)
initial_concentrations.append(init_conc)
# Create dataframe containing external metabolites
Emets = pd.DataFrame(index=tspan, columns=met_names)
# Add initial concentrations for external metabolites
Emets.loc[t0] = initial_concentrations
# Create Dictionary mapping exchange reactions to the corresponding external metabolite
Erxn2Emet = {r.id: r.reactants[0].id for r in model.exchanges if r.reactants[0].id in met_names}
## Create storage for timeseries of models and solutions
# Model time series
model_TS = pd.Series(index=tspan)
# Solution time series
solution_TS = pd.Series(index=tspan)
## Main for loop solving the model for each time step and adding the corresponding OD and external metabolites created
volume = 1.0 # volume set arbitrarily to one because the system is extensive
for t in tspan:
# Adding constraints for each time point without permanent changes to the model
with model:
for rxn, met in Erxn2Emet.items():
# For each exchange reaction set lower bound such that the corresponding
# external metabolite concentration does not become negative
model.reactions.get_by_id(rxn).lower_bound = max(model.reactions.get_by_id(rxn).lower_bound,
-Emets.loc[t,met]*volume/cell[t]/delt)
# Calculate fluxes
solution_t = model.optimize()
# Store the solution and model for each timepoint for future use (e.g. MOMA)
solution_TS[t] = solution_t
model_TS[t] = model.copy()
# Calculate OD and external metabolite concentrations for next time point t+delta
cell[t+delt], Emets.loc[t+delt] = advance_OD_Emets(Erxn2Emet, cell[t], Emets.loc[t], delt, solution_t, user_params)
print(t, solution_t.status, solution_t[user_params['BIOMASS_REACTION_ID']]) # Minimum output for testing
return solution_TS, model_TS, cell, Emets, Erxn2Emet
def advance_OD_Emets(Erxn2Emet, old_cell, old_Emets, delt, solution, user_params):
# Output is same as input if nothing happens in the if clause
new_cell = old_cell
new_Emets = old_Emets
# Obtain the value of mu (growth rate)
mu = solution[user_params['BIOMASS_REACTION_ID']]
# Calculate OD and external metabolite concentrations for next step
if solution.status == 'optimal' and mu > 1e-6: # Update only if solution is optimal and mu is not zero, otherwise do not update
# Calculating next time point's OD
new_cell = old_cell *np.exp(mu*delt)
# Calculating external external metabolite concentrations for next time point
for rxn, met in Erxn2Emet.items():
new_Emets[met] = max(old_Emets.loc[met]-solution[rxn]/mu*old_cell*(1-np.exp(mu*delt)),0.0)
return new_cell, new_Emets
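# Hedged usage sketch (not from the original module): how the two dFBA helpers
# above are typically wired together. The parameter values, the time grid and
# the metabolite concentrations below are illustrative assumptions.
def _example_dfba_run(model):
    """Illustrative only: build a time grid and run get_flux_time_series."""
    example_params = {
        'initial_OD': 0.01,                                        # gDW/L, assumed
        'timestart': 0.0,
        'BIOMASS_REACTION_ID': 'BIOMASS_Ec_iJO1366_core_53p95M',   # assumed reaction id
    }
    delt = 1.0
    tspan = np.arange(0.0, 8.0 + delt, delt)                       # hours
    ext_mets = {'glc__D_e': 22.2, 'nh4_e': 18.7}                   # mM, illustrative
    return get_flux_time_series(model, ext_mets, (tspan, delt), example_params)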
def getBEFluxes(model_TS, design, solution_TS, grid):
## Unpacking time points grid
tspan, delt = grid
## Parameters for flux constraints
high = 1.1
low = 0.50
## Unpack information for desired flux changes
# Get names for reaction targets
reaction_names =list(design.index[1:])
# Find number of target reactions and number of designs (or strains changed)
#n_reactions = design.shape[1] - 1
#n_instances = design.shape[0] - 1
## Time series containing the flux solution obtained through MOMA
solutionsMOMA_TS = pd.Series(index=tspan)
## Main loop: for each strain and at each time point, find new flux profile through MOMA
#for i in range(0,n_instances):
for t in tspan:
model = model_TS[t]
sol1 = solution_TS[t] # Reference solution calculated for each time point
with model:
# Adding the fluxed modifications for chosen reactions
for reaction in reaction_names:
flux = sol1.fluxes[reaction]
lbcoeff =low
ubcoeff =high
if flux < 0:
lbcoeff = high
ubcoeff = low
reaction_constraint = model.problem.Constraint(model.reactions.get_by_id(reaction).flux_expression,
lb = sol1.fluxes[reaction]*design[reaction]*lbcoeff,
ub = sol1.fluxes[reaction]*design[reaction]*ubcoeff)
#lb = model.reactions.get_by_id(reaction).lower_bound*design[reaction],
#ub = model.reactions.get_by_id(reaction).upper_bound*design[reaction])
model.add_cons_vars(reaction_constraint)
# Reference solution calculated for each time point in above cell for wild type
#sol1 = solution_TS[t]
# Moma solution for each time point
sol2 = cobra.flux_analysis.moma(model, solution=sol1, linear=False)
# saving the moma solutions across timepoints
solutionsMOMA_TS[t] = sol2
return solutionsMOMA_TS
def integrate_fluxes(solution_TS, model_TS, ext_metabolites, grid, user_params):
## First unpack the time steps for the grid provided
tspan, delt = grid
## Create a panda series containing the cell concentation for each time point
cell = pd.Series(index=tspan)
cell0 = user_params['initial_OD'] # in gDW/L
t0 = user_params['timestart']
cell[t0] = cell0
## Create a dataframe that constains external metabolite names and their concentrations (DUPLICATED CODE)
# First organize external metabolites and their initial concentrations
model = model_TS[0]
met_names = []
initial_concentrations = []
for met, init_conc in ext_metabolites.items():
met_names.append(met)
initial_concentrations.append(init_conc)
# Create dataframe containing external metabolites
Emets = pd.DataFrame(index=tspan, columns=met_names)
# Add initial concentrations for external metabolites
Emets.loc[t0] = initial_concentrations
# Create Dictionary mapping exchange reactions to the corresponding external metabolite
Erxn2Emet = {r.id: r.reactants[0].id for r in model.exchanges if r.reactants[0].id in met_names}
## Main loop adding contributions for each time step
for t in tspan:
# Calculate OD and external metabolite concentrations for next time point t+delta
cell[t+delt], Emets.loc[t+delt] = advance_OD_Emets(Erxn2Emet, cell[t], Emets.loc[t], delt, solution_TS[t], user_params)
return cell, Emets
def get_proteomics_transcriptomics_data(model, solution):
"""
:param model:
:param solution:
:param condition:
:return:
"""
# pre-determined linear constant (NOTE: Allow user to set this via parameter)
# DISCUSS!!
k = 0.8
q = 0.06
proteomics = {}
transcriptomics = {}
rxnIDs = solution.fluxes.keys()
for rxnId in rxnIDs:
reaction = model.reactions.get_by_id(rxnId)
for gene in list(reaction.genes):
# this will ignore all the reactions that does not have the gene.annotation property
# DISCUSS!!
if gene.annotation:
if 'uniprot' not in gene.annotation:
if 'goa' in gene.annotation:
protein_id = gene.annotation['goa']
else:
break
else:
protein_id = gene.annotation['uniprot'][0]
# add random noise which is 5 percent of the signal
                noiseSigma = 0.05 * solution.fluxes[rxnId]/k
                noise = noiseSigma*np.random.randn()
proteomics[protein_id] = abs((solution.fluxes[rxnId]/k) + noise)
# create transcriptomics dict
                noiseSigma = 0.05 * proteomics[protein_id]/q
                noise = noiseSigma*np.random.randn()
transcriptomics[gene.id] = abs((proteomics[protein_id]/q) + noise)
return proteomics, transcriptomics
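# Hedged numeric illustration of the linear mapping above (assuming k = 0.8,
# q = 0.06 and ignoring the 5% Gaussian noise): a flux of 1.2 mmol/gDW/h maps
# to 1.2 / 0.8 = 1.5 proteins/cell and then to 1.5 / 0.06 = 25.0 FPKM.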
def get_metabolomics_data(model, solution, mapping_file):
"""
:param model:
:param condition:
:return:
"""
metabolomics = {}
metabolomics_with_old_ids = {}
# get metabolites
# read the inchikey to pubchem ids mapping file
inchikey_to_cid = {}
inchikey_to_cid = read_pubchem_id_file(mapping_file)
# create the stoichoimetry matrix fomr the model as a Dataframe and convert all the values to absolute values
sm = create_stoichiometric_matrix(model, array_type='DataFrame')
# get all the fluxes across reactions from the solution
fluxes = solution.fluxes
# calculating the dot product of the stoichiometry matrix and the fluxes to calculate the net change
# in concentration of the metabolites across reactions
net_change_in_concentrations = sm.abs().dot(fluxes.abs())
#net_change_in_concentrations = net_change_in_concentrations.abs()
# converting all na values to zeroes and counting the total number of changes that happens for each metabolite
num_changes_in_metabolites = sm.fillna(0).astype(bool).sum(axis=1)
for met_id, conc in net_change_in_concentrations.items():
metabolite = model.metabolites.get_by_id(met_id)
# if there is an inchikey ID for the metabolite
if 'inchi_key' in metabolite.annotation:
# if it is a list get the first element
if type(metabolite.annotation['inchi_key']) is list:
inchi_key = metabolite.annotation['inchi_key'][0]
else:
inchi_key = metabolite.annotation['inchi_key']
if inchi_key in inchikey_to_cid.keys():
# if the CID is not in the metabolomics dict keys AND the mapped value is not None and the reactions flux is not 0
if (inchikey_to_cid[inchi_key] not in metabolomics.keys()) and (inchikey_to_cid[inchi_key] is not None):
metabolomics[inchikey_to_cid[inchi_key]] = conc/num_changes_in_metabolites.iloc[num_changes_in_metabolites.index.get_loc(met_id)]
metabolomics_with_old_ids[met_id] = conc/num_changes_in_metabolites.iloc[num_changes_in_metabolites.index.get_loc(met_id)]
elif (inchikey_to_cid[inchi_key] is not None):
metabolomics[inchikey_to_cid[inchi_key]] += conc/num_changes_in_metabolites.iloc[num_changes_in_metabolites.index.get_loc(met_id)]
metabolomics_with_old_ids[met_id] = conc/num_changes_in_metabolites.iloc[num_changes_in_metabolites.index.get_loc(met_id)]
return metabolomics, metabolomics_with_old_ids
def get_multiomics(model, solution, mapping_file, old_ids=False):
"""
:param model: cobra model object
:param solution: solution for the model optimization using cobra
:param data_type: defines the type of -omics data to generate (all by default)
:return:
"""
proteomics = {}
transcriptomics = {}
fluxomics = {}
metabolomics = {}
proteomics, transcriptomics = get_proteomics_transcriptomics_data(model, solution)
metabolomics, metabolomics_with_old_ids = get_metabolomics_data(model, solution, mapping_file)
if old_ids:
return (proteomics, transcriptomics, metabolomics, metabolomics_with_old_ids)
else:
return (proteomics, transcriptomics, metabolomics)
def read_pubchem_id_file(mapping_file):
inchikey_to_cid = {}
with open(mapping_file, 'r') as fh:
try:
line = fh.readline()
while line:
# checking to ignore inchikey records with no cid mappings
if (len(line.split()) > 1):
inchikey_to_cid[line.split()[0]] = 'CID:'+line.split()[1]
else:
inchikey_to_cid[line.strip()] = None
line = fh.readline()
# NOTE: propagated exception, raise
except Exception as ex:
print("Error in reading file!")
print(ex)
return inchikey_to_cid
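# Hedged illustration of the mapping-file layout expected by
# read_pubchem_id_file (inferred from the parsing logic above, not taken from
# an actual data file): one whitespace-separated record per line, InChIKey
# first, PubChem CID second; lines without a CID are stored with value None.
#
#   WQZGKKKJIJFFOK-GASJEMHNSA-N 5793
#   AAAAAAAAAAAAAA-UHFFFAOYSA-N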
def write_experiment_description_file(output_file_path, line_name='WT', label=''):
# HARD CODED ONLY FOR WILD TYPE!
if not os.path.isdir(output_file_path):
os.mkdir(output_file_path)
# create the filename
experiment_description_file_name = f'{output_file_path}/EDD_experiment_description_file{label}.csv'
#write experiment description file
try:
with open(experiment_description_file_name, 'w') as fh:
fh.write(f'Line Name, Line Description, Part ID, Media, Shaking Speed, Starting OD, Culture Volume, Flask Volume, Growth Temperature, Replicate Count\n')
if line_name == 'WT':
line_descr = 'Wild type E. coli'
part_id = 'ABFPUB_000310'
else:
line_descr = ''
part_id = 'ABFPUB_000310' #THIS SHOULD BE CHANGED!
fh.write(f"{line_name}, {line_descr}, {part_id}, M9, 1, 0.1, 50, 200, 30, 1\n")
except Exception as ex:
print("Error in writing file!")
print(ex)
def write_in_al_format(time_series_omics_data, omics_type, user_params, label=''):
try:
output_file_path = user_params['al_omics_file_path']
if not os.path.isdir(output_file_path):
os.mkdir(output_file_path)
for timepoint, omics_dict in time_series_omics_data.items():
al_file_name = f'{output_file_path}/AL_{omics_type}_{timepoint}_hrs{label}.csv'
with open(al_file_name, 'w') as ofh:
dataframe = pd.DataFrame.from_dict(omics_dict, orient='index', columns=[f'{omics_type}_value'])
for index, series in dataframe.iteritems():
for id, value in series.iteritems():
ofh.write(f'{id},{value}\n')
except:
print('Error in writing in Arrowland format')
def write_in_edd_format(time_series_omics_data, omics_type, user_params, line_name, label=''):
# Dictionary to map omics type with the units of measurement
unit_dict = { "fluxomics": 'mmol/gdwh',\
"proteomics": 'proteins/cell',\
"transcriptomics": "FPKM",\
"metabolomics": "mM"
}
# write in EDD format
output_file_path = user_params['edd_omics_file_path']
# create the filenames
omics_file_name: str = f'{output_file_path}/EDD_{omics_type}{label}.csv'
if not os.path.isdir(output_file_path):
os.mkdir(output_file_path)
# open a file to write omics data for each type and for all timepoints and constraints
try:
with open(omics_file_name, 'w') as fh:
fh.write(f'Line Name,Measurement Type,Time,Value,Units\n')
for timepoint, omics_dict in time_series_omics_data.items():
dataframe = pd.DataFrame.from_dict(omics_dict, orient='index', columns=[f'{omics_type}_value'])
for index, series in dataframe.iteritems():
for id, value in series.iteritems():
fh.write((f'{line_name},{id},{timepoint},{value},{unit_dict[omics_type]}\n'))
except Exception as ex:
print("Error in writing file!")
print(ex)
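# Hedged illustration of the EDD layout written above (the row values are made
# up and the measurement ids are placeholders, shown here for proteomics):
#
#   Line Name,Measurement Type,Time,Value,Units
#   WT,P0A9B2,0.0,1523.4,proteins/cell
#   WT,P0A6F3,0.0,87.9,proteins/cell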
def write_omics_files(time_series_omics_data, omics_type, user_params, line_name='WT', al_format=False, label=''):
"""
:param dataframe:
:param data_type:
:return:
"""
# check which format we have to create the data in
if not al_format:
# write the omics files in EDD format by separating in terms of the timepoints
write_in_edd_format(time_series_omics_data, omics_type, user_params, line_name, label=label)
else:
# write the omics files in ARROWLAND format by separating in terms of the timepoints
write_in_al_format(time_series_omics_data, omics_type, user_params, label=label)
def write_OD_data(cell, output_file_path, line_name='WT', label=''):
# create the filename
OD_data_file: str = f'{output_file_path}/EDD_OD{label}.csv'
if not os.path.isdir(output_file_path):
os.mkdir(output_file_path)
# write experiment description file
try:
with open(OD_data_file, 'w') as fh:
fh.write(f'Line Name,Measurement Type,Time,Value,Units\n')
for index, value in cell.items():
fh.write((f'{line_name},Optical Density,{index},{value},n/a\n'))
except Exception as ex:
print("Error in writing OD file")
print(ex)
def write_training_data_with_isopentenol(df, filename):
filename = f'{OUTPUT_FILE_PATH}/{filename}'
df.to_csv(filename, header=True, index=False)
def write_external_metabolite(substrates, output_file_path, line_name='WT', label=''):
# create the filename
external_metabolites: str = f'{output_file_path}/EDD_external_metabolites{label}.csv'
if not os.path.isdir(output_file_path):
os.mkdir(output_file_path)
# Table for metabolites to be exported
glucose = substrates.loc[:, 'glc__D_e']
ammonium = substrates.loc[:, 'nh4_e']
isopentenol = substrates.loc[:, 'isoprenol_e']
acetate = substrates.loc[:, 'ac_e']
formate = substrates.loc[:, 'for_e']
lactate = substrates.loc[:, 'lac__D_e']
ethanol = substrates.loc[:, 'etoh_e']
# output_metabolites = {
# "5793": glucose, "16741146": ammonium, "12988": isopentenol, "175": acetate, "283": formate, "612": #lactate, "702": ethanol}
output_metabolites = {
"5793": glucose, "12988": isopentenol, "175": acetate, "283": formate, "612": lactate, "702": ethanol}
# Write file lines
try:
with open(external_metabolites,'w') as fh:
# Top header
fh.write(f'Line Name,Measurement Type,Time,Value,Units\n')
# Metabolite lines
for cid in output_metabolites:
met = output_metabolites[cid]
for index,value in met.items():
fh.write((f'{line_name},CID:{cid},{index},{value},mM\n'))
except Exception as ex:
print("Error in writing OD file")
print(ex)
def get_random_number():
"""
:return:
"""
random.seed(12312)
return random.random()
def add_random_noise():
"""
:return:
"""
pass
def get_list_of_reactions(file_name):
"""
:param file_name: Name of the model file (has to be xml for now)
:return: None (prints the list of reactions that has mass in them)
"""
# Load model¶depending on the kind of file (the file has to be xml)
if file_name.endswith(".xml"):
model = cobra.io.read_sbml_model(file_name)
# Print out the reaction name and reaction id for all reactions related to BIOMASS production:
print("List of reactions related to BIOMASS production:")
for rxn in model.reactions:
if rxn.name is not None and 'BIOMASS' in rxn.id:
print("{}: {}".format(rxn.id, rxn.name))
def get_optimized_solution(model, reaction_id):
"""
:param model:
:param reaction_id:
:return solution:
"""
# fix the flux value to -15 as we have data for this constraint
    model.reactions.get_by_id(reaction_id).lower_bound = LOWER_BOUND
    model.reactions.get_by_id(reaction_id).upper_bound = UPPER_BOUND
# print(model.reactions.get_by_id(reaction_id))
print("Displaying the reaction bounds after constraining them:")
print(model.reactions.get_by_id(reaction_id).bounds)
# optimizing the model for only the selected reaction
# model.slim_optimize()
# optimizing model
solution = model.optimize()
return solution
def read_model(file_name):
"""
:param file_name:
:return model:
"""
# Load model¶depending on the kind of file
if file_name.endswith(".xml"):
model = cobra.io.read_sbml_model(file_name)
elif file_name.endswith(".json"):
model = cobra.io.load_json_model(file_name)
return model
def model_has_IPP_pathway(model):
'''
    Check whether the model contains the following reactions; if it does, it already has the isopentenol pathway:
['HMGCOAS','HMGCOAR','MEVK1','PMD','IPMPP','IPtrpp','IPtex','EX_isoprenol_e']
'''
reaction_list = ['HMGCOAS','HMGCOAR','MEVK1','PMD','IPMPP','IPtrpp','IPtex','EX_isoprenol_e']
model_reactions = [r.id for r in model.reactions]
for reac in reaction_list:
if reac not in model_reactions:
return False
return True
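# Hedged usage note: the check above is typically used to guard the pathway
# insertion defined next; the file names below are illustrative assumptions.
#
#   model = read_model('data/iJO1366.json')
#   if not model_has_IPP_pathway(model):
#       sce = cobra.io.load_json_model('data/iMM904.json')
#       model = add_isopentenol_pathway(model, sce)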
def add_isopentenol_pathway(model, sce):
'''
    Add the isopentenol pathway by copying it from an S. cerevisiae model instance;
    we used the iMM904.json model.
'''
# Load S. cerevisiae model
# sce = cobra.io.load_json_model(f'data/{cerevisiae_modelfile}')
# Add mevalonate pathway reactions from S. cerevisiae model
for x in ['HMGCOAS','HMGCOAR','MEVK1','DPMVD']:
r = sce.reactions.get_by_id(x).copy()
r.gene_reaction_rule = ''
model.add_reaction(r)
# Update gene names
model.reactions.get_by_id('HMGCOAS').gene_reaction_rule = 'HMGS'
model.reactions.get_by_id('HMGCOAR').gene_reaction_rule = 'HMGR'
model.reactions.get_by_id('MEVK1').gene_reaction_rule = 'MK'
model.reactions.get_by_id('DPMVD').gene_reaction_rule = 'PMD'
# Add IP to model
m = model.metabolites.ipdp_c.copy()
m.id = 'ipmp_c'
m.name = 'Isopentenyl monophosphate'
m.formula = 'C5H9O4P'
m.charge = -2
model.add_metabolites([m])
# Update PMD reaction to convert mev-5p to IP
model.reactions.get_by_id('DPMVD').id = 'PMD'
model.reactions.get_by_id('PMD').add_metabolites({'5dpmev_c': 1.0, '5pmev_c': -1.0,
'ipdp_c': -1.0, 'ipmp_c': 1.0})
# Add isoprenol (isopentenol)
m = model.metabolites.ipmp_c.copy()
m.id = 'isoprenol_c'
m.name = 'Isopentenol'
m.formula = 'C5H10O'
m.charge = 0
model.add_metabolites([m])
# Add phosphatase reaction by AphA
r = model.reactions.CHLabcpp.copy()
r.id = 'IPMPP'
r.name = 'Isopentenyl monophosphate phosphatase'
r.gene_reaction_rule = 'AphA'
model.add_reactions([r])
r.add_metabolites({'chol_p': 1.0, 'atp_c': 1.0, 'chol_c': -1.0, 'adp_c': -1.0, 'h_c': -1.0, 'ipmp_c': -1.0, 'isoprenol_c': 1.0})
# Add periplasmic and extracellular isoprenol
m = model.metabolites.isoprenol_c.copy()
m.id = 'isoprenol_p'
m.compartment = 'p'
model.add_metabolites([m])
m = model.metabolites.isoprenol_c.copy()
m.id = 'isoprenol_e'
m.compartment = 'e'
model.add_metabolites([m])
# Add periplasmic and extracellular transport reactions
r = model.reactions.ETOHtrpp.copy()
r.id = 'IPtrpp'
r.name = 'Isopentenol reversible transport via diffusion (periplasm)'
r.gene_reaction_rule = ''
model.add_reactions([r])
r.add_metabolites({'etoh_p': 1.0, 'etoh_c': -1.0, 'isoprenol_p': -1.0, 'isoprenol_c': 1.0})
r = model.reactions.ETOHtex.copy()
r.id = 'IPtex'
r.name = 'Isopentenol transport via diffusion (extracellular to periplasm)'
r.gene_reaction_rule = ''
model.add_reactions([r])
r.add_metabolites({'etoh_e': 1.0, 'etoh_p': -1.0, 'isoprenol_e': -1.0, 'isoprenol_p': 1.0})
# Add a boundary reaction
r = model.reactions.EX_etoh_e.copy()
r.id = 'EX_isoprenol_e'
r.name = 'Isopentenol exchange'
r.gene_reaction_rule = ''
model.add_reactions([r])
r.add_metabolites({'etoh_e': 1.0, 'isoprenol_e': -1.0})
# Write model to files
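    # NOTE: `user_params` is not an argument of this function; the line below
    # relies on a module- or script-level `user_params` dict (with a
    # 'modelfile' key) being defined elsewhere.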
outputfilename = user_params['modelfile'].split('.')[0] + '_IPP.json'
cobra.io.save_json_model(model, f'data/{outputfilename}')
return model
#=============================================================================
class Ropacus():
def __init__(self):
self.time_series_omics_data = {}
self.LOWER_BOUND = -15
self.UPPER_BOUND = -15
def generate_time_series_data(self, model):
# intiializing omics dictionaries to contain data across timepoints
proteomics_list: List = []
transcriptomics_list: List = []
fluxomics_list: List = []
metabolomics_list: List = []
# generating time series data for the following flux constraints
# 6, 9, 12, 15 corresponding to the times 0, 3, 6, 9 hours
# NOTE: The constraints and the timepoints should be supplied as command line inputs
time_series_omics_data = {}
experiment_timepoints = [0, 3, 6, 9]
flux_constraints = [6, 9, 12, 15]
# NOTE; constraints in flux_constraints, think about it
for i in range(len(flux_constraints)):
# Set global reactions bounds (in addition to local)
self.LOWER_BOUND = flux_constraints[i]
self.UPPER_BOUND = flux_constraints[i]
cobra_config = cobra.Configuration()
cobra_config.bounds = self.LOWER_BOUND, self.UPPER_BOUND
# Print the list of reaction names related to BIOMASS production
self.print_reactions(model)
# get fake proteomics data and write it to XLSX file
condition = 1
self.generate_mock_data(model, condition)
def add_random_noise(self):
# TODO
"""
:return:
"""
pass
def chemical_translation(self, dict_in: Dict[str, Any],
fmt_from: str = 'KEGG',
fmt_to: str = 'PubChem CID') -> Dict[str, Any]:
"""
Proxy to UCDavis Chemical Translation Service (CTS). Maps the keys of
the input dictionary keeping intact the values.
Default behaviour: map KEGG Compounds into PubChem CIDs
For details, see https://cts.fiehnlab.ucdavis.edu/services
"""
dict_out: Dict[str, float] = {}
print(gray('Mapping metabolites ids using CTS'), end='', flush=True)
ids_in: List[str] = list(dict_in.keys())
pattern = re.compile(
r"""(?:"searchTerm":")(\w+)(?:","results":\[")(\w+)(?:"])""")
for id_in in ids_in:
mapping_str: str = f'{fmt_from}/{fmt_to}/{id_in}'
mapping_data = urllib.parse.quote(mapping_str)
mapping_req = urllib.request.Request(CTS_URL + mapping_data)
with urllib.request.urlopen(mapping_req) as map_file:
mapping = map_file.read().strip().decode('utf-8')
match: re.Match = pattern.search(mapping)
if match:
assert match.group(1) == id_in
id_out: str = match.group(2)
if fmt_to == 'PubChem CID':
id_out = 'CID:' + id_out
dict_out[id_out] = dict_in[id_in]
print(green('.'), end='', flush=True)
dprint(f'Metabolite {id_in} mapped to {id_out}')
else:
print(red('.'), end='', flush=True)
dprint(yellow(f'Metabolite {id_in} mapping failed!'))
print(green('OK!'))
self.vprint(gray('Number of unmapped genes from'), fmt_from, gray('to'),
fmt_to, gray(':'), yellow(len(dict_in) - len(dict_out)))
return dict_out
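    # Hedged usage note (illustration only; the KEGG compound ids below are
    # assumptions and network access to the CTS service is required):
    #
    #   kegg_conc = {'C00031': 2.5, 'C00022': 0.4}          # mM, assumed
    #   cid_conc = self.chemical_translation(kegg_conc)
    #   # keys are remapped to e.g. 'CID:5793'; the values are passed through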
def dict_to_edd(self, omics_dict: Dict[str, float],
omics: Omics) -> pd.DataFrame:
"""Get dataframe with EDD format from dictionary with omics values"""
edd: List[OrderedDict[str, Any]] = []
sample: OrderedDict[str, Any]
for measurement, value in omics_dict.items():
sample = col.OrderedDict([
('Line Name', 'WT'),
('Measurement Type', measurement),
('Time', 0), # TODO: Generalize for time-series
('Value', value),
('Units', UNITS[omics])
])
edd.append(sample)
return | pd.DataFrame(edd) | pandas.DataFrame |
# %%
from bs4 import BeautifulSoup
import requests
import math
import pandas as pd
import numpy as np
import sys, os, fnmatch
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from datetime import datetime as dt
# %%
def get_version(s, version):
for v in version:
# split up '(F)SCIP' entry
if v.startswith("(F)SCIP"):
version.append(v.replace("(F)", ""))
version.append(v.replace("(F)", "F"))
version.remove(v)
if s in ["SPLX", "SOPLX"]:
s = "SoPlex"
elif s in ["MDOPT"]:
s = "MindOpt"
elif s in ["GLOP"]:
s = "Google-GLOP"
elif s in ["MSK"]:
s = "Mosek"
match = [v for v in version if v.lower().find(s.lower()) >= 0]
return match[0] if match else s
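# Hedged illustration of get_version (the solver short names and version
# strings below are made-up examples, not actual benchmark content):
#
#   get_version('MSK', ['Mosek-10.1', 'Gurobi-11.0'])   -> 'Mosek-10.1'
#   get_version('SPLX', ['SoPlex-6.0', 'CLP-1.17'])     -> 'SoPlex-6.0'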
# %%
def parse_table(url, timelimit=3600, threads=1):
"""
parse a specific table to generate a dictionary with the runtimes and
auxiliary information
"""
resp = requests.get(url)
soup = BeautifulSoup(resp.text, features="html.parser")
pre = soup.find_all("pre")
stats = {}
stats["date"] = pre[0].text.split("\n")[1].replace("=", "").replace("-", "").strip()
stats["title"] = pre[0].text.split("\n")[2].strip()
if url.find("lpsimp.html") >= 0:
tab = pre[3].text.split("\n")
tabmark = [ind for ind, i in enumerate(tab) if i.startswith("=====")]
_version = pre[2].text.split("\n")[1:-1]
_version = [x.split()[0].rstrip(":") for x in _version]
_score = tab[tabmark[0] - 2].split()[2:]
_solved = tab[tabmark[0] - 1].split()[1:]
solver = tab[tabmark[0] + 1].split()[2:]
stats["solver"] = solver
stats["nprobs"] = len(tab[tabmark[1] + 1 : tabmark[-1]])
stats["score"] = {solver[i]: float(_score[i]) for i in range(len(solver))}
stats["solved"] = {solver[i]: int(_solved[i]) for i in range(len(solver))}
stats["version"] = {s: get_version(s, _version) for s in solver}
stats["timelimit"] = int(
pre[3].text.split("\n")[-2].split()[1].replace(",", "")
)
stats["times"] = pd.DataFrame(
[l.split() for l in tab[tabmark[1] + 1 : tabmark[-1]]],
columns=["instance"] + solver,
)
elif url.find("lpbar.html") >= 0:
tab = pre[3].text.split("\n")
tabmark = [ind for ind, i in enumerate(tab) if i.startswith("=====")]
_version = pre[2].text.split("\n")[1:-1]
_version = [x.split()[0].rstrip(":") for x in _version]
_score = tab[tabmark[0] - 2].split()[2:]
_solved = tab[tabmark[0] - 1].split()[1:]
solver = tab[tabmark[0] + 1].split()[1:]
stats["solver"] = solver
stats["nprobs"] = len(tab[tabmark[1] + 1 : tabmark[-1]])
stats["score"] = {solver[i]: float(_score[i]) for i in range(len(solver))}
stats["solved"] = {solver[i]: int(_solved[i]) for i in range(len(solver))}
stats["version"] = {s: get_version(s, _version) for s in solver}
stats["timelimit"] = timelimit
stats["times"] = pd.DataFrame(
[l.split() for l in tab[tabmark[0] + 3 : tabmark[-1]]],
columns=["instance"] + solver,
)
elif url.find("network.html") >= 0:
tab = pre[2].text.split("\n")
tabmark = [ind for ind, i in enumerate(tab) if i.startswith("=====")]
_version = pre[1].text.split("\n")[1:-1]
_version = [x.split()[1] for x in _version]
_score = tab[3].split()
solver = tab[5].split()[3:]
stats["solver"] = solver
stats["nprobs"] = len(tab[tabmark[0] + 3 : tabmark[-1]])
stats["score"] = {solver[i]: float(_score[i]) for i in range(len(solver))}
stats["solved"] = {}
stats["version"] = {s: get_version(s, _version) for s in solver}
stats["timelimit"] = timelimit
stats["times"] = pd.DataFrame(
[l.split() for l in tab[tabmark[0] + 3 : tabmark[-1]]],
columns=["instance", "nodes", "arcs"] + solver,
)
stats["times"] = stats["times"].drop(["nodes", "arcs"], axis=1)
for s in solver:
stats["solved"][s] = (
stats["nprobs"]
- | pd.to_numeric(stats["times"][s], errors="coerce") | pandas.to_numeric |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
# test rechunk on mixed typed columns
data = pd.DataFrame({0: [1, 2], 1: [3, 4], 'a': [5, 6]})
df = from_pandas_df(data)
df = df.rechunk((2, 2)).rechunk({1: 3})
res = df.execute().fetch()
pd.testing.assert_frame_equal(data, res)
def test_series_map_execution(setup):
raw = pd.Series(np.arange(10))
s = from_pandas_series(raw, chunk_size=7)
with pytest.raises(ValueError):
        # dtype cannot be inferred here: the mapped values are ints, but keys
        # missing from the dict become NaN, so the actual result dtype is float
s.map({5: 10})
r = s.map({5: 10}, dtype=float)
result = r.execute().fetch()
expected = raw.map({5: 10})
pd.testing.assert_series_equal(result, expected)
r = s.map({i: 10 + i for i in range(7)}, dtype=float)
result = r.execute().fetch()
expected = raw.map({i: 10 + i for i in range(7)})
pd.testing.assert_series_equal(result, expected)
r = s.map({5: 10}, dtype=float, na_action='ignore')
result = r.execute().fetch()
expected = raw.map({5: 10}, na_action='ignore')
pd.testing.assert_series_equal(result, expected)
# dtype can be inferred
r = s.map({5: 10.})
result = r.execute().fetch()
expected = raw.map({5: 10.})
pd.testing.assert_series_equal(result, expected)
r = s.map(lambda x: x + 1, dtype=int)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
def f(x: int) -> float:
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
def f(x: int):
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series
raw2 = pd.Series([10], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2, dtype=float)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series, and dtype can be inferred
raw2 = pd.Series([10.], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test str
raw = pd.Series(['a', 'b', 'c', 'd'])
s = from_pandas_series(raw, chunk_size=2)
r = s.map({'c': 'e'})
result = r.execute().fetch()
expected = raw.map({'c': 'e'})
pd.testing.assert_series_equal(result, expected)
# test map index
raw = pd.Index(np.random.rand(7))
idx = from_pandas_index(pd.Index(raw), chunk_size=2)
r = idx.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_index_equal(result, expected)
def test_describe_execution(setup):
s_raw = pd.Series(np.random.rand(10))
# test one chunk
series = from_pandas_series(s_raw, chunk_size=10)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
# test multi chunks
series = from_pandas_series(s_raw, chunk_size=3)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(5)
df_raw = pd.DataFrame(rs.rand(10, 4), columns=list('abcd'))
df_raw['e'] = rs.randint(100, size=10)
# test one chunk
df = from_pandas_df(df_raw, chunk_size=10)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = series.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_series_equal(result, expected)
# test multi chunks
df = from_pandas_df(df_raw, chunk_size=3)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = df.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_frame_equal(result, expected)
# test skip percentiles
r = df.describe(percentiles=False, include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
expected.drop(['50%'], axis=0, inplace=True)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
df.describe(percentiles=[1.1])
with pytest.raises(ValueError):
# duplicated values
df.describe(percentiles=[0.3, 0.5, 0.3])
# test input dataframe which has unknown shape
df = from_pandas_df(df_raw, chunk_size=3)
df2 = df[df['a'] < 0.5]
r = df2.describe()
result = r.execute().fetch()
expected = df_raw[df_raw['a'] < 0.5].describe()
pd.testing.assert_frame_equal(result, expected)
def test_data_frame_apply_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
df = from_pandas_df(df_raw, chunk_size=5)
r = df.apply('ffill')
result = r.execute().fetch()
expected = df_raw.apply('ffill')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(['sum', 'max'])
result = r.execute().fetch()
expected = df_raw.apply(['sum', 'max'])
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sqrt)
result = r.execute().fetch()
expected = df_raw.apply(np.sqrt)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2]))
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2]))
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sum, axis='index')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='index')
pd.testing.assert_series_equal(result, expected)
r = df.apply(np.sum, axis='columns')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='columns')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1)
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1, result_type='expand')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
pd.testing.assert_frame_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
def test_series_apply_execute(setup):
idxes = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idxes)
series = from_pandas_series(s_raw, chunk_size=5)
r = series.apply('add', args=(1,))
result = r.execute().fetch()
expected = s_raw.apply('add', args=(1,))
pd.testing.assert_series_equal(result, expected)
r = series.apply(['sum', 'max'])
result = r.execute().fetch()
expected = s_raw.apply(['sum', 'max'])
pd.testing.assert_series_equal(result, expected)
r = series.apply(np.sqrt)
result = r.execute().fetch()
expected = s_raw.apply(np.sqrt)
pd.testing.assert_series_equal(result, expected)
r = series.apply('sqrt')
result = r.execute().fetch()
expected = s_raw.apply('sqrt')
pd.testing.assert_series_equal(result, expected)
r = series.apply(lambda x: [x, x + 1], convert_dtype=False)
result = r.execute().fetch()
expected = s_raw.apply(lambda x: [x, x + 1], convert_dtype=False)
pd.testing.assert_series_equal(result, expected)
s_raw2 = pd.Series([np.array([1, 2, 3]), np.array([4, 5, 6])])
series = from_pandas_series(s_raw2)
dtypes = pd.Series([np.dtype(float)] * 3)
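# Note (an inference from the call below, not stated elsewhere): output_type='dataframe'
# and explicit dtypes are supplied because Mars cannot infer the expanded frame's
# dtypes from the callable alone, whereas pandas infers them at execution time.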
r = series.apply(pd.Series, output_type='dataframe',
dtypes=dtypes)
result = r.execute().fetch()
expected = s_raw2.apply(pd.Series)
pd.testing.assert_frame_equal(result, expected)
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_apply_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.apply(lambda row: str(row[0]) + row[1], axis=1)
result = r.execute().fetch()
expected = df1.apply(lambda row: str(row[0]) + row[1], axis=1)
pd.testing.assert_series_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.apply(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.apply(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_transform_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
idx_vals = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idx_vals)
def rename_fn(f, new_name):
f.__name__ = new_name
return f
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
# DATAFRAME CASES
df = from_pandas_df(df_raw, chunk_size=5)
# test transform scenarios on data frames
r = df.transform(lambda x: list(range(len(x))))
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))))
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: list(range(len(x))), axis=1)
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(['cumsum', 'cummax', lambda x: x + 1])
result = r.execute().fetch()
expected = df_raw.transform(['cumsum', 'cummax', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
fn_dict = OrderedDict([
('A', 'cumsum'),
('D', ['cumsum', 'cummax']),
('F', lambda x: x + 1),
])
r = df.transform(fn_dict)
result = r.execute().fetch()
expected = df_raw.transform(fn_dict)
pd.testing.assert_frame_equal(result, expected)
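# The _call_agg flag appears to be a Mars-internal hook that makes transform behave
# like DataFrame.agg, which is why the pandas expectations below use df_raw.agg.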
r = df.transform(lambda x: x.iloc[:-1], _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1])
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], axis=1, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1], axis=1)
pd.testing.assert_frame_equal(result, expected)
fn_list = [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]
r = df.transform(fn_list, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_list)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.sum(), _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.sum())
pd.testing.assert_series_equal(result, expected)
fn_dict = OrderedDict([
('A', rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1')),
('D', [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]),
('F', lambda x: x.iloc[:-1].reset_index(drop=True)),
])
r = df.transform(fn_dict, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_dict)
pd.testing.assert_frame_equal(result, expected)
# SERIES CASES
series = from_pandas_series(s_raw, chunk_size=5)
# test transform scenarios on series
r = series.transform(lambda x: x + 1)
result = r.execute().fetch()
expected = s_raw.transform(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
r = series.transform(['cumsum', lambda x: x + 1])
result = r.execute().fetch()
expected = s_raw.transform(['cumsum', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
# test transform on string dtype
df_raw = pd.DataFrame({'col1': ['str'] * 10, 'col2': ['string'] * 10})
df = from_pandas_df(df_raw, chunk_size=3)
r = df['col1'].transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw['col1'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
r = df.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw.transform(lambda x: x + '_suffix')
pd.testing.assert_frame_equal(result, expected)
r = df['col2'].transform(lambda x: x + '_suffix', dtype=np.dtype('str'))
result = r.execute().fetch()
expected = df_raw['col2'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_transform_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.transform({'b': lambda x: x + '_suffix'})
result = r.execute().fetch()
expected = df1.transform({'b': lambda x: x + '_suffix'})
pd.testing.assert_frame_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_string_method_execution(setup):
s = pd.Series(['s1,s2', 'ef,', 'dd', np.nan])
s2 = pd.concat([s, s, s])
series = from_pandas_series(s, chunk_size=2)
series2 = from_pandas_series(s2, chunk_size=2)
# test getitem
r = series.str[:3]
result = r.execute().fetch()
expected = s.str[:3]
pd.testing.assert_series_equal(result, expected)
# test split, expand=False
r = series.str.split(',', n=2)
result = r.execute().fetch()
expected = s.str.split(',', n=2)
pd.testing.assert_series_equal(result, expected)
# test split, expand=True
r = series.str.split(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.split(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test rsplit
r = series.str.rsplit(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.rsplit(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test cat all data
r = series2.str.cat(sep='/', na_rep='e')
result = r.execute().fetch()
expected = s2.str.cat(sep='/', na_rep='e')
assert result == expected
# test cat list
r = series.str.cat(['a', 'b', np.nan, 'c'])
result = r.execute().fetch()
expected = s.str.cat(['a', 'b', np.nan, 'c'])
pd.testing.assert_series_equal(result, expected)
# test cat series
r = series.str.cat(series.str.capitalize(), join='outer')
result = r.execute().fetch()
expected = s.str.cat(s.str.capitalize(), join='outer')
pd.testing.assert_series_equal(result, expected)
# test extractall
r = series.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
result = r.execute().fetch()
expected = s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
pd.testing.assert_frame_equal(result, expected)
# test extract, expand=False
r = series.str.extract(r'[ab](\d)', expand=False)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=False)
pd.testing.assert_series_equal(result, expected)
# test extract, expand=True
r = series.str.extract(r'[ab](\d)', expand=True)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=True)
pd.testing.assert_frame_equal(result, expected)
def test_datetime_method_execution(setup):
# test datetime
s = pd.Series([pd.Timestamp('2020-1-1'),
pd.Timestamp('2020-2-1'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.year
result = r.execute().fetch()
expected = s.dt.year
pd.testing.assert_series_equal(result, expected)
r = series.dt.strftime('%m-%d-%Y')
result = r.execute().fetch()
expected = s.dt.strftime('%m-%d-%Y')
pd.testing.assert_series_equal(result, expected)
# test timedelta
s = pd.Series([pd.Timedelta('1 days'),
pd.Timedelta('3 days'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.days
result = r.execute().fetch()
expected = s.dt.days
pd.testing.assert_series_equal(result, expected)
def test_isin_execution(setup):
# one-chunk series vs. multi-chunk series
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=10)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multi-chunk series vs. one-chunk series
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=4)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multi-chunk series vs. multi-chunk series
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = np.array([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = tensor(b, chunk_size=3)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = {2, 1, 9, 3} # set
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 3)))
df = from_pandas_df(raw, chunk_size=(5, 2))
# set
b = {2, 1, raw[1][0]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin(b)
pd.testing.assert_frame_equal(result, expected)
# mars object
b = tensor([2, 1, raw[1][0]], chunk_size=2)
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin([2, 1, raw[1][0]])
pd.testing.assert_frame_equal(result, expected)
# dict
b = {1: tensor([2, 1, raw[1][0]], chunk_size=2),
2: [3, 10]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin({1: [2, 1, raw[1][0]], 2: [3, 10]})
pd.testing.assert_frame_equal(result, expected)
def test_cut_execution(setup):
session = setup
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
bins = [10, 100, 500]
ii = pd.interval_range(10, 500, 3)
labels = ['a', 'b']
t = tensor(raw, chunk_size=4)
series = from_pandas_series(s, chunk_size=4)
iii = from_pandas_index(ii, chunk_size=2)
# cut on Series
r = cut(series, bins)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins))
r, b = cut(series, bins, retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# cut on tensor
r = cut(t, bins)
# result and expected are arrays whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# one chunk
r = cut(s, tensor(bins, chunk_size=2), right=False, include_lowest=True)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins, right=False, include_lowest=True))
# test labels
r = cut(t, bins, labels=labels)
# result and expected are arrays whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
r = cut(t, bins, labels=False)
# with labels=False, result and expected are plain arrays of bin indices
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=False)
np.testing.assert_array_equal(result, expected)
# test labels which is tensor
labels_t = tensor(['a', 'b'], chunk_size=1)
r = cut(raw, bins, labels=labels_t, include_lowest=True)
# result and expected are arrays whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels, include_lowest=True)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# test labels=False
r, b = cut(raw, ii, labels=False, retbins=True)
# result and expected are arrays whose dtype is CategoricalDtype
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(raw, ii, labels=False, retbins=True)
for r, e in zip(r_result, r_expected):
np.testing.assert_equal(r, e)
pd.testing.assert_index_equal(b_result, b_expected)
# test bins which is md.IntervalIndex
r, b = cut(series, iii, labels=tensor(labels, chunk_size=1), retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, ii, labels=labels, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
pd.testing.assert_index_equal(b_result, b_expected)
# test duplicates
bins2 = [0, 2, 4, 6, 10, 10]
r, b = cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test integer bins
r = cut(series, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, 3))
r, b = cut(series, 3, right=False, retbins=True)
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(s, 3, right=False, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test min max same
s2 = pd.Series([1.1] * 15)
r = cut(s2, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s2, 3))
# test inf exist
s3 = s2.copy()
s3[-1] = np.inf
with pytest.raises(ValueError):
cut(s3, 3).execute()
def test_transpose_execution(setup):
raw = pd.DataFrame({"a": ['1', '2', '3'], "b": ['5', '-6', '7'], "c": ['1', '2', '3']})
# test 1 chunk
df = from_pandas_df(raw)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# test multi chunks
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = from_pandas_df(raw, chunk_size=2)
result = df.T.execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# dtypes are varied
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": [5, -6, 7], "c": [1, 2, 3]})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": ['5', '-6', '7']})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# Transposing from results of other operands
raw = pd.DataFrame(np.arange(0, 100).reshape(10, 10))
df = DataFrame(arange(0, 100, chunk_size=5).reshape(10, 10))
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = DataFrame(rand(100, 100, chunk_size=10))
raw = df.to_pandas()
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
def test_to_numeric_execution(setup):
rs = np.random.RandomState(0)
s = pd.Series(rs.randint(5, size=100))
s[rs.randint(100)] = np.nan
# test 1 chunk
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test multi chunks
series = from_pandas_series(s, chunk_size=20)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test object dtype
s = pd.Series(['1.0', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test errors and downcast
s = pd.Series(['appple', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series, errors='ignore', downcast='signed')
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s, errors='ignore', downcast='signed'))
# test list data
l = ['1.0', 2, -3, '2.0']
r = to_numeric(l)
np.testing.assert_array_equal(r.execute().fetch(),
pd.to_numeric(l))
def test_q_cut_execution(setup):
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
series = from_pandas_series(s)
r = qcut(series, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
pd.testing.assert_series_equal(result, expected)
r = qcut(s, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
pd.testing.assert_series_equal(result, expected)
series = from_pandas_series(s)
r = qcut(series, [0.3, 0.5, 0.7])
result = r.execute().fetch()
expected = pd.qcut(s, [0.3, 0.5, 0.7])
pd.testing.assert_series_equal(result, expected)
r = qcut(range(5), 3)
result = r.execute().fetch()
expected = pd.qcut(range(5), 3)
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
r = qcut(range(5), [0.2, 0.5])
result = r.execute().fetch()
expected = pd.qcut(range(5), [0.2, 0.5])
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
r = qcut(range(5), tensor([0.2, 0.5]))
result = r.execute().fetch()
expected = pd.qcut(range(5), [0.2, 0.5])
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
def test_shift_execution(setup):
# test dataframe
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 8)),
columns=['col' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw, chunk_size=5)
for periods in (2, -2, 6, -6):
for axis in (0, 1):
for fill_value in (None, 0, 1.):
r = df.shift(periods=periods, axis=axis,
fill_value=fill_value)
try:
result = r.execute().fetch()
expected = raw.shift(periods=periods, axis=axis,
fill_value=fill_value)
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, axis: {axis}, fill_value: {fill_value}'
) from e
raw2 = raw.copy()
raw2.index = pd.date_range('2020-1-1', periods=10)
raw2.columns = pd.date_range('2020-3-1', periods=8)
df2 = from_pandas_df(raw2, chunk_size=5)
# test freq not None
for periods in (2, -2):
for axis in (0, 1):
for fill_value in (None, 0, 1.):
r = df2.shift(periods=periods, freq='D', axis=axis,
fill_value=fill_value)
try:
result = r.execute().fetch()
expected = raw2.shift(periods=periods, freq='D', axis=axis,
fill_value=fill_value)
pd.testing.assert_frame_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, axis: {axis}, fill_value: {fill_value}') from e
# test tshift
r = df2.tshift(periods=1)
result = r.execute().fetch()
expected = raw2.tshift(periods=1)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
_ = df.tshift(periods=1)
# test series
s = raw.iloc[:, 0]
series = from_pandas_series(s, chunk_size=5)
for periods in (0, 2, -2, 6, -6):
for fill_value in (None, 0, 1.):
r = series.shift(periods=periods, fill_value=fill_value)
try:
result = r.execute().fetch()
expected = s.shift(periods=periods, fill_value=fill_value)
pd.testing.assert_series_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, fill_value: {fill_value}') from e
s2 = raw2.iloc[:, 0]
# test freq not None
series2 = from_pandas_series(s2, chunk_size=5)
for periods in (2, -2):
for fill_value in (None, 0, 1.):
r = series2.shift(periods=periods, freq='D', fill_value=fill_value)
try:
result = r.execute().fetch()
expected = s2.shift(periods=periods, freq='D', fill_value=fill_value)
pd.testing.assert_series_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, fill_value: {fill_value}') from e
def test_diff_execution(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 8)),
columns=['col' + str(i + 1) for i in range(8)])
raw1 = raw.copy()
raw1['col4'] = raw1['col4'] < 400
r = from_pandas_df(raw1, chunk_size=(10, 5)).diff(-1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw1.diff(-1))
r = from_pandas_df(raw1, chunk_size=5).diff(-1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw1.diff(-1))
r = from_pandas_df(raw, chunk_size=(5, 8)).diff(1, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.diff(1, axis=1))
r = from_pandas_df(raw, chunk_size=5).diff(1, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.diff(1, axis=1), check_dtype=False)
# test series
s = raw.iloc[:, 0]
s1 = s.copy() < 400
r = from_pandas_series(s, chunk_size=10).diff(-1)
pd.testing.assert_series_equal(r.execute().fetch(),
s.diff(-1))
r = from_pandas_series(s, chunk_size=5).diff(-1)
pd.testing.assert_series_equal(r.execute().fetch(),
s.diff(-1))
r = from_pandas_series(s1, chunk_size=5).diff(1)
pd.testing.assert_series_equal(r.execute().fetch(),
s1.diff(1))
def test_value_counts_execution(setup):
rs = np.random.RandomState(0)
s = pd.Series(rs.randint(5, size=100), name='s')
s[rs.randint(100)] = np.nan
# test 1 chunk
series = from_pandas_series(s, chunk_size=100)
r = series.value_counts()
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts())
r = series.value_counts(bins=5, normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(bins=5, normalize=True))
# test multi chunks
series = from_pandas_series(s, chunk_size=30)
r = series.value_counts(method='tree')
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts())
r = series.value_counts(method='tree', normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(normalize=True))
# test bins and normalize
r = series.value_counts(method='tree', bins=5, normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(bins=5, normalize=True))
def test_astype(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
# single chunk
df = from_pandas_df(raw)
r = df.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_frame_equal(expected, result)
# multiple chunks
df = from_pandas_df(raw, chunk_size=6)
r = df.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_frame_equal(expected, result)
# dict type
df = from_pandas_df(raw, chunk_size=5)
r = df.astype({'c1': 'int32', 'c2': 'float', 'c8': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'int32', 'c2': 'float', 'c8': 'str'})
pd.testing.assert_frame_equal(expected, result)
# test arrow_string dtype
df = from_pandas_df(raw, chunk_size=8)
r = df.astype({'c1': 'arrow_string'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'arrow_string'})
pd.testing.assert_frame_equal(expected, result)
# test series
s = pd.Series(rs.randint(5, size=20))
series = from_pandas_series(s)
r = series.astype('int32')
result = r.execute().fetch()
expected = s.astype('int32')
pd.testing.assert_series_equal(result, expected)
series = from_pandas_series(s, chunk_size=6)
r = series.astype('arrow_string')
result = r.execute().fetch()
expected = s.astype('arrow_string')
pd.testing.assert_series_equal(result, expected)
# test index
raw = pd.Index(rs.randint(5, size=20))
mix = from_pandas_index(raw)
r = mix.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_index_equal(result, expected)
# multiple chunks
series = from_pandas_series(s, chunk_size=6)
r = series.astype('str')
result = r.execute().fetch()
expected = s.astype('str')
pd.testing.assert_series_equal(result, expected)
# test category
raw = pd.DataFrame(rs.randint(3, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw)
r = df.astype('category')
result = r.execute().fetch()
expected = raw.astype('category')
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw)
r = df.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=5)
r = df.astype('category')
result = r.execute().fetch()
expected = raw.astype('category')
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=3)
r = df.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=6)
r = df.astype({'c1': 'category', 'c5': 'float', 'c2': 'int32',
'c7': pd.CategoricalDtype([1, 3, 4, 2]),
'c4': pd.CategoricalDtype([1, 3, 2])})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c5': 'float', 'c2': 'int32',
'c7': pd.CategoricalDtype([1, 3, 4, 2]),
'c4': pd.CategoricalDtype([1, 3, 2])})
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=8)
r = df.astype({'c2': 'category'})
result = r.execute().fetch()
expected = raw.astype({'c2': 'category'})
pd.testing.assert_frame_equal(expected, result)
# test series category
raw = pd.Series(np.random.choice(['a', 'b', 'c'], size=(10,)))
series = from_pandas_series(raw, chunk_size=4)
result = series.astype('category').execute().fetch()
expected = raw.astype('category')
pd.testing.assert_series_equal(expected, result)
series = from_pandas_series(raw, chunk_size=3)
result = series.astype(
pd.CategoricalDtype(['a', 'c', 'b']), copy=False).execute().fetch()
expected = raw.astype(pd.CategoricalDtype(['a', 'c', 'b']), copy=False)
pd.testing.assert_series_equal(expected, result)
series = from_pandas_series(raw, chunk_size=6)
result = series.astype(
pd.CategoricalDtype(['a', 'c', 'b', 'd'])).execute().fetch()
expected = raw.astype(pd.CategoricalDtype(['a', 'c', 'b', 'd']))
pd.testing.assert_series_equal(expected, result)
def test_drop(setup):
# test dataframe drop
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw, chunk_size=3)
columns = ['c2', 'c4', 'c5', 'c6']
index = [3, 6, 7]
r = df.drop(columns=columns, index=index)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.drop(columns=columns, index=index))
idx_series = from_pandas_series(pd.Series(index))
r = df.drop(idx_series)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.drop(pd.Series(index)))
df.drop(columns, axis=1, inplace=True)
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.drop(columns, axis=1))
del df['c3']
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.drop(columns + ['c3'], axis=1))
ps = df.pop('c8')
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.drop(columns + ['c3', 'c8'], axis=1))
pd.testing.assert_series_equal(ps.execute().fetch(),
raw['c8'])
# test series drop
raw = pd.Series(rs.randint(1000, size=(20,)))
series = from_pandas_series(raw, chunk_size=3)
r = series.drop(index=index)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.drop(index=index))
# test index drop
ser = pd.Series(range(20))
rs.shuffle(ser)
raw = pd.Index(ser)
idx = from_pandas_index(raw)
r = idx.drop(index)
pd.testing.assert_index_equal(r.execute().fetch(),
raw.drop(index))
def test_melt(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw, chunk_size=3)
r = df.melt(id_vars=['c1'], value_vars=['c2', 'c4'])
pd.testing.assert_frame_equal(
r.execute().fetch().sort_values(['c1', 'variable']).reset_index(drop=True),
raw.melt(id_vars=['c1'], value_vars=['c2', 'c4']).sort_values(['c1', 'variable']).reset_index(drop=True)
)
def test_drop_duplicates(setup):
# test dataframe drop_duplicates
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 5)),
columns=['c' + str(i + 1) for i in range(5)],
index=['i' + str(j) for j in range(20)])
duplicate_lines = rs.randint(1000, size=5)
for i in [1, 3, 10, 11, 15]:
raw.iloc[i] = duplicate_lines
with option_context({'combine_size': 2}):
# test dataframe
for chunk_size in [(8, 3), (20, 5)]:
df = from_pandas_df(raw, chunk_size=chunk_size)
if chunk_size[0] < len(raw):
methods = ['tree', 'subset_tree', 'shuffle']
else:
# 1 chunk
methods = [None]
for method in methods:
for subset in [None, 'c1', ['c1', 'c2']]:
for keep in ['first', 'last', False]:
for ignore_index in [True, False]:
try:
r = df.drop_duplicates(method=method, subset=subset,
keep=keep, ignore_index=ignore_index)
result = r.execute().fetch()
try:
expected = raw.drop_duplicates(subset=subset,
keep=keep, ignore_index=ignore_index)
except TypeError:
# ignore_index is only supported in pandas >= 1.0
expected = raw.drop_duplicates(subset=subset,
keep=keep)
if ignore_index:
expected.reset_index(drop=True, inplace=True)
pd.testing.assert_frame_equal(result, expected)
except Exception as e: # pragma: no cover
raise AssertionError(
f'failed when method={method}, subset={subset}, '
f'keep={keep}, ignore_index={ignore_index}') from e
# test series and index
s = raw['c3']
ind = pd.Index(s)
for tp, obj in [('series', s), ('index', ind)]:
for chunk_size in [8, 20]:
to_m = from_pandas_series if tp == 'series' else from_pandas_index
mobj = to_m(obj, chunk_size=chunk_size)
if chunk_size < len(obj):
methods = ['tree', 'shuffle']
else:
# 1 chunk
methods = [None]
for method in methods:
for keep in ['first', 'last', False]:
try:
r = mobj.drop_duplicates(method=method, keep=keep)
result = r.execute().fetch()
expected = obj.drop_duplicates(keep=keep)
cmp = pd.testing.assert_series_equal \
if tp == 'series' else pd.testing.assert_index_equal
cmp(result, expected)
except Exception as e: # pragma: no cover
raise AssertionError(f'failed when method={method}, keep={keep}') from e
# test inplace
series = from_pandas_series(s, chunk_size=11)
series.drop_duplicates(inplace=True)
result = series.execute().fetch()
expected = s.drop_duplicates()
pd.testing.assert_series_equal(result, expected)
def test_duplicated(setup):
# test dataframe duplicated
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 5)),
columns=['c' + str(i + 1) for i in range(5)],
index=['i' + str(j) for j in range(20)])
duplicate_lines = rs.randint(1000, size=5)
for i in [1, 3, 10, 11, 15]:
raw.iloc[i] = duplicate_lines
with option_context({'combine_size': 2}):
# test dataframe
for chunk_size in [(8, 3), (20, 5)]:
df = from_pandas_df(raw, chunk_size=chunk_size)
if chunk_size[0] < len(raw):
methods = ['tree', 'subset_tree', 'shuffle']
else:
# 1 chunk
methods = [None]
for method in methods:
for subset in [None, 'c1', ['c1', 'c2']]:
for keep in ['first', 'last', False]:
try:
r = df.duplicated(method=method, subset=subset, keep=keep)
result = r.execute().fetch()
expected = raw.duplicated(subset=subset, keep=keep)
pd.testing.assert_series_equal(result, expected)
except Exception as e: # pragma: no cover
raise AssertionError(
f'failed when method={method}, subset={subset}, '
f'keep={keep}') from e
# test series
s = raw['c3']
for tp, obj in [('series', s)]:
for chunk_size in [8, 20]:
to_m = from_pandas_series if tp == 'series' else from_pandas_index
mobj = to_m(obj, chunk_size=chunk_size)
if chunk_size < len(obj):
methods = ['tree', 'shuffle']
else:
# 1 chunk
methods = [None]
for method in methods:
for keep in ['first', 'last', False]:
try:
r = mobj.duplicated(method=method, keep=keep)
result = r.execute().fetch()
expected = obj.duplicated(keep=keep)
cmp = pd.testing.assert_series_equal \
if tp == 'series' else pd.testing.assert_index_equal
cmp(result, expected)
except Exception as e: # pragma: no cover
raise AssertionError(f'failed when method={method}, keep={keep}') from e
def test_memory_usage_execution(setup):
dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
data = dict([(t, np.ones(shape=500).astype(t))
for t in dtypes])
raw = pd.DataFrame(data)
df = from_pandas_df(raw, chunk_size=(500, 2))
r = df.memory_usage(index=False)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=False))
df = from_pandas_df(raw, chunk_size=(500, 2))
r = df.memory_usage(index=True)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=True))
df = from_pandas_df(raw, chunk_size=(100, 3))
r = df.memory_usage(index=False)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=False))
r = df.memory_usage(index=True)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=True))
raw = pd.DataFrame(data, index=np.arange(500).astype('object'))
df = from_pandas_df(raw, chunk_size=(100, 3))
r = df.memory_usage(index=True)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=True))
raw = pd.Series(np.ones(shape=500).astype('object'), name='s')
series = from_pandas_series(raw)
r = series.memory_usage(index=True)
assert r.execute().fetch() == raw.memory_usage(index=True)
series = from_pandas_series(raw, chunk_size=100)
r = series.memory_usage(index=False)
assert r.execute().fetch() == raw.memory_usage(index=False)
series = from_pandas_series(raw, chunk_size=100)
r = series.memory_usage(index=True)
assert r.execute().fetch() == raw.memory_usage(index=True)
raw = pd.Series(np.ones(shape=500).astype('object'),
index=np.arange(500).astype('object'), name='s')
series = from_pandas_series(raw, chunk_size=100)
r = series.memory_usage(index=True)
assert r.execute().fetch() == raw.memory_usage(index=True)
raw = pd.Index(np.arange(500), name='s')
index = from_pandas_index(raw)
r = index.memory_usage()
assert r.execute().fetch() == raw.memory_usage()
index = from_pandas_index(raw, chunk_size=100)
r = index.memory_usage()
assert r.execute().fetch() == raw.memory_usage()
def test_select_dtypes_execution(setup):
raw = pd.DataFrame({'a': np.random.rand(10),
'b': np.random.randint(10, size=10)})
df = from_pandas_df(raw, chunk_size=5)
r = df.select_dtypes(include=['float64'])
result = r.execute().fetch()
expected = raw.select_dtypes(include=['float64'])
pd.testing.assert_frame_equal(result, expected)
def test_map_chunk_execution(setup):
raw = pd.DataFrame(np.random.rand(10, 5),
columns=[f'col{i}' for i in range(5)])
df = from_pandas_df(raw, chunk_size=(5, 3))
def f1(pdf):
return pdf + 1
r = df.map_chunk(f1)
result = r.execute().fetch()
expected = raw + 1
pd.testing.assert_frame_equal(result, expected)
raw_s = raw['col1']
series = from_pandas_series(raw_s, chunk_size=5)
r = series.map_chunk(f1)
result = r.execute().fetch()
expected = raw_s + 1
pd.testing.assert_series_equal(result, expected)
def f2(pdf):
return pdf.sum(axis=1)
df = from_pandas_df(raw, chunk_size=5)
r = df.map_chunk(f2, output_type='series')
result = r.execute().fetch()
expected = raw.sum(axis=1)
pd.testing.assert_series_equal(result, expected)
raw = pd.DataFrame({'a': [f's{i}'for i in range(10)],
'b': np.arange(10)})
df = from_pandas_df(raw, chunk_size=5)
def f3(pdf):
return pdf['a'].str.slice(1).astype(int) + pdf['b']
with pytest.raises(TypeError):
r = df.map_chunk(f3)
_ = r.execute().fetch()
r = df.map_chunk(f3, output_type='series')
result = r.execute(extra_config={'check_dtypes': False}).fetch()
expected = f3(raw)
pd.testing.assert_series_equal(result, expected)
def f4(pdf):
ret = pd.DataFrame(columns=['a', 'b'])
ret['a'] = pdf['a'].str.slice(1).astype(int)
ret['b'] = pdf['b']
return ret
with pytest.raises(TypeError):
r = df.map_chunk(f4, output_type='dataframe')
_ = r.execute().fetch()
r = df.map_chunk(f4, output_type='dataframe',
dtypes=pd.Series([np.dtype(int), raw['b'].dtype], index=['a', 'b']))
result = r.execute().fetch()
expected = f4(raw)
pd.testing.assert_frame_equal(result, expected)
raw2 = pd.DataFrame({'a': [np.array([1, 2, 3]), np.array([4, 5, 6])]})
df2 = from_pandas_df(raw2)
dtypes = pd.Series([np.dtype(float)] * 3)
r = df2.map_chunk(lambda x: x['a'].apply(pd.Series), output_type='dataframe',
dtypes=dtypes)
assert r.shape == (2, 3)
pd.testing.assert_series_equal(r.dtypes, dtypes)
result = r.execute().fetch()
expected = raw2.apply(lambda x: x['a'], axis=1, result_type='expand')
pd.testing.assert_frame_equal(result, expected)
raw = pd.DataFrame(np.random.rand(10, 5),
columns=[f'col{i}' for i in range(5)])
df = from_pandas_df(raw, chunk_size=(5, 3))
def f5(pdf, chunk_index):
return pdf + 1 + chunk_index[0]
r = df.map_chunk(f5, with_chunk_index=True)
result = r.execute().fetch()
expected = (raw + 1).add(np.arange(10) // 5, axis=0)
pd.testing.assert_frame_equal(result, expected)
raw_s = raw['col1']
series = from_pandas_series(raw_s, chunk_size=5)
r = series.map_chunk(f5, with_chunk_index=True)
result = r.execute().fetch()
expected = raw_s + 1 + np.arange(10) // 5
pd.testing.assert_series_equal(result, expected)
def test_cartesian_chunk_execution(setup):
rs = np.random.RandomState(0)
raw1 = pd.DataFrame({'a': rs.randint(3, size=10),
'b': rs.rand(10)})
raw2 = pd.DataFrame({'c': rs.randint(3, size=10),
'd': rs.rand(10),
'e': rs.rand(10)})
df1 = from_pandas_df(raw1, chunk_size=(5, 1))
df2 = from_pandas_df(raw2, chunk_size=(5, 1))
def f(c1, c2):
c1, c2 = c1.copy(), c2.copy()
c1['x'] = 1
c2['x'] = 1
r = c1.merge(c2, on='x')
r = r[(r['b'] > r['d']) & (r['b'] < r['e'])]
return r[['a', 'c']]
rr = df1.cartesian_chunk(df2, f)
result = rr.execute().fetch()
expected = f(raw1, raw2)
pd.testing.assert_frame_equal(
result.sort_values(by=['a', 'c']).reset_index(drop=True),
expected.sort_values(by=['a', 'c']).reset_index(drop=True))
def f2(c1, c2):
r = f(c1, c2)
return r['a'] + r['c']
rr = df1.cartesian_chunk(df2, f2)
result = rr.execute().fetch()
expected = f2(raw1, raw2)
pd.testing.assert_series_equal(
result.sort_values().reset_index(drop=True),
expected.sort_values().reset_index(drop=True))
# size_res = setup.executor.execute_dataframe(rr, mock=True)[0][0]
# assert size_res > 0
def f3(c1, c2):
cr = pd.DataFrame()
cr['a'] = c1.str.slice(1).astype(np.int64)
cr['x'] = 1
cr2 = pd.DataFrame()
cr2['b'] = c2.str.slice(1).astype(np.int64)
cr2['x'] = 1
return cr.merge(cr2, on='x')[['a', 'b']]
s_raw = pd.Series([f's{i}' for i in range(10)])
series = from_pandas_series(s_raw, chunk_size=5)
rr = series.cartesian_chunk(series, f3, output_type='dataframe',
dtypes=pd.Series([np.dtype(np.int64)] * 2,
index=['a', 'b']))
result = rr.execute().fetch()
expected = f3(s_raw, s_raw)
pd.testing.assert_frame_equal(
result.sort_values(by=['a', 'b']).reset_index(drop=True),
expected.sort_values(by=['a', 'b']).reset_index(drop=True))
with pytest.raises(TypeError):
_ = series.cartesian_chunk(series, f3)
def f4(c1, c2):
r = f3(c1, c2)
return r['a'] + r['b']
rr = series.cartesian_chunk(series, f4, output_type='series',
dtypes=np.dtype(np.int64))
result = rr.execute().fetch()
expected = f4(s_raw, s_raw)
pd.testing.assert_series_equal(
result.sort_values().reset_index(drop=True),
expected.sort_values().reset_index(drop=True))
def test_rebalance_execution(setup):
raw = pd.DataFrame(np.random.rand(10, 3), columns=list('abc'))
df = from_pandas_df(raw)
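# _expect_count builds a custom tile handler: it runs the operand's own tiling and
# then asserts the rebalanced dataframe was split into exactly n chunks.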
def _expect_count(n):
def _tile_rebalance(op):
tileable = yield from op.tile(op)
assert len(tileable.chunks) == n
return tileable
return _tile_rebalance
r = df.rebalance(num_partitions=3)
extra_config = {'operand_tile_handlers': {DataFrameRebalance: _expect_count(3)}}
result = r.execute(extra_config=extra_config).fetch()
pd.testing.assert_frame_equal(result, raw)
r = df.rebalance(factor=0.5)
extra_config = {'operand_tile_handlers': {DataFrameRebalance: _expect_count(1)}}
result = r.execute(extra_config=extra_config).fetch()
pd.testing.assert_frame_equal(result, raw)
# default rebalance uses the worker's core count (two in the test environment)
r = df.rebalance()
extra_config = {'operand_tile_handlers': {DataFrameRebalance: _expect_count(2)}}
result = r.execute(extra_config=extra_config).fetch()
pd.testing.assert_frame_equal(result, raw)
def test_stack_execution(setup):
raw = pd.DataFrame(np.random.rand(10, 3), columns=list('abc'),
index=[f's{i}' for i in range(10)])
for loc in [(5, 1), (8, 2), (1, 0)]:
raw.iloc[loc] = np.nan
df = from_pandas_df(raw, chunk_size=(5, 2))
for dropna in (True, False):
r = df.stack(dropna=dropna)
result = r.execute().fetch()
expected = raw.stack(dropna=dropna)
pd.testing.assert_series_equal(result, expected)
cols = pd.MultiIndex.from_tuples([
('c1', 'cc1'), ('c1', 'cc2'), ('c2', 'cc3')])
raw2 = raw.copy()
raw2.columns = cols
df = from_pandas_df(raw2, chunk_size=(5, 2))
for level in [-1, 0, [0, 1]]:
for dropna in (True, False):
r = df.stack(level=level, dropna=dropna)
result = r.execute().fetch()
expected = raw2.stack(level=level, dropna=dropna)
assert_method = \
pd.testing.assert_series_equal if expected.ndim == 1 \
else pd.testing.assert_frame_equal
assert_method(result, expected)
def test_explode_execution(setup):
raw = pd.DataFrame({'a': np.random.rand(10),
'b': [np.random.rand(random.randint(1, 10)) for _ in range(10)],
'c': np.random.rand(10),
'd': np.random.rand(10)})
df = from_pandas_df(raw, chunk_size=(4, 2))
for ignore_index in [False, True]:
r = df.explode('b', ignore_index=ignore_index)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.explode('b', ignore_index=ignore_index))
series = from_pandas_series(raw.b, chunk_size=4)
for ignore_index in [False, True]:
r = series.explode(ignore_index=ignore_index)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.b.explode(ignore_index=ignore_index))
def test_eval_query_execution(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame({'a': rs.rand(100),
'b': rs.rand(100),
'c c': rs.rand(100)})
df = from_pandas_df(raw, chunk_size=(10, 2))
r = mars_eval('c = df.a * 2 + df["c c"]', target=df)
pd.testing.assert_frame_equal(r.execute().fetch(),
pd.eval('c = raw.a * 2 + raw["c c"]', engine='python', target=raw))
r = df.eval('a + b')
pd.testing.assert_series_equal(r.execute().fetch(),
raw.eval('a + b'))
_val = 5.0 # noqa: F841
_val_array = [1, 2, 3] # noqa: F841
expr = """
e = -a + b + 1
f = b + `c c` + @_val + @_val_array[-1]
"""
r = df.eval(expr)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.eval(expr))
copied_df = df.copy()
copied_df.eval('c = a + b', inplace=True)
pd.testing.assert_frame_equal(copied_df.execute().fetch(),
raw.eval('c = a + b'))
expr = 'a > b | a < `c c`'
r = df.query(expr)
pd.testing.assert_frame_equal(
r.execute(extra_config={'check_index_value': False}).fetch(),
raw.query(expr))
expr = 'a > b & ~(a < `c c`)'
r = df.query(expr)
pd.testing.assert_frame_equal(
r.execute(extra_config={'check_index_value': False}).fetch(),
raw.query(expr))
expr = 'a < b < `c c`'
r = df.query(expr)
pd.testing.assert_frame_equal(
r.execute(extra_config={'check_index_value': False}).fetch(),
raw.query(expr))
copied_df = df.copy()
copied_df.query('a < b', inplace=True)
pd.testing.assert_frame_equal(
copied_df.execute(extra_config={'check_index_value': False}).fetch(),
raw.query('a < b'))
def test_check_monotonic_execution(setup):
idx_value = pd.Index(list(range(1000)))
idx_increase = from_pandas_index(idx_value, chunk_size=100)
assert idx_increase.is_monotonic_increasing.execute().fetch() is True
assert idx_increase.is_monotonic_decreasing.execute().fetch() is False
idx_decrease = from_pandas_index(idx_value[::-1], chunk_size=100)
assert idx_decrease.is_monotonic_increasing.execute().fetch() is False
assert idx_decrease.is_monotonic_decreasing.execute().fetch() is True
idx_mixed = from_pandas_index(
pd.Index(list(range(500)) + list(range(500))), chunk_size=100)
assert idx_mixed.is_monotonic_increasing.execute().fetch() is False
assert idx_mixed.is_monotonic_decreasing.execute().fetch() is False
ser_mixed = from_pandas_series(
pd.Series(list(range(500)) + list(range(499, 999))), chunk_size=100)
assert ser_mixed.is_monotonic_increasing.execute().fetch() is True
assert ser_mixed.is_monotonic_decreasing.execute().fetch() is False
def test_pct_change_execution(setup):
# test dataframe
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 8)),
columns=['col' + str(i + 1) for i in range(8)],
index=pd.date_range('2021-1-1', periods=10))
df = from_pandas_df(raw, chunk_size=5)
r = df.pct_change()
result = r.execute().fetch()
expected = raw.pct_change()
| pd.testing.assert_frame_equal(expected, result) | pandas.testing.assert_frame_equal |
import pandas as pd
from datetime import datetime
from multiprocessing import Pool
import seaborn as sns
from matplotlib import pyplot as plt
from pathlib import Path
# ================================
# MARKING SCHEME NOTES
# ===============================
# 1. In the accompanying assignment Python file, students are supposed to fill in required code
# in all places where it says "YOUR CODE HERE"
# 2. To find the points awarded for a particular line, refer to the corresponding
# line here, which has a POINTS comment at the end
def get_date_range_by_chunking(large_csv):
"""
Find the first and last date in a large CSV by reading it in chunks with pandas.
:param large_csv: Full path to activity_log_raw.csv
:return: tuple of (first, last) dates in the dataset
"""
# ======================================
# EXPLORE THE DATA
# ======================================
# Read the first 100,000 rows in the dataset
df_first_100k = pd.read_csv(large_csv, nrows=100000) # POINTS: 1
print(df_first_100k)
# Identify the time column in the dataset
str_time_col = 'ACTIVITY_TIME' # POINTS: 1
# ============================================================
# FIND THE FIRST [EARLIEST] AND LAST DATE IN THE WHOLE DATASET
# BY USING CHUNKING
# =============================================================
# set chunk size
chunksize = 1000000 # POINTS: 1
# declare a list to hold the dates
dates = [] # POINTS: 1
with pd.read_csv(large_csv, chunksize=chunksize) as reader: # POINTS: 1
for chunk in reader:
# convert the string to Python datetime object
# add a new column to hold this datetime object
time_col = 'activ_time' # POINTS: 1
chunk[time_col] = chunk[str_time_col].apply(lambda x: pd.to_datetime(x[:9]))
chunk.sort_values(by=time_col, inplace=True)
top_date = chunk.iloc[0][time_col]
dates.append(top_date) # POINTS: 1
chunk.sort_values(by=time_col, ascending=False, inplace=True)
bottom_date = chunk.iloc[0][time_col]
dates.append(bottom_date) # POINTS: 1
# Find the earliest and last date by sorting the dates list
sorted_dates = sorted(dates, reverse=False) # POINTS: 1
first = sorted_dates[0] # POINTS: 2
last = sorted_dates[-1] # POINTS: 2
print("First date is {} and the last date is {}".format(first, last))
return first, last
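# Example usage (hypothetical path; the CSV name comes from the assignment brief):
# first, last = get_date_range_by_chunking('activity_log_raw.csv')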
def quadratic_func(x, a):
"""
Define the quadratic function: y = 2x^2 + a - 1
:param x: input value
:param a: offset term
:return: the computed y value
"""
y = 2*(x**2) + a-1 # POINTS: 3
return y
def run_the_quad_func_without_multiprocessing(list_x, list_y):
"""
Run the quadratic function on a huge list of X and Ys without using parallelism
:param list_x: List of xs
:param list_y: List of ys
:return:
"""
results = [quadratic_func(x, y) for x, y in zip(list_x, list_y)]
return results
def run_the_quad_func_with_multiprocessing(list_x, list_y, num_processors):
"""
Run the quadratic function with multiprocessing
:param list_x: List of xs
:param list_y: List of ys
:param num_processors: Number of processors to use
:return:
"""
processors = Pool(num_processors) # POINTS: 2
params = [i for i in zip(list_x, list_y)]
results = processors.starmap(quadratic_func, params) # POINTS: 3
processors.close()
return results
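# Illustrative usage (made-up sizes): evaluate y = 2x^2 + a - 1 for paired lists
# on four worker processes.
# xs, ys = list(range(1_000_000)), list(range(1_000_000))
# results = run_the_quad_func_with_multiprocessing(xs, ys, num_processors=4)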
def multiprocessing_vs_sequential_quadratic(list_len, out_plot, out_csv):
"""
Compare how long the quadratic function takes with and without multiprocessing
for lists of increasing length, then save a comparison plot and the raw timings.
:param list_len: number of list-size steps to benchmark (sizes grow as powers of 10)
:param out_plot: path to save the comparison line plot
:param out_csv: path to save the timing results as CSV
:return:
"""
data = []
for i in range(1, list_len):
list_length = 10 ** i
x = [j for j in range(list_length)] # POINTS: 2
y = [j for j in range(list_length)] # POINTS: 2
start_time = datetime.now()
run_the_quad_func_without_multiprocessing(x, y) # POINTS: 2
end_time = datetime.now()
time_taken_seq = (end_time - start_time).total_seconds()
data.append({'ListLen': list_length, 'Type': 'Sequential', 'TimeTaken': time_taken_seq})
start_time = datetime.now()
run_the_quad_func_with_multiprocessing(x, y, 4) # POINTS: 2
end_time = datetime.now()
time_taken_mult = (end_time - start_time).total_seconds()
data.append({'ListLen': list_length, 'Type': 'Parallel', 'TimeTaken': time_taken_mult})
df = pd.DataFrame(data) # POINTS: 1
plt.figure(figsize=(12, 8))
sns.lineplot(data=df, x='ListLen', y='TimeTaken', hue='Type')
plt.savefig(out_plot) # POINTS: 1
df.to_csv(out_csv, index=False) # POINTS: 1
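# Illustrative usage (hypothetical output paths):
# multiprocessing_vs_sequential_quadratic(8, 'quad_timing.png', 'quad_timing.csv')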
def get_num_uniq_users(csv_file, userid_col):
"""
A Helper function to help get the number of unique users
:param csv_file: path to CSV file
:param userid_col: Column for user ID
:return:
"""
df = | pd.read_csv(csv_file) | pandas.read_csv |
# allocation.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""
Methods of allocating datasets
"""
import pandas as pd
from flowsa.common import fbs_activity_fields, sector_level_key, \
load_crosswalk, check_activities_sector_like
from flowsa.settings import log, vLogDetailed
from flowsa.dataclean import replace_NoneType_with_empty_cells, \
replace_strings_with_NoneType
from flowsa.flowbyfunctions import sector_aggregation, \
sector_disaggregation, subset_and_merge_df_by_sector_lengths
def allocate_by_sector(df_w_sectors, attr, allocation_method,
group_cols, **kwargs):
"""
Create an allocation ratio for df
:param df_w_sectors: df with column of sectors
:param attr: dictionary, attributes of activity set
:param allocation_method: currently written for 'proportional'
and 'proportional-flagged'
:param group_cols: columns on which to base aggregation and disaggregation
:param kwargs: optional keyword arguments; 'flowSubsetMapped' is required when
allocation_method is 'proportional-flagged'
:return: df with FlowAmountRatio for each sector
"""
# first determine if there is a special case with how
# the allocation ratios are created
if allocation_method == 'proportional-flagged':
# if the allocation method is flagged, subset sectors that are
# flagged/notflagged, where nonflagged sectors have flowamountratio=1
if kwargs != {}:
if 'flowSubsetMapped' in kwargs:
fsm = kwargs['flowSubsetMapped']
flagged = fsm[fsm['disaggregate_flag'] == 1]
if flagged['SectorProducedBy'].isna().all():
sector_col = 'SectorConsumedBy'
else:
sector_col = 'SectorProducedBy'
flagged_names = flagged[sector_col].tolist()
nonflagged = fsm[fsm['disaggregate_flag'] == 0]
nonflagged_names = nonflagged[sector_col].tolist()
# subset the original df so rows of data that run through the
# proportional allocation process are
# sectors included in the flagged list
df_w_sectors_nonflagged = df_w_sectors.loc[
(df_w_sectors[fbs_activity_fields[0]].isin(
nonflagged_names)) |
(df_w_sectors[fbs_activity_fields[1]].isin(
nonflagged_names))].reset_index(drop=True)
df_w_sectors_nonflagged = \
df_w_sectors_nonflagged.assign(FlowAmountRatio=1)
df_w_sectors = \
df_w_sectors.loc[(df_w_sectors[fbs_activity_fields[0]]
.isin(flagged_names)) |
(df_w_sectors[fbs_activity_fields[1]]
.isin(flagged_names)
)].reset_index(drop=True)
else:
log.error('The proportional-flagged allocation '
'method requires a column "disaggregate_flag" '
'in the flow_subset_mapped df')
# run sector aggregation fxn to determine total flowamount
# for each level of sector
if len(df_w_sectors) == 0:
return df_w_sectors_nonflagged
else:
df1 = sector_aggregation(df_w_sectors)
# run sector disaggregation to capture one-to-one
# naics4/5/6 relationships
df2 = sector_disaggregation(df1)
# if statements for method of allocation
# either 'proportional' or 'proportional-flagged'
allocation_df = []
if allocation_method in ('proportional', 'proportional-flagged'):
allocation_df = proportional_allocation(df2, attr)
else:
log.error('Must create function for specified '
'method of allocation')
if allocation_method == 'proportional-flagged':
# drop rows where values are not in flagged names
allocation_df =\
allocation_df.loc[(allocation_df[fbs_activity_fields[0]]
.isin(flagged_names)) |
(allocation_df[fbs_activity_fields[1]]
.isin(flagged_names)
)].reset_index(drop=True)
# concat the flagged and nonflagged dfs
allocation_df = \
pd.concat([allocation_df, df_w_sectors_nonflagged],
ignore_index=True).sort_values(['SectorProducedBy',
'SectorConsumedBy'])
return allocation_df
def proportional_allocation(df, attr):
"""
Creates a proportional allocation based on all the most
aggregated sectors within a location
Ensure that sectors are at 2 digit level - can run sector_aggregation()
prior to using this function
:param df: df, includes sector columns
:param attr: dictionary, attributes for an activity set
:return: df, with 'FlowAmountRatio' column
"""
# tmp drop NoneType
df = replace_NoneType_with_empty_cells(df)
# determine if any additional columns beyond location and sector by which
# to base allocation ratios
if 'allocation_merge_columns' in attr:
groupby_cols = ['Location'] + attr['allocation_merge_columns']
denom_subset_cols = ['Location', 'LocationSystem', 'Year',
'Denominator'] + attr['allocation_merge_columns']
else:
groupby_cols = ['Location']
denom_subset_cols = ['Location', 'LocationSystem', 'Year',
'Denominator']
cw_load = load_crosswalk('sector_length')
cw = cw_load['NAICS_2'].drop_duplicates()
denom_df = df.loc[(df['SectorProducedBy'].isin(cw)) |
(df['SectorConsumedBy'].isin(cw))]
# generate denominator based on identified groupby cols
denom_df = denom_df.assign(Denominator=denom_df.groupby(
groupby_cols)['FlowAmount'].transform('sum'))
# subset select columns by which to generate ratios
denom_df_2 = denom_df[denom_subset_cols].drop_duplicates()
# merge the denominator column with fba_w_sector df
allocation_df = df.merge(denom_df_2, how='left')
# calculate ratio
allocation_df.loc[:, 'FlowAmountRatio'] = \
allocation_df['FlowAmount'] / allocation_df['Denominator']
allocation_df = allocation_df.drop(columns=['Denominator']).reset_index(
drop=True)
# add nonetypes
allocation_df = replace_strings_with_NoneType(allocation_df)
return allocation_df
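# Illustrative sketch (toy data, not part of flowsa): the denominator is the summed
# FlowAmount of the most aggregated (2-digit) sectors within a Location, and each
# row's FlowAmountRatio is FlowAmount / Denominator, e.g.:
#   toy = pd.DataFrame({'Location': ['US', 'US'], 'FlowAmount': [30.0, 70.0]})
#   toy['Denominator'] = toy.groupby('Location')['FlowAmount'].transform('sum')
#   toy['FlowAmountRatio'] = toy['FlowAmount'] / toy['Denominator']  # -> 0.3, 0.7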
def proportional_allocation_by_location_and_activity(df_load, sectorcolumn):
"""
Creates a proportional allocation within each aggregated
sector within a location
:param df_load: df with sector columns
:param sectorcolumn: str, sector column for which to create
allocation ratios
:return: df, with 'FlowAmountRatio' and 'HelperFlow' columns
"""
# tmp replace NoneTypes with empty cells
df = replace_NoneType_with_empty_cells(df_load).reset_index(drop=True)
# want to create denominator based on shortest length naics for each
# activity/location
grouping_cols = [e for e in ['FlowName', 'Location', 'Activity',
'ActivityConsumedBy', 'ActivityProducedBy',
'Class', 'SourceName', 'Unit', 'FlowType',
'Compartment', 'Year']
if e in df.columns.values.tolist()]
activity_cols = [e for e in ['Activity', 'ActivityConsumedBy',
'ActivityProducedBy']
if e in df.columns.values.tolist()]
# trim whitespace
df[sectorcolumn] = df[sectorcolumn].str.strip()
# to create the denominator dataframe first add a column that captures
# the sector length
denom_df = df.assign(sLen=df[sectorcolumn].str.len())
denom_df = denom_df[denom_df['sLen'] == denom_df.groupby(activity_cols)[
'sLen'].transform(min)].drop(columns='sLen')
denom_df.loc[:, 'Denominator'] = \
denom_df.groupby(grouping_cols)['HelperFlow'].transform('sum')
# list of column headers, that if exist in df, should be aggregated
# using the weighted avg fxn
possible_column_headers = ('Location', 'LocationSystem', 'Year',
'Activity', 'ActivityConsumedBy',
'ActivityProducedBy')
# list of column headers that do exist in the df being aggregated
column_headers = [e for e in possible_column_headers
if e in denom_df.columns.values.tolist()]
merge_headers = column_headers.copy()
column_headers.append('Denominator')
# create subset of denominator values based on Locations and Activities
denom_df_2 = \
denom_df[column_headers].drop_duplicates().reset_index(drop=True)
# merge the denominator column with fba_w_sector df
allocation_df = df.merge(denom_df_2,
how='left',
left_on=merge_headers,
right_on=merge_headers)
# calculate ratio
allocation_df.loc[:, 'FlowAmountRatio'] = \
allocation_df['HelperFlow'] / allocation_df['Denominator']
allocation_df = allocation_df.drop(
columns=['Denominator']).reset_index(drop=True)
# where parent NAICS are not found in the allocation dataset, make sure
# those child NAICS are not dropped
allocation_df['FlowAmountRatio'] = \
allocation_df['FlowAmountRatio'].fillna(1)
# fill empty cols with NoneType
allocation_df = replace_strings_with_NoneType(allocation_df)
# fill na values with 0
allocation_df['HelperFlow'] = allocation_df['HelperFlow'].fillna(0)
return allocation_df
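# Hedged illustration (editor-added sketch): the denominator above is built only from the rows
# whose sector code is the shortest within each activity group. Minimal example:
def _example_shortest_sector_rows():
    import pandas as pd
    toy = pd.DataFrame({'Activity': ['A', 'A', 'B'], 'Sector': ['11', '111', '2111']})
    toy = toy.assign(sLen=toy['Sector'].str.len())
    shortest = toy[toy['sLen'] == toy.groupby('Activity')['sLen'].transform('min')]
    return shortest.drop(columns='sLen')  # keeps ('A', '11') and ('B', '2111')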
def equally_allocate_parent_to_child_naics(
df_load, method, overwritetargetsectorlevel=None):
"""
Determine rows of data that will be lost if subset data at
target sector level.
Equally allocate parent NAICS to child NAICS where child NAICS missing
:param df_load: df, FBS format
:param overwritetargetsectorlevel: str, optional, specify what sector
level to allocate to
:return: df, with all child NAICS at target sector level
"""
# determine which sector level to use, use the least aggregated level
sector_level = method.get('target_sector_level')
if overwritetargetsectorlevel is not None:
sector_level = overwritetargetsectorlevel
# if secondary sector levels are identified, set the sector level to the
# least aggregated
sector_level_list = [sector_level]
if 'target_subset_sector_level' in method:
sector_level_dict = method.get('target_subset_sector_level')
for k, v in sector_level_dict.items():
sector_level_list = sector_level_list + [k]
sector_subset_dict = dict((k, sector_level_key[k]) for k in
sector_level_list if k in sector_level_key)
sector_level = max(sector_subset_dict, key=sector_subset_dict.get)
# exclude nonsectors
df = replace_NoneType_with_empty_cells(df_load)
# determine if activities are sector-like, if aggregating a df with a
# 'SourceName'
sector_like_activities = check_activities_sector_like(df_load)
    # if activities are sector-like, drop the activity columns from the df
    # and add them back in later as copies of the sector columns
if sector_like_activities:
# subset df
df_cols = [e for e in df.columns if e not in
('ActivityProducedBy', 'ActivityConsumedBy')]
df = df[df_cols]
rows_lost = | pd.DataFrame() | pandas.DataFrame |
# Data container for ESI data
from pathlib import Path
import geopandas as gpd
import numpy as np
import pandas as pd
from scipy.spatial import cKDTree
from .grs import GRS
class ESI:
"""
ESI data container.
Attributes:
-----------
path: Path
Path to ESI data
gdf: geopandas.GeoDataFrame
ESI data
locs: pandas.DataFrame
Points along every ESI segment including segment identifier (esi_id) and ESI code (esi_code)
tree: scipy.spatial.cKDTree
Tree to query closest ESI point to look up ESI segment identifier and ESI code
"""
def __init__(self, fpath: Path):
self.path = fpath
self.gdf = gpd.read_parquet(self.path)
# Need tree + location lookup because gpd.query only looks over overlapping features
# - Get (lon, lat) of every point in the geometry column to make a tree
self.locs = esi_to_locs(self.gdf)
self.tree = cKDTree(np.vstack((self.locs.lon.values, self.locs.lat.values)).T)
def get_grs_region_for_each_row(self, grs: GRS) -> np.ndarray:
"""Given GRS data container, return GRS code for each row in ESI data as array"""
grs_codes_for_each_esi_row = self._get_grs_intersects(grs)
grs_codes_for_each_esi_row = self._fill_grs_nonintersects(grs_codes_for_each_esi_row, grs)
return grs_codes_for_each_esi_row
def _get_grs_intersects(self, grs: GRS, grs_code_column_name: str = 'OBJECTID') -> np.ndarray:
"""Given GRS data container, return ndarray of GRS codes for each row in ESI data.
Notes:
- Filled with -9999 which is used as a missing value flag to identify rows that do not intersect
"""
nrows = len(self.gdf)
esi_to_grs_region = np.ones((nrows,)) * -9999
for ix, row in self.gdf.iterrows():
for grs_region in grs.gdf.index:
                if row.geometry.intersects(grs.gdf.loc[grs_region, 'geometry']):
                    esi_to_grs_region[ix] = grs.gdf.loc[grs_region, grs_code_column_name]
break
return esi_to_grs_region
def _fill_grs_nonintersects(self, grs_codes_for_each_esi_row: np.ndarray, grs: GRS) -> np.ndarray:
"""Given array of GRS code for each ESI row, fill in missing GRS code using nearest neighbor.
Notes:
- Many points in ESI data do not intersect the GRS shape files
- The points missing a GRS code (flagged as -9999) use a nearest neighbor lookup to fill in
"""
esi_rows_missing_grs_ix = np.argwhere(grs_codes_for_each_esi_row == -9999)
for missing_ix in esi_rows_missing_grs_ix:
# There must be nicer syntax? But, this is robust
            x = self.gdf.iloc[missing_ix].geometry.values[0].geoms[0].xy[0][0]
            y = self.gdf.iloc[missing_ix].geometry.values[0].geoms[0].xy[1][0]
_, grs_locs_ix = grs.grs_tree.query((x, y))
missing_grs_code = grs.grs_locs[grs_locs_ix, 2]
grs_codes_for_each_esi_row[missing_ix] = missing_grs_code
return grs_codes_for_each_esi_row
def __str__(self):
return f'ESI for {self.path}'
def __repr__(self):
return f'ESI for {self.path}'
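# Hedged illustration (editor-added, not part of the original class): ESI.tree is a KD-tree
# over (lon, lat) points, so the nearest shoreline point for an arbitrary coordinate can be
# found with a single query; the returned index addresses rows of ESI.locs.
def _example_kdtree_lookup():
    pts = np.array([[-150.0, 60.0], [-151.0, 61.0]])
    tree = cKDTree(pts)
    dist, ix = tree.query((-150.1, 60.1))
    return dist, ix  # ix == 0, the closer of the two points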
def get_esi_npoints(gdf: gpd.GeoDataFrame) -> int:
"""Given exploded ESI, return the number of points in the file.
Notes:
- Can't extract this from the GDF because the points are exploded and encoded as linestrings.
"""
count = 0
for _, row in gdf.iterrows():
# row[0] - ORI
# row[1] - geometry which is LineString. len is number of points
npoints = len(np.array(row.geometry.xy).T)
count += npoints
return count
def clean_esi_code(esi_column: pd.Series) -> np.ndarray:
"""Given column of ESI codes, clean values, remove letters and return as integer array."""
cleaned_esi_codes = np.zeros(len(esi_column), dtype='i2')
    for i, value in enumerate(esi_column):
        cleaned_esi_codes[i] = clean_esi_string(value)
return cleaned_esi_codes
def esi_to_locs(esi: gpd.GeoDataFrame) -> pd.DataFrame:
"""Given ESI GeoDataFrame, return DataFrame of points with ESI codes and ids.
Notes:
    - The returned frame feeds the KD-tree used for nearest-point look-ups
"""
esi_exploded = esi.explode()
npoints = get_esi_npoints(esi_exploded)
# lon, lat, esilgen_, esilgen_id, esi value, esi row in dataframe
lons = np.zeros((npoints,), dtype='f4')
lats = np.zeros((npoints,), dtype='f4')
# max string length is 15 characters
esi_ids = np.empty((npoints,), dtype='U15')
esi_codes = np.zeros((npoints,), dtype='i4')
esi_rows = np.zeros((npoints,), dtype='i4')
# Iterate over each row
# - Extract x, y points from each point in the line
start_ix = 0
for ix, row in esi_exploded.iterrows():
# x,y = row[1] and transpose to be (n, 2)
line_locs = np.array(row.geometry.xy).T
# number of points in the line
nline_locs = len(line_locs)
end_ix = start_ix + nline_locs
lons[start_ix:end_ix] = line_locs[:, 0]
lats[start_ix:end_ix] = line_locs[:, 1]
esi_ids[start_ix:end_ix] = row.esi_id
esi_codes[start_ix:end_ix] = clean_esi_string(row.esi)
# Knowing the row number in the original DataFrame is useful for look-ups
esi_rows[start_ix:end_ix] = int(ix[0])
start_ix = end_ix
# return as a dataframe
df = pd.DataFrame(
{
'lon': lons,
'lat': lats,
'esi_id': pd.Series(esi_ids, dtype='U15'),
'esi_code': pd.Series(esi_codes, dtype='i4'),
'esi_row': | pd.Series(esi_rows, dtype='i4') | pandas.Series |
import numpy as np
import pandas as pd
from imblearn.over_sampling import SMOTE
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.neighbors import LocalOutlierFactor
from sklearn.preprocessing import StandardScaler
train_data_path = "train_data.csv"
labels_path = "train_labels.csv"
test_data_path = "test_data.csv"
def load_data_train_test_data():
genres_labels = np.array(pd.read_csv(labels_path, index_col=False, header=None))
genres = range(1, 11)
training_data_set = np.array(pd.read_csv(train_data_path, index_col=False, header=None))
pca = PCA(79)
scaler = StandardScaler()
training_data_set = scaler.fit_transform(training_data_set)
training_data_set = pca.fit_transform(training_data_set)
training_data_set = np.append(training_data_set, genres_labels, 1)
number_of_cols = training_data_set.shape[1]
train, test = train_test_split(training_data_set, test_size=0.25, random_state=12,
stratify=training_data_set[:, number_of_cols - 1])
train_x = train[:, :number_of_cols - 1]
train_y = train[:, number_of_cols - 1]
# sm = SMOTE()
# x_train_res, y_train_res = sm.fit_resample(train_x, train_y)
# train_x = preprocessing.normalize(train_x, norm='l2')
test_x = test[:, :number_of_cols - 1]
test_y = test[:, number_of_cols - 1]
return train_x, train_y, test_x, test_y, genres, scaler, pca
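# Editorial note (hedged): the scaler and PCA above are fit on the full dataset before the
# train/test split, which can leak information from the held-out rows into the transforms.
# If that is a concern, a common alternative is to split first and fit on the training part
# only. Illustrative sketch with hypothetical inputs:
def _example_fit_on_train_only(train_features, test_features, n_components=79):
    from sklearn.decomposition import PCA
    from sklearn.preprocessing import StandardScaler
    scaler = StandardScaler().fit(train_features)
    pca = PCA(n_components).fit(scaler.transform(train_features))
    return pca.transform(scaler.transform(train_features)), \
        pca.transform(scaler.transform(test_features))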
def load_train_data_rythym_only():
genres_labels = np.array(pd.read_csv(labels_path, index_col=False, header=None))
genres = range(1, 11)
training_data_set = np.array(pd.read_csv(train_data_path, index_col=False, header=None))[:, :168]
pca = PCA(100)
scaler = StandardScaler()
training_data_set = scaler.fit_transform(training_data_set)
# training_data_set = preprocessing.normalize(training_data_set, norm='l2')
training_data_set = pca.fit_transform(training_data_set)
training_data_set = np.append(training_data_set, genres_labels, 1)
number_of_cols = training_data_set.shape[1]
train, test = train_test_split(training_data_set, test_size=0.25, random_state=12,
stratify=training_data_set[:, number_of_cols - 1])
train_x = train[:, :number_of_cols - 1]
train_y = train[:, number_of_cols - 1]
sm = SMOTE()
# x_train_res, y_train_res = sm.fit_resample(train_x, train_y)
# train_x = preprocessing.normalize(train_x, norm='l2')
test_x = test[:, :number_of_cols - 1]
test_y = test[:, number_of_cols - 1]
return train_x, train_y, test_x, test_y, genres, scaler, pca
def load_train_data_chroma_only():
genres_labels = np.array(pd.read_csv(labels_path, index_col=False, header=None))
genres = range(1, 11)
training_data_set = np.array(pd.read_csv(train_data_path, index_col=False, header=None))[:, 169:216]
pca = PCA(40)
scaler = StandardScaler()
training_data_set = scaler.fit_transform(training_data_set)
training_data_set = pca.fit_transform(training_data_set)
training_data_set = np.append(training_data_set, genres_labels, 1)
number_of_cols = training_data_set.shape[1]
train, test = train_test_split(training_data_set, test_size=0.25, random_state=12,
stratify=training_data_set[:, number_of_cols - 1])
train_x = train[:, :number_of_cols - 1]
train_y = train[:, number_of_cols - 1]
# sm = SMOTE()
# x_train_res, y_train_res = sm.fit_resample(train_x, train_y)
# train_x = preprocessing.normalize(train_x, norm='l2')
test_x = test[:, :number_of_cols - 1]
test_y = test[:, number_of_cols - 1]
return train_x, train_y, test_x, test_y, genres, scaler, pca
def load_train_data_MFCC_only():
genres_labels = np.array(pd.read_csv(labels_path, index_col=False, header=None))
genres = range(1, 11)
training_data_set = np.array(pd.read_csv(train_data_path, index_col=False, header=None))[:, 217:]
pca = PCA(40)
scaler = StandardScaler()
training_data_set = scaler.fit_transform(training_data_set)
training_data_set = pca.fit_transform(training_data_set)
training_data_set = np.append(training_data_set, genres_labels, 1)
number_of_cols = training_data_set.shape[1]
train, test = train_test_split(training_data_set, test_size=0.25, random_state=12,
stratify=training_data_set[:, number_of_cols - 1])
train_x = train[:, :number_of_cols - 1]
train_y = train[:, number_of_cols - 1]
# sm = SMOTE()
# x_train_res, y_train_res = sm.fit_resample(train_x, train_y)
# train_x = preprocessing.normalize(train_x, norm='l2')
test_x = test[:, :number_of_cols - 1]
test_y = test[:, number_of_cols - 1]
return train_x, train_y, test_x, test_y, genres, scaler, pca
def load_train_data_with_PCA_per_type():
genres_labels = np.array(pd.read_csv(labels_path, index_col=False, header=None))
genres_labels = genres_labels.reshape((genres_labels.shape[0],))
genres = range(1, 11)
training_data_set = np.array(pd.read_csv(train_data_path, index_col=False, header=None))
rythym = np.concatenate((training_data_set[:, :73], training_data_set[:, 74:168]), axis=1)
chroma = training_data_set[:, 169:216]
mfcc = training_data_set[:, 220:]
# pca_rythym = PCA(0.8)
# pca_chroma = PCA(0.8)
# pca_mfcc = PCA(0.8)
scaler_rythym = StandardScaler()
scaler_chroma = StandardScaler()
scaler_mfcc = StandardScaler()
rythym = scaler_rythym.fit_transform(rythym)
chroma = scaler_chroma.fit_transform(chroma)
mfcc = scaler_mfcc.fit_transform(mfcc)
rythym = preprocessing.normalize(rythym, norm='l2')
chroma = preprocessing.normalize(chroma, norm='l2')
mfcc = preprocessing.normalize(mfcc, norm='l2')
# rythym = pca_rythym.fit_transform(rythym)
# chroma = pca_chroma.fit_transform(chroma)
# mfcc = pca_mfcc.fit_transform(mfcc)
training_data_set = np.concatenate((rythym, chroma, mfcc), axis=1)
# sm = SMOTE()
# train_x, train_y = sm.fit_resample(train_x, train_y)
# train_x = preprocessing.normalize(train_x, norm='l2')
training_data_set = np.append(training_data_set, genres_labels[:, np.newaxis], 1)
number_of_cols = training_data_set.shape[1]
train, test = train_test_split(training_data_set, test_size=0.25, random_state=12,
stratify=training_data_set[:, number_of_cols - 1])
train_x = train[:, :number_of_cols - 1]
train_y = train[:, number_of_cols - 1]
# sm = SMOTE()
# x_train_res, y_train_res = sm.fit_resample(train_x, train_y)
# train_x = preprocessing.normalize(train_x, norm='l2')
test_x = test[:, :number_of_cols - 1]
test_y = test[:, number_of_cols - 1]
return train_x, train_y, test_x, test_y, genres, scaler_rythym, scaler_chroma, scaler_mfcc
def visualisation_data():
genres_labels = np.array(pd.read_csv(labels_path, index_col=False, header=None))
genres_labels = genres_labels.reshape((genres_labels.shape[0],))
genres = range(1, 11)
training_data_set = np.array(pd.read_csv(train_data_path, index_col=False, header=None))
pca = PCA(2)
training_data_set = pca.fit_transform(training_data_set, genres_labels)
# sm = SMOTE()
# train_x, train_y = sm.fit_resample(train_x, train_y)
# train_x = preprocessing.normalize(train_x, norm='l2')
return training_data_set, genres_labels, genres
def load_train_data_with_PCA_per_type_drop_column(which_column):
genres_labels = np.array(pd.read_csv(labels_path, index_col=False, header=None))
genres_labels = genres_labels.reshape((genres_labels.shape[0],))
genres = range(1, 11)
training_data_set = np.array(pd.read_csv(train_data_path, index_col=False, header=None))
if 0 <= which_column < 168:
rythym = np.concatenate((training_data_set[:, :which_column], training_data_set[:, which_column + 1:168]),
axis=1)
chroma = training_data_set[:, 169:216]
mfcc = training_data_set[:, 220:]
elif 169 <= which_column < 216:
rythym = training_data_set[:, :168]
chroma = np.concatenate((training_data_set[:, 169:which_column], training_data_set[:, which_column + 1:216]),
axis=1)
mfcc = training_data_set[:, 220:]
else:
rythym = training_data_set[:, :168]
chroma = training_data_set[:, 169:216]
mfcc = np.concatenate((training_data_set[:, 220:which_column], training_data_set[:, which_column + 1:]), axis=1)
# pca_rythym = PCA(0.8)
# pca_chroma = PCA(0.8)
# pca_mfcc = PCA(0.8)
scaler_rythym = StandardScaler()
scaler_chroma = StandardScaler()
scaler_mfcc = StandardScaler()
rythym = scaler_rythym.fit_transform(rythym)
chroma = scaler_chroma.fit_transform(chroma)
mfcc = scaler_mfcc.fit_transform(mfcc)
rythym = preprocessing.normalize(rythym, norm='l2')
chroma = preprocessing.normalize(chroma, norm='l2')
mfcc = preprocessing.normalize(mfcc, norm='l2')
# rythym = pca_rythym.fit_transform(rythym)
# chroma = pca_chroma.fit_transform(chroma)
# mfcc = pca_mfcc.fit_transform(mfcc)
training_data_set = np.concatenate((rythym, chroma, mfcc), axis=1)
# sm = SMOTE()
# train_x, train_y = sm.fit_resample(train_x, train_y)
# train_x = preprocessing.normalize(train_x, norm='l2')
    return training_data_set, genres_labels, genres, scaler_rythym, scaler_chroma, scaler_mfcc
    # pca_rythym, pca_chroma, pca_mfcc
def load_test_data():
return np.array(pd.read_csv(test_data_path, index_col=False, header=None))
def load_train_data():
return np.array( | pd.read_csv(train_data_path, index_col=False, header=None) | pandas.read_csv |
from contextlib import contextmanager, ExitStack
from copy import deepcopy
from functools import partial, reduce
import itertools
import re
import tempfile
from typing import Callable, Iterable, Optional, Union
import warnings
import humanize
import IPython.display
from IPython.core.getipython import get_ipython
import matplotlib as mpl
import matplotlib.pyplot as plt
from mizani.transforms import trans
from more_itertools import flatten
import numpy as np
import pandas as pd
import PIL
import plotnine # For export
from plotnine import * # For export
from plotnine.data import * # For export
from plotnine.stats.binning import freedman_diaconis_bins
import potoo.mpl_backend_xee # Used by ~/.matplotlib/matplotlibrc
from potoo.util import or_else, puts, singleton, tap
ipy = get_ipython() # None if not in ipython
def get_figsize_named(size_name):
# Determined empirically, and fine-tuned for atom splits with status-bar + tab-bar
mpl_aspect = 2/3 # Tuned using plotnine, but works the same for mpl/sns
R_aspect = mpl_aspect # Seems fine
figsizes_mpl = dict(
# Some common sizes that are useful; add more as necessary
inline_short = dict(width=12, aspect_ratio=mpl_aspect * 1/2),
inline = dict(width=12, aspect_ratio=mpl_aspect * 1),
square = dict(width=12, aspect_ratio=1),
half = dict(width=12, aspect_ratio=mpl_aspect * 2),
full = dict(width=24, aspect_ratio=mpl_aspect * 1),
half_dense = dict(width=24, aspect_ratio=mpl_aspect * 2),
full_dense = dict(width=48, aspect_ratio=mpl_aspect * 1),
)
figsizes = dict(
mpl=figsizes_mpl,
R={
k: dict(width=v['width'], height=v['width'] * v['aspect_ratio'] / mpl_aspect * R_aspect)
for k, v in figsizes_mpl.items()
},
)
if size_name not in figsizes['mpl']:
raise ValueError(f'Unknown figsize name[{size_name}]')
return {k: figsizes[k][size_name] for k in figsizes.keys()}
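# Illustrative usage (hedged, editor-added): figsize() below looks presets up by name, e.g.
#   get_figsize_named('inline')['mpl']  # width/aspect_ratio preset for matplotlib/plotnine
#   get_figsize_named('full')['R']      # width/height preset for the R backend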
def plot_set_defaults():
figsize()
plot_set_default_mpl_rcParams()
plot_set_plotnine_defaults()
plot_set_jupyter_defaults()
plot_set_R_defaults()
def figure_format(figure_format: str = None):
"""
Set figure_format: one of 'svg', 'retina', 'png'
"""
if figure_format:
assert figure_format in ['svg', 'retina', 'png'], f'Unknown figure_format[{figure_format}]'
ipy.run_line_magic('config', f"InlineBackend.figure_format = {figure_format!r}")
return or_else(None, lambda: ipy.run_line_magic('config', 'InlineBackend.figure_format'))
# XXX if the new version of figsize works
# @contextmanager
# def with_figsize(*args, **kwargs):
# saved_kwargs = get_figsize()
# try:
# figsize(*args, **kwargs)
# yield
# plt.show() # Because ipy can't evaluate the result of the nested block to force the automatic plt.show()
# finally:
# figsize(**saved_kwargs)
def figsize(*args, **kwargs):
"""
Set theme_figsize(...) as global plotnine.options + mpl.rcParams:
- https://plotnine.readthedocs.io/en/stable/generated/plotnine.themes.theme.html
- http://matplotlib.org/users/customizing.html
Can be used either as a global mutation (`figsize(...)`) or a context manager (`with figsize(...)`)
"""
to_restore = get_figsize()
_set_figsize(*args, **kwargs)
@contextmanager
def ctx():
try:
yield
plt.show() # Because ipy can't evaluate the result of the nested block to force the automatic plt.show()
finally:
# TODO RESTORE
figsize(**to_restore)
return ctx()
def _set_figsize(*args, **kwargs):
# TODO Unwind conflated concerns:
# - (See comment in get_figsize, below)
kwargs.pop('_figure_format', None)
kwargs.pop('_Rdefaults', None)
# Passthru to theme_figsize
t = theme_figsize(*args, **kwargs)
[width, height] = figure_size = t.themeables['figure_size'].properties['value']
aspect_ratio = t.themeables['aspect_ratio'].properties['value']
dpi = t.themeables['dpi'].properties['value']
# Set mpl figsize
mpl.rcParams.update(t.rcParams)
# Set plotnine figsize
plotnine.options.figure_size = figure_size
plotnine.options.aspect_ratio = aspect_ratio
plotnine.options.dpi = dpi # (Ignored for figure_format='svg')
# Set %R figsize
Rdefaults = plot_set_R_figsize(
width=width,
height=height,
units='in', # TODO Does this work with svg? (works with png, at least)
res=dpi * 2, # Make `%Rdevice png` like mpl 'retina' (ignored for `%Rdevice svg`)
)
# Show feedback to user
return get_figsize()
def get_figsize():
return dict(
width=plotnine.options.figure_size[0],
height=plotnine.options.figure_size[1],
aspect_ratio=plotnine.options.aspect_ratio,
dpi=plotnine.options.dpi,
# TODO Unwind conflated concerns:
# - We return _figure_format/_Rdefaults to the user so they have easy visibility into them
# - But our output is also used as input to figsize(**get_figsize()), so figsize has to filter them out
_figure_format=figure_format(),
_Rdefaults=plot_get_R_figsize(),
)
# For plotnine
class theme_figsize(theme):
"""
plotnine theme with defaults for figure_size width + aspect_ratio (which overrides figure_size height if defined):
- aspect is allowed as an alias for aspect_ratio
- https://plotnine.readthedocs.io/en/stable/generated/plotnine.themes.theme.html#aspect_ratio-and-figure_size
"""
def __init__(self, name='inline', width=None, height=None, aspect_ratio=None, aspect=None, dpi=72):
aspect_ratio = aspect_ratio or aspect # Alias
if name:
size = get_figsize_named(name)['mpl']
width = width or size.get('width')
height = height or size.get('height')
aspect_ratio = aspect_ratio or size.get('aspect_ratio')
if not width and height and aspect_ratio:
width = height / aspect_ratio
if width and not height and aspect_ratio:
height = width * aspect_ratio
if width and height and not aspect_ratio:
aspect_ratio = height / width
super().__init__(
# height is ignored by plotnine when aspect_ratio is given, but supply anyway so that we can set theme.rcParams
# into mpl.rcParams since the latter has no notion of aspect_ratio [TODO Submit PR to fix]
figure_size=[width, height],
aspect_ratio=aspect_ratio,
dpi=dpi,
)
@property
def rcParams(self):
rc = theme.rcParams.fget(self) # (Idiom to do super() for a property)
# Manual retina, since plt.savefig doesn't respond to `%config InlineBackend.figure_format` like plt.show
# - TODO plt.savefig produces 2x bigger imgs than plt.show. Figure out how to achieve 1x with non-blurry fonts
if figure_format() == 'retina':
if rc['savefig.dpi'] == 'figure':
rc['savefig.dpi'] = rc['figure.dpi']
rc['savefig.dpi'] *= 2
return rc
@property
def figsize(self):
return self.themeables['figure_size'].properties['value']
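# Illustrative usage (hedged, editor-added): theme_figsize composes like any other plotnine
# theme, e.g. `ggplot(df, aes('x', 'y')) + geom_point() + theme_figsize('full')`, and its
# rcParams property can be applied on its own, e.g.
# `mpl.rcParams.update(theme_figsize(aspect=1/2).rcParams)`.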
# ~/.matplotlib/matplotlibrc isn't read by ipykernel, so we call this from ~/.pythonrc, which is read
# - TODO What's the right way to init mpl.rcParams?
def plot_set_default_mpl_rcParams():
mpl.rcParams['figure.facecolor'] = 'white' # Match savefig.facecolor
mpl.rcParams['savefig.bbox'] = 'tight' # Else plt.savefig adds lots of surrounding whitespace that plt.show doesn't
mpl.rcParams['image.interpolation'] = 'nearest' # Don't interpolate, show square pixels
# http://matplotlib.org/users/colormaps.html
# - TODO Looks like no way to set default colormap for pandas df.plot? [yes, but hacky: https://stackoverflow.com/a/41598326/397334]
mpl.rcParams['image.cmap'] = 'magma_r' # [2] perceptually uniform (light -> dark)
# mpl.rcParams['image.cmap'] = 'inferno_r' # [2] perceptually uniform (light -> dark)
# mpl.rcParams['image.cmap'] = 'magma' # [2] perceptually uniform (dark -> light)
# mpl.rcParams['image.cmap'] = 'inferno' # [2] perceptually uniform (dark -> light)
# mpl.rcParams['image.cmap'] = 'Greys' # [2] black-white (nonlinear)
# mpl.rcParams['image.cmap'] = 'gray_r' # [1] black-white
    # TODO Sync more carefully with ~/.matplotlib/matplotlibrc?
def plot_set_plotnine_defaults():
ignore_warning_plotnine_stat_bin_binwidth()
def ignore_warning_plotnine_stat_bin_binwidth():
# Don't warn from geom_histogram/stat_bin if you use the default bins/binwidth
# - Default bins is computed via freedman_diaconis_bins, which is dynamic and pretty good, so don't discourage it
warnings.filterwarnings('ignore',
category=plotnine.exceptions.PlotnineWarning,
module=re.escape('plotnine.stats.stat_bin'),
message=r"'stat_bin\(\)' using 'bins = \d+'\. Pick better value with 'binwidth'\.",
)
def plot_set_jupyter_defaults():
if ipy:
# 'svg' is pretty, 'retina' is the prettier version of 'png', and 'png' is ugly (on retina macs)
# - But the outside world prefers png to svg (e.g. uploading images to github, docs, slides)
figure_format('retina')
#
# plotnine
#
# HACK Add white bg to theme_minimal/theme_void, which by default have transparent bg
theme_minimal = lambda **kwargs: plotnine.theme_minimal (**kwargs) + theme(plot_background=element_rect('white'))
theme_void = lambda **kwargs: plotnine.theme_void (**kwargs) + theme(plot_background=element_rect('white'))
def gg_to_img(g: ggplot, **kwargs) -> PIL.Image.Image:
"""Render a ggplot as an image"""
g.draw()
return plt_to_img(**kwargs)
def ggbar(df, x='x', **geom_kw):
return _gg(df, mapping=aes(x=x), geom=geom_bar, geom_kw=geom_kw)
def ggcol(df, x='x', y='y', **geom_kw):
return _gg(df, mapping=aes(x=x, y=y), geom=geom_col, geom_kw=geom_kw)
def ggbox(df, x='x', y='y', **geom_kw):
return _gg(df, mapping=aes(x=x, y=y), geom=geom_boxplot, geom_kw=geom_kw)
def gghist(df, x='x', **geom_kw):
return _gg(df, mapping=aes(x=x), geom=geom_histogram, geom_kw=geom_kw)
def ggdens(df, x='x', **geom_kw):
return _gg(df, mapping=aes(x=x), geom=geom_density, geom_kw=geom_kw)
def ggpoint(df, x='x', y='y', **geom_kw):
return _gg(df, mapping=aes(x=x, y=y), geom=geom_point, geom_kw=geom_kw)
def ggline(df, x='x', y='y', **geom_kw):
return _gg(df, mapping=aes(x=x, y=y), geom=geom_line, geom_kw=geom_kw)
def _gg(df, mapping, geom, geom_kw):
if isinstance(df, np.ndarray):
df = pd.Series(df)
if isinstance(df, pd.Series):
k = df.name or 'x'
df = | pd.DataFrame({k: df}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
These test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
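# Hedged illustrative addition (not part of the original suite): a concrete spot-check of the
# allow_sets behaviour exercised parametrically above.
def test_is_list_like_set_example():
    assert inference.is_list_like({'a', 1})
    assert not inference.is_list_like({'a', 1}, allow_sets=False)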
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert | inference.is_list_like(result) | pandas.core.dtypes.inference.is_list_like |
import re
import pandas
import spacy
from spacytextblob.spacytextblob import SpacyTextBlob
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sid = SentimentIntensityAnalyzer()
# nlp = spacy.load('en_core_web_lg')
nlp = spacy.load('en_core_web_md')
nlp.add_pipe('spacytextblob')
def subcatego(cat_mess: str) -> str:
r = re.compile('slug...([a-zA-Z 0-9]+)/([a-zA-Z 0-9]+)')
f = r.findall(cat_mess)
if len(f)>0:
return f[0][-1] # only difference here
else:
s = re.compile('slug...([a-zA-Z 0-9]+)')
g = s.findall(cat_mess)
return g[0] # because some without subcat
def catego(cat_mess: str) -> str:
r = re.compile('slug...([a-zA-Z 0-9]+)/([a-zA-Z 0-9]+)')
f = r.findall(cat_mess)
if len(f)>0:
return f[0][0] # only difference here
else:
s = re.compile('slug...([a-zA-Z 0-9]+)')
g = s.findall(cat_mess)
return g[0] # because some without subcat
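# Hedged illustrative example (editor-added): for a raw category blob such as
# '{"slug":"games/tabletop games"}', catego() yields the parent slug and subcatego() the
# child; when the slug has no '/', both return the single slug value.
def _example_catego_split():
    blob = '{"slug":"games/tabletop games","name":"Tabletop Games"}'
    return catego(blob), subcatego(blob)  # ('games', 'tabletop games')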
def wrangle2(df):
"""
For cleaning pd.DataFrame read from csv available at
<https://webrobots.io/kickstarter-datasets/>
"""
df = df.copy()
observation_threshold = len(df)/2
df.dropna(thresh=observation_threshold , axis=1, inplace=True)
df.dropna(subset=['blurb'], inplace=True)
df.dropna(subset=['location'], inplace=True)
df.drop_duplicates('id',inplace=True)
# df.set_index('id',inplace=True)
df.reset_index(drop=True,inplace=True)
# class_to_drop1 = df[df['state'] == 'canceled'].index
# df.drop(class_to_drop1, inplace=True)
df = df.loc[df.state != 'live']
df.loc[df.state == 'canceled', 'state'] = 0
df.loc[df.state == 'failed', 'state'] = 0
df.loc[df.state == 'successful', 'state'] = 1 #this was it :)
df = df.rename(columns={'state': 'success'})
df['goal_usd'] = round(df['goal'] * df['static_usd_rate'],2)
for col in ['created_at', 'deadline', 'launched_at']:
df[col] = | pandas.to_datetime(df[col], origin='unix', unit='s') | pandas.to_datetime |
""" MCH API ver 0.1
Author: <NAME>
License: CC-BY-SA 4.0
2020 Mexico
"""
import os
from flask import Flask, jsonify, json, Response
from flask_restful import Api, Resource, reqparse, abort
from flask_mysqldb import MySQL
import pandas as pd
import numpy as np
import json
from os.path import abspath, dirname, join
app = Flask(__name__)
# Mysql connection
app.config['MYSQL_HOST'] = os.getenv('MCH_DB_HOST')
app.config['MYSQL_USER'] = os.getenv('MCH_DB_USER')
app.config['MYSQL_PASSWORD'] = os.getenv('MCH_DB_PASSWORD')
app.config['MYSQL_DB'] = os.getenv('MCH_DB_NAME')
app.config['MYSQL_PORT'] = int(os.getenv('MCH_DB_PORT'))
app.config['SECRET_KEY'] = os.getenv("APP_SECRET")
mysql = MySQL(app)
api = Api(app)
# dataframe for stations table
stnmdata = pd.DataFrame()
# read MCH language definition from mch.dbn
filemch = open('mch.dbn', 'r')
filemch.readline() # odbc connector
filemch.readline() # mysql5
filemch.readline() # interface language
mchlang = filemch.readline() # database language
# read fields and tables names definition file
deftbfl = pd.read_csv('MCHtablasycampos.def', sep = "\t", names = ['sec','type', 'id_sec', 'esp', 'eng', 'fra', '4', 'comment'], encoding='utf_8')
# new dataframe for especific languaje
ltbfl = pd.DataFrame()
# looking for especific fields and tables for the languaje
if int(mchlang) == 1:
ltbfl = deftbfl[['id_sec','esp']]
ltbfl.set_index('id_sec')
if int(mchlang) == 2:
ltbfl = deftbfl[['id_sec','eng']]
ltbfl.set_index('id_sec')
if int(mchlang) == 3:
ltbfl = deftbfl[['id_sec','fra']]
ltbfl.set_index('id_sec')
def deg_to_dms(deg):
d = int(deg)
md = abs(deg - d) * 60
m = int(md)
sd = (md - m) * 60
return [d, m, sd]
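# Illustrative example (hedged, editor-added): deg_to_dms(-99.1332) returns approximately
# [-99, 7, 59.52], i.e. the sign stays on the degrees while minutes and decimal seconds come
# from the absolute fractional part.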
class stations(Resource):
def get(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
strqry='select * from ' +stntable.iloc[0,1] +' order by ' +stnfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
stnmdata = pd.DataFrame(data=dataqry,columns=['Station','StationName','StationName2','TimeZone','Longitude','Latitude','Altitude','Longitude2','Latitude2','DMSlongitude','DMSLatitude','Statee','RegManagmt','Catchment','Subcatchment',
'OperatnlRegion','HydroReg','RH(2)','Municipality','CodeB','CodeG','CodeCB','CodePB','CodeE','CodeCL','CodeHG','CodePG','CodeNw','Code1','Code2','Code3','MaxOrdStrgLvl','MaxOrdStrgVol',
'MaxExtStrgLvl','MaxExtStrgVol','SpillwayLevel','SpillwayStorage','FreeSpillwayLevel','FreeSpillwayStorage','DeadStrgLevel','DeadStrgCapac','UsableStorageCapLev','UsableStorage','HoldingStorage',
'Key1fil','Key2fil','Key3fil','CritLevelSta','MinLevelSta','MaxLevelSta','CritFlow','MinDischarge','MaxDischarge','Stream','Distance','Infrastructure','Type','Usee'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(stations, "/API/stations")
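# Illustrative usage (hedged; host and port are assumptions, not defined in this file):
#   curl http://localhost:5000/API/stations
# returns the full stations table as a JSON array of records.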
qry_station_req_arg = reqparse.RequestParser()
pars = qry_station_req_arg.add_argument("stn_id",type=str,help="Station ID",required=True)
class qry_station(Resource):
def get(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
parser = reqparse.RequestParser()
parser.add_argument('stn_id')
args = parser.parse_args()
stn_id = args.get('stn_id')
strqry='select * from ' +stntable.iloc[0,1] +' where ' +stnfield.iloc[0,1] +'="'+ stn_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
qrystation = qry.fetchall()
rcount=qry.rowcount
qry.close
if rcount > 0:
stnmdata = pd.DataFrame(data=qrystation,columns=['Station','StationName','StationName2','TimeZone','Longitude','Latitude','Altitude','Longitude2','Latitude2','DMSlongitude','DMSLatitude','Statee','RegManagmt','Catchment','Subcatchment',
'OperatnlRegion','HydroReg','RH','Municipality','CodeB','CodeG','CodeCB','CodePB','CodeE','CodeCL','CodeHG','CodePG','CodeNw','Code1','Code2','Code3','MaxOrdStrgLvl','MaxOrdStrgVol',
'MaxExtStrgLvl','MaxExtStrgVol','SpillwayLevel','SpillwayStorage','FreeSpillwayLevel','FreeSpillwayStorage','DeadStrgLevel','DeadStrgCapac','UsableStorageCapLev','UsableStorage','HoldingStorage',
'Key1fil','Key2fil','Key3fil','CritLevelSta','MinLevelSta','MaxLevelSta','CritFlow','MinDischarge','MaxDischarge','Stream','Distance','Infrastructure','Type','Usee'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Station not found...")
#abort_if_stn_not_exist("stn_id")
return parsed
def post(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('stn_id')
parser.add_argument('stn_name')
parser.add_argument('stn_name2')
parser.add_argument('t_zone')
parser.add_argument('long')
parser.add_argument('lat')
parser.add_argument('alt')
parser.add_argument('state_id')
parser.add_argument('reg_m')
parser.add_argument('catchm')
parser.add_argument('s_cat')
parser.add_argument('o_reg')
parser.add_argument('hydro_r')
parser.add_argument('rh')
parser.add_argument('mun_id')
parser.add_argument('mosl')
parser.add_argument('mosv')
parser.add_argument('mesl')
parser.add_argument('mesv')
parser.add_argument('s_level')
parser.add_argument('s_stor')
parser.add_argument('fs_level')
parser.add_argument('fs_stor')
parser.add_argument('ds_level')
parser.add_argument('ds_cap')
parser.add_argument('us_capl')
parser.add_argument('ustor')
parser.add_argument('hstor')
parser.add_argument('crl_s')
parser.add_argument('mnl_s')
parser.add_argument('mxl_s')
parser.add_argument('cr_f')
parser.add_argument('mn_dis')
parser.add_argument('mx_dis')
parser.add_argument('stream')
parser.add_argument('dist')
parser.add_argument('infr')
parser.add_argument('type')
parser.add_argument('use')
args = parser.parse_args()
# retrieve parameters
jfile = args.get('file')
stn_id = args.get('stn_id')
stn_name = args.get('stn_name')
stn_name2 = args.get('stn_name2')
t_zone = args.get('t_zone')
long2 = args.get('long')
lat2 = args.get('lat')
alt = args.get('alt')
state_id = args.get('state_id')
reg_m = args.get('reg_m')
catchm = args.get('catchm')
s_cat = args.get('s_cat')
o_reg = args.get('o_reg')
hydro_r = args.get('hydro_r')
rh = args.get('rh')
mun_id = args.get('mun_id')
mosl = args.get('mosl')
mosv = args.get('mosv')
mesl = args.get('mesl')
mesv = args.get('mesv')
s_level = args.get('s_level')
s_stor = args.get('s_stor')
fs_level = args.get('fs_level')
fs_stor = args.get('fs_stor')
ds_level = args.get('ds_level')
ds_cap = args.get('ds_cap')
us_capl = args.get('us_capl')
ustor = args.get('ustor')
hstor = args.get('hstor')
crl_s = args.get('crl_s')
mnl_s = args.get('mnl_s')
mxl_s = args.get('mxl_s')
cr_f = args.get('cr_f')
mn_dis = args.get('mn_dis')
mx_dis = args.get('mx_dis')
stream = args.get('stream')
dist = args.get('dist')
infr = args.get('infr')
typee = args.get('type')
usee = args.get('use')
        # check whether the input arrived as a JSON file or as individual parameters
if jfile in (None, ''):
Latitude=deg_to_dms(float(lat2))
Longitude=deg_to_dms(float(long2))
slong2=str(Longitude[0])+'°'+str(Longitude[1]) +'´' +str(Longitude[2])
slat2=str(Latitude[0])+'°'+str(Latitude[1]) +'´' +str(Latitude[2])
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +str(stn_id) +'","' +str(stn_name) +'","' +str(stn_name2) +'","' +str(t_zone) +'","' + str(long2)
+ '","' +str(lat2) +'","' +str(alt) +'","' +str(long2) +'","' +str(lat2) +'","' +slong2 +'","' +slat2 +'","' +str(state_id) +'","' +str(reg_m)
+ '","' +str(catchm) +'","' +str(s_cat) +'","' +str(o_reg) +'","' +str(hydro_r) +'","' +str(rh) +'","' +str(mun_id) +'","","","","","","","","","","","","","' + str(mosl)
+ '","' +str(mosv) +'","' +str(mesl) +'","' +str(mesv) +'","' +str(s_level) +'","' +str(s_stor) +'","' +str(fs_level) +'","' + str(fs_stor)
+ '","' +str(ds_level) +'","' +str(ds_cap) +'","' +str(us_capl) +'","' +str(ustor) +'","' +str(hstor) +'","","","","' +str(crl_s) +'","' + str(mnl_s)
+ '","' +str(mxl_s) +'","' +str(cr_f) +'","' +str(mn_dis) +'","' +str(mx_dis) +'","' +str(stream) +'","' +str(dist) +'","' +str(infr) +'","' + str(typee)
+ '","' +str(usee) +'")')
qry.execute(strqry)
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'","' +data.iloc[int(n),3] +'","' + data.iloc[int(n),4]
+ '","' +data.iloc[int(n),5] +'","' +str(data.iloc[int(n),6]) +'","' +str(data.iloc[int(n),7]) +'","' +str(data.iloc[int(n),8]) +'","' +data.iloc[int(n),9] +'","' +data.iloc[int(n),10] +'","' +data.iloc[int(n),11]
+ '","' +data.iloc[int(n),12] + '","' +data.iloc[int(n),13] +'","' +data.iloc[int(n),14] +'","' +data.iloc[int(n),15] +'","' +data.iloc[int(n),16] +'","' +data.iloc[int(n),17] +'","' +data.iloc[int(n),18]
+ '","' +data.iloc[int(n),19] +'","' +data.iloc[int(n),20] +'","' +data.iloc[int(n),21] +'","' +data.iloc[int(n),22] +'","' +data.iloc[int(n),23] +'","' +data.iloc[int(n),24] +'","' +data.iloc[int(n),25]
+ '","' +data.iloc[int(n),26] + '","' +data.iloc[int(n),27] +'","' +data.iloc[int(n),28] +'","' +data.iloc[int(n),29] +'","' +data.iloc[int(n),30] +'","' +data.iloc[int(n),31]
+ '","' +data.iloc[int(n),32] +'","' +data.iloc[int(n),33] +'","' +data.iloc[int(n),34] +'","' +data.iloc[int(n),35] +'","' +data.iloc[int(n),36] +'","' +data.iloc[int(n),37] +'","' + data.iloc[int(n),38]
+ '","' +data.iloc[int(n),39] +'","' +data.iloc[int(n),40] +'","' +data.iloc[int(n),41] +'","' +data.iloc[int(n),42] +'","' +data.iloc[int(n),43] +'","' +data.iloc[int(n),44] +'","' +data.iloc[int(n),45]
+ '","' +data.iloc[int(n),46] +'","' +data.iloc[int(n),47] +'","' + data.iloc[int(n),48] +'","' +data.iloc[int(n),49] +'","' +data.iloc[int(n),50] +'","' +data.iloc[int(n),51] +'","' +data.iloc[int(n),52]
+ '","' +data.iloc[int(n),53] +'","' +data.iloc[int(n),54] +'","' +data.iloc[int(n),55] +'","' +data.iloc[int(n),56] +'","' +data.iloc[int(n),57] +'")')
qry.execute(strqry)
else:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'","' +data.iloc[0,2] +'","' +data.iloc[0,3] +'","' + data.iloc[0,4]
+ '","' +data.iloc[0,5] +'","' +str(data.iloc[0,6]) +'","' +str(data.iloc[0,7]) +'","' +str(data.iloc[0,8]) +'","' +data.iloc[0,9] +'","' +data.iloc[0,10] +'","' +data.iloc[0,11]
+ '","' +data.iloc[0,12] + '","' +data.iloc[0,13] +'","' +data.iloc[0,14] +'","' +data.iloc[0,15] +'","' +data.iloc[0,16] +'","' +data.iloc[0,17] +'","' +data.iloc[0,18]
+ '","' +data.iloc[0,19] +'","' +data.iloc[0,20] +'","' +data.iloc[0,21] +'","' +data.iloc[0,22] +'","' +data.iloc[0,23] +'","' +data.iloc[0,24] +'","' +data.iloc[0,25]
+ '","' +data.iloc[0,26] + '","' +data.iloc[0,27] +'","' +data.iloc[0,28] +'","' +data.iloc[0,29] +'","' +data.iloc[0,30] +'","' +data.iloc[0,31]
+ '","' +data.iloc[0,32] +'","' +data.iloc[0,33] +'","' +data.iloc[0,34] +'","' +data.iloc[0,35] +'","' +data.iloc[0,36] +'","' +data.iloc[0,37] +'","' + data.iloc[0,38]
+ '","' +data.iloc[0,39] +'","' +data.iloc[0,40] +'","' +data.iloc[0,41] +'","' +data.iloc[0,42] +'","' +data.iloc[0,43] +'","' +data.iloc[0,44] +'","' +data.iloc[0,45]
+ '","' +data.iloc[0,46] +'","' +data.iloc[0,47] +'","' + data.iloc[0,48] +'","' +data.iloc[0,49] +'","' +data.iloc[0,50] +'","' +data.iloc[0,51] +'","' +data.iloc[0,52]
+ '","' +data.iloc[0,53] +'","' +data.iloc[0,54] +'","' +data.iloc[0,55] +'","' +data.iloc[0,56] +'","' +data.iloc[0,57] +'")')
qry.execute(strqry)
return 'Station stored',201
def delete(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
parser = reqparse.RequestParser()
parser.add_argument('stn_id')
args = parser.parse_args()
stn_id = args.get('stn_id')
strqry='delete from ' +stntable.iloc[0,1] +' where ' +stnfield.iloc[0,1] +'="'+ stn_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'Station deleted',204
api.add_resource(qry_station, "/API/stations/qry_station")
class stngroups(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntGruposestac']
nfield = ltbfl[ltbfl['id_sec'] == 'ncGrupoEstac']
strqry='select distinct(' +nfield.iloc[0,1] +') from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
stnmdata = pd.DataFrame(data=dataqry,columns=['Stngroup'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(stngroups, "/API/stngroups")
class qry_stngroup(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntGruposestac']
nfield = ltbfl[ltbfl['id_sec'] == 'ncGrupoEstac']
parser = reqparse.RequestParser()
parser.add_argument('stngp_id')
args = parser.parse_args()
stngp_id = args.get('stngp_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ stngp_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Stngroup','Secuen','Station'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Stationgroup not found...")
return parsed
def post(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntGruposestac']
nfield = ltbfl[ltbfl['id_sec'] == 'ncGrupoEstac']
parser = reqparse.RequestParser()
        parser.add_argument('file')
        args = parser.parse_args()
        jfile = args.get('file')
        f=open(jfile,'r')
        filej = f.read()
        f.close()
        jdata = json.loads(filej)
        data = pd.DataFrame(jdata)
        tdata=len(data.index)
        rows=list(range(0,tdata))
        for n in rows:
            strqry = ('insert ignore into ' +ntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'")')
qry.execute(strqry)
return 'Stationgroup stored',201
def delete(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntGruposestac']
nfield = ltbfl[ltbfl['id_sec'] == 'ncGrupoEstac']
parser = reqparse.RequestParser()
parser.add_argument('stngp_id')
args = parser.parse_args()
stngp_id = args.get('stngp_id')
strqry='delete from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ stngp_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'Stationgroup deleted',204
api.add_resource(qry_stngroup, "/API/stngroups/qry_stngroup")
class variables(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntVariables']
nfield = ltbfl[ltbfl['id_sec'] == 'ncVariable']
strqry='select distinct(' +nfield.iloc[0,1] +') from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
stnmdata = pd.DataFrame(data=dataqry,columns=['Variable'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(variables, "/API/variables")
class qry_variable(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntVariables']
nfield = ltbfl[ltbfl['id_sec'] == 'ncVariable']
parser = reqparse.RequestParser()
parser.add_argument('var_id')
args = parser.parse_args()
var_id = args.get('var_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ var_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Variable','VariabAbbrev','VariabDescrn','TableName','Unit','TypeDDorDE','CumulType','NbrDecimal','CalcbyGrp','CalcDTaD'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Variable not found...")
return parsed
api.add_resource(qry_variable, "/API/variables/qry_variable")
class states(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntEstados']
nfield = ltbfl[ltbfl['id_sec'] == 'ncEstado']
strqry='select * from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
stnmdata = pd.DataFrame(data=dataqry,columns=['Statee','State2','Statename'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(states, "/API/states")
class qry_state(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntEstados']
nfield = ltbfl[ltbfl['id_sec'] == 'ncEstado']
parser = reqparse.RequestParser()
parser.add_argument('state_id')
args = parser.parse_args()
state_id = args.get('state_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ state_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Statee','State2','Statename'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="State not found...")
return parsed
def post(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstados']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstado']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('state_id')
parser.add_argument('state_2')
parser.add_argument('state_name')
args = parser.parse_args()
# retrieve parameters
jfile = args.get('file')
state_id = args.get('state_id')
state_2 = args.get('state_2')
state_name = args.get('state_name')
        # check whether the input arrived as a JSON file or as individual parameters
if jfile in (None, ''):
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +str(state_id) +'","' +str(state_2) +'","' +str(state_name) +'")')
qry.execute(strqry)
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'")')
qry.execute(strqry)
else:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'","' +data.iloc[0,2] +'")')
qry.execute(strqry)
return 'State stored',201
def delete(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntEstados']
nfield = ltbfl[ltbfl['id_sec'] == 'ncEstado']
parser = reqparse.RequestParser()
parser.add_argument('state_id')
args = parser.parse_args()
        state_id = args.get('state_id')
strqry='delete from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ state_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'State deleted',204
api.add_resource(qry_state, "/API/states/qry_state")
class municipalities(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntMunicipios']
nfield = ltbfl[ltbfl['id_sec'] == 'ncMunicipio']
strqry='select * from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
stnmdata = pd.DataFrame(data=dataqry,columns=['Municipality','Municipality2','MunicipalityName'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(municipalities, "/API/municipalities")
class qry_municipality(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntMunicipios']
nfield = ltbfl[ltbfl['id_sec'] == 'ncMunicipio']
parser = reqparse.RequestParser()
parser.add_argument('mun_id')
args = parser.parse_args()
mun_id = args.get('mun_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ mun_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Municipality','Municipality2','MunicipalityName'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Municipality not found...")
return parsed
def post(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntMunicipios']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncMunicipio']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('mun_id')
parser.add_argument('mun_2')
parser.add_argument('mun_name')
args = parser.parse_args()
# retrieve parameters
jfile = args.get('file')
mun_id = args.get('mun_id')
mun_2 = args.get('mun_2')
mun_name = args.get('mun_name')
        # check whether the input arrived as a JSON file or as individual parameters
if jfile in (None, ''):
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +str(mun_id) +'","' +str(mun_2) +'","' +str(mun_name) +'")')
qry.execute(strqry)
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = | pd.DataFrame(jdata) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 15:36:56 2020
@author: suyu
"""
from surprise import SVD
from surprise import Dataset
from surprise import Reader
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error,roc_auc_score,mean_absolute_error,log_loss
import numpy as np
import pandas as pd
import sys
sys.path.append('../')
from gammli.DataReader import data_initialize
def svd(wc, data, meta_info_ori, task_type="Regression", random_state=0):
base = SVD(n_factors=3)
cold_mae = []
cold_rmse = []
warm_mae = []
warm_rmse = []
cold_auc = []
cold_logloss = []
warm_auc = []
warm_logloss = []
for j in range(10):
train, test = train_test_split(data, test_size=0.2, random_state=j)
tr_x, tr_Xi, tr_y, tr_idx, te_x, te_Xi, te_y, val_x, val_Xi, val_y, val_idx, meta_info, model_info, sy, sy_t = data_initialize(train, test, meta_info_ori, task_type, 'warm', random_state=0, verbose=False)
Xi = tr_x[:,-2:]
Xi_t = te_x[:,-2:]
tr_ratings_dict = {'itemID': Xi[:,1].tolist(),
'userID': Xi[:,0].tolist(),
'rating': tr_y.ravel().tolist()}
tr_df = | pd.DataFrame(tr_ratings_dict) | pandas.DataFrame |
"""
Analysis dashboards module.
"""
import copy
from datetime import timedelta
import numpy as np
import pandas as pd
import logging
from flask_login import login_required
from flask import render_template, request
from sqlalchemy import and_
from app.dashboards import blueprint
from utilities.utils import parse_date_range_argument
from __app__.crop.structure import SQLA as db
from __app__.crop.structure import (
SensorClass,
ReadingsAdvanticsysClass,
ReadingsEnergyClass,
ReadingsZensieTRHClass,
)
from __app__.crop.constants import CONST_MAX_RECORDS, CONST_TIMESTAMP_FORMAT
# Temperature constants
TEMP_BINS = [0.0, 17.0, 21.0, 24.0, 30.0]
# Ventilation constants
CONST_SFP = 2.39 # specific fan power
CONST_VTOT = 20337.0 # total volume – m3
def resample(df, bins, dt_from, dt_to):
"""
Resamples (adds missing date/temperature bin combinations) to a dataframe.
Arguments:
        df: dataframe with temperatures assigned to bins
bins: temperature bins as a list
dt_from: date range from
dt_to: date range to
Returns:
bins_list: a list of temperature bins
df_list: a list of df corresponding to temperature bins
"""
bins_list = []
for i in range(len(bins) - 1):
bins_list.append("(%.1f, %.1f]" % (bins[i], bins[i + 1]))
date_min = min(df["date"].min(), dt_from)
date_max = max(df["date"].max(), dt_to)
for n in range(int((date_max - date_min).days) + 1):
day = date_min + timedelta(n)
for temp_range in bins_list:
if len(df[(df["date"] == day) & (df["temp_bin"] == temp_range)].index) == 0:
df2 = pd.DataFrame(
{"date": [day], "temp_bin": [temp_range], "temp_cnt": [0]}
)
df = df.append(df2)
df = df.sort_values(by=["date", "temp_bin"], ascending=True)
df.reset_index(inplace=True, drop=True)
df_list = []
for bin_range in bins_list:
df_bin = df[df["temp_bin"] == bin_range]
del df_bin["temp_bin"]
df_bin.reset_index(inplace=True, drop=True)
df_list.append(df_bin)
return bins_list, df_list
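# Hedged sketch (editor-added): resample() expects 'temp_bin' labels formatted like
# "(17.0, 21.0]". One way to produce them from raw temperatures is pd.cut over TEMP_BINS:
def _example_temp_bins(temperatures: pd.Series) -> pd.Series:
    return pd.cut(temperatures, TEMP_BINS).astype(str)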
def lights_energy_use(dt_from_, dt_to_):
"""
Energy use from Carpenter's place (with lights - called Clapham in the database)
Arguments:
dt_from_: date range from
dt_to_: date range to
Returns:
lights_results_df - a pandas dataframe with mean lights on values
"""
dt_from = pd.to_datetime(dt_from_.date()) + timedelta(hours=14)
dt_to = pd.to_datetime(dt_to_.date()) + timedelta(days=1, hours=15)
d_from = pd.to_datetime(dt_from_.date())
d_to = pd.to_datetime(dt_to_.date())
col_ec = "electricity_consumption"
sensor_device_id = "Clapham"
lights_on_cols = []
lights_results_df = None
# getting energy data for the analysis
query = db.session.query(
ReadingsEnergyClass.timestamp,
ReadingsEnergyClass.electricity_consumption,
).filter(
and_(
SensorClass.device_id == sensor_device_id,
ReadingsEnergyClass.sensor_id == SensorClass.id,
ReadingsEnergyClass.timestamp >= dt_from,
ReadingsEnergyClass.timestamp <= dt_to,
)
)
df = pd.read_sql(query.statement, query.session.bind)
if not df.empty:
# Resetting index
df = df.sort_values(by=["timestamp"], ascending=True).reset_index(drop=True)
# grouping data by date-hour
energy_hour = (
df.groupby(
by=[
df["timestamp"].map(
lambda x: pd.to_datetime(
"%04d-%02d-%02d-%02d" % (x.year, x.month, x.day, x.hour),
format="%Y-%m-%d-%H",
)
),
]
)["electricity_consumption"]
.sum()
.reset_index()
)
# Sorting and resetting index
energy_hour = energy_hour.sort_values(by=["timestamp"], ascending=True).reset_index(
drop=True
)
# energy dates. Energy date starts from 4pm each day and lasts for 24 hours
energy_hour.loc[
energy_hour["timestamp"].dt.hour < 15, "energy_date"
] = pd.to_datetime((energy_hour["timestamp"] + timedelta(days=-1)).dt.date)
energy_hour.loc[
energy_hour["timestamp"].dt.hour >= 15, "energy_date"
] = pd.to_datetime(energy_hour["timestamp"].dt.date)  # completion API: pandas.to_datetime
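# Worked example of the split above, as the code is written: a reading at 2020-01-01 03:00
# (hour < 15) is assigned energy_date 2019-12-31, while a reading at 2020-01-01 16:00
# (hour >= 15) keeps energy_date 2020-01-01.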
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
import matplotlib as plt
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split as sk_train_test_split
from multiprocessing import Pool
import gc
class Featurizer():
def __init__(self, assetId='assetCode',
n_lag=[3,7,14],
shift_size=1,
return_features=['returnsClosePrevMktres10','returnsClosePrevRaw10',
'returnsOpenPrevMktres1', 'returnsOpenPrevRaw1',
'open','close']
):
self.assetId = assetId
self.n_lag = n_lag
self.shift_size = shift_size
self.return_features = return_features
def transform(self, df):
new_df = self.generate_lag_features(df)
df = pd.merge(df, new_df, how='left', on=['time', self.assetId])  # completion API: pandas.merge
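# Minimal usage sketch (an assumed calling convention, not shown in the original file):
# the frame passed to transform() is expected to carry a 'time' column, the configured
# asset id column, and the return_features listed in __init__, e.g.
#   featurizer = Featurizer(assetId='assetCode', n_lag=[3, 7, 14])
#   enriched = featurizer.transform(market_df)   # market_df is a hypothetical input frame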
#encoding=utf-8
from nltk.corpus import stopwords
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import Ridge
from scipy.sparse import hstack, csr_matrix
import pandas as pd
import numpy as np
import xgboost as xgb
#import matplotlib.pyplot as plt
import gc, re
from sklearn.utils import shuffle
from contextlib import contextmanager
from sklearn.externals import joblib
import time
print("Starting job at time:",time.time())
debug = True
print("loading data ...")
used_cols = ["item_id", "user_id"]
if debug == False:
train_df = pd.read_csv("../input/train.csv", parse_dates = ["activation_date"])
y = train_df["deal_probability"]
test_df = pd.read_csv("../input/test.csv", parse_dates = ["activation_date"])
train_active = pd.read_csv("../input/train_active.csv", usecols=used_cols)
test_active = pd.read_csv("../input/test_active.csv", usecols=used_cols)
train_periods = pd.read_csv("../input/periods_train.csv", parse_dates=["date_from", "date_to"])
test_periods = pd.read_csv("../input/periods_test.csv", parse_dates=["date_from", "date_to"])
else:
train_df = pd.read_csv("../input/train.csv", parse_dates = ["activation_date"])
train_df = shuffle(train_df, random_state=1234); train_df = train_df.iloc[:10000]
y = train_df["deal_probability"]
test_df = pd.read_csv("../input/test.csv", nrows=1000, parse_dates = ["activation_date"])
train_active = pd.read_csv("../input/train_active.csv", nrows=1000, usecols=used_cols)
test_active = pd.read_csv("../input/test_active.csv", nrows=1000, usecols=used_cols)
train_periods = pd.read_csv("../input/periods_train.csv", nrows=1000, parse_dates=["date_from", "date_to"])
test_periods = pd.read_csv("../input/periods_test.csv", nrows=1000, parse_dates=["date_from", "date_to"])
print("loading data done!")
# =============================================================================
# Add image quality: by steeve
# =============================================================================
import pickle
with open('../input/inception_v3_include_head_max_train.p','rb') as f:
x = pickle.load(f)
train_features = x['features']
train_ids = x['ids']
with open('../input/inception_v3_include_head_max_test.p','rb') as f:
x = pickle.load(f)
test_features = x['features']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_features, columns = ['image_quality'])
incep_test_image_df = pd.DataFrame(test_features, columns = ['image_quality'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
del incep_train_image_df, incep_test_image_df
gc.collect()
with open('../input/train_image_features.p','rb') as f:
x = pickle.load(f)
train_blurinesses = x['blurinesses']
train_ids = x['ids']
with open('../input/test_image_features.p','rb') as f:
x = pickle.load(f)
test_blurinesses = x['blurinesses']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_blurinesses, columns = ['blurinesses'])
incep_test_image_df = pd.DataFrame(test_blurinesses, columns = ['blurinesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding whitenesses ...')
with open('../input/train_image_features.p','rb') as f:
x = pickle.load(f)
train_whitenesses = x['whitenesses']
train_ids = x['ids']
with open('../input/test_image_features.p','rb') as f:
x = pickle.load(f)
test_whitenesses = x['whitenesses']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_whitenesses, columns = ['whitenesses'])
incep_test_image_df = pd.DataFrame(test_whitenesses, columns = ['whitenesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding dullnesses ...')
with open('../input/train_image_features.p','rb') as f:
x = pickle.load(f)
train_dullnesses = x['dullnesses']
train_ids = x['ids']
with open('../input/test_image_features.p','rb') as f:
x = pickle.load(f)
test_dullnesses = x['dullnesses']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_dullnesses, columns = ['dullnesses'])
incep_test_image_df = pd.DataFrame(test_dullnesses, columns = ['dullnesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
# =============================================================================
# new image data
# =============================================================================
print('adding average_pixel_width ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_average_pixel_width = x['average_pixel_width']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_average_pixel_width = x['average_pixel_width']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_average_pixel_width, columns = ['average_pixel_width'])
incep_test_image_df = pd.DataFrame(test_average_pixel_width, columns = ['average_pixel_width'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding average_reds ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_average_reds = x['average_reds']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_average_reds = x['average_reds']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_average_reds, columns = ['average_reds'])
incep_test_image_df = pd.DataFrame(test_average_reds, columns = ['average_reds'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding average_blues ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_average_blues = x['average_blues']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_average_blues = x['average_blues']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_average_blues, columns = ['average_blues'])
incep_test_image_df = pd.DataFrame(test_average_blues, columns = ['average_blues'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding average_greens ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_average_greens = x['average_greens']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_average_greens = x['average_greens']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_average_greens, columns = ['average_greens'])
incep_test_image_df = pd.DataFrame(test_average_greens, columns = ['average_greens'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding widths ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_widths = x['widths']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_widths = x['widths']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_widths, columns = ['widths'])
incep_test_image_df = pd.DataFrame(test_widths, columns = ['widths'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding heights ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_heights = x['heights']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_heights = x['heights']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_heights, columns = ['heights'])
incep_test_image_df = pd.DataFrame(test_heights, columns = ['heights'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
del test_average_blues, test_average_greens, test_average_reds, incep_test_image_df
del train_average_blues, train_average_greens, train_average_reds, incep_train_image_df
gc.collect()
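# The pickle-load / DataFrame / join pattern above is repeated once per image statistic.
# A helper along the lines of the sketch below (not part of the original kernel; the
# argument names are illustrative) would express the same steps once per feature key.
def join_image_feature(train_df, test_df, train_path, test_path, key):
    """Attach one image-level statistic from the feature pickles to both frames."""
    frames = []
    for path in (train_path, test_path):
        with open(path, 'rb') as f:
            blob = pickle.load(f)
        feat_df = pd.DataFrame(blob[key], columns=[key])
        feat_df['image'] = blob['ids']
        frames.append(feat_df.set_index('image'))
    return train_df.join(frames[0], on='image'), test_df.join(frames[1], on='image')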
#==============================================================================
# image features by Qifeng
#==============================================================================
print('adding image features @ qifeng ...')
with open('../input/train_image_features_cspace.p','rb') as f:
x = pickle.load(f)
x_train = pd.DataFrame(x, columns = ['average_HSV_Ss',\
'average_HSV_Vs',\
'average_LUV_Ls',\
'average_LUV_Us',\
'average_LUV_Vs',\
'average_HLS_Hs',\
'average_HLS_Ls',\
'average_HLS_Ss',\
'average_YUV_Ys',\
'average_YUV_Us',\
'average_YUV_Vs',\
'ids'
])
#x_train.rename(columns = {'$ids':'image'}, inplace = True)
with open('../input/test_image_features_cspace.p','rb') as f:
x = pickle.load(f)
x_test = pd.DataFrame(x, columns = ['average_HSV_Ss',\
'average_HSV_Vs',\
'average_LUV_Ls',\
'average_LUV_Us',\
'average_LUV_Vs',\
'average_HLS_Hs',\
'average_HLS_Ls',\
'average_HLS_Ss',\
'average_YUV_Ys',\
'average_YUV_Us',\
'average_YUV_Vs',\
'ids'
])
#x_test.rename(columns = {'$ids':'image'}, inplace = True)
train_df = train_df.join(x_train.set_index('ids'), on='image')
test_df = test_df.join(x_test.set_index('ids'), on='image')
del x, x_train, x_test; gc.collect()
# =============================================================================
# add geo info: https://www.kaggle.com/frankherfert/avito-russian-region-cities/data
# =============================================================================
#tmp = pd.read_csv("../input/avito_region_city_features.csv", usecols=["region", "city", "latitude","longitude"])
#train_df = train_df.merge(tmp, on=["city","region"], how="left")
#train_df["lat_long"] = train_df["latitude"]+train_df["longitude"]
#test_df = test_df.merge(tmp, on=["city","region"], how="left")
#test_df["lat_long"] = test_df["latitude"]+test_df["longitude"]
#del tmp; gc.collect()
# =============================================================================
# Add region-income
# =============================================================================
tmp = pd.read_csv("../input/region_income.csv", sep=";", names=["region", "income"])  # completion API: pandas.read_csv
from tkinter import ttk,filedialog
from tkinter import *
import pandas as pd
# import argparse
from openpyxl import Workbook,worksheet
from openpyxl.styles import Border, Side, Font, Alignment
from openpyxl.utils.dataframe import dataframe_to_rows
from openpyxl.utils import get_column_letter
root = Tk()
root.title('eagle bom tool')
isOctoPart = BooleanVar()
isOctoPart.set(False)
octoPartUser = StringVar()
octoPartUser.set('<EMAIL>')
pathLabelText = 'Select file'
path = ''
def selectFile():
global path
path = filedialog.askopenfilename(initialdir = "~",title = "Select file",filetypes = (("csv files","*.csv"),("all files","*.*")))
if len(path) > 0:
print(path)
pathEntry.delete(0, END)
pathEntry.insert(END, path)
def get_col_widths(dataframe):
# First we find the maximum length of the index column
idx_max = max([len(str(s)) for s in dataframe.index.values] + [len(str(dataframe.index.name))])
# Then, we concatenate this to the max of the lengths of column name and its values for each column, left to right
return [idx_max] + [max([len(str(s)) for s in dataframe[col].values] + [len(col)]) for col in dataframe.columns]
def runScript():
filePath = pathEntry.get()
print(filePath)
if isOctoPart.get() == True:
print("bulding an Octopart BOM")
if len(filePath) > 0:
cuurencyFormat = '#,##0.00$'
#Import CSV file as dataframe
bomDf = pd.read_csv(filePath,sep=';')
#Remove any excluded parts from dataframe
if 'BOM' in bomDf.columns:
indexNames = bomDf[bomDf['BOM'] == 'EXCLUDE'].index
bomDf.drop(indexNames, inplace=True)
#copy the important columns to a new dataframe
newDf = pd.DataFrame()  # completion API: pandas.DataFrame
from .database import CodingSystem, CodingProperty, GlobalProperty, GlobalRating, GlobalValue, Interview, \
PropertyValue, Utterance, UtteranceCode
from .utils import sanitize_for_spss
from pandas import DataFrame, Index, MultiIndex, notna
from pandas.api.types import is_string_dtype, is_object_dtype
from peewee import Case, Cast, fn, JOIN, Value
from savReaderWriter.savWriter import SavWriter
from numpy import NaN
import logging
import pandas.api.types as ptypes
logging.getLogger('caastools.dataset').addHandler(logging.NullHandler())
__all__ = ['sequential', 'session_level', 'create_sl_variable_labels', 'save_as_spss']
def _global_query_(included_interviews=None, included_globals=None, client_as_numeric=True, exclude_reliability=True):
"""
Constructs the globals query for session-level datasets
:param included_interviews: iterable of str specifying names of interviews to include
:param included_globals:
:param client_as_numeric: Whether to cast client_id as a numeric type. Default True
:param exclude_reliability: Whether to exclude (True, default) or include (False) interviews of type 'reliability'
:return: ModelSelect, Cte - The full query for global ratings and the CTE associated object
"""
client_column = Cast(Interview.client_id, "INT").alias('client_id') if client_as_numeric else Interview.client_id
# May want only certain interviews included or certain properties included,
# so construct some predicates for where clauses, if necessary
types = ['general'] if exclude_reliability else ['general', 'reliability']
predicate = Interview.interview_type.in_(types)
if included_interviews is not None:
predicate = predicate & Interview.interview_name.in_(included_interviews)
if included_globals is not None:
predicate = predicate & GlobalProperty.gp_name.in_(included_globals)
"""
Logic above replaces this
global_predicate = ((p1) & (p2) & (p3)) if included_interviews is not None and included_globals is not None else \
((p1) & (p3)) if included_interviews is not None else \
((p2) & (p3)) if included_globals is not None else \
p3
"""
# For any session-level/decile dataset, we want scores for all session-level globals.
# Thus, there will need to be either a UNION ALL of counts and global ratings
# or a separate query for globals.
# Below constructs the global ratings part of the UNION ALL
global_query = (GlobalRating.select(GlobalRating.interview_id, GlobalProperty.gp_name,
Cast(GlobalValue.gv_value, "INT"), GlobalValue.global_property_id)
.join(GlobalValue).join(GlobalProperty, JOIN.LEFT_OUTER))
global_cte = global_query.cte("global_cte", columns=['interview_id', 'gp_name', 'gv_value', 'global_property_id'])
outer_global_query = (Interview
.select(Interview.interview_name, Interview.interview_type, client_column, Interview.rater_id,
Interview.session_number, GlobalProperty.gp_name, global_cte.c.gv_value)
.join(CodingSystem)
.join(GlobalProperty))
full_global_query = outer_global_query.join(
global_cte, JOIN.LEFT_OUTER, on=((Interview.interview_id == global_cte.c.interview_id) &
(GlobalProperty.global_property_id == global_cte.c.global_property_id))
)
# Append the predicate
full_global_query = full_global_query.where(predicate)
return full_global_query, global_cte
def quantile_level(quantiles=10, included_interviews=None, included_properties=None, included_globals=None,
client_as_numeric=True, exclude_reliability=True):
"""
Constructs a quantile-level dataset
:param quantiles: Integer specifying number of quantiles into which data is to be divided. Default 10
:param included_interviews: Sequence of strings specifying interview names to be included in the dataset,
None to include all interviews. Default None
:param included_properties: Sequence of int specifying CodingProperty whose data is to be included in the dataset,
None to include all coding properties. Default None
:param included_globals: sequence of int specifying GlobalProperties to be included. None to include all.
Default None
:param client_as_numeric: Whether to cast client_id to a numeric type. Default True
:param exclude_reliability: Whether to exclude (True, default) or include (False) interviews of type 'reliability'
:return: DataFrame
"""
included_types = ['general'] if exclude_reliability else ['general', 'reliability']
# In order to build the quantile-level dataset, each utterance needs to be placed into its quantile
# First step in that operation is to determine the length of a quantile by interview
decile_lens = Utterance.select(Utterance.interview_id, fn.MIN(Utterance.utt_start_time),
fn.MAX(Utterance.utt_end_time),
(fn.MAX(Utterance.utt_end_time) - fn.MIN(Utterance.utt_start_time)) / quantiles) \
.group_by(Utterance.interview_id) \
.cte('decile_lens', columns=['interview_id', 'start_time', 'end_time', 'length'])
# Once the length of a quantile is known, the next step is to compute a CTE
# in which each utterance has its quantile number assigned
utt_deciles = Utterance.select(
Utterance.interview_id, Utterance.utterance_id,
Cast((Utterance.utt_start_time - decile_lens.c.start_time) / decile_lens.c.length + 1, "INT")
).join(decile_lens, JOIN.LEFT_OUTER, on=(Utterance.interview_id == decile_lens.c.interview_id)) \
.cte('utt_deciles', columns=['interview_id', 'utterance_id', 'quantile'])
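# Worked example of the expression above: if an interview's utterances span 3600 time units
# and quantiles=10, each quantile is 360 units long, so an utterance starting 725 units after
# the first one falls in quantile int(725 / 360) + 1 = 3.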
# Once an utterance has a quantile number assigned, the last step in getting the counts
# is to select codes and group by interview, quantile, and property_value
decile_counts = UtteranceCode.select(utt_deciles.c.interview_id, utt_deciles.c.quantile,
UtteranceCode.property_value_id,
fn.COUNT(UtteranceCode.utterance_code_id)) \
.join(utt_deciles, JOIN.LEFT_OUTER, on=(UtteranceCode.utterance_id == utt_deciles.c.utterance_id)) \
.group_by(utt_deciles.c.interview_id, utt_deciles.c.quantile, UtteranceCode.property_value_id) \
.cte('decile_counts', columns=['interview_id', 'quantile', 'property_value_id', 'cnt'])
case = Case(None, ((decile_counts.c.cnt.is_null(), 0),), (decile_counts.c.cnt))
client_column = Cast(Interview.client_id, "INT").alias('client_id') if client_as_numeric else Interview.client_id
var_column = CodingProperty.cp_display_name.concat("_").concat(PropertyValue.pv_value).alias('property')
# In order to construct the quantile-level dataset, we first need a recursive CTE
# that defines every code for every quantile and interview.
# Start with the base case
quantile = Value(quantiles).alias('quantile')
base_case = (Interview.select(Interview.interview_id, Interview.interview_type,
PropertyValue.property_value_id, Interview.interview_name, client_column,
Interview.rater_id, Interview.session_number, quantile, var_column)
.join(CodingSystem)
.join(CodingProperty)
.join(PropertyValue)).cte('base', recursive=True)
# Now, define the recursive terms
rquantile = (base_case.c.quantile - 1).alias('quantile')
rterm = base_case.select(base_case.c.interview_id, base_case.c.interview_type, base_case.c.property_value_id,
base_case.c.interview_name, base_case.c.client_id, base_case.c.rater_id,
base_case.c.session_number, rquantile, base_case.c.property).where(rquantile > 1)
# The full expression is the union all of the base case with the recursive term
qt = base_case.union_all(rterm)
outer_query = qt.select_from(qt.c.interview_id, qt.c.property_value_id, qt.c.interview_name, qt.c.interview_type,
qt.c.client_id, qt.c.rater_id, qt.c.session_number, qt.c.quantile, qt.c.property,
case.alias('var_count'))
# Join the recursive CTE for interview/quantiles to the actual count data
full_query = (
outer_query.join(
decile_counts, JOIN.LEFT_OUTER,
on=((qt.c.property_value_id == decile_counts.c.property_value_id) &
(qt.c.interview_id == decile_counts.c.interview_id) &
(qt.c.quantile == decile_counts.c.quantile)))
)
# Filter the included data as specified, for type and name
predicate = qt.c.interview_type.in_(included_types)
if included_interviews is not None:
predicate = predicate & qt.c.interview_name.in_(included_interviews)
full_query = full_query.where(predicate)
# Include the required table expressions to create the full query
full_query = full_query.with_cte(decile_counts, utt_deciles, decile_lens, qt)
full_query = full_query.order_by(qt.c.client_id, qt.c.session_number, qt.c.rater_id, qt.c.property,
qt.c.quantile)
# With the full query constructed, we can build the dataframe from the returned rows
df = DataFrame.from_records(full_query.tuples().execute(),
columns=['interview_id', 'property_value_id', 'interview_name', 'interview_type',
'client_id', 'rater_id', 'session_number', 'quantile', 'var_name',
'var_value'])
# Compute a column for quantile x code
df['decile_var_name'] = df['var_name'] + "_q" + df['quantile'].astype(str).apply(lambda x: x.zfill(2))
# Reshape the dataframe and index on client_id
df = df.loc[:, ['interview_name', 'client_id', 'rater_id', 'session_number', 'decile_var_name', 'var_value']] \
.set_index(['interview_name', 'client_id', 'rater_id', 'session_number', 'decile_var_name']) \
.unstack('decile_var_name').loc[:, 'var_value'].reset_index().set_index('client_id')
# To add the globals data, first get the appropriate query
# Then put into dataframe
# then, reshape and reindex like the count data
global_query, global_cte = _global_query_(
included_interviews=included_interviews, included_globals=included_globals,
exclude_reliability=exclude_reliability
)
global_query = global_query.with_cte(global_cte)
gdf = (
DataFrame.from_records(
global_query.tuples().execute(),
columns=['interview_name', 'client_id', 'rater_id', 'session_number', 'var_name', 'var_value']
)
.loc[:, ['client_id', 'var_name', 'var_value']].set_index(['client_id', 'var_name'])
.unstack('var_name').loc[:, 'var_value'])
df = df.join(gdf).sort_index()
return df
def sequential(included_interviews, included_properties, client_as_numeric=True, exclude_reliability=True, quantiles=1):
"""
datasets.sequential(included_interviews, included_properties) -> pandas.DataFrame
Builds a sequential dataset including the interviews specified in included_interviews and the
properties specified in included_properties
:param included_interviews: sequence of interviews to be included in the dataset. None for all interviews
:param included_properties: Sequence of str specifying display_name of CodingProperty to include in the query
:param client_as_numeric: Whether client_id should be a numeric variable (default True)
:param exclude_reliability: Whether to exclude (True, default) or include (False) interviews of type 'reliability'
:param quantiles: Number of quantiles per interview. Default 1
:return: pandas.DataFrame
"""
included_types = ['general'] if exclude_reliability else ['general', 'reliability']
type_predicate = Interview.interview_type.in_(included_types)
# No need to include properties twice
included_properties = sorted(set(included_properties))
table_expressions = []
display_names = []
property_cases = []
cast_columns = []
property_query = UtteranceCode.select(UtteranceCode.utterance_id, PropertyValue.pv_value,
CodingProperty.cp_data_type) \
.join(PropertyValue) \
.join(CodingProperty)
STR_MSNG = '-999999999999999'
NUM_MSNG = -999999999999999
client_column = Cast(Interview.client_id, "INT").alias('client_id') if client_as_numeric else Interview.client_id
# The dataset construction needs to be atomic to avoid race conditions
with UtteranceCode._meta.database.atomic() as transaction:
# each utterance needs to be placed into its quantile
# Frist step in that operation is to determine the length of a quantile by interview
quantile_lens = Utterance.select(Utterance.interview_id, fn.MIN(Utterance.utt_start_time),
fn.MAX(Utterance.utt_end_time) + 0.5,
(fn.MAX(Utterance.utt_end_time) - fn.MIN(
Utterance.utt_start_time) + 0.5) / quantiles) \
.group_by(Utterance.interview_id) \
.cte('decile_lens', columns=['interview_id', 'start_time', 'end_time', 'length'])
# Once the length of a quantile is known, the next step is to compute a CTE
# in which each utterance has its quantile number assigned
utt_quantiles = Utterance.select(
Utterance.interview_id, Utterance.utterance_id,
Cast((Utterance.utt_start_time - quantile_lens.c.start_time) / quantile_lens.c.length + 1, "INT")
) \
.join(quantile_lens, JOIN.LEFT_OUTER, on=(Utterance.interview_id == quantile_lens.c.interview_id)) \
.cte('utt_deciles', columns=['interview_id', 'utterance_id', 'quantile'])
# Need the property's data type, so that appropriate casts can be made
cp_dict = {itm[0]: (itm[1], itm[2]) for itm in
CodingProperty.select(CodingProperty.cp_display_name, CodingProperty.coding_property_id,
CodingProperty.cp_data_type
)
.where(CodingProperty.cp_display_name.in_(included_properties))
.tuples().execute()}
# Need a CTE for each property whose data is to be included, so construct queries and convert to CTE
# Need to conditionally create a CAST expression as well because some properties are Numeric, some are STR
for cp_display_name in included_properties:
prop_pk, cp_data_type = cp_dict.get(cp_display_name, (None, None))
if prop_pk is None:
logging.warning(f"CodingProperty with display name of {cp_display_name} " +
"not found. This data will not be included")
continue
# If a numeric type is specified, add it to the columns to be cast to numeric
if cp_data_type == 'numeric':
cast_columns.append(cp_display_name)
cte = property_query.where(PropertyValue.coding_property_id == prop_pk) \
.cte(f"cte_{cp_display_name}", columns=['utterance_id', cp_display_name, 'cp_data_type'])
data_field = getattr(cte.c, cp_display_name)
table_expressions.append(cte)
pc = Case(None, ((data_field.is_null(), STR_MSNG),), data_field)
property_cases.append(pc)
display_names.append(cp_display_name)
# The outer query will select the Utterances of the interview.
# any CTE will match on the Utterance.utterance_id field and insert the appropriate fields with codes
# outer query needs to include the fields of the CTE as well, so start there
basic_query = Interview.select(
Interview.interview_name, Interview.interview_type, Interview.rater_id, client_column,
Interview.session_number, Utterance.utt_line, Utterance.utt_enum, Utterance.utt_role,
*(Cast(pc, "FLOAT").alias(name) if name in cast_columns else pc.alias(name)
for name, pc in zip(display_names, property_cases)), Utterance.utt_text, Utterance.utt_start_time,
Utterance.utt_end_time, utt_quantiles.c.quantile
) \
.join(Utterance) \
.join(utt_quantiles, JOIN.LEFT_OUTER, on=(Utterance.utterance_id == utt_quantiles.c.utterance_id))
# Once the basic query is constructed, the joins need to be added into the query
# so that the fields of the CTE can be queried properly
for name, cte in zip(display_names, table_expressions):
basic_query = basic_query.join(cte, JOIN.LEFT_OUTER, on=(Utterance.utterance_id == cte.c.utterance_id))
# Add the quantile CTE to the list of CTE to be included in the query later
table_expressions.extend([quantile_lens, utt_quantiles])
# Final step of query preparation is to add in the CTE themselves and narrow the results
basic_query = basic_query.with_cte(*table_expressions)
if included_interviews is not None:
basic_query = basic_query.where((Interview.interview_name.in_(included_interviews)) & (type_predicate))
else:
basic_query = basic_query.where(type_predicate)
basic_query = basic_query.order_by(client_column, Interview.session_number, Utterance.utt_enum)
results = basic_query.tuples().execute()
columns = [itm[0] for itm in results.cursor.description]
df = DataFrame(data=results, columns=columns).replace([NUM_MSNG, STR_MSNG], [NaN, ''])
return df
def session_level(included_interviews=None, included_properties=None, included_globals=None,
client_as_numeric=True, exclude_reliability=True):
"""
session_level(interview_names) -> pandas.DataFrame
Builds a session-level DataFrame with counts for interviews named in interview_names
:param included_interviews: iterable of Interview.interview_names to be included in the Dataset
:param included_properties: iterable of str specifying the display_name of any properties to be included
:param included_globals: iterable of GlobalProperty.global_property_id to be included
:param client_as_numeric: Whether to cast client_id as a numeric variable. Default True
:param exclude_reliability: Whether to exclude (True, default) or include (False) interviews of type 'reliability'
:return: pandas.DataFrame
"""
# Used to create a predicate to exclude reliability interviews, if specified
included_types = ['general'] if exclude_reliability else ['general', 'reliability']
# may want the client_id cast as numeric
client_column = Cast(Interview.client_id, "INT").alias('client_id') if client_as_numeric else Interview.client_id
var_column = CodingProperty.cp_display_name.concat("_").concat(PropertyValue.pv_value).alias('property')
# May want only certain interviews included or certain properties included,
# so construct some predicates for where clauses, if necessary
predicate = Interview.interview_type.in_(included_types)
if included_interviews is not None:
predicate = predicate & Interview.interview_name.in_(included_interviews)
if included_properties is not None:
predicate = predicate & CodingProperty.cp_display_name.in_(included_properties)
# Construct the global query and associated CTE
full_global_query, global_cte = _global_query_(included_interviews=included_interviews,
included_globals=included_globals,
client_as_numeric=client_as_numeric)
# Below constructs the code frequency part of the UNION ALL
# inner_query is the CTE that selects the existing count data. It is later joined with an outer query.
inner_query = (
UtteranceCode.select(Utterance.interview_id, UtteranceCode.property_value_id,
fn.COUNT(UtteranceCode.property_value_id))
.join(Utterance)
.group_by(Utterance.interview_id, UtteranceCode.property_value_id))
# The inner query needs to be used as a table expression, so that it can be joined with the outer query properly
cte = inner_query.cte('cte', columns=('interview_id', 'pvid', 'cnt'))
# We want to enter zero when the result of the join is NULL
# (Null indicates that a count for a PropertyValue was zero
# because there is no related record in the UtteranceCode table having the specified PropertyValue)
case = Case(None, ((cte.c.cnt.is_null(), 0),), (cte.c.cnt))
outer_query = (Interview
.select(Interview.interview_name, Interview.interview_type, client_column, Interview.rater_id,
Interview.session_number, var_column, case.alias('var_count'))
.join(CodingSystem)
.join(CodingProperty)
.join(PropertyValue))
# Perform the joins on the CTE and do the union all for the final query
full_query = (outer_query.join(cte, JOIN.LEFT_OUTER, on=((PropertyValue.property_value_id == cte.c.pvid)
& (Interview.interview_id == cte.c.interview_id)))
.with_cte(cte, global_cte))
full_query = full_query.where(predicate)
full_query = (full_query.union_all(full_global_query)
.order_by(client_column, Interview.session_number, Interview.rater_id, var_column))
# pull the query results into a dataframe, then reshape it
# Some DBMS lack the pivot function so reshaping the DataFrame itself rather than the query is necessary
df = DataFrame.from_records(data=full_query.tuples().execute(),
columns=['interview_name', 'interview_type', 'client_id', 'rater_id', 'session_number',
'var_name', 'var_value'])
df = df.set_index(['interview_name', 'interview_type', 'client_id', 'rater_id', 'session_number', 'var_name']) \
.unstack('var_name').loc[:, 'var_value'].reset_index().sort_index()
return df
def create_sequential_variable_labels(coding_system_id, find, replace):
"""
datasets.create_sequential_variable_labels(coding_system_id, find, replace) -> dict
Creates a dictionary of variable labels suitable for building an SPSS sequential dataset
:param coding_system_id: the ID of the coding system for which to create labels
:param find: sequence of strings to be replaced in the variable names
:param replace: sequence of strings with which to replace corresponding entries in find. May also be a
callable which determines the appropriate replacement values
:return: dict
"""
cp_query = (CodingProperty.select(CodingProperty.cp_name, CodingProperty.cp_description)
.join(CodingSystem)
.where(CodingSystem.coding_system_id == coding_system_id)
.order_by(CodingProperty.coding_property_id))
labels = {sanitize_for_spss(tpl[0], find=find, repl=replace): tpl[1] for tpl in cp_query.tuples().execute()}
return labels
def create_sl_variable_labels(coding_system_id, find, replace):
"""
datasets.create_variable_labels(coding_system_id) -> dict
creates a dictionary of variable labels suitable for building an SPSS session-level dataset
:param coding_system_id: the coding system for which to create variable labels
:param find: sequence of strings to be replaced in the variable names
:param replace: sequence of strings with which to replace corresponding entries in find. May also be a
function which determines the appropriate replacement characters
:return: dict
"""
# In the SL dataset, each PropertyValue and each GlobalProperty become its own variable,
# so need to query those tables for the right entities
gp_query = (GlobalProperty.select(GlobalProperty.gp_name, GlobalProperty.gp_description)
.where(GlobalProperty.coding_system == coding_system_id))
pv_query = (PropertyValue.select(CodingProperty.cp_name.concat("_").concat(PropertyValue.pv_value))
.join(CodingProperty)
.join(CodingSystem)
.where(CodingProperty.coding_system == coding_system_id)
.union_all(gp_query)
.order_by(CodingProperty.coding_property_id, PropertyValue.pv_value))
sl_labels = {sanitize_for_spss(row[0], find=find, repl=replace): row[1] for row in pv_query.tuples().execute()}
return sl_labels
def save_as_spss(data_frame: DataFrame, out_path: str, labels: dict = None, find=None, repl=None) -> None:
"""
caastools.utils.save_as_spss(data_frame: pandas.DataFrame, out_path: str) -> None
saves data_frame as an SPSS dataset at out_path
:param data_frame: the pandas DataFrame to save
:param out_path: the path at which to save the file
:param labels: a dictionary mapping column labels in the data frame to a variable label in the SPSS dataset
:param find: a sequence of characters within variable names to be replaced with other values. Default None
:param repl: a sequence of characters with which to replace corresponding entries in find, or a function
which yields their replacements. Default None
:return: None
:raise ValueError: if either find/repl is None and the other is not
:raise ValueError: if find and repl are sequences of unequal length
"""
cols = data_frame.columns # type: pandas.Index
is_multi_index = isinstance(cols, MultiIndex)
var_names = []
var_types = {}
var_formats = {}
var_labels = {} if labels is None else labels
# Construct the various information that the SPSS dictionary will contain about each variable
for col in cols:
var_name = sanitize_for_spss(".".join(str(i) for i in col) if is_multi_index else str(col),
find=find, repl=repl)
var_names.append(var_name)
# Need to know the data type and format of each column so that the SPSS file can be written properly
# 0 is a numeric type, any positive integer is a string type where the number represents the number
# of bytes the string can hold.
if is_string_dtype(data_frame[col]) or is_object_dtype(data_frame[col]):
lens = list(filter(lambda x: notna(x) and x is not None, set(data_frame[col].str.len())))
var_types[var_name] = int(max(lens)) * 2 if len(lens) > 0 else 255
else:
var_types[var_name] = 0
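# Example of the encoding above: a string column whose longest value is 12 characters gets
# var_types[var_name] = 24 (bytes), while every numeric column gets 0.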
var_formats[var_name] = "F10.2" if ptypes.is_float_dtype(data_frame[col].dtype)  # completion API: pandas.api.types.is_float_dtype
import ccxt
import pandas as pd
import datetime
import os
import time
import numpy as np
class binance_data():
now = datetime.datetime.now()
timestamp_now = int(time.time()*1000)
addtime = {'1m':60000, '15m':900000, '30m':1800000,'1h':3600000, '12h':43200000,'1d':86400000}
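# Each entry is one candle length expressed in milliseconds, e.g. '1h' = 60 * 60 * 1000
# = 3,600,000 ms; presumably this is what gets added to a timestamp to request the next batch.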
def __init__(self,api_key,secret,ticker,t_frame):
self.ticker = ticker
self.t_frame = t_frame
self.file = f'./data/binance_ETH_{self.t_frame}.csv'
if api_key != '':
self.binance = ccxt.binance(config={
'apiKey': api_key,
'secret': secret,
'enableRateLimit': True,
'options': {
'defaultType': 'future'
}
})
else:
pass
def updating_coin_csv(self):
##ethsince = 1577059200000
if not os.path.isfile(self.file):
if not os.path.isdir('./data'):
os.mkdir('data')
ethsince = 1577059200000
else: # else it exists so append without writing the header
ethsince = int(pd.read_csv(self.file)  # completion API: pandas.read_csv
# Imports
import streamlit as st
import streamlit.components.v1 as components
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import time
import os.path
# ML dependency imports
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.model_selection import train_test_split
from streamlit.type_util import Key
# Page Settings
st.set_page_config(page_title="California Wildfire ML", page_icon="./img/fav.png", initial_sidebar_state="collapsed")
#"""
#--------------------------
#---- MACHINE LEARNING ----
#--------------------------
#"""
def main():
print("IN MAIN")
# If data has not been cleaned, then clean it
if os.path.isfile("./data/clean/fire_data_clean.csv") == False:
print("CLEANING FIRE")
clean_fire()
if os.path.isfile("./data/clean/drought_data_clean.csv") == False:
print("CLEANING DROUGHT")
clean_drought()
if os.path.isfile("./data/clean/precip_data_clean.csv") == False:
print("CLEANING RAIN")
clean_percip()
# # Init sidebar with header text
# st.sidebar.header("Menu")
# # Add URL for github repository
# st.sidebar.write("[View on GitHub](https://github.com/josephchancey/ca-wildfire-ml)")
def old_fire_dataset():
unclean_fire = pd.read_csv("./data/fire_data.csv")
return unclean_fire
def old_precip_dataset():
unclean_precip = pd.read_csv("./data/precip_data.csv")
return unclean_precip
def old_drought_dataset():
unclean_drought = pd.read_csv("./data/drought_data.csv")
return unclean_drought
def clean_fire():
if os.path.isfile("./data/clean/fire_data_clean.csv") == False:
# import fire data csv
fireFile = "./data/fire_data.csv"
# read the file and store in a data frame
fireData = pd.read_csv(fireFile)
# remove extraneous columns
fireData = fireData[["incident_id","incident_name","incident_county","incident_acres_burned",
"incident_dateonly_created","incident_dateonly_extinguished"]]
# rename columns
fireData = fireData.rename(columns={"incident_id":"ID","incident_name":"Name","incident_county":"County",
"incident_acres_burned":"AcresBurned","incident_dateonly_created":"Started",
"incident_dateonly_extinguished":"Extinguished"})
# check for duplicates, then drop ID column
fireData = fireData.drop_duplicates(subset=["ID"])
fireData = fireData[["Name","County","AcresBurned","Started","Extinguished"]]
# create a column that contains the duration
# first convert date columns to datetime
fireData["Started"] = pd.to_datetime(fireData["Started"])
fireData["Extinguished"] = pd.to_datetime(fireData["Extinguished"])
# subtract the dates
fireData["Duration"] = fireData["Extinguished"] - fireData["Started"]
# convert duration to string and remove "days"
fireData["Duration"] = fireData["Duration"].astype(str)
fireData["Duration"] = fireData["Duration"].str.replace("days","")
# replace NaT with NaN and convert back to float
fireData["Duration"] = fireData["Duration"].replace(["NaT"],"NaN")
fireData["Duration"] = fireData["Duration"].astype(float)
# add one day to duration to capture fires that started and were extinguished in the same day
fireData["Duration"] = fireData["Duration"] + 1
# create a column for year and filter for fires during or after 2013
fireData["Year"] = fireData["Started"].dt.year
fireData = fireData.loc[(fireData["Year"]>=2013),:]
# create a column to hold the year and month of the start date
fireData["Date"] = fireData["Started"].apply(lambda x: x.strftime('%Y-%m'))
fireData = fireData[["Date", "County", "Duration", "AcresBurned"]]
# drop nulls
fireData = fireData.dropna()
# reset the index
fireData.reset_index(inplace=True,drop=True)
# export as csv
fireData.to_csv("./data/clean/fire_data_clean.csv",index=False)
return fireData
else:
# This prevents the cleaning from being ran each time this function is called, checks if cleaning is done already
fireData = pd.read_csv("./data/clean/fire_data_clean.csv")
return fireData
def clean_percip():
if os.path.isfile("./data/clean/precip_data_clean.csv") == False:
# import precipitation data csv
precipFile = "./data/precip_data.csv"
# read the file and store in a data frame
precipData = pd.read_csv(precipFile)
# remove extraneous columns
precipData = precipData[["Date","Location","Value"]]
# rename columns
precipData = precipData.rename(columns = {"Location":"County","Value":"Precip"})
# remove "county" from county column to be consistent with other datasets
precipData["County"] = precipData["County"].astype(str)
precipData["County"] = precipData["County"].str.replace(" County","")
# convert date column
precipData["Date"] = pd.to_datetime(precipData["Date"].astype(str), format='%Y%m')
# create a column for year and filter for data during or after 2013
precipData["Year"] = precipData["Date"].dt.year
precipData = precipData.loc[(precipData["Year"]>=2013),:]
# drop the year column
precipData = precipData[["Date","County","Precip"]]
# edit the date column to match the format of the other datasets
precipData["Date"] = precipData["Date"].apply(lambda x: x.strftime('%Y-%m'))
precipData = precipData.dropna()
precipData.reset_index(inplace=True,drop=True)
# export as csv
precipData.to_csv("./data/clean/precip_data_clean.csv",index=False)
return precipData
else:
precipData = pd.read_csv("./data/clean/precip_data_clean.csv")
return precipData
def clean_drought():
if os.path.isfile("./data/clean/precip_data_clean.csv") == False:
# import drought data csv
droughtFile = "./data/drought_data.csv"
# read the file and store in a dataframe
droughtData = pd.read_csv(droughtFile)
droughtData = droughtData[["ValidStart","County","None","D0","D1","D2",
"D3","D4"]]
# rename columns
droughtData = droughtData.rename(columns={"ValidStart":"Date"})
# remove "county" from county column to be consistent with other datasets
droughtData["County"] = droughtData["County"].astype(str)
droughtData["County"] = droughtData["County"].str.replace(" County","")
# edit the date column to match the format of the other datasets
droughtData["Date"] = pd.to_datetime(droughtData["Date"])
droughtData["Date"] = droughtData["Date"].apply(lambda x: x.strftime('%Y-%m'))
# drop nulls and reset the index
droughtData = droughtData.dropna()
droughtData.reset_index(inplace=True,drop=True)
# group by date and county and average the drought levels of each week to obtain a monthly summary
groupedDrought = droughtData.groupby(["Date","County"])
groupedDrought = groupedDrought.mean()
# export as csv
groupedDrought.to_csv("./data/clean/drought_data_clean.csv")
return groupedDrought
else:
groupedDrought = pd.read_csv("./data/clean/drought_data_clean.csv")
return groupedDrought
def lin_model():
print("MODEL RAN")
# import fire data
fireFile = "./data/clean/fire_data_clean.csv"
fireData = pd.read_csv(fireFile)
droughtFile = "./data/clean/drought_data_clean.csv"
droughtData = pd.read_csv(droughtFile)
precipFile = "./data/clean/precip_data_clean.csv"
precipData = pd.read_csv(precipFile)
droughtMerged = pd.merge(droughtData, fireData, on = ["Date", "County"])
precipMerged = pd.merge(precipData, fireData, on = ["Date","County"])
masterMerge = pd.merge(droughtMerged, precipData, on = ["Date","County"])
droughtML = pd.get_dummies(droughtMerged)
precipML = pd.get_dummies(precipMerged)
masterML = pd.get_dummies(masterMerge)
masterML.drop(columns='None', inplace=True)
df = masterML
X = df
y = df["AcresBurned"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
reg = LinearRegression().fit(X_train_scaled, y_train)
reg_score_val = reg.score(X_test_scaled, y_test)
return reg_score_val
def lasso_model():
# import fire data
fireFile = "./data/clean/fire_data_clean.csv"
fireData = pd.read_csv(fireFile)
droughtFile = "./data/clean/drought_data_clean.csv"
droughtData = pd.read_csv(droughtFile)
precipFile = "./data/clean/precip_data_clean.csv"
precipData = pd.read_csv(precipFile)
droughtMerged = pd.merge(droughtData, fireData, on = ["Date", "County"])
precipMerged = pd.merge(precipData, fireData, on = ["Date","County"])
masterMerge = pd.merge(droughtMerged, precipData, on = ["Date","County"])
masterML = pd.get_dummies(masterMerge)
masterML.drop(columns='None', inplace=True)
df = masterML
X = df
y = df["AcresBurned"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
lasso = Lasso().fit(X_train_scaled, y_train)
lasso_score_val = lasso.score(X_test_scaled, y_test)
return lasso_score_val
def random_forest():
# import fire data
fireFile = "./data/clean/fire_data_clean.csv"
fireData = pd.read_csv(fireFile)
droughtFile = "./data/clean/drought_data_clean.csv"
droughtData = pd.read_csv(droughtFile)
precipFile = "./data/clean/precip_data_clean.csv"
precipData = pd.read_csv(precipFile)
droughtMerged = pd.merge(droughtData, fireData, on = ["Date", "County"])
precipMerged = pd.merge(precipData, fireData, on = ["Date","County"])
masterMerge = pd.merge(droughtMerged, precipData, on = ["Date","County"])
masterML = pd.get_dummies(masterMerge)
masterML.drop(columns='None', inplace=True)
df = masterML
X = df
y = df["AcresBurned"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
clf.score(X_train, y_train)
random_forest_val = clf.score(X_test, y_test)
return random_forest_val
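# Every model function above repeats the same read/merge/encode/split/scale steps. A shared
# helper along these lines (a sketch, not part of the original app) would remove the
# duplication. Note that it also drops AcresBurned from X: the functions above keep the
# target among the features, which lets each model see the value it is asked to predict.
def prepare_model_data():
    fireData = pd.read_csv("./data/clean/fire_data_clean.csv")
    droughtData = pd.read_csv("./data/clean/drought_data_clean.csv")
    precipData = pd.read_csv("./data/clean/precip_data_clean.csv")
    droughtMerged = pd.merge(droughtData, fireData, on=["Date", "County"])
    masterMerge = pd.merge(droughtMerged, precipData, on=["Date", "County"])
    masterML = pd.get_dummies(masterMerge).drop(columns="None")
    X = masterML.drop(columns="AcresBurned")
    y = masterML["AcresBurned"]
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
    scaler = StandardScaler().fit(X_train)
    return X_train, X_test, y_train, y_test, scaler.transform(X_train), scaler.transform(X_test)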
def plot_rnd_frst():
# import fire data
fireFile = "./data/clean/fire_data_clean.csv"
fireData = pd.read_csv(fireFile)
droughtFile = "./data/clean/drought_data_clean.csv"
droughtData = pd.read_csv(droughtFile)
precipFile = "./data/clean/precip_data_clean.csv"
precipData = pd.read_csv(precipFile)
droughtMerged = pd.merge(droughtData, fireData, on = ["Date", "County"])
precipMerged = pd.merge(precipData, fireData, on = ["Date","County"])
masterMerge = pd.merge(droughtMerged, precipData, on = ["Date","County"])
masterML = pd.get_dummies(masterMerge)
masterML.drop(columns='None', inplace=True)
df = masterML
X = df
y = df["AcresBurned"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
clf.score(X_train, y_train)
random_forest_val = clf.score(X_test, y_test)
plt.scatter(list(X_test["Precip"]), list(y_test.values), c="Green", label="Training Data")
plt.scatter(list(X_test["Precip"]), clf.predict(X_test), c="Red", label="Prediction")
plt.ylabel('Acres Burned')
plt.xlabel('Precipitation Level by County')
plt.legend()
#plt.hlines(y=0, xmin=y.min(), xmax=y.max())
plt.title("Random Forest Classification on Precipitation")
return plt
def plot_lin_reg():
print("MODEL RAN")
# import fire data
fireFile = "./data/clean/fire_data_clean.csv"
fireData = pd.read_csv(fireFile)
droughtFile = "./data/clean/drought_data_clean.csv"
droughtData = pd.read_csv(droughtFile)
precipFile = "./data/clean/precip_data_clean.csv"
precipData = pd.read_csv(precipFile)
droughtMerged = pd.merge(droughtData, fireData, on = ["Date", "County"])
precipMerged = pd.merge(precipData, fireData, on = ["Date","County"])
masterMerge = pd.merge(droughtMerged, precipData, on = ["Date","County"])
droughtML = pd.get_dummies(droughtMerged)
precipML = pd.get_dummies(precipMerged)
masterML = pd.get_dummies(masterMerge)
masterML.drop(columns='None', inplace=True)
df = masterML
X = df
y = df["AcresBurned"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
reg = LinearRegression().fit(X_train_scaled, y_train)
reg_score_val = reg.score(X_test_scaled, y_test)
plt.cla()
plt.scatter(list(X_test["Precip"]), list(y_test.values), c="Green", label="Training Data")
plt.scatter(list(X_test["Precip"]), reg.predict(X_test), c="Red", label="Predictions")
plt.ylabel('Acres Burned')
plt.xlabel('Precipitation Level by County')
plt.legend()
#plt.hlines(y=0, xmin=y.min(), xmax=y.max())
plt.title("Linear Regression Model on Precipitation")
return plt
def rfc_training_drought():
# import fire data
fireFile = "./data/clean/fire_data_clean.csv"
fireData = pd.read_csv(fireFile)
droughtFile = "./data/clean/drought_data_clean.csv"
droughtData = pd.read_csv(droughtFile)
precipFile = "./data/clean/precip_data_clean.csv"
precipData = pd.read_csv(precipFile)
droughtMerged = pd.merge(droughtData, fireData, on = ["Date", "County"])
precipMerged = pd.merge(precipData, fireData, on = ["Date","County"])
masterMerge = pd.merge(droughtMerged, precipData, on = ["Date","County"])
masterML = pd.get_dummies(masterMerge)
masterML.drop(columns='None', inplace=True)
df = masterML
X = df
y = df["AcresBurned"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
clf.score(X_train, y_train)
random_forest_val = clf.score(X_test, y_test)
st.set_option('deprecation.showPyplotGlobalUse', False)
plt.cla()
plt.scatter(list(X_test["D3"]), list(y_test.values), c="green", label="Training Data")
# plt.scatter(list(X_test["Precip"]), clf.predict(X_test), c="red", label="Prediction")
plt.legend()
plt.ylabel('Acres Burned')
plt.xlabel('Percent of County in Drought Level 3')
#plt.hlines(y=0, xmin=y.min(), xmax=y.max())
plt.title("Random Forest Training on Drought D3")
return plt
def rfc_prediction_drought():
# import fire data
fireFile = "./data/clean/fire_data_clean.csv"
fireData = pd.read_csv(fireFile)
droughtFile = "./data/clean/drought_data_clean.csv"
droughtData = pd.read_csv(droughtFile)
precipFile = "./data/clean/precip_data_clean.csv"
precipData = pd.read_csv(precipFile)
droughtMerged = pd.merge(droughtData, fireData, on = ["Date", "County"])
precipMerged = pd.merge(precipData, fireData, on = ["Date","County"])
masterMerge = pd.merge(droughtMerged, precipData, on = ["Date","County"])
masterML = pd.get_dummies(masterMerge)
masterML.drop(columns='None', inplace=True)
df = masterML
X = df
y = df["AcresBurned"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
clf.score(X_train, y_train)
random_forest_val = clf.score(X_test, y_test)
st.set_option('deprecation.showPyplotGlobalUse', False)
plt.cla()
plt.scatter(list(X_test["Precip"]), clf.predict(X_test), c="red", label="Prediction")
plt.legend()
plt.ylabel('Acres Burned')
plt.xlabel('Percent of County in Drought Level 3')
#plt.hlines(y=0, xmin=y.min(), xmax=y.max())
plt.title("Random Forest Testing on Drought D3")
return plt
def rfc_drought_four_training():
# import fire data
fireFile = "./data/clean/fire_data_clean.csv"
fireData = pd.read_csv(fireFile)
droughtFile = "./data/clean/drought_data_clean.csv"
droughtData = pd.read_csv(droughtFile)
precipFile = "./data/clean/precip_data_clean.csv"
precipData = pd.read_csv(precipFile)
droughtMerged = pd.merge(droughtData, fireData, on = ["Date", "County"])
precipMerged = pd.merge(precipData, fireData, on = ["Date","County"])
masterMerge = pd.merge(droughtMerged, precipData, on = ["Date","County"])
masterML = pd.get_dummies(masterMerge)
masterML.drop(columns='None', inplace=True)
df = masterML
X = df
y = df["AcresBurned"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
clf.score(X_train, y_train)
random_forest_val = clf.score(X_test, y_test)
st.set_option('deprecation.showPyplotGlobalUse', False)
plt.cla()
plt.scatter(list(X_test["D4"]), list(y_test.values), c="green", label="Training Data")
# plt.scatter(list(X_test["Precip"]), clf.predict(X_test), c="red", label="Prediction")
plt.legend()
plt.ylabel('Acres Burned')
plt.xlabel('Severe Drought Level 4')
#plt.hlines(y=0, xmin=y.min(), xmax=y.max())
plt.title("Random Forest Training on Drought D4")
return plt
def rfc_drought_four_prediction():
# import fire data
fireFile = "./data/clean/fire_data_clean.csv"
fireData = pd.read_csv(fireFile)
droughtFile = "./data/clean/drought_data_clean.csv"
droughtData = pd.read_csv(droughtFile)
precipFile = "./data/clean/precip_data_clean.csv"
precipData = pd.read_csv(precipFile)
droughtMerged = pd.merge(droughtData, fireData, on = ["Date", "County"])
precipMerged = pd.merge(precipData, fireData, on = ["Date","County"])
masterMerge = pd.merge(droughtMerged, precipData, on = ["Date","County"])
masterML = pd.get_dummies(masterMerge)
masterML.drop(columns='None', inplace=True)
df = masterML
X = df
y = df["AcresBurned"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
clf.score(X_train, y_train)
random_forest_val = clf.score(X_test, y_test)
st.set_option('deprecation.showPyplotGlobalUse', False)
plt.cla()
plt.scatter(list(X_test["D4"]), clf.predict(X_test), c="red", label="Prediction")
plt.legend()
plt.ylabel('Acres Burned')
plt.xlabel('Severe Drought Level 4')
#plt.hlines(y=0, xmin=y.min(), xmax=y.max())
plt.title("Random Forest Testing on Drought D4")
return plt
def lin_reg_d4():
print("MODEL RAN")
# import fire data
fireFile = "./data/clean/fire_data_clean.csv"
fireData = pd.read_csv(fireFile)
droughtFile = "./data/clean/drought_data_clean.csv"
droughtData = pd.read_csv(droughtFile)
precipFile = "./data/clean/precip_data_clean.csv"
precipData = pd.read_csv(precipFile)
droughtMerged = pd.merge(droughtData, fireData, on = ["Date", "County"])
precipMerged = pd.merge(precipData, fireData, on = ["Date","County"])
masterMerge = pd.merge(droughtMerged, precipData, on = ["Date","County"])
droughtML = pd.get_dummies(droughtMerged)
precipML = pd.get_dummies(precipMerged)
masterML = pd.get_dummies(masterMerge)
masterML.drop(columns='None', inplace=True)
df = masterML
X = df
y = df["AcresBurned"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
reg = LinearRegression().fit(X_train_scaled, y_train)
reg_score_val = reg.score(X_test_scaled, y_test)
plt.cla()
st.set_option('deprecation.showPyplotGlobalUse', False)
plt.scatter(list(X_test["D4"]), list(y_test.values), c="green", label="Training Data")
plt.scatter(list(X_test["D4"]), reg.predict(X_test), c="red", label="Prediction")
plt.legend()
plt.ylabel('Acres Burned')
plt.xlabel('Drought Level 4')
#plt.hlines(y=0, xmin=y.min(), xmax=y.max())
plt.title("Linear Regression Severe Drought Level 4")
return plt
def lin_reg_d3():
print("MODEL RAN")
# import fire data
fireFile = "./data/clean/fire_data_clean.csv"
fireData = pd.read_csv(fireFile)
droughtFile = "./data/clean/drought_data_clean.csv"
droughtData = pd.read_csv(droughtFile)
precipFile = "./data/clean/precip_data_clean.csv"
precipData = pd.read_csv(precipFile)
droughtMerged = pd.merge(droughtData, fireData, on = ["Date", "County"])
precipMerged = pd.merge(precipData, fireData, on = ["Date","County"])
masterMerge = pd.merge(droughtMerged, precipData, on = ["Date","County"])
droughtML = pd.get_dummies(droughtMerged)
precipML = pd.get_dummies(precipMerged)
    masterML = pd.get_dummies(masterMerge)
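    # NOTE: the remainder of lin_reg_d3 is missing from this dump; the lines
    # below are a sketch that assumes it mirrors lin_reg_d4 above, with the
    # D3 column (and matching axis label/title) in place of D4.
    masterML.drop(columns='None', inplace=True)
    df = masterML
    X = df
    y = df["AcresBurned"]
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
    scaler = StandardScaler().fit(X_train)
    X_train_scaled = scaler.transform(X_train)
    X_test_scaled = scaler.transform(X_test)
    reg = LinearRegression().fit(X_train_scaled, y_train)
    reg_score_val = reg.score(X_test_scaled, y_test)
    plt.cla()
    st.set_option('deprecation.showPyplotGlobalUse', False)
    plt.scatter(list(X_test["D3"]), list(y_test.values), c="green", label="Training Data")
    plt.scatter(list(X_test["D3"]), reg.predict(X_test), c="red", label="Prediction")
    plt.legend()
    plt.ylabel('Acres Burned')
    plt.xlabel('Drought Level 3')
    plt.title("Linear Regression Drought Level 3")
    return plt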
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# Use seaborn for pairplot
#pip install -q seaborn
# visualization tools
#%matplotlib inline
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# Make numpy printouts easier to read.
np.set_printoptions(precision=3, suppress=True)
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
print(tf.__version__)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(url, names=column_names,
na_values='?', comment='\t',
sep=' ', skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
dataset.isna().sum()
dataset = dataset.dropna()
dataset['Origin'] = dataset['Origin'].map({1: 'USA', 2: 'Europe', 3: 'Japan'})
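# One-hot encode the now-categorical Origin column into USA/Europe/Japan indicator columns.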
dataset = pd.get_dummies(dataset, prefix='', prefix_sep='')
dataset.tail()
train_dataset = dataset.sample(frac=0.8, random_state=0)
test_dataset = dataset.drop(train_dataset.index)
sns.pairplot(train_dataset[['MPG', 'Cylinders', 'Displacement', 'Weight']], diag_kind='kde')
train_dataset.describe().transpose()
train_features = train_dataset.copy()
test_features = test_dataset.copy()
train_labels = train_features.pop('MPG')
test_labels = test_features.pop('MPG')
train_dataset.describe().transpose()[['mean', 'std']]
normalizer = preprocessing.Normalization()
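# adapt() computes each feature's mean and variance from the training data;
# the layer will standardize its inputs with those statistics.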
normalizer.adapt(np.array(train_features))
print(normalizer.mean.numpy())
first = np.array(train_features[:1])
with np.printoptions(precision=2, suppress=True):
print('First example:', first)
print()
print('Normalized:', normalizer(first).numpy())
horsepower = np.array(train_features['Horsepower'])
horsepower_normalizer = preprocessing.Normalization(input_shape=[1,])
horsepower_normalizer.adapt(horsepower)
horsepower_model = tf.keras.Sequential([
horsepower_normalizer,
layers.Dense(units=1)
])
horsepower_model.summary()
horsepower_model.predict(horsepower[:10])
horsepower_model.compile(
optimizer=tf.optimizers.Adam(learning_rate=0.1),
loss='mean_absolute_error')
#%%time
history = horsepower_model.fit(
train_features['Horsepower'], train_labels,
epochs=100,
# suppress logging
verbose=0,
# Calculate validation results on 20% of the training data
validation_split = 0.2)
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
def plot_loss(history):
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.ylim([0, 10])
plt.xlabel('Epoch')
plt.ylabel('Error [MPG]')
plt.legend()
plt.grid(True)
#plt.show()
plt.draw()
plt.pause(0.001)
input("Open Ports --> Open Preview or Browser --> push enter to continue")
plot_loss(history)
test_results = {}
test_results['horsepower_model'] = horsepower_model.evaluate(
test_features['Horsepower'],
test_labels, verbose=0)
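# Evaluate the single-feature model over an evenly spaced 0-250 horsepower grid
# so the fitted line can be plotted against the data.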
x = tf.linspace(0.0, 250, 251)
y = horsepower_model.predict(x)
def plot_horsepower(x, y):
plt.scatter(train_features['Horsepower'], train_labels, label='Data')
plt.plot(x, y, color='k', label='Predictions')
plt.xlabel('Horsepower')
plt.ylabel('MPG')
plt.legend()
#plt.show()
plt.draw()
plt.pause(0.001)
input("Open Ports --> Open Preview or Browser --> push enter to continue")
plot_horsepower(x,y)
linear_model = tf.keras.Sequential([
normalizer,
layers.Dense(units=1)
])
linear_model.predict(train_features[:10])
linear_model.layers[1].kernel
linear_model.compile(
optimizer=tf.optimizers.Adam(learning_rate=0.1),
loss='mean_absolute_error')
#%%time
history = linear_model.fit(
train_features, train_labels,
epochs=100,
# suppress logging
verbose=0,
# Calculate validation results on 20% of the training data
validation_split = 0.2)
plot_loss(history)
test_results['linear_model'] = linear_model.evaluate(
test_features, test_labels, verbose=0)
def build_and_compile_model(norm):
model = keras.Sequential([
norm,
layers.Dense(64, activation='relu'),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
model.compile(loss='mean_absolute_error',
optimizer=tf.keras.optimizers.Adam(0.001))
return model
dnn_horsepower_model = build_and_compile_model(horsepower_normalizer)
dnn_horsepower_model.summary()
#%%time
history = dnn_horsepower_model.fit(
train_features['Horsepower'], train_labels,
validation_split=0.2,
verbose=0, epochs=100)
plot_loss(history)
x = tf.linspace(0.0, 250, 251)
y = dnn_horsepower_model.predict(x)
plot_horsepower(x, y)
test_results['dnn_horsepower_model'] = dnn_horsepower_model.evaluate(
test_features['Horsepower'], test_labels,
verbose=0)
dnn_model = build_and_compile_model(normalizer)
dnn_model.summary()
#%%time
history = dnn_model.fit(
train_features, train_labels,
validation_split=0.2,
verbose=0, epochs=100)
plot_loss(history)
test_results['dnn_model'] = dnn_model.evaluate(test_features, test_labels, verbose=0)
pd.DataFrame(test_results, index=['Mean absolute error [MPG]'])
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from datetime import time
from os.path import abspath, dirname, join
from unittest import TestCase
import typing
import re
import functools
import itertools
import pathlib
from collections import abc
import pytest
import numpy as np
import pandas as pd
import pandas.testing as tm
from pandas import Timedelta, read_csv
from parameterized import parameterized
import pytz
from pytz import UTC
from toolz import concat
from exchange_calendars import get_calendar
from exchange_calendars.calendar_utils import (
ExchangeCalendarDispatcher,
_default_calendar_aliases,
_default_calendar_factories,
)
from exchange_calendars.errors import (
CalendarNameCollision,
InvalidCalendarName,
NoSessionsError,
)
from exchange_calendars.exchange_calendar import ExchangeCalendar, days_at_time
from .test_utils import T
class FakeCalendar(ExchangeCalendar):
name = "DMY"
tz = "Asia/Ulaanbaatar"
open_times = ((None, time(11, 13)),)
close_times = ((None, time(11, 49)),)
class CalendarRegistrationTestCase(TestCase):
def setup_method(self, method):
self.dummy_cal_type = FakeCalendar
self.dispatcher = ExchangeCalendarDispatcher({}, {}, {})
def teardown_method(self, method):
self.dispatcher.clear_calendars()
def test_register_calendar(self):
# Build a fake calendar
dummy_cal = self.dummy_cal_type()
# Try to register and retrieve the calendar
self.dispatcher.register_calendar("DMY", dummy_cal)
retr_cal = self.dispatcher.get_calendar("DMY")
self.assertEqual(dummy_cal, retr_cal)
# Try to register again, expecting a name collision
with self.assertRaises(CalendarNameCollision):
self.dispatcher.register_calendar("DMY", dummy_cal)
# Deregister the calendar and ensure that it is removed
self.dispatcher.deregister_calendar("DMY")
with self.assertRaises(InvalidCalendarName):
self.dispatcher.get_calendar("DMY")
def test_register_calendar_type(self):
self.dispatcher.register_calendar_type("DMY", self.dummy_cal_type)
retr_cal = self.dispatcher.get_calendar("DMY")
self.assertEqual(self.dummy_cal_type, type(retr_cal))
def test_both_places_are_checked(self):
dummy_cal = self.dummy_cal_type()
# if instance is registered, can't register type with same name
self.dispatcher.register_calendar("DMY", dummy_cal)
with self.assertRaises(CalendarNameCollision):
self.dispatcher.register_calendar_type("DMY", type(dummy_cal))
self.dispatcher.deregister_calendar("DMY")
# if type is registered, can't register instance with same name
self.dispatcher.register_calendar_type("DMY", type(dummy_cal))
with self.assertRaises(CalendarNameCollision):
self.dispatcher.register_calendar("DMY", dummy_cal)
def test_force_registration(self):
self.dispatcher.register_calendar("DMY", self.dummy_cal_type())
first_dummy = self.dispatcher.get_calendar("DMY")
# force-register a new instance
self.dispatcher.register_calendar("DMY", self.dummy_cal_type(), force=True)
second_dummy = self.dispatcher.get_calendar("DMY")
self.assertNotEqual(first_dummy, second_dummy)
class DefaultsTestCase(TestCase):
def test_default_calendars(self):
dispatcher = ExchangeCalendarDispatcher(
calendars={},
calendar_factories=_default_calendar_factories,
aliases=_default_calendar_aliases,
)
# These are ordered aliases first, so that we can deregister the
# canonical factories when we're done with them, and we'll be done with
# them after they've been used by all aliases and by canonical name.
for name in concat([_default_calendar_aliases, _default_calendar_factories]):
self.assertIsNotNone(
dispatcher.get_calendar(name), "get_calendar(%r) returned None" % name
)
dispatcher.deregister_calendar(name)
class DaysAtTimeTestCase(TestCase):
@parameterized.expand(
[
# NYSE standard day
(
"2016-07-19",
0,
time(9, 31),
pytz.timezone("America/New_York"),
"2016-07-19 9:31",
),
# CME standard day
(
"2016-07-19",
-1,
time(17, 1),
pytz.timezone("America/Chicago"),
"2016-07-18 17:01",
),
# CME day after DST start
(
"2004-04-05",
-1,
time(17, 1),
pytz.timezone("America/Chicago"),
"2004-04-04 17:01",
),
# ICE day after DST start
(
"1990-04-02",
-1,
time(19, 1),
pytz.timezone("America/Chicago"),
"1990-04-01 19:01",
),
]
)
def test_days_at_time(self, day, day_offset, time_offset, tz, expected):
days = pd.DatetimeIndex([pd.Timestamp(day, tz=tz)])
result = days_at_time(days, time_offset, tz, day_offset)[0]
expected = pd.Timestamp(expected, tz=tz).tz_convert(UTC)
self.assertEqual(result, expected)
class ExchangeCalendarTestBase(object):
# Override in subclasses.
answer_key_filename = None
calendar_class = None
# Affects test_start_bound. Should be set to earliest date for which
# calendar can be instantiated, or None if no start bound.
START_BOUND: pd.Timestamp | None = None
# Affects test_end_bound. Should be set to latest date for which
# calendar can be instantiated, or None if no end bound.
END_BOUND: pd.Timestamp | None = None
# Affects tests that care about the empty periods between sessions. Should
# be set to False for 24/7 calendars.
GAPS_BETWEEN_SESSIONS = True
# Affects tests that care about early closes. Should be set to False for
# calendars that don't have any early closes.
HAVE_EARLY_CLOSES = True
# Affects tests that care about late opens. Since most do not, defaulting
# to False.
HAVE_LATE_OPENS = False
# Affects test_for_breaks. True if one or more calendar sessions has a
# break.
HAVE_BREAKS = False
# Affects test_session_has_break.
SESSION_WITH_BREAK = None # None if no session has a break
SESSION_WITHOUT_BREAK = T("2011-06-15") # None if all sessions have breaks
# Affects test_sanity_check_session_lengths. Should be set to the largest
# number of hours that ever appear in a single session.
MAX_SESSION_HOURS = 0
# Affects test_minute_index_to_session_labels.
# Change these if the start/end dates of your test suite don't contain the
# defaults.
MINUTE_INDEX_TO_SESSION_LABELS_START = pd.Timestamp("2011-01-04", tz=UTC)
MINUTE_INDEX_TO_SESSION_LABELS_END = pd.Timestamp("2011-04-04", tz=UTC)
# Affects tests around daylight savings. If possible, should contain two
# dates that are not both in the same daylight savings regime.
DAYLIGHT_SAVINGS_DATES = ["2004-04-05", "2004-11-01"]
# Affects test_start_end. Change these if your calendar start/end
# dates between 2010-01-03 and 2010-01-10 don't match the defaults.
TEST_START_END_FIRST = pd.Timestamp("2010-01-03", tz=UTC)
TEST_START_END_LAST = pd.Timestamp("2010-01-10", tz=UTC)
TEST_START_END_EXPECTED_FIRST = pd.Timestamp("2010-01-04", tz=UTC)
TEST_START_END_EXPECTED_LAST = pd.Timestamp("2010-01-08", tz=UTC)
@staticmethod
def load_answer_key(filename):
"""
Load a CSV from tests/resources/{filename}.csv
"""
fullpath = join(
dirname(abspath(__file__)),
"./resources",
filename + ".csv",
)
return read_csv(
fullpath,
index_col=0,
# NOTE: Merely passing parse_dates=True doesn't cause pandas to set
# the dtype correctly, and passing all reasonable inputs to the
# dtype kwarg cause read_csv to barf.
parse_dates=[0, 1, 2],
date_parser=lambda x: pd.Timestamp(x, tz=UTC),
)
@classmethod
def setup_class(cls):
cls.answers = cls.load_answer_key(cls.answer_key_filename)
cls.start_date = cls.answers.index[0]
cls.end_date = cls.answers.index[-1]
cls.calendar = cls.calendar_class(cls.start_date, cls.end_date)
cls.one_minute = pd.Timedelta(1, "T")
cls.one_hour = pd.Timedelta(1, "H")
cls.one_day = pd.Timedelta(1, "D")
cls.today = pd.Timestamp.now(tz="UTC").floor("D")
@classmethod
def teardown_class(cls):
cls.calendar = None
cls.answers = None
def test_bound_start(self):
if self.START_BOUND is not None:
cal = self.calendar_class(self.START_BOUND, self.today)
self.assertIsInstance(cal, ExchangeCalendar)
start = self.START_BOUND - pd.DateOffset(days=1)
with pytest.raises(ValueError, match=re.escape(f"{start}")):
self.calendar_class(start, self.today)
else:
# verify no bound imposed
cal = self.calendar_class(pd.Timestamp("1902-01-01", tz="UTC"), self.today)
self.assertIsInstance(cal, ExchangeCalendar)
def test_bound_end(self):
if self.END_BOUND is not None:
cal = self.calendar_class(self.today, self.END_BOUND)
self.assertIsInstance(cal, ExchangeCalendar)
end = self.END_BOUND + pd.DateOffset(days=1)
with pytest.raises(ValueError, match=re.escape(f"{end}")):
self.calendar_class(self.today, end)
else:
# verify no bound imposed
cal = self.calendar_class(self.today, pd.Timestamp("2050-01-01", tz="UTC"))
self.assertIsInstance(cal, ExchangeCalendar)
def test_sanity_check_session_lengths(self):
# make sure that no session is longer than self.MAX_SESSION_HOURS hours
for session in self.calendar.all_sessions:
o, c = self.calendar.open_and_close_for_session(session)
delta = c - o
self.assertLessEqual(delta.seconds / 3600, self.MAX_SESSION_HOURS)
def test_calculated_against_csv(self):
tm.assert_index_equal(self.calendar.schedule.index, self.answers.index)
def test_adhoc_holidays_specification(self):
"""adhoc holidays should be tz-naive (#33, #39)."""
dti = pd.DatetimeIndex(self.calendar.adhoc_holidays)
assert dti.tz is None
def test_is_open_on_minute(self):
one_minute = pd.Timedelta(minutes=1)
m = self.calendar.is_open_on_minute
for market_minute in self.answers.market_open[1:]:
market_minute_utc = market_minute
# The exchange should be classified as open on its first minute
self.assertTrue(m(market_minute_utc, _parse=False))
if self.GAPS_BETWEEN_SESSIONS:
# Decrement minute by one, to minute where the market was not
# open
pre_market = market_minute_utc - one_minute
self.assertFalse(m(pre_market, _parse=False))
for market_minute in self.answers.market_close[:-1]:
close_minute_utc = market_minute
# should be open on its last minute
self.assertTrue(m(close_minute_utc, _parse=False))
if self.GAPS_BETWEEN_SESSIONS:
# increment minute by one minute, should be closed
post_market = close_minute_utc + one_minute
self.assertFalse(m(post_market, _parse=False))
def _verify_minute(
self,
calendar,
minute,
next_open_answer,
prev_open_answer,
next_close_answer,
prev_close_answer,
):
next_open = calendar.next_open(minute, _parse=False)
self.assertEqual(next_open, next_open_answer)
prev_open = self.calendar.previous_open(minute, _parse=False)
self.assertEqual(prev_open, prev_open_answer)
next_close = self.calendar.next_close(minute, _parse=False)
self.assertEqual(next_close, next_close_answer)
prev_close = self.calendar.previous_close(minute, _parse=False)
self.assertEqual(prev_close, prev_close_answer)
def test_next_prev_open_close(self):
# for each session, check:
# - the minute before the open (if gaps exist between sessions)
# - the first minute of the session
# - the second minute of the session
# - the minute before the close
# - the last minute of the session
# - the first minute after the close (if gaps exist between sessions)
opens = self.answers.market_open.iloc[1:-2]
closes = self.answers.market_close.iloc[1:-2]
previous_opens = self.answers.market_open.iloc[:-1]
previous_closes = self.answers.market_close.iloc[:-1]
next_opens = self.answers.market_open.iloc[2:]
next_closes = self.answers.market_close.iloc[2:]
for (
open_minute,
close_minute,
previous_open,
previous_close,
next_open,
next_close,
) in zip(
opens, closes, previous_opens, previous_closes, next_opens, next_closes
):
minute_before_open = open_minute - self.one_minute
# minute before open
if self.GAPS_BETWEEN_SESSIONS:
self._verify_minute(
self.calendar,
minute_before_open,
open_minute,
previous_open,
close_minute,
previous_close,
)
# open minute
self._verify_minute(
self.calendar,
open_minute,
next_open,
previous_open,
close_minute,
previous_close,
)
# second minute of session
self._verify_minute(
self.calendar,
open_minute + self.one_minute,
next_open,
open_minute,
close_minute,
previous_close,
)
# minute before the close
self._verify_minute(
self.calendar,
close_minute - self.one_minute,
next_open,
open_minute,
close_minute,
previous_close,
)
# the close
self._verify_minute(
self.calendar,
close_minute,
next_open,
open_minute,
next_close,
previous_close,
)
# minute after the close
if self.GAPS_BETWEEN_SESSIONS:
self._verify_minute(
self.calendar,
close_minute + self.one_minute,
next_open,
open_minute,
next_close,
close_minute,
)
def test_next_prev_minute(self):
all_minutes = self.calendar.all_minutes
# test 20,000 minutes because it takes too long to do the rest.
for idx, minute in enumerate(all_minutes[1:20000]):
self.assertEqual(
all_minutes[idx + 2], self.calendar.next_minute(minute, _parse=False)
)
self.assertEqual(
all_minutes[idx], self.calendar.previous_minute(minute, _parse=False)
)
# test a couple of non-market minutes
if self.GAPS_BETWEEN_SESSIONS:
for open_minute in self.answers.market_open[1:]:
hour_before_open = open_minute - self.one_hour
self.assertEqual(
open_minute,
self.calendar.next_minute(hour_before_open, _parse=False),
)
for close_minute in self.answers.market_close[1:]:
hour_after_close = close_minute + self.one_hour
self.assertEqual(
close_minute,
self.calendar.previous_minute(hour_after_close, _parse=False),
)
def test_date_to_session_label(self):
m = self.calendar.date_to_session_label
sessions = self.answers.index[:30] # first 30 sessions
# test for error if request session prior to first calendar session.
date = self.answers.index[0] - self.one_day
error_msg = (
"Cannot get a session label prior to the first calendar"
f" session ('{self.answers.index[0]}'). Consider passing"
" `direction` as 'next'."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(date, "previous", _parse=False)
# direction as "previous"
dates = pd.date_range(sessions[0], sessions[-1], freq="D")
last_session = None
for date in dates:
session_label = m(date, "previous", _parse=False)
if date in sessions:
assert session_label == date
last_session = session_label
else:
assert session_label == last_session
# direction as "next"
last_session = None
for date in dates.sort_values(ascending=False):
session_label = m(date, "next", _parse=False)
if date in sessions:
assert session_label == date
last_session = session_label
else:
assert session_label == last_session
# test for error if request session after last calendar session.
date = self.answers.index[-1] + self.one_day
error_msg = (
"Cannot get a session label later than the last calendar"
f" session ('{self.answers.index[-1]}'). Consider passing"
" `direction` as 'previous'."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(date, "next", _parse=False)
if self.GAPS_BETWEEN_SESSIONS:
not_sessions = dates[~dates.isin(sessions)][:5]
for not_session in not_sessions:
error_msg = (
f"`date` '{not_session}' does not represent a session. Consider"
" passing a `direction`."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(not_session, "none", _parse=False)
# test default behaviour
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(not_session, _parse=False)
# non-valid direction (can only be thrown if gaps between sessions)
error_msg = (
"'not a direction' is not a valid `direction`. Valid `direction`"
' values are "next", "previous" and "none".'
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(not_session, "not a direction", _parse=False)
def test_minute_to_session_label(self):
m = self.calendar.minute_to_session_label
# minute is prior to first session's open
minute_before_first_open = self.answers.iloc[0].market_open - self.one_minute
session_label = self.answers.index[0]
minutes_that_resolve_to_this_session = [
m(minute_before_first_open, _parse=False),
m(minute_before_first_open, direction="next", _parse=False),
]
unique_session_labels = set(minutes_that_resolve_to_this_session)
self.assertTrue(len(unique_session_labels) == 1)
self.assertIn(session_label, unique_session_labels)
with self.assertRaises(ValueError):
m(minute_before_first_open, direction="previous", _parse=False)
with self.assertRaises(ValueError):
m(minute_before_first_open, direction="none", _parse=False)
# minute is between first session's open and last session's close
for idx, (session_label, open_minute, close_minute, _, _) in enumerate(
self.answers.iloc[1:-2].itertuples(name=None)
):
hour_into_session = open_minute + self.one_hour
minute_before_session = open_minute - self.one_minute
minute_after_session = close_minute + self.one_minute
next_session_label = self.answers.index[idx + 2]
previous_session_label = self.answers.index[idx]
# verify that minutes inside a session resolve correctly
minutes_that_resolve_to_this_session = [
m(open_minute, _parse=False),
m(open_minute, direction="next", _parse=False),
m(open_minute, direction="previous", _parse=False),
m(open_minute, direction="none", _parse=False),
m(hour_into_session, _parse=False),
m(hour_into_session, direction="next", _parse=False),
m(hour_into_session, direction="previous", _parse=False),
m(hour_into_session, direction="none", _parse=False),
m(close_minute),
m(close_minute, direction="next", _parse=False),
m(close_minute, direction="previous", _parse=False),
m(close_minute, direction="none", _parse=False),
session_label,
]
if self.GAPS_BETWEEN_SESSIONS:
minutes_that_resolve_to_this_session.append(
m(minute_before_session, _parse=False)
)
minutes_that_resolve_to_this_session.append(
m(minute_before_session, direction="next", _parse=False)
)
minutes_that_resolve_to_this_session.append(
m(minute_after_session, direction="previous", _parse=False)
)
self.assertTrue(
all(
x == minutes_that_resolve_to_this_session[0]
for x in minutes_that_resolve_to_this_session
)
)
minutes_that_resolve_to_next_session = [
m(minute_after_session, _parse=False),
m(minute_after_session, direction="next", _parse=False),
next_session_label,
]
self.assertTrue(
all(
x == minutes_that_resolve_to_next_session[0]
for x in minutes_that_resolve_to_next_session
)
)
self.assertEqual(
m(minute_before_session, direction="previous", _parse=False),
previous_session_label,
)
if self.GAPS_BETWEEN_SESSIONS:
# Make sure we use the cache correctly
minutes_that_resolve_to_different_sessions = [
m(minute_after_session, direction="next", _parse=False),
m(minute_after_session, direction="previous", _parse=False),
m(minute_after_session, direction="next", _parse=False),
]
self.assertEqual(
minutes_that_resolve_to_different_sessions,
[next_session_label, session_label, next_session_label],
)
# make sure that exceptions are raised at the right time
with self.assertRaises(ValueError):
m(open_minute, "asdf", _parse=False)
if self.GAPS_BETWEEN_SESSIONS:
with self.assertRaises(ValueError):
m(minute_before_session, direction="none", _parse=False)
# minute is later than last session's close
minute_after_last_close = self.answers.iloc[-1].market_close + self.one_minute
session_label = self.answers.index[-1]
minute_that_resolves_to_session_label = m(
minute_after_last_close, direction="previous", _parse=False
)
self.assertEqual(session_label, minute_that_resolves_to_session_label)
with self.assertRaises(ValueError):
m(minute_after_last_close, _parse=False)
with self.assertRaises(ValueError):
m(minute_after_last_close, direction="next", _parse=False)
with self.assertRaises(ValueError):
m(minute_after_last_close, direction="none", _parse=False)
@parameterized.expand(
[
(1, 0),
(2, 0),
(2, 1),
]
)
def test_minute_index_to_session_labels(self, interval, offset):
minutes = self.calendar.minutes_for_sessions_in_range(
self.MINUTE_INDEX_TO_SESSION_LABELS_START,
self.MINUTE_INDEX_TO_SESSION_LABELS_END,
)
minutes = minutes[range(offset, len(minutes), interval)]
np.testing.assert_array_equal(
pd.DatetimeIndex(minutes.map(self.calendar.minute_to_session_label)),
self.calendar.minute_index_to_session_labels(minutes),
)
def test_next_prev_session(self):
session_labels = self.answers.index[1:-2]
max_idx = len(session_labels) - 1
# the very first session
first_session_label = self.answers.index[0]
with self.assertRaises(ValueError):
self.calendar.previous_session_label(first_session_label, _parse=False)
# all the sessions in the middle
for idx, session_label in enumerate(session_labels):
if idx < max_idx:
self.assertEqual(
self.calendar.next_session_label(session_label, _parse=False),
session_labels[idx + 1],
)
if idx > 0:
self.assertEqual(
self.calendar.previous_session_label(session_label, _parse=False),
session_labels[idx - 1],
)
# the very last session
last_session_label = self.answers.index[-1]
with self.assertRaises(ValueError):
self.calendar.next_session_label(last_session_label, _parse=False)
@staticmethod
def _find_full_session(calendar):
for session_label in calendar.schedule.index:
if session_label not in calendar.early_closes:
return session_label
return None
def test_minutes_for_period(self):
# full session
# find a session that isn't an early close. start from the first
# session, should be quick.
full_session_label = self._find_full_session(self.calendar)
if full_session_label is None:
raise ValueError("Cannot find a full session to test!")
minutes = self.calendar.minutes_for_session(full_session_label)
_open, _close = self.calendar.open_and_close_for_session(full_session_label)
_break_start, _break_end = self.calendar.break_start_and_end_for_session(
full_session_label
)
if not pd.isnull(_break_start):
constructed_minutes = np.concatenate(
[
pd.date_range(start=_open, end=_break_start, freq="min"),
pd.date_range(start=_break_end, end=_close, freq="min"),
]
)
else:
constructed_minutes = pd.date_range(start=_open, end=_close, freq="min")
np.testing.assert_array_equal(
minutes,
constructed_minutes,
)
# early close period
if self.HAVE_EARLY_CLOSES:
early_close_session_label = self.calendar.early_closes[0]
minutes_for_early_close = self.calendar.minutes_for_session(
early_close_session_label
)
_open, _close = self.calendar.open_and_close_for_session(
early_close_session_label
)
np.testing.assert_array_equal(
minutes_for_early_close,
pd.date_range(start=_open, end=_close, freq="min"),
)
# late open period
if self.HAVE_LATE_OPENS:
late_open_session_label = self.calendar.late_opens[0]
minutes_for_late_open = self.calendar.minutes_for_session(
late_open_session_label
)
_open, _close = self.calendar.open_and_close_for_session(
late_open_session_label
)
np.testing.assert_array_equal(
minutes_for_late_open,
pd.date_range(start=_open, end=_close, freq="min"),
)
def test_sessions_in_range(self):
# pick two sessions
session_count = len(self.calendar.schedule.index)
first_idx = session_count // 3
second_idx = 2 * first_idx
first_session_label = self.calendar.schedule.index[first_idx]
second_session_label = self.calendar.schedule.index[second_idx]
answer_key = self.calendar.schedule.index[first_idx : second_idx + 1]
rtrn = self.calendar.sessions_in_range(
first_session_label, second_session_label, _parse=False
)
np.testing.assert_array_equal(answer_key, rtrn)
def get_session_block(self):
"""
Get an "interesting" range of three sessions in a row. By default this
tries to find and return a (full session, early close session, full
session) block.
"""
if not self.HAVE_EARLY_CLOSES:
# If we don't have any early closes, just return a "random" chunk
# of three sessions.
return self.calendar.all_sessions[10:13]
shortened_session = self.calendar.early_closes[0]
shortened_session_idx = self.calendar.schedule.index.get_loc(shortened_session)
session_before = self.calendar.schedule.index[shortened_session_idx - 1]
session_after = self.calendar.schedule.index[shortened_session_idx + 1]
return [session_before, shortened_session, session_after]
def test_minutes_in_range(self):
sessions = self.get_session_block()
first_open, first_close = self.calendar.open_and_close_for_session(sessions[0])
minute_before_first_open = first_open - self.one_minute
middle_open, middle_close = self.calendar.open_and_close_for_session(
sessions[1]
)
last_open, last_close = self.calendar.open_and_close_for_session(sessions[-1])
minute_after_last_close = last_close + self.one_minute
# get all the minutes between first_open and last_close
minutes1 = self.calendar.minutes_in_range(first_open, last_close, _parse=False)
minutes2 = self.calendar.minutes_in_range(
minute_before_first_open, minute_after_last_close, _parse=False
)
if self.GAPS_BETWEEN_SESSIONS:
np.testing.assert_array_equal(minutes1, minutes2)
else:
# if no gaps, then minutes2 should have 2 extra minutes
np.testing.assert_array_equal(minutes1, minutes2[1:-1])
# manually construct the minutes
(
first_break_start,
first_break_end,
) = self.calendar.break_start_and_end_for_session(sessions[0])
(
middle_break_start,
middle_break_end,
) = self.calendar.break_start_and_end_for_session(sessions[1])
(
last_break_start,
last_break_end,
) = self.calendar.break_start_and_end_for_session(sessions[-1])
intervals = [
(first_open, first_break_start, first_break_end, first_close),
(middle_open, middle_break_start, middle_break_end, middle_close),
(last_open, last_break_start, last_break_end, last_close),
]
all_minutes = []
for _open, _break_start, _break_end, _close in intervals:
if pd.isnull(_break_start):
all_minutes.append(
pd.date_range(start=_open, end=_close, freq="min"),
)
else:
all_minutes.append(
pd.date_range(start=_open, end=_break_start, freq="min"),
)
all_minutes.append(
pd.date_range(start=_break_end, end=_close, freq="min"),
)
all_minutes = np.concatenate(all_minutes)
np.testing.assert_array_equal(all_minutes, minutes1)
def test_minutes_for_sessions_in_range(self):
sessions = self.get_session_block()
minutes = self.calendar.minutes_for_sessions_in_range(sessions[0], sessions[-1])
# do it manually
session0_minutes = self.calendar.minutes_for_session(sessions[0])
session1_minutes = self.calendar.minutes_for_session(sessions[1])
session2_minutes = self.calendar.minutes_for_session(sessions[2])
concatenated_minutes = np.concatenate(
[session0_minutes.values, session1_minutes.values, session2_minutes.values]
)
np.testing.assert_array_equal(concatenated_minutes, minutes.values)
def test_sessions_window(self):
sessions = self.get_session_block()
np.testing.assert_array_equal(
self.calendar.sessions_window(sessions[0], len(sessions) - 1, _parse=False),
self.calendar.sessions_in_range(sessions[0], sessions[-1], _parse=False),
)
np.testing.assert_array_equal(
self.calendar.sessions_window(
sessions[-1], -1 * (len(sessions) - 1), _parse=False
),
self.calendar.sessions_in_range(sessions[0], sessions[-1], _parse=False),
)
def test_session_distance(self):
sessions = self.get_session_block()
forward_distance = self.calendar.session_distance(
sessions[0],
sessions[-1],
_parse=False,
)
self.assertEqual(forward_distance, len(sessions))
backward_distance = self.calendar.session_distance(
sessions[-1],
sessions[0],
_parse=False,
)
self.assertEqual(backward_distance, -len(sessions))
one_day_distance = self.calendar.session_distance(
sessions[0],
sessions[0],
_parse=False,
)
self.assertEqual(one_day_distance, 1)
def test_open_and_close_for_session(self):
for session_label, open_answer, close_answer, _, _ in self.answers.itertuples(
name=None
):
found_open, found_close = self.calendar.open_and_close_for_session(
session_label, _parse=False
)
# Test that the methods for just session open and close produce the
# same values as the method for getting both.
alt_open = self.calendar.session_open(session_label, _parse=False)
self.assertEqual(alt_open, found_open)
alt_close = self.calendar.session_close(session_label, _parse=False)
self.assertEqual(alt_close, found_close)
self.assertEqual(open_answer, found_open)
self.assertEqual(close_answer, found_close)
def test_session_opens_in_range(self):
found_opens = self.calendar.session_opens_in_range(
self.answers.index[0],
self.answers.index[-1],
_parse=False,
)
found_opens.index.freq = None
tm.assert_series_equal(found_opens, self.answers["market_open"])
def test_session_closes_in_range(self):
found_closes = self.calendar.session_closes_in_range(
self.answers.index[0],
self.answers.index[-1],
_parse=False,
)
found_closes.index.freq = None
tm.assert_series_equal(found_closes, self.answers["market_close"])
def test_daylight_savings(self):
# 2004 daylight savings switches:
# Sunday 2004-04-04 and Sunday 2004-10-31
# make sure there's no weirdness around calculating the next day's
# session's open time.
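        # open_times is a tuple of (start_date, open_time) pairs in which a None
        # start date means 'from the first session'; swap the None key for
        # Timestamp.min so the Series index below is sortable and searchsorted works.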
m = dict(self.calendar.open_times)
m[pd.Timestamp.min] = m.pop(None)
open_times = pd.Series(m)
for date in self.DAYLIGHT_SAVINGS_DATES:
next_day = pd.Timestamp(date, tz=UTC)
open_date = next_day + Timedelta(days=self.calendar.open_offset)
the_open = self.calendar.schedule.loc[next_day].market_open
localized_open = the_open.tz_localize(UTC).tz_convert(self.calendar.tz)
self.assertEqual(
(open_date.year, open_date.month, open_date.day),
(localized_open.year, localized_open.month, localized_open.day),
)
open_ix = open_times.index.searchsorted(pd.Timestamp(date), side="right")
if open_ix == len(open_times):
open_ix -= 1
self.assertEqual(open_times.iloc[open_ix].hour, localized_open.hour)
self.assertEqual(open_times.iloc[open_ix].minute, localized_open.minute)
def test_start_end(self):
"""
Check ExchangeCalendar with defined start/end dates.
"""
calendar = self.calendar_class(
start=self.TEST_START_END_FIRST,
end=self.TEST_START_END_LAST,
)
self.assertEqual(
calendar.first_trading_session,
self.TEST_START_END_EXPECTED_FIRST,
)
self.assertEqual(
calendar.last_trading_session,
self.TEST_START_END_EXPECTED_LAST,
)
def test_has_breaks(self):
has_breaks = self.calendar.has_breaks()
self.assertEqual(has_breaks, self.HAVE_BREAKS)
def test_session_has_break(self):
if self.SESSION_WITHOUT_BREAK is not None:
self.assertFalse(
self.calendar.session_has_break(self.SESSION_WITHOUT_BREAK)
)
if self.SESSION_WITH_BREAK is not None:
self.assertTrue(self.calendar.session_has_break(self.SESSION_WITH_BREAK))
# TODO remove this class when all calendars migrated. No longer required as
# `minute_index_to_session_labels` comprehensively tested under new suite.
class OpenDetectionTestCase(TestCase):
# This is an extra set of unit tests that were added during a rewrite of
# `minute_index_to_session_labels` to ensure that the existing
# calendar-generic test suite correctly covered edge cases around
# non-market minutes.
def test_detect_non_market_minutes(self):
cal = get_calendar("NYSE")
# NOTE: This test is here instead of being on the base class for all
# calendars because some of our calendars are 24/7, which means there
# aren't any non-market minutes to find.
day0 = cal.minutes_for_sessions_in_range(
pd.Timestamp("2013-07-03", tz=UTC),
pd.Timestamp("2013-07-03", tz=UTC),
)
for minute in day0:
self.assertTrue(cal.is_open_on_minute(minute))
day1 = cal.minutes_for_sessions_in_range(
pd.Timestamp("2013-07-05", tz=UTC),
pd.Timestamp("2013-07-05", tz=UTC),
)
for minute in day1:
self.assertTrue(cal.is_open_on_minute(minute))
def NYSE_timestamp(s):
return pd.Timestamp(s, tz="America/New_York").tz_convert(UTC)
non_market = [
# After close.
NYSE_timestamp("2013-07-03 16:01"),
# Holiday.
NYSE_timestamp("2013-07-04 10:00"),
# Before open.
NYSE_timestamp("2013-07-05 9:29"),
]
for minute in non_market:
self.assertFalse(cal.is_open_on_minute(minute), minute)
input_ = pd.to_datetime(
np.hstack([day0.values, minute.asm8, day1.values]),
utc=True,
)
with self.assertRaises(ValueError) as e:
cal.minute_index_to_session_labels(input_)
exc_str = str(e.exception)
self.assertIn("First Bad Minute: {}".format(minute), exc_str)
# TODO remove this class when all calendars migrated. No longer required as
# this case is handled by new test base internally.
class NoDSTExchangeCalendarTestBase(ExchangeCalendarTestBase):
def test_daylight_savings(self):
"""
Several countries in Africa / Asia do not observe DST
so we need to skip over this test for those markets
"""
pass
def get_csv(name: str) -> pd.DataFrame:
"""Get csv file as DataFrame for given calendar `name`."""
filename = name.replace("/", "-").lower() + ".csv"
path = pathlib.Path(__file__).parent.joinpath("resources", filename)
df = pd.read_csv(
path,
index_col=0,
parse_dates=[0, 1, 2, 3, 4],
infer_datetime_format=True,
)
df.index = df.index.tz_localize("UTC")
for col in df:
df[col] = df[col].dt.tz_localize("UTC")
return df
class Answers:
"""Inputs and expected output for testing a given calendar and side.
Inputs and expected outputs are provided by public instance methods and
properties. These either read directly from the corresponding .csv file
or are evaluated from the .csv file contents. NB Properites / methods
MUST NOT make evaluations by way of repeating the code of the
ExchangeCalendar method they are intended to test!
Parameters
----------
calendar_name
Canonical name of calendar for which require answer info. For
example, 'XNYS'.
side {'both', 'left', 'right', 'neither'}
Side of sessions to treat as trading minutes.
"""
ONE_MIN = pd.Timedelta(1, "T")
TWO_MIN = pd.Timedelta(2, "T")
ONE_DAY = pd.Timedelta(1, "D")
LEFT_SIDES = ["left", "both"]
RIGHT_SIDES = ["right", "both"]
def __init__(
self,
calendar_name: str,
side: str,
):
self._name = calendar_name.upper()
self._side = side
# --- Exposed constructor arguments ---
@property
def name(self) -> str:
"""Name of corresponding calendar."""
return self._name
@property
def side(self) -> str:
"""Side of calendar for which answers valid."""
return self._side
# --- Properties read (indirectly) from csv file ---
@functools.lru_cache(maxsize=4)
def _answers(self) -> pd.DataFrame:
return get_csv(self.name)
@property
def answers(self) -> pd.DataFrame:
"""Answers as correspoding csv."""
return self._answers()
@property
def sessions(self) -> pd.DatetimeIndex:
"""Session labels."""
return self.answers.index
@property
def opens(self) -> pd.Series:
"""Market open time for each session."""
return self.answers.market_open
@property
def closes(self) -> pd.Series:
"""Market close time for each session."""
return self.answers.market_close
@property
def break_starts(self) -> pd.Series:
"""Break start time for each session."""
return self.answers.break_start
@property
def break_ends(self) -> pd.Series:
"""Break end time for each session."""
return self.answers.break_end
# --- get and helper methods ---
def get_next_session(self, session: pd.Timestamp) -> pd.Timestamp:
"""Get session that immediately follows `session`."""
assert (
session != self.last_session
), "Cannot get session later than last answers' session."
idx = self.sessions.get_loc(session) + 1
return self.sessions[idx]
def session_has_break(self, session: pd.Timestamp) -> bool:
"""Query if `session` has a break."""
return session in self.sessions_with_break
@staticmethod
def get_sessions_sample(sessions: pd.DatetimeIndex):
"""Return sample of given `sessions`.
Sample includes:
All sessions within first two years of `sessions`.
All sessions within last two years of `sessions`.
All sessions falling:
within first 3 days of any month.
from 28th of any month.
from 14th through 16th of any month.
"""
if sessions.empty:
return sessions
mask = (
(sessions < sessions[0] + pd.DateOffset(years=2))
| (sessions > sessions[-1] - pd.DateOffset(years=2))
| (sessions.day <= 3)
| (sessions.day >= 28)
| (14 <= sessions.day) & (sessions.day <= 16)
)
return sessions[mask]
def get_sessions_minutes(
self, start: pd.Timestamp, end: pd.Timestamp | int = 1
) -> pd.DatetimeIndex:
"""Get trading minutes for 1 or more consecutive sessions.
Parameters
----------
start
Session from which to get trading minutes.
end
            Session through which to get trading minutes. Can be passed as:
pd.Timestamp: return will include trading minutes for `end`
session.
int: where int represents number of consecutive sessions
inclusive of `start`, for which require trading
minutes. Default is 1, such that by default will return
trading minutes for only `start` session.
"""
idx = self.sessions.get_loc(start)
stop = idx + end if isinstance(end, int) else self.sessions.get_loc(end) + 1
indexer = slice(idx, stop)
dtis = []
for first, last, last_am, first_pm in zip(
self.first_minutes[indexer],
self.last_minutes[indexer],
self.last_am_minutes[indexer],
self.first_pm_minutes[indexer],
):
if pd.isna(last_am):
dtis.append(pd.date_range(first, last, freq="T"))
else:
dtis.append(pd.date_range(first, last_am, freq="T"))
dtis.append(pd.date_range(first_pm, last, freq="T"))
return dtis[0].union_many(dtis[1:])
# --- Evaluated general calendar properties ---
@functools.lru_cache(maxsize=4)
def _has_a_session_with_break(self) -> pd.DatetimeIndex:
return self.break_starts.notna().any()
@property
def has_a_session_with_break(self) -> bool:
"""Does any session of answers have a break."""
return self._has_a_session_with_break()
@property
def has_a_session_without_break(self) -> bool:
"""Does any session of answers not have a break."""
return self.break_starts.isna().any()
# --- Evaluated properties for first and last sessions ---
@property
def first_session(self) -> pd.Timestamp:
"""First session covered by answers."""
return self.sessions[0]
@property
def last_session(self) -> pd.Timestamp:
"""Last session covered by answers."""
return self.sessions[-1]
@property
def sessions_range(self) -> tuple[pd.Timestamp, pd.Timestamp]:
"""First and last sessions covered by answers."""
return self.first_session, self.last_session
@property
def first_session_open(self) -> pd.Timestamp:
"""Open time of first session covered by answers."""
return self.opens[0]
@property
def last_session_close(self) -> pd.Timestamp:
"""Close time of last session covered by answers."""
return self.closes[-1]
@property
def first_trading_minute(self) -> pd.Timestamp:
open_ = self.first_session_open
return open_ if self.side in self.LEFT_SIDES else open_ + self.ONE_MIN
@property
def last_trading_minute(self) -> pd.Timestamp:
close = self.last_session_close
return close if self.side in self.RIGHT_SIDES else close - self.ONE_MIN
@property
def trading_minutes_range(self) -> tuple[pd.Timestamp, pd.Timestamp]:
"""First and last trading minutes covered by answers."""
return self.first_trading_minute, self.last_trading_minute
# --- out-of-bounds properties ---
@property
def minute_too_early(self) -> pd.Timestamp:
"""Minute earlier than first trading minute."""
return self.first_trading_minute - self.ONE_MIN
@property
def minute_too_late(self) -> pd.Timestamp:
"""Minute later than last trading minute."""
return self.last_trading_minute + self.ONE_MIN
@property
def session_too_early(self) -> pd.Timestamp:
"""Date earlier than first session."""
return self.first_session - self.ONE_DAY
@property
def session_too_late(self) -> pd.Timestamp:
"""Date later than last session."""
return self.last_session + self.ONE_DAY
# --- Evaluated properties covering every session. ---
@functools.lru_cache(maxsize=4)
def _first_minutes(self) -> pd.Series:
if self.side in self.LEFT_SIDES:
minutes = self.opens.copy()
else:
minutes = self.opens + self.ONE_MIN
minutes.name = "first_minutes"
return minutes
@property
def first_minutes(self) -> pd.Series:
"""First trading minute of each session (UTC)."""
return self._first_minutes()
@property
def first_minutes_plus_one(self) -> pd.Series:
"""First trading minute of each session plus one minute."""
return self.first_minutes + self.ONE_MIN
@property
def first_minutes_less_one(self) -> pd.Series:
"""First trading minute of each session less one minute."""
return self.first_minutes - self.ONE_MIN
@functools.lru_cache(maxsize=4)
def _last_minutes(self) -> pd.Series:
if self.side in self.RIGHT_SIDES:
minutes = self.closes.copy()
else:
minutes = self.closes - self.ONE_MIN
minutes.name = "last_minutes"
return minutes
@property
def last_minutes(self) -> pd.Series:
"""Last trading minute of each session."""
return self._last_minutes()
@property
def last_minutes_plus_one(self) -> pd.Series:
"""Last trading minute of each session plus one minute."""
return self.last_minutes + self.ONE_MIN
@property
def last_minutes_less_one(self) -> pd.Series:
"""Last trading minute of each session less one minute."""
return self.last_minutes - self.ONE_MIN
@functools.lru_cache(maxsize=4)
def _last_am_minutes(self) -> pd.Series:
if self.side in self.RIGHT_SIDES:
minutes = self.break_starts.copy()
else:
minutes = self.break_starts - self.ONE_MIN
minutes.name = "last_am_minutes"
return minutes
@property
def last_am_minutes(self) -> pd.Series:
"""Last pre-break trading minute of each session.
NaT if session does not have a break.
"""
return self._last_am_minutes()
@property
def last_am_minutes_plus_one(self) -> pd.Series:
"""Last pre-break trading minute of each session plus one minute."""
return self.last_am_minutes + self.ONE_MIN
@property
def last_am_minutes_less_one(self) -> pd.Series:
"""Last pre-break trading minute of each session less one minute."""
return self.last_am_minutes - self.ONE_MIN
@functools.lru_cache(maxsize=4)
def _first_pm_minutes(self) -> pd.Series:
if self.side in self.LEFT_SIDES:
minutes = self.break_ends.copy()
else:
minutes = self.break_ends + self.ONE_MIN
minutes.name = "first_pm_minutes"
return minutes
@property
def first_pm_minutes(self) -> pd.Series:
"""First post-break trading minute of each session.
NaT if session does not have a break.
"""
return self._first_pm_minutes()
@property
def first_pm_minutes_plus_one(self) -> pd.Series:
"""First post-break trading minute of each session plus one minute."""
return self.first_pm_minutes + self.ONE_MIN
@property
def first_pm_minutes_less_one(self) -> pd.Series:
"""First post-break trading minute of each session less one minute."""
return self.first_pm_minutes - self.ONE_MIN
# --- Evaluated session sets and ranges that meet a specific condition ---
@property
def _mask_breaks(self) -> pd.Series:
return self.break_starts.notna()
@functools.lru_cache(maxsize=4)
def _sessions_with_break(self) -> pd.DatetimeIndex:
return self.sessions[self._mask_breaks]
@property
def sessions_with_break(self) -> pd.DatetimeIndex:
return self._sessions_with_break()
@functools.lru_cache(maxsize=4)
def _sessions_without_break(self) -> pd.DatetimeIndex:
return self.sessions[~self._mask_breaks]
@property
def sessions_without_break(self) -> pd.DatetimeIndex:
return self._sessions_without_break()
@property
def sessions_without_break_run(self) -> pd.DatetimeIndex:
"""Longest run of consecutive sessions without a break."""
s = self.break_starts.isna()
if s.empty:
return pd.DatetimeIndex([], tz="UTC")
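        # (~s).cumsum() increments at every session that has a break, so
        # consecutive break-free sessions share a group id; selecting [s] keeps
        # only break-free sessions, and the largest group is the longest run.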
trues_grouped = (~s).cumsum()[s]
group_sizes = trues_grouped.value_counts()
max_run_size = group_sizes.max()
max_run_group_id = group_sizes[group_sizes == max_run_size].index[0]
run_without_break = trues_grouped[trues_grouped == max_run_group_id].index
return run_without_break
@property
def sessions_without_break_range(self) -> tuple[pd.Timestamp, pd.Timestamp] | None:
"""Longest session range that does not include a session with a break.
Returns None if all sessions have a break.
"""
sessions = self.sessions_without_break_run
if sessions.empty:
return None
return sessions[0], sessions[-1]
@property
def _mask_sessions_without_gap_after(self) -> pd.Series:
if self.side == "neither":
            # will always have a gap after if neither open nor close is a
            # trading minute (assuming sessions cannot overlap)
return pd.Series(False, index=self.sessions)
elif self.side == "both":
# a trading minute cannot be a minute of more than one session.
assert not (self.closes == self.opens.shift(-1)).any()
# there will be no gap if next open is one minute after previous close
closes_plus_min = self.closes + pd.Timedelta(1, "T")
return self.opens.shift(-1) == closes_plus_min
else:
return self.opens.shift(-1) == self.closes
@property
def _mask_sessions_without_gap_before(self) -> pd.Series:
if self.side == "neither":
            # will always have a gap before if neither open nor close is a
            # trading minute (assuming sessions cannot overlap)
            return pd.Series(False, index=self.sessions)
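        # NOTE: the remaining branches of this property are missing from this
        # dump; the sketch below assumes they mirror
        # _mask_sessions_without_gap_after, comparing each session's open with
        # the previous session's close.
        elif self.side == "both":
            # a trading minute cannot be a minute of more than one session.
            assert not (self.closes == self.opens.shift(-1)).any()
            # no gap if the previous close is one minute before this open
            opens_less_min = self.opens - pd.Timedelta(1, "T")
            return self.closes.shift(1) == opens_less_min
        else:
            return self.closes.shift(1) == self.opens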
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
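# min/max/argmin/argmax should skip NaT, work for monotonic and
# non-monotonic indexes, and return NaT for empty or all-NaT indexes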
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
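# np.min/max/argmin/argmax should dispatch to the index methods and
# reject the unsupported 'out' parameter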
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
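# rounding to a coarser fixed freq works on the index and its elements;
# invalid or non-fixed frequencies raise ValueError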
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
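# repr/str/unicode should show values, dtype (including tz) and freq for
# empty, naive, tz-aware and NaT-containing indexes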
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
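# union with overlapping, adjacent and empty indexes preserves tz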
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
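# offsets/timedeltas shift the index, ints shift by the index freq, and
# adding a Timestamp raises TypeError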
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
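# mirror of test_add_iadd: offsets/timedeltas and ints shift backwards,
# in place for the -= variants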
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimedeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
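# comparisons against NaT are False for ==/</> and True for !=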
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values: the n-th element is repeated n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
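# scalar and slice indexing should preserve tz, name and the freq
# inferred from the slice step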
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
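# same battery of index ops tests, run against TimedeltaIndex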
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days'))
self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
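# multiplying by an offset raises TypeError; dividing by an offset gives
# a numeric index with NaT propagated as NaN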
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe we could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values: the n-th element is repeated n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestPeriodIndexOps(Ops):
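# same battery of index ops tests, run against PeriodIndex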
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
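# min/max skip NaT; empty or all-NaT PeriodIndex returns NaT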
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
        other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
                                 '2000-01-01 09:08'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_difference(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = rng4
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(
['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3),
(rng4, other4, expected4),
(rng5, other5, expected5),
(rng6, other6, expected6),
(rng7, other7, expected7), ]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = | pd.period_range('2009', '2019', freq='A') | pandas.period_range |
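# Hedged aside (not part of the test module above): a minimal illustration of the Period
# arithmetic these tests assert - adding an anchored offset of matching frequency shifts
# the PeriodIndex, while a mismatched offset raises IncompatibleFrequency.
import pandas as pd
rng_demo = pd.period_range('2014', '2024', freq='A')
assert (rng_demo + pd.offsets.YearEnd(5)).equals(pd.period_range('2019', '2029', freq='A'))
try:
    rng_demo + pd.offsets.MonthBegin(1)  # frequency mismatch with annual periods
except Exception as err:  # pandas raises IncompatibleFrequency here
    print(type(err).__name__)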
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import timeit
__author__ = ['<NAME>']
__email__ = ['<EMAIL>']
__package__ = 'Gemm testing'
NUM_REPEATS = 10
NUMBER = 500
def gemm_nn (N, M, K):
SETUP_CODE = '''
import numpy as np
np.random.seed(123)
N, M, K = ({N}, {M}, {K})
a = np.random.uniform(low=0., high=1., size=(N, M))
b = np.random.uniform(low=0., high=1., size=(M, K))
'''.format(**{'N' : N,
'M' : M,
'K' : K
})
TEST_CODE = '''
c = np.einsum('ij, jk -> ik', a, b, optimize=True)
'''
times = timeit.repeat(setup=SETUP_CODE,
stmt=TEST_CODE,
repeat=NUM_REPEATS,
number=NUMBER)
return times
def gemm_nt (N, M, K):
SETUP_CODE = '''
import numpy as np
np.random.seed(123)
N, M, K = ({N}, {M}, {K})
a = np.random.uniform(low=0., high=1., size=(N, M))
b = np.random.uniform(low=0., high=1., size=(M, K))
bt = b.T
'''.format(**{'N' : N,
'M' : M,
'K' : K
})
TEST_CODE = '''
c = np.einsum('ij, kj -> ik', a, bt, optimize=True)
'''
times = timeit.repeat(setup=SETUP_CODE,
stmt=TEST_CODE,
repeat=NUM_REPEATS,
number=NUMBER)
return times
if __name__ == '__main__':
import seaborn as sns
import pylab as plt
import pandas as pd
import numpy as np
N, M, K = (100, 200, 300)
times_nn = gemm_nn(N, M, K)
times_nt = gemm_nt(N, M, K)
ref = np.asarray(times_nn)
val = np.asarray(times_nt)
times_nt = np.asarray(times_nt)/ref
times_nn = np.asarray(times_nn)/ref
times_nn = pd.DataFrame(data=times_nn, columns=['Times'])
times_nn['Gemm'] = 'GEMM_NN'
times_nt = | pd.DataFrame(data=times_nt, columns=['Times']) | pandas.DataFrame |
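# Side note (not from the original benchmark): both einsum contractions timed above are
# plain matrix products, so their results can be checked against the @ operator.
import numpy as np
a_chk = np.random.uniform(size=(4, 5))
b_chk = np.random.uniform(size=(5, 6))
c_nn = np.einsum('ij, jk -> ik', a_chk, b_chk, optimize=True)    # gemm_nn contraction
c_nt = np.einsum('ij, kj -> ik', a_chk, b_chk.T, optimize=True)  # gemm_nt contraction
assert np.allclose(c_nn, a_chk @ b_chk) and np.allclose(c_nt, a_chk @ b_chk)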
import logging
import os
import gc
import pandas as pd
from src.data_models.tdidf_model import FrequencyModel
from src.evaluations.statisticalOverview import StatisticalOverview
from src.globalVariable import GlobalVariable
from src.kemures.tecnics.content_based import ContentBased
from src.preprocessing.preferences_analytics import PreferenceAnalytics
from src.preprocessing.preprocessing import Preprocessing
def execute_by_scenario_list():
application_class_df = pd.DataFrame()
application_results_df = pd.DataFrame()
for scenario in GlobalVariable.SCENARIO_SIZE_LIST:
gc.collect()
scenario_class_df = pd.DataFrame()
scenario_results_df = pd.DataFrame()
for run in range(GlobalVariable.RUN_TIMES):
os.system('cls||clear')
logger.info("+ Round -> " + str(run + 1))
logger.info("+ Scenario -> " + str(scenario))
songs_base_df, users_preference_base_df = Preprocessing.load_data_test(scenario)
run_class_df, run_results_df = ContentBased.run_recommenders(
users_preference_base_df, FrequencyModel.mold(songs_base_df), scenario, run + 1
)
scenario_results_df = pd.concat([scenario_results_df, run_results_df])
scenario_class_df = pd.concat([scenario_class_df, run_class_df])
StatisticalOverview.result_info(scenario_results_df)
StatisticalOverview.print_scenario(scenario_results_df, scenario)
StatisticalOverview.save_scenario_as_csv(scenario_results_df, scenario)
StatisticalOverview.scenario_graphic(scenario_results_df)
StatisticalOverview.save_class_results_as_cdv(scenario_class_df, scenario)
os.system('cls||clear')
application_results_df = | pd.concat([scenario_results_df, application_results_df]) | pandas.concat |
#!/usr/bin/env python3
import sys
import numpy as np
import pandas as pd
from functools import partial
from multiprocessing import Pool
from sklearn.ensemble import RandomForestClassifier
def input_validator(filename, indel_class):
"""Validate and shuffle data
Args:
filename (str): path to input training data
indel_class (str): "s" for 1-nt, "m" for >1-nt indels
Returns
df (pandas.DataFrame)
"""
df = | pd.read_csv(filename, sep="\t") | pandas.read_csv |
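# Hedged sketch (not from the original module) of the shuffle step promised by the
# docstring above; the column names here are illustrative only. pandas' sample(frac=1)
# with a fixed random_state is one reproducible way to reorder rows.
import pandas as pd
df_demo = pd.DataFrame({"chr": ["chr1", "chr2", "chr3"], "indel_class": ["s", "m", "s"]})
df_shuffled = df_demo.sample(frac=1, random_state=123).reset_index(drop=True)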
import numpy as np
import pandas as pd
import pandas.core.computation.expressions as expressions
from proto.common.v1 import common_pb2
from proto.aiengine.v1 import aiengine_pb2
from types import SimpleNamespace
import math
import threading
from exception import RewardInvalidException
from metrics import metrics
from exec import somewhat_safe_exec
class DataManager:
def init(
self,
epoch_time: pd.Timestamp,
period_secs: pd.Timedelta,
interval_secs: pd.Timedelta,
granularity_secs: pd.Timedelta,
fields: "dict[str]",
action_rewards: "dict[str]",
actions_order: "dict[int]",
laws: "list[str]",
):
self.fields = fields
self.laws = laws
self.interval_secs = interval_secs
self.granularity_secs = granularity_secs
self.epoch_time = epoch_time
self.period_secs = period_secs
self.end_time = epoch_time + self.period_secs
new_series = dict()
for field_name in fields:
new_series[field_name] = [fields[field_name].initializer]
self.massive_table_sparse = pd.DataFrame(new_series, index={self.epoch_time})
self.massive_table_sparse = self.massive_table_sparse.resample(
self.granularity_secs
).mean()
self.fill_table()
self.interpretations: common_pb2.IndexedInterpretations = None
self.current_time: pd.Timestamp = None
self.action_rewards = action_rewards
self.table_lock = threading.Lock()
self.action_names = [None] * len(actions_order)
for action in actions_order:
self.action_names[actions_order[action]] = action
def get_window_span(self):
return math.floor(self.interval_secs / self.granularity_secs)
def overwrite_data(self, new_data):
self.massive_table_sparse = new_data
self.fill_table()
def fill_table(self):
metrics.start("resample")
self.massive_table_sparse = self.massive_table_sparse.resample(
self.granularity_secs
).mean()
metrics.end("resample")
metrics.start("ffill")
self.massive_table_filled = self.massive_table_sparse.copy()
for col_name in self.massive_table_sparse:
fill_method = self.fields[col_name].fill_method
if fill_method == aiengine_pb2.FILL_FORWARD:
self.massive_table_filled[col_name] = self.massive_table_sparse[
col_name
].ffill()
elif fill_method == aiengine_pb2.FILL_ZERO:
self.massive_table_filled[col_name] = self.massive_table_sparse[
col_name
].fillna(0)
metrics.end("ffill")
metrics.start("reindex")
self.massive_table_filled.index = (
self.massive_table_filled.index.drop_duplicates(keep="first")
)
metrics.end("reindex")
def merge_row(self, new_row):
index = new_row.index[0]
for column_name in list(new_row.keys()):
value = new_row[column_name].array[0]
            self.massive_table_sparse.loc[index, column_name] = value
metrics.start("ffill")
self.massive_table_filled = self.massive_table_sparse.ffill()
metrics.end("ffill")
def merge_data(self, new_data):
def combiner(existing, newer):
existing_values = (
existing.values if hasattr(existing, "values") else existing
)
newer_values = newer.values if hasattr(newer, "values") else newer
# use newer values if they aren't nan
condition = | pd.isnull(newer_values) | pandas.isnull |
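# Standalone illustration (not part of the DataManager above) of the combiner's
# "prefer newer unless NaN" rule: np.where over an isnull mask has the same effect as the
# pandas expressions.where call these arrays presumably feed into.
import numpy as np
import pandas as pd
existing_demo = pd.Series([1.0, 2.0, 3.0])
newer_demo = pd.Series([10.0, np.nan, 30.0])
merged_demo = np.where(pd.isnull(newer_demo), existing_demo, newer_demo)  # [10., 2., 30.]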
#
# extract_hourly_intervention.py
#
# Authors:
# <NAME>
# <NAME>
#
# This file extracts the hourly interventions for patients
import pandas as pd
import os
import numpy as np
from scipy.stats import skew
import directories
import csv
import argparse
parser = argparse.ArgumentParser(description='Parser to pass number of top features')
parser.add_argument('--data-set', default=0, type=int, help='Data set package (1-3)')
args = parser.parse_args()
if args.data_set==1:
augment_interventions = False
augment_triples = False
augment_demographics = True
data_source_dir = directories.episode_data
data_target_dir = "c/"
data_ts_dir = directories.processed_data_demographics
elif args.data_set==2:
augment_interventions = True
augment_triples = False
augment_demographics = True
data_source_dir = directories.episode_data
data_target_dir = "d/"
data_ts_dir = directories.processed_data_interventions
elif args.data_set==3:
augment_interventions = False
augment_triples = True
augment_demographics = True
data_source_dir = directories.episode_data
data_target_dir = "e/"
data_ts_dir = directories.processed_data_triples
else:
exit()
def read_itemid_to_variable_map(fn, variable_column='LEVEL2'):
var_map = pd.DataFrame.from_csv(fn, index_col=None).fillna('').astype(str)
#var_map[variable_column] = var_map[variable_column].apply(lambda s: s.lower())
var_map.COUNT = var_map.COUNT.astype(int)
var_map = var_map.ix[(var_map[variable_column] != '') & (var_map.COUNT>0)]
var_map = var_map.ix[(var_map.STATUS == 'ready')]
var_map.ITEMID = var_map.ITEMID.astype(int)
var_map = var_map[[variable_column, 'ITEMID', 'MIMIC LABEL']].set_index('ITEMID')
return var_map.rename_axis({variable_column: 'VARIABLE', 'MIMIC LABEL': 'MIMIC_LABEL'}, axis=1)
def map_itemids_to_variables(events, var_map):
return events.merge(var_map, left_on='ITEMID', right_index=True)
def get_events_for_stay(events, icustayid, intime=None, outtime=None):
idx = (events.ICUSTAY_ID == icustayid)
if intime is not None and outtime is not None:
idx = idx | ((events.CHARTTIME >= intime) & (events.CHARTTIME <= outtime))
events = events.ix[idx]
del events['ICUSTAY_ID']
return events
def add_hours_elpased_to_events(events, dt, remove_charttime=True):
events['HOURS'] = (pd.to_datetime(events.CHARTTIME) - dt).apply(lambda s: s / np.timedelta64(1, 's')) / 60./60
if remove_charttime:
del events['CHARTTIME']
return events
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
all_functions = [(' min',min), (' max',max), (' mean',np.mean),
(' std',np.std), (' skew',skew), (' not_null_len',len)]
functions_map = {
"all": all_functions,
"len": [len],
"all_but_len": all_functions[:-1]
}
periods_map = {
"all": (0, 0, 1, 0),
"first4days": (0, 0, 0, 4*24),
"first8days": (0, 0, 0, 8*24),
"last12hours": (1, -12, 1, 0),
"first25percent": (2, 25),
"first50percent": (2, 50)
}
patient_ids = os.listdir(data_source_dir)
len(patient_ids)
# Read procedure and medication connections
medsConnections = pd.DataFrame.from_csv(directories.processed_csv+"MedsConnections.csv",index_col=None).fillna('').astype(str)
proceduresConnection = pd.DataFrame.from_csv(directories.processed_csv+"ProceduresConnection.csv",index_col=None).fillna('').astype(str)
medicationTriples = {}
for x in range(0,medsConnections.shape[0]):
medicationTriples.setdefault(medsConnections["Prescription"][x],[]).append(medsConnections["Lab"][x])
procedureTriples = {}
for x in range(0,proceduresConnection.shape[0]):
procedureTriples.setdefault(proceduresConnection["Procedure"][x],[]).append(proceduresConnection["Lab"][x])
# Imputes time series data for asthma patients (backward, forward)
#print(medicationTriples)
print(procedureTriples)
count_patient = 0
procedures = pd.DataFrame.from_csv(directories.mimic_iii_data+"PROCEDUREEVENTS_MV.csv",index_col=None).fillna('').astype(str)
procedures.STARTTIME = pd.to_datetime(procedures.STARTTIME)
procedures.ENDTIME = pd.to_datetime(procedures.ENDTIME)
d_item = pd.DataFrame.from_csv(directories.mimic_iii_data+"D_ITEMS.csv",index_col=None).fillna('').astype(str)
d_item = d_item[["ITEMID","LABEL"]]
#procedures = procedures.merge(d_item, left_on='ITEMID', right_index=True)
procedure_map = {}
for x in range(0,d_item.shape[0]):
procedure_map[d_item["ITEMID"][x]] = d_item["LABEL"][x]
prescriptions = pd.DataFrame.from_csv(directories.mimic_iii_data+"PRESCRIPTIONS.csv",index_col=None).fillna('').astype(str)
prescriptions.STARTDATE = | pd.to_datetime(prescriptions.STARTDATE) | pandas.to_datetime |
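# Hedged mini-example (not in the original script) of the hours-elapsed computation used
# by add_hours_elpased_to_events above: offset from the ICU intime, expressed in hours.
import numpy as np
import pandas as pd
intime_demo = pd.Timestamp('2100-01-01 08:00:00')
charttimes_demo = pd.to_datetime(pd.Series(['2100-01-01 09:30:00', '2100-01-02 08:00:00']))
hours_demo = (charttimes_demo - intime_demo).apply(lambda s: s / np.timedelta64(1, 's')) / 60. / 60
# -> 1.5 and 24.0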
import pandas as pd
import numpy as np
attr = | pd.read_csv('GTEx_Analysis_v8_Annotations_SampleAttributesDS.txt', sep='\t') | pandas.read_csv |
import copy
import unittest
import numpy as np
import pandas as pd
from sklearn.exceptions import NotFittedError
from pymatgen.core import Structure, Lattice
from matminer.featurizers.structure.bonding import (
MinimumRelativeDistances,
BondFractions,
BagofBonds,
StructuralHeterogeneity,
GlobalInstabilityIndex,
)
from matminer.featurizers.structure.matrix import SineCoulombMatrix, CoulombMatrix
from matminer.featurizers.structure.tests.base import StructureFeaturesTest
class BondingStructureTest(StructureFeaturesTest):
def test_bondfractions(self):
# Test individual structures with featurize
bf_md = BondFractions.from_preset("MinimumDistanceNN")
bf_md.no_oxi = True
bf_md.fit([self.diamond_no_oxi])
self.assertArrayEqual(bf_md.featurize(self.diamond), [1.0])
self.assertArrayEqual(bf_md.featurize(self.diamond_no_oxi), [1.0])
bf_voronoi = BondFractions.from_preset("VoronoiNN")
bf_voronoi.bbv = float("nan")
bf_voronoi.fit([self.nacl])
bond_fracs = bf_voronoi.featurize(self.nacl)
bond_names = bf_voronoi.feature_labels()
ref = {
"Na+ - Na+ bond frac.": 0.25,
"Cl- - Na+ bond frac.": 0.5,
"Cl- - Cl- bond frac.": 0.25,
}
self.assertDictEqual(dict(zip(bond_names, bond_fracs)), ref)
# Test to make sure dataframe behavior is as intended
s_list = [self.diamond_no_oxi, self.ni3al]
df = | pd.DataFrame.from_dict({"s": s_list}) | pandas.DataFrame.from_dict |
from oauth2client import file, client, tools
from apiclient import discovery
from httplib2 import Http
from typing import Optional, Union, List
import os
from pandas.core.frame import DataFrame
from pandas import Timestamp, Timedelta
from functools import lru_cache
CLIENT_SECRET_PATH = '~/.gsheets2pandas/client_secret.json'
CLIENT_CREDENTIALS_PATH = '~/.gsheets2pandas/client_credentials.json'
SCOPE = 'https://www.googleapis.com/auth/spreadsheets.readonly'
FIELDS = 'sheets/data/rowData/values(effectiveValue,effectiveFormat)'
PANDAS_START_TIME = Timestamp(1899, 12, 30)
class GSheetReader:
"""
Returns an authenticated Google Sheets reader based on configured
client_secret.json and client_credentials.json files
:param client_secret_path: path to google API client secret. If omitted, then looks in:
1. CLIENT_SECRET_PATH environment variable
2. ~/.pandas_read_gsheet/client_secret.json
:param client_credentials_path: path to google API client credentials. If the file
doesn't exist, it will be created through client secret flow. This will spawn a browser
to complete Google Auth. This might fail in Jupyter/JupyterLab - try running the auth_setup.py file.
If omitted, then looks in:
1. CLIENT_CREDENTIALS_PATH environment variable
2. ~/.pandas_read_gsheet/client_credentials.json
"""
def __init__(
self,
client_secret_path: Optional[str] = None,
client_credentials_path: Optional[str] = None,
):
self.client_secret_path = (
client_secret_path
if client_secret_path is not None
else os.environ.get('CLIENT_SECRET_PATH', CLIENT_SECRET_PATH)
)
self.client_credentials_path = (
client_credentials_path
if client_credentials_path is not None
else os.environ.get('CLIENT_CREDENTIALS_PATH', CLIENT_CREDENTIALS_PATH)
)
def __repr__(self):
return f'{self.__class__.__name__}({self.client_secret_path}, {self.client_credentials_path})'
@property
@lru_cache(maxsize=1)
def credentials(self) -> client.OAuth2Credentials:
store = file.Storage(os.path.expanduser(self.client_credentials_path))
credentials = store.get()
if credentials is None or credentials.invalid:
credentials = self._refresh_credentials(store)
return credentials
def _refresh_credentials(self, store: file.Storage) -> client.OAuth2Credentials:
flow = client.flow_from_clientsecrets(
os.path.expanduser(self.client_secret_path), scope=SCOPE
)
return tools.run_flow(flow, store, http=Http())
@property
@lru_cache(maxsize=1)
def service(self) -> discovery.Resource:
return discovery.build('sheets', 'v4', http=self.credentials.authorize(Http()))
@staticmethod
def _timestamp_from_float(f: Union[int, float]) -> Timestamp:
return PANDAS_START_TIME + Timedelta(days=f)
def _extract_cell_value(
self, cell: dict
) -> Union[int, float, bool, str, Timestamp]:
try:
cell_type, cell_value = list(cell['effectiveValue'].items())[0]
except KeyError:
cell_value = None
else:
if cell_type == 'numberValue':
try:
dt_type = cell['effectiveFormat']['numberFormat']['type']
if dt_type == 'DATE_TIME' or dt_type == 'DATE':
cell_value = self._timestamp_from_float(cell_value)
except KeyError:
pass
return cell_value
def _sheet_data_to_dataframe(self, data: list, header=True) -> DataFrame:
data_list = [
[self._extract_cell_value(cell) for cell in row['values']] for row in data
]
return (
DataFrame(data_list[1:], columns=data_list[0])
if header
else | DataFrame(data_list) | pandas.core.frame.DataFrame |
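# Hedged demo (not part of the original module) of the parsing helpers above, fed a
# hand-built payload shaped like the Sheets API rowData that the FIELDS mask requests.
# GSheetReader.__init__ only stores config paths, so no Google auth is triggered here.
demo_rows = [
    {'values': [{'effectiveValue': {'stringValue': 'when'}},
                {'effectiveValue': {'stringValue': 'amount'}}]},
    {'values': [{'effectiveValue': {'numberValue': 43831.0},
                 'effectiveFormat': {'numberFormat': {'type': 'DATE'}}},
                {'effectiveValue': {'numberValue': 12.5}}]},
]
demo_reader = GSheetReader()
demo_frame = demo_reader._sheet_data_to_dataframe(demo_rows, header=True)
# the 'when' cell becomes a Timestamp (2020-01-01) via _timestamp_from_float; 12.5 stays numeric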
from typing import Any, List, Tuple, Union, Mapping, Optional, Sequence
from types import MappingProxyType
from pathlib import Path
from anndata import AnnData
from cellrank import logging as logg
from cellrank._key import Key
from cellrank.tl._enum import _DEFAULT_BACKEND, Backend_t
from cellrank.ul._docs import d
from cellrank.pl._utils import (
_fit_bulk,
_get_backend,
_callback_type,
_create_models,
_trends_helper,
_time_range_type,
_create_callbacks,
_input_model_type,
_return_model_type,
)
from cellrank.tl._utils import save_fig, _unique_order_preserving
from cellrank.ul._utils import _genesymbols, _get_n_cores, _check_collection
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm
@d.dedent
@_genesymbols
def gene_trends(
adata: AnnData,
model: _input_model_type,
genes: Union[str, Sequence[str]],
lineages: Optional[Union[str, Sequence[str]]] = None,
backward: bool = False,
data_key: str = "X",
time_key: str = "latent_time",
time_range: Optional[Union[_time_range_type, List[_time_range_type]]] = None,
transpose: bool = False,
callback: _callback_type = None,
conf_int: Union[bool, float] = True,
same_plot: bool = False,
hide_cells: bool = False,
perc: Optional[Union[Tuple[float, float], Sequence[Tuple[float, float]]]] = None,
lineage_cmap: Optional[matplotlib.colors.ListedColormap] = None,
abs_prob_cmap: matplotlib.colors.ListedColormap = cm.viridis,
cell_color: Optional[str] = None,
cell_alpha: float = 0.6,
lineage_alpha: float = 0.2,
size: float = 15,
lw: float = 2,
cbar: bool = True,
margins: float = 0.015,
sharex: Optional[Union[str, bool]] = None,
sharey: Optional[Union[str, bool]] = None,
gene_as_title: Optional[bool] = None,
legend_loc: Optional[str] = "best",
obs_legend_loc: Optional[str] = "best",
ncols: int = 2,
suptitle: Optional[str] = None,
return_models: bool = False,
n_jobs: Optional[int] = 1,
backend: Backend_t = _DEFAULT_BACKEND,
show_progress_bar: bool = True,
figsize: Optional[Tuple[float, float]] = None,
dpi: Optional[int] = None,
save: Optional[Union[str, Path]] = None,
plot_kwargs: Mapping[str, Any] = MappingProxyType({}),
**kwargs: Any,
) -> Optional[_return_model_type]:
"""
Plot gene expression trends along lineages.
Each lineage is defined via it's lineage weights which we compute using :func:`cellrank.tl.lineages`. This
function accepts any model based off :class:`cellrank.ul.models.BaseModel` to fit gene expression,
where we take the lineage weights into account in the loss function.
Parameters
----------
%(adata)s
%(model)s
%(genes)s
lineages
Names of the lineages to plot. If `None`, plot all lineages.
%(backward)s
data_key
Key in :attr:`anndata.AnnData.layers` or `'X'` for :attr:`anndata.AnnData.X` where the data is stored.
time_key
Key in :attr:`anndata.AnnData.obs` where the pseudotime is stored.
%(time_range)s
This can also be specified on per-lineage basis.
%(gene_symbols)s
transpose
If ``same_plot = True``, group the trends by ``lineages`` instead of ``genes``.
This forces ``hide_cells = True``.
If ``same_plot = False``, show ``lineages`` in rows and ``genes`` in columns.
%(model_callback)s
conf_int
Whether to compute and show confidence interval. If the ``model`` is :class:`cellrank.ul.models.GAMR`,
it can also specify the confidence level, the default is `0.95`.
same_plot
Whether to plot all lineages for each gene in the same plot.
hide_cells
If `True`, hide all cells.
perc
Percentile for colors. Valid values are in interval `[0, 100]`.
This can improve visualization. Can be specified individually for each lineage.
lineage_cmap
Categorical colormap to use when coloring in the lineages. If `None` and ``same_plot``,
use the corresponding colors in :attr:`anndata.AnnData.uns`, otherwise use `'black'`.
abs_prob_cmap
Continuous colormap to use when visualizing the absorption probabilities for each lineage.
Only used when ``same_plot = False``.
cell_color
Key in :attr:`anndata.AnnData.obs` or :attr:`anndata.AnnData.var_names` used for coloring the cells.
cell_alpha
Alpha channel for cells.
lineage_alpha
Alpha channel for lineage confidence intervals.
size
Size of the points.
lw
Line width of the smoothed values.
cbar
Whether to show colorbar. Always shown when percentiles for lineages differ.
Only used when ``same_plot = False``.
margins
Margins around the plot.
sharex
Whether to share x-axis. Valid options are `'row'`, `'col'` or `'none'`.
sharey
Whether to share y-axis. Valid options are `'row'`, `'col'` or `'none'`.
gene_as_title
Whether to show gene names as titles instead on y-axis.
legend_loc
Location of the legend displaying lineages. Only used when `same_plot = True`.
obs_legend_loc
Location of the legend when ``cell_color`` corresponds to a categorical variable.
ncols
Number of columns of the plot when plotting multiple genes. Only used when ``same_plot = True``.
suptitle
Suptitle of the figure.
%(return_models)s
%(parallel)s
%(plotting)s
plot_kwargs
Keyword arguments for :meth:`cellrank.ul.models.BaseModel.plot`.
kwargs
Keyword arguments for :meth:`cellrank.ul.models.BaseModel.prepare`.
Returns
-------
%(plots_or_returns_models)s
"""
if isinstance(genes, str):
genes = [genes]
genes = _unique_order_preserving(genes)
_check_collection(
adata,
genes,
"obs" if data_key == "obs" else "var_names",
use_raw=kwargs.get("use_raw", False),
)
lineage_key = Key.obsm.abs_probs(backward)
if lineage_key not in adata.obsm:
raise KeyError(f"Lineages key `{lineage_key!r}` not found in `adata.obsm`.")
if lineages is None:
lineages = adata.obsm[lineage_key].names
elif isinstance(lineages, str):
lineages = [lineages]
elif all(ln is None for ln in lineages): # no lineage, all the weights are 1
lineages = [None]
cbar = False
logg.debug("All lineages are `None`, setting the weights to `1`")
lineages = _unique_order_preserving(lineages)
if isinstance(time_range, (tuple, float, int, type(None))):
time_range = [time_range] * len(lineages)
elif len(time_range) != len(lineages):
raise ValueError(
f"Expected time ranges to be of length `{len(lineages)}`, found `{len(time_range)}`."
)
kwargs["time_key"] = time_key
kwargs["data_key"] = data_key
kwargs["backward"] = backward
kwargs["conf_int"] = conf_int # prepare doesnt take or need this
models = _create_models(model, genes, lineages)
all_models, models, genes, lineages = _fit_bulk(
models,
_create_callbacks(adata, callback, genes, lineages, **kwargs),
genes,
lineages,
time_range,
return_models=True,
filter_all_failed=False,
parallel_kwargs={
"show_progress_bar": show_progress_bar,
"n_jobs": _get_n_cores(n_jobs, len(genes)),
"backend": _get_backend(models, backend),
},
**kwargs,
)
lineages = sorted(lineages)
tmp = adata.obsm[lineage_key][lineages].colors
if lineage_cmap is None and not transpose:
lineage_cmap = tmp
plot_kwargs = dict(plot_kwargs)
plot_kwargs["obs_legend_loc"] = obs_legend_loc
if transpose:
all_models = | pd.DataFrame(all_models) | pandas.DataFrame |
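# Hedged call sketch for the gene_trends API documented above (not from the original
# source). `adata_demo` is assumed to be an AnnData object with absorption probabilities
# already computed; the GAM model class under cellrank.ul.models and the gene names are
# illustrative assumptions.
import cellrank as cr
gam_model = cr.ul.models.GAM(adata_demo)  # any cellrank BaseModel subclass per the docstring
cr.pl.gene_trends(
    adata_demo,
    model=gam_model,
    genes=["Ins1", "Ins2"],  # hypothetical gene names
    data_key="X",
    time_key="latent_time",
    same_plot=True,
)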
import numpy as np
import pandas as pd
from sklearn import *
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from matplotlib import pyplot
import time
import os
showPlot=True
#prepare data
data_file_name = "../FinalCost.csv"
data_csv = pd.read_csv(data_file_name, delimiter = ';',header=None, usecols=[3,4,5,6,7,8,9,10,11,12,16,17])
# Read line by line (skip the header row)
data = data_csv[1:]
# Rename the columns
data.columns = ['ConsommationHier','MSemaineDernier','MSemaine7','ConsoMmJrAnP','ConsoMmJrMP','ConsoMMJrSmDer',
'MoyenneMoisPrec','MoyenneMMSAnPrec','MoyenneMMmAnPrec','ConsommationMaxMDer', 'PoidTot', 'SumRetrait']
# print (data.head(10))
# pd.options.display.float_format = '{:,.0f}'.format
# Drop rows containing null values (at least one null value)
data = data.dropna()
# Output Y with its type
y=data['SumRetrait'].astype(float)
cols=['ConsommationHier','MSemaineDernier','MSemaine7','ConsoMmJrAnP','ConsoMmJrMP','ConsoMMJrSmDer',
'MoyenneMoisPrec','MoyenneMMSAnPrec','MoyenneMMmAnPrec','ConsommationMaxMDer', 'PoidTot']
x=data[cols].astype(float)
print(data.head())
x_train ,x_test ,y_train ,y_test = train_test_split( x,y, test_size=0.2 , random_state=1116)
print(type(y_test))
#print(y_test)
print(x.shape)
#Design the Regression Model
regressor =LinearRegression()
##training
regressor.fit(x_train,y_train)
#Make prediction
y_pred =regressor.predict(x_test)
# print (y_pred)
# print("---- test----")
#print(y_test)
YArray = y_test.as_matrix()
testData = pd.DataFrame(YArray)
preddData = pd.DataFrame(y_pred)
meanError = np.abs((YArray - y_pred)/YArray)*100
meanError2 = np.abs((YArray - y_pred))
print("Mean: ", meanError.mean()," - ", meanError2.mean())
dataF = | pd.concat([testData,preddData], axis=1) | pandas.concat |
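# Optional cross-check (not in the original script): sklearn.metrics offers ready-made
# counterparts to the hand-rolled absolute-error figures computed above.
from sklearn.metrics import mean_absolute_error, r2_score
print("MAE:", mean_absolute_error(y_test, y_pred))
print("R2: ", r2_score(y_test, y_pred))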
"""Higher-level functions of automated time series modeling."""
import numpy as np
import pandas as pd
import random
import copy
import json
import sys
import time
from autots.tools.shaping import (
long_to_wide,
df_cleanup,
subset_series,
simple_train_test_split,
NumericTransformer,
clean_weights,
)
from autots.evaluator.auto_model import (
TemplateEvalObject,
NewGeneticTemplate,
RandomTemplate,
TemplateWizard,
unpack_ensemble_models,
generate_score,
generate_score_per_series,
model_forecast,
validation_aggregation,
back_forecast,
remove_leading_zeros,
)
from autots.models.ensemble import (
EnsembleTemplateGenerator,
HorizontalTemplateGenerator,
generate_mosaic_template,
)
from autots.models.model_list import model_lists
from autots.tools import cpu_count
from autots.tools.window_functions import retrieve_closest_indices
class AutoTS(object):
"""Automate time series modeling using a genetic algorithm.
Args:
        forecast_length (int): number of periods over which to evaluate forecast. Can be overridden later in .predict().
frequency (str): 'infer' or a specific pandas datetime offset. Can be used to force rollup of data (ie daily input, but frequency 'M' will rollup to monthly).
prediction_interval (float): 0-1, uncertainty range for upper and lower forecasts. Adjust range, but rarely matches actual containment.
max_generations (int): number of genetic algorithms generations to run.
More runs = longer runtime, generally better accuracy.
It's called `max` because someday there will be an auto early stopping option, but for now this is just the exact number of generations to run.
no_negatives (bool): if True, all negative predictions are rounded up to 0.
constraint (float): when not None, use this value * data st dev above max or below min for constraining forecast values. Applied to point forecast only, not upper/lower forecasts.
ensemble (str): None or list or comma-separated string containing:
'auto', 'simple', 'distance', 'horizontal', 'horizontal-min', 'horizontal-max', "mosaic", "subsample"
        initial_template (str): 'Random' - randomly generates starting template, 'General' uses template included in package, 'General+Random' - both of previous. Also can be overridden with self.import_template()
random_seed (int): random seed allows (slightly) more consistent results.
holiday_country (str): passed through to Holidays package for some models.
subset (int): maximum number of series to evaluate at once. Useful to speed evaluation when many series are input.
takes a new subset of columns on each validation, unless mosaic ensembling, in which case columns are the same in each validation
aggfunc (str): if data is to be rolled up to a higher frequency (daily -> monthly) or duplicate timestamps are included. Default 'first' removes duplicates, for rollup try 'mean' or np.sum.
Beware numeric aggregations like 'mean' will not work with non-numeric inputs.
na_tolerance (float): 0 to 1. Series are dropped if they have more than this percent NaN. 0.95 here would allow series containing up to 95% NaN values.
        metric_weighting (dict): weights to assign to metrics, affecting how the ranking score is generated.
drop_most_recent (int): option to drop n most recent data points. Useful, say, for monthly sales data where the current (unfinished) month is included.
            occurs after any aggregation is applied, so will drop n periods of whatever frequency is specified
drop_data_older_than_periods (int): take only the n most recent timestamps
model_list (list): str alias or list of names of model objects to use
transformer_list (list): list of transformers to use, or dict of transformer:probability. Note this does not apply to initial templates.
can accept string aliases: "all", "fast", "superfast"
transformer_max_depth (int): maximum number of sequential transformers to generate for new Random Transformers. Fewer will be faster.
models_mode (str): option to adjust parameter options for newly generated models. Currently includes:
'default', 'deep' (searches more params, likely slower), and 'regressor' (forces 'User' regressor mode in regressor capable models)
num_validations (int): number of cross validations to perform. 0 for just train/test on best split.
Possible confusion: num_validations is the number of validations to perform *after* the first eval segment, so totally eval/validations will be this + 1.
models_to_validate (int): top n models to pass through to cross validation. Or float in 0 to 1 as % of tried.
0.99 is forced to 100% validation. 1 evaluates just 1 model.
If horizontal or mosaic ensemble, then additional min per_series models above the number here are added to validation.
max_per_model_class (int): of the models_to_validate what is the maximum to pass from any one model class/family.
validation_method (str): 'even', 'backwards', or 'seasonal n' where n is an integer of seasonal
'backwards' is better for recency and for shorter training sets
'even' splits the data into equally-sized slices best for more consistent data, a poetic but less effective strategy than others here
'seasonal n' for example 'seasonal 364' would test all data on each previous year of the forecast_length that would immediately follow the training data.
'similarity' automatically finds the data sections most similar to the most recent data that will be used for prediction
'custom' - if used, .fit() needs validation_indexes passed - a list of pd.DatetimeIndex's, tail of each is used as test
min_allowed_train_percent (float): percent of forecast length to allow as min training, else raises error.
0.5 with a forecast length of 10 would mean 5 training points are mandated, for a total of 15 points.
Useful in (unrecommended) cases where forecast_length > training length.
remove_leading_zeroes (bool): replace leading zeroes with NaN. Useful in data where initial zeroes mean data collection hasn't started yet.
prefill_na (str): value to input to fill all NaNs with. Leaving as None and allowing model interpolation is recommended.
None, 0, 'mean', or 'median'. 0 may be useful in for examples sales cases where all NaN can be assumed equal to zero.
introduce_na (bool): whether to force last values in one training validation to be NaN. Helps make more robust models.
defaults to None, which introduces NaN in last rows of validations if any NaN in tail of training data. Will not introduce NaN to all series if subset is used.
if True, will also randomly change 20% of all rows to NaN in the validations
model_interrupt (bool): if False, KeyboardInterrupts quit entire program.
if True, KeyboardInterrupts attempt to only quit current model.
if True, recommend use in conjunction with `verbose` > 0 and `result_file` in the event of accidental complete termination.
if "end_generation", as True and also ends entire generation of run. Note skipped models will not be tried again.
verbose (int): setting to 0 or lower should reduce most output. Higher numbers give more output.
n_jobs (int): Number of cores available to pass to parallel processing. A joblib context manager can be used instead (pass None in this case). Also 'auto'.
Attributes:
best_model (pd.DataFrame): DataFrame containing template for the best ranked model
best_model_name (str): model name
best_model_params (dict): model params
best_model_transformation_params (dict): transformation parameters
best_model_ensemble (int): Ensemble type int id
regression_check (bool): If True, the best_model uses an input 'User' future_regressor
df_wide_numeric (pd.DataFrame): dataframe containing shaped final data
initial_results.model_results (object): contains a collection of result metrics
score_per_series (pd.DataFrame): generated score of metrics given per input series, if horizontal ensembles
Methods:
fit, predict
export_template, import_template, import_results
results, failure_rate
horizontal_to_df, mosaic_to_df
plot_horizontal, plot_horizontal_transformers, plot_generation_loss, plot_backforecast
"""
def __init__(
self,
forecast_length: int = 14,
frequency: str = 'infer',
prediction_interval: float = 0.9,
max_generations: int = 10,
no_negatives: bool = False,
constraint: float = None,
ensemble: str = 'auto',
initial_template: str = 'General+Random',
random_seed: int = 2020,
holiday_country: str = 'US',
subset: int = None,
aggfunc: str = 'first',
na_tolerance: float = 1,
metric_weighting: dict = {
'smape_weighting': 5,
'mae_weighting': 2,
'rmse_weighting': 2,
'made_weighting': 0,
'containment_weighting': 0,
'runtime_weighting': 0.05,
'spl_weighting': 2,
'contour_weighting': 1,
},
drop_most_recent: int = 0,
drop_data_older_than_periods: int = 100000,
model_list: str = 'default',
transformer_list: dict = "fast",
transformer_max_depth: int = 6,
models_mode: str = "random",
num_validations: int = 2,
models_to_validate: float = 0.15,
max_per_model_class: int = None,
validation_method: str = 'backwards',
min_allowed_train_percent: float = 0.5,
remove_leading_zeroes: bool = False,
prefill_na: str = None,
introduce_na: bool = None,
model_interrupt: bool = False,
verbose: int = 1,
n_jobs: int = None,
):
assert forecast_length > 0, "forecast_length must be greater than 0"
assert transformer_max_depth > 0, "transformer_max_depth must be greater than 0"
self.forecast_length = int(abs(forecast_length))
self.frequency = frequency
self.aggfunc = aggfunc
self.prediction_interval = prediction_interval
self.no_negatives = no_negatives
self.constraint = constraint
self.random_seed = random_seed
self.holiday_country = holiday_country
if isinstance(ensemble, list):
ensemble = str(",".join(ensemble)).lower()
self.ensemble = str(ensemble).lower()
self.subset = subset
self.na_tolerance = na_tolerance
self.metric_weighting = metric_weighting
self.drop_most_recent = drop_most_recent
self.drop_data_older_than_periods = drop_data_older_than_periods
self.model_list = model_list
self.transformer_list = transformer_list
self.transformer_max_depth = transformer_max_depth
self.num_validations = abs(int(num_validations))
self.models_to_validate = models_to_validate
self.max_per_model_class = max_per_model_class
self.validation_method = str(validation_method).lower()
self.min_allowed_train_percent = min_allowed_train_percent
self.max_generations = max_generations
self.remove_leading_zeroes = remove_leading_zeroes
self.prefill_na = prefill_na
self.introduce_na = introduce_na
self.model_interrupt = model_interrupt
self.verbose = int(verbose)
self.n_jobs = n_jobs
self.models_mode = models_mode
# just a list of horizontal types in general
self.h_ens_list = ['horizontal', 'probabilistic', 'hdist', "mosaic"]
if self.ensemble == 'all':
self.ensemble = 'simple,distance,horizontal-max,mosaic'
elif self.ensemble == 'auto':
if model_list in ['fast', 'default', 'all', 'multivariate']:
self.ensemble = 'simple,distance,horizontal-max'
else:
self.ensemble = 'simple'
if self.forecast_length == 1:
if metric_weighting['contour_weighting'] > 0:
print("Contour metric does not work with forecast_length == 1")
# check metric weights are valid
metric_weighting_values = self.metric_weighting.values()
if min(metric_weighting_values) < 0:
raise ValueError(
f"Metric weightings must be numbers >= 0. Current weightings: {self.metric_weighting}"
)
elif sum(metric_weighting_values) == 0:
raise ValueError(
"Sum of metric_weightings is 0, one or more values must be > 0"
)
if 'seasonal' in self.validation_method:
val_list = [x for x in str(self.validation_method) if x.isdigit()]
self.seasonal_val_periods = int(''.join(val_list))
if self.n_jobs == 'auto':
self.n_jobs = cpu_count(modifier=0.75)
if verbose > 0:
print(f"Using {self.n_jobs} cpus for n_jobs.")
elif str(self.n_jobs).isdigit():
self.n_jobs = int(self.n_jobs)
if self.n_jobs < 0:
core_count = cpu_count() + 1 - self.n_jobs
self.n_jobs = core_count if core_count > 1 else 1
if self.n_jobs == 0:
self.n_jobs = 1
# convert shortcuts of model lists to actual lists of models
if model_list in list(model_lists.keys()):
self.model_list = model_lists[model_list]
# prepare for a common Typo
elif 'Prophet' in model_list:
self.model_list = ["FBProphet" if x == "Prophet" else x for x in model_list]
# generate template to begin with
initial_template = str(initial_template).lower()
if initial_template == 'random':
self.initial_template = RandomTemplate(
len(self.model_list) * 12,
model_list=self.model_list,
transformer_list=self.transformer_list,
transformer_max_depth=self.transformer_max_depth,
models_mode=self.models_mode,
)
elif initial_template == 'general':
from autots.templates.general import general_template
self.initial_template = general_template
elif initial_template == 'general+random':
from autots.templates.general import general_template
random_template = RandomTemplate(
len(self.model_list) * 5,
model_list=self.model_list,
transformer_list=self.transformer_list,
transformer_max_depth=self.transformer_max_depth,
models_mode=self.models_mode,
)
self.initial_template = pd.concat(
[general_template, random_template], axis=0
).drop_duplicates()
elif isinstance(initial_template, pd.DataFrame):
self.initial_template = initial_template
else:
print("Input initial_template unrecognized. Using Random.")
self.initial_template = RandomTemplate(
50,
model_list=self.model_list,
transformer_list=self.transformer_list,
transformer_max_depth=self.transformer_max_depth,
models_mode=self.models_mode,
)
# remove models not in given model list
self.initial_template = self.initial_template[
self.initial_template['Model'].isin(self.model_list)
]
if self.initial_template.shape[0] == 0:
raise ValueError(
"No models in template! Adjust initial_template or model_list"
)
# remove transformers not in transformer_list and max_depth
# yes it is awkward, but I cannot think of a better way at this time
if self.transformer_max_depth < 6 or self.transformer_list not in [
"all",
"fast",
]:
from autots.tools.transform import transformer_list_to_dict
transformer_lst, prb = transformer_list_to_dict(self.transformer_list)
for index, row in self.initial_template.iterrows():
full_params = json.loads(row['TransformationParameters'])
transformations = full_params['transformations']
transformation_params = full_params['transformation_params']
# remove those not in transformer_list
bad_keys = [
i
for i, x in json.loads(row['TransformationParameters'])[
'transformations'
].items()
if x not in transformer_lst
]
[transformations.pop(key) for key in bad_keys]
[transformation_params.pop(key) for key in bad_keys]
# shorten any remaining if beyond length
transformations = dict(
list(transformations.items())[: self.transformer_max_depth]
)
transformation_params = dict(
list(transformation_params.items())[: self.transformer_max_depth]
)
full_params['transformations'] = transformations
full_params['transformation_params'] = transformation_params
self.initial_template.loc[
index, 'TransformationParameters'
] = json.dumps(full_params)
self.best_model = pd.DataFrame()
self.regressor_used = False
# do not add 'ID' to the below unless you want to refactor things.
self.template_cols = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
]
self.template_cols_id = (
self.template_cols
if "ID" in self.template_cols
else ['ID'] + self.template_cols
)
self.initial_results = TemplateEvalObject()
self.best_model_name = ""
self.best_model_params = ""
self.best_model_transformation_params = ""
self.traceback = True if verbose > 1 else False
self.future_regressor_train = None
self.validation_train_indexes = []
self.validation_test_indexes = []
if verbose > 2:
print('"Hello. Would you like to destroy some evil today?" - Sanderson')
def __repr__(self):
"""Print."""
if self.best_model.empty:
return "Uninitiated AutoTS object"
else:
try:
res = ", ".join(self.initial_results.model_results[self.initial_results.model_results['ID'] == self.best_model['ID'].iloc[0]]['smape'].astype(str).tolist())
return f"Initiated AutoTS object with best model: \n{self.best_model_name}\n{self.best_model_transformation_params}\n{self.best_model_params}\nSMAPE: {res}"
except Exception:
return "Initiated AutoTS object"
def fit(
self,
df,
date_col: str = None,
value_col: str = None,
id_col: str = None,
future_regressor=None,
weights: dict = {},
result_file: str = None,
grouping_ids=None,
validation_indexes: list = None,
):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed dataframe of series, or dataframe of three columns as below.
date_col (str): name of datetime column
value_col (str): name of column containing the data of series.
id_col (str): name of column identifying different series.
future_regressor (numpy.Array): single external regressor matching train.index
weights (dict): {'colname1': 2, 'colname2': 5} - increase importance of a series in metric evaluation. Any left blank assumed to have weight of 1.
pass the alias 'mean' as a str ie `weights='mean'` to automatically use the mean value of a series as its weight
available aliases: mean, median, min, max
result_file (str): results saved on each new generation. Does not include validation rounds.
".csv" save model results table.
".pickle" saves full object, including ensemble information.
grouping_ids (dict): currently a one-level dict containing series_id:group_id mapping.
used in 0.2.x but not 0.3.x+ versions. retained for potential future use
"""
self.weights = weights
self.date_col = date_col
self.value_col = value_col
self.id_col = id_col
self.grouping_ids = grouping_ids
# import mkl
# so this actually works it seems, on all sub process models
# mkl.set_num_threads_local(8)
# convert class variables to local variables (makes testing easier)
forecast_length = self.forecast_length
self.validation_indexes = validation_indexes
if self.validation_method == "custom":
assert (
validation_indexes is not None
), "validation_indexes needs to be filled with 'custom' validation"
assert len(validation_indexes) >= (
self.num_validations + 1
), "validation_indexes needs to be >= num_validations + 1 with 'custom' validation"
# flag if weights are given
if bool(weights):
weighted = True
else:
weighted = False
self.weighted = weighted
frequency = self.frequency
prediction_interval = self.prediction_interval
no_negatives = self.no_negatives
random_seed = self.random_seed
holiday_country = self.holiday_country
metric_weighting = self.metric_weighting
num_validations = self.num_validations
verbose = self.verbose
template_cols = self.template_cols
# shut off warnings if running silently
if verbose <= 0:
import warnings
warnings.filterwarnings("ignore")
# clean up result_file input, if given.
if result_file is not None:
formats = ['.csv', '.pickle']
if not any(x in result_file for x in formats):
print("result_file must be a valid str with .csv or .pickle")
result_file = None
# set random seeds for environment
random_seed = abs(int(random_seed))
random.seed(random_seed)
np.random.seed(random_seed)
# convert data to wide format
if date_col is None and value_col is None:
df_wide = pd.DataFrame(df)
assert (
type(df_wide.index) is pd.DatetimeIndex
), "df index is not pd.DatetimeIndex"
else:
df_wide = long_to_wide(
df,
date_col=self.date_col,
value_col=self.value_col,
id_col=self.id_col,
aggfunc=self.aggfunc,
)
df_wide = df_cleanup(
df_wide,
frequency=self.frequency,
prefill_na=self.prefill_na,
na_tolerance=self.na_tolerance,
drop_data_older_than_periods=self.drop_data_older_than_periods,
aggfunc=self.aggfunc,
drop_most_recent=self.drop_most_recent,
verbose=self.verbose,
)
# handle categorical data if present
self.categorical_transformer = NumericTransformer(verbose=self.verbose)
df_wide_numeric = self.categorical_transformer.fit_transform(df_wide)
# check that column names are unique:
if not df_wide_numeric.columns.is_unique:
# maybe should make this an actual error in the future
print(
"Warning: column/series names are not unique. Unique column names are highly recommended for wide data!"
)
time.sleep(3) # give the message a chance to be seen
# remove other ensembling types if univariate
if df_wide_numeric.shape[1] == 1:
if "simple" in self.ensemble:
ens_piece1 = "simple"
else:
ens_piece1 = ""
if "distance" in self.ensemble:
ens_piece2 = "distance"
else:
ens_piece2 = ""
if "mosaic" in self.ensemble:
ens_piece3 = "mosaic"
else:
ens_piece3 = ""
self.ensemble = ens_piece1 + "," + ens_piece2 + "," + ens_piece3
ensemble = self.ensemble
# because horizontal cannot handle non-string columns/series_ids
if any(x in ensemble for x in self.h_ens_list):
df_wide_numeric.columns = [str(xc) for xc in df_wide_numeric.columns]
# use "mean" to assign weight as mean
if weighted:
if weights == 'mean':
weights = df_wide_numeric.mean(axis=0).to_dict()
elif weights == 'median':
weights = df_wide_numeric.median(axis=0).to_dict()
elif weights == 'min':
weights = df_wide_numeric.min(axis=0).to_dict()
elif weights == 'max':
weights = df_wide_numeric.max(axis=0).to_dict()
# clean up series weighting input
weights = clean_weights(weights, df_wide_numeric.columns, self.verbose)
self.weights = weights
# replace any zeroes that occur prior to all non-zero values
if self.remove_leading_zeroes:
df_wide_numeric = remove_leading_zeros(df_wide_numeric)
# check if NaN in last row
self._nan_tail = df_wide_numeric.tail(2).isna().sum(axis=1).sum() > 0
self.df_wide_numeric = df_wide_numeric
self.startTimeStamps = df_wide_numeric.notna().idxmax()
# generate similarity matching indices (so it can fail now, not after all the generations)
if self.validation_method == "similarity":
from autots.tools.transform import GeneralTransformer
params = {
"fillna": "median", # mean or median one of few consistent things
"transformations": {"0": "MaxAbsScaler"},
"transformation_params": {
"0": {},
},
}
trans = GeneralTransformer(**params)
stride_size = round(self.forecast_length / 2)
stride_size = stride_size if stride_size > 0 else 1
created_idx = retrieve_closest_indices(
trans.fit_transform(df_wide_numeric),
num_indices=num_validations + 1,
forecast_length=self.forecast_length,
stride_size=stride_size,
distance_metric="nan_euclidean",
include_differenced=True,
window_size=30,
include_last=True,
verbose=self.verbose,
)
self.validation_indexes = [
df_wide_numeric.index[df_wide_numeric.index <= indx[-1]]
for indx in created_idx
]
# record if subset or not
if self.subset is not None:
self.subset = abs(int(self.subset))
if self.subset >= self.df_wide_numeric.shape[1]:
self.subset_flag = False
else:
self.subset_flag = True
else:
self.subset_flag = False
#
# take a subset of the data if working with a large number of series
if self.subset_flag:
df_subset = subset_series(
df_wide_numeric,
list((weights.get(i)) for i in df_wide_numeric.columns),
n=self.subset,
random_state=random_seed,
)
if self.verbose > 1:
print(f'First subset is of: {df_subset.columns}')
else:
df_subset = df_wide_numeric.copy()
# go to first index
if self.validation_method in ['custom', "similarity"]:
first_idx = self.validation_indexes[0]
if max(first_idx) > max(df_subset.index):
raise ValueError(
"provided validation index exceeds historical data period"
)
df_subset = df_subset.reindex(first_idx)
# subset the weighting information as well
if not weighted:
current_weights = {x: 1 for x in df_subset.columns}
else:
current_weights = {x: weights[x] for x in df_subset.columns}
# split train and test portions, and split regressor if present
df_train, df_test = simple_train_test_split(
df_subset,
forecast_length=forecast_length,
min_allowed_train_percent=self.min_allowed_train_percent,
verbose=self.verbose,
)
self.validation_train_indexes.append(df_train.index)
self.validation_test_indexes.append(df_test.index)
if future_regressor is not None:
if not isinstance(future_regressor, pd.DataFrame):
future_regressor = pd.DataFrame(future_regressor)
if future_regressor.empty:
raise ValueError("regressor empty")
if not isinstance(future_regressor.index, pd.DatetimeIndex):
future_regressor.index = df_subset.index
# handle any non-numeric data, crudely
self.regr_num_trans = NumericTransformer(verbose=self.verbose)
future_regressor = self.regr_num_trans.fit_transform(future_regressor)
self.future_regressor_train = future_regressor
future_regressor_train = future_regressor.reindex(index=df_train.index)
future_regressor_test = future_regressor.reindex(index=df_test.index)
else:
future_regressor_train = None
future_regressor_test = None
if future_regressor is not None:
if future_regressor.shape[0] != df_wide_numeric.shape[0]:
print(
"future_regressor row count does not match length of training data"
)
time.sleep(2)
model_count = 0
# unpack ensemble models so sub models appear at highest level
self.initial_template = unpack_ensemble_models(
self.initial_template,
self.template_cols,
keep_ensemble=True,
recursive=True,
)
# remove horizontal ensembles from initial_template
if 'Ensemble' in self.initial_template['Model'].tolist():
self.initial_template = self.initial_template[
self.initial_template['Ensemble'] <= 1
]
# run the initial template
submitted_parameters = self.initial_template.copy()
template_result = TemplateWizard(
self.initial_template,
df_train,
df_test,
weights=current_weights,
model_count=model_count,
ensemble=ensemble,
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=self.constraint,
future_regressor_train=future_regressor_train,
future_regressor_forecast=future_regressor_test,
holiday_country=holiday_country,
startTimeStamps=self.startTimeStamps,
template_cols=template_cols,
random_seed=random_seed,
model_interrupt=self.model_interrupt,
grouping_ids=self.grouping_ids,
verbose=verbose,
n_jobs=self.n_jobs,
max_generations=self.max_generations,
traceback=self.traceback,
)
model_count = template_result.model_count
# capture the data from the lower level results
self.initial_results = self.initial_results.concat(template_result)
self.initial_results.model_results['Score'] = generate_score(
self.initial_results.model_results,
metric_weighting=metric_weighting,
prediction_interval=prediction_interval,
)
if result_file is not None:
self.initial_results.save(result_file)
# now run new generations, trying more models based on past successes.
current_generation = 0
while current_generation < self.max_generations:
current_generation += 1
if verbose > 0:
print(
"New Generation: {} of {}".format(
current_generation, self.max_generations
)
)
cutoff_multiple = 5 if current_generation < 10 else 3
top_n = len(self.model_list) * cutoff_multiple
new_template = NewGeneticTemplate(
self.initial_results.model_results,
submitted_parameters=submitted_parameters,
sort_column="Score",
sort_ascending=True,
max_results=top_n,
max_per_model_class=5,
top_n=top_n,
template_cols=template_cols,
transformer_list=self.transformer_list,
transformer_max_depth=self.transformer_max_depth,
models_mode=self.models_mode,
)
submitted_parameters = pd.concat(
[submitted_parameters, new_template],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
template_result = TemplateWizard(
new_template,
df_train,
df_test,
weights=current_weights,
model_count=model_count,
ensemble=ensemble,
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=self.constraint,
future_regressor_train=future_regressor_train,
future_regressor_forecast=future_regressor_test,
holiday_country=holiday_country,
startTimeStamps=self.startTimeStamps,
template_cols=template_cols,
model_interrupt=self.model_interrupt,
grouping_ids=self.grouping_ids,
random_seed=random_seed,
verbose=verbose,
n_jobs=self.n_jobs,
current_generation=current_generation,
max_generations=self.max_generations,
traceback=self.traceback,
)
model_count = template_result.model_count
# capture results from lower-level template run
self.initial_results = self.initial_results.concat(template_result)
self.initial_results.model_results['Score'] = generate_score(
self.initial_results.model_results,
metric_weighting=metric_weighting,
prediction_interval=prediction_interval,
)
if result_file is not None:
self.initial_results.save(result_file)
# try ensembling
if ensemble not in [None, 'none']:
try:
self.score_per_series = generate_score_per_series(
self.initial_results, self.metric_weighting, 1
)
ensemble_templates = EnsembleTemplateGenerator(
self.initial_results,
forecast_length=forecast_length,
ensemble=ensemble,
score_per_series=self.score_per_series,
)
template_result = TemplateWizard(
ensemble_templates,
df_train,
df_test,
weights=current_weights,
model_count=model_count,
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=self.constraint,
ensemble=ensemble,
future_regressor_train=future_regressor_train,
future_regressor_forecast=future_regressor_test,
holiday_country=holiday_country,
startTimeStamps=self.startTimeStamps,
template_cols=template_cols,
model_interrupt=self.model_interrupt,
grouping_ids=self.grouping_ids,
random_seed=random_seed,
current_generation=(current_generation + 1),
verbose=verbose,
n_jobs=self.n_jobs,
traceback=self.traceback,
)
model_count = template_result.model_count
# capture results from lower-level template run
self.initial_results = self.initial_results.concat(template_result)
self.initial_results.model_results['Score'] = generate_score(
self.initial_results.model_results,
metric_weighting=metric_weighting,
prediction_interval=prediction_interval,
)
if result_file is not None:
self.initial_results.save(result_file)
except Exception as e:
print(f"Ensembling Error: {e}")
# drop any duplicates in results
self.initial_results.model_results = (
self.initial_results.model_results.drop_duplicates(
subset=(['ID'] + self.template_cols)
)
)
# if models_to_validate was given as a fraction between 0 and 1, convert it to a count
if (self.models_to_validate < 1) and (self.models_to_validate > 0):
val_frac = self.models_to_validate
val_frac = 1 if val_frac >= 0.99 else val_frac
temp_len = self.initial_results.model_results.shape[0]
self.models_to_validate = val_frac * temp_len
self.models_to_validate = int(np.ceil(self.models_to_validate))
if self.max_per_model_class is None:
temp_len = len(self.model_list)
self.max_per_model_class = (self.models_to_validate / temp_len) + 1
self.max_per_model_class = int(np.ceil(self.max_per_model_class))
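# Worked example of the two conversions above: with models_to_validate=0.15 and
# 1,000 initial results, 150 models are validated; with a 10-model model_list and
# no explicit cap, max_per_model_class becomes ceil(150 / 10 + 1) = 16.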
# check how many validations are possible given the length of the data.
if 'seasonal' in self.validation_method:
temp = df_wide_numeric.shape[0] + self.forecast_length
max_possible = temp / self.seasonal_val_periods
else:
max_possible = (df_wide_numeric.shape[0]) / forecast_length
if (max_possible - np.floor(max_possible)) > self.min_allowed_train_percent:
max_possible = int(max_possible)
else:
max_possible = int(max_possible) - 1
if max_possible < (num_validations + 1):
num_validations = max_possible - 1
if num_validations < 0:
num_validations = 0
print(
"Too many training validations for length of data provided, decreasing num_validations to {}".format(
num_validations
)
)
self.num_validations = num_validations
# construct validation template
validation_template = self.initial_results.model_results[
self.initial_results.model_results['Exceptions'].isna()
]
validation_template = validation_template[validation_template['Ensemble'] <= 1]
validation_template = validation_template.drop_duplicates(
subset=template_cols, keep='first'
)
validation_template = validation_template.sort_values(
by="Score", ascending=True, na_position='last'
)
if str(self.max_per_model_class).isdigit():
validation_template = (
validation_template.sort_values(
'Score', ascending=True, na_position='last'
)
.groupby('Model')
.head(self.max_per_model_class)
.reset_index(drop=True)
)
validation_template = validation_template.sort_values(
'Score', ascending=True, na_position='last'
).head(self.models_to_validate)
# add on best per_series models (which may not be in the top scoring)
if any(x in ensemble for x in self.h_ens_list):
model_results = self.initial_results.model_results
mods = generate_score_per_series(
self.initial_results, self.metric_weighting, 1
).idxmin()
per_series_val = model_results[
model_results['ID'].isin(mods.unique().tolist())
]
validation_template = pd.concat(
[validation_template, per_series_val], axis=0
)
validation_template = validation_template.drop_duplicates(
subset=['Model', 'ModelParameters', 'TransformationParameters']
)
validation_template = validation_template[self.template_cols]
# run validations
if num_validations > 0:
model_count = 0
for y in range(num_validations):
if verbose > 0:
print("Validation Round: {}".format(str(y + 1)))
# slice the validation data into current slice
val_list = ['backwards', 'back', 'backward']
if self.validation_method in val_list:
# gradually remove the end
current_slice = df_wide_numeric.head(
df_wide_numeric.shape[0] - (y + 1) * forecast_length
)
elif self.validation_method == 'even':
# /num_validations biases it towards the last segment
validation_size = len(df_wide_numeric.index) - forecast_length
validation_size = validation_size / (num_validations + 1)
validation_size = int(np.floor(validation_size))
current_slice = df_wide_numeric.head(
validation_size * (y + 1) + forecast_length
)
elif 'seasonal' in self.validation_method:
val_per = (y + 1) * self.seasonal_val_periods
if self.seasonal_val_periods < forecast_length:
pass
else:
val_per = val_per - forecast_length
val_per = df_wide_numeric.shape[0] - val_per
current_slice = df_wide_numeric.head(val_per)
elif self.validation_method in ['custom', "similarity"]:
current_slice = df_wide_numeric.reindex(
self.validation_indexes[(y + 1)]
)
else:
raise ValueError(
"Validation Method not recognized try 'even', 'backwards'"
)
# subset series (if used) and take a new train/test split
if self.subset_flag:
# mosaic can't handle different cols in each validation
if "mosaic" in self.ensemble:
rand_st = random_seed
else:
rand_st = random_seed + y + 1
df_subset = subset_series(
current_slice,
list((weights.get(i)) for i in current_slice.columns),
n=self.subset,
random_state=rand_st,
)
if self.verbose > 1:
print(f'{y + 1} subset is of: {df_subset.columns}')
else:
df_subset = current_slice
# subset weighting info
if not weighted:
current_weights = {x: 1 for x in df_subset.columns}
else:
current_weights = {x: weights[x] for x in df_subset.columns}
val_df_train, val_df_test = simple_train_test_split(
df_subset,
forecast_length=forecast_length,
min_allowed_train_percent=self.min_allowed_train_percent,
verbose=self.verbose,
)
self.validation_train_indexes.append(val_df_train.index)
self.validation_test_indexes.append(val_df_test.index)
if self.verbose >= 2:
print(f'Validation index is {val_df_train.index}')
# slice regressor into current validation slices
if future_regressor is not None:
val_future_regressor_train = future_regressor.reindex(
index=val_df_train.index
)
val_future_regressor_test = future_regressor.reindex(
index=val_df_test.index
)
else:
val_future_regressor_train = None
val_future_regressor_test = None
# force NaN for robustness
if self.introduce_na or (self.introduce_na is None and self._nan_tail):
if self.introduce_na:
idx = val_df_train.index
# make 20% of rows NaN at random
val_df_train = val_df_train.sample(
frac=0.8, random_state=self.random_seed
).reindex(idx)
nan_frac = val_df_train.shape[1] / num_validations
val_df_train.iloc[
-2:, int(nan_frac * y) : int(nan_frac * (y + 1))
] = np.nan
# run validation template on current slice
template_result = TemplateWizard(
validation_template,
df_train=val_df_train,
df_test=val_df_test,
weights=current_weights,
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=self.constraint,
ensemble=ensemble,
future_regressor_train=val_future_regressor_train,
future_regressor_forecast=val_future_regressor_test,
holiday_country=holiday_country,
startTimeStamps=self.startTimeStamps,
template_cols=self.template_cols,
model_interrupt=self.model_interrupt,
grouping_ids=self.grouping_ids,
random_seed=random_seed,
verbose=verbose,
n_jobs=self.n_jobs,
validation_round=(y + 1),
traceback=self.traceback,
)
model_count = template_result.model_count
# gather results of template run
self.initial_results = self.initial_results.concat(template_result)
self.initial_results.model_results['Score'] = generate_score(
self.initial_results.model_results,
metric_weighting=metric_weighting,
prediction_interval=prediction_interval,
)
self.validation_results = copy.copy(self.initial_results)
# aggregate validation results
self.validation_results = validation_aggregation(self.validation_results)
error_msg_template = """No models available from validation.
Try increasing models_to_validate, max_per_model_class
or otherwise increase models available."""
# Construct horizontal style ensembles
if any(x in ensemble for x in self.h_ens_list):
ensemble_templates = pd.DataFrame()
try:
if 'horizontal' in ensemble or 'probabilistic' in ensemble:
self.score_per_series = generate_score_per_series(
self.initial_results,
metric_weighting=metric_weighting,
total_validations=(num_validations + 1),
)
ens_templates = HorizontalTemplateGenerator(
self.score_per_series,
model_results=self.initial_results.model_results,
forecast_length=forecast_length,
ensemble=ensemble.replace('probabilistic', ' ').replace(
'hdist', ' '
),
subset_flag=self.subset_flag,
)
ensemble_templates = pd.concat(
[ensemble_templates, ens_templates], axis=0
)
except Exception as e:
if self.verbose >= 0:
print(f"Horizontal Ensemble Generation Error: {repr(e)}")
time.sleep(5)
try:
if 'mosaic' in ensemble:
ens_templates = generate_mosaic_template(
initial_results=self.initial_results.model_results,
full_mae_ids=self.initial_results.full_mae_ids,
num_validations=num_validations,
col_names=df_subset.columns,
full_mae_errors=self.initial_results.full_mae_errors,
)
ensemble_templates = pd.concat(
[ensemble_templates, ens_templates], axis=0
)
except Exception as e:
if self.verbose >= 0:
print(f"Mosaic Ensemble Generation Error: {e}")
try:
# test on initial test split to make sure they work
template_result = TemplateWizard(
ensemble_templates,
df_train,
df_test,
weights=current_weights,
model_count=0,
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=self.constraint,
future_regressor_train=future_regressor_train,
future_regressor_forecast=future_regressor_test,
holiday_country=holiday_country,
startTimeStamps=self.startTimeStamps,
template_cols=template_cols,
model_interrupt=self.model_interrupt,
grouping_ids=self.grouping_ids,
random_seed=random_seed,
verbose=verbose,
n_jobs=self.n_jobs,
traceback=self.traceback,
)
# capture results from lower-level template run
template_result.model_results['TotalRuntime'].fillna(
pd.Timedelta(seconds=60), inplace=True
)
self.initial_results.model_results = pd.concat(
[self.initial_results.model_results, template_result.model_results],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
self.initial_results.model_results['Score'] = generate_score(
self.initial_results.model_results,
metric_weighting=metric_weighting,
prediction_interval=prediction_interval,
)
if result_file is not None:
self.initial_results.save(result_file)
except Exception as e:
if self.verbose >= 0:
print(f"Ensembling Error: {e}")
template_result = TemplateEvalObject()
try:
template_result.model_results['smape']
except KeyError:
template_result.model_results['smape'] = 0
# rerun validation_results aggregation with new models added
self.validation_results = copy.copy(self.initial_results)
self.validation_results = validation_aggregation(self.validation_results)
# use the best of these ensembles if any ran successfully
try:
horz_flag = template_result.model_results['Exceptions'].isna().any()
except Exception:
horz_flag = False
if not template_result.model_results.empty and horz_flag:
template_result.model_results['Score'] = generate_score(
template_result.model_results,
metric_weighting=metric_weighting,
prediction_interval=prediction_interval,
)
self.best_model = template_result.model_results.sort_values(
by="Score", ascending=True, na_position='last'
).head(1)[self.template_cols_id]
self.ensemble_check = 1
# else use the best of the previous
else:
if self.verbose >= 0:
print("Horizontal ensemble failed. Using best non-horizontal.")
time.sleep(3)
eligible_models = self.validation_results.model_results[
self.validation_results.model_results['Runs']
>= (num_validations + 1)
]
try:
self.best_model = (
eligible_models.sort_values(
by="Score", ascending=True, na_position='last'
)
.drop_duplicates(subset=self.template_cols)
.head(1)[self.template_cols_id]
)
except IndexError:
raise ValueError(error_msg_template)
else:
# choose best model
eligible_models = self.validation_results.model_results[
self.validation_results.model_results['Runs'] >= (num_validations + 1)
]
try:
self.best_model = (
eligible_models.sort_values(
by="Score", ascending=True, na_position='last'
)
.drop_duplicates(subset=self.template_cols)
.head(1)[template_cols]
)
except IndexError:
raise ValueError(error_msg_template)
# give a more convenient dict option
self.best_model_name = self.best_model['Model'].iloc[0]
self.best_model_params = json.loads(self.best_model['ModelParameters'].iloc[0])
self.best_model_transformation_params = json.loads(
self.best_model['TransformationParameters'].iloc[0]
)
self.best_model_ensemble = self.best_model['Ensemble'].iloc[0]
self.ensemble_check = int(self.best_model_ensemble > 0)
# set flags to check if regressors or ensemble used in final model.
param_dict = json.loads(self.best_model.iloc[0]['ModelParameters'])
if self.ensemble_check == 1:
self.used_regressor_check = self._regr_param_check(param_dict)
elif self.ensemble_check == 0:
self.used_regressor_check = False
try:
reg_param = param_dict['regression_type']
if reg_param == 'User':
self.used_regressor_check = True
except KeyError:
pass
else:
print(f"Warning: ensemble_check not in [0,1]: {self.ensemble_check}")
# clean up any remaining print statements
sys.stdout.flush()
return self
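# Illustrative usage sketch of fit() (not executed here); the column names are
# hypothetical and any long-format frame with date/series/value columns works:
#   from autots import AutoTS
#   model = AutoTS(forecast_length=14, frequency='infer', max_generations=5)
#   model = model.fit(df_long, date_col='datetime', value_col='value', id_col='series_id')
#   print(model.best_model_name, model.best_model_params)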
def _regr_param_check(self, param_dict):
"""Help to search for if a regressor was used in model."""
out = False
for key in param_dict['models']:
cur_dict = json.loads(param_dict['models'][key]['ModelParameters'])
try:
reg_param = cur_dict['regression_type']
if reg_param == 'User':
return True
except KeyError:
pass
if param_dict['models'][key]['Model'] == 'Ensemble':
out = self._regr_param_check(cur_dict)
if out:
return out
return out
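# Sketch of the nested structure _regr_param_check walks (values are illustrative):
#   param_dict = {
#       "models": {
#           "abc123": {"Model": "MultivariateRegression",
#                      "ModelParameters": '{"regression_type": "User"}'},
#           "def456": {"Model": "Ensemble",
#                      "ModelParameters": '{"models": {...}}'},
#       }
#   }
# A 'regression_type' of 'User' at any depth marks the regressor as used.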
def predict(
self,
forecast_length: int = "self",
prediction_interval: float = 'self',
future_regressor=None,
hierarchy=None,
just_point_forecast: bool = False,
verbose: int = 'self',
):
"""Generate forecast data immediately following dates of index supplied to .fit().
Args:
forecast_length (int): Number of periods of data to forecast ahead
prediction_interval (float): interval of upper/lower forecasts.
defaults to 'self' ie the interval specified in __init__()
if prediction_interval is a list, then returns a dict of forecast objects.
future_regressor (numpy.Array): additional regressor
hierarchy: Not yet implemented
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Return:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
verbose = self.verbose if verbose == 'self' else verbose
if forecast_length == 'self':
forecast_length = self.forecast_length
if prediction_interval == 'self':
prediction_interval = self.prediction_interval
# checkup regressor
if future_regressor is not None:
if not isinstance(future_regressor, pd.DataFrame):
future_regressor = pd.DataFrame(future_regressor)
if self.future_regressor_train is None:
raise ValueError(
"regressor passed to .predict but no regressor was passed to .fit"
)
# handle any non-numeric data, crudely
future_regressor = self.regr_num_trans.transform(future_regressor)
# make sure training regressor fits training data index
self.future_regressor_train = self.future_regressor_train.reindex(
index=self.df_wide_numeric.index
)
# allow multiple prediction intervals
if isinstance(prediction_interval, list):
forecast_objects = {}
for interval in prediction_interval:
df_forecast = model_forecast(
model_name=self.best_model_name,
model_param_dict=self.best_model_params,
model_transform_dict=self.best_model_transformation_params,
df_train=self.df_wide_numeric,
forecast_length=forecast_length,
frequency=self.frequency,
prediction_interval=interval,
no_negatives=self.no_negatives,
constraint=self.constraint,
future_regressor_train=self.future_regressor_train,
future_regressor_forecast=future_regressor,
holiday_country=self.holiday_country,
startTimeStamps=self.startTimeStamps,
grouping_ids=self.grouping_ids,
random_seed=self.random_seed,
verbose=verbose,
n_jobs=self.n_jobs,
template_cols=self.template_cols,
)
# convert categorical back to numeric
trans = self.categorical_transformer
df_forecast.forecast = trans.inverse_transform(df_forecast.forecast)
df_forecast.lower_forecast = trans.inverse_transform(
df_forecast.lower_forecast
)
df_forecast.upper_forecast = trans.inverse_transform(
df_forecast.upper_forecast
)
forecast_objects[interval] = df_forecast
return forecast_objects
else:
df_forecast = model_forecast(
model_name=self.best_model_name,
model_param_dict=self.best_model_params,
model_transform_dict=self.best_model_transformation_params,
df_train=self.df_wide_numeric,
forecast_length=forecast_length,
frequency=self.frequency,
prediction_interval=prediction_interval,
no_negatives=self.no_negatives,
constraint=self.constraint,
future_regressor_train=self.future_regressor_train,
future_regressor_forecast=future_regressor,
holiday_country=self.holiday_country,
startTimeStamps=self.startTimeStamps,
grouping_ids=self.grouping_ids,
random_seed=self.random_seed,
verbose=verbose,
n_jobs=self.n_jobs,
template_cols=self.template_cols,
)
# convert categorical back to numeric
trans = self.categorical_transformer
df_forecast.forecast = trans.inverse_transform(df_forecast.forecast)
df_forecast.lower_forecast = trans.inverse_transform(
df_forecast.lower_forecast
)
df_forecast.upper_forecast = trans.inverse_transform(
df_forecast.upper_forecast
)
sys.stdout.flush()
if just_point_forecast:
return df_forecast.forecast
else:
return df_forecast
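# Illustrative usage sketch (assumes a fitted AutoTS instance named `model`):
#   prediction = model.predict()
#   point_df = prediction.forecast            # pd.DataFrame of point forecasts
#   lower_df = prediction.lower_forecast
#   upper_df = prediction.upper_forecast
# Passing a list of intervals returns a dict keyed by interval:
#   preds = model.predict(prediction_interval=[0.9, 0.5])
#   preds[0.9].forecast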
def results(self, result_set: str = 'initial'):
"""Convenience function to return tested models table.
Args:
result_set (str): 'validation' or 'initial'
"""
if result_set == 'validation':
return self.validation_results.model_results
else:
return self.initial_results.model_results
def failure_rate(self, result_set: str = 'initial'):
"""Return fraction of models passing with exceptions.
Args:
result_set (str, optional): 'validation' or 'initial'. Defaults to 'initial'.
Returns:
float.
"""
initial_results = self.results(result_set=result_set)
n = initial_results.shape[0]
x = (n - initial_results['Exceptions'].isna().sum()) / n
return x
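# Illustrative usage sketch (assumes a fitted AutoTS instance named `model`):
#   all_tried = model.results()                # every model attempted
#   validated = model.results('validation')    # aggregated validation results
#   print(f"share of models that errored: {model.failure_rate():.2%}")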
def export_template(
self,
filename=None,
models: str = 'best',
n: int = 5,
max_per_model_class: int = None,
include_results: bool = False,
):
"""Export top results as a reusable template.
Args:
filename (str): 'csv' or 'json' (in filename).
`None` to return a dataframe and not write a file.
models (str): 'best' or 'all'
n (int): if models = 'best', how many n-best to export
max_per_model_class (int): if models = 'best',
the max number of each model class to include in template
include_results (bool): whether to include performance metrics
"""
if models == 'all':
export_template = self.initial_results.model_results[self.template_cols]
export_template = export_template.drop_duplicates()
elif models == 'best':
# skip to the answer if just n==1
if n == 1 and not include_results:
export_template = self.best_model
else:
export_template = self.validation_results.model_results
export_template = export_template[
export_template['Runs'] >= (self.num_validations + 1)
]
if any(x in self.ensemble for x in self.h_ens_list):
temp = self.initial_results.model_results
temp = temp[temp['Ensemble'] >= 2]
temp = temp[temp['Exceptions'].isna()]
export_template = export_template.merge(
temp,
how='outer',
on=export_template.columns.intersection(temp.columns).to_list(),
)
export_template['Score'] = generate_score(
export_template,
metric_weighting=self.metric_weighting,
prediction_interval=self.prediction_interval,
)
if str(max_per_model_class).isdigit():
export_template = (
export_template.sort_values('Score', ascending=True)
.groupby('Model')
.head(max_per_model_class)
.reset_index()
)
export_template = export_template.nsmallest(n, columns=['Score'])
if not include_results:
export_template = export_template[self.template_cols]
export_template = pd.concat(
[self.best_model, export_template]
).drop_duplicates()
else:
raise ValueError("`models` must be 'all' or 'best'")
try:
if filename is None:
return export_template
elif '.csv' in filename:
return export_template.to_csv(filename, index=False)
elif '.json' in filename:
return export_template.to_json(filename, orient='columns')
else:
raise ValueError("file must be .csv or .json")
except PermissionError:
raise PermissionError(
"Permission Error: directory or existing file is locked for editing."
)
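# Illustrative usage sketch (the file name is hypothetical):
#   model.export_template("best_models.csv", models='best', n=10, max_per_model_class=3)
#   top_df = model.export_template(None, models='best', n=5)  # return a DataFrame instead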
def import_template(
self, filename: str, method: str = "add_on", enforce_model_list: bool = True
):
"""Import a previously exported template of model parameters.
Must be done before the AutoTS object is .fit().
Args:
filename (str): file location (or a pd.DataFrame already loaded)
method (str): 'add_on' or 'only' - "add_on" keeps `initial_template` generated in init. "only" uses only this template.
enforce_model_list (bool): if True, remove model types not in model_list
"""
if isinstance(filename, pd.DataFrame):
import_template = filename.copy()
elif '.csv' in filename:
import_template = pd.read_csv(filename)
elif '.json' in filename:
import_template = pd.read_json(filename, orient='columns')
else:
raise ValueError("file must be .csv or .json")
try:
import_template = import_template[self.template_cols]
except Exception:
print(
"Column names {} were not recognized as matching template columns: {}".format(
str(import_template.columns), str(self.template_cols)
)
)
import_template = unpack_ensemble_models(
import_template, self.template_cols, keep_ensemble=True, recursive=True
)
if enforce_model_list:
# remove models not in given model list
mod_list = self.model_list + ['Ensemble']
import_template = import_template[import_template['Model'].isin(mod_list)]
if import_template.shape[0] == 0:
raise ValueError(
"Len 0. Model_list does not match models in template! Try enforce_model_list=False."
)
if method.lower() in ['add on', 'addon', 'add_on']:
self.initial_template = self.initial_template.merge(
import_template,
how='outer',
on=self.initial_template.columns.intersection(
import_template.columns
).to_list(),
)
self.initial_template = self.initial_template.drop_duplicates(
subset=self.template_cols
)
elif method.lower() in ['only', 'user only', 'user_only', 'import_only']:
self.initial_template = import_template
else:
return ValueError("method must be 'add_on' or 'only'")
return self
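# Illustrative usage sketch (file name is hypothetical); call before .fit():
#   model = AutoTS(forecast_length=14)
#   model = model.import_template("best_models.csv", method='only', enforce_model_list=False)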
def import_results(self, filename):
"""Add results from another run on the same data.
Input can be a filename ending in .csv or .pickle,
or a DataFrame of model results, or a full TemplateEvalObject.
"""
csv_flag = False
if isinstance(filename, str):
if ".csv" in filename:
csv_flag = True
if isinstance(filename, pd.DataFrame) or csv_flag:
    if csv_flag:
        past_results = pd.read_csv(filename)
    else:
        past_results = filename.copy()
# keep only the models that ran without Exceptions
past_results = past_results[pd.isnull(past_results['Exceptions'])]
# remove validation results
past_results = past_results[(past_results['ValidationRound']) == 0]
past_results['TotalRuntime'] = pd.to_timedelta(past_results['TotalRuntime'])
# combine with any existing results
self.initial_results.model_results = pd.concat(
[past_results, self.initial_results.model_results],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
self.initial_results.model_results.drop_duplicates(
subset=self.template_cols, keep='first', inplace=True
)
else:
if isinstance(filename, TemplateEvalObject):
new_obj = filename
elif '.pickle' in filename:
import pickle
new_obj = pickle.load(open(filename, "rb"))
else:
raise ValueError("import type not recognized.")
self.initial_results = self.initial_results.concat(new_obj)
return self
def back_forecast(
self, column=None, n_splits: int = 3, tail: int = None, verbose: int = 0
):
"""Create forecasts for the historical training data, ie. backcast or back forecast.
This forecasts on historical data; these are not in-sample fitted values such as other packages often return.
As such, it is slower, but more representative of real-world model performance.
There may be jumps in data between chunks.
Args are same as for model_forecast except...
n_splits(int): how many pieces to split data into. Pass 2 for fastest, or "auto" for best accuracy
column (str): if to run on only one column, pass column name. Faster than full.
tail (int): df.tail() of the dataset, back_forecast is only run on n most recent observations.
Returns a standard prediction object (access .forecast, .lower_forecast, .upper_forecast)
"""
if self.best_model.empty:
raise ValueError("No best_model. AutoTS .fit() needs to be run.")
if column is not None:
input_df = pd.DataFrame(self.df_wide_numeric[column])
else:
input_df = self.df_wide_numeric
if tail is not None:
input_df = input_df.tail(tail)
result = back_forecast(
df=input_df,
model_name=self.best_model_name,
model_param_dict=self.best_model_params,
model_transform_dict=self.best_model_transformation_params,
future_regressor_train=self.future_regressor_train,
n_splits=n_splits,
forecast_length=self.forecast_length,
frequency=self.frequency,
prediction_interval=self.prediction_interval,
no_negatives=self.no_negatives,
constraint=self.constraint,
holiday_country=self.holiday_country,
random_seed=self.random_seed,
n_jobs=self.n_jobs,
verbose=verbose,
)
return result
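# Illustrative usage sketch (assumes a fitted AutoTS instance named `model`):
#   backcast = model.back_forecast(n_splits=3, tail=200)
#   historical_forecast = backcast.forecast   # aligned to the historical index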
def horizontal_to_df(self):
"""helper function for plotting."""
if self.best_model.empty:
raise ValueError("No best_model. AutoTS .fit() needs to be run.")
if self.best_model['Ensemble'].iloc[0] != 2:
raise ValueError("Only works on horizontal ensemble type models.")
ModelParameters = self.best_model_params
series = ModelParameters['series']
series = pd.DataFrame.from_dict(series, orient="index").reset_index(drop=False)
if series.shape[1] > 2:
# for mosaic style ensembles, choose the mode model id
series.set_index(series.columns[0], inplace=True)
series = series.mode(axis=1)[0].to_frame().reset_index(drop=False)
series.columns = ['Series', 'ID']
series = series.merge(
self.results()[['ID', "Model"]].drop_duplicates(), on="ID"
)
series = series.merge(
self.df_wide_numeric.std().to_frame(), right_index=True, left_on="Series"
)
series = series.merge(
self.df_wide_numeric.mean().to_frame(), right_index=True, left_on="Series"
)
series.columns = ["Series", "ID", 'Model', "Volatility", "Mean"]
series['Transformers'] = series['ID'].copy()
series['FillNA'] = series['ID'].copy()
lookup = {}
na_lookup = {}
for k, v in ModelParameters['models'].items():
try:
trans_params = json.loads(v.get('TransformationParameters', '{}'))
lookup[k] = ",".join(trans_params.get('transformations', {}).values())
na_lookup[k] = trans_params.get('fillna', '')
except Exception:
lookup[k] = "None"
na_lookup[k] = "None"
series['Transformers'] = (
series['Transformers'].replace(lookup).replace("", "None")
)
series['FillNA'] = series['FillNA'].replace(na_lookup).replace("", "None")
return series
def mosaic_to_df(self):
"""Helper function to create a readable df of models in mosaic."""
if self.best_model.empty:
raise ValueError("No best_model. AutoTS .fit() needs to be run.")
if self.best_model['Ensemble'].iloc[0] != 2:
raise ValueError("Only works on horizontal ensemble type models.")
ModelParameters = self.best_model_params
if str(ModelParameters['model_name']).lower() != 'mosaic':
raise ValueError("Only works on mosaic ensembles.")
series = pd.DataFrame.from_dict(ModelParameters['series'])
lookup = {k: v['Model'] for k, v in ModelParameters['models'].items()}
return series.replace(lookup)
def plot_horizontal(self, max_series: int = 20, **kwargs):
"""Simple plot to visualize assigned series: models.
Note that for 'mosaic' ensembles, it only plots the model type of the most common model_id for each series, or the first one if there is a tie.
Args:
max_series (int): max number of points to plot
**kwargs passed to pandas.plot()
"""
series = self.horizontal_to_df()
# remove some data to prevent overcrowding the graph, if necessary
max_series = series.shape[0] if series.shape[0] < max_series else max_series
series = series.sample(max_series, replace=False)
# sklearn.preprocessing.normalizer also might work
series[['log(Volatility)', 'log(Mean)']] = np.log(
series[['Volatility', 'Mean']]
)
# plot
series.set_index(['Model', 'log(Mean)']).unstack('Model')[
'log(Volatility)'
].plot(style='o', **kwargs)
def plot_horizontal_transformers(
self, method="transformers", color_list=None, **kwargs
):
"""Simple plot to visualize transformers used.
Note this doesn't capture transformers nested in simple ensembles.
Args:
method (str): 'fillna' or 'transformers' - which to plot
color_list = list of colors to *sample* for bar colors. Can be names or hex.
**kwargs passed to pandas.plot()
"""
series = self.horizontal_to_df()
if str(method).lower() == "fillna":
transformers = series['FillNA'].value_counts()
else:
transformers = pd.Series(
",".join(series['Transformers']).split(",")
).value_counts()
if color_list is None:
color_list = colors_list
colors = random.sample(color_list, transformers.shape[0])
# plot
transformers.plot(kind='bar', color=colors, **kwargs)
def plot_generation_loss(self, **kwargs):
"""Plot improvement in accuracy over generations.
Note: this is only "one size fits all" accuracy and
doesn't account for the benefits seen from ensembling.
Args:
**kwargs passed to pd.DataFrame.plot()
"""
for_gens = self.initial_results.model_results[
(self.initial_results.model_results['ValidationRound'] == 0)
& (self.initial_results.model_results['Ensemble'] < 1)
]
for_gens.groupby("Generation")['Score'].min().cummin().plot(
ylabel="Lowest Score", **kwargs
)
def plot_backforecast(
self, series=None, n_splits: int = 3, start_date=None, **kwargs
):
"""Plot the historical data and fit forecast on historic.
Args:
series (str or list): column names of time series
n_splits (int or str): "auto", number > 2, higher more accurate but slower
**kwargs passed to pd.DataFrame.plot()
"""
if series is None:
series = random.choice(self.df_wide_numeric.columns)
b_df = self.back_forecast(column=series, n_splits=n_splits, verbose=0).forecast
b_df = b_df.rename(columns=lambda x: str(x) + "_forecast")
plot_df = pd.concat(
[
pd.DataFrame(self.df_wide_numeric[series]),
b_df,
],
axis=1,
)
if start_date is not None:
plot_df = plot_df[plot_df.index >= start_date]
plot_df = remove_leading_zeros(plot_df)
plot_df.plot(**kwargs)
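# Illustrative plotting sketch (assumes a fitted AutoTS instance named `model`;
# the start_date value is hypothetical):
#   import matplotlib.pyplot as plt
#   model.plot_generation_loss()
#   model.plot_backforecast(n_splits="auto", start_date="2021-01-01")
#   plt.show()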
colors_list = [
'#FF00FF',
'#7FFFD4',
'#00FFFF',
'#F5DEB3',
'#FF6347',
'#8B008B',
'#696969',
'#FFC0CB',
'#C71585',
'#008080',
'#663399',
'#32CD32',
'#66CDAA',
'#A9A9A9',
'#2F4F4F',
'#FFDEAD',
'#800000',
'#FDF5E6',
'#F5F5F5',
'#F0FFF0',
'#87CEEB',
'#A52A2A',
'#90EE90',
'#7FFF00',
'#E9967A',
'#1E90FF',
'#FFF0F5',
'#ADD8E6',
'#008B8B',
'#FFF5EE',
'#00FA9A',
'#9370DB',
'#4682B4',
'#006400',
'#AFEEEE',
'#CD853F',
'#9400D3',
'#EE82EE',
'#00008B',
'#4B0082',
'#0403A7',
"#000000",
]
class AutoTSIntervals(object):
"""Autots looped to test multiple prediction intervals. Experimental.
Runs max_generations on first prediction interval, then validates on remainder.
Most args are passed through to AutoTS().
Args:
interval_models_to_validate (int): number of models to validate on each prediction interval.
import_results (str): results from a run on the same data to load, e.g. `filename.pickle`.
    Currently result_file and import_results only save/load the initial run, not the validations.
"""
def fit(
self,
prediction_intervals,
forecast_length,
df_long,
max_generations,
num_validations,
validation_method,
models_to_validate,
interval_models_to_validate,
date_col,
value_col,
id_col=None,
import_template=None,
import_method='only',
import_results=None,
result_file=None,
model_list='all',
metric_weighting: dict = {
'smape_weighting': 1,
'mae_weighting': 0,
'rmse_weighting': 1,
'containment_weighting': 0,
'runtime_weighting': 0,
'spl_weighting': 10,
'contour_weighting': 0,
},
weights: dict = {},
grouping_ids=None,
future_regressor=None,
model_interrupt: bool = False,
constraint=2,
no_negatives=False,
remove_leading_zeroes=False,
random_seed=2020,
):
"""Train and find best."""
overall_results = TemplateEvalObject()
per_series_spl = pd.DataFrame()
runs = 0
for interval in prediction_intervals:
if runs != 0:
max_generations = 0
models_to_validate = 0.99
print(f"Current interval is {interval}")
current_model = AutoTS(
forecast_length=forecast_length,
prediction_interval=interval,
ensemble="probabilistic-max",
max_generations=max_generations,
model_list=model_list,
constraint=constraint,
no_negatives=no_negatives,
remove_leading_zeroes=remove_leading_zeroes,
metric_weighting=metric_weighting,
subset=None,
random_seed=random_seed,
num_validations=num_validations,
validation_method=validation_method,
model_interrupt=model_interrupt,
models_to_validate=models_to_validate,
)
if import_template is not None:
current_model = current_model.import_template(
import_template, method=import_method
)
if import_results is not None:
current_model = current_model.import_results(import_results)
current_model = current_model.fit(
df_long,
future_regressor=future_regressor,
weights=weights,
grouping_ids=grouping_ids,
result_file=result_file,
date_col=date_col,
value_col=value_col,
id_col=id_col,
)
current_model.initial_results.model_results['interval'] = interval
temp = current_model.initial_results
overall_results = overall_results.concat(temp)
temp = current_model.initial_results.per_series_spl
per_series_spl = pd.concat([per_series_spl, temp], axis=0)
if runs == 0:
result_file = None
import_results = None
import_template = current_model.export_template(
None, models='best', n=interval_models_to_validate
)
runs += 1
self.validation_results = validation_aggregation(overall_results)
self.results = overall_results.model_results
# remove models not validated
temp = per_series_spl.mean(axis=1).groupby(level=0).count()
temp = temp[temp >= ((runs) * (num_validations + 1))]
per_series_spl = per_series_spl[per_series_spl.index.isin(temp.index)]
per_series_spl = per_series_spl.groupby(level=0).mean()
# from autots.models.ensemble import HorizontalTemplateGenerator
ens_templates = HorizontalTemplateGenerator(
per_series_spl,
model_results=overall_results.model_results,
forecast_length=forecast_length,
ensemble='probabilistic-max',
subset_flag=False,
)
self.per_series_spl = per_series_spl
self.ens_templates = ens_templates
self.prediction_intervals = prediction_intervals
self.future_regressor_train = future_regressor
self.forecast_length = forecast_length
self.df_wide_numeric = current_model.df_wide_numeric
self.frequency = current_model.frequency
self.no_negatives = current_model.no_negatives
self.constraint = current_model.constraint
self.holiday_country = current_model.holiday_country
self.startTimeStamps = current_model.startTimeStamps
self.random_seed = current_model.random_seed
self.verbose = current_model.verbose
self.template_cols = current_model.template_cols
# grouping_ids is needed later by predict()
self.grouping_ids = current_model.grouping_ids
self.categorical_transformer = current_model.categorical_transformer
return self
def predict(self, future_regressor=None, verbose: int = 'self') -> dict:
"""Generate forecasts after training complete."""
if future_regressor is not None:
future_regressor = pd.DataFrame(future_regressor)
self.future_regressor_train = self.future_regressor_train.reindex(
index=self.df_wide_numeric.index
)
forecast_objects = {}
verbose = self.verbose if verbose == 'self' else verbose
urow = self.ens_templates.iloc[0]
for interval in self.prediction_intervals:
df_forecast = model_forecast(
model_name=urow['Model'],
model_param_dict=urow['ModelParameters'],
model_transform_dict=urow['TransformationParameters'],
df_train=self.df_wide_numeric,
forecast_length=self.forecast_length,
frequency=self.frequency,
prediction_interval=interval,
no_negatives=self.no_negatives,
constraint=self.constraint,
future_regressor_train=self.future_regressor_train,
future_regressor_forecast=future_regressor,
holiday_country=self.holiday_country,
startTimeStamps=self.startTimeStamps,
grouping_ids=self.grouping_ids,
random_seed=self.random_seed,
verbose=verbose,
template_cols=self.template_cols,
)
trans = self.categorical_transformer
df_forecast.forecast = trans.inverse_transform(df_forecast.forecast)
df_forecast.lower_forecast = trans.inverse_transform(
df_forecast.lower_forecast
)
df_forecast.upper_forecast = trans.inverse_transform(
df_forecast.upper_forecast
)
forecast_objects[interval] = df_forecast
return forecast_objects
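# Illustrative usage sketch of this experimental interval looper (argument values
# are hypothetical):
#   intervals_model = AutoTSIntervals().fit(
#       prediction_intervals=[0.99, 0.9, 0.5], forecast_length=14, df_long=df_long,
#       max_generations=5, num_validations=2, validation_method='backwards',
#       models_to_validate=0.15, interval_models_to_validate=50,
#       date_col='datetime', value_col='value', id_col='series_id',
#   )
#   forecasts_by_interval = intervals_model.predict()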
def fake_regressor(
df,
forecast_length: int = 14,
date_col: str = None,
value_col: str = None,
id_col: str = None,
frequency: str = 'infer',
aggfunc: str = 'first',
drop_most_recent: int = 0,
na_tolerance: float = 0.95,
drop_data_older_than_periods: int = 100000,
dimensions: int = 1,
verbose: int = 0,
):
"""Create a fake regressor of random numbers for testing purposes."""
if date_col is None and value_col is None:
df_wide = pd.DataFrame(df)
assert (
type(df_wide.index) is pd.DatetimeIndex
), "df index is not pd.DatetimeIndex"
else:
df_wide = long_to_wide(
df,
date_col=date_col,
value_col=value_col,
id_col=id_col,
aggfunc=aggfunc,
)
df_wide = df_cleanup(
df_wide,
frequency=frequency,
na_tolerance=na_tolerance,
drop_data_older_than_periods=drop_data_older_than_periods,
aggfunc=aggfunc,
drop_most_recent=drop_most_recent,
verbose=verbose,
)
if frequency == 'infer':
frequency = pd.infer_freq(df_wide.index, warn=True)
forecast_index = pd.date_range(
freq=frequency, start=df_wide.index[-1], periods=forecast_length + 1
)
forecast_index = forecast_index[1:]
if dimensions <= 1:
future_regressor_train = pd.Series(
np.random.randint(0, 100, size=len(df_wide.index)), index=df_wide.index
)
future_regressor_forecast = pd.Series(
np.random.randint(0, 100, size=(forecast_length)), index=forecast_index
)
else:
future_regressor_train = pd.DataFrame(
np.random.randint(0, 100, size=(len(df_wide.index), dimensions)),
index=df_wide.index,
)
future_regressor_forecast = pd.DataFrame(
np.random.randint(0, 100, size=(forecast_length, dimensions)),
index=forecast_index,
)
return future_regressor_train, future_regressor_forecast
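# Illustrative usage sketch (column names are hypothetical):
#   regr_train, regr_forecast = fake_regressor(
#       df_long, forecast_length=14, date_col='datetime', value_col='value',
#       id_col='series_id', dimensions=2,
#   )
#   # regr_train aligns to the training index, regr_forecast to the forecast horizon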
def error_correlations(all_result, result: str = 'corr'):
"""
One-hot encode an AutoTS results df and return the df or its correlation with errors.
Args:
    all_result (pandas.DataFrame): AutoTS model_results df
result (str): whether to return 'df', 'corr', 'poly corr' with errors
"""
import json
from sklearn.preprocessing import OneHotEncoder
all_results = all_result.copy()
all_results = all_results.drop_duplicates()
all_results['ExceptionFlag'] = (~all_results['Exceptions'].isna()).astype(int)
all_results = all_results[all_results['ExceptionFlag'] > 0]
all_results = all_results.reset_index(drop=True)
trans_df = all_results['TransformationParameters'].apply(json.loads)
try:
trans_df = pd.json_normalize(trans_df) # .fillna(value='NaN')
except Exception:
trans_df = pd.io.json.json_normalize(trans_df)
trans_cols1 = trans_df.columns
trans_df = trans_df.astype(str).replace('nan', 'NaNZ')
trans_transformer = OneHotEncoder(sparse=False).fit(trans_df)
trans_df = pd.DataFrame(trans_transformer.transform(trans_df))
trans_cols = np.array(
[x1 + x2 for x1, x2 in zip(trans_cols1, trans_transformer.categories_)]
)
trans_cols = [item for sublist in trans_cols for item in sublist]
trans_df.columns = trans_cols
model_df = all_results['ModelParameters'].apply(json.loads)
try:
    model_df = pd.json_normalize(model_df)
except Exception:
    model_df = pd.io.json.json_normalize(model_df)
import os
import sys
import time
import sqlite3
import warnings
import pythoncom
import numpy as np
import pandas as pd
from PyQt5 import QtWidgets
from PyQt5.QAxContainer import QAxWidget
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.static import *
from utility.setting import *
warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
class UpdaterTickKiwoom:
def __init__(self, windowQ, queryQ, tickQ):
self.windowQ = windowQ
self.queryQ = queryQ
self.tickQ = tickQ
self.dict_df = {}
self.time_info = now()
self.str_tday = strf_time('%Y%m%d')
self.Start()
def Start(self):
while True:
tick = self.tickQ.get()
if len(tick) != 2:
self.UpdateTickData(tick[0], tick[1], tick[2], tick[3], tick[4], tick[5], tick[6], tick[7],
tick[8], tick[9], tick[10], tick[11], tick[12], tick[13], tick[14],
tick[15], tick[16], tick[17], tick[18], tick[19], tick[20])
elif tick[0] == '틱데이터저장':
self.PutTickData(tick[1])
def UpdateTickData(self, code, c, o, h, low, per, dm, ch, vp, vitime, vid5,
s1jr, s2jr, b1jr, b2jr, s1hg, s2hg, b1hg, b2hg, d, receiv_time):
try:
hlm = int(round((h + low) / 2))
hlmp = round((c / hlm - 1) * 100, 2)
except ZeroDivisionError:
return
d = self.str_tday + d
if code not in self.dict_df.keys():
self.dict_df[code] = pd.DataFrame(
[[c, o, h, per, hlmp, dm, dm, ch, vp, vitime, vid5, s1jr, s2jr, b1jr, b2jr, s1hg, s2hg, b1hg, b2hg]],
columns=['현재가', '시가', '고가', '등락율', '고저평균대비등락율', '거래대금', '누적거래대금', '체결강도',
'전일거래량대비', 'VI발동시간', '상승VID5가격', '매도호가2', '매도호가1', '매수호가1', '매수호가2',
'매도잔량2', '매도잔량1', '매수잔량1', '매수잔량2'],
index=[d])
else:
sm = int(dm - self.dict_df[code]['누적거래대금'][-1])
self.dict_df[code].at[d] = \
c, o, h, per, hlmp, sm, dm, ch, vp, vitime, vid5, s1jr, s2jr, b1jr, b2jr, s1hg, s2hg, b1hg, b2hg
if now() > self.time_info:
self.UpdateInfo(receiv_time)
self.time_info = timedelta_sec(60)
def UpdateInfo(self, receiv_time):
gap = (now() - receiv_time).total_seconds()
self.windowQ.put([ui_num['S단순텍스트'], f'수신시간과 갱신시간의 차이는 [{gap}]초입니다.'])
def PutTickData(self, codes):
for code in list(self.dict_df.keys()):
if code in codes:
columns = ['현재가', '시가', '고가', '거래대금', '누적거래대금', '상승VID5가격',
'매도호가2', '매도호가1', '매수호가1', '매수호가2', '매도잔량2', '매도잔량1', '매수잔량1', '매수잔량2']
self.dict_df[code][columns] = self.dict_df[code][columns].astype(int)
else:
del self.dict_df[code]
self.queryQ.put([3, self.dict_df])
sys.exit()
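# Sketch of the tickQ messages consumed in Start() above (documentation only, not
# executed); the field order follows UpdateTickData's signature:
#   tick message (21 items): [code, c, o, h, low, per, dm, ch, vp, vitime, vid5,
#                             s1jr, s2jr, b1jr, b2jr, s1hg, s2hg, b1hg, b2hg, d, receiv_time]
#   shutdown message (2 items): ['틱데이터저장', codes]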
class CollectorTickKiwoom:
app = QtWidgets.QApplication(sys.argv)
def __init__(self, windowQ, collectorQ, soundQ, queryQ, teleQ,
tick1Q, tick2Q, tick3Q, tick4Q, tick5Q, tick6Q, tick7Q, tick8Q):
self.windowQ = windowQ
self.collectorQ = collectorQ
self.soundQ = soundQ
self.queryQ = queryQ
self.teleQ = teleQ
self.tick1Q = tick1Q
self.tick2Q = tick2Q
self.tick3Q = tick3Q
self.tick4Q = tick4Q
self.tick5Q = tick5Q
self.tick6Q = tick6Q
self.tick7Q = tick7Q
self.tick8Q = tick8Q
self.dict_code = {
'틱0': [],
'틱1': [],
'틱2': [],
'틱3': [],
'틱4': [],
'틱5': [],
'틱6': [],
'틱7': [],
'틱8': []
}
self.dict_bool = {
    '로그인': False,       # polled in CommConnect before the login event fires
    '알림소리': False,
    'TR수신': False,
    'TR다음': False,
    'CD수신': False,
    'CR수신': False,
    '틱데이터저장': False,
    'DB저장': False
}
self.dict_intg = {'장운영상태': 1}
self.df_mt = pd.DataFrame(columns=['거래대금상위100'])
self.df_tr = None
self.dict_item = None
self.dict_vipr = {}
self.dict_tick = {}
self.dict_cond = {}
self.name_code = {}
self.list_code = []
self.list_trcd = []
self.list_kosd = None
self.time_mtop = now()
self.str_trname = None
self.str_tday = strf_time('%Y%m%d')
self.str_jcct = self.str_tday + '090000'
remaintime = (strp_time('%Y%m%d%H%M%S', self.str_tday + '090100') - now()).total_seconds()
exittime = timedelta_sec(remaintime) if remaintime > 0 else timedelta_sec(600)
self.dict_time = {'휴무종료': exittime}
self.ocx = QAxWidget('KHOPENAPI.KHOpenAPICtrl.1')
self.ocx.OnEventConnect.connect(self.OnEventConnect)
self.ocx.OnReceiveTrData.connect(self.OnReceiveTrData)
self.ocx.OnReceiveRealData.connect(self.OnReceiveRealData)
self.ocx.OnReceiveTrCondition.connect(self.OnReceiveTrCondition)
self.ocx.OnReceiveConditionVer.connect(self.OnReceiveConditionVer)
self.ocx.OnReceiveRealCondition.connect(self.OnReceiveRealCondition)
self.Start()
def Start(self):
self.CommConnect()
self.EventLoop()
def CommConnect(self):
self.ocx.dynamicCall('CommConnect()')
while not self.dict_bool['로그인']:
pythoncom.PumpWaitingMessages()
self.dict_bool['CD수신'] = False
self.ocx.dynamicCall('GetConditionLoad()')
while not self.dict_bool['CD수신']:
pythoncom.PumpWaitingMessages()
self.list_kosd = self.GetCodeListByMarket('10')
list_code = self.GetCodeListByMarket('0') + self.list_kosd
df = pd.DataFrame(columns=['종목명'])
for code in list_code:
name = self.GetMasterCodeName(code)
df.at[code] = name
self.name_code[name] = code
self.queryQ.put([3, df, 'codename', 'replace'])
data = self.ocx.dynamicCall('GetConditionNameList()')
conditions = data.split(';')[:-1]
for condition in conditions:
cond_index, cond_name = condition.split('^')
self.dict_cond[int(cond_index)] = cond_name
con = sqlite3.connect(db_setting)
df = pd.read_sql('SELECT * FROM stock', con)
df = df.set_index('index')
self.dict_bool['알림소리'] = df['알림소리'][0]
con.close()
self.windowQ.put([ui_num['S단순텍스트'], '시스템 명령 실행 알림 - OpenAPI 로그인 완료'])
def EventLoop(self):
self.OperationRealreg()
self.ViRealreg()
int_time = int(strf_time('%H%M%S'))
while True:
if not self.collectorQ.empty():
work = self.collectorQ.get()
if type(work) == list:
self.UpdateRealreg(work)
elif type(work) == str:
self.RunWork(work)
if self.dict_intg['장운영상태'] == 1 and now() > self.dict_time['휴무종료']:
break
if self.dict_intg['장운영상태'] == 3:
if int_time < stock_init_time <= int(strf_time('%H%M%S')):
self.ConditionSearchStart()
if int_time < stock_exit_time + 100 <= int(strf_time('%H%M%S')):
self.ConditionSearchStop()
self.RemoveRealreg()
self.SaveDatabase()
break
if now() > self.time_mtop:
if len(self.df_mt) > 0:
self.UpdateMoneyTop()
self.time_mtop = timedelta_sec(+1)
time_loop = timedelta_sec(0.25)
while now() < time_loop:
pythoncom.PumpWaitingMessages()
time.sleep(0.0001)
int_time = int(strf_time('%H%M%S'))
self.windowQ.put([ui_num['S단순텍스트'], '시스템 명령 실행 알림 - 콜렉터를 종료합니다.'])
if self.dict_bool['알림소리']:
self.soundQ.put('주식 콜렉터를 종료합니다.')
self.teleQ.put('주식 콜렉터를 종료하였습니다.')
sys.exit()
def UpdateRealreg(self, rreg):
sn = rreg[0]
if len(rreg) == 2:
self.ocx.dynamicCall('SetRealRemove(QString, QString)', rreg)
self.windowQ.put([ui_num['S단순텍스트'], f'실시간 알림 중단 완료 - 모든 실시간 데이터 수신 중단'])
elif len(rreg) == 4:
ret = self.ocx.dynamicCall('SetRealReg(QString, QString, QString, QString)', rreg)
result = '완료' if ret == 0 else '실패'
if sn == sn_oper:
self.windowQ.put([ui_num['S단순텍스트'], f'실시간 알림 등록 {result} - 장운영시간 [{sn}]'])
else:
self.windowQ.put([ui_num['S단순텍스트'], f"실시간 알림 등록 {result} - [{sn}] 종목갯수 {len(rreg[1].split(';'))}"])
def RunWork(self, work):
if work == '틱데이터 저장 완료':
self.dict_bool['틱데이터저장'] = True
def OperationRealreg(self):
self.collectorQ.put([sn_oper, ' ', '215;20;214', 0])
self.dict_code['틱0'] = self.SendCondition(sn_oper, self.dict_cond[1], 1, 0)
self.dict_code['틱1'] = [code for i, code in enumerate(self.dict_code['틱0']) if i % 8 == 0]
self.dict_code['틱2'] = [code for i, code in enumerate(self.dict_code['틱0']) if i % 8 == 1]
self.dict_code['틱3'] = [code for i, code in enumerate(self.dict_code['틱0']) if i % 8 == 2]
self.dict_code['틱4'] = [code for i, code in enumerate(self.dict_code['틱0']) if i % 8 == 3]
self.dict_code['틱5'] = [code for i, code in enumerate(self.dict_code['틱0']) if i % 8 == 4]
self.dict_code['틱6'] = [code for i, code in enumerate(self.dict_code['틱0']) if i % 8 == 5]
self.dict_code['틱7'] = [code for i, code in enumerate(self.dict_code['틱0']) if i % 8 == 6]
self.dict_code['틱8'] = [code for i, code in enumerate(self.dict_code['틱0']) if i % 8 == 7]
k = 0
for i in range(0, len(self.dict_code['틱0']), 100):
self.collectorQ.put([sn_jchj + k, ';'.join(self.dict_code['틱0'][i:i + 100]),
'10;12;14;30;228;41;61;71;81', 1])
k += 1
def ViRealreg(self):
self.Block_Request('opt10054', 시장구분='000', 장전구분='1', 종목코드='', 발동구분='1', 제외종목='111111011',
거래량구분='0', 거래대금구분='0', 발동방향='0', output='발동종목', next=0)
self.windowQ.put([ui_num['S단순텍스트'], '시스템 명령 실행 알림 - VI발동해제 등록 완료'])
self.windowQ.put([ui_num['S단순텍스트'], '시스템 명령 실행 알림 - 시스템 시작 완료'])
if self.dict_bool['알림소리']:
self.soundQ.put('주식 콜렉터를 시작하였습니다.')
def ConditionSearchStart(self):
self.list_code = self.SendCondition(sn_cond, self.dict_cond[0], 0, 1)
self.df_mt.at[self.str_tday + '090000'] = ';'.join(self.list_code)
def ConditionSearchStop(self):
self.ocx.dynamicCall("SendConditionStop(QString, QString, int)", sn_cond, self.dict_cond[0], 0)
def RemoveRealreg(self):
self.collectorQ.put(['ALL', 'ALL'])
self.windowQ.put([ui_num['S단순텍스트'], '시스템 명령 실행 알림 - 실시간 데이터 중단 완료'])
def SaveDatabase(self):
self.queryQ.put([3, self.df_mt, 'moneytop', 'append'])
con = sqlite3.connect(db_tradelist)
df = pd.read_sql(f"SELECT * FROM tradelist WHERE 체결시간 LIKE '{self.str_tday}%'", con)
con.close()
df = df.set_index('index')
codes = []
for index in df.index:
code = self.name_code[df['종목명'][index]]
if code not in codes:
codes.append(code)
self.tick1Q.put(['틱데이터저장', codes])
self.tick2Q.put(['틱데이터저장', codes])
self.tick3Q.put(['틱데이터저장', codes])
self.tick4Q.put(['틱데이터저장', codes])
self.tick5Q.put(['틱데이터저장', codes])
self.tick6Q.put(['틱데이터저장', codes])
self.tick7Q.put(['틱데이터저장', codes])
self.tick8Q.put(['틱데이터저장', codes])
self.dict_bool['DB저장'] = True
def OnEventConnect(self, err_code):
if err_code == 0:
self.dict_bool['로그인'] = True
def OnReceiveConditionVer(self, ret, msg):
if msg == '':
return
if ret == 1:
self.dict_bool['CD수신'] = True
def OnReceiveTrCondition(self, screen, code_list, cond_name, cond_index, nnext):
if screen == "" and cond_name == "" and cond_index == "" and nnext == "":
return
codes = code_list.split(';')[:-1]
self.list_trcd = codes
self.dict_bool['CR수신'] = True
def OnReceiveRealCondition(self, code, IorD, cname, cindex):
if cname == "":
return
if IorD == "I" and cindex == "0" and code not in self.list_code:
self.list_code.append(code)
elif IorD == "D" and cindex == "0" and code in self.list_code:
self.list_code.remove(code)
def OnReceiveRealData(self, code, realtype, realdata):
if realdata == '':
return
if realtype == '장시작시간':
if self.dict_intg['장운영상태'] == 8:
return
try:
self.dict_intg['장운영상태'] = int(self.GetCommRealData(code, 215))
current = self.GetCommRealData(code, 20)
remain = self.GetCommRealData(code, 214)
except Exception as e:
self.windowQ.put([ui_num['S단순텍스트'], f'OnReceiveRealData 장시작시간 {e}'])
else:
self.windowQ.put([ui_num['S단순텍스트'], f"장운영 시간 수신 알림 - {self.dict_intg['장운영상태']} "
f'{current[:2]}:{current[2:4]}:{current[4:]} '
f'남은시간 {remain[:2]}:{remain[2:4]}:{remain[4:]}'])
elif realtype == 'VI발동/해제':
try:
code = self.GetCommRealData(code, 9001).strip('A').strip('Q')
gubun = self.GetCommRealData(code, 9068)
name = self.GetMasterCodeName(code)
except Exception as e:
self.windowQ.put([ui_num['S단순텍스트'], f'OnReceiveRealData VI발동/해제 {e}'])
else:
if gubun == '1' and code in self.dict_code['틱0'] and \
(code not in self.dict_vipr.keys() or
(self.dict_vipr[code][0] and now() > self.dict_vipr[code][1])):
self.UpdateViPriceDown5(code, name)
elif realtype == '주식체결':
try:
d = self.GetCommRealData(code, 20)
except Exception as e:
self.windowQ.put([ui_num['S단순텍스트'], f'OnReceiveRealData 주식체결 {e}'])
else:
if d != self.str_jcct[8:]:
self.str_jcct = self.str_tday + d
try:
c = abs(int(self.GetCommRealData(code, 10)))
o = abs(int(self.GetCommRealData(code, 16)))
except Exception as e:
self.windowQ.put([ui_num['S단순텍스트'], f'OnReceiveRealData 주식체결 {e}'])
else:
if code not in self.dict_vipr.keys():
self.InsertViPriceDown5(code, o)
if code in self.dict_vipr.keys() and not self.dict_vipr[code][0] and now() > self.dict_vipr[code][1]:
self.UpdateViPriceDown5(code, c)
if code in self.dict_tick.keys() and d == self.dict_tick[code][0]:
return
try:
h = abs(int(self.GetCommRealData(code, 17)))
low = abs(int(self.GetCommRealData(code, 18)))
per = float(self.GetCommRealData(code, 12))
dm = int(self.GetCommRealData(code, 14))
ch = float(self.GetCommRealData(code, 228))
vp = abs(float(self.GetCommRealData(code, 30)))
except Exception as e:
self.windowQ.put([ui_num['S단순텍스트'], f'OnReceiveRealData 주식체결 {e}'])
else:
self.UpdateTickData(code, c, o, h, low, per, dm, ch, vp, d)
elif realtype == '주식호가잔량':
try:
s1jr = int(self.GetCommRealData(code, 61))
s2jr = int(self.GetCommRealData(code, 62))
b1jr = int(self.GetCommRealData(code, 71))
b2jr = int(self.GetCommRealData(code, 72))
s1hg = abs(int(self.GetCommRealData(code, 41)))
s2hg = abs(int(self.GetCommRealData(code, 42)))
b1hg = abs(int(self.GetCommRealData(code, 51)))
b2hg = abs(int(self.GetCommRealData(code, 52)))
except Exception as e:
self.windowQ.put([ui_num['S단순텍스트'], f'OnReceiveRealData 주식호가잔량 {e}'])
else:
self.UpdateHoga(code, s1jr, s2jr, b1jr, b2jr, s1hg, s2hg, b1hg, b2hg)
def InsertViPriceDown5(self, code, o):
vid5 = self.GetVIPriceDown5(code, o)
self.dict_vipr[code] = [True, timedelta_sec(-180), vid5]
def GetVIPriceDown5(self, code, std_price):
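# Returns the price five tick ("hoga") units below the VI trigger level, i.e. the
# reference price +10% rounded up to the next tick unit (descriptive comment added).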
vi = std_price * 1.1
x = self.GetHogaunit(code, vi)
if vi % x != 0:
vi = vi + (x - vi % x)
return int(vi - x * 5)
def GetHogaunit(self, code, price):
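# KRX price tick ("hoga") unit table; note that for codes in self.list_kosd (KOSDAQ)
# the tick stays at 100 won for any price of 50,000 won or more.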
if price < 1000:
x = 1
elif 1000 <= price < 5000:
x = 5
elif 5000 <= price < 10000:
x = 10
elif 10000 <= price < 50000:
x = 50
elif code in self.list_kosd:
x = 100
elif 50000 <= price < 100000:
x = 100
elif 100000 <= price < 500000:
x = 500
else:
x = 1000
return x
def UpdateViPriceDown5(self, code, key):
if type(key) == str:
if code in self.dict_vipr.keys():
self.dict_vipr[code][0] = False
self.dict_vipr[code][1] = timedelta_sec(5)
else:
self.dict_vipr[code] = [False, timedelta_sec(5), 0]
elif type(key) == int:
vid5 = self.GetVIPriceDown5(code, key)
self.dict_vipr[code] = [True, timedelta_sec(5), vid5]
def UpdateTickData(self, code, c, o, h, low, per, dm, ch, vp, d):
vitime = strf_time('%Y%m%d%H%M%S', self.dict_vipr[code][1])
vi = self.dict_vipr[code][2]
try:
s1jr, s2jr, b1jr, b2jr, s1hg, s2hg, b1hg, b2hg = self.dict_tick[code][1:]
self.dict_tick[code][0] = d
except KeyError:
s1jr, s2jr, b1jr, b2jr, s1hg, s2hg, b1hg, b2hg = 0, 0, 0, 0, 0, 0, 0, 0
self.dict_tick[code] = [d, 0, 0, 0, 0, 0, 0, 0, 0]
data = [code, c, o, h, low, per, dm, ch, vp, vitime, vi,
s1jr, s2jr, b1jr, b2jr, s1hg, s2hg, b1hg, b2hg, d, now()]
if code in self.dict_code['틱1']:
self.tick1Q.put(data)
elif code in self.dict_code['틱2']:
self.tick2Q.put(data)
elif code in self.dict_code['틱3']:
self.tick3Q.put(data)
elif code in self.dict_code['틱4']:
self.tick4Q.put(data)
elif code in self.dict_code['틱5']:
self.tick5Q.put(data)
elif code in self.dict_code['틱6']:
self.tick6Q.put(data)
elif code in self.dict_code['틱7']:
self.tick7Q.put(data)
elif code in self.dict_code['틱8']:
self.tick8Q.put(data)
def UpdateHoga(self, code, s1jr, s2jr, b1jr, b2jr, s1hg, s2hg, b1hg, b2hg):
try:
d = self.dict_tick[code][0]
except KeyError:
d = '090000'
self.dict_tick[code] = [d, s1jr, s2jr, b1jr, b2jr, s1hg, s2hg, b1hg, b2hg]
def UpdateMoneyTop(self):
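# Back-fills up to two missing seconds of the money-top series with the current
# condition-search code list before stamping the current timestamp, keeping the
# per-second index gap-free.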
timetype = '%Y%m%d%H%M%S'
list_text = ';'.join(self.list_code)
curr_datetime = strp_time(timetype, self.str_jcct)
last_datetime = strp_time(timetype, self.df_mt.index[-1])
gap_seconds = (curr_datetime - last_datetime).total_seconds()
pre_time2 = strf_time(timetype, timedelta_sec(-2, curr_datetime))
pre_time1 = strf_time(timetype, timedelta_sec(-1, curr_datetime))
if 1 <= gap_seconds < 2:
self.df_mt.at[pre_time1] = list_text
elif 2 <= gap_seconds < 3:
self.df_mt.at[pre_time2] = list_text
self.df_mt.at[pre_time1] = list_text
self.df_mt.at[self.str_jcct] = list_text
def OnReceiveTrData(self, screen, rqname, trcode, record, nnext):
if screen == '' and record == '':
return
items = None
self.dict_bool['TR다음'] = True if nnext == '2' else False
for output in self.dict_item['output']:
record = list(output.keys())[0]
items = list(output.values())[0]
if record == self.str_trname:
break
rows = self.ocx.dynamicCall('GetRepeatCnt(QString, QString)', trcode, rqname)
if rows == 0:
rows = 1
df2 = []
for row in range(rows):
row_data = []
for item in items:
data = self.ocx.dynamicCall('GetCommData(QString, QString, int, QString)', trcode, rqname, row, item)
row_data.append(data.strip())
df2.append(row_data)
df = pd.DataFrame(data=df2, columns=items)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path to the CSV file containing the time series data values
--outFile: Path to the INI configuration file to be written from those values
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.abspath(inputFileName)
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None, None, None)  # match the normal four-value return signature
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame = analysisFrame.apply(pandas.to_numeric, errors='coerce')  # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if pandas.api.types.is_string_dtype(analysisFrame[columnName]):  # only build unique lists for string-like columns
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
for (columnName, columnData) in analysisFrame.items():  # iterate (name, Series) pairs, not bare column labels
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
Calculates the standard deviation in a multiplication method since division produces an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(0.0)  # accumulator for the squared deviations (was seeded with the mean by mistake)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if numpy.isnan(meanValue):  # '== numpy.nan' is always False, so test with isnan
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
logicVector = (dataAnalysisFrame[columnName] >= 1)  # filter the original frame; the cleaned frame is still None here
dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.inf, -numpy.inf, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": pandas.StringDtype(),
"bitErrBucketArray40": pandas.StringDtype(),
"bitErrBucketArray41": pandas.StringDtype(),
"bitErrBucketArray42": pandas.StringDtype(),
"bitErrBucketArray43": pandas.StringDtype(),
"bitErrBucketArray44": pandas.StringDtype(),
"bitErrBucketArray45": pandas.StringDtype(),
"bitErrBucketArray46": pandas.StringDtype(),
"bitErrBucketArray47": pandas.StringDtype(),
"bitErrBucketArray48": pandas.StringDtype(),
"bitErrBucketArray49": pandas.StringDtype(),
"bitErrBucketArray50": pandas.StringDtype(),
"bitErrBucketArray51": pandas.StringDtype(),
"bitErrBucketArray52": pandas.StringDtype(),
"bitErrBucketArray53": pandas.StringDtype(),
"bitErrBucketArray54": pandas.StringDtype(),
"bitErrBucketArray55": pandas.StringDtype(),
"bitErrBucketArray56": pandas.StringDtype(),
"bitErrBucketArray57": pandas.StringDtype(),
"bitErrBucketArray58": pandas.StringDtype(),
"bitErrBucketArray59": pandas.StringDtype(),
"bitErrBucketArray60": pandas.StringDtype(),
"bitErrBucketArray61": pandas.StringDtype(),
"bitErrBucketArray62": pandas.StringDtype(),
"bitErrBucketArray63": pandas.StringDtype(),
"bitErrBucketArray64": pandas.StringDtype(),
"bitErrBucketArray65": pandas.StringDtype(),
"bitErrBucketArray66": pandas.StringDtype(),
"bitErrBucketArray67": pandas.StringDtype(),
"bitErrBucketArray68": pandas.StringDtype(),
"bitErrBucketArray69": pandas.StringDtype(),
"bitErrBucketArray70": pandas.StringDtype(),
"bitErrBucketArray71": pandas.StringDtype(),
"bitErrBucketArray72": pandas.StringDtype(),
"bitErrBucketArray73": pandas.StringDtype(),
"bitErrBucketArray74": pandas.StringDtype(),
"bitErrBucketArray75": pandas.StringDtype(),
"bitErrBucketArray76": pandas.StringDtype(),
"bitErrBucketArray77": pandas.StringDtype(),
"bitErrBucketArray78": pandas.StringDtype(),
"bitErrBucketArray79": pandas.StringDtype(),
"bitErrBucketArray80": pandas.StringDtype(),
"mrr_successDistribution1": pandas.StringDtype(),
"mrr_successDistribution2": pandas.StringDtype(),
"mrr_successDistribution3": pandas.StringDtype(),
"mrr_successDistribution4": pandas.StringDtype(),
"mrr_successDistribution5": pandas.StringDtype(),
"mrr_successDistribution6": pandas.StringDtype(),
"mrr_successDistribution7": pandas.StringDtype(),
"mrr_successDistribution8": pandas.StringDtype(),
"mrr_successDistribution9": pandas.StringDtype(),
"mrr_successDistribution10": pandas.StringDtype(),
"mrr_successDistribution11": pandas.StringDtype(),
"mrr_successDistribution12": pandas.StringDtype(),
"mrr_successDistribution13": pandas.StringDtype(),
"mrr_successDistribution14": pandas.StringDtype(),
"mrr_successDistribution15": pandas.StringDtype(),
"mrr_successDistribution16": pandas.StringDtype(),
"mrr_successDistribution17": pandas.StringDtype(),
"mrr_successDistribution18": pandas.StringDtype(),
"mrr_successDistribution19": pandas.StringDtype(),
"mrr_successDistribution20": pandas.StringDtype(),
"mrr_successDistribution21": pandas.StringDtype(),
"mrr_successDistribution22": pandas.StringDtype(),
"mrr_successDistribution23": pandas.StringDtype(),
"mrr_successDistribution24": pandas.StringDtype(),
"mrr_successDistribution25": pandas.StringDtype(),
"mrr_successDistribution26": pandas.StringDtype(),
"mrr_successDistribution27": pandas.StringDtype(),
"mrr_successDistribution28": pandas.StringDtype(),
"mrr_successDistribution29": pandas.StringDtype(),
"mrr_successDistribution30": pandas.StringDtype(),
"mrr_successDistribution31": pandas.StringDtype(),
"mrr_successDistribution32": pandas.StringDtype(),
"mrr_successDistribution33": pandas.StringDtype(),
"mrr_successDistribution34": pandas.StringDtype(),
"mrr_successDistribution35": pandas.StringDtype(),
"mrr_successDistribution36": pandas.StringDtype(),
"mrr_successDistribution37": pandas.StringDtype(),
"mrr_successDistribution38": pandas.StringDtype(),
"mrr_successDistribution39": pandas.StringDtype(),
"mrr_successDistribution40": pandas.StringDtype(),
"mrr_successDistribution41": pandas.StringDtype(),
"mrr_successDistribution42": pandas.StringDtype(),
"mrr_successDistribution43": pandas.StringDtype(),
"mrr_successDistribution44": pandas.StringDtype(),
"mrr_successDistribution45": pandas.StringDtype(),
"mrr_successDistribution46": pandas.StringDtype(),
"mrr_successDistribution47": pandas.StringDtype(),
"mrr_successDistribution48": pandas.StringDtype(),
"mrr_successDistribution49": pandas.StringDtype(),
"mrr_successDistribution50": pandas.StringDtype(),
"mrr_successDistribution51": pandas.StringDtype(),
"mrr_successDistribution52": pandas.StringDtype(),
"mrr_successDistribution53": pandas.StringDtype(),
"mrr_successDistribution54": pandas.StringDtype(),
"mrr_successDistribution55": pandas.StringDtype(),
"mrr_successDistribution56": pandas.StringDtype(),
"mrr_successDistribution57": pandas.StringDtype(),
"mrr_successDistribution58": pandas.StringDtype(),
"mrr_successDistribution59": pandas.StringDtype(),
"mrr_successDistribution60": pandas.StringDtype(),
"mrr_successDistribution61": pandas.StringDtype(),
"mrr_successDistribution62": pandas.StringDtype(),
"mrr_successDistribution63": pandas.StringDtype(),
"mrr_successDistribution64": pandas.StringDtype(),
"blDowngradeCount": pandas.StringDtype(),
"snapReads": pandas.StringDtype(),
"pliCapTestTime": pandas.StringDtype(),
"currentTimeToFreeSpaceRecovery": pandas.StringDtype(),
"worstTimeToFreeSpaceRecovery": pandas.StringDtype(),
"rspnandReads": pandas.StringDtype(),
"cachednandReads": pandas.StringDtype(),
"spnandReads": pandas.StringDtype(),
"dpnandReads": pandas.StringDtype(),
"qpnandReads": pandas.StringDtype(),
"verifynandReads": pandas.StringDtype(),
"softnandReads": pandas.StringDtype(),
"spnandWrites": pandas.StringDtype(),
"dpnandWrites": pandas.StringDtype(),
"qpnandWrites": pandas.StringDtype(),
"opnandWrites": pandas.StringDtype(),
"xpnandWrites": pandas.StringDtype(),
"unalignedHostWriteCmd": pandas.StringDtype(),
"randomReadCmd": pandas.StringDtype(),
"randomWriteCmd": pandas.StringDtype(),
"secVenCmdCount": pandas.StringDtype(),
"secVenCmdCountFails": pandas.StringDtype(),
"mrrFailOnSlcOtfPages": pandas.StringDtype(),
"mrrFailOnSlcOtfPageMarkedAsMBPD": pandas.StringDtype(),
"lcorParitySeedErrors": pandas.StringDtype(),
"fwDownloadFails": pandas.StringDtype(),
"fwAuthenticationFails": pandas.StringDtype(),
"fwSecurityRev": pandas.StringDtype(),
"isCapacitorHealthly": pandas.StringDtype(),
"fwWRCounter": pandas.StringDtype(),
"sysAreaEraseFailCount": pandas.StringDtype(),
"iusDefragRelocated4DataRetention": pandas.StringDtype(),
"I2CTemp": pandas.StringDtype(),
"lbaMismatchOnNandReads": pandas.StringDtype(),
"currentWriteStreamsCount": pandas.StringDtype(),
"nandWritesPerStream1": pandas.StringDtype(),
"nandWritesPerStream2": pandas.StringDtype(),
"nandWritesPerStream3": pandas.StringDtype(),
"nandWritesPerStream4": pandas.StringDtype(),
"nandWritesPerStream5": pandas.StringDtype(),
"nandWritesPerStream6": pandas.StringDtype(),
"nandWritesPerStream7": pandas.StringDtype(),
"nandWritesPerStream8": pandas.StringDtype(),
"nandWritesPerStream9": pandas.StringDtype(),
"nandWritesPerStream10": pandas.StringDtype(),
"nandWritesPerStream11": pandas.StringDtype(),
"nandWritesPerStream12": pandas.StringDtype(),
"nandWritesPerStream13": pandas.StringDtype(),
"nandWritesPerStream14": pandas.StringDtype(),
"nandWritesPerStream15": pandas.StringDtype(),
"nandWritesPerStream16": pandas.StringDtype(),
"nandWritesPerStream17": pandas.StringDtype(),
"nandWritesPerStream18": pandas.StringDtype(),
"nandWritesPerStream19": pandas.StringDtype(),
"nandWritesPerStream20": pandas.StringDtype(),
"nandWritesPerStream21": pandas.StringDtype(),
"nandWritesPerStream22": pandas.StringDtype(),
"nandWritesPerStream23": pandas.StringDtype(),
"nandWritesPerStream24": pandas.StringDtype(),
"nandWritesPerStream25": pandas.StringDtype(),
"nandWritesPerStream26": pandas.StringDtype(),
"nandWritesPerStream27": pandas.StringDtype(),
"nandWritesPerStream28": pandas.StringDtype(),
"nandWritesPerStream29": pandas.StringDtype(),
"nandWritesPerStream30": pandas.StringDtype(),
"nandWritesPerStream31": pandas.StringDtype(),
"nandWritesPerStream32": pandas.StringDtype(),
"hostSoftReadSuccess": pandas.StringDtype(),
"xorInvokedCount": pandas.StringDtype(),
"comresets": pandas.StringDtype(),
"syncEscapes": pandas.StringDtype(),
"rErrHost": pandas.StringDtype(),
"rErrDevice": pandas.StringDtype(),
"iCrcs": pandas.StringDtype(),
"linkSpeedDrops": pandas.StringDtype(),
"mrrXtrapageEvents": pandas.StringDtype(),
"mrrToppageEvents": pandas.StringDtype(),
"hostXorSuccessCount": pandas.StringDtype(),
"hostXorFailCount": pandas.StringDtype(),
"nandWritesWithPreReadPerStream1": pandas.StringDtype(),
"nandWritesWithPreReadPerStream2": pandas.StringDtype(),
"nandWritesWithPreReadPerStream3": pandas.StringDtype(),
"nandWritesWithPreReadPerStream4": pandas.StringDtype(),
"nandWritesWithPreReadPerStream5": pandas.StringDtype(),
"nandWritesWithPreReadPerStream6": pandas.StringDtype(),
"nandWritesWithPreReadPerStream7": pandas.StringDtype(),
"nandWritesWithPreReadPerStream8": pandas.StringDtype(),
"nandWritesWithPreReadPerStream9": pandas.StringDtype(),
"nandWritesWithPreReadPerStream10": pandas.StringDtype(),
"nandWritesWithPreReadPerStream11": pandas.StringDtype(),
"nandWritesWithPreReadPerStream12": pandas.StringDtype(),
"nandWritesWithPreReadPerStream13": pandas.StringDtype(),
"nandWritesWithPreReadPerStream14": pandas.StringDtype(),
"nandWritesWithPreReadPerStream15": pandas.StringDtype(),
"nandWritesWithPreReadPerStream16": pandas.StringDtype(),
"nandWritesWithPreReadPerStream17": pandas.StringDtype(),
"nandWritesWithPreReadPerStream18": pandas.StringDtype(),
"nandWritesWithPreReadPerStream19": pandas.StringDtype(),
"nandWritesWithPreReadPerStream20": pandas.StringDtype(),
"nandWritesWithPreReadPerStream21": pandas.StringDtype(),
"nandWritesWithPreReadPerStream22": pandas.StringDtype(),
"nandWritesWithPreReadPerStream23": pandas.StringDtype(),
"nandWritesWithPreReadPerStream24": pandas.StringDtype(),
"nandWritesWithPreReadPerStream25": pandas.StringDtype(),
"nandWritesWithPreReadPerStream26": pandas.StringDtype(),
"nandWritesWithPreReadPerStream27": pandas.StringDtype(),
"nandWritesWithPreReadPerStream28": pandas.StringDtype(),
"nandWritesWithPreReadPerStream29": pandas.StringDtype(),
"nandWritesWithPreReadPerStream30": pandas.StringDtype(),
"nandWritesWithPreReadPerStream31": pandas.StringDtype(),
"nandWritesWithPreReadPerStream32": pandas.StringDtype(),
"dramCorrectables8to1": pandas.StringDtype(),
"driveRecoveryCount": pandas.StringDtype(),
"mprLiteReads": pandas.StringDtype(),
"eccErrOnMprLiteReads": pandas.StringDtype(),
"readForwardingXpPreReadCount": pandas.StringDtype(),
"readForwardingUpPreReadCount": pandas.StringDtype(),
"readForwardingLpPreReadCount": pandas.StringDtype(),
"pweDefectCompensationCredit": pandas.StringDtype(),
"planarXorRebuildFailure": pandas.StringDtype(),
"itgXorRebuildFailure": pandas.StringDtype(),
"planarXorRebuildSuccess": pandas.StringDtype(),
"itgXorRebuildSuccess": pandas.StringDtype(),
"xorLoggingSkippedSIcBand": pandas.StringDtype(),
"xorLoggingSkippedDieOffline": pandas.StringDtype(),
"xorLoggingSkippedDieAbsent": pandas.StringDtype(),
"xorLoggingSkippedBandErased": pandas.StringDtype(),
"xorLoggingSkippedNoEntry": pandas.StringDtype(),
"xorAuditSuccess": pandas.StringDtype(),
"maxSuspendCount": pandas.StringDtype(),
"suspendLimitPerPrgm": pandas.StringDtype(),
"psrCountStats": pandas.StringDtype(),
"readNandBuffCount": pandas.StringDtype(),
"readNandBufferRspErrorCount": pandas.StringDtype(),
"ddpNandWrites": pandas.StringDtype(),
"totalDeallocatedSectorsInCore": pandas.StringDtype(),
"prefetchHostReads": pandas.StringDtype(),
"hostReadtoDSMDCount": pandas.StringDtype(),
"hostWritetoDSMDCount": pandas.StringDtype(),
"snapReads4k": pandas.StringDtype(),
"snapReads8k": pandas.StringDtype(),
"snapReads16k": pandas.StringDtype(),
"xorLoggingTriggered": pandas.StringDtype(),
"xorLoggingAborted": pandas.StringDtype(),
"xorLoggingSkippedHistory": pandas.StringDtype(),
"deckDisturbRelocationUD": pandas.StringDtype(),
"deckDisturbRelocationMD": pandas.StringDtype(),
"deckDisturbRelocationLD": pandas.StringDtype(),
"bbdProactiveReadRetry": pandas.StringDtype(),
"statsRestoreRequired": pandas.StringDtype(),
"statsAESCount": pandas.StringDtype(),
"statsHESCount": pandas.StringDtype(),
"psrCountStats1": pandas.StringDtype(),
"psrCountStats2": pandas.StringDtype(),
"psrCountStats3": pandas.StringDtype(),
"psrCountStats4": pandas.StringDtype(),
"psrCountStats5": pandas.StringDtype(),
"psrCountStats6": pandas.StringDtype(),
"psrCountStats7": pandas.StringDtype(),
"psrCountStats8": pandas.StringDtype(),
"psrCountStats9": pandas.StringDtype(),
"psrCountStats10": pandas.StringDtype(),
"psrCountStats11": pandas.StringDtype(),
"psrCountStats12": pandas.StringDtype(),
"psrCountStats13": pandas.StringDtype(),
"psrCountStats14": pandas.StringDtype(),
"psrCountStats15": pandas.StringDtype(),
"psrCountStats16": pandas.StringDtype(),
"psrCountStats17": pandas.StringDtype(),
"psrCountStats18": pandas.StringDtype(),
"psrCountStats19": pandas.StringDtype(),
"psrCountStats20": pandas.StringDtype(),
"psrCountStats21": pandas.StringDtype(),
"psrCountStats22": pandas.StringDtype(),
"psrCountStats23": pandas.StringDtype(),
"psrCountStats24": pandas.StringDtype(),
"psrCountStats25": pandas.StringDtype(),
"psrCountStats26": pandas.StringDtype(),
"psrCountStats27": pandas.StringDtype(),
"psrCountStats28": pandas.StringDtype(),
"psrCountStats29": pandas.StringDtype(),
"psrCountStats30": pandas.StringDtype(),
"psrCountStats31": pandas.StringDtype(),
"psrCountStats32": pandas.StringDtype(),
"psrCountStats33": pandas.StringDtype(),
"psrCountStats34": pandas.StringDtype(),
"psrCountStats35": pandas.StringDtype(),
"psrCountStats36": pandas.StringDtype(),
"psrCountStats37": pandas.StringDtype(),
"psrCountStats38": pandas.StringDtype(),
"psrCountStats39": pandas.StringDtype(),
"psrCountStats40": pandas.StringDtype(),
"psrCountStats41": pandas.StringDtype(),
"psrCountStats42": pandas.StringDtype(),
"psrCountStats43": pandas.StringDtype(),
"psrCountStats44": pandas.StringDtype(),
"psrCountStats45": pandas.StringDtype(),
"psrCountStats46": pandas.StringDtype(),
"psrCountStatsHigh1": pandas.StringDtype(),
"psrCountStatsHigh2": pandas.StringDtype(),
"psrCountStatsHigh3": pandas.StringDtype(),
"psrCountStatsHigh4": pandas.StringDtype(),
"psrCountStatsHigh5": pandas.StringDtype(),
"psrCountStatsHigh6": pandas.StringDtype(),
"psrCountStatsHigh7": pandas.StringDtype(),
"psrCountStatsHigh8": pandas.StringDtype(),
"psrCountStatsHigh9": pandas.StringDtype(),
"psrCountStatsHigh10": pandas.StringDtype(),
"psrCountStatsHigh11": pandas.StringDtype(),
"psrCountStatsHigh12": pandas.StringDtype(),
"psrCountStatsHigh13": pandas.StringDtype(),
"psrCountStatsHigh14": | pandas.StringDtype() | pandas.StringDtype |
#!/usr/bin/env python
# encoding: utf-8
'''
editing.filter_known_snps
removes known SNPs (BED3) from a candidate list of editing sites (VCF).
@author: brian
@copyright: 2017 yeolab. All rights reserved.
@license: license
@contact: <EMAIL>
@deffield updated: 4-21-2017
'''
import sys
import os
import pandas as pd
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
__all__ = []
__version__ = 0.1
__date__ = '2016-07-13'
__updated__ = '2017-04-21'
DEBUG = 0
TESTRUN = 0
PROFILE = 0
class CLIError(Exception):
"""
Generic exception to raise and log different fatal errors.
"""
def __init__(self, msg):
super(CLIError, self).__init__(msg)
self.msg = "E: %s" % msg
def __str__(self):
return self.msg
def __unicode__(self):
return self.msg
def filter_known_snp(infile, known, outfile):
"""
Step 7: Remove known SNPs
We don't want to erroneously call editing sites that are known SNPs.
Filter these out with a BED3 file with known SNP locations.
Really just compares the 0-based position of the BED3 file with the
1-based position of the vcf file, and filters accordingly.
:param infile: basestring
file location of the input VCF file
:param known: basestring
file location of the BED3 file containing known SNPs
:param outfile: basestring
file location of the intended output VCF file
:return:
"""
# print("Filtering known SNPs: {}".format(infile))
o = open(outfile, 'w')
with open(infile, 'r') as f:
for line in f:
if line.startswith('#'):
o.write(line)
o.close()
names1 = [
'CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT',
'.'
]
eff_df = pd.read_table(infile, comment='#', names=names1,
dtype={'QUAL': str})
names2 = ['CHROM', 'START',
'POS'] # POS is the 1-based position of the SNP.
if known.endswith('.gz'):
snp_df = pd.read_table(known, compression='gzip', names=names2)
else:
snp_df = pd.read_table(known, names=names2)
snp_df['KNOWN'] = 1
joined = pd.merge(eff_df, snp_df, how='left', on=['CHROM', 'POS'])
#!/usr/bin/env python
# coding: utf-8
# TODO:
#
#
# R1
# - get the Nyquist plot axis dimensions issue when $k=1$ fixed
# - figure out the failing of .pz with active elements
#
#
# R2
# - make the frequency analysis stuff happen
#
# In[1]:
from skidl.pyspice import *
#can you say cheeky
import PySpice as pspice
#because it's written by a kiwi you know
import lcapy as kiwi
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sympy as sym
from scipy.signal import zpk2tf as scipy_zpk2tf
from IPython.display import YouTubeVideo, display
import traceback
import warnings
# In[2]:
#import dc code from parallel folder
import sys
sys.path.insert(1, '../DC_1/')
from DC_1_Codes import get_skidl_spice_ref, easy_tf
from AC_2_Codes import *
sym.init_printing()
#notebook specific loading control statements
get_ipython().run_line_magic('matplotlib', 'inline')
#tool to log notebook internals
#https://github.com/jrjohansson/version_information
get_ipython().run_line_magic('load_ext', 'version_information')
get_ipython().run_line_magic('version_information', 'skidl, PySpice,lcapy, sympy, numpy, matplotlib, pandas, scipy')
# # What is PZ analysis
#
# Pole-Zero analysis (.pz) does exactly what it says it does: it analyzes the circuit between two ports and returns all the poles and/or zeros between the ports, and that's it. That means with the resulting poles and zeros we can reconstruct the transfer function between the ports up to the gain term, that is $$H(s)_{true}\propto \dfrac{\prod_n (s-a_n)}{\prod_m (s-b_m)}$$ where $a_n$ are the zeros and $b_m$ are the poles. Again, this can't be stressed enough: .pz does not recover the "gain" term $K$ that would turn the proportionality into an equality.
#
# So what use is it? Well, that depends on what stage of the design cycle you're in and what is being analyzed. For passive RLC elements it will simply report pole and zero locations that we already know do not move. But for active devices such as BJTs, FETs, etc. we could cycle the .pz analysis and see how the pole-zero locations move with the bias. And while .pz is limited from a verification standpoint, seeing as it will just confirm the pole-zero locations that should have been set in the design stage, it can be of use when reverse engineering an unknown circuit. Further, during the design stage or when analyzing an unknown circuit, the lack of $K$ is not a total handicap, since even without $K$ we can perform root-locus analysis, or we can sweep $K$ while comparing the resulting transfer function response to an .ac simulation to then determine $K$.
#
# So then let's go ahead and start looking at .pz and what we can do with that data.
#
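# As a quick illustrative sketch (added here; the pole value is an assumption taken from the
# RC filter used below), this is what "up to the gain term" means in code: scipy's `zpk2tf`
# (imported above) rebuilds the transfer-function polynomials from pole/zero lists like the
# ones .pz returns, but we must supply the gain $K$ ourselves -- 1.0 below is only a placeholder.
example_zeros = []                        # no finite zeros for the RC lowpass case
example_poles = [-1.0 / (1e3 * 0.1e-6)]   # assumed single pole at -1/(R*C) = -1e4 rad/s
num_tf, den_tf = scipy_zpk2tf(example_zeros, example_poles, 1.0)   # K=1.0 placeholder gain
num_tf, den_tf                            # H(s) is only proportional to 1/(s + 1e4)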
# # PZ analysis of an RC lowpass filter
#
# For this first use case, we will use the RC lowpass filter that we developed in the last section based on the work of ALL ABOUT ELECTRONICS.
# In[3]:
#instantiate the rc_lowpass filter to test
lowpassF=rc_lowpass(C_value=.1@u_uF, R_value=1@u_kOhm)
lowpassF.lcapy_self()
# Let's then get the voltage transfer function for this filter topology
# In[4]:
H_rcl_gen=lowpassF.get_tf(with_values=False); H_rcl_gen
# The voltage transfer function for this topology shows that it has a single pole and the following gain term $K$
# In[5]:
H_rcl_gen.K
# Real quick, here is how lcapy gets the full voltage transfer function:
#
# 1. Generating the symbolic Modified nodal analysis in the Laplace domain
#
# 2. extracting the so-called Two-Port admittance parameters $Y$ from the Modified nodal analysis matrix
#
# 3. finding the port 1 to port 2 voltage transfer function via $$H_v(s)=\dfrac{Y_{21}}{Y_{22}}$$ which is just one way of doing it; more on two-port network theory and SPICE acquisition will be shown in the remaining sections of this chapter.
#
# Let's now get the transfer function for this instance of the RC lowpass topology and isolate its $K$ term
#
# In[6]:
H_rcl=lowpassF.get_tf(with_values=True, ZPK=True); H_rcl
# In[7]:
#K should always be real or we're in trouble
K_rcl=np.real(H_rcl.K.cval); K_rcl
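# A hand cross-check (added; assumes the same R=1k, C=0.1uF values used above): the voltage
# divider gives H(s) = (1/(s*C)) / (R + 1/(s*C)) = 1/(1 + s*R*C), so the ZPK gain term works
# out to K = 1/(R*C) = 1e4, which should match K_rcl computed above.
s_chk, R_chk, C_chk = sym.symbols('s_chk R_chk C_chk', positive=True)
H_check = sym.simplify((1 / (s_chk * C_chk)) / (R_chk + 1 / (s_chk * C_chk)))   # -> 1/(C*R*s + 1)
K_check = float((1 / (R_chk * C_chk)).subs({R_chk: 1e3, C_chk: 0.1e-6}))        # -> 10000.0
H_check, K_check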
# As with any SPICE simulation we have to instantiate our DUT in a circuit. However, unlike DC's .tf, we do not actually need any supplies; but since we are also going to be comparing the .ac simulation to what .pz yields, we need a full circuit with a source to perform the .ac simulation.
# In[8]:
reset()
#create the nets
net_in=Net('In'); net_out=Net('Out');
#create a 1V AC test source and attach it to the nets
vs=SINEV(ac_magnitude=1@u_V, dc_offset=5@u_V); vs['p', 'n']+=net_in, gnd
#attach term_0 to net_in and term_2 to net_out per scikit-rf convention; all
#other terminals are grounded
lowpassF.SKiDl(net_in, gnd, net_out, gnd)
circ=generate_netlist()
print(circ)
# In[9]:
filter_responce=qfilter_explorer(circ, 'RC Low Pass Filter Response');
# In[10]:
#this is the full filter tf response in comparison to the .ac sim
filter_responce.symbolic_tf(lowpassF)
# ## .tf does not get $K$
#
# Let's be clear about this: .tf will not yield $K$ in the generic case. It might get lucky, but recalling that in DC simulations capacitors are treated as nonexistent elements, there is no way that .tf will recover the $K$ for this topology, where $K=\dfrac{1}{RC}$
#
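# A small added illustration of why (assumes the same R and C values): at DC the capacitor is
# an open circuit, so the divider collapses to V_out = V_in and .tf can only ever report the
# DC gain of 1; the roll-off constant K = 1/(R*C) never enters a DC calculation.
s_dc, R_dc, C_dc = sym.symbols('s_dc R_dc C_dc', positive=True)
dc_gain = sym.limit(1 / (1 + s_dc * R_dc * C_dc), s_dc, 0)   # -> 1, independent of R and C
dc_gain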
# In[11]:
tf=easy_tf(circ)
# In[12]:
tf.dc_voltage_gain(vs, node(net_out), node(gnd))
tf.vg_results
# # PZ ease
#
# The following class, like the rest in this book, makes using the SPICE analysis easier and enhances it with the power of Python. But real quick, let's look at the ngspice call for .pz (typically found in chapter 15, section 3 of the ngspice manual)
#
# ```
# .pz node1 node2 node3 node4 <transfer_type('vol'/'cur')> <analysis_type('pz'/'zer'/'pol')>
# ```
#
# This differs from .tf in DC analysis, where we had to specify a source for the input; here instead the input port terminals are specified by `node1` & `node2`, which are the positive and negative terminals respectively. Similarly, the output port terminals are specified by `node3` & `node4`. Since .pz only requires the specification of the terminals to define the two-port network, we can take advantage of this to look at just the feedback (aka $\beta$) network in circuits containing feedback structures.
#
# Following the node arguments is the transfer-type argument: if `vol` is used we are acquiring the poles and/or zeros of the voltage transfer function
#
# $$H_v(s)=\dfrac{V_o}{V_i}$$
#
# else, if `cur` is used we are acquiring the Transimpedance (aka Transfer Impedance)
# $$H_F(s)=\dfrac{V_o}{I_i}$$
# , where again when using .pz we are only acquiring the poles and/or zeros that make up the respective transfer function, not the transfer function as a whole.
#
# Finally, the last argument `analysis_type` controls what we are acquiring from the .pz analysis. While typically we leave it as `pz` to get both the poles and zeros, there are times it might not be possible to get both, or the poles and zeros have to be acquired separately. In that case we can use `pol` to get just the poles and `zer` to get just the zeros
#
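# As a minimal hedged sketch (added; it mirrors the exact call the `pz_ease` class below wraps),
# the same statement can be issued straight through PySpice's simulator interface. The node names
# `In`, `Out` and ground `0` come from the netlist generated above, and `tf_type='vol'` /
# `pz_type='pz'` correspond to the `vol` / `pz` ngspice arguments.
try:
    pz_direct = circ.simulator().polezero(node1='In', node2='0', node3='Out', node4='0',
                                          tf_type='vol', pz_type='pz')
except Exception as pz_err:   # .pz may fail to converge; the class below handles that case too
    warnings.warn(f'direct .pz sketch did not run: {pz_err}')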
# Below, the class `pz_ease` is designed to perform the .pz analysis, with additional methods for analyzing the results. In both its instantiation and in several of its methods, the value of $K$ can be fed into it if known.
#
# In[13]:
#%%writefile -a AC_2_Codes.py
#chapter 2 section 4 pz_ease class
#class to perform .pz simulations with a bit more grace
#with some additional built-in analysis tools
class pz_ease(ac_representation_tool, eecomplex_plot_templets):
def __init__(self, circ_netlist_obj, K=1.0):
"""
Class to perform Pole Zero (.pz) SPICE simulation with grace
Args:
circ_netlist_obj (pyspice.Spice.Netlist.Circuit): the Netlist circuit produced
from SKiDl's `generate_netlist()`
K (float/int; 1): the gain; must be manually put in or found from .tf analysis
Returns:
"""
self.circ_netlist_obj=circ_netlist_obj
assert (type(K)==float) or (type(K)==int), 'K must be a float or int'
self.K=K
#dic of allowed pz control statements
self.allowed_control_statments={'voltage':'vol', 'current':'cur',
'pole-zero':'pz', 'zeros':'zer', 'poles':'pol'}
def pz_def_ports(self, port_0_pos_term, port_0_neg_term, port_1_pos_term, port_1_neg_term, display_table=False):
"""
Method to set the Port terminals for the two-port section of the circuit under test
where all inputs must be nodes in the circuit under test
Terminals:
port_0_pos_term, port_0_neg_term, port_1_pos_term, port_1_neg_term
Port & Terminals are defined via:
```
Left_Port - Two-Port Section under Test - Right_Port
+-------------+
Positive Port0 port_0_pos_term-| DUT Section |-port_1_pos_term Positive Port1
Negative Port0 port_0_neg_term-|             |-port_1_neg_term Negative Port1
+-------------+
```
Args:
display_table (bool; False): when true will display the generated `self.control_df` below
this method call in a jupyter notebook like environment
Returns:
Settings are recoded in `self.control_df` rows: `'port_0_terms+-'` & `'port_1_terms+-'`
"""
assert port_0_pos_term in self.circ_netlist_obj.node_names, f'`{port_0_pos_term}` is not a node in the circuit under test'
self.port_0_pos_term=port_0_pos_term
assert port_0_neg_term in self.circ_netlist_obj.node_names, f'`{port_0_neg_term}` is not a node in the circuit under test'
self.port_0_neg_term=port_0_neg_term
assert port_1_pos_term in self.circ_netlist_obj.node_names, f'`{port_1_pos_term}` is not a node in the circuit under test'
self.port_1_pos_term=port_1_pos_term
assert port_1_neg_term in self.circ_netlist_obj.node_names, f'`{port_1_neg_term}` is not a node in the circuit under test'
self.port_1_neg_term=port_1_neg_term
#record the results in table
self._build_control_table(display_table)
def pz_mode_set(self, tf_type='voltage', pz_acu='pole-zero', display_table=False):
"""
Method to set the pole-zero analysis controls
Args:
            tf_type (str; 'voltage'): the transfer function the poles and zeros are fit to;
                if `voltage` the tf is of the form V_o/V_i, else if `current` it is of the form
                V_o/I_i
pz_acu (str; 'pole-zero'): if `pole-zero` will attempt to get all the poles and zeros for the
                specified transfer function; else if `zeros` or `poles` will get just the respective zeros
or poles
display_table (bool; False): when true will display the generated `self.control_df` below
this method call in a jupyter notebook like environment
Returns:
            Settings are recorded in `self.control_df` rows: `'tf_type'` & `'acqui_mode'`
"""
assert tf_type in self.allowed_control_statments.keys(), f'`{tf_type}` is not `voltage` or `current`'
self.tf_type=tf_type
assert pz_acu in self.allowed_control_statments.keys(), f'`{pz_acu}` is not `pole-zero` or `poles` or `zeros`'
self.pz_acu=pz_acu
#record the results in table
self._build_control_table(display_table)
def _build_control_table(self, display_table=True):
"""
Internal method to build a pz control table to display pz simulation settings
Args:
display_table (bool; True): when true will display the generated `self.control_df` below
this method call in a jupyter notebook like environment
Returns:
creates dataframe table `self.control_df` that records pz simulation controls
if `display_table` is true will force showing under jupyter notebook cell
"""
self.control_df=pd.DataFrame(columns=['value'],
index=['tf_type',
'acqui_mode',
'port_0_terms+-',
'port_1_terms+-'
])
if hasattr(self, 'tf_type'):
self.control_df.at['tf_type']=self.tf_type
if hasattr(self, 'pz_acu'):
self.control_df.at['acqui_mode']=self.pz_acu
if hasattr(self, 'port_0_pos_term') and hasattr(self, 'port_0_neg_term') :
self.control_df.at['port_0_terms+-', 'value']=[self.port_0_pos_term, self.port_0_neg_term]
if hasattr(self, 'port_1_pos_term') and hasattr(self, 'port_1_neg_term') :
self.control_df.at['port_1_terms+-', 'value']=[self.port_1_pos_term, self.port_1_neg_term]
self.control_df.index.name='pz_sim_control'
if display_table:
display(self.control_df)
def do_pz_sim(self, display_table=False):
"""
Method to perform the pole-zero simulation based on values stored in self.control_df
        If the simulation does not converge, a warning with a basic debug suggestion is issued
        and `self.pz_values` is set to an empty dict.
TODO:
- add simulation kwargs
- flush out exception handling
"""
        attributes_to_check=['port_0_pos_term', 'port_0_neg_term', 'port_1_pos_term', 'port_1_neg_term',
                             'tf_type', 'pz_acu']
        pz_is_go=True
        for i in attributes_to_check:
            if not hasattr(self, i):
                pz_is_go=False
                warnings.warn(f'{i} has not been set; pole-zero simulation will not proceed till set')
if pz_is_go:
self.sim=self.circ_netlist_obj.simulator()
            #I can't catch the warning when it hangs, so going to have to do this
self.pz_values={}
try:
self.pz_values=self.sim.polezero(
node1=self.port_0_pos_term,
node2=self.port_0_neg_term,
node3=self.port_1_pos_term,
node4=self.port_1_neg_term,
tf_type=self.allowed_control_statments[self.tf_type],
pz_type=self.allowed_control_statments[self.pz_acu]
)
self._record_pz_results(display_table)
except pspice.Spice.NgSpice.Shared.NgSpiceCommandError:
self.pz_values={}
warnings.warn("""PZ analysis did not converge with the current setting:
start by changing the tf type (self.tf_type) and pz acusisiton type (self.pz_acu) """)
def _record_pz_results(self, display_table=True):
"""
Internal method to record the PZ results to a dataframe
Args:
            display_table (bool; True): when true will display the generated `self.pz_results_DF` below
this method call in a jupyter notebook like environment
Returns:
creates dataframe table `self.pz_results_DF` that records pz simulation results
if `display_table` is true will force showing under jupyter notebook cell
"""
self.pz_results_DF= | pd.DataFrame(columns=['Type', 'Values']) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 3 10:06:21 2018
@author: rucsa
"""
import pandas as pd
import datetime
import numpy as np
import tables
import check_data_and_prices_helpers as help
def add_returns():
#fundamentals_2016 = pd.read_hdf("../sources/fundamentals_2016_msci_regions.hdf5", "dataset1/x")
fundamentals_2017 = | pd.read_hdf("../sources/fundamentals_2017_msci_regions.hdf5", "dataset1/x") | pandas.read_hdf |
import pytest
from siuba.tests.helpers import data_frame
import pandas as pd
from siuba.experimental.pd_groups.translate import method_agg_op, method_el_op, method_el_op2
from siuba.experimental.pd_groups.groupby import broadcast_agg
#TODO:
# - what if they have mandatory, non-data args?
# - support accessor methods like _.x.str.upper()
# - support .expanding and .rolling
data_dt = data_frame(
g = ['a', 'a', 'b', 'b'],
x = pd.to_datetime(["2019-01-01 01:01:01", "2020-04-08 02:02:02", "2021-07-15 03:03:03", "2022-10-22 04:04:04"])
)
data_str = data_frame(
g = ['a', 'a', 'b', 'b'],
x = ['abc', 'cde', 'fg', 'h']
)
data_default = data_frame(
g = ['a', 'a', 'b', 'b'],
x = [10, 11, 12, 13],
y = [1,2,3,4]
)
data = {
'dt': data_dt,
'str': data_str,
None: data_default
}
# Test translator =============================================================
from pandas.testing import assert_frame_equal, assert_series_equal
from siuba.experimental.pd_groups.groupby import GroupByAgg, SeriesGroupBy
f_min = method_agg_op('min', is_property = False, accessor = None)
f_add = method_el_op2('add', is_property = False, accessor = None)
f_abs = method_el_op('abs', is_property = False, accessor = None)
f_df_size = lambda x: GroupByAgg.from_result(x.size(), x)
# GroupByAgg is liskov substitutable, so check that our functions operate
# like similarly substitutable subtypes. This means that...
# * input type is the same or more general, and
# * output type is the same or more specific
@pytest.mark.parametrize('f_op, f_dst, cls_result', [
# aggregation 1-arity
# f(SeriesGroupBy) -> GroupByAgg <= f(GroupByAgg) -> GroupByAgg
(lambda g: f_min(g.x), lambda g: g.x.min(), GroupByAgg),
(lambda g: f_min(f_min(g.x)), lambda g: g.x.min(), GroupByAgg),
# elementwise 1-arity
# f(GroupByAgg) -> GroupByAgg <= f(SeriesGroupBy) -> SeriesGroupBy
(lambda g: f_abs(f_min(g.x)), lambda g: g.x.min().abs(), GroupByAgg),
(lambda g: f_abs(g.x), lambda g: g.obj.x.abs(), SeriesGroupBy),
# elementwise 2-arity
# f(GroupByAgg, GroupByAgg) -> GroupByAgg <= f(GroupByAgg, SeriesGroupBy) -> SeriesGroupBy
(lambda g: f_add(f_min(g.x), f_min(g.y)), lambda g: g.x.min() + g.y.min(), GroupByAgg),
(lambda g: f_add(g.x, f_min(g.y)), lambda g: g.obj.x + g.y.transform('min'), SeriesGroupBy),
(lambda g: f_add(g.x, g.y), lambda g: g.obj.x + g.obj.y, SeriesGroupBy),
])
def test_grouped_translator_methods(f_op, f_dst, cls_result):
g = data_default.groupby('g')
res = f_op(g)
# needs to be exact, since GroupByAgg is subclass of SeriesGroupBy
assert type(res) is cls_result
dst = f_dst(g)
assert_series_equal(res.obj, dst, check_names = False)
@pytest.mark.parametrize('f_op, f_dst', [
(lambda g: f_add(f_min(g.x), f_min(g.y)), lambda g: g.x.transform('min') + g.y.transform('min')),
(lambda g: f_min(g.x), lambda g: g.x.transform('min')),
(lambda g: f_min(f_min(g.x)), lambda g: g.x.transform('min')),
(lambda g: f_abs(f_min(g.x)), lambda g: g.x.transform('min').abs()),
# Note that there's no way to transform a DF method, so use an arbitrary column
(lambda g: f_df_size(g), lambda g: g.x.transform('size')),
])
def test_agg_groupby_broadcasted_equal_to_transform(f_op, f_dst):
g = data_default.groupby('g')
res = f_op(g)
# needs to be exact, since GroupByAgg is subclass of SeriesGroupBy
assert type(res) is GroupByAgg
dst = f_dst(g)
broadcasted = broadcast_agg(res)
assert_series_equal(broadcasted, dst, check_names = False)
# Test generic functions ======================================================
def test_fast_mutate_basic():
    # sanity check of https://github.com/machow/siuba/issues/355
    from siuba.siu import _
    from siuba.experimental.pd_groups import fast_mutate
    res_df = data_default.groupby("g") >> fast_mutate(num = _.x / _.y * 100)
res = res_df.num
dst = data_default.x / data_default.y * 100
| assert_series_equal(res.obj, dst, check_names=False) | pandas.testing.assert_series_equal |
import pandas as pd
import numpy as np
from functions.load_wtdata import load_wtdata
from pathlib import Path
import gc
import tempfile
import os
#Configs
db_config = {'table_cast_park_dic':'1_cast_park_table_dic','host':"127.0.0.1",'user':"itestit",'password':"<PASSWORD>",'db':"SCHistorical_DB"}
exclude_columns = ['alarm_block_code','alarm_all','alarm_all_block_code','alarm','ot','ot_block_code','ot_all','ot_all_block_code']
datetime_name = 'date_time'
result_folder = 'results'
if not os.path.exists(result_folder):
os.makedirs(result_folder)
batch_size = 500
Marging=15 # the data from the 15 days before are bad
# 2014-2015
# 'unix_timestamp_ini':1388534400,
# 'unix_timestamp_end':1420070399,
# 2015-2016
# 'unix_timestamp_ini':1420070499,
# 'unix_timestamp_end':1451606399,
# 2014-2016
#'unix_timestamp_ini':1388534400,
#'unix_timestamp_end':1451606399,
# 2016->
#'unix_timestamp_ini_test':1451606400,
#'unix_timestamp_end_test':1498236799,
#wt_query = {'timesteps':100,'epochs':50,'class_weight':{0: 1.,1: 10.},'ld_id':194,'ld_code':"B211",'wp_id':20,'wp_code':"izco",'seconds_to_aggregate':600,'array_id_walm':"607,608,613,627,631,659",'array_ot':"10067,10068",'freq_dat_med_min':10,'fault':'Gbox','type':"phealtdeep",'filter':"",'power_condition':"",'include_variables':"",'exclude_variables':"regex:model|fake_data|^SPCosPhi|^FrecRed|^Estado",'target_name':"alarm",'unix_timestamp_ini':1388534400,'unix_timestamp_end':1420070399,'unix_timestamp_ini_test':1420070499,'unix_timestamp_end_test':1500000000}
#wt_query = {'timesteps':100,'epochs':50,'class_weight':{0: 1.,1: 500.},'ld_id':212,'ld_code':"B312",'wp_id':20,'wp_code':"izco",'seconds_to_aggregate':600,'array_id_walm':"614,615,616,636,639,641",'array_ot':"10004",'freq_dat_med_min':10,'fault':'Gen','type':"phealtdeep",'filter':"",'power_condition':"",'include_variables':"",'exclude_variables':"regex:model|fake_data|^SPCosPhi|^FrecRed|^Estado",'target_name':"alarm",'unix_timestamp_ini':1388534400,'unix_timestamp_end':1420070399,'unix_timestamp_ini_test':1420070499,'unix_timestamp_end_test':1500000000}
#wt_query = {'timesteps':100,'epochs':50,'class_weight':{0: 1.,1: 100.},'ld_id':211,'ld_code':"B311",'wp_id':20,'wp_code':"izco",'seconds_to_aggregate':600,'array_id_walm':"614,615,616,636,639,641",'array_ot':"10004",'freq_dat_med_min':10,'fault':'Gen','type':"phealtdeep",'filter':"",'power_condition':"",'include_variables':"",'exclude_variables':"regex:model|fake_data|^SPCosPhi|^FrecRed|^Estado",'target_name':"alarm",'unix_timestamp_ini':1388534400,'unix_timestamp_end':1451606399,'unix_timestamp_ini_test':1420070499,'unix_timestamp_end_test':1500000000}
#wt_query = {'timesteps':100,'epochs':50,'class_weight':{0: 1.,1: 500.},'ld_id':189,'ld_code':"B206",'wp_id':20,'wp_code':"izco",'seconds_to_aggregate':600,'array_id_walm':"614,615,616,636,639,641",'array_ot':"10004",'freq_dat_med_min':10,'fault':'Gen','type':"phealtdeep",'filter':"",'power_condition':"",'include_variables':"",'exclude_variables':"regex:model|fake_data|^SPCosPhi|^FrecRed|^Estado",'target_name':"alarm",'unix_timestamp_ini':1388534400,'unix_timestamp_end':1420070399,'unix_timestamp_ini_test':1420070499,'unix_timestamp_end_test':1500000000}
#wt_query = {'timesteps':100,'epochs':50,'class_weight':{0: 1.,1: 500.},'ld_id':179,'ld_code':"B113",'wp_id':20,'wp_code':"izco",'seconds_to_aggregate':600,'array_id_walm':"614,615,616,636,639,641",'array_ot':"10004",'freq_dat_med_min':10,'fault':'Gen','type':"phealtdeep",'filter':"",'power_condition':"",'include_variables':"",'exclude_variables':"regex:model|fake_data|^SPCosPhi|^FrecRed|^Estado",'target_name':"alarm",'unix_timestamp_ini':1388534400,'unix_timestamp_end':1420070399,'unix_timestamp_ini_test':1420070499,'unix_timestamp_end_test':1500000000}
wt_query = {'timesteps':100,'epochs':50,'class_weight':{0: 1.,1: 500.},'ld_id':201,'ld_code':"B301",'wp_id':20,'wp_code':"izco",'seconds_to_aggregate':600,'array_id_walm':"614,615,616,636,639,641",'array_ot':"10004",'freq_dat_med_min':10,'fault':'Gen','type':"phealtdeep",'filter':"",'power_condition':"",'include_variables':"",'exclude_variables':"regex:model|fake_data|^SPCosPhi|^FrecRed|^Estado",'target_name':"alarm",'unix_timestamp_ini':1388534400,'unix_timestamp_end':1420070399,'unix_timestamp_ini_test':1420070499,'unix_timestamp_end_test':1500000000}
#Fuhrlander
#wt_query = {'timesteps':50,'epochs':10,'class_weight':{0: 1.,1: 500.},'ld_id':80,'ld_code':"FL701",'wp_id':13,'wp_code':"sant",'seconds_to_aggregate':300,'array_id_walm':"1271,1329,964,1306,2302,2304,2306,1369,1370",'array_ot':"",'freq_dat_med_min':5,'fault':'Gbox','type':"phealtdeep",'filter':"",'power_condition':"",'include_variables':"",'exclude_variables':"regex:model|fake_data|^SPCosPhi|^FrecRed|^Estado",'target_name':"alarm",'unix_timestamp_ini':1325376000,'unix_timestamp_end':1356998399,'unix_timestamp_ini_test':1388534400,'unix_timestamp_end_test':1420070399}
timesteps=wt_query['timesteps']
filename=str(result_folder+'/'+wt_query['ld_code'])+'_wtdata_train_'+wt_query['fault']+'_'+wt_query['target_name']+'_'+str(wt_query['unix_timestamp_ini'])+'_'+str(wt_query['unix_timestamp_end'])+'.csv.gz'
if not Path(filename).is_file():
print(filename+" not found...Downloading train data...")
wtdata_train=load_wtdata(wt_query=wt_query,db_config=db_config)
wtdata_train.to_csv(filename, sep=',',index =False,compression='gzip')
else:
print("Loading disk train data...")
wtdata_train = pd.read_csv(filename, sep=',', compression='gzip',low_memory=False)
#Format date_time
wtdata_train[datetime_name]=pd.to_datetime(wtdata_train[datetime_name],format='%Y-%m-%d %H:%M:%S')
if wt_query['target_name']=='alarm' and 'ot_all' in wtdata_train.columns:
#wtdata_train.loc[wtdata_train['ot_all'] == 1, 'alarm'] = 0
wtdata_train = wtdata_train[wtdata_train['ot_all'] != 1]
if wt_query['target_name']=='alarm' and 'ot' in wtdata_train.columns:
wtdata_train=wtdata_train[wtdata_train['ot'] != 1]
#Modify alarm to do pre_alarm
#from datetime import datetime, timedelta
#Anticipation = 14
#Marging=14
#dates_prealarm=[]
#active_alarms=wtdata_train[wtdata_train[wt_query['target_name']]==1][datetime_name].values
#for alarm in active_alarms:
# for m in range(0,Marging):
# dates_prealarm.append(alarm - np.timedelta64(Anticipation+m, 'D'))
#wtdata_train.loc[wtdata_train[datetime_name].isin(active_alarms),wt_query['target_name']]=0
#wtdata_train.loc[wtdata_train[datetime_name].isin(dates_prealarm),wt_query['target_name']]=1
from datetime import datetime, timedelta
dates_prealarm=[]
active_alarms=wtdata_train[wtdata_train[wt_query['target_name']]==1][datetime_name].values
for alarm in active_alarms:
for m in range(0,Marging):
dates_prealarm.append(alarm - np.timedelta64(m, 'D'))
wtdata_train.loc[wtdata_train[datetime_name].isin(active_alarms),wt_query['target_name']]=0
wtdata_train.loc[wtdata_train[datetime_name].isin(dates_prealarm),wt_query['target_name']]=1
del dates_prealarm, active_alarms
to_drop = set(wtdata_train.columns).intersection(exclude_columns).difference([wt_query['target_name']])
if any(to_drop):
wtdata_train = wtdata_train.drop(to_drop, axis=1)
#Identify columns all NA
idx_NA_columns_train = pd.isnull(wtdata_train).sum()>0.9*wtdata_train.shape[0]
if any(idx_NA_columns_train):
wtdata_train=wtdata_train.drop(idx_NA_columns_train[idx_NA_columns_train==True].index,axis=1)
wtdata_train = wtdata_train.dropna(axis=0,how='any',subset=set(wtdata_train.columns).difference(['date_time']))
y_train = wtdata_train.loc[:, wt_query['target_name']]
y_train = y_train.as_matrix()
X_train = wtdata_train.drop([wt_query['target_name']], axis=1)
del wtdata_train
gc.collect()
## Splitting the dataset into the Training set and Test set
#def non_shuffling_train_test_split(X, y, test_size=0.2):
# import numpy as np
# i = int((1 - test_size) * X.shape[0]) + 1
# X_train, X_test = np.split(X, [i])
# y_train, y_test = np.split(y, [i])
# return X_train, X_test, y_train, y_test
#
#X_train, X_test, y_train, y_test = non_shuffling_train_test_split(X, y, test_size = 0.1)
#Copy and Drop date_time
X_train_df=X_train[datetime_name]
to_drop = set(X_train.columns).intersection([datetime_name,wt_query['target_name']])
X_train=X_train.drop(to_drop, axis=1)
num_features = X_train.shape[1]
num_rows = X_train.shape[0]
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train.as_matrix())
# Reshaping
#X_train = np.reshape(X_train, (X_train.shape[0], 1,X_train.shape[1]))
# Creating a data structure with timesteps and t+1 output
#Save in disk to get free memory
temp_train = tempfile.NamedTemporaryFile(prefix='temp_train')
X_temp_timestepped=np.memmap(temp_train, dtype='float64', mode='w+', shape=((X_train.shape[0]-timesteps),timesteps,X_train.shape[1]))
#X_temp_timestepped=np.empty(shape=((num_rows-timesteps)*timesteps,num_features))
#X_temp_timestepped=np.memmap('temp_matrix.tmp', dtype='float64', mode='w+', shape=((num_rows-timesteps)*timesteps,num_features))
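#build one training sample per index i: each sample is the window of the previous `timesteps` rows of scaled features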
for i in range(timesteps,X_train.shape[0]):
X_temp_timestepped[i-timesteps,:]=np.reshape(X_train[i-timesteps:i, :],(timesteps,X_train.shape[1]))
X_train=X_temp_timestepped
del X_temp_timestepped
y_train=y_train[timesteps:]
gc.collect()
#Disable GPU
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
#Seed
np.random.seed(123)
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
filename_model=str(result_folder+'/'+wt_query['ld_code'])+'_wtdata_train_'+wt_query['fault']+'_'+wt_query['target_name']+'_'+str(wt_query['unix_timestamp_ini'])+'_'+str(wt_query['unix_timestamp_end'])+'_model'
if not Path(filename_model+'.json').is_file():
def build_classifier2(input_dim):
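        #three stacked LSTM layers (10 units each) feeding a single sigmoid unit for binary alarm classification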
classifier = Sequential()
classifier.add(LSTM(units = 10, return_sequences=True,input_shape = (timesteps,input_dim[1])))
classifier.add(LSTM(units = 10, return_sequences=True))
classifier.add(LSTM(units = 10))
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
return classifier
classifier2 = build_classifier2([X_train.shape[0],X_train.shape[2]])
# Fitting the ANN to the Training set
classifier2.fit(np.array(X_train), np.array(y_train), batch_size = batch_size, epochs = wt_query['epochs'],class_weight = wt_query['class_weight'])
#Save model
# serialize model to JSON
model_json = classifier2.to_json()
filename_model=str(result_folder+'/'+wt_query['ld_code'])+'_wtdata_train_'+wt_query['fault']+'_'+wt_query['target_name']+'_'+str(wt_query['unix_timestamp_ini'])+'_'+str(wt_query['unix_timestamp_end'])+'_model'
with open(filename_model+'.json', "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
classifier2.save_weights(filename_model+'.h5')
print("Saved model to disk")
else:
json_file = open(filename_model + '.json', 'r')
classifier2 = json_file.read()
json_file.close()
from keras.models import model_from_json
classifier2 = model_from_json(classifier2)
# load weights into new model
classifier2.load_weights(filename_model+'.h5')
print("Loaded model from disk")
# # load json and create model
# json_file = open(filename_model+'.json', 'r')
# classifier2 = json_file.read()
# json_file.close()
# from keras.models import model_from_json
# classifier2 = model_from_json(classifier2)
# # load weights into new model
# classifier2.load_weights(filename_model+'.h5')
# print("Loaded model from disk")
## Load test data
bk_ini=wt_query['unix_timestamp_ini']
bk_end=wt_query['unix_timestamp_end']
wt_query['unix_timestamp_ini']=wt_query['unix_timestamp_ini_test']
wt_query['unix_timestamp_end']=wt_query['unix_timestamp_end_test']
filename=str(result_folder+'/'+wt_query['ld_code'])+'_wtdata_test_'+wt_query['fault']+'_'+wt_query['target_name']+'_'+str(wt_query['unix_timestamp_ini'])+'_'+str(wt_query['unix_timestamp_end'])+'.csv.gz'
if not Path(filename).is_file():
print(filename + " not found...Downloading test data...")
wtdata_test=load_wtdata(wt_query=wt_query,db_config=db_config)
wtdata_test.to_csv(filename, sep=',',index =False,compression='gzip')
else:
print("Loading disk test data...")
wtdata_test = pd.read_csv(filename, sep=',', compression='gzip',low_memory=False)
wt_query['unix_timestamp_ini']=bk_ini
wt_query['unix_timestamp_end']=bk_end
wtdata_test[datetime_name]=pd.to_datetime(wtdata_test[datetime_name],format='%Y-%m-%d %H:%M:%S')
if wt_query['target_name']=='alarm' and 'ot_all' in wtdata_test.columns:
wtdata_test.loc[wtdata_test['ot_all'] == 1, 'alarm'] = 0
if wt_query['target_name']=='alarm' and 'ot' in wtdata_test.columns:
wtdata_test.loc[wtdata_test['ot'] == 1, 'alarm'] = 0
to_drop = set(wtdata_test.columns).intersection(exclude_columns).difference([wt_query['target_name']])
if any(to_drop):
wtdata_test = wtdata_test.drop(to_drop, axis=1)
dates_prealarm=[]
active_alarms=wtdata_test[wtdata_test[wt_query['target_name']]==1][datetime_name].values
for alarm in active_alarms:
for m in range(0,Marging):
dates_prealarm.append(alarm - np.timedelta64(m, 'D'))
wtdata_test.loc[wtdata_test[datetime_name].isin(active_alarms),wt_query['target_name']]=0
wtdata_test.loc[wtdata_test[datetime_name].isin(dates_prealarm),wt_query['target_name']]=1
if any(idx_NA_columns_train):
wtdata_test=wtdata_test.drop(idx_NA_columns_train[idx_NA_columns_train==True].index,axis=1)
wtdata_test = wtdata_test.dropna(axis=0,how='any',subset=set(wtdata_test.columns).difference(['date_time']))
y_test = wtdata_test.loc[:, wt_query['target_name']]
y_test = y_test.as_matrix()
X_test = wtdata_test.drop([wt_query['target_name']], axis=1)
del wtdata_test
X_test_df=X_test[datetime_name]
to_drop = set(X_test.columns).intersection([datetime_name,wt_query['target_name']])
X_test=X_test.drop(to_drop, axis=1)
X_test = sc.transform(X_test.as_matrix())
temp_test = tempfile.NamedTemporaryFile(prefix='temp_test')
X_temp_timestepped=np.memmap(temp_test, dtype='float64', mode='w+', shape=((X_test.shape[0]-timesteps),timesteps,X_test.shape[1]))
#X_temp_timestepped=np.empty(shape=((num_rows-timesteps)*timesteps,num_features))
#X_temp_timestepped=np.memmap('temp_matrix.tmp', dtype='float64', mode='w+', shape=((num_rows-timesteps)*timesteps,num_features))
for i in range(timesteps,X_test.shape[0]):
X_temp_timestepped[i-timesteps,:]=np.reshape(X_test[i-timesteps:i, :],(timesteps,X_test.shape[1]))
X_test=X_temp_timestepped
del X_temp_timestepped
y_test=y_test[timesteps:]
gc.collect()
## End prepare test data
# Predicting the Test set results
y_pred = classifier2.predict(X_test)
y_pred_df = | pd.DataFrame(y_pred) | pandas.DataFrame |
#
# Prepare the hvorg_movies
#
import os
import datetime
import pickle
import json
import numpy as np
import pandas as pd
from sunpy.time import parse_time
# The sources ids
get_sources_ids = 'getDataSources.json'
# Save the data
save_directory = os.path.expanduser('~/Data/hvanalysis/derived')
# Read in the data
directory = os.path.expanduser('~/Data/hvanalysis/source')
hvorg_movies = 'movies.csv'
hvorg_movies = 'movies_20171128.csv'
path = os.path.expanduser(os.path.join(directory, hvorg_movies))
df = | pd.read_csv(path) | pandas.read_csv |
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
# from http://imachordata.com/2016/02/05/you-complete-me/
@pytest.fixture
def df1():
return pd.DataFrame(
{
"Year": [1999, 2000, 2004, 1999, 2004],
"Taxon": [
"Saccharina",
"Saccharina",
"Saccharina",
"Agarum",
"Agarum",
],
"Abundance": [4, 5, 2, 1, 8],
}
)
def test_empty_column(df1):
"""Return dataframe if `columns` is empty."""
assert_frame_equal(df1.complete(), df1)
def test_MultiIndex_column(df1):
"""Raise ValueError if column is a MultiIndex."""
df = df1
df.columns = [["A", "B", "C"], list(df.columns)]
with pytest.raises(ValueError):
df1.complete(["Year", "Taxon"])
def test_column_duplicated(df1):
"""Raise ValueError if column is duplicated in `columns`"""
with pytest.raises(ValueError):
df1.complete(
columns=[
"Year",
"Taxon",
{"Year": lambda x: range(x.Year.min().x.Year.max() + 1)},
]
)
def test_type_columns(df1):
"""Raise error if columns is not a list object."""
with pytest.raises(TypeError):
df1.complete(columns="Year")
def test_fill_value_is_a_dict(df1):
"""Raise error if fill_value is not a dictionary"""
with pytest.raises(TypeError):
df1.complete(columns=["Year", "Taxon"], fill_value=0)
def test_wrong_column_fill_value(df1):
"""Raise ValueError if column in `fill_value` does not exist."""
with pytest.raises(ValueError):
df1.complete(columns=["Taxon", "Year"], fill_value={"year": 0})
def test_wrong_data_type_dict(df1):
"""
Raise ValueError if value in dictionary
is not a 1-dimensional object.
"""
with pytest.raises(ValueError):
df1.complete(columns=[{"Year": pd.DataFrame([2005, 2006, 2007])}])
frame = pd.DataFrame(
{
"Year": [1999, 2000, 2004, 1999, 2004],
"Taxon": [
"Saccharina",
"Saccharina",
"Saccharina",
"Agarum",
"Agarum",
],
"Abundance": [4, 5, 2, 1, 8],
}
)
wrong_columns = (
(frame, ["b", "Year"]),
(frame, [{"Yayay": range(7)}]),
(frame, ["Year", ["Abundant", "Taxon"]]),
(frame, ["Year", ("Abundant", "Taxon")]),
)
empty_sub_columns = [
(frame, ["Year", []]),
(frame, ["Year", {}]),
(frame, ["Year", ()]),
]
@pytest.mark.parametrize("frame,wrong_columns", wrong_columns)
def test_wrong_columns(frame, wrong_columns):
"""Test that ValueError is raised if wrong column is supplied."""
with pytest.raises(ValueError):
frame.complete(columns=wrong_columns)
@pytest.mark.parametrize("frame,empty_sub_cols", empty_sub_columns)
def test_empty_subcols(frame, empty_sub_cols):
"""Raise ValueError for an empty group in columns"""
with pytest.raises(ValueError):
frame.complete(columns=empty_sub_cols)
def test_fill_value(df1):
"""Test fill_value argument."""
output1 = pd.DataFrame(
{
"Year": [1999, 1999, 2000, 2000, 2004, 2004],
"Taxon": [
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
],
"Abundance": [1, 4.0, 0, 5, 8, 2],
}
)
result = df1.complete(
columns=["Year", "Taxon"], fill_value={"Abundance": 0}
)
assert_frame_equal(result, output1)
@pytest.fixture
def df1_output():
return pd.DataFrame(
{
"Year": [
1999,
1999,
2000,
2000,
2001,
2001,
2002,
2002,
2003,
2003,
2004,
2004,
],
"Taxon": [
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
"Agarum",
"Saccharina",
],
"Abundance": [1.0, 4, 0, 5, 0, 0, 0, 0, 0, 0, 8, 2],
}
)
def test_fill_value_all_years(df1, df1_output):
"""
Test the complete function accurately replicates for
all the years from 1999 to 2004.
"""
result = df1.complete(
columns=[
{"Year": lambda x: range(x.Year.min(), x.Year.max() + 1)},
"Taxon",
],
fill_value={"Abundance": 0},
)
assert_frame_equal(result, df1_output)
def test_dict_series(df1, df1_output):
"""
Test the complete function if a dictionary containing a Series
is present in `columns`.
"""
result = df1.complete(
columns=[
{
"Year": lambda x: pd.Series(
range(x.Year.min(), x.Year.max() + 1)
)
},
"Taxon",
],
fill_value={"Abundance": 0},
)
assert_frame_equal(result, df1_output)
def test_dict_series_duplicates(df1, df1_output):
"""
Test the complete function if a dictionary containing a
Series (with duplicates) is present in `columns`.
"""
result = df1.complete(
columns=[
{
"Year": pd.Series(
[1999, 2000, 2000, 2001, 2002, 2002, 2002, 2003, 2004]
)
},
"Taxon",
],
fill_value={"Abundance": 0},
)
assert_frame_equal(result, df1_output)
def test_dict_values_outside_range(df1):
"""
Test the output if a dictionary is present,
    and none of the values in the dataframe,
    for the corresponding label, is present
    in the dictionary's values.
"""
result = df1.complete(
columns=[("Taxon", "Abundance"), {"Year": np.arange(2005, 2007)}]
)
expected = pd.DataFrame(
[
{"Taxon": "Agarum", "Abundance": 1, "Year": 1999},
{"Taxon": "Agarum", "Abundance": 1, "Year": 2005},
{"Taxon": "Agarum", "Abundance": 1, "Year": 2006},
{"Taxon": "Agarum", "Abundance": 8, "Year": 2004},
{"Taxon": "Agarum", "Abundance": 8, "Year": 2005},
{"Taxon": "Agarum", "Abundance": 8, "Year": 2006},
{"Taxon": "Saccharina", "Abundance": 2, "Year": 2004},
{"Taxon": "Saccharina", "Abundance": 2, "Year": 2005},
{"Taxon": "Saccharina", "Abundance": 2, "Year": 2006},
{"Taxon": "Saccharina", "Abundance": 4, "Year": 1999},
{"Taxon": "Saccharina", "Abundance": 4, "Year": 2005},
{"Taxon": "Saccharina", "Abundance": 4, "Year": 2006},
{"Taxon": "Saccharina", "Abundance": 5, "Year": 2000},
{"Taxon": "Saccharina", "Abundance": 5, "Year": 2005},
{"Taxon": "Saccharina", "Abundance": 5, "Year": 2006},
]
)
assert_frame_equal(result, expected)
# adapted from https://tidyr.tidyverse.org/reference/complete.html
complete_parameters = [
(
pd.DataFrame(
{
"group": [1, 2, 1],
"item_id": [1, 2, 2],
"item_name": ["a", "b", "b"],
"value1": [1, 2, 3],
"value2": [4, 5, 6],
}
),
["group", "item_id", "item_name"],
pd.DataFrame(
{
"group": [1, 1, 1, 1, 2, 2, 2, 2],
"item_id": [1, 1, 2, 2, 1, 1, 2, 2],
"item_name": ["a", "b", "a", "b", "a", "b", "a", "b"],
"value1": [
1.0,
np.nan,
np.nan,
3.0,
np.nan,
np.nan,
np.nan,
2.0,
],
"value2": [
4.0,
np.nan,
np.nan,
6.0,
np.nan,
np.nan,
np.nan,
5.0,
],
}
),
),
(
pd.DataFrame(
{
"group": [1, 2, 1],
"item_id": [1, 2, 2],
"item_name": ["a", "b", "b"],
"value1": [1, 2, 3],
"value2": [4, 5, 6],
}
),
["group", ("item_id", "item_name")],
pd.DataFrame(
{
"group": [1, 1, 2, 2],
"item_id": [1, 2, 1, 2],
"item_name": ["a", "b", "a", "b"],
"value1": [1.0, 3.0, np.nan, 2.0],
"value2": [4.0, 6.0, np.nan, 5.0],
}
),
),
]
@pytest.mark.parametrize("df,columns,output", complete_parameters)
def test_complete(df, columns, output):
"Test the complete function, with and without groupings."
assert_frame_equal(df.complete(columns), output)
@pytest.fixture
def duplicates():
return pd.DataFrame(
{
"row": [
"21.08.2020",
"21.08.2020",
"21.08.2020",
"21.08.2020",
"22.08.2020",
"22.08.2020",
"22.08.2020",
"22.08.2020",
],
"column": ["A", "A", "B", "C", "A", "B", "B", "C"],
"value": [43.0, 36, 36, 28, 16, 40, 34, 0],
}
)
# https://stackoverflow.com/questions/63541729/
# pandas-how-to-include-all-columns-for-all-rows-although-value-is-missing-in-a-d
# /63543164#63543164
def test_duplicates(duplicates):
"""Test that the complete function works for duplicate values."""
df = pd.DataFrame(
{
"row": {
0: "21.08.2020",
1: "21.08.2020",
2: "21.08.2020",
3: "21.08.2020",
4: "22.08.2020",
5: "22.08.2020",
6: "22.08.2020",
},
"column": {0: "A", 1: "A", 2: "B", 3: "C", 4: "A", 5: "B", 6: "B"},
"value": {0: 43, 1: 36, 2: 36, 3: 28, 4: 16, 5: 40, 6: 34},
}
)
result = df.complete(columns=["row", "column"], fill_value={"value": 0})
assert_frame_equal(result, duplicates)
def test_unsorted_duplicates(duplicates):
"""Test output for unsorted duplicates."""
df = pd.DataFrame(
{
"row": {
0: "22.08.2020",
1: "22.08.2020",
2: "21.08.2020",
3: "21.08.2020",
4: "21.08.2020",
5: "21.08.2020",
6: "22.08.2020",
},
"column": {
0: "B",
1: "B",
2: "A",
3: "A",
4: "B",
5: "C",
6: "A",
},
"value": {0: 40, 1: 34, 2: 43, 3: 36, 4: 36, 5: 28, 6: 16},
}
)
result = df.complete(columns=["row", "column"], fill_value={"value": 0})
assert_frame_equal(result, duplicates)
# https://stackoverflow.com/questions/32874239/
# how-do-i-use-tidyr-to-fill-in-completed-rows-within-each-value-of-a-grouping-var
def test_grouping_first_columns():
"""
Test complete function when the first entry
in columns is a grouping.
"""
df2 = pd.DataFrame(
{
"id": [1, 2, 3],
"choice": [5, 6, 7],
"c": [9.0, np.nan, 11.0],
"d": [
pd.NaT,
pd.Timestamp("2015-09-30 00:00:00"),
pd.Timestamp("2015-09-29 00:00:00"),
],
}
)
output2 = pd.DataFrame(
{
"id": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"c": [9.0, 9.0, 9.0, np.nan, np.nan, np.nan, 11.0, 11.0, 11.0],
"d": [
pd.NaT,
pd.NaT,
pd.NaT,
pd.Timestamp("2015-09-30 00:00:00"),
pd.Timestamp("2015-09-30 00:00:00"),
pd.Timestamp("2015-09-30 00:00:00"),
pd.Timestamp("2015-09-29 00:00:00"),
pd.Timestamp("2015-09-29 00:00:00"),
pd.Timestamp("2015-09-29 00:00:00"),
],
"choice": [5, 6, 7, 5, 6, 7, 5, 6, 7],
}
)
result = df2.complete(columns=[("id", "c", "d"), "choice"])
| assert_frame_equal(result, output2) | pandas.testing.assert_frame_equal |
# -*- coding:utf-8 _*-
"""
@author:<NAME>
@time: 2019/12/02
"""
from urllib.parse import unquote
import pandas as pd
from redis import ConnectionPool, Redis
from scrapy.utils.project import get_project_settings
from dingxiangyuan import settings
from sqlalchemy import create_engine
from DBUtils.PooledDB import PooledDB
class DBPoolHelper(object):
def __init__(self, dbname, user=None, password=None, db_type='postgressql', host='localhost', port=5432):
"""
# sqlite3
        # connect by database file name; sqlite does not support encryption, so no user name or password is used
        import sqlite3
        config = {"database": "path/to/your/dbname.db"}
pool = PooledDB(sqlite3, maxcached=50, maxconnections=1000, maxusage=1000, **config)
# mysql
import pymysql
        pool = PooledDB(pymysql, 5, host='localhost', user='root', passwd='<PASSWORD>', db='myDB', port=3306)  # 5 is the minimum number of connections kept in the pool
# postgressql
import psycopg2
POOL = PooledDB(creator=psycopg2, host="127.0.0.1", port="5342", user, password, database)
# sqlserver
import pymssql
pool = PooledDB(creator=pymssql, host=host, port=port, user=user, password=password, database=database, charset="utf8")
:param type:
"""
if db_type == 'postgressql':
import psycopg2
pool = PooledDB(creator=psycopg2, host=host, port=port, user=user, password=password, database=dbname)
        elif db_type == 'mysql':
            import pymysql
            pool = PooledDB(pymysql, 5, host=host, user=user, passwd=password, db=dbname, port=port)  # 5 is the minimum number of pooled connections
elif db_type == 'sqlite':
import sqlite3
config = {"datanase": dbname}
pool = PooledDB(sqlite3, maxcached=50, maxconnections=1000, maxusage=1000, **config)
else:
            raise Exception('Please pass a valid database type: db_type="postgressql" or db_type="mysql" or db_type="sqlite"')
self.conn = pool.connection()
self.cursor = self.conn.cursor()
def connect_close(self):
"""关闭连接"""
self.cursor.close()
self.conn.close()
def execute(self, sql, params=tuple()):
        self.cursor.execute(sql, params)  # execute the statement
self.conn.commit()
def fetchone(self, sql, params=tuple()):
self.cursor.execute(sql, params)
data = self.cursor.fetchone()
return data
def fetchall(self, sql, params=tuple()):
self.cursor.execute(sql, params)
data = self.cursor.fetchall()
return data
def pandas_db_helper():
"""
'postgresql://postgres:[email protected]:5432/xiaomuchong'
"mysql+pymysql://root:[email protected]:3306/srld?charset=utf8mb4"
"sqlite: ///sqlite3.db"
"""
engine = create_engine(settings.DATABASE_ENGINE)
conn = engine.connect()
return conn
def redis_init():
settings = get_project_settings()
if settings["REDIS_PARAMS"]:
pool = ConnectionPool(host=settings["REDIS_HOST"], port=settings["REDIS_PORT"],
password=settings["REDIS_PARAMS"]['password'])
else:
pool = ConnectionPool(host=settings["REDIS_HOST"], port=settings["REDIS_PORT"])
conn = Redis(connection_pool=pool)
return conn
redis_conn = redis_init()
db_conn = pandas_db_helper()
def cal_page_url(row):
topic_url, reply_num = row[0], row[1]
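    # each topic page shows 35 replies, so the number of pages is reply_num // 35 + 1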
page_num = reply_num // 35 + 1
redis_conn.sadd('topic_page_urls', topic_url)
for page in range(2, page_num + 1):
redis_conn.sadd('topic_page_urls', f'{topic_url}?ppg={page}')
print(topic_url)
def insert_redis_topic_page_urls():
data = pd.read_sql(sql="topics", con=db_conn, columns=["topic_url", "reply_num"])
data.apply(cal_page_url, axis=1)
def get_topic_left_start_urls():
topic_urls = pd.read_sql(sql="select distinct topic_url from posts_replies", con=db_conn)
topic_urls_floor_one = pd.read_sql(sql="select topic_url from posts_replies where floor=1", con=db_conn)
has_topic_urls = set(topic_urls['topic_url']) - set(topic_urls_floor_one['topic_url'])
topic_page_urls = redis_conn.smembers('topic_page_urls')
start_urls = {url.decode() for url in topic_page_urls if url.decode().split('?')[0] in has_topic_urls}
print(len(has_topic_urls), len(start_urls))
def get_user_start_urls():
""" 获取用户表起始url """
user_urls = pd.read_sql(sql="select distinct author_url from posts_replies", con=db_conn)
moderator_urls = pd.read_sql(sql="select distinct moderator_url_list from board", con=db_conn)
moderator_urls_list = [url for moderator_list in moderator_urls['moderator_url_list'].str.split('; ') for url in moderator_list]
for url in user_urls['author_url']:
redis_conn.sadd('dingxiangke_start_urls', url)
for url in moderator_urls_list:
redis_conn.sadd('dingxiangke_start_urls', url)
    print('Finished adding')
def insert_into_topic_rate():
""" 插入积分表 """
postgres = DBPoolHelper(db_type='postgressql', dbname='dingxiangyuan', user='postgres', password='<PASSWORD>', host='localhost', port='5432')
data1 = pd.read_sql(sql="select topic_url from posts_replies where floor=1", con=db_conn)
data2 = pd.read_sql(sql="select topic_url from topic_rate_get", con=db_conn)
topic_urls = set(data1['topic_url']) - set(data2['topic_url'])
for topic_url in topic_urls:
res = pd.read_sql(sql='select topic_type, board_name from posts_replies where floor=1 and topic_url=%s', con=db_conn, params=(topic_url,))
topic_type, board_name = res['topic_type'].values[0], res['board_name'].values[0]
try:
postgres.execute(sql="INSERT INTO topic_rate_get(topic_url, topic_type, board_name, rate_get) VALUES(%s, %s, %s, 0)", params=(topic_url, topic_type, board_name))
            print('Insert succeeded')
        except Exception as e:
            print('Insert failed', e)
postgres.connect_close()
def delete_empty_topic_url():
""" 删除主题帖不存在的回复 """
postgres = DBPoolHelper(db_type='postgressql', dbname='dingxiangyuan', user='postgres', password='<PASSWORD>', host='localhost', port='5432')
data1 = pd.read_sql('select topic_url from posts_replies where floor=1', con=db_conn)
data2 = pd.read_sql('select distinct topic_url from posts_replies', con=db_conn)
topic_urls = set(data2['topic_url']) - set(data1['topic_url'])
# topic_urls = {'http://www.dxy.cn/bbs/topic/16938569', 'http://www.dxy.cn/bbs/topic/30229085', 'http://www.dxy.cn/bbs/topic/16568390', 'http://www.dxy.cn/bbs/topic/36096787', 'http://www.dxy.cn/bbs/topic/15125086', 'http://www.dxy.cn/bbs/topic/17948811', 'http://www.dxy.cn/bbs/topic/25201985', 'http://cardiovascular.dxy.cn/bbs/topic/36725028', 'http://www.dxy.cn/bbs/topic/7716905', 'http://www.dxy.cn/bbs/topic/14908986', 'http://www.dxy.cn/bbs/topic/40363469', 'http://www.dxy.cn/bbs/topic/25248231', 'http://www.dxy.cn/bbs/topic/11875242', 'http://cardiovascular.dxy.cn/bbs/topic/29575155', 'http://chest.dxy.cn/bbs/topic/11838188', 'http://www.dxy.cn/bbs/topic/18213734', 'http://www.dxy.cn/bbs/topic/1546642', 'http://www.dxy.cn/bbs/topic/28689847', 'http://www.dxy.cn/bbs/topic/24223943', 'http://www.dxy.cn/bbs/topic/11647123'}
# print(len(topic_urls))
for url in topic_urls:
try:
postgres.execute('delete from posts_replies where topic_url=%s', params=(url,))
            print('Delete succeeded')
        except Exception as e:
            print('Delete failed', e)
postgres.connect_close()
def update_user_url():
""" 更新用户url """
postgres = DBPoolHelper(db_type='postgressql', dbname='dingxiangyuan', user='postgres', password='<PASSWORD>', host='localhost', port='5432')
def url_unquote(url):
global Num
unquote_url = unquote(url)
print(url, unquote_url)
try:
postgres.execute(sql='update dingxiangke set user_url_unquote=%s where user_url=%s',
params=(unquote_url, url))
Num += 1
            print('Update succeeded', Num)
        except Exception as e:
            print('Update failed', e)
data = pd.read_sql(sql='select distinct user_url from dingxiangke', con=db_conn)
data['user_url'].apply(url_unquote)
def delete_user_invalid_posts():
postgres = DBPoolHelper(db_type='postgressql', dbname='dingxiangyuan', user='postgres', password='<PASSWORD>', host='localhost', port='5432')
data1 = pd.read_sql(sql='select distinct author_url from posts_replies', con=db_conn)
data2 = pd.read_sql(sql='select distinct user_url_unquote from dingxiangke', con=db_conn)
author_urls = set(data1['author_url']) - set(data2['user_url_unquote'])
# user_urls = set(data2['user_url_unquote']) - set(data1['author_url'])
# print(len(author_urls), len(user_urls))
for user_url in author_urls:
try:
postgres.execute(sql='delete from posts_replies where author_url=%s', params=(user_url,))
            print('Delete succeeded', user_url)
        except Exception as e:
            print('Delete failed', e)
postgres.connect_close()
def calc_board_size():
""" 计算社区规模 """
data1 = pd.read_sql(sql='''select board_name board, to_char(post_time, 'YYYY') as year, count(distinct topic_url) as topics_nums from posts_replies where floor=1 GROUP BY board_name, year''', con=db_conn)
data2 = pd.read_sql(sql='''select board_name board, to_char(post_time, 'YYYY') as year, count(distinct author_url) users_num from posts_replies GROUP BY board_name, year;''', con=db_conn)
data = pd.merge(data2, data1, on=['board', 'year'])
def board_size(row):
return round(row.users_num / row.topics_nums, 4)
    data['board_size'] = data.apply(board_size, axis=1)
data.to_excel('res/env_board_size.xlsx', engine='xlsxwriter', index=False)
def calc_board_members_level_quality():
""" 计算社区板块成员质量 """
data_list = []
board_names = ['心血管', '呼吸胸外', '肿瘤医学', '神经内外', '危重急救', '内分泌', '消化内科', '肾脏泌尿', '感染']
for board in board_names:
data = pd.read_sql(sql='''select board_name, to_char(posts_replies.post_time, 'YYYY') as year, user_level, count(distinct dingxiangke.user_url) user_count,sum(dingxiangke.posts) 用户总发帖数 from dingxiangke
inner join posts_replies on posts_replies.author_url=dingxiangke.user_url_unquote
where posts_replies.board_name=%s
GROUP BY board_name, year, user_level''', con=db_conn, params=(board, ))
for year in data.year.unique():
user_nums = data.loc[data.year == year, 'user_count'].sum()
high_user_nums = data.loc[(data.year == year) & (~data.user_level.isin(['常驻站友', '入门站友', '铁杆站友'])), 'user_count'].sum()
high_user_prop = round(high_user_nums / user_nums, 4)
data_list.append({'board': board, 'year': year, 'high_user_prop': high_user_prop})
df = pd.DataFrame(data=data_list)
df.to_excel('res/borad_user_quality.xlsx', index=False, engine='xlsxwriter')
# df.to_csv('res/borad_user_quality.csv', index=False, encoding='utf_8_sig')
def calc_board_members_identify_quality():
""" 计算社区板块成员质量 """
data_list = []
board_names = ['心血管', '呼吸胸外', '肿瘤医学', '神经内外', '危重急救', '内分泌', '消化内科', '肾脏泌尿', '感染']
for board in board_names:
data = | pd.read_sql(sql='''select board_name, to_char(posts_replies.post_time, 'YYYY') as year, author_identify, count(distinct dingxiangke.user_url) user_count from dingxiangke
inner join posts_replies on posts_replies.author_url=dingxiangke.user_url_unquote
where posts_replies.board_name='心血管'
GROUP BY board_name, year, author_identify''', con=db_conn, params=(board, )) | pandas.read_sql |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
        self._assert_setitem_series_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# int + bool -> int
exp = pd.Index([1, 0, 2, 3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_float64(self):
obj = pd.Float64Index([1., 2., 3., 4.])
self.assertEqual(obj.dtype, np.float64)
# float + int -> int
exp = pd.Index([1., 1., 2., 3., 4.])
self._assert_insert_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Index([1., 1.1, 2., 3., 4.])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# float + bool -> float
exp = pd.Index([1., 0., 2., 3., 4.])
self._assert_insert_conversion(obj, False, exp, np.float64)
# float + object -> object
exp = pd.Index([1., 'x', 2., 3., 4.])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
def test_insert_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'])
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_datetime64tz(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], tz='US/Eastern')
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'], tz='US/Eastern')
val = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_insert_conversion(obj, val, exp,
'datetime64[ns, US/Eastern]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
self.assertEqual(obj.dtype, 'timedelta64[ns]')
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
exp, 'timedelta64[ns]')
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_period(self):
obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
freq='M')
self.assertEqual(obj.dtype, 'period[M]')
# period + period => period
exp = pd.PeriodIndex(['2011-01', '2012-01', '2011-02',
'2011-03', '2011-04'], freq='M')
self._assert_insert_conversion(obj, pd.Period('2012-01', freq='M'),
exp, 'period[M]')
# period + datetime64 => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
pd.Timestamp('2012-01-01'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, np.object)
# period + int => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
1,
pd.Period('2011-02', freq='M'),
| pd.Period('2011-03', freq='M') | pandas.Period |
from utils import load_yaml
import pandas as pd
import click
from datetime import datetime, timedelta
import numpy as np
import os
cli = click.Group()
@cli.command()
@click.option('--lan', default='en')
@click.option('--config', default="configs/configuration.yaml")
@click.option('--country_code', default="US")  # was missing; click cannot pass country_code without it
def dump(lan, config, country_code):
# load the tweets of the requested language
config = load_yaml(config)[lan]
data = pd.read_csv(config['path']+"tweets_id_0.csv")
tweets = data[data.is_retweet == False]
# fetch only tweets from yesterday
tweets.set_index(pd.to_datetime(tweets.created_at, format='%a %b %d %H:%M:%S +0000 %Y'), inplace=True)
yesterday = datetime.now() - timedelta(1)
# filter past ones (the cron should run at 00:00:01)
tweets = tweets[tweets.index >= yesterday]
tweets.to_csv(config['path']+"."+str(yesterday)[:10]+"."+country_code+".csv", index=False)
@cli.command()
@click.option('--lan', default='en')
@click.option('--config', default="configs/configuration.yaml")
@click.option('--days', default=7)
@click.option('--country_code', default="US")
def aggregate_n_dump(lan, config, days, country_code):
# load the tweets of the requested language
config = load_yaml(config)[lan]
paths = [filepath for filepath in os.listdir(config['path']) if filepath.endswith(".csv")]
dataframes = [pd.read_csv(config['path']+filepath, lineterminator='\n') for filepath in paths]
data = pd.concat(dataframes)
tweets = data[data.is_retweet == False]
tweets['day'] = pd.to_datetime(tweets.created_at, format='%a %b %d %H:%M:%S +0000 %Y').dt.strftime('%Y-%m-%d')
    # keep only tweets from the past 'days' days
tweets.set_index(pd.to_datetime(tweets.created_at, format='%a %b %d %H:%M:%S +0000 %Y'), inplace=True)
past = datetime.now() - timedelta(days)
# filter past ones (the cron should run at 00:00:01)
tweets = tweets[tweets.index >= past]
tweets = tweets[tweets.country_code == country_code]
tweets = tweets[tweets.full_name.notna()]
tweets["state"] = tweets.full_name.apply(find_us_state)
places = | pd.DataFrame() | pandas.DataFrame |
# ------------------------------------------
# Copyright (c) Rygor. 2021.
# ------------------------------------------
""" Configuration file management """
import os
import pathlib
import sys
import datetime
import errno
import click
from appdirs import user_data_dir
import pandas as pd
from typing import Optional
class Config:
"""Configuration file management"""
def __init__(self, path="", ini_name="") -> None:
self.path = path if path != "" else user_data_dir("nbrb_by", appauthor=False)
self.ini_name = ini_name if ini_name != "" else "nbrb_config.ini"
self.config_path = os.path.join(self.set_path(path=self.path), self.ini_name)
if not os.path.isfile(self.config_path):
click.echo("Загружаю справочник валют")
ret = self.create()
if ret is None:
click.echo("List of currencies is reloaded")
else:
click.echo(ret)
def read(self, currency: str, datum: str) -> str:
"""Return currency information object after reading configuration file"""
date_to_compare = datetime.datetime.strptime(datum, "%Y-%m-%d").date()
currency = str(currency).upper()
data = pd.read_json(self.config_path, orient="records", convert_dates=False)
data["Cur_DateStart"] = | pd.to_datetime(data["Cur_DateStart"]) | pandas.to_datetime |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import seaborn as sns
from scipy import stats
import math
def clean_data(df):
"""
INPUT
    df - pandas dataframe of listings
OUTPUT
X - A matrix holding all of the variables you want to consider when predicting the response
y - the corresponding response vector
    This function cleans df using the following steps to produce X and y:
    1. Drop rows with non-positive prices and outlier prices (2950 and above)
2. Create y as the price column, transformed by log
3. Create X from selected columns
4. Deal with missing values
5. Create dummy variables for selected categorical variables, drop the original columns
"""
    # Drop rows with non-positive prices and price outliers (>= 2950)
df = df[df.price > 0]
df = df[df.price < 2950]
# Create y
y = df['price'].apply(math.log)
# Select columns for X
potential_vars = ['host_listings_count',
'calculated_host_listings_count_private_rooms',
'neighbourhood_cleansed',
'room_type',
'property_type',
'beds',
'availability_365',
'number_of_reviews',
'neighborhood_overview',
'space',
'notes',
'transit',
'access',
'interaction',
'house_rules',
'host_about',
'host_is_superhost',
'host_has_profile_pic',
'host_identity_verified',
'instant_bookable',
'require_guest_profile_picture',
'require_guest_phone_verification',]
bool_vars = ['host_is_superhost',
'host_has_profile_pic',
'host_identity_verified',
'instant_bookable',
'require_guest_profile_picture',
'require_guest_phone_verification']
free_text_vars = ['neighborhood_overview',
'space',
'notes',
'transit',
'access',
'interaction',
'house_rules',
'host_about']
df = df[potential_vars]
# Deal with missing values
    # assign the results back: fillna(inplace=True) on the df[...] column subsets
    # below would operate on a copy and silently leave the NaNs in place
    df['number_of_reviews'] = df['number_of_reviews'].fillna(0)
    df[bool_vars] = df[bool_vars].fillna('f')
    df[free_text_vars] = df[free_text_vars].fillna('')
def translate_bool(col):
for index, value in col.iteritems():
col[index] = 1 if value == 't' else 0
return col
def create_bool(col):
for index, value in col.iteritems():
col[index] = 0 if value == '' else 1
return col
fill_mean = lambda col: col.fillna(col.mean())
num_vars = df.select_dtypes(include=['int', 'float']).columns
df[num_vars] = df[num_vars].apply(fill_mean, axis=0)
    df[bool_vars] = df[bool_vars].apply(translate_bool, axis=0)
    # .dtype is not assignable on a DataFrame selection; cast explicitly instead
    df[bool_vars] = df[bool_vars].astype(int)
    df[free_text_vars] = df[free_text_vars].apply(create_bool, axis=0)
    df[free_text_vars] = df[free_text_vars].astype(int)
# Dummy the categorical variables
cat_vars = ['neighbourhood_cleansed', 'room_type', 'property_type']
for var in cat_vars:
# for each cat add dummy var, drop original column
df = pd.concat([df.drop(var, axis=1), pd.get_dummies(df[var], prefix=var, prefix_sep='_', drop_first=True)], axis=1)
X = df
return X, y
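# Hedged usage sketch (added for clarity, not part of the original analysis): it shows
# the intended call pattern for clean_data(); the helper name and the default CSV path
# are illustrative, and the price cleanup mirrors what main() below does.
def _example_clean_data(path='data/listings_boston.csv'):
    df_listings = pd.read_csv(path, dtype={"price": str})
    df_listings['price'] = (df_listings['price']
                            .str.replace(',', '', regex=False)
                            .str.replace('$', '', regex=False)
                            .astype(float))
    X, y = clean_data(df_listings)  # X: dummy-encoded features, y: log(price)
    return X, y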
def find_optimal_lm_mod(X, y, cutoffs, test_size = .30, random_state=42, plot=True):
'''
INPUT
X - pandas dataframe, X matrix
y - pandas dataframe, response variable
cutoffs - list of ints, cutoff for number of non-zero values in dummy categorical vars
test_size - float between 0 and 1, default 0.3, determines the proportion of data as test data
random_state - int, default 42, controls random state for train_test_split
plot - boolean, default 0.3, True to plot result
OUTPUT
r2_scores_test - list of floats of r2 scores on the test data
r2_scores_train - list of floats of r2 scores on the train data
lm_model - model object from sklearn
X_train, X_test, y_train, y_test - output from sklearn train test split used for optimal model
'''
r2_scores_test, r2_scores_train, num_feats, results = [], [], [], dict()
for cutoff in cutoffs:
#reduce X matrix
reduce_X = X.iloc[:, np.where((X.sum() > cutoff) == True)[0]]
num_feats.append(reduce_X.shape[1])
#split the data into train and test
X_train, X_test, y_train, y_test = train_test_split(reduce_X, y, test_size = test_size, random_state=random_state)
#fit the model and obtain pred response
lm_model = LinearRegression(normalize=True)
lm_model.fit(X_train, y_train)
y_test_preds = lm_model.predict(X_test)
y_train_preds = lm_model.predict(X_train)
#append the r2 value from the test set
r2_scores_test.append(r2_score(y_test, y_test_preds))
r2_scores_train.append(r2_score(y_train, y_train_preds))
results[str(cutoff)] = r2_score(y_test, y_test_preds)
if plot:
plt.plot(num_feats, r2_scores_test, label="Test", alpha=.5)
plt.plot(num_feats, r2_scores_train, label="Train", alpha=.5)
plt.xlabel('Number of Features')
plt.ylabel('Rsquared')
plt.title('Rsquared by Number of Features')
plt.legend(loc=1)
plt.show()
best_cutoff = max(results, key=results.get)
#reduce X matrix
reduce_X = X.iloc[:, np.where((X.sum() > int(best_cutoff)) == True)[0]]
num_feats.append(reduce_X.shape[1])
#split the data into train and test
X_train, X_test, y_train, y_test = train_test_split(reduce_X, y, test_size = test_size, random_state=random_state)
#fit the model
lm_model = LinearRegression(normalize=True)
lm_model.fit(X_train, y_train)
return r2_scores_test, r2_scores_train, lm_model, X_train, X_test, y_train, y_test
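# Hedged usage sketch (illustrative only, not part of the original script): shows the
# intended call pattern for find_optimal_lm_mod() on the X, y returned by clean_data();
# the cutoff values below are example thresholds, not the ones used in the analysis.
def _example_model_search(X, y):
    cutoffs = [5000, 3500, 2500, 1000, 100, 50, 30, 25]
    return find_optimal_lm_mod(X, y, cutoffs, test_size=.30,
                               random_state=42, plot=False)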
def main():
plot = False # set to true if you would like to see plots
print_log = True # set to true if you would like to see stats outputted to console
print_result = True
# Data Exploration
desired_width=320
pd.set_option('display.width', desired_width)
pd.set_option('display.max_columns', 50)
# Get a sense of the numerical data in the available datasets.
df_listings = pd.read_csv('data/listings_boston.csv', dtype={"price": str,
"weekly_price": str,
"monthly_price": str,
"security_deposit": str,
"cleaning_fee": str,
"extra_people": str,
"host_response_rate": str})
# clean up price data to make it numeric
df_listings.loc[:, "price"] = df_listings["price"].str.replace(',', '').str.replace('$', '').astype('float')
df_listings.loc[:, "weekly_price"] = df_listings["weekly_price"].str.replace(',', '').str.replace('$', '').astype('float')
df_listings.loc[:, "monthly_price"] = df_listings["monthly_price"].str.replace(',', '').str.replace('$', '').astype('float')
df_listings.loc[:, "security_deposit"] = df_listings["security_deposit"].str.replace(',', '').str.replace('$', '').astype('float')
df_listings.loc[:, "cleaning_fee"] = df_listings["cleaning_fee"].str.replace(',', '').str.replace('$', '').astype('float')
df_listings.loc[:, "extra_people"] = df_listings["extra_people"].str.replace(',', '').str.replace('$', '').astype('float')
df_listings["host_response_rate"].fillna("0", inplace=True)
df_listings.loc[:, "host_response_rate"] = df_listings["host_response_rate"].str.replace('%', '').astype('int')
if print_log:
print(df_listings.describe())
df_neighborhoods = pd.read_csv('data/neighbourhoods_boston.csv')
if print_log:
print(df_neighborhoods.describe())
df_reviews = | pd.read_csv('data/reviews_boston.csv') | pandas.read_csv |
"""
Get data for past matches
"""
import requests
import pandas as pd
import json
import os
from mappings import regions_map, game_mode_map, match_cols, player_cols
# get the starting gameID for the API calls
try:
final_gameID_df = pd.read_csv(os.path.join('output', 'matchData.csv'), usecols=['match_id'])
if len(final_gameID_df) == 1:
final_gameID = 5992892504
else:
final_gameID = final_gameID_df.min()[0] - 1
except pd.errors.EmptyDataError:
final_gameID = 5992892504
# instantiate dataframe that will hold API call processed data
total_match_df = pd.DataFrame()
try:
for match_id in range(final_gameID, final_gameID - 300, -1):
match = requests.get('https://api.opendota.com/api/matches/{}'.format(match_id))
match = json.loads(match.text)
if len(match) == 1:
continue
match_df = pd.json_normalize(match)
match_missing_cols = set(match_cols).difference(match_df.columns)
match_existing_cols = set(match_cols).intersection(match_df.columns)
match_df = match_df[match_existing_cols]
match_missing_df = pd.DataFrame(columns=match_missing_cols)
match_df = | pd.concat([match_df, match_missing_df], 1) | pandas.concat |
import os
from nose.tools import *
import unittest
import pandas as pd
import numpy as np
import py_entitymatching as em
from py_entitymatching.utils.generic_helper import get_install_path
import py_entitymatching.catalog.catalog_manager as cm
import py_entitymatching.utils.catalog_helper as ch
from py_entitymatching.io.parsers import read_csv_metadata
#import sys
#sys.path.insert(0, '../debugblocker')
#import debugblocker as db
import py_entitymatching.debugblocker.debugblocker as db
from operator import itemgetter
from array import array
datasets_path = os.sep.join([get_install_path(), 'tests', 'test_datasets'])
catalog_datasets_path = os.sep.join([get_install_path(), 'tests', 'test_datasets', 'catalog'])
debugblocker_datasets_path = os.sep.join([get_install_path(), 'tests', 'test_datasets', 'debugblocker'])
path_a = os.sep.join([datasets_path, 'A.csv'])
path_b = os.sep.join([datasets_path, 'B.csv'])
path_c = os.sep.join([datasets_path, 'C.csv'])
class DebugblockerTestCases(unittest.TestCase):
def test_validate_types_1(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable_ID',
fk_rtable='rtable_ID', key = '_id')
A_key = em.get_key(A)
B_key = em.get_key(B)
attr_corres = None
db._validate_types(A, B, C, 100, attr_corres, False)
def test_validate_types_2(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
A_key = em.get_key(A)
B_key = em.get_key(B)
C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable_' +
A_key, fk_rtable='rtable_' + B_key, key = '_id')
attr_corres = [('ID', 'ID'), ('name', 'name'),
('birth_year', 'birth_year'),
('hourly_wage', 'hourly_wage'),
('address', 'address'),
('zipcode', 'zipcode')]
db._validate_types(A, B, C, 100, attr_corres, False)
def test_check_input_field_correspondence_list_1(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
field_corres_list = None
db._check_input_field_correspondence_list(A, B, field_corres_list)
def test_check_input_field_correspondence_list_2(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
field_corres_list = []
db._check_input_field_correspondence_list(A, B, field_corres_list)
@raises(AssertionError)
def test_check_input_field_correspondence_list_3(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
field_corres_list = [('adsf', 'fdsa'), 'asdf']
db._check_input_field_correspondence_list(A, B, field_corres_list)
@raises(AssertionError)
def test_check_input_field_correspondence_list_4(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
field_corres_list = [('asdf', 'fdsa')]
db._check_input_field_correspondence_list(A, B, field_corres_list)
@raises(AssertionError)
def test_check_input_field_correspondence_list_5(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
field_corres_list = [('address', 'fdsa')]
db._check_input_field_correspondence_list(A, B, field_corres_list)
def test_check_input_field_correspondence_list_7(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
field_corres_list = [('zipcode', 'zipcode'),
('birth_year', 'birth_year')]
db._check_input_field_correspondence_list(A, B, field_corres_list)
def test_get_field_correspondence_list_1(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
A_key = em.get_key(A)
B_key = em.get_key(B)
expected_list = [('ID', 'ID'), ('name', 'name'),
('birth_year', 'birth_year'),
('hourly_wage', 'hourly_wage'),
('address', 'address'),
('zipcode', 'zipcode')]
attr_corres = None
corres_list = db._get_field_correspondence_list(
A, B, A_key, B_key, attr_corres)
self.assertEqual(corres_list, expected_list)
attr_corres = []
corres_list = db._get_field_correspondence_list(
A, B, A_key, B_key, attr_corres)
self.assertEqual(corres_list, expected_list)
def test_get_field_correspondence_list_2(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
A_key = em.get_key(A)
B_key = em.get_key(B)
expected_list = [('ID', 'ID'), ('name', 'name'),
('address', 'address'),
('zipcode', 'zipcode')]
attr_corres = [('ID', 'ID'), ('name', 'name'),
('address', 'address'),
('zipcode', 'zipcode')]
corres_list = db._get_field_correspondence_list(
A, B, A_key, B_key, attr_corres)
self.assertEqual(corres_list, expected_list)
def test_get_field_correspondence_list_3(self):
data = [[1, 'asdf', 'a0001']]
A = pd.DataFrame(data)
A.columns = ['Id', 'Title', 'ISBN']
A_key = 'Id'
B = pd.DataFrame(data)
B.columns = ['Id', 'title', 'ISBN']
B_key = 'Id'
attr_corres = []
corres_list = db._get_field_correspondence_list(
A, B, A_key, B_key, attr_corres)
expected_list = [('Id', 'Id'), ('ISBN', 'ISBN')]
self.assertEqual(corres_list, expected_list)
@raises(AssertionError)
def test_get_field_correspondence_list_4(self):
data = [[1, 'asdf', 'a0001']]
A = pd.DataFrame(data)
A.columns = ['ID', 'Title', 'isbn']
A_key = 'ID'
B = pd.DataFrame(data)
B.columns = ['Id', 'title', 'ISBN']
B_key = 'Id'
attr_corres = []
db._get_field_correspondence_list(
A, B, A_key, B_key, attr_corres)
def test_get_field_correspondence_list_5(self):
A = pd.DataFrame([[0, 'A', 0.11, 'ASDF']])
A.columns = ['ID', 'name', 'price', 'desc']
em.set_key(A, 'ID')
A_key = em.get_key(A)
B = pd.DataFrame([['B', 'B001', 'ASDF', 0.111]])
B.columns = ['item_name', 'item_id', 'item_desc', 'item_price']
em.set_key(B, 'item_id')
B_key = em.get_key(B)
attr_corres = [('name', 'item_name'),
('price', 'item_price')]
actual_attr_corres = db._get_field_correspondence_list(
A, B, A_key, B_key, attr_corres)
expected_attr_corres = [('name', 'item_name'),
('price', 'item_price'),
('ID', 'item_id')]
self.assertEqual(expected_attr_corres, actual_attr_corres)
def test_build_col_name_index_dict_1(self):
A = pd.DataFrame([[]])
A.columns = []
col_index = db._build_col_name_index_dict(A)
def test_build_col_name_index_dict_2(self):
A = pd.DataFrame([[0, 'A', 0.11, 'ASDF']])
A.columns = ['ID', 'name', 'price', 'desc']
em.set_key(A, 'ID')
col_index = db._build_col_name_index_dict(A)
self.assertEqual(col_index['ID'], 0)
self.assertEqual(col_index['name'], 1)
self.assertEqual(col_index['price'], 2)
self.assertEqual(col_index['desc'], 3)
@raises(AssertionError)
def test_filter_corres_list_1(self):
A = pd.DataFrame([[0, 20, 0.11, 4576]])
A.columns = ['ID', 'age', 'price', 'zip code']
em.set_key(A, 'ID')
B = pd.DataFrame([[0, 240, 0.311, 4474]])
B.columns = ['ID', 'age', 'price', 'zip code']
em.set_key(A, 'ID')
A_key = 'ID'
B_key = 'ID'
ltable_col_dict = db._build_col_name_index_dict(A)
rtable_col_dict = db._build_col_name_index_dict(B)
attr_corres = [('ID', 'ID'), ('age', 'age'),
('price', 'price'),
('zip code', 'zip code')]
db._filter_corres_list(A, B, A_key, B_key, ltable_col_dict,
rtable_col_dict, attr_corres)
def test_filter_corres_list_2(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
A_key = em.get_key(A)
B_key = em.get_key(B)
ltable_col_dict = db._build_col_name_index_dict(A)
rtable_col_dict = db._build_col_name_index_dict(B)
attr_corres = [('ID', 'ID'), ('name', 'name'),
('birth_year', 'birth_year'),
('hourly_wage', 'hourly_wage'),
('address', 'address'),
('zipcode', 'zipcode')]
expected_filtered_attr = [('ID', 'ID'), ('name', 'name'),
('address', 'address')]
db._filter_corres_list(A, B, A_key, B_key, ltable_col_dict,
rtable_col_dict, attr_corres)
self.assertEqual(expected_filtered_attr, attr_corres)
def test_get_filtered_table(self):
A = pd.DataFrame([['a1', 'A', 0.11, 53704]])
A.columns = ['ID', 'name', 'price', 'zip code']
em.set_key(A, 'ID')
B = pd.DataFrame([['b1', 'A', 0.11, 54321]])
B.columns = ['ID', 'name', 'price', 'zip code']
em.set_key(B, 'ID')
A_key = 'ID'
B_key = 'ID'
ltable_col_dict = db._build_col_name_index_dict(A)
rtable_col_dict = db._build_col_name_index_dict(B)
attr_corres = [('ID', 'ID'), ('name', 'name'),
('price', 'price'),
('zip code', 'zip code')]
db._filter_corres_list(A, B, A_key, B_key, ltable_col_dict,
rtable_col_dict, attr_corres)
filtered_A, filtered_B = db._get_filtered_table(A, B, attr_corres)
expected_filtered_A = pd.DataFrame([['a1', 'A']])
expected_filtered_A.columns = ['ID', 'name']
em.set_key(expected_filtered_A, 'ID')
expected_filtered_B = pd.DataFrame([['b1', 'A']])
expected_filtered_B.columns = ['ID', 'name']
em.set_key(expected_filtered_B, 'ID')
self.assertEqual(expected_filtered_A.equals(filtered_A), True)
self.assertEqual(expected_filtered_B.equals(filtered_B), True)
@raises(AssertionError)
def test_get_feature_weight_1(self):
A = []
dataframe = pd.DataFrame(A)
db._get_feature_weight(dataframe)
def test_get_feature_weight_2(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
A_key = em.get_key(A)
B_key = em.get_key(B)
cols_A = list(A.columns)
cols_B = list(B.columns)
corres_list = [(cols_A[0], cols_B[0]), (cols_A[1], cols_B[1]), (cols_A[4],
cols_B[4]),
(cols_A[5], cols_B[5])]
A_filtered, B_filtered = db._get_filtered_table(
A, B, corres_list)
A_wlist = db._get_feature_weight(A_filtered)
expected_A_wlist = [2.0, 2.0, 2.0, 1.4]
self.assertEqual(A_wlist, expected_A_wlist)
B_wlist = db._get_feature_weight(B_filtered)
expected_B_wlist = [2.0, 2.0, 2.0, 1.3333333333333333]
self.assertEqual(B_wlist, expected_B_wlist)
def test_get_feature_weight_3(self):
table = [[''], [np.nan]]
dataframe = pd.DataFrame(table)
weight_list = db._get_feature_weight(dataframe)
self.assertEqual(weight_list, [0.0])
def test_select_features_1(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
A_key = em.get_key(A)
B_key = em.get_key(B)
actual_selected_features = db._select_features(A, B, A_key, B_key)
expected_selected_features = [1, 3, 4, 2, 5]
self.assertEqual(actual_selected_features, expected_selected_features)
def test_select_features_2(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
A_key = em.get_key(A)
B_key = em.get_key(B)
cols_A = list(A.columns)
cols_B = list(B.columns)
corres_list = [(cols_A[0], cols_B[0]), (cols_A[1], cols_B[1]), (cols_A[4],
cols_B[4])]
A_filtered, B_filtered = db._get_filtered_table(A, B, corres_list)
actual_selected_features = db._select_features(
A_filtered, B_filtered, A_key, B_key)
expected_selected_features = [1, 2]
self.assertEqual(actual_selected_features, expected_selected_features)
def test_select_features_3(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
A_key = em.get_key(A)
B_key = em.get_key(B)
cols_A = list(A.columns)
cols_B = list(B.columns)
corres_list = [(cols_A[0], cols_B[0])]
A_filtered, B_filtered = db._get_filtered_table(A, B, corres_list)
actual_selected_features = db._select_features(
A_filtered, B_filtered, A_key, B_key)
expected_selected_features = []
self.assertEqual(actual_selected_features, expected_selected_features)
@raises(AssertionError)
def test_select_features_4(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
A_key = em.get_key(A)
B_key = em.get_key(B)
cols_A = list(A.columns)
cols_B = list(B.columns)
A_field_set = [0, 1, 2]
B_field_set = [0, 1, 2, 3]
A_field_set = list(itemgetter(*A_field_set)(cols_A))
B_field_set = list(itemgetter(*B_field_set)(cols_B))
A_filtered = A[A_field_set]
B_filtered = B[B_field_set]
db._select_features(
A_filtered, B_filtered, A_key, B_key)
@raises(AssertionError)
def test_select_features_5(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
A_key = em.get_key(A)
B_key = em.get_key(B)
cols_A = list(A.columns)
cols_B = list(B.columns)
A_field_set = [0, 1, 2, 3]
B_field_set = [0, 1, 2]
A_field_set = list(itemgetter(*A_field_set)(cols_A))
B_field_set = list(itemgetter(*B_field_set)(cols_B))
A_filtered = A[A_field_set]
B_filtered = B[B_field_set]
db._select_features(
A_filtered, B_filtered, A_key, B_key)
def test_build_id_to_index_map_1(self):
A = read_csv_metadata(path_a, key='ID')
key = em.get_key(A)
actual_rec_id_to_idx = db._build_id_to_index_map(A, key)
expected_rec_id_to_idx = {'a1': 0, 'a3': 2, 'a2': 1, 'a5': 4, 'a4': 3}
self.assertEqual(actual_rec_id_to_idx, expected_rec_id_to_idx)
@raises(AssertionError)
def test_build_id_to_index_map_2(self):
table = [['a1', 'hello'], ['a1', 'world']]
key = 'ID'
dataframe = pd.DataFrame(table)
dataframe.columns = ['ID', 'title']
em.set_key(dataframe, key)
db._build_id_to_index_map(dataframe, key)
def test_replace_nan_to_empty_1(self):
field = np.nan
self.assertEqual(db._replace_nan_to_empty(field), '')
def test_replace_nan_to_empty_2(self):
field = ''
self.assertEqual(db._replace_nan_to_empty(field), '')
field = 'string'
self.assertEqual(db._replace_nan_to_empty(field), 'string')
def test_replace_nan_to_empty_3(self):
field = 1
self.assertEqual(db._replace_nan_to_empty(field), '1')
field = 3.57
self.assertEqual(db._replace_nan_to_empty(field), '4')
field = 1234.5678e5
self.assertEqual(db._replace_nan_to_empty(field), '123456780')
def test_get_tokenized_column_1(self):
column = []
actual_ret_column = db._get_tokenized_column(column)
expected_ret_column = []
self.assertEqual(actual_ret_column, expected_ret_column)
def test_get_tokenized_column_2(self):
column = ['hello world', np.nan, 'how are you',
'', 'this is a blocking debugger']
actual_ret_column = db._get_tokenized_column(column)
expected_ret_column = [['hello', 'world'], [''],
['how', 'are', 'you'], [''],
['this', 'is', 'a', 'blocking', 'debugger']]
self.assertEqual(actual_ret_column, expected_ret_column)
def test_get_tokenized_table_1(self):
A = read_csv_metadata(path_a, key='ID')
A_key = em.get_key(A)
feature_list = range(len(A.columns))
actual_record_list = db._get_tokenized_table(A, A_key, feature_list)
expected_record_list = [[('a1', 0), ('kevin', 1), ('smith', 1), ('1989', 2), ('30', 3),
('607', 4), ('from', 4), ('st,', 4), ('san', 4), ('francisco', 4), ('94107',5)],
[('a2', 0), ('michael', 1), ('franklin', 1), ('1988', 2), ('28', 3), ('1652', 4),
('stockton', 4), ('st,', 4), ('san', 4), ('francisco', 4), ('94122', 5)], [('a3', 0),
('william', 1), ('bridge', 1), ('1986', 2), ('32', 3), ('3131', 4), ('webster', 4),
('st,', 4), ('san', 4), ('francisco', 4), ('94107', 5)], [('a4', 0), ('binto', 1),
('george', 1), ('1987', 2), ('32', 3), ('423', 4), ('powell', 4), ('st,', 4),
('san', 4), ('francisco', 4), ('94122', 5)], [('a5', 0), ('alphonse', 1), ('kemper', 1),
('1984', 2), ('35', 3), ('1702', 4), ('post', 4), ('street,', 4), ('san', 4),
('francisco', 4), ('94122', 5)]]
self.assertEqual(actual_record_list, expected_record_list)
def test_get_tokenized_table_2(self):
B = read_csv_metadata(path_b, key='ID')
B_key = em.get_key(B)
feature_list = [0, 1, 3]
actual_record_list = db._get_tokenized_table(B, B_key, feature_list)
expected_record_list = [[('b1', 0), ('mark', 1), ('levene', 1), ('30', 2)],
[('b2', 0), ('bill', 1), ('bridge', 1), ('32', 2)],
[('b3', 0), ('mike', 1), ('franklin', 1), ('28', 2)],
[('b4', 0), ('joseph', 1), ('kuan', 1), ('26', 2)],
[('b5', 0), ('alfons', 1), ('kemper', 1), ('35', 2)],
[('b6', 0), ('michael',1), ('brodie', 1), ('32', 2)]]
self.assertEqual(actual_record_list, expected_record_list)
def test_get_tokenized_table_3(self):
table = [[1, 'abc abc asdf', '123-3456-7890', np.nan, '',
'135 east abc st'],
[2, 'aaa bbb', '000-111-2222', '', '', '246 west abc st'],
[3, 'cc dd', '123-123-1231', 'cc', 'unknown', ' 246 west def st']]
dataframe = pd.DataFrame(table)
dataframe.columns = ['ID', 'name', 'phone', 'department', 'school', 'address']
key = 'ID'
em.set_key(dataframe, key)
feature_list = [1, 3, 4, 5]
actual_record_list = db._get_tokenized_table(dataframe, key, feature_list)
expected_record_list = [[('abc', 0), ('abc_1', 0), ('asdf', 0), ('135', 3), ('east', 3),
('abc_2', 3), ('st', 3)], [('aaa', 0), ('bbb', 0), ('246', 3),
('west', 3), ('abc', 3), ('st', 3)], [('cc', 0), ('dd', 0),
('cc_1', 1), ('unknown', 2), ('246', 3), ('west', 3),
('def', 3), ('st', 3)]]
self.assertEqual(actual_record_list, expected_record_list)
def test_build_global_token_order_impl_1(self):
record_list = []
actual_dict = {}
expected_dict = {}
db._build_global_token_order_impl(record_list, actual_dict)
self.assertEqual(actual_dict, expected_dict)
record_list = [[], [], []]
actual_dict = {}
expected_dict = {}
db._build_global_token_order_impl(record_list, actual_dict)
self.assertEqual(actual_dict, expected_dict)
def test_build_global_token_order_impl_2(self):
record_list = [['c', 'b', 'a'], [], ['b', 'c'], ['c', 'c']]
actual_dict = {}
expected_dict = {'a': 1, 'c': 4, 'b': 2}
db._build_global_token_order_impl(record_list, actual_dict)
self.assertEqual(actual_dict, expected_dict)
def test_build_global_token_order_1(self):
l_record_list = []
r_record_list = []
expected_order_dict = {}
expected_token_index_dict = {}
order_dict, token_index_dict = db._build_global_token_order(l_record_list, r_record_list)
self.assertEqual(order_dict, expected_order_dict)
self.assertEqual(token_index_dict, expected_token_index_dict)
def test_build_global_token_order_2(self):
l_record_list = [[], [], []]
r_record_list = [[]]
expected_order_dict = {}
expected_token_index_dict = {}
order_dict, token_index_dict = db._build_global_token_order(l_record_list, r_record_list)
self.assertEqual(order_dict, expected_order_dict)
self.assertEqual(token_index_dict, expected_token_index_dict)
def test_build_global_token_order_3(self):
l_record_list = [['c', 'b', 'a'], [], ['b', 'c'], ['c', 'c']]
r_record_list = [['e'], ['b', 'a']]
expected_token_index_dict = {0: 'e', 1: 'a', 2: 'b', 3: 'c'}
order_dict, token_index_dict = db._build_global_token_order(l_record_list, r_record_list)
self.assertEqual(order_dict['e'], 0)
self.assertEqual(order_dict['a'], 1)
self.assertEqual(order_dict['b'], 2)
self.assertEqual(order_dict['c'], 3)
self.assertEqual(token_index_dict, expected_token_index_dict)
def test_replace_token_with_numeric_index_1(self):
l_record_list = []
r_record_list = []
order_dict, token_index_dict = db._build_global_token_order(l_record_list, r_record_list)
expected_l_record_list = []
expected_r_record_list = []
db._replace_token_with_numeric_index(l_record_list, order_dict)
db._replace_token_with_numeric_index(r_record_list, order_dict)
self.assertEqual(l_record_list, expected_l_record_list)
self.assertEqual(r_record_list, expected_r_record_list)
def test_replace_token_with_numeric_index_2(self):
l_record_list = [[], []]
r_record_list = [[]]
order_dict, token_index_dict = db._build_global_token_order(l_record_list, r_record_list)
expected_l_record_list = [[], []]
expected_r_record_list = [[]]
db._replace_token_with_numeric_index(l_record_list, order_dict)
db._replace_token_with_numeric_index(r_record_list, order_dict)
self.assertEqual(l_record_list, expected_l_record_list)
self.assertEqual(r_record_list, expected_r_record_list)
def test_replace_token_with_numeric_index_3(self):
l_record_list = [[('c', 0), ('b', 0), ('a', 1)], [('b', 0), ('c', 1)]]
r_record_list = [[('e', 0), ('b', 0)], [('b', 0), ('a', 1)]]
order_dict, token_index_dict = db._build_global_token_order(l_record_list, r_record_list)
expected_l_record_list = [[(2, 0), (3, 0), (1, 1)], [(3, 0), (2, 1)]]
expected_r_record_list = [[(0, 0), (3, 0)], [(3, 0), (1, 1)]]
db._replace_token_with_numeric_index(l_record_list, order_dict)
db._replace_token_with_numeric_index(r_record_list, order_dict)
self.assertEqual(l_record_list, expected_l_record_list)
self.assertEqual(r_record_list, expected_r_record_list)
def test_sort_record_tokens_by_global_order_1(self):
record_list = []
expected_record_list = []
db._sort_record_tokens_by_global_order(record_list)
self.assertEqual(record_list, expected_record_list)
def test_sort_record_tokens_by_global_order_2(self):
record_list = [[], []]
expected_record_list = [[], []]
db._sort_record_tokens_by_global_order(record_list)
self.assertEqual(record_list, expected_record_list)
def test_sort_record_tokens_by_global_order_3(self):
record_list = [[(3, 1), (4, 2), (100, 0), (1, 2)], [(2, 1), (0, 1), (10, 3)]]
expected_record_list = [[(1, 2), (3, 1), (4, 2), (100, 0)], [(0, 1),
(2,1), (10, 3)]]
db._sort_record_tokens_by_global_order(record_list)
self.assertEqual(record_list, expected_record_list)
def test_sort_record_tokens_by_global_order_4(self):
l_record_list = [[('c', 0), ('b', 0), ('a', 1)], [('b', 0), ('c', 1)]]
r_record_list = [[('e', 0), ('b', 0)], [('b', 0), ('a', 1)]]
order_dict, token_index_dict = db._build_global_token_order(l_record_list, r_record_list)
expected_l_record_list = [[(1, 1), (2, 0), (3, 0)], [(2, 1), (3, 0)]]
expected_r_record_list = [[(0, 0), (3, 0)], [(1, 1), (3, 0)]]
db._replace_token_with_numeric_index(l_record_list, order_dict)
db._replace_token_with_numeric_index(r_record_list, order_dict)
db._sort_record_tokens_by_global_order(l_record_list)
db._sort_record_tokens_by_global_order(r_record_list)
self.assertEqual(l_record_list, expected_l_record_list)
self.assertEqual(r_record_list, expected_r_record_list)
def test_split_record_token_and_index_1(self):
record_list = []
record_token_list, record_index_list =\
db._split_record_token_and_index(record_list)
expected_record_token_list = []
expected_record_index_list = []
self.assertEqual(record_token_list, expected_record_token_list)
self.assertEqual(record_index_list, expected_record_index_list)
def test_split_record_token_and_index_2(self):
record_list = [[], []]
record_token_list, record_index_list =\
db._split_record_token_and_index(record_list)
expected_record_token_list = [array('I'), array('I')]
expected_record_index_list = [array('I'), array('I')]
self.assertEqual(record_token_list, expected_record_token_list)
self.assertEqual(record_index_list, expected_record_index_list)
def test_split_record_token_and_index_3(self):
record_list = [[(1, 2), (3, 1), (4, 2), (100, 0)], [(0, 1), (2, 1), (10, 3)]]
record_token_list, record_index_list =\
db._split_record_token_and_index(record_list)
expected_record_token_list = [array('I', [1, 3, 4, 100]), array('I', [0, 2, 10])]
expected_record_index_list = [array('I', [2, 1, 2, 0]), array('I', [1, 1, 3])]
self.assertEqual(record_token_list, expected_record_token_list)
self.assertEqual(record_index_list, expected_record_index_list)
def test_index_candidate_set_1(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
l_key = cm.get_key(A)
r_key = cm.get_key(B)
C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable_' +
l_key, fk_rtable='rtable_' + r_key, key = '_id')
lrecord_id_to_index_map = db._build_id_to_index_map(A, l_key)
rrecord_id_to_index_map = db._build_id_to_index_map(B, r_key)
expected_cand_set = {0: set([0, 1, 5]), 1: set([2, 3, 4]), 2: set([0, 1,
5]), 3: set([2, 3, 4]), 4: set([2, 3, 4])}
actual_cand_set = db._index_candidate_set(C,
lrecord_id_to_index_map, rrecord_id_to_index_map, False)
self.assertEqual(expected_cand_set, actual_cand_set)
@raises(AssertionError)
def test_index_candidate_set_2(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
l_key = cm.get_key(A)
r_key = cm.get_key(B)
C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable_' +
l_key, fk_rtable='rtable_' + r_key, key = '_id')
C.loc[0, 'ltable_ID'] = 'aaaa'
lrecord_id_to_index_map = db._build_id_to_index_map(A, l_key)
rrecord_id_to_index_map = db._build_id_to_index_map(B, r_key)
db._index_candidate_set(C,
lrecord_id_to_index_map, rrecord_id_to_index_map, False)
@raises(AssertionError)
def test_index_candidate_set_3(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
l_key = cm.get_key(A)
r_key = cm.get_key(B)
C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable_' +
l_key, fk_rtable='rtable_' + r_key, key = '_id')
C.loc[0, 'rtable_ID'] = 'bbbb'
lrecord_id_to_index_map = db._build_id_to_index_map(A, l_key)
rrecord_id_to_index_map = db._build_id_to_index_map(B, r_key)
db._index_candidate_set(C,
lrecord_id_to_index_map, rrecord_id_to_index_map, False)
def test_index_candidate_set_4(self):
A_list = [[1, 'asdf', 'fdas'], [2, 'fdsa', 'asdf']]
B_list = [['B002', 'qqqq', 'wwww'], ['B003', 'rrrr', 'fdsa']]
A = pd.DataFrame(A_list)
A.columns = ['ID', 'f1', 'f2']
em.set_key(A, 'ID')
B = pd.DataFrame(B_list)
B.columns = ['ID', 'f1', 'f2']
em.set_key(B, 'ID')
C_list = [[0, 1, 'B003'], [1, 2, 'B002']]
C = pd.DataFrame(C_list)
C.columns = ['_id', 'ltable_ID', 'rtable_ID']
cm.set_candset_properties(C, '_id', 'ltable_ID',
'rtable_ID', A, B)
lrecord_id_to_index_map = db._build_id_to_index_map(A, 'ID')
rrecord_id_to_index_map = db._build_id_to_index_map(B, 'ID')
expected_cand_set = {0: set([1]), 1: set([0])}
actual_cand_set = db._index_candidate_set(C,
lrecord_id_to_index_map, rrecord_id_to_index_map, False)
self.assertEqual(expected_cand_set, actual_cand_set)
@raises(AssertionError)
def test_index_candidate_set_5(self):
A_list = [[1, 'asdf', 'fdas'], [2, 'fdsa', 'asdf']]
B_list = [['B002', 'qqqq', 'wwww'], ['B003', 'rrrr', 'fdsa']]
A = pd.DataFrame(A_list)
A.columns = ['ID', 'f1', 'f2']
em.set_key(A, 'ID')
B = pd.DataFrame(B_list)
B.columns = ['ID', 'f1', 'f2']
em.set_key(B, 'ID')
C_list = [[0, 1, 'B001'], [1, 2, 'B002']]
C = pd.DataFrame(C_list)
C.columns = ['_id', 'ltable_ID', 'rtable_ID']
cm.set_candset_properties(C, '_id', 'ltable_ID',
'rtable_ID', A, B)
lrecord_id_to_index_map = db._build_id_to_index_map(A, 'ID')
rrecord_id_to_index_map = db._build_id_to_index_map(B, 'ID')
db._index_candidate_set(C,
lrecord_id_to_index_map, rrecord_id_to_index_map, False)
def test_index_candidate_set_6(self):
A_list = [[1, 'asdf', 'fdas'], [2, 'fdsa', 'asdf']]
B_list = [['B002', 'qqqq', 'wwww'], ['B003', 'rrrr', 'fdsa']]
A = pd.DataFrame(A_list)
A.columns = ['ID', 'f1', 'f2']
em.set_key(A, 'ID')
B = pd.DataFrame(B_list)
B.columns = ['ID', 'f1', 'f2']
em.set_key(B, 'ID')
C = pd.DataFrame()
lrecord_id_to_index_map = db._build_id_to_index_map(A, 'ID')
rrecord_id_to_index_map = db._build_id_to_index_map(B, 'ID')
new_C = db._index_candidate_set(C,
lrecord_id_to_index_map, rrecord_id_to_index_map, False)
self.assertEqual(new_C, {})
def test_calc_table_field_length_1(self):
record_index_list = []
field_length_list = db._calc_table_field_length(record_index_list, 4)
expected_field_length_list = []
self.assertEqual(field_length_list, expected_field_length_list)
def test_calc_table_field_length_2(self):
record_index_list = [array('I', [2, 1, 2, 0]), array('I', [1, 1, 3])]
field_length_list = db._calc_table_field_length(record_index_list, 4)
expected_field_length_list = [array('I', [1, 1, 2, 0]), array('I', [0,
2, 0, 1])]
self.assertEqual(field_length_list, expected_field_length_list)
@raises(AssertionError)
def test_calc_table_field_length_3(self):
record_index_list = [array('I', [2, 1, 2, 0]), array('I', [1, 1, 3])]
field_length_list = db._calc_table_field_length(record_index_list, 3)
expected_field_length_list = [array('I', [1, 1, 2, 0]), array('I', [0,
2, 0, 1])]
self.assertEqual(field_length_list, expected_field_length_list)
def test_calc_table_field_token_sum_1(self):
field_length_list = []
field_token_sum = db._calc_table_field_token_sum(field_length_list, 4)
expected_field_token_sum = [0, 0, 0, 0]
self.assertEqual(field_token_sum, expected_field_token_sum)
def test_calc_table_field_token_sum_2(self):
field_length_list = [array('I', [1, 1, 2, 0]), array('I', [0,
2, 0, 1])]
field_token_sum = db._calc_table_field_token_sum(field_length_list, 4)
expected_field_token_sum = [1, 3, 2, 1]
self.assertEqual(field_token_sum, expected_field_token_sum)
def test_assemble_topk_table_1(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
A_key = em.get_key(A)
B_key = em.get_key(B)
topk_heap = []
ret_dataframe = db._assemble_topk_table(topk_heap, A, B, A_key, B_key)
self.assertEqual(len(ret_dataframe), 0)
self.assertEqual(list(ret_dataframe.columns), [])
def test_assemble_topk_table_2(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
A_key = em.get_key(A)
B_key = em.get_key(B)
topk_heap = [(0.2727272727272727, 1, 0), (0.23076923076923078, 0, 4),
(0.16666666666666666, 0, 3)]
ret_dataframe = db._assemble_topk_table(topk_heap, A, B, A_key, B_key)
expected_columns = ['_id', 'ltable_ID', 'rtable_ID',
'ltable_name', 'ltable_birth_year',
'ltable_hourly_wage',
'ltable_address', 'ltable_zipcode', 'rtable_name',
'rtable_birth_year', 'rtable_hourly_wage',
'rtable_address', 'rtable_zipcode']
self.assertEqual(len(ret_dataframe), 3)
self.assertEqual(list(ret_dataframe.columns), expected_columns)
expected_recs = [[0, 'a2', 'b1', '<NAME>',
1988, 27.5, '1652 Stockton St, San Francisco',
94122, '<NAME>', 1987, 29.5,
'108 Clement St, San Francisco', 94107],
[1, 'a1', 'b5', '<NAME>',
1989, 30.0, '607 From St, San Francisco', 94107,
'<NAME>', 1984, 35.0,
'170 Post St, Apt 4, San Francisco', 94122],
[2, 'a1', 'b4', '<NAME>',
1989, 30.0, '607 From St, San Francisco', 94107,
'<NAME>', 1982, 26.0,
'108 South Park, San Francisco', 94122]]
self.assertEqual(list(ret_dataframe.loc[0]), expected_recs[0])
self.assertEqual(list(ret_dataframe.loc[1]), expected_recs[1])
self.assertEqual(list(ret_dataframe.loc[2]), expected_recs[2])
def test_debugblocker_config_cython_1(self):
ltable_field_token_sum = {1}
rtable_field_token_sum = {1}
py_num_fields = 1
config_list = db.debugblocker_config_cython(ltable_field_token_sum, rtable_field_token_sum,
py_num_fields, 2, 2)
expected_config_list = [[0]]
self.assertEqual(config_list, expected_config_list)
def test_debugblocker_config_cython_2(self):
ltable_field_token_sum = {4, 3, 2, 1}
rtable_field_token_sum = {4, 3, 2, 1}
py_num_fields = 4
config_list = db.debugblocker_config_cython(ltable_field_token_sum, rtable_field_token_sum,
py_num_fields, 2, 2)
expected_config_list = [[0, 1, 2, 3], [0, 1, 2], [0, 1], [0], [1, 2, 3], [0, 2, 3], [0, 1, 3],
[1, 2], [0, 2], [1]]
self.assertEqual(config_list, expected_config_list)
def test_debugblocker_topk_cython_1(self):
py_config = []
lrecord_token_list = [[]]
rrecord_token_list = [[]]
lrecord_index_list = [[]]
rrecord_index_list = [[]]
py_cand_set = []
py_output_size = 100
rec_list = db.debugblocker_topk_cython(py_config, lrecord_token_list, rrecord_token_list,
lrecord_index_list, rrecord_index_list, py_cand_set, py_output_size)
expected_rec_list = []
self.assertEqual(rec_list, expected_rec_list)
def test_debugblocker_topk_cython_2(self):
py_config = []
lrecord_token_list = [[]]
rrecord_token_list = [[]]
lrecord_index_list = [[]]
rrecord_index_list = [[]]
py_cand_set = None
py_output_size = 100
rec_list = db.debugblocker_topk_cython(py_config, lrecord_token_list, rrecord_token_list,
lrecord_index_list, rrecord_index_list, py_cand_set, py_output_size)
expected_rec_list = []
self.assertEqual(rec_list, expected_rec_list)
def test_debugblocker_topk_cython_3(self):
py_config = [0, 1]
lrecord_token_list = [[1, 2]]
rrecord_token_list = [[0, 1]]
lrecord_index_list = [[1, 2]]
rrecord_index_list = [[0, 1]]
py_cand_set = None
py_output_size = 100
rec_list = db.debugblocker_topk_cython(py_config, lrecord_token_list, rrecord_token_list,
lrecord_index_list, rrecord_index_list, py_cand_set, py_output_size)
expected_rec_list = [[0, 0, 1]]
self.assertEqual(rec_list, expected_rec_list)
def test_debugblocker_topk_cython_4(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable_ID',
fk_rtable='rtable_ID', key = '_id')
py_config = [0, 1]
lrecord_token_list = [array('I', [5, 12, 15, 22, 37, 38, 39]),
array('I', [26, 30, 32, 34, 37, 38, 39]),
array('I', [24, 27, 28, 36, 37, 38, 39]),
array('I', [4, 10, 13, 21, 37, 38, 39]),
array('I', [2, 7, 31, 33, 35, 38, 39])]
rrecord_token_list = [array('I', [17, 18, 25, 29, 37, 38, 39]),
array('I', [9, 27, 28, 36, 37, 38, 39]),
array('I', [19, 26, 30, 34, 37, 38, 39]),
array('I', [14, 16, 20, 23, 25, 38, 39]),
array('I', [1, 3, 6, 8, 31, 33, 37, 38, 39]),
array('I', [0, 11, 29, 32, 35, 38, 39])]
lrecord_index_list = [array('I', [1, 1, 0, 0, 1, 1, 1]),
array('I', [1, 0, 0, 1, 1, 1, 1]),
array('I', [0, 1, 0, 1, 1, 1, 1]),
array('I', [1, 0, 0, 1, 1, 1, 1]),
array('I', [1, 0, 0, 1, 1, 1, 1])]
rrecord_index_list = [array('I', [0, 0, 1, 1, 1, 1, 1]),
array('I', [0, 1, 0, 1, 1, 1, 1]),
array('I', [0, 1, 0, 1, 1, 1, 1]),
array('I', [0, 0, 1, 1, 1, 1, 1]),
array('I', [1, 1, 0, 1, 0, 1, 1, 1, 1]),
array('I', [1, 0, 1, 0, 1, 1, 1])]
py_cand_set = {0: set([0, 1, 5]), 1: set([2, 3, 4]), 2: set([0, 1, 5]), 3: set([2, 3, 4]), 4: set([2, 3, 4])}
py_output_size = 100
rec_list = db.debugblocker_topk_cython(py_config, lrecord_token_list, rrecord_token_list,
lrecord_index_list, rrecord_index_list, py_cand_set, py_output_size)
expected_rec_list = [[0, 2, 13], [0, 3, 3], [0, 4, 6], [1, 0, 12], [1, 1, 11],
[1, 5, 10], [2, 2, 9], [2, 3, 2], [2, 4, 7], [3, 0, 14],
[3, 1, 15], [3, 5, 5], [4, 0, 1], [4, 1, 4], [4, 5, 8]]
self.assertEqual(len(rec_list), len(expected_rec_list))
def test_debugblocker_merge_topk_cython_1(self):
rec_lists = []
        rec_list = db.debugblocker_merge_topk_cython(rec_lists)
expected_rec_list = []
self.assertEqual(rec_list, expected_rec_list)
def test_debugblocker_merge_topk_cython_2(self):
rec_lists = [[[1, 2, 1]], [[1, 2, 2]], [[1, 2, 3]]]
        rec_list = db.debugblocker_merge_topk_cython(rec_lists)
expected_rec_list = [(2, 1, 2)]
self.assertEqual(rec_list, expected_rec_list)
def test_debugblocker_merge_topk_cython_3(self):
rec_lists = [[[1, 2, 1], [2, 3, 2]], [[1, 2, 2], [2, 3, 3]], [[1, 2, 3],
[2, 3, 4]]]
        rec_list = db.debugblocker_merge_topk_cython(rec_lists)
expected_rec_list = [(2, 1, 2), (3, 2, 3)]
self.assertEqual(rec_list, expected_rec_list)
def test_debugblocker_merge_topk_cython_4(self):
rec_lists = [[(1, 2, 1)], [(1, 2, 2)], [(1, 2, 3)]]
        rec_list = db.debugblocker_merge_topk_cython(rec_lists)
expected_rec_list = [(2, 1, 2)]
self.assertEqual(rec_list, expected_rec_list)
@raises(AssertionError)
def test_debugblocker_1(self):
A = []
B = []
C = []
db.debug_blocker(C, A, B)
@raises(AssertionError)
def test_debugblocker_2(self):
A = read_csv_metadata(path_a)
B = []
C = []
db.debug_blocker(C, A, B)
@raises(AssertionError)
def test_debugblocker_3(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = None
db.debug_blocker(C, A, B)
@raises(AssertionError)
def test_debugblocker_4(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
output_size = '200'
db.debug_blocker(C, A, B, output_size)
@raises(AssertionError)
def test_debugblocker_5(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
attr_corres = set()
db.debug_blocker(C, A, B, 200, attr_corres)
def test_debugblocker_6(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B,
fk_ltable='ltable_ID',
fk_rtable='rtable_ID',
key='_id')
attr_corres = []
db.debug_blocker(C, A, B, 200, attr_corres)
@raises(AssertionError)
def test_debugblocker_7(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B,
fk_ltable='ltable_ID',
fk_rtable='rtable_ID',
key='_id')
attr_corres = [('ID', 'ID'), ['ID', 'ID']]
db.debug_blocker(C, A, B, 200, attr_corres)
@raises(AssertionError)
def test_debugblocker_8(self):
A = read_csv_metadata(path_a, key='ID')
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B,
fk_ltable='ltable_ID',
fk_rtable='rtable_ID',
key='_id')
attr_corres = [('ID', 'ID')]
verbose = 'true'
db.debug_blocker(C, A, B, 200, attr_corres, verbose)
@raises(AssertionError)
def test_debugblocker_9(self):
A = pd.DataFrame([])
B = read_csv_metadata(path_b)
C = pd.DataFrame([])
db.debug_blocker(C, A, B)
@raises(AssertionError)
def test_debugblocker_10(self):
A = read_csv_metadata(path_a)
B = pd.DataFrame([])
C = pd.DataFrame([])
db.debug_blocker(C, A, B)
@raises(AssertionError)
def test_debugblocker_11(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = | pd.DataFrame([]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 08:04:31 2020
@author: <NAME>
Functions to run the station characterization notebook on exploredata.
"""
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import math
import numpy as np
from netCDF4 import Dataset
import textwrap
import datetime as dt
import os
import six
import requests
from icoscp.station import station as station_data
#for the widgets
from IPython.core.display import display, HTML
from ipywidgets import Dropdown, SelectMultiple, HBox, VBox, Button, Output, IntText, RadioButtons,IntProgress, GridspecLayout
from IPython.display import clear_output, display
# import required libraries
#%pylab inline
import netCDF4 as cdf
#import pickle
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import warnings
warnings.filterwarnings('ignore')
#added - do not show the figure that is being saved (it has a different size than
#the one displayed for the land cover bar graph)
matplotlib.pyplot.ioff()
#stations that have footprints, the years and months with available footprints, and the station altitude
#path to footprints
pathFP='/data/stiltweb/stations/'
#Earth's radius in km (for calculating distances between the station and cells)
R = 6373.8
#saved distances to the 192 000 cells for all the labeled atmospheric stations
#if the selected station is not found in this file, the distances are calculated on the fly (see the sketch below)
approved_stations_distances = pd.read_csv('approved_stations_distances.csv')
#saved degree angles from the stations to all 192 000 cells for all the labeled atmospheric stations
approved_stations_degrees = pd.read_csv('approved_stations_degrees.csv')
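#hedged sketch (added for illustration, not part of the original module): the fallback
#"distances are calculated" step mentioned above presumably uses a great-circle
#(haversine) distance with the Earth radius R defined above; the helper name and the
#exact formula used by the original notebook are assumptions.
def _haversine_km(lat1, lon1, lat2, lon2):
    #convert degrees to radians, then apply the haversine formula
    lat1, lon1, lat2, lon2 = map(math.radians, [lat1, lon1, lat2, lon2])
    a = (math.sin((lat2 - lat1) / 2) ** 2
         + math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2) ** 2)
    return 2 * R * math.asin(math.sqrt(a))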
#functions from Ute
#function to read and aggregate footprints for given time range
def read_aggreg_footprints(station, date_range, timeselect='all'):
# loop over all dates and read netcdf files
# path to footprint files in new stiltweb directory structure
pathFP='/data/stiltweb/stations/'
# print ('date range: ',date_range)
fp=[]
nfp=0
first = True
for date in date_range:
filename=(pathFP+station+'/'+str(date.year)+'/'+str(date.month).zfill(2)+'/'
+str(date.year)+'x'+str(date.month).zfill(2)+'x'+str(date.day).zfill(2)+'x'+str(date.hour).zfill(2)+'/foot')
#print (filename)
if os.path.isfile(filename):
f_fp = cdf.Dataset(filename)
if (first):
fp=f_fp.variables['foot'][:,:,:]
lon=f_fp.variables['lon'][:]
lat=f_fp.variables['lat'][:]
first = False
else:
fp=fp+f_fp.variables['foot'][:,:,:]
f_fp.close()
nfp+=1
#else:
#print ('file does not exist: ',filename)
if nfp > 0:
fp=fp/nfp
else:
print ('no footprints found')
#print (np.shape(fp))
#print (np.max(fp))
title = 'not used'
#title = (start_date.strftime('%Y-%m-%d')+' - '+end_date.strftime('%Y-%m-%d')+'\n'+
# 'time selection: '+timeselect)
return nfp, fp, lon, lat, title
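#hedged usage sketch (added for illustration, not in the original notebook): shows the
#expected inputs of read_aggreg_footprints(); the station id and the one-week date span
#are placeholders, and footprints are only aggregated if the corresponding files exist
#under pathFP on the STILT server.
def _example_aggregate_footprints(station='HTM150'):
    date_range = pd.date_range(dt.datetime(2018, 1, 1, 0),
                               dt.datetime(2018, 1, 7, 21), freq='3H')
    nfp, fp, lon, lat, title = read_aggreg_footprints(station, date_range)
    return nfp, fp, lon, lat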
def get_station_class():
# Query the ICOS SPARQL endpoint for a station list
# query stationId, class, lng name and country
# output is an object "data" containing the results in JSON
url = 'https://meta.icos-cp.eu/sparql'
query = """
prefix st: <http://meta.icos-cp.eu/ontologies/stationentry/>
select distinct ?stationId ?stationClass ?country ?longName
from <http://meta.icos-cp.eu/resources/stationentry/>
where{
?s a st:AS .
?s st:hasShortName ?stationId .
?s st:hasStationClass ?stationClass .
?s st:hasCountry ?country .
?s st:hasLongName ?longName .
filter (?stationClass = "1" || ?stationClass = "2")
}
ORDER BY ?stationClass ?stationId
"""
r = requests.get(url, params = {'format': 'json', 'query': query})
data = r.json()
# convert the the result into a table
# output is an array, where each row contains
# information about the station
cols = data['head']['vars']
datatable = []
for row in data['results']['bindings']:
item = []
for c in cols:
item.append(row.get(c, {}).get('value'))
datatable.append(item)
# print the table
df_datatable = pd.DataFrame(datatable, columns=cols)
#df_datatable.head(5)
return df_datatable
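#hedged usage sketch (illustrative, not part of the original module): the SPARQL query
#above returns one row per ICOS class 1/2 station, so the result can be filtered like
#any other dataframe; the column names come from the query variables.
def _example_station_class():
    df_stations = get_station_class()
    class_one = df_stations[df_stations['stationClass'] == "1"]
    return class_one[['stationId', 'longName', 'country']]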
def available_STILT_dictionary():
# store availability of STILT footprints in a dictionary
# get all ICOS station IDs by listing subdirectories in stiltweb
# extract availability from directory structure
#new:
pathStations='/data/stiltweb/stations/'
#pathStations='/opt/stiltdata/fsicos2/stiltweb/stations/'
allStations = os.listdir(pathStations)
# empty dictionary
available = {}
# fill dictionary with station name, years and months for each year
for ist in sorted(list(set(allStations))):
if os.path.exists(pathStations+'/'+ist):
#print ('directory '+pathStations+'/'+ist+' exits')
available[ist] = {}
years = os.listdir(pathStations+'/'+ist)
available[ist]['years'] = years
for yy in sorted(available[ist]['years']):
available[ist][yy] = {}
months = os.listdir(pathStations+'/'+ist+'/'+yy)
available[ist][yy]['months'] = months
available[ist][yy]['nmonths'] = len(available[ist][yy]['months'])
#else:
# print ('directory '+pathStations+'/'+ist+' does not exit')
# Get list of ICOS class 1 and class 2 stations from Carbon Portal
df_datatable = get_station_class()
# add information if ICOS class 1 or class 2 site
for ist in sorted(available):
available[ist]['stationClass'] = np.nan
for istICOS in df_datatable['stationId']:
ic = int(df_datatable[df_datatable['stationId']==istICOS].index.values)
if istICOS in ist:
available[ist]['stationClass'] = df_datatable['stationClass'][ic]
# print availability
#for ist in sorted(available):
# print ('station:', ist)
# for k in available[ist]:
# print (k,':', available[ist][k])
return available
def create_STILT_dictionary():
# store all STILT station information in a dictionary
# get all ICOS station IDs by listing subdirectories in stiltweb
# extract location from filename of link
#UPDATE
pathStations='/data/stiltweb/stations/'
#pathStations='/opt/stiltdata/fsicos2/stiltweb/stations/'
allStations = os.listdir(pathStations)
# empty dictionary
stations = {}
# fill dictionary with ICOS station id, latitude, longitude and altitude
for ist in sorted(list(set(allStations))):
stations[ist] = {}
# get filename of link (original stiltweb directory structure)
# and extract location information
if os.path.exists(pathStations+ist):
loc_ident = os.readlink(pathStations+ist)
clon = loc_ident[-13:-6]
lon = np.float(clon[:-1])
if clon[-1:] == 'W':
lon = -lon
clat = loc_ident[-20:-14]
lat = np.float(clat[:-1])
if clat[-1:] == 'S':
lat = -lat
alt = np.int(loc_ident[-5:])
stations[ist]['lat']=lat
stations[ist]['lon']=lon
stations[ist]['alt']=alt
stations[ist]['locIdent']=os.path.split(loc_ident)[-1]
# add information on station name (and new STILT station id) from stations.csv file used in stiltweb
url="https://stilt.icos-cp.eu/viewer/stationinfo"
df = pd.read_csv(url)
for ist in sorted(list(set(stations))):
stationName = df.loc[df['STILT id'] == ist]['STILT name']
if len(stationName.value_counts()) > 0:
stations[ist]['name'] = stationName.item()
else:
stations[ist]['name'] = ''
# Get list of ICOS class 1 and class 2 stations from Carbon Portal
df_datatable = get_station_class()
# add information if ICOS class 1 or class 2 site
for ist in sorted(list(set(stations))):
stations[ist]['stationClass'] = np.nan
for istICOS in df_datatable['stationId']:
ic = int(df_datatable[df_datatable['stationId']==istICOS].index.values)
if istICOS in ist:
stations[ist]['stationClass'] = df_datatable['stationClass'][ic]
# print dictionary
#for ist in sorted(stations):
# print ('station:', ist)
# for k in stations[ist]:
# print (k,':', stations[ist][k])
# write dictionary to pickle file for further use
#pickle.dump( stations, open( "stationsDict.pickle", "wb" ) )
return stations
#previously defined at the top of the notebook
stations = create_STILT_dictionary()
#updated --> takes the timeselect list and returns a dataframe restricted to the selected hours
#(without this, the returned time series would include the wrong hours)
# function to read STILT concentration time series (new format of STILT results)
def read_stilt_timeseries_upd(station,date_range,timeselect_list):
url = 'https://stilt.icos-cp.eu/viewer/stiltresult'
headers = {'Content-Type': 'application/json', 'Accept-Charset': 'UTF-8'}
# check if STILT results exist
pathFP='/data/stiltweb/stations/'
new_range=[]
for date in date_range:
#--> new : pathStations='/data/stiltweb/stations/'
#pathStations='/opt/stiltdata/fsicos2/stiltweb/stations/'
if os.path.exists(pathFP+station+'/'+str(date.year)+'/'+str(date.month).zfill(2)+'/'
+str(date.year)+'x'+str(date.month).zfill(2)+'x'+str(date.day).zfill(2)+'x'+str(date.hour).zfill(2)+'/'):
new_range.append(date)
#if os.path.exists('/opt/stiltdata/fsicos2/stiltweb/slots/'+stations[station]['locIdent']+'/'+str(zDate.year)+'/'+str(zDate.month).zfill(2)+'/'
# +str(zDate.year)+'x'+str(zDate.month).zfill(2)+'x'+str(zDate.day).zfill(2)+'x'+str(zDate.hour).zfill(2)+'/'):
#
#filename=(pathFP+station+'/'+str(date.year)+'/'+str(date.month).zfill(2)+'/'
# +str(date.year)+'x'+str(date.month).zfill(2)+'x'+str(date.day).zfill(2)+'x'+str(date.hour).zfill(2)+'/foot')
if len(new_range) > 0:
date_range = new_range
fromDate = date_range[0].strftime('%Y-%m-%d')
toDate = date_range[-1].strftime('%Y-%m-%d')
columns = ('["isodate","co2.stilt","co2.fuel","co2.bio","co2.bio.gee","co2.bio.resp","co2.fuel.coal","co2.fuel.oil",'+
'"co2.fuel.gas","co2.fuel.bio","co2.energy","co2.transport", "co2.industry",'+
'"co2.others", "co2.cement", "co2.background",'+
'"co.stilt","co.fuel","co.bio","co.fuel.coal","co.fuel.oil",'+
'"co.fuel.gas","co.fuel.bio","co.energy","co.transport", "co.industry",'+
'"co.others", "co.cement", "co.background",'+
'"rn", "rn.era","rn.noah","wind.dir","wind.u","wind.v","latstart","lonstart"]')
data = '{"columns": '+columns+', "fromDate": "'+fromDate+'", "toDate": "'+toDate+'", "stationId": "'+station+'"}'
#print (data)
response = requests.post(url, headers=headers, data=data)
if response.status_code != 500:
#print (response.json())
output=np.asarray(response.json())
df = pd.DataFrame(output[:,:], columns=eval(columns))
df = df.replace('null',np.NaN)
df = df.astype(float)
df['date'] = pd.to_datetime(df['isodate'], unit='s')
df.set_index(['date'],inplace=True)
df['name'] = station
df['model'] = 'STILT'
df['wind.speed']=np.sqrt((df['wind.u']**2)+(df['wind.v']**2))
#print (df.columns)
else:
df=pd.DataFrame({'A' : []})
df=df[(df['co2.fuel'].index.hour.isin(timeselect_list))]
return df
#given the input - create a pandas date range
def date_range_station_char(start_date, end_date, timeselect_list):
date_range = pd.date_range(start_date, end_date, freq='3H')
#depending on the input (at most 8 slots: 0 3 6 9 12 15 18 21), keep only the selected footprint hours
date_range = date_range[date_range.hour.isin(timeselect_list)]
#consider return timeselect
return date_range
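#Minimal usage sketch (illustrative values only): build a 3-hourly date range restricted to the
#selected footprint hours and then, if STILT results exist, fetch the matching time series.
#The station id 'HTM150' is just a placeholder; real ids come from the stations dictionary above.
example_timeselect_list = [0, 3, 6, 9, 12, 15, 18, 21]
example_date_range = date_range_station_char('2018-01-01', '2018-01-31', example_timeselect_list)
#df_example = read_stilt_timeseries_upd('HTM150', example_date_range, example_timeselect_list)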
def import_landcover():
all_corine_classes= Dataset('all_corine_except_ocean.nc')
#the "onceans_finalized" dataset is seperate: CORINE class 523 (oceans) did not extend beyond exclusive zone
#complemented with Natural Earth data.
#CORINE does not cover the whole area, "nodata" area is never ocean, rather landbased data.
oceans_finalized= Dataset('oceans_finalized.nc')
#access all the different land cover classes in the .nc files:
fp_111 = all_corine_classes.variables['area_111'][:,:]
fp_112 = all_corine_classes.variables['area_112'][:,:]
fp_121 = all_corine_classes.variables['area_121'][:,:]
fp_122 = all_corine_classes.variables['area_122'][:,:]
fp_123 = all_corine_classes.variables['area_123'][:,:]
fp_124 = all_corine_classes.variables['area_124'][:,:]
fp_131 = all_corine_classes.variables['area_131'][:,:]
fp_132 = all_corine_classes.variables['area_132'][:,:]
fp_133 = all_corine_classes.variables['area_133'][:,:]
fp_141 = all_corine_classes.variables['area_141'][:,:]
fp_142 = all_corine_classes.variables['area_142'][:,:]
fp_211 = all_corine_classes.variables['area_211'][:,:]
fp_212 = all_corine_classes.variables['area_212'][:,:]
fp_213 = all_corine_classes.variables['area_213'][:,:]
fp_221 = all_corine_classes.variables['area_221'][:,:]
fp_222 = all_corine_classes.variables['area_222'][:,:]
fp_223 = all_corine_classes.variables['area_223'][:,:]
fp_231 = all_corine_classes.variables['area_231'][:,:]
fp_241 = all_corine_classes.variables['area_241'][:,:]
fp_242 = all_corine_classes.variables['area_242'][:,:]
fp_243 = all_corine_classes.variables['area_243'][:,:]
fp_244 = all_corine_classes.variables['area_244'][:,:]
fp_311 = all_corine_classes.variables['area_311'][:,:]
fp_312 = all_corine_classes.variables['area_312'][:,:]
fp_313 = all_corine_classes.variables['area_313'][:,:]
fp_321 = all_corine_classes.variables['area_321'][:,:]
fp_322 = all_corine_classes.variables['area_322'][:,:]
fp_323 = all_corine_classes.variables['area_323'][:,:]
fp_324 = all_corine_classes.variables['area_324'][:,:]
fp_331 = all_corine_classes.variables['area_331'][:,:]
fp_332 = all_corine_classes.variables['area_332'][:,:]
fp_333 = all_corine_classes.variables['area_333'][:,:]
fp_334 = all_corine_classes.variables['area_334'][:,:]
fp_335 = all_corine_classes.variables['area_335'][:,:]
fp_411 = all_corine_classes.variables['area_411'][:,:]
fp_412 = all_corine_classes.variables['area_412'][:,:]
fp_421 = all_corine_classes.variables['area_421'][:,:]
fp_422 = all_corine_classes.variables['area_422'][:,:]
fp_423 = all_corine_classes.variables['area_423'][:,:]
fp_511 = all_corine_classes.variables['area_511'][:,:]
fp_512 = all_corine_classes.variables['area_512'][:,:]
fp_521 = all_corine_classes.variables['area_521'][:,:]
fp_522 = all_corine_classes.variables['area_522'][:,:]
#CORINE combined with natural earth data for oceans:
fp_523 = oceans_finalized.variables['ocean_ar2'][:,:]
#have a variable that represents the whole area of the cell,
#used to get a percentage breakdown of each corine class.
fp_total_area = all_corine_classes.variables['area_stilt'][:,:]
#19 aggregated classes (these are used in the current bar graphs but can be updated by each user)
urban = fp_111+fp_112+fp_141+fp_142
industrial = fp_131 + fp_133 + fp_121
road_and_rail = fp_122
ports_and_airports= fp_123+fp_124
dump_sites = fp_132
staple_cropland_not_rice = fp_211 + fp_212 + fp_241 + fp_242 + fp_243
rice_fields = fp_213
cropland_fruit_berry_grapes_olives = fp_221 + fp_222 + fp_223
pastures = fp_231
broad_leaved_forest = fp_311
coniferous_forest = fp_312
mixed_forest = fp_313 + fp_244
natural_grasslands = fp_321 + fp_322
transitional_woodland_shrub= fp_323 + fp_324
bare_natural_areas = fp_331 + fp_332 + fp_333 + fp_334
glaciers_perpetual_snow = fp_335
wet_area= fp_411 + fp_412 + fp_421 + fp_422
inland_water_bodies = fp_423 + fp_511 + fp_512 + fp_521 + fp_522
oceans = fp_523
#added: the "missing area" is out of the CORINE domain. Alltogether add upp to "fp_total_area"
out_of_domain=fp_total_area-oceans-inland_water_bodies-wet_area-glaciers_prepetual_snow-bare_natural_areas-transitional_woodland_shrub-natural_grasslands-mixed_forest-coniferous_forest-broad_leaved_forest-pastures-cropland_fruit_berry_grapes_olives-rice_fields-staple_cropland_not_rice-dump_sites-ports_and_apirports-road_and_rail-industrial-urban
#further aggregated classes for the land cover wind polar graph and land cover bar graph
urban_aggreg= urban + industrial + road_and_rail + dump_sites + ports_and_apirports
cropland_aggreg= staple_cropland_not_rice + rice_fields + cropland_fruit_berry_grapes_olives
forests= broad_leaved_forest + coniferous_forest + mixed_forest
pastures_grasslands= pastures + natural_grasslands
oceans=oceans
other=transitional_woodland_shrub+bare_natural_areas+glaciers_prepetual_snow +wet_area + inland_water_bodies
return out_of_domain, urban_aggreg, cropland_aggreg, forests, pastures_grasslands, oceans, other
def import_population_data():
pop_data= Dataset('point_with_pop_data.nc')
fp_pop=pop_data.variables['Sum_TOT_P'][:,:]
return fp_pop
def import_point_source_data():
#point source:
point_source_data= Dataset('final_netcdf_point_source_emission.nc')
#emissions in kg/year in the variable "Sum_Tota_1"
fp_point_source=point_source_data.variables['Sum_Tota_1'][:,:]
#different from the population data: the emissions within each STILT cell can be translated into their effect on the final CO2 concentrations at the stations.
#it just needs to be in the right unit (micromol/(m2 s)) and multiplied by the individual or aggregated footprints
#divide by the molar mass of CO2 in kg/mol: 12 (C) + 16 (O) + 16 (O) = 44 g/mol = 0.044 kg/mol, which gives the number of moles. Converting to micromol: 1 mol = 1000000 micromol
fp_point_source_moles_C=fp_point_source/0.044
#how many micro-mole is that? multiply by 1000000
fp_point_source_micromoles_C=fp_point_source_moles_C*1000000
#a NetCDF file with the grid cell area values in m2
f_gridarea = cdf.Dataset('gridareaSTILT.nc')
#area stored in "cell_area"
gridarea = f_gridarea.variables['cell_area'][:]
fp_point_source_m2= fp_point_source_micromoles_C/gridarea
#how many micromoles are emitted per second (the underlying data are per year)
fp_point_source_m2_s= fp_point_source_m2/31536000
return fp_point_source_m2_s
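#Worked example of the conversion above (made-up numbers, for illustration only): 1000 kg of CO2
#per year from a cell of 100 km2 (1e8 m2) corresponds to 1000/0.044 = ~22700 mol/year,
#i.e. ~2.3e10 micromol/year, ~230 micromol/(m2 year), and ~7e-6 micromol/(m2 s).
example_kg_per_year = 1000.0
example_cell_area_m2 = 1e8
example_micromol_m2_s = (example_kg_per_year/0.044)*1000000/example_cell_area_m2/31536000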
#function to generate maps with cells binned by defined intervals and direction
def nondirection_labels(bins, units):
labels = []
#for the label - want bin before and after (range)
for left, right in zip(bins[:-1], bins[1:]):
#if the last object - everything above (>value unit)
if np.isinf(right):
labels.append('>{} {}'.format(left, units))
else:
#how the labels normally look (value - value unit)
labels.append('{} - {} {}'.format(left, right, units))
return list(labels)
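#Quick illustration (example bins only, not the values used later): with bins [0, 200, 400, inf]
#and unit 'km' this returns ['0 - 200 km', '200 - 400 km', '>400 km'].
example_distance_labels = nondirection_labels([0, 200, 400, np.inf], 'km')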
def calculate_initial_compass_bearing(pointA, pointB):
"""
Calculates the bearing between two points.
The formula used is the following:
θ = atan2(sin(Δlong).cos(lat2),
cos(lat1).sin(lat2) − sin(lat1).cos(lat2).cos(Δlong))
:Parameters:
- `pointA`: The tuple representing the latitude/longitude for the
first point. Latitude and longitude must be in decimal degrees
- `pointB`: The tuple representing the latitude/longitude for the
second point. Latitude and longitude must be in decimal degrees
:Returns:
The bearing in degrees
:Returns Type:
float
"""
if (type(pointA) != tuple) or (type(pointB) != tuple):
raise TypeError("Only tuples are supported as arguments")
lat1 = math.radians(pointA[0])
lat2 = math.radians(pointB[0])
diffLong = math.radians(pointB[1] - pointA[1])
x = math.sin(diffLong) * math.cos(lat2)
y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1)
* math.cos(lat2) * math.cos(diffLong))
initial_bearing = math.atan2(x, y)
# Now we have the initial bearing but math.atan2 returns values
# from -180° to +180°, which is not what we want for a compass bearing
# The solution is to normalize the initial bearing as shown below
initial_bearing = math.degrees(initial_bearing)
compass_bearing = (initial_bearing + 360) % 360
return compass_bearing
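#Quick illustration with made-up coordinates: the bearing from (55.0 N, 10.0 E) to a point due
#east at (55.0 N, 11.0 E) comes out close to 90 degrees (slightly below, because of the latitude).
example_bearing = calculate_initial_compass_bearing((55.0, 10.0), (55.0, 11.0))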
def define_bins_maprose(km_intervals, bin_size):
#the number of bins
number_bins=round((5000/km_intervals),0)
#start at 0 km of the station. Then append to this list
interval_bins=[0]
#ex 100, 200, 300 if km_intervals=100
for number in range(1, int(number_bins)):
interval_bins.append(km_intervals*number)
#the last number is infinity - however far the domain stretches marks the end of the last bin.
interval_bins.append(np.inf)
#labels: not used in map - but used in the grouping
interval_labels = nondirection_labels(interval_bins, units='km')
#direction: using the input (bin_size) to set the bins so that the first bin has north (0 degrees) in the middle
#"from_degree" becomes a negative value (half of the bin size "to the left" of 0)
from_degree=-(bin_size/2)
#"to_degree" is a value to indicate the last bin's end. Values must be checked all the way to 360, which means the last bin
#will go past 360 and later be joined with the "0" bin (replace step in map_representation_polar_graph)
to_degree= 360 + (bin_size/2) + 1
#the bin_size is the "step". generate an array with all the direction bins
dir_bins = np.arange(from_degree, to_degree, bin_size)
#the direction bin is the first bin + the next bin divided by two:
dir_labels = (dir_bins[:-1] + dir_bins[1:]) / 2
#return these values to use in the function map_representation_polar_graph
return interval_bins, interval_labels, dir_bins, dir_labels
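#Example call (illustrative parameters, not a recommendation): 45 degree sectors and 200 km rings
#give 25 distance bins (0-200 km up to >4800 km) and direction labels 0, 45, ..., 315 and 360,
#where the 360 label is later folded back onto 0 in map_representation_polar_graph.
example_interval_bins, example_interval_labels, example_dir_bins, example_dir_labels = define_bins_maprose(km_intervals=200, bin_size=45)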
# function to convert station longitude and latitude (slat, slon) to indices of STILT model grid (ix,jy)
def lonlat_2_ixjy(slon,slat,mlon,mlat):
#slon, slat: longitude and latitude of station
#mlon, mlat: 1-dim. longitude and latitude of model grid
ix = (np.abs(mlon-slon)).argmin()
jy = (np.abs(mlat-slat)).argmin()
return ix,jy
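#Small self-contained example (synthetic 0.5 degree grid, not the actual STILT grid): find the
#indices of the grid cell closest to a hypothetical station at 10.25 E, 55.4 N.
example_mlon = np.arange(0.0, 20.0, 0.5)
example_mlat = np.arange(40.0, 70.0, 0.5)
example_ix, example_jy = lonlat_2_ixjy(10.25, 55.4, example_mlon, example_mlat)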
# function to plot maps (show station location if station is provided and zoom in second plot if zoom is provided)
def plot_maps(field, lon, lat, title='', label='', unit='', linlog='linear', station='', zoom='',
vmin=0.0001, vmax=None, colors='GnBu',pngfile=''):
mcolor='m'
# Set scale for features from Natural Earth
NEscale = '50m'
# Create a feature for Countries at 1:50m from Natural Earth
countries = cfeature.NaturalEarthFeature(
category='cultural',
name='admin_0_countries',
scale=NEscale,
facecolor='none')
fig = plt.figure(figsize=(18,10))
# set up a map
ax = plt.subplot(1, 2, 1, projection=ccrs.PlateCarree())
img_extent = (lon.min(), lon.max(), lat.min(), lat.max())
ax.set_extent([lon.min(), lon.max(), lat.min(), lat.max()],crs=ccrs.PlateCarree())
ax.add_feature(countries, edgecolor='black', linewidth=0.3)
cmap = plt.get_cmap(colors)
cmap.set_under(color='white')
if linlog == 'linear':
im = ax.imshow(field[:,:],interpolation=None,origin='lower', extent=img_extent,cmap=cmap,vmin=vmin,vmax=vmax)
cbar=plt.colorbar(im,orientation='horizontal',pad=0.03,fraction=0.055,extend='both')
cbar.set_label(label+' '+unit)
else:
im = ax.imshow(np.log10(field)[:,:],interpolation='none',origin='lower', extent=img_extent,cmap=cmap,vmin=vmin,vmax=vmax)
cbar=plt.colorbar(im,orientation='horizontal',pad=0.03,fraction=0.055,extend='both')
cbar.set_label(label+' log$_{10}$ '+unit)
plt.title(title)
ax.text(0.01, -0.25, 'min: %.2f' % np.min(field[:,:]), horizontalalignment='left',transform=ax.transAxes)
ax.text(0.99, -0.25, 'max: %.2f' % np.max(field[:,:]), horizontalalignment='right',transform=ax.transAxes)
#show station location if station is provided
if station != '':
station_lon=[]
station_lat=[]
station_lon.append(stations[station]['lon'])
station_lat.append(stations[station]['lat'])
ax.plot(station_lon,station_lat,'+',color=mcolor,ms=10,markeredgewidth=1,transform=ccrs.PlateCarree())
zoom=str(zoom)
if zoom != '':
#grid cell index of station
ix,jy = lonlat_2_ixjy(stations[zoom]['lon'],stations[zoom]['lat'],lon,lat)
# define zoom area
i1 = np.max([ix-35,0])
i2 = np.min([ix+35,400])
j1 = np.max([jy-42,0])
j2 = np.min([jy+42,480])
lon_z=lon[i1:i2]
lat_z=lat[j1:j2]
field_z=field[j1:j2,i1:i2]
# set up a map
ax = plt.subplot(1, 2, 2, projection=ccrs.PlateCarree())
img_extent = (lon_z.min(), lon_z.max(), lat_z.min(), lat_z.max())
ax.set_extent([lon_z.min(), lon_z.max(), lat_z.min(), lat_z.max()],crs=ccrs.PlateCarree())
ax.add_feature(countries, edgecolor='black', linewidth=0.3)
if linlog == 'linear':
im = ax.imshow(field_z,interpolation='none',origin='lower', extent=img_extent,cmap=cmap,vmin=vmin,vmax=vmax)
cbar=plt.colorbar(im,orientation='horizontal',pad=0.03,fraction=0.055,extend='both')
cbar.set_label(label+' '+unit)
else:
im = ax.imshow(np.log10(field_z),interpolation='none',origin='lower', extent=img_extent,cmap=cmap,vmin=vmin,vmax=vmax)
cbar=plt.colorbar(im,orientation='horizontal',pad=0.03,fraction=0.055,extend='both')
cbar.set_label(label+' log$_{10}$ '+unit)
#show station location if station is provided
if station != '':
station_lon=[]
station_lat=[]
station_lon.append(stations[station]['lon'])
station_lat.append(stations[station]['lat'])
ax.plot(station_lon,station_lat,'+',color=mcolor,ms=10,markeredgewidth=1,transform=ccrs.PlateCarree())
plt.title(title)
ax.text(0.01, -0.25, 'min: %.2f' % np.min(field[j1:j2,i1:i2]), horizontalalignment='left',transform=ax.transAxes)
ax.text(0.99, -0.25, 'max: %.2f' % np.max(field[j1:j2,i1:i2]), horizontalalignment='right',transform=ax.transAxes)
plt.show()
if len(pngfile)>0:
plotdir='figures'
if not os.path.exists(plotdir):
os.mkdir(plotdir)
fig.savefig(plotdir+'/'+pngfile+'.pdf',dpi=100,bbox_inches='tight')
plt.close()
def map_representation_polar_graph(station, date_range, timeselect, bin_size, unit, rose_type='sensitivity', colorbar='gist_heat_r', km_intervals=200, zoom='', title='', save_figs=''):
#bins in terms of interval and direction
interval_bins, interval_labels, dir_bins, dir_labels=define_bins_maprose(km_intervals=km_intervals, bin_size=bin_size)
st_lon= stations[station]['lon']
st_lat= stations[station]['lat']
#get the aggregated footprint
nfp, fp, fp_lon, fp_lat, title_not_used = read_aggreg_footprints(station, date_range, timeselect=timeselect)
#if the distances to all 192000 cells are not already saved, calculate them.
if station not in approved_stations_distances.columns:
x = [math.radians(st_lon-lon)*math.cos(math.radians(st_lat+lat)/2) for lat in fp_lat for lon in fp_lon]
y = [math.radians(st_lat-lat) for lat in fp_lat for lon in fp_lon]
distance=[math.sqrt((x[index]*x[index])+(y[index]*y[index])) * R for index in range(len(x))]
#if in the existing list, access it.
else:
distance=approved_stations_distances[station]
#the same procedure is used for all three types of map
if rose_type=='sensitivity':
#only want to look at the aggregated footprint - not multiplied by an ancillary data layer
grid_to_display=fp
elif rose_type=='point source contribution':
#import the point source data for multiplication with the aggregated footprint
fp_point_source_m2_s = import_point_source_data()
grid_to_display=fp*fp_point_source_m2_s
elif rose_type=='population sensitivity':
#import the population data for multiplication with the aggregated footprint
fp_pop= import_population_data()
grid_to_display=fp*fp_pop
#list with 192000 sensitivity values. same position in the list as the corresponding distance to station and degree
sens_value=[grid_to_display[0][lat_value][lon_value] for lat_value in range(len(fp_lat)) for lon_value in range(len(fp_lon))]
#degrees - calculate at what degree each cell is in case not in saved list
if station not in approved_stations_degrees.columns:
degrees_0_360=[calculate_initial_compass_bearing((st_lat, st_lon), (lat, lon)) for lat in fp_lat for lon in fp_lon]
else:
degrees_0_360=approved_stations_degrees[station]
#putting it into a dataframe - to perform groupby etc
df_sensitivity_map = pd.DataFrame()
df_sensitivity_map['distance'] = distance
df_sensitivity_map['sensitivity'] = sens_value
df_sensitivity_map['degrees'] = degrees_0_360
#for % later - sensitivity within certain bin (distance and direction)
total_sensitivity= sum(df_sensitivity_map['sensitivity'])
#binning - by the distance intervals and degree intervals. Summarize these.
rosedata=df_sensitivity_map.assign(WindSpd_bins=lambda df: pd.cut(df['distance'], bins=interval_bins, labels=interval_labels, right=True))
rosedata=rosedata.assign(WindDir_bins=lambda df: pd.cut(df['degrees'], bins=dir_bins, labels=dir_labels, right=False))
#the 360 degree bin is the same as 0:
rosedata=rosedata.replace({'WindDir_bins': {360: 0}})
#the combination of the distance and direction columns is used to create a unique key for all cells
#with a certain direction/distance combination.
#convert to string to be able to concatenate them.
rosedata['key']=rosedata['WindDir_bins'].astype(str) +rosedata['WindSpd_bins'].astype(str)
#group by the unique combination of direction and distance
rosedata_grouped_key=rosedata.groupby(by=['key'], as_index=False)['sensitivity'].sum().reset_index()
#merge between the 192000 cells and the grouped values: each cell in a specific direction and distance bin will
#get the sum of the cells in that same bin. Same color on the map corresponding to % or absolute sensitivity.
#reset_index() creates a column with the original index of the dataframes that are joined. Needed to sort the dataframe
#in the next step because the default is to sort by the merge key.
rosedata_merge=rosedata.reset_index().merge(rosedata_grouped_key, left_on='key', right_on='key', sort=False)
#sort by the original index of the 192000 cells:
rosedata_merge=rosedata_merge.sort_values(by=['index_x'])
#x is the "fist" (rosedata.merge) dataframe that was merged (the 192000 individual cells)
#y is the dataframe that is merged to the first. Both columns name "sensitivity".
#sensitivity_y is the merged data - the summarized sensitivity value for the whole bin (direction and distance bin)
rosedata_merge_list=rosedata_merge['sensitivity_y'].tolist()
#now starts the process of "packing it back up" so that it can be displayed as a map (same format as the netCDF files with 480
#lists of lists - the first list holds all the values that have the first latitude value and all 400 different longitude values)
#calculate the % sensitivity - can be changed to absolute sensitivity
if unit=='percent':
rosedata_merge_list=[(sensitivity_value/total_sensitivity)*100 for sensitivity_value in rosedata_merge_list]
#the "netcdf simulation" (see text above)
rosedata_merge_list_of_lists=[]
index=0
while index<192000:
index_to=index+400
#for each list: need to grab the 400 values that are the combination of the same latitude value
#but different longitude values
rosedata_merge_list_of_lists.append(rosedata_merge_list[index:index_to])
#start at the next 400 in the list in the next turn of the loop:
index=index+400
#numpy array works to display in map
rosedata_merge_list_of_lists_array=np.array(rosedata_merge_list_of_lists)
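#Equivalent, more compact repacking (sketch, assuming the grid stays 480 x 400 cells): reshaping
#the flat list with numpy gives the same array as the while loop above.
#rosedata_merge_list_of_lists_array = np.asarray(rosedata_merge_list).reshape(len(fp_lat), len(fp_lon))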
#added
date_index_number = (len(date_range) - 1)
if title=='yes':
for_title=('Station: ' + str(station) + '\n' + unit + ' ' + rose_type + ' given direction and distance: ' + '\n' + str(bin_size) + \
' degree bins and ' + str(km_intervals) +' km increments'
'\n' + str(date_range[0].year) + '-' + str(date_range[0].month) + '-' + str(date_range[0].day)\
+ ' to ' + str(date_range[date_index_number].year) + '-' + str(date_range[date_index_number].month) + '-' + str(date_range[date_index_number].day)+\
' Hour(s): ' + timeselect+ '\n')
else:
for_title=''
#font
matplotlib.rcParams.update({'font.size': 12})
if unit=='percent':
unit='%'
else:
unit='absolute'
if save_figs=='yes':
if rose_type=='sensitivity':
figure_number='_figure_1'
if rose_type=='point source contribution':
figure_number='_figure_2'
if rose_type=='population sensitivity':
figure_number='_figure_3'
string_fig=station+figure_number
else:
string_fig=''
#use the plot_maps function (the call is the same for percent and absolute units)
plot_maps(rosedata_merge_list_of_lists_array, fp_lon, fp_lat, title=for_title, label=rose_type,
unit=unit, linlog='linear', station=station,
zoom=zoom, colors=colorbar, pngfile=string_fig)
#land cover bar graph:
def land_cover_bar_graph(station, date_range, timeselect, title='', save_figs=''):
#get all the land cover data
out_of_domain, urban_aggreg, cropland_aggreg, forests, pastures_grasslands, oceans, other= import_landcover()
approved_stations_degrees = pd.read_csv('approved_stations_degrees.csv')
st_lon= stations[station]['lon']
st_lat= stations[station]['lat']
#selected date_range, aggregated footprint for selected station
nfp, fp, fp_lon, fp_lat, title_not_used = read_aggreg_footprints(station, date_range, timeselect=timeselect)
#land cover classes (imported in the land cover section):
cropland_multiplied=fp*cropland_aggreg
urban_multiplied=fp*urban_aggreg
forests_multiplied=fp*forests
pastures_grasslands_multiplied=fp*pastures_grasslands
oceans_multiplied=fp*oceans
other_multiplied=fp*other
out_of_domain_multiplied=fp*out_of_domain
cropland_values=[cropland_multiplied[0][lat_value][lon_value] for lat_value in range(len(fp_lat)) for lon_value in range(len(fp_lon))]
urban_values=[urban_multiplied[0][lat_value][lon_value] for lat_value in range(len(fp_lat)) for lon_value in range(len(fp_lon))]
forests_values=[forests_multiplied[0][lat_value][lon_value] for lat_value in range(len(fp_lat)) for lon_value in range(len(fp_lon))]
pastures_grasslands_values=[pastures_grasslands_multiplied[0][lat_value][lon_value] for lat_value in range(len(fp_lat)) for lon_value in range(len(fp_lon))]
oceans_values=[oceans_multiplied[0][lat_value][lon_value] for lat_value in range(len(fp_lat)) for lon_value in range(len(fp_lon))]
others_values=[other_multiplied[0][lat_value][lon_value] for lat_value in range(len(fp_lat)) for lon_value in range(len(fp_lon))]
#added: out_of_domain
out_of_domain_values=[out_of_domain_multiplied[0][lat_value][lon_value] for lat_value in range(len(fp_lat)) for lon_value in range(len(fp_lon))]
#degrees (no distance bins for land cover):
if station not in approved_stations_degrees.columns:
degrees_0_360=[calculate_initial_compass_bearing((st_lat, st_lon), (lat, lon)) for lat in fp_lat for lon in fp_lon]
else:
degrees_0_360=approved_stations_degrees[station]
#putting it into a dataframe: initially 192000 values (one per cell) for each of the aggregated land cover classes
#all into the same dataframe with the same column headings. "landcover_type" will be used in "groupby" together with the "slice" (in degrees)
df_cropland = pd.DataFrame()
df_cropland['landcover_vals'] = cropland_values
df_cropland['degrees'] = degrees_0_360
df_cropland['landcover_type'] = 'Cropland'
df_urban= pd.DataFrame()
df_urban['landcover_vals'] = urban_values
df_urban['degrees'] = degrees_0_360
df_urban['landcover_type'] = 'Urban'
df_forests = pd.DataFrame()
df_forests['landcover_vals'] = forests_values
df_forests['degrees'] = degrees_0_360
df_forests['landcover_type'] = 'Forests'
df_pastures_grassland = pd.DataFrame()
df_pastures_grassland['landcover_vals'] = pastures_grasslands_values
df_pastures_grassland['degrees'] = degrees_0_360
df_pastures_grassland['landcover_type'] = 'Pastures and grassland'
df_oceans = pd.DataFrame()
df_oceans['landcover_vals'] = oceans_values
df_oceans['degrees'] = degrees_0_360
df_oceans['landcover_type'] = 'Oceans'
df_others = pd.DataFrame()
df_others['landcover_vals'] = others_values
df_others['degrees'] = degrees_0_360
df_others['landcover_type'] = 'Other'
#out of domain
df_out_of_domain = pd.DataFrame()
df_out_of_domain['landcover_vals'] = out_of_domain_values
df_out_of_domain['degrees'] = degrees_0_360
df_out_of_domain['landcover_type'] = 'No data'
matplotlib.rcParams.update({'font.size': 20})
#combine everything into one dataframe (df_out_of_domain is included as the 'No data' class)
df_all = pd.concat([df_cropland, df_urban, df_forests, df_pastures_grassland, df_oceans, df_others, df_out_of_domain])
#for % later - sensitivity to landcover within certain bin (landcover and direction)
#note that the total now also includes the "No data" (out of domain) values
total_all= sum(df_all['landcover_vals'])
#added - for the matplotlib breakdown: direction bin edges and labels used for the bar graph
#dir_bins = np.asarray(['N', 'NE', 'E', 'SE', 'S', 'SW', 'W', 'NW', 'N'])
dir_bins= np.asarray([0, 22.5,67.5,112.5,157.5,202.5,247.5,292.5,337.5,383.5])
dir_labels= np.asarray([0, 22.5,67.5,112.5,157.5,202.5,247.5,292.5,337.5])
#dir_labels = (dir_bins[:-1] + dir_bins[1:]) / 2
#get columns - for each degree
rosedata=df_all.assign(WindDir_bins=lambda df: pd.cut(df['degrees'], bins=dir_bins, labels=dir_labels, right=False))
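#Short self-contained illustration of the binning step above (synthetic degree values and assumed
#bin edges): pd.cut assigns each degree value to a direction sector label; left-closed bins
#(right=False) mirror the call used for the land cover breakdown.
example_degrees = pd.Series([10.0, 95.0, 200.0, 350.0])
example_dir_edges = [0, 22.5, 67.5, 112.5, 157.5, 202.5, 247.5, 292.5, 337.5, 383.5]
example_dir_labels = [0, 22.5, 67.5, 112.5, 157.5, 202.5, 247.5, 292.5, 337.5]
example_sectors = pd.cut(example_degrees, bins=example_dir_edges, labels=example_dir_labels, right=False)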
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
# strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
# TODO: unused?
wp4 = Panel.from_dict(d4) # noqa
assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
# cast
dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
for k, v in compat.iteritems(d))
result = Panel(dcasted, dtype=int)
expected = Panel(dict((k, v.astype(int))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
result = Panel(dcasted, dtype=np.int32)
expected = Panel(dict((k, v.astype(np.int32))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
data = dict((k, v.values) for k, v in self.panel.iteritems())
result = Panel(data)
exp_major = Index(np.arange(len(self.panel.major_axis)))
self.assertTrue(result.major_axis.equals(exp_major))
result = Panel(data, items=self.panel.items,
major_axis=self.panel.major_axis,
minor_axis=self.panel.minor_axis)
assert_panel_equal(result, self.panel)
data['ItemC'] = self.panel['ItemC']
result = Panel(data)
assert_panel_equal(result, self.panel)
# corner, blow up
data['ItemB'] = data['ItemB'][:-1]
self.assertRaises(Exception, Panel, data)
data['ItemB'] = self.panel['ItemB'].values[:, :-1]
self.assertRaises(Exception, Panel, data)
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
self.assertTrue(list(p.items) == keys)
p = Panel.from_dict(d)
self.assertTrue(list(p.items) == keys)
def test_constructor_resize(self):
data = self.panel._data
items = self.panel.items[:-1]
major = self.panel.major_axis[:-1]
minor = self.panel.minor_axis[:-1]
result = Panel(data, items=items, major_axis=major, minor_axis=minor)
expected = self.panel.reindex(items=items, major=major, minor=minor)
assert_panel_equal(result, expected)
result = Panel(data, items=items, major_axis=major)
expected = self.panel.reindex(items=items, major=major)
assert_panel_equal(result, expected)
result = Panel(data, items=items)
expected = self.panel.reindex(items=items)
assert_panel_equal(result, expected)
result = Panel(data, minor_axis=minor)
expected = self.panel.reindex(minor=minor)
assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
self.assertEqual(panel['foo'].values.dtype, np.object_)
self.assertEqual(panel['A'].values.dtype, np.float64)
def test_constructor_error_msgs(self):
def testit():
Panel(np.random.randn(3, 4, 5), lrange(4), lrange(5), lrange(5))
        assertRaisesRegexp(ValueError,
                           r"Shape of passed values is \(3, 4, 5\), "
                           r"indices imply \(4, 5, 5\)",
                           testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(4), lrange(5))
        assertRaisesRegexp(ValueError,
                           r"Shape of passed values is \(3, 4, 5\), "
                           r"indices imply \(5, 4, 5\)",
                           testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(5), lrange(4))
        assertRaisesRegexp(ValueError,
                           r"Shape of passed values is \(3, 4, 5\), "
                           r"indices imply \(5, 5, 4\)",
                           testit)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert (conformed.index.equals(self.panel.major_axis))
assert (conformed.columns.equals(self.panel.minor_axis))
def test_convert_objects(self):
# GH 4937
p = Panel(dict(A=dict(a=['1', '1.0'])))
expected = Panel(dict(A=dict(a=[1, 1.0])))
result = p._convert(numeric=True, coerce=True)
assert_panel_equal(result, expected)
def test_dtypes(self):
result = self.panel.dtypes
expected = Series(np.dtype('float64'), index=self.panel.items)
assert_series_equal(result, expected)
def test_apply(self):
# GH1148
# ufunc
applied = self.panel.apply(np.sqrt)
self.assertTrue(assert_almost_equal(applied.values, np.sqrt(
self.panel.values)))
# ufunc same shape
result = self.panel.apply(lambda x: x * 2, axis='items')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='major_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
# reduction to DataFrame
result = self.panel.apply(lambda x: x.dtype, axis='items')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.minor_axis)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.minor_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
# reductions via other dims
expected = self.panel.sum(0)
result = self.panel.apply(lambda x: x.sum(), axis='items')
assert_frame_equal(result, expected)
expected = self.panel.sum(1)
result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
assert_frame_equal(result, expected)
expected = self.panel.sum(2)
result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
assert_frame_equal(result, expected)
# pass kwargs
result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
expected = self.panel.sum(0) + 5
assert_frame_equal(result, expected)
def test_apply_slabs(self):
# same shape as original
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'major_axis'])
expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'minor_axis'])
expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'minor_axis'])
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'major_axis'])
assert_panel_equal(result, expected)
# reductions
result = self.panel.apply(lambda x: x.sum(0), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(1).T
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.sum(1), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(0)
assert_frame_equal(result, expected)
# transforms
f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
# make sure that we don't trigger any warnings
with tm.assert_produces_warning(False):
result = self.panel.apply(f, axis=['items', 'major_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[:, :, ax]))
for ax in self.panel.minor_axis]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[ax]))
for ax in self.panel.items]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['minor_axis', 'items'])
expected = Panel(dict([(ax, f(self.panel.loc[:, ax]))
for ax in self.panel.major_axis]))
assert_panel_equal(result, expected)
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
        # on Windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
        # raise an exception when both major and major_axis are passed
self.assertRaises(Exception, self.panel.reindex, major_axis=new_major,
major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
# this ok
result = self.panel.reindex()
assert_panel_equal(result, self.panel)
self.assertFalse(result is self.panel)
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis, method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# don't necessarily copy
result = self.panel.reindex(major=self.panel.major_axis, copy=False)
assert_panel_equal(result, self.panel)
self.assertTrue(result is self.panel)
def test_reindex_multi(self):
# with and without copy full reindexing
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
self.assertIs(result.items, self.panel.items)
self.assertIs(result.major_axis, self.panel.major_axis)
self.assertIs(result.minor_axis, self.panel.minor_axis)
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
assert_panel_equal(result, self.panel)
# multi-axis indexing consistency
# GH 5900
df = DataFrame(np.random.randn(4, 3))
p = Panel({'Item1': df})
expected = Panel({'Item1': df})
expected['Item2'] = np.nan
items = ['Item1', 'Item2']
major_axis = np.arange(4)
minor_axis = np.arange(3)
results = []
results.append(p.reindex(items=items, major_axis=major_axis,
copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
copy=False))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=True))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=False))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=False))
for i, r in enumerate(results):
assert_panel_equal(expected, r)
def test_reindex_like(self):
# reindex_like
smaller = self.panel.reindex(items=self.panel.items[:-1],
major=self.panel.major_axis[:-1],
minor=self.panel.minor_axis[:-1])
smaller_like = self.panel.reindex_like(smaller)
assert_panel_equal(smaller, smaller_like)
def test_take(self):
# axis == 0
result = self.panel.take([2, 0, 1], axis=0)
expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
assert_panel_equal(result, expected)
# axis >= 1
result = self.panel.take([3, 0, 1, 2], axis=2)
expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
assert_panel_equal(result, expected)
        # negative indices are OK
expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
result = self.panel.take([3, -1, 1, 2], axis=2)
assert_panel_equal(result, expected)
self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
import random
ritems = list(self.panel.items)
rmajor = list(self.panel.major_axis)
rminor = list(self.panel.minor_axis)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0)
assert_panel_equal(sorted_panel, self.panel)
# descending
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0, ascending=False)
assert_panel_equal(sorted_panel,
self.panel.reindex(items=self.panel.items[::-1]))
random_order = self.panel.reindex(major=rmajor)
sorted_panel = random_order.sort_index(axis=1)
assert_panel_equal(sorted_panel, self.panel)
random_order = self.panel.reindex(minor=rminor)
sorted_panel = random_order.sort_index(axis=2)
assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
filled = self.panel.fillna(0)
self.assertTrue(np.isfinite(filled.values).all())
filled = self.panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
self.panel['ItemA'].fillna(method='backfill'))
panel = self.panel.copy()
panel['str'] = 'foo'
filled = panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
panel['ItemA'].fillna(method='backfill'))
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
self.assertRaises(ValueError, self.panel.fillna)
self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
self.assertRaises(TypeError, self.panel.fillna, [1, 2])
self.assertRaises(TypeError, self.panel.fillna, (1, 2))
# limit not implemented when only value is specified
p = Panel(np.random.randn(3, 4, 5))
p.iloc[0:2, 0:2, 0:2] = np.nan
self.assertRaises(NotImplementedError, lambda: p.fillna(999, limit=1))
def test_ffill_bfill(self):
assert_panel_equal(self.panel.ffill(),
self.panel.fillna(method='ffill'))
assert_panel_equal(self.panel.bfill(),
self.panel.fillna(method='bfill'))
def test_truncate_fillna_bug(self):
# #1823
result = self.panel.truncate(before=None, after=None, axis='items')
# it works!
result.fillna(value=0.0)
def test_swapaxes(self):
result = self.panel.swapaxes('items', 'minor')
self.assertIs(result.items, self.panel.minor_axis)
result = self.panel.swapaxes('items', 'major')
self.assertIs(result.items, self.panel.major_axis)
result = self.panel.swapaxes('major', 'minor')
self.assertIs(result.major_axis, self.panel.minor_axis)
panel = self.panel.copy()
result = panel.swapaxes('major', 'minor')
panel.values[0, 0, 1] = np.nan
expected = panel.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
# this should also work
result = self.panel.swapaxes(0, 1)
self.assertIs(result.items, self.panel.major_axis)
        # this works, but returns a copy
result = self.panel.swapaxes('items', 'items')
assert_panel_equal(self.panel, result)
self.assertNotEqual(id(self.panel), id(result))
def test_transpose(self):
result = self.panel.transpose('minor', 'major', 'items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test kwargs
result = self.panel.transpose(items='minor', major='major',
minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
        # test mixture of args
result = self.panel.transpose('minor', major='major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# duplicate axes
with tm.assertRaisesRegexp(TypeError,
'not enough/duplicate arguments'):
self.panel.transpose('minor', maj='major', minor='items')
with tm.assertRaisesRegexp(ValueError, 'repeated axis in transpose'):
self.panel.transpose('minor', 'major', major='minor',
minor='items')
result = self.panel.transpose(2, 1, 0)
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'items', 'major')
expected = self.panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose(2, 0, 1)
assert_panel_equal(result, expected)
self.assertRaises(ValueError, self.panel.transpose, 0, 0, 1)
def test_transpose_copy(self):
panel = self.panel.copy()
result = panel.transpose(2, 0, 1, copy=True)
expected = panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
panel.values[0, 1, 1] = np.nan
self.assertTrue(notnull(result.values[1, 0, 1]))
@ignore_sparse_panel_future_warning
def test_to_frame(self):
# filtered
filtered = self.panel.to_frame()
expected = self.panel.to_frame().dropna(how='any')
assert_frame_equal(filtered, expected)
# unfiltered
unfiltered = self.panel.to_frame(filter_observations=False)
assert_panel_equal(unfiltered.to_panel(), self.panel)
# names
self.assertEqual(unfiltered.index.names, ('major', 'minor'))
# unsorted, round trip
df = self.panel.to_frame(filter_observations=False)
unsorted = df.take(np.random.permutation(len(df)))
pan = unsorted.to_panel()
assert_panel_equal(pan, self.panel)
# preserve original index names
df = DataFrame(np.random.randn(6, 2),
index=[['a', 'a', 'b', 'b', 'c', 'c'],
[0, 1, 0, 1, 0, 1]],
columns=['one', 'two'])
df.index.names = ['foo', 'bar']
df.columns.name = 'baz'
rdf = df.to_panel().to_frame()
self.assertEqual(rdf.index.names, df.index.names)
self.assertEqual(rdf.columns.names, df.columns.names)
def test_to_frame_mixed(self):
panel = self.panel.fillna(0)
panel['str'] = 'foo'
panel['bool'] = panel['ItemA'] > 0
lp = panel.to_frame()
wp = lp.to_panel()
self.assertEqual(wp['bool'].values.dtype, np.bool_)
# Previously, this was mutating the underlying index and changing its
# name
assert_frame_equal(wp['bool'], panel['bool'], check_names=False)
# GH 8704
# with categorical
df = panel.to_frame()
df['category'] = df['str'].astype('category')
# to_panel
# TODO: this converts back to object
p = df.to_panel()
expected = panel.copy()
expected['category'] = 'foo'
assert_panel_equal(p, expected)
def test_to_frame_multi_major(self):
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
expected_idx = MultiIndex.from_tuples(
[
(1, 'one', 'A'), (1, 'one', 'B'),
(1, 'one', 'C'), (1, 'two', 'A'),
(1, 'two', 'B'), (1, 'two', 'C'),
(2, 'one', 'A'), (2, 'one', 'B'),
(2, 'one', 'C'), (2, 'two', 'A'),
(2, 'two', 'B'), (2, 'two', 'C')
],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
'c', 1, 4, 'd', 1],
'i2': [1, 'a', 1, 2, 'b',
1, 3, 'c', 1, 4, 'd', 1]},
index=expected_idx)
result = wp.to_frame()
assert_frame_equal(result, expected)
wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
result = wp.to_frame()
assert_frame_equal(result, expected[1:])
idx = MultiIndex.from_tuples([(1, 'two'), (1, 'one'), (2, 'one'), (
np.nan, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
(1, 'two', 'C'),
(1, 'one', 'A'),
(1, 'one', 'B'),
(1, 'one', 'C'),
(2, 'one', 'A'),
(2, 'one', 'B'),
(2, 'one', 'C'),
(np.nan, 'two', 'A'),
(np.nan, 'two', 'B'),
(np.nan, 'two', 'C')],
names=[None, None, 'minor'])
expected.index = ex_idx
result = wp.to_frame()
assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two'), (3, 'three'), (4, 'four')])
df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
['a', 'b', 'w', 'x'],
['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
[-5, -6, -7, -8]], columns=cols, index=idx)
wp = Panel({'i1': df, 'i2': df})
exp_idx = MultiIndex.from_tuples(
[(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
(1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
(1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
(1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
(2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
(2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
(2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
(2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
(3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
(3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
(4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
(4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
names=[None, None, None, None])
exp_val = [[1, 1], [2, 2], [11, 11], [12, 12], [3, 3], [4, 4],
[13, 13], [14, 14], ['a', 'a'], ['b', 'b'], ['w', 'w'],
['x', 'x'], ['c', 'c'], ['d', 'd'], ['y', 'y'], ['z', 'z'],
[-1, -1], [-2, -2], [-3, -3], [-4, -4], [-5, -5], [-6, -6],
[-7, -7], [-8, -8]]
result = wp.to_frame()
expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
assert_frame_equal(result, expected)
def test_to_frame_multi_drop_level(self):
idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
wp = Panel({'i1': df, 'i2': df})
result = wp.to_frame()
exp_idx = MultiIndex.from_tuples([(2, 'one', 'A'), (2, 'two', 'A')],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
assert_frame_equal(result, expected)
def test_to_panel_na_handling(self):
df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 2, 3, 4, 5, 2, 3, 4, 5]])
panel = df.to_panel()
self.assertTrue(isnull(panel[0].ix[1, [0, 1]]).all())
def test_to_panel_duplicates(self):
# #2441
df = DataFrame({'a': [0, 0, 1], 'b': [1, 1, 1], 'c': [1, 2, 3]})
idf = df.set_index(['a', 'b'])
assertRaisesRegexp(ValueError, 'non-uniquely indexed', idf.to_panel)
def test_panel_dups(self):
# GH 4960
# duplicates in an index
# items
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, items=list("ABCDE"))
panel = Panel(data, items=list("AACDE"))
expected = no_dup_panel['A']
result = panel.iloc[0]
assert_frame_equal(result, expected)
expected = no_dup_panel['E']
result = panel.loc['E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[['A', 'B']]
expected.items = ['A', 'A']
result = panel.loc['A']
assert_panel_equal(result, expected)
# major
data = np.random.randn(5, 5, 5)
no_dup_panel = Panel(data, major_axis=list("ABCDE"))
panel = Panel(data, major_axis=list("AACDE"))
expected = no_dup_panel.loc[:, 'A']
result = panel.iloc[:, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, 'E']
result = panel.loc[:, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, ['A', 'B']]
expected.major_axis = ['A', 'A']
result = panel.loc[:, 'A']
assert_panel_equal(result, expected)
# minor
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
panel = Panel(data, minor_axis=list("AACDE"))
expected = no_dup_panel.loc[:, :, 'A']
result = panel.iloc[:, :, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, 'E']
result = panel.loc[:, :, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, ['A', 'B']]
expected.minor_axis = ['A', 'A']
result = panel.loc[:, :, 'A']
assert_panel_equal(result, expected)
def test_filter(self):
pass
def test_compound(self):
compounded = self.panel.compound()
assert_series_equal(compounded['ItemA'],
(1 + self.panel['ItemA']).product(0) - 1,
check_names=False)
def test_shift(self):
# major
idx = self.panel.major_axis[0]
idx_lag = self.panel.major_axis[1]
shifted = self.panel.shift(1)
assert_frame_equal(self.panel.major_xs(idx), shifted.major_xs(idx_lag))
# minor
idx = self.panel.minor_axis[0]
idx_lag = self.panel.minor_axis[1]
shifted = self.panel.shift(1, axis='minor')
assert_frame_equal(self.panel.minor_xs(idx), shifted.minor_xs(idx_lag))
# items
idx = self.panel.items[0]
idx_lag = self.panel.items[1]
shifted = self.panel.shift(1, axis='items')
assert_frame_equal(self.panel[idx], shifted[idx_lag])
# negative numbers, #2164
result = self.panel.shift(-1)
expected = Panel(dict((i, f.shift(-1)[:-1])
for i, f in self.panel.iteritems()))
assert_panel_equal(result, expected)
# mixed dtypes #6959
data = [('item ' + ch, makeMixedDataFrame()) for ch in list('abcde')]
data = dict(data)
mixed_panel = Panel.from_dict(data, orient='minor')
shifted = mixed_panel.shift(1)
assert_series_equal(mixed_panel.dtypes, shifted.dtypes)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodPanel()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_panel_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=bday)
assert_panel_equal(shifted, shifted3)
assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
panel = _panel
shifted = panel.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(panel, unshifted)
shifted2 = panel.tshift(freq=panel.major_axis.freq)
assert_panel_equal(shifted, shifted2)
inferred_ts = Panel(panel.values, items=panel.items,
major_axis=Index(np.asarray(panel.major_axis)),
minor_axis=panel.minor_axis)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(shifted, panel.tshift(1))
assert_panel_equal(unshifted, inferred_ts)
no_freq = panel.ix[:, [0, 5, 7], :]
self.assertRaises(ValueError, no_freq.tshift)
def test_pct_change(self):
df1 = DataFrame({'c1': [1, 2, 5], 'c2': [3, 4, 6]})
df2 = df1 + 1
df3 = DataFrame({'c1': [3, 4, 7], 'c2': [5, 6, 8]})
wp = Panel({'i1': df1, 'i2': df2, 'i3': df3})
# major, 1
result = wp.pct_change() # axis='major'
expected = Panel({'i1': df1.pct_change(),
'i2': df2.pct_change(),
'i3': df3.pct_change()})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=1)
assert_panel_equal(result, expected)
# major, 2
result = wp.pct_change(periods=2)
expected = Panel({'i1': df1.pct_change(2),
'i2': df2.pct_change(2),
'i3': df3.pct_change(2)})
assert_panel_equal(result, expected)
# minor, 1
result = wp.pct_change(axis='minor')
expected = Panel({'i1': df1.pct_change(axis=1),
'i2': df2.pct_change(axis=1),
'i3': df3.pct_change(axis=1)})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=2)
assert_panel_equal(result, expected)
# minor, 2
result = wp.pct_change(periods=2, axis='minor')
expected = Panel({'i1': df1.pct_change(periods=2, axis=1),
'i2': df2.pct_change(periods=2, axis=1),
'i3': df3.pct_change(periods=2, axis=1)})
assert_panel_equal(result, expected)
# items, 1
result = wp.pct_change(axis='items')
expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [1, 0.5, .2],
'c2': [1. / 3, 0.25, 1. / 6]}),
'i3': DataFrame({'c1': [.5, 1. / 3, 1. / 6],
'c2': [.25, .2, 1. / 7]})})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=0)
assert_panel_equal(result, expected)
# items, 2
result = wp.pct_change(periods=2, axis='items')
expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i3': DataFrame({'c1': [2, 1, .4],
'c2': [2. / 3, .5, 1. / 3]})})
assert_panel_equal(result, expected)
def test_round(self):
values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
[-1566.213, 88.88], [-12, 94.5]],
[[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
[272.212, -99.99], [23, -76.5]]]
evalues = [[[float(np.around(i)) for i in j] for j in k]
for k in values]
p = Panel(values, items=['Item1', 'Item2'],
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
expected = Panel(evalues, items=['Item1', 'Item2'],
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
result = p.round()
self.assert_panel_equal(expected, result)
def test_multiindex_get(self):
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)],
names=['first', 'second'])
wp = Panel(np.random.random((4, 5, 5)),
items=ind,
major_axis=np.arange(5),
minor_axis=np.arange(5))
f1 = wp['a']
f2 = wp.ix['a']
assert_panel_equal(f1, f2)
self.assertTrue((f1.items == [1, 2]).all())
self.assertTrue((f2.items == [1, 2]).all())
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
def test_multiindex_blocks(self):
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
wp = Panel(self.panel._data)
wp.items = ind
f1 = wp['a']
self.assertTrue((f1.items == [1, 2]).all())
f1 = wp[('b', 1)]
self.assertTrue((f1.columns == ['A', 'B', 'C', 'D']).all())
def test_repr_empty(self):
empty = Panel()
repr(empty)
def test_rename(self):
mapper = {'ItemA': 'foo', 'ItemB': 'bar', 'ItemC': 'baz'}
renamed = self.panel.rename_axis(mapper, axis=0)
exp = Index(['foo', 'bar', 'baz'])
self.assertTrue(renamed.items.equals(exp))
renamed = self.panel.rename_axis(str.lower, axis=2)
exp = Index(['a', 'b', 'c', 'd'])
self.assertTrue(renamed.minor_axis.equals(exp))
# don't copy
renamed_nocopy = self.panel.rename_axis(mapper, axis=0, copy=False)
renamed_nocopy['foo'] = 3.
self.assertTrue((self.panel['ItemA'].values == 3).all())
def test_get_attr(self):
assert_frame_equal(self.panel['ItemA'], self.panel.ItemA)
# specific cases from #3440
self.panel['a'] = self.panel['ItemA']
assert_frame_equal(self.panel['a'], self.panel.a)
self.panel['i'] = self.panel['ItemA']
assert_frame_equal(self.panel['i'], self.panel.i)
def test_from_frame_level1_unsorted(self):
tuples = [('MSFT', 3), ('MSFT', 2), ('AAPL', 2), ('AAPL', 1),
('MSFT', 1)]
midx = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.rand(5, 4), index=midx)
p = df.to_panel()
assert_frame_equal(p.minor_xs(2), df.xs(2, level=1).sort_index())
def test_to_excel(self):
try:
import xlwt # noqa
import xlrd # noqa
import openpyxl # noqa
from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest("need xlwt xlrd openpyxl")
for ext in ['xls', 'xlsx']:
path = '__tmp__.' + ext
with ensure_clean(path) as path:
self.panel.to_excel(path)
try:
reader = ExcelFile(path)
except ImportError:
raise nose.SkipTest("need xlwt xlrd openpyxl")
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_to_excel_xlsxwriter(self):
try:
import xlrd # noqa
import xlsxwriter # noqa
from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest("Requires xlrd and xlsxwriter. Skipping test.")
path = '__tmp__.xlsx'
with ensure_clean(path) as path:
self.panel.to_excel(path, engine='xlsxwriter')
try:
reader = ExcelFile(path)
except ImportError as e:
raise nose.SkipTest("cannot write excel file: %s" % e)
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_dropna(self):
p = Panel(np.random.randn(4, 5, 6), major_axis=list('abcde'))
p.ix[:, ['b', 'd'], 0] = np.nan
result = p.dropna(axis=1)
exp = p.ix[:, ['a', 'c', 'e'], :]
assert_panel_equal(result, exp)
inp = p.copy()
inp.dropna(axis=1, inplace=True)
assert_panel_equal(inp, exp)
result = p.dropna(axis=1, how='all')
assert_panel_equal(result, p)
p.ix[:, ['b', 'd'], :] = np.nan
result = p.dropna(axis=1, how='all')
exp = p.ix[:, ['a', 'c', 'e'], :]
assert_panel_equal(result, exp)
p = Panel(np.random.randn(4, 5, 6), items=list('abcd'))
p.ix[['b'], :, 0] = np.nan
result = p.dropna()
exp = p.ix[['a', 'c', 'd']]
assert_panel_equal(result, exp)
result = p.dropna(how='all')
assert_panel_equal(result, p)
p.ix['b'] = np.nan
result = p.dropna(how='all')
exp = p.ix[['a', 'c', 'd']]
assert_panel_equal(result, exp)
def test_drop(self):
df = DataFrame({"A": [1, 2], "B": [3, 4]})
panel = Panel({"One": df, "Two": df})
def check_drop(drop_val, axis_number, aliases, expected):
try:
actual = panel.drop(drop_val, axis=axis_number)
assert_panel_equal(actual, expected)
for alias in aliases:
actual = panel.drop(drop_val, axis=alias)
assert_panel_equal(actual, expected)
except AssertionError:
com.pprint_thing("Failed with axis_number %d and aliases: %s" %
(axis_number, aliases))
raise
# Items
expected = Panel({"One": df})
check_drop('Two', 0, ['items'], expected)
self.assertRaises(ValueError, panel.drop, 'Three')
# errors = 'ignore'
dropped = panel.drop('Three', errors='ignore')
        assert_panel_equal(dropped, panel)
#coding:utf-8
import json
import pandas as pd
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn import svm


def road_json(path='json/analyzeTarget.json'):
    '''
    Read JSON-formatted data from `path` and return it.
    '''
    f = open(path, 'r')
    jsonData = json.load(f)
    f.close()
    return jsonData


def clsify_data(learn_path='res/learn.csv',
                input_path='res/input.csv'):
    '''
    Load the CSV files and classify the input data with Naive Bayes,
    based on the learning (training) data.
    '''
    # load the training data
    df_learn = pd.read_csv(learn_path, encoding="SHIFT-JIS")
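    # NOTE: the original script is truncated at this point. The lines below
    # are only a hedged sketch of how the classification might be completed,
    # not the original code. They assume the last column of learn.csv holds
    # the class label and that input.csv contains only feature columns.
    df_input = pd.read_csv(input_path, encoding="SHIFT-JIS")
    X_learn = df_learn.iloc[:, :-1].values  # assumed feature columns
    y_learn = df_learn.iloc[:, -1].values   # assumed label column
    clf = GaussianNB()
    clf.fit(X_learn, y_learn)
    # return the predicted class labels for the input rows
    return clf.predict(df_input.values)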
import pandas as pd
import sys
job_df = pd.read_csv(sys.argv[1])
my_index = pd.MultiIndex(levels=[[], []], codes=[[], []],
                         names=[u'labels', u'path_idx'])